[next-0.10] cluster node status metrics: report nodes of all active layout versions
Some checks failed
ci/woodpecker/pr/debug Pipeline was successful
ci/woodpecker/push/debug Pipeline was successful
ci/woodpecker/deployment/release/2 Pipeline failed
ci/woodpecker/deployment/release/1 Pipeline failed
ci/woodpecker/deployment/debug Pipeline was successful
ci/woodpecker/deployment/release/4 Pipeline failed
ci/woodpecker/deployment/release/3 Pipeline failed
ci/woodpecker/deployment/publish unknown status
parent 643d1aabd8 · commit 75e591727d
1 changed file with 34 additions and 34 deletions
@@ -216,12 +216,12 @@ impl SystemMetrics {
 			.u64_value_observer("cluster_layout_node_connected", move |observer| {
 				let layout = system.cluster_layout();
 				let nodes = system.get_known_nodes();
-				for (id, _, config) in layout.current().roles.items().iter() {
-					if let Some(role) = &config.0 {
-						let mut kv = vec![
-							KeyValue::new("id", format!("{:?}", id)),
-							KeyValue::new("role_zone", role.zone.clone()),
-						];
+				for id in layout.all_nodes().iter() {
+					let mut kv = vec![KeyValue::new("id", format!("{:?}", id))];
+					if let Some(role) =
+						layout.current().roles.get(id).and_then(|r| r.0.as_ref())
+					{
+						kv.push(KeyValue::new("role_zone", role.zone.clone()));
 						match role.capacity {
 							Some(cap) => {
 								kv.push(KeyValue::new("role_capacity", cap as i64));
@@ -231,24 +231,24 @@ impl SystemMetrics {
 								kv.push(KeyValue::new("role_gateway", 1));
 							}
 						}
+					}
 
-						let value;
-						if let Some(node) = nodes.iter().find(|n| n.id == *id) {
-							value = if node.is_up { 1 } else { 0 };
-							// TODO: if we add address and hostname, and those change, we
-							// get duplicate metrics, due to bad otel aggregation :(
-							// Can probably be fixed when we upgrade opentelemetry
-							// kv.push(KeyValue::new("address", node.addr.to_string()));
-							// kv.push(KeyValue::new(
-							//     "hostname",
-							//     node.status.hostname.clone(),
-							// ));
-						} else {
-							value = 0;
-						}
+					let value;
+					if let Some(node) = nodes.iter().find(|n| n.id == *id) {
+						// TODO: if we add address and hostname, and those change, we
+						// get duplicate metrics, due to bad otel aggregation :(
+						// Can probably be fixed when we upgrade opentelemetry
+						// kv.push(KeyValue::new("address", node.addr.to_string()));
+						// kv.push(KeyValue::new(
+						//     "hostname",
+						//     node.status.hostname.clone(),
+						// ));
+						value = if node.is_up { 1 } else { 0 };
+					} else {
+						value = 0;
+					}
 
-						observer.observe(value, &kv);
-					}
+					observer.observe(value, &kv);
 				}
 			})
 			.with_description("Connection status for nodes in the cluster layout")
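Both metrics now follow the same pattern: iterate over every node that belongs to any active layout version, and attach role labels only if the current layout version still assigns that node a role, so nodes draining out under an older layout version keep reporting. Below is a minimal self-contained sketch of the connected metric; the types and the observe_connected helper are illustrative stand-ins, not Garage's actual ClusterLayout/known-nodes API.

use std::collections::HashMap;

// Illustrative stand-ins for Garage's layout and node-status types.
struct Role {
    zone: String,
}
struct Layout {
    all_nodes: Vec<u64>,               // nodes of all active layout versions
    current_roles: HashMap<u64, Role>, // role assignments in the current version only
}
struct KnownNode {
    id: u64,
    is_up: bool,
}

fn observe_connected(layout: &Layout, nodes: &[KnownNode]) {
    for id in &layout.all_nodes {
        // Every node gets an "id" label; role labels are optional, since a
        // draining node may no longer hold a role in the current version.
        let mut kv = vec![("id".to_string(), format!("{:?}", id))];
        if let Some(role) = layout.current_roles.get(id) {
            kv.push(("role_zone".to_string(), role.zone.clone()));
        }
        // A node absent from the known-node list counts as disconnected (0).
        let value = nodes
            .iter()
            .find(|n| n.id == *id)
            .map_or(0, |n| u64::from(n.is_up));
        println!("cluster_layout_node_connected{:?} = {}", kv, value);
    }
}

fn main() {
    let layout = Layout {
        all_nodes: vec![1, 2],
        current_roles: HashMap::from([(1, Role { zone: "dc1".to_string() })]),
    };
    // Node 2 has no role in the current version and was never seen: with the
    // new iteration it still shows up in the metrics, as a 0 data point.
    let nodes = vec![KnownNode { id: 1, is_up: true }];
    observe_connected(&layout, &nodes);
}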
@@ -260,12 +260,12 @@ impl SystemMetrics {
 			.u64_value_observer("cluster_layout_node_disconnected_time", move |observer| {
 				let layout = system.cluster_layout();
 				let nodes = system.get_known_nodes();
-				for (id, _, config) in layout.current().roles.items().iter() {
-					if let Some(role) = &config.0 {
-						let mut kv = vec![
-							KeyValue::new("id", format!("{:?}", id)),
-							KeyValue::new("role_zone", role.zone.clone()),
-						];
+				for id in layout.all_nodes().iter() {
+					let mut kv = vec![KeyValue::new("id", format!("{:?}", id))];
+					if let Some(role) =
+						layout.current().roles.get(id).and_then(|r| r.0.as_ref())
+					{
+						kv.push(KeyValue::new("role_zone", role.zone.clone()));
 						match role.capacity {
 							Some(cap) => {
 								kv.push(KeyValue::new("role_capacity", cap as i64));
@@ -275,19 +275,19 @@ impl SystemMetrics {
 								kv.push(KeyValue::new("role_gateway", 1));
 							}
 						}
+					}
 
-						if let Some(node) = nodes.iter().find(|n| n.id == *id) {
-							// TODO: see comment above
-							// kv.push(KeyValue::new("address", node.addr.to_string()));
-							// kv.push(KeyValue::new(
-							//     "hostname",
-							//     node.status.hostname.clone(),
-							// ));
-							if node.is_up {
-								observer.observe(0, &kv);
-							} else if let Some(secs) = node.last_seen_secs_ago {
-								observer.observe(secs, &kv);
-							}
-						}
-					}
+					if let Some(node) = nodes.iter().find(|n| n.id == *id) {
+						// TODO: see comment above
+						// kv.push(KeyValue::new("address", node.addr.to_string()));
+						// kv.push(KeyValue::new(
+						//     "hostname",
+						//     node.status.hostname.clone(),
+						// ));
+						if node.is_up {
+							observer.observe(0, &kv);
+						} else if let Some(secs) = node.last_seen_secs_ago {
+							observer.observe(secs, &kv);
+						}
+					}
 				}
 			})
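The disconnected-time observer uses the same iteration but a different rule at the observation step. Restated as a tiny standalone function (a hypothetical helper, not part of the patch):

// Sketch of the disconnected-time rule from the hunk above: an up node
// reports 0, a down node reports how long ago it was last seen, and a node
// never seen since startup produces no data point at all.
fn disconnected_secs(is_up: bool, last_seen_secs_ago: Option<u64>) -> Option<u64> {
    if is_up {
        Some(0)
    } else {
        last_seen_secs_ago
    }
}

fn main() {
    assert_eq!(disconnected_secs(true, Some(42)), Some(0));
    assert_eq!(disconnected_secs(false, Some(42)), Some(42));
    assert_eq!(disconnected_secs(false, None), None); // no observation
}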