[net-fixes] fix issues with local peer address (fix #761)

Alex 2024-03-21 10:45:34 +01:00
parent 5225a81dee
commit 961b4f9af3
Signed by untrusted user: lx
GPG key ID: 0E496D15096376BE
5 changed files with 100 additions and 113 deletions


@@ -27,7 +27,7 @@ pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<
i.id,
NodeResp {
id: hex::encode(i.id),
addr: Some(i.addr),
addr: i.addr,
hostname: i.status.hostname,
is_up: i.is_up,
last_seen_secs_ago: i.last_seen_secs_ago,
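
A minimal sketch of why the `Some(...)` wrapper disappears in this hunk: `KnownNodeInfo::addr` itself becomes an `Option<SocketAddr>` in this commit, while the admin API's `NodeResp.addr` was already optional. Both structs are reduced here to hypothetical one-field versions:

use std::net::SocketAddr;

struct KnownNodeInfo {
    addr: Option<SocketAddr>, // was `addr: SocketAddr` before this commit
}

struct NodeResp {
    addr: Option<SocketAddr>, // already optional before this commit
}

fn to_node_resp(i: &KnownNodeInfo) -> NodeResp {
    // the Option now passes straight through; previously: `Some(i.addr)`
    NodeResp { addr: i.addr }
}

fn main() {
    let peer = KnownNodeInfo { addr: None };
    // a peer with no known address now simply carries None through
    assert!(to_node_resp(&peer).addr.is_none());
}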


@@ -57,6 +57,10 @@ pub async fn cmd_status(rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID) ->
vec!["ID\tHostname\tAddress\tTags\tZone\tCapacity\tDataAvail".to_string()];
for adv in status.iter().filter(|adv| adv.is_up) {
let host = adv.status.hostname.as_deref().unwrap_or("?");
let addr = match adv.addr {
Some(addr) => addr.to_string(),
None => "N/A".to_string(),
};
if let Some(NodeRoleV(Some(cfg))) = layout.current().roles.get(&adv.id) {
let data_avail = match &adv.status.data_disk_avail {
_ if cfg.capacity.is_none() => "N/A".into(),
@@ -71,7 +75,7 @@ pub async fn cmd_status(rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID) ->
"{id:?}\t{host}\t{addr}\t[{tags}]\t{zone}\t{capacity}\t{data_avail}",
id = adv.id,
host = host,
addr = adv.addr,
addr = addr,
tags = cfg.tags.join(","),
zone = cfg.zone,
capacity = cfg.capacity_string(),
@@ -91,7 +95,7 @@ pub async fn cmd_status(rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID) ->
"{id:?}\t{host}\t{addr}\t[{tags}]\t{zone}\tdraining metadata...",
id = adv.id,
host = host,
addr = adv.addr,
addr = addr,
tags = cfg.tags.join(","),
zone = cfg.zone,
));
@@ -104,7 +108,7 @@ pub async fn cmd_status(rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID) ->
"{id:?}\t{h}\t{addr}\t\t\t{new_role}",
id = adv.id,
h = host,
addr = adv.addr,
addr = addr,
new_role = new_role,
));
}
@@ -120,8 +124,7 @@ pub async fn cmd_status(rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID) ->
let tf = timeago::Formatter::new();
let mut drain_msg = false;
let mut failed_nodes =
vec!["ID\tHostname\tAddress\tTags\tZone\tCapacity\tLast seen".to_string()];
let mut failed_nodes = vec!["ID\tHostname\tTags\tZone\tCapacity\tLast seen".to_string()];
let mut listed = HashSet::new();
for ver in layout.versions.iter().rev() {
for (node, _, role) in ver.roles.items().iter() {
@@ -142,15 +145,14 @@ pub async fn cmd_status(rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID) ->
// Node is in a layout version, is not a gateway node, and is not up:
// it is in a failed state, add proper line to the output
let (host, addr, last_seen) = match adv {
let (host, last_seen) = match adv {
Some(adv) => (
adv.status.hostname.as_deref().unwrap_or("?"),
adv.addr.to_string(),
adv.last_seen_secs_ago
.map(|s| tf.convert(Duration::from_secs(s)))
.unwrap_or_else(|| "never seen".into()),
),
None => ("??", "??".into(), "never seen".into()),
None => ("??", "never seen".into()),
};
let capacity = if ver.version == layout.current().version {
cfg.capacity_string()
@@ -159,10 +161,9 @@ pub async fn cmd_status(rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID) ->
"draining metadata...".to_string()
};
failed_nodes.push(format!(
"{id:?}\t{host}\t{addr}\t[{tags}]\t{zone}\t{capacity}\t{last_seen}",
"{id:?}\t{host}\t[{tags}]\t{zone}\t{capacity}\t{last_seen}",
id = node,
host = host,
addr = addr,
tags = cfg.tags.join(","),
zone = cfg.zone,
capacity = capacity,
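
The `match` on `adv.addr` introduced above could equivalently use Option combinators; a small self-contained sketch of the same "N/A" fallback:

use std::net::SocketAddr;

fn render_addr(addr: Option<SocketAddr>) -> String {
    // same behavior as the match in cmd_status above:
    // print the address if known, else the "N/A" placeholder
    addr.map(|a| a.to_string()).unwrap_or_else(|| "N/A".to_string())
}

fn main() {
    let known: Option<SocketAddr> = "127.0.0.1:3901".parse().ok();
    assert_eq!(render_addr(known), "127.0.0.1:3901");
    assert_eq!(render_addr(None), "N/A");
}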


@@ -292,13 +292,7 @@ impl NetApp {
/// the other node with `Netapp::request`
pub async fn try_connect(self: Arc<Self>, ip: SocketAddr, id: NodeID) -> Result<(), Error> {
// Don't connect to ourself, we don't care
// but pretend we did
if id == self.id {
tokio::spawn(async move {
if let Some(h) = self.on_connected_handler.load().as_ref() {
h(id, ip, false);
}
});
return Ok(());
}
@@ -327,9 +321,15 @@ impl NetApp {
/// Close the outgoing connection we have to a node specified by its public key,
/// if such a connection is currently open.
pub fn disconnect(self: &Arc<Self>, id: &NodeID) {
// If id is ourself, we're not supposed to have a connection open
if *id != self.id {
let conn = self.client_conns.write().unwrap().remove(id);
// If id is ourself, we're not supposed to have a connection open
if *id == self.id {
// sanity check
assert!(conn.is_none(), "had a connection to local node");
return;
}
if let Some(c) = conn {
debug!(
"Closing connection to {} ({})",
@@ -337,14 +337,8 @@ impl NetApp {
c.remote_addr
);
c.close();
} else {
return;
}
}
// call on_disconnected_handler immediately, since the connection
// was removed
// (if id == self.id, we pretend we disconnected)
// call on_disconnected_handler immediately, since the connection was removed
let id = *id;
let self2 = self.clone();
tokio::spawn(async move {
@@ -353,6 +347,7 @@ impl NetApp {
}
});
}
}
// Called from conn.rs when an incoming connection is successfully established
// Registers the connection in our list of connections
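
The reordered `disconnect` above removes the map entry first, under the write lock, so the local-node branch can assert that no connection to ourself was ever registered. A simplified sketch of that ordering, with hypothetical stand-in types:

use std::collections::HashMap;
use std::sync::RwLock;

type NodeID = u64; // stand-in for the real public-key ID

struct App {
    id: NodeID,
    client_conns: RwLock<HashMap<NodeID, ()>>, // () stands in for a connection handle
}

impl App {
    fn disconnect(&self, id: &NodeID) {
        // take the entry out first, while holding the write lock
        let conn = self.client_conns.write().unwrap().remove(id);
        if *id == self.id {
            // sanity check: we never open a connection to ourself
            assert!(conn.is_none(), "had a connection to local node");
            return;
        }
        if let Some(_c) = conn {
            // close the connection here
        }
        // the on_disconnected handler can now run unconditionally:
        // whatever entry existed has been removed from the map
    }
}

fn main() {
    let app = App { id: 1, client_conns: RwLock::new(HashMap::new()) };
    app.disconnect(&1); // local node: a no-op, the assertion holds
}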


@@ -43,7 +43,7 @@ impl Message for PingMessage {
#[derive(Serialize, Deserialize)]
struct PeerListMessage {
pub list: Vec<(NodeID, SocketAddr)>,
pub list: Vec<(NodeID, Vec<SocketAddr>)>,
}
impl Message for PeerListMessage {
@@ -54,12 +54,8 @@ impl Message for PeerListMessage {
#[derive(Debug)]
struct PeerInfoInternal {
// addr is the currently connected address,
// or the last address we were connected to,
// or an arbitrary address some other peer gave us
addr: SocketAddr,
// all_addrs contains all of the addresses everyone gave us
all_addrs: Vec<SocketAddr>,
// known_addrs contains all of the addresses everyone gave us
known_addrs: Vec<SocketAddr>,
state: PeerConnState,
last_send_ping: Option<Instant>,
@@ -69,10 +65,9 @@ struct PeerInfoInternal {
}
impl PeerInfoInternal {
fn new(addr: SocketAddr, state: PeerConnState) -> Self {
fn new(state: PeerConnState, known_addr: Option<SocketAddr>) -> Self {
Self {
addr,
all_addrs: vec![addr],
known_addrs: known_addr.map(|x| vec![x]).unwrap_or_default(),
state,
last_send_ping: None,
last_seen: None,
@@ -81,8 +76,8 @@ impl PeerInfoInternal {
}
}
fn add_addr(&mut self, addr: SocketAddr) -> bool {
if !self.all_addrs.contains(&addr) {
self.all_addrs.push(addr);
if !self.known_addrs.contains(&addr) {
self.known_addrs.push(addr);
// If we are learning a new address for this node,
// we want to retry connecting
self.state = match self.state {
@@ -90,7 +85,7 @@ impl PeerInfoInternal {
PeerConnState::Waiting(_, _) | PeerConnState::Abandonned => {
PeerConnState::Waiting(0, Instant::now())
}
x @ (PeerConnState::Ourself | PeerConnState::Connected) => x,
x @ (PeerConnState::Ourself | PeerConnState::Connected { .. }) => x,
};
true
} else {
@@ -104,8 +99,6 @@ impl PeerInfoInternal {
pub struct PeerInfo {
/// The node's identifier (its public key)
pub id: NodeID,
/// The node's network address
pub addr: SocketAddr,
/// The current status of our connection to this node
pub state: PeerConnState,
/// The last time at which the node was seen
@@ -136,7 +129,7 @@ pub enum PeerConnState {
Ourself,
/// We currently have a connection to this peer
Connected,
Connected { addr: SocketAddr },
/// Our next connection tentative (the nth, where n is the first value of the tuple)
/// will be at given Instant
@@ -152,7 +145,7 @@ pub enum PeerConnState {
impl PeerConnState {
/// Returns true if we can currently send requests to this peer
pub fn is_up(&self) -> bool {
matches!(self, Self::Ourself | Self::Connected)
matches!(self, Self::Ourself | Self::Connected { .. })
}
}
@@ -192,11 +185,11 @@ impl KnownHosts {
}
self.hash = hash_state.finalize();
}
fn connected_peers_vec(&self) -> Vec<(NodeID, SocketAddr)> {
fn connected_peers_vec(&self) -> Vec<(NodeID, Vec<SocketAddr>)> {
self.list
.iter()
.filter(|(_, peer)| peer.state.is_up())
.map(|(id, peer)| (*id, peer.addr))
.map(|(id, peer)| (*id, peer.known_addrs.clone()))
.collect::<Vec<_>>()
}
}
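
`PeerConnState::Connected` now carries the address in use, so every match on the enum needs the struct-variant syntax seen in the hunks above. A compact sketch of the updated variants and the `is_up` test (trimmed: the `Trying` variant is omitted):

use std::net::SocketAddr;
use std::time::Instant;

enum PeerConnState {
    Ourself,
    Connected { addr: SocketAddr },
    Waiting(usize, Instant),
    Abandonned, // spelled as in the codebase
}

impl PeerConnState {
    fn is_up(&self) -> bool {
        // `{ .. }` ignores the new field, mirroring `Connected { .. }` above
        matches!(self, Self::Ourself | Self::Connected { .. })
    }
}

fn main() {
    let up = PeerConnState::Connected { addr: "10.0.0.1:3901".parse().unwrap() };
    assert!(up.is_up());
    assert!(PeerConnState::Ourself.is_up());
    assert!(!PeerConnState::Waiting(0, Instant::now()).is_up());
    assert!(!PeerConnState::Abandonned.is_up());
}
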
@@ -231,18 +224,16 @@ impl PeeringManager {
if id != netapp.id {
known_hosts.list.insert(
id,
PeerInfoInternal::new(addr, PeerConnState::Waiting(0, Instant::now())),
PeerInfoInternal::new(PeerConnState::Waiting(0, Instant::now()), Some(addr)),
);
}
}
if let Some(addr) = our_addr {
known_hosts.list.insert(
netapp.id,
PeerInfoInternal::new(addr, PeerConnState::Ourself),
PeerInfoInternal::new(PeerConnState::Ourself, our_addr),
);
known_hosts.update_hash();
}
// TODO for v0.10 / v1.0 : rename the endpoint (it will break compatibility)
let strat = Arc::new(Self {
@@ -287,7 +278,7 @@ impl PeeringManager {
for (id, info) in known_hosts.list.iter() {
trace!("{}, {:?}", hex::encode(&id[..8]), info);
match info.state {
PeerConnState::Connected => {
PeerConnState::Connected { .. } => {
let must_ping = match info.last_send_ping {
None => true,
Some(t) => Instant::now() - t > PING_INTERVAL,
@@ -330,7 +321,7 @@ impl PeeringManager {
info!(
"Retrying connection to {} at {} ({})",
hex::encode(&id[..8]),
h.all_addrs
h.known_addrs
.iter()
.map(|x| format!("{}", x))
.collect::<Vec<_>>()
@@ -339,13 +330,8 @@ impl PeeringManager {
);
h.state = PeerConnState::Trying(i);
let alternate_addrs = h
.all_addrs
.iter()
.filter(|x| **x != h.addr)
.cloned()
.collect::<Vec<_>>();
tokio::spawn(self.clone().try_connect(id, h.addr, alternate_addrs));
let addresses = h.known_addrs.clone();
tokio::spawn(self.clone().try_connect(id, addresses));
}
}
}
@@ -373,27 +359,24 @@ impl PeeringManager {
fn update_public_peer_list(&self, known_hosts: &KnownHosts) {
let mut pub_peer_list = Vec::with_capacity(known_hosts.list.len());
for (id, info) in known_hosts.list.iter() {
if *id == self.netapp.id {
// sanity check
assert!(matches!(info.state, PeerConnState::Ourself));
}
let mut pings = info.ping.iter().cloned().collect::<Vec<_>>();
pings.sort();
if !pings.is_empty() {
pub_peer_list.push(PeerInfo {
id: *id,
addr: info.addr,
state: info.state,
last_seen: info.last_seen,
avg_ping: Some(
pings
.iter()
.fold(Duration::from_secs(0), |x, y| x + *y)
.div_f64(pings.len() as f64),
),
avg_ping: Some(pings.iter().sum::<Duration>().div_f64(pings.len() as f64)),
max_ping: pings.last().cloned(),
med_ping: Some(pings[pings.len() / 2]),
});
} else {
pub_peer_list.push(PeerInfo {
id: *id,
addr: info.addr,
state: info.state,
last_seen: info.last_seen,
avg_ping: None,
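
The hunk above also replaces a manual `fold` with `Iterator::sum`. Condensed into a standalone sketch, the statistics derived from the sorted ping samples are:

use std::time::Duration;

// avg, max and median of a set of ping samples, as in update_public_peer_list
fn ping_stats(mut pings: Vec<Duration>) -> Option<(Duration, Duration, Duration)> {
    if pings.is_empty() {
        return None; // no samples: all three PeerInfo fields stay None
    }
    pings.sort();
    let avg = pings.iter().sum::<Duration>().div_f64(pings.len() as f64);
    let max = *pings.last().unwrap();
    let med = pings[pings.len() / 2]; // upper median of the sorted samples
    Some((avg, max, med))
}

fn main() {
    let samples = vec![Duration::from_secs(3), Duration::from_secs(1), Duration::from_secs(2)];
    let (avg, max, med) = ping_stats(samples).unwrap();
    assert_eq!(avg, Duration::from_secs(2));
    assert_eq!(max, Duration::from_secs(3));
    assert_eq!(med, Duration::from_secs(2));
}
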
@@ -485,11 +468,12 @@ impl PeeringManager {
}
}
fn handle_peer_list(&self, list: &[(NodeID, SocketAddr)]) {
fn handle_peer_list(&self, list: &[(NodeID, Vec<SocketAddr>)]) {
let mut known_hosts = self.known_hosts.write().unwrap();
let mut changed = false;
for (id, addr) in list.iter() {
for (id, addrs) in list.iter() {
for addr in addrs.iter() {
if let Some(kh) = known_hosts.list.get_mut(id) {
if kh.add_addr(*addr) {
changed = true;
@@ -499,6 +483,7 @@
changed = true;
}
}
}
if changed {
known_hosts.update_hash();
@@ -506,15 +491,10 @@ impl PeeringManager {
}
}
async fn try_connect(
self: Arc<Self>,
id: NodeID,
default_addr: SocketAddr,
alternate_addrs: Vec<SocketAddr>,
) {
async fn try_connect(self: Arc<Self>, id: NodeID, addresses: Vec<SocketAddr>) {
let conn_addr = {
let mut ret = None;
for addr in [default_addr].iter().chain(alternate_addrs.iter()) {
for addr in addresses.iter() {
debug!("Trying address {} for peer {}", addr, hex::encode(&id[..8]));
match self.netapp.clone().try_connect(*addr, id).await {
Ok(()) => {
@@ -540,7 +520,7 @@ impl PeeringManager {
warn!(
"Could not connect to peer {} ({} addresses tried)",
hex::encode(&id[..8]),
1 + alternate_addrs.len()
addresses.len()
);
let mut known_hosts = self.known_hosts.write().unwrap();
if let Some(host) = known_hosts.list.get_mut(&id) {
@@ -560,6 +540,14 @@ impl PeeringManager {
}
fn on_connected(self: &Arc<Self>, id: NodeID, addr: SocketAddr, is_incoming: bool) {
if id == self.netapp.id {
// sanity check
panic!(
"on_connected from local node, id={:?}, addr={}, incoming={}",
id, addr, is_incoming
);
}
let mut known_hosts = self.known_hosts.write().unwrap();
if is_incoming {
if let Some(host) = known_hosts.list.get_mut(&id) {
@ -574,13 +562,13 @@ impl PeeringManager {
addr
);
if let Some(host) = known_hosts.list.get_mut(&id) {
host.state = PeerConnState::Connected;
host.addr = addr;
host.state = PeerConnState::Connected { addr };
host.add_addr(addr);
} else {
known_hosts
.list
.insert(id, PeerInfoInternal::new(addr, PeerConnState::Connected));
known_hosts.list.insert(
id,
PeerInfoInternal::new(PeerConnState::Connected { addr }, Some(addr)),
);
}
}
known_hosts.update_hash();
@@ -600,12 +588,8 @@ impl PeeringManager {
}
fn new_peer(&self, id: &NodeID, addr: SocketAddr) -> PeerInfoInternal {
let state = if *id == self.netapp.id {
PeerConnState::Ourself
} else {
PeerConnState::Waiting(0, Instant::now())
};
PeerInfoInternal::new(addr, state)
assert!(*id != self.netapp.id);
PeerInfoInternal::new(PeerConnState::Waiting(0, Instant::now()), Some(addr))
}
}


@@ -16,7 +16,7 @@ use tokio::sync::{watch, Notify};
use garage_net::endpoint::{Endpoint, EndpointHandler};
use garage_net::message::*;
use garage_net::peering::PeeringManager;
use garage_net::peering::{PeerConnState, PeeringManager};
use garage_net::util::parse_and_resolve_peer_addr_async;
use garage_net::{NetApp, NetworkKey, NodeID, NodeKey};
@@ -142,7 +142,7 @@ pub struct NodeStatus {
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct KnownNodeInfo {
pub id: Uuid,
pub addr: SocketAddr,
pub addr: Option<SocketAddr>,
pub is_up: bool,
pub last_seen_secs_ago: Option<u64>,
pub status: NodeStatus,
@@ -381,7 +381,11 @@ impl System {
.iter()
.map(|n| KnownNodeInfo {
id: n.id.into(),
addr: n.addr,
addr: match n.state {
PeerConnState::Ourself => self.rpc_public_addr,
PeerConnState::Connected { addr } => Some(addr),
_ => None,
},
is_up: n.is_up(),
last_seen_secs_ago: n
.last_seen
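
Stated as a standalone rule, the address reported for a known node is now derived from the connection state alone; `rpc_public_addr` below is the node's own configured public address, which may itself be unset. A reduced sketch (enum trimmed to the variants that matter):

use std::net::SocketAddr;

enum PeerConnState {
    Ourself,
    Connected { addr: SocketAddr },
    NotConnected, // stands in for Waiting / Trying / Abandonned
}

fn reported_addr(state: &PeerConnState, rpc_public_addr: Option<SocketAddr>) -> Option<SocketAddr> {
    match state {
        // our own entry: fall back to the configured public address, if any
        PeerConnState::Ourself => rpc_public_addr,
        // live connection: report the address actually in use
        PeerConnState::Connected { addr } => Some(*addr),
        // otherwise no address is authoritative, so none is reported
        PeerConnState::NotConnected => None,
    }
}

fn main() {
    assert_eq!(reported_addr(&PeerConnState::NotConnected, None), None);
    let pub_addr: Option<SocketAddr> = "[::1]:3901".parse().ok();
    assert_eq!(reported_addr(&PeerConnState::Ourself, pub_addr), pub_addr);
}
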
@@ -722,7 +726,10 @@
.peering
.get_peer_list()
.iter()
.map(|n| (n.id.into(), n.addr))
.filter_map(|n| match n.state {
PeerConnState::Connected { addr } => Some((n.id.into(), addr)),
_ => None,
})
.collect::<Vec<_>>();
// Before doing it, we read the current peer list file (if it exists)
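
With this filter, only peers that currently have a live connection contribute an entry to the persisted peer list, and the saved address is the one the connection actually uses. A condensed sketch of the selection, with hypothetical stand-in types:

use std::net::SocketAddr;

type Uuid = [u8; 32]; // stand-in for the real node id type

enum PeerConnState {
    Connected { addr: SocketAddr },
    NotConnected, // all other states, which contribute nothing
}

struct PeerInfo {
    id: Uuid,
    state: PeerConnState,
}

fn peers_to_save(peers: &[PeerInfo]) -> Vec<(Uuid, SocketAddr)> {
    peers
        .iter()
        .filter_map(|n| match n.state {
            // a live connection yields a (node id, address) pair for the file
            PeerConnState::Connected { addr } => Some((n.id, addr)),
            // Ourself, Waiting, Trying, Abandonned: skipped
            PeerConnState::NotConnected => None,
        })
        .collect()
}

fn main() {
    let peers = vec![PeerInfo { id: [0; 32], state: PeerConnState::NotConnected }];
    assert!(peers_to_save(&peers).is_empty());
}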