// garage/src/rpc/system.rs

//! Module containing structs related to membership management
use std::collections::{HashMap, HashSet};
use std::io::{Read, Write};
use std::net::{IpAddr, SocketAddr};
use std::path::{Path, PathBuf};
use std::sync::atomic::Ordering;
use std::sync::{Arc, RwLock, RwLockReadGuard};
use std::time::{Duration, Instant};
use arc_swap::ArcSwap;
use async_trait::async_trait;
use futures::join;
use serde::{Deserialize, Serialize};
use sodiumoxide::crypto::sign::ed25519;
use tokio::select;
use tokio::sync::{watch, Notify};
use netapp::endpoint::{Endpoint, EndpointHandler};
use netapp::message::*;
use netapp::peering::fullmesh::FullMeshPeeringStrategy;
use netapp::util::parse_and_resolve_peer_addr_async;
use netapp::{NetApp, NetworkKey, NodeID, NodeKey};
#[cfg(feature = "kubernetes-discovery")]
use garage_util::config::KubernetesDiscoveryConfig;
use garage_util::config::{Config, DataDirEnum};
use garage_util::data::*;
use garage_util::error::*;
use garage_util::persister::Persister;
use garage_util::time::*;
#[cfg(feature = "consul-discovery")]
use crate::consul::ConsulDiscovery;
#[cfg(feature = "kubernetes-discovery")]
use crate::kubernetes::*;
use crate::layout::{
self, manager::LayoutManager, LayoutHelper, LayoutHistory, NodeRoleV, RpcLayoutDigest,
};
use crate::replication_mode::*;
use crate::rpc_helper::*;
use crate::system_metrics::*;
const DISCOVERY_INTERVAL: Duration = Duration::from_secs(60);
const STATUS_EXCHANGE_INTERVAL: Duration = Duration::from_secs(10);
/// Version tag used for version check upon Netapp connection.
/// Cluster nodes with different version tags are deemed
/// incompatible and will refuse to connect.
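/// (The tag value spells "garage" in ASCII, followed by the 0x000A version suffix.)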
pub const GARAGE_VERSION_TAG: u64 = 0x676172616765000A; // garage 0x000A
/// RPC endpoint used for calls related to membership
pub const SYSTEM_RPC_PATH: &str = "garage_rpc/system.rs/SystemRpc";
/// RPC messages related to membership
#[derive(Debug, Serialize, Deserialize, Clone)]
pub enum SystemRpc {
/// Response to successful advertisements
Ok,
/// Request to connect to a specific node (in <pubkey>@<host>:<port> format, pubkey = full-length node ID)
Connect(String),
/// Advertise Garage status. Answered with another AdvertiseStatus.
/// Exchanged with every node on a regular basis.
AdvertiseStatus(NodeStatus),
/// Get the states of known nodes
GetKnownNodes,
/// Return known nodes
ReturnKnownNodes(Vec<KnownNodeInfo>),
/// Ask another node for its cluster layout. Answered with AdvertiseClusterLayout
PullClusterLayout,
/// Advertisement of cluster layout. Sent spontaneously or in response to PullClusterLayout
AdvertiseClusterLayout(LayoutHistory),
/// Ask another node for its cluster layout update trackers.
PullClusterLayoutTrackers,
/// Advertisement of cluster layout update trackers.
AdvertiseClusterLayoutTrackers(layout::UpdateTrackers),
}
impl Rpc for SystemRpc {
type Response = Result<SystemRpc, Error>;
}
#[derive(Serialize, Deserialize)]
pub struct PeerList(Vec<(Uuid, SocketAddr)>);
impl garage_util::migrate::InitialFormat for PeerList {}
/// This node's membership manager
pub struct System {
/// The id of this node
pub id: Uuid,
persist_peer_list: Persister<PeerList>,
local_status: ArcSwap<NodeStatus>,
node_status: RwLock<HashMap<Uuid, (u64, NodeStatus)>>,
pub netapp: Arc<NetApp>,
fullmesh: Arc<FullMeshPeeringStrategy>,
pub(crate) system_endpoint: Arc<Endpoint<SystemRpc, System>>,
rpc_listen_addr: SocketAddr,
#[cfg(any(feature = "consul-discovery", feature = "kubernetes-discovery"))]
rpc_public_addr: Option<SocketAddr>,
bootstrap_peers: Vec<String>,
#[cfg(feature = "consul-discovery")]
consul_discovery: Option<ConsulDiscovery>,
#[cfg(feature = "kubernetes-discovery")]
kubernetes_discovery: Option<KubernetesDiscoveryConfig>,
pub layout_manager: Arc<LayoutManager>,
metrics: SystemMetrics,
replication_mode: ReplicationMode,
replication_factor: usize,
/// Path to metadata directory
pub metadata_dir: PathBuf,
/// Path to data directory
pub data_dir: DataDirEnum,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NodeStatus {
/// Hostname of the node
pub hostname: Option<String>,
/// Replication factor configured on the node
pub replication_factor: usize,
/// Cluster layout digest
pub layout_digest: RpcLayoutDigest,
/// Disk usage on partition containing metadata directory (tuple: `(avail, total)`)
#[serde(default)]
pub meta_disk_avail: Option<(u64, u64)>,
/// Disk usage on partition containing data directory (tuple: `(avail, total)`)
#[serde(default)]
pub data_disk_avail: Option<(u64, u64)>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct KnownNodeInfo {
pub id: Uuid,
pub addr: SocketAddr,
pub is_up: bool,
pub last_seen_secs_ago: Option<u64>,
pub status: NodeStatus,
}
#[derive(Debug, Clone, Copy)]
pub struct ClusterHealth {
/// The current health status of the cluster (see below)
pub status: ClusterHealthStatus,
/// Number of nodes already seen once in the cluster
pub known_nodes: usize,
/// Number of nodes currently connected
pub connected_nodes: usize,
/// Number of storage nodes declared in the current layout
pub storage_nodes: usize,
/// Number of storage nodes currently connected
pub storage_nodes_ok: usize,
/// Number of partitions in the layout
pub partitions: usize,
/// Number of partitions for which we have a quorum of connected nodes
pub partitions_quorum: usize,
/// Number of partitions for which all storage nodes are connected
pub partitions_all_ok: usize,
}
#[derive(Debug, Clone, Copy)]
pub enum ClusterHealthStatus {
/// All nodes are available
Healthy,
/// Some storage nodes are unavailable, but quorum is still
/// achieved for all partitions
Degraded,
/// Quorum is not available for some partitions
Unavailable,
}
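/// Read this node's public ID from the `node_key.pub` file in the metadata directory.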
pub fn read_node_id(metadata_dir: &Path) -> Result<NodeID, Error> {
let mut pubkey_file = metadata_dir.to_path_buf();
pubkey_file.push("node_key.pub");
let mut f = std::fs::File::open(pubkey_file.as_path())?;
let mut d = vec![];
f.read_to_end(&mut d)?;
if d.len() != 32 {
return Err(Error::Message("Corrupt node_key.pub file".to_string()));
}
let mut key = [0u8; 32];
key.copy_from_slice(&d[..]);
Ok(NodeID::from_slice(&key[..]).unwrap())
}
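/// Load this node's secret key from the `node_key` file in the metadata directory,
/// or generate a fresh ed25519 keypair (written to `node_key` with 0600 permissions,
/// plus `node_key.pub`) if no key exists yet.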
pub fn gen_node_key(metadata_dir: &Path) -> Result<NodeKey, Error> {
let mut key_file = metadata_dir.to_path_buf();
key_file.push("node_key");
if key_file.as_path().exists() {
let mut f = std::fs::File::open(key_file.as_path())?;
let mut d = vec![];
f.read_to_end(&mut d)?;
if d.len() != 64 {
return Err(Error::Message("Corrupt node_key file".to_string()));
}
let mut key = [0u8; 64];
key.copy_from_slice(&d[..]);
Ok(NodeKey::from_slice(&key[..]).unwrap())
} else {
if !metadata_dir.exists() {
info!("Metadata directory does not exist, creating it.");
std::fs::create_dir(metadata_dir)?;
}
info!("Generating new node key pair.");
let (pubkey, key) = ed25519::gen_keypair();
{
use std::os::unix::fs::PermissionsExt;
let mut f = std::fs::File::create(key_file.as_path())?;
let mut perm = f.metadata()?.permissions();
perm.set_mode(0o600);
std::fs::set_permissions(key_file.as_path(), perm)?;
f.write_all(&key[..])?;
}
{
let mut pubkey_file = metadata_dir.to_path_buf();
pubkey_file.push("node_key.pub");
let mut f2 = std::fs::File::create(pubkey_file.as_path())?;
f2.write_all(&pubkey[..])?;
}
Ok(key)
}
}
impl System {
/// Create this node's membership manager
pub fn new(
network_key: NetworkKey,
replication_mode: ReplicationMode,
config: &Config,
) -> Result<Arc<Self>, Error> {
// ---- setup netapp RPC protocol ----
let node_key =
gen_node_key(&config.metadata_dir).expect("Unable to read or generate node ID");
info!(
"Node ID of this node: {}",
hex::encode(&node_key.public_key()[..8])
);
let netapp = NetApp::new(GARAGE_VERSION_TAG, network_key, node_key);
let system_endpoint = netapp.endpoint(SYSTEM_RPC_PATH.into());
// ---- setup netapp public listener and full mesh peering strategy ----
let rpc_public_addr = get_rpc_public_addr(config);
if rpc_public_addr.is_none() {
warn!("This Garage node does not know its publicly reachable RPC address, this might hamper intra-cluster communication.");
}
let fullmesh = FullMeshPeeringStrategy::new(netapp.clone(), vec![], rpc_public_addr);
if let Some(ping_timeout) = config.rpc_ping_timeout_msec {
fullmesh.set_ping_timeout_millis(ping_timeout);
}
let persist_peer_list = Persister::new(&config.metadata_dir, "peer_list");
// ---- setup cluster layout and layout manager ----
let replication_factor = replication_mode.replication_factor();
let layout_manager = LayoutManager::new(
config,
netapp.id,
system_endpoint.clone(),
fullmesh.clone(),
replication_mode,
)?;
// ---- set up metrics and status exchange ----
let metrics = SystemMetrics::new(replication_factor);
let mut local_status = NodeStatus::initial(replication_factor, &layout_manager);
local_status.update_disk_usage(&config.metadata_dir, &config.data_dir, &metrics);
// ---- if enabled, set up additional peer discovery methods ----
#[cfg(feature = "consul-discovery")]
let consul_discovery = match &config.consul_discovery {
Some(cfg) => Some(
ConsulDiscovery::new(cfg.clone())
.ok_or_message("Invalid Consul discovery configuration")?,
),
None => None,
};
#[cfg(not(feature = "consul-discovery"))]
if config.consul_discovery.is_some() {
warn!("Consul discovery is not enabled in this build.");
}
#[cfg(not(feature = "kubernetes-discovery"))]
if config.kubernetes_discovery.is_some() {
warn!("Kubernetes discovery is not enabled in this build.");
}
// ---- done ----
let sys = Arc::new(System {
id: netapp.id.into(),
persist_peer_list,
local_status: ArcSwap::new(Arc::new(local_status)),
node_status: RwLock::new(HashMap::new()),
netapp: netapp.clone(),
fullmesh,
system_endpoint,
replication_mode,
replication_factor,
rpc_listen_addr: config.rpc_bind_addr,
#[cfg(any(feature = "consul-discovery", feature = "kubernetes-discovery"))]
rpc_public_addr,
bootstrap_peers: config.bootstrap_peers.clone(),
#[cfg(feature = "consul-discovery")]
consul_discovery,
#[cfg(feature = "kubernetes-discovery")]
kubernetes_discovery: config.kubernetes_discovery.clone(),
layout_manager,
metrics,
metadata_dir: config.metadata_dir.clone(),
data_dir: config.data_dir.clone(),
});
sys.system_endpoint.set_handler(sys.clone());
Ok(sys)
}
/// Perform bootstrapping, starting the ping loop
pub async fn run(self: Arc<Self>, must_exit: watch::Receiver<bool>) {
join!(
self.netapp
.clone()
.listen(self.rpc_listen_addr, None, must_exit.clone()),
self.fullmesh.clone().run(must_exit.clone()),
self.discovery_loop(must_exit.clone()),
self.status_exchange_loop(must_exit.clone()),
);
}
// ---- Public utilities / accessors ----
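/// Get a read lock on the current cluster layout, as managed by the layout manager.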
pub fn cluster_layout(&self) -> RwLockReadGuard<'_, LayoutHelper> {
self.layout_manager.layout()
}
pub fn layout_notify(&self) -> Arc<Notify> {
self.layout_manager.change_notify.clone()
}
pub fn rpc_helper(&self) -> &RpcHelper {
&self.layout_manager.rpc_helper
}
// ---- Administrative operations (directly available and
// also available through RPC) ----
pub fn get_known_nodes(&self) -> Vec<KnownNodeInfo> {
let node_status = self.node_status.read().unwrap();
let known_nodes = self
.fullmesh
.get_peer_list()
.iter()
.map(|n| KnownNodeInfo {
id: n.id.into(),
addr: n.addr,
is_up: n.is_up(),
last_seen_secs_ago: n
.last_seen
.map(|t| (Instant::now().saturating_duration_since(t)).as_secs()),
status: node_status
.get(&n.id.into())
.cloned()
.map(|(_, st)| st)
.unwrap_or_else(NodeStatus::unknown),
})
.collect::<Vec<_>>();
known_nodes
}
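/// Connect to a node given in `<full-length hex node ID>@<host>:<port>` format,
/// trying each resolved address in turn until one connection succeeds.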
pub async fn connect(&self, node: &str) -> Result<(), Error> {
let (pubkey, addrs) = parse_and_resolve_peer_addr_async(node)
.await
.ok_or_else(|| {
Error::Message(format!(
"Unable to parse or resolve node specification: {}",
node
))
})?;
let mut errors = vec![];
for addr in addrs.iter() {
match self.netapp.clone().try_connect(*addr, pubkey).await {
Ok(()) => return Ok(()),
Err(e) => {
errors.push((
*addr,
Error::Message(connect_error_message(*addr, pubkey, e)),
));
}
}
}
if errors.len() == 1 {
Err(Error::Message(errors[0].1.to_string()))
} else {
Err(Error::Message(format!("{:?}", errors)))
}
}
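/// Compute a summary of cluster health from the currently known/connected nodes
/// and the storage roles declared in the active layout versions.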
pub fn health(&self) -> ClusterHealth {
let quorum = self.replication_mode.write_quorum();
// Gather information about running nodes.
// Technically, `nodes` contains currently running nodes, as well
// as nodes that this Garage process has been connected to at least
// once since it started.
let nodes = self
.get_known_nodes()
.into_iter()
.map(|n| (n.id, n))
.collect::<HashMap<Uuid, _>>();
let connected_nodes = nodes.iter().filter(|(_, n)| n.is_up).count();
let node_up = |x: &Uuid| nodes.get(x).map(|n| n.is_up).unwrap_or(false);
// Acquire a rwlock read-lock to the current cluster layout
let layout = self.cluster_layout();
// Obtain information about nodes that have a role as storage nodes
// in one of the active layout versions
let mut storage_nodes = HashSet::<Uuid>::with_capacity(16);
for ver in layout.versions.iter() {
storage_nodes.extend(
ver.roles
.items()
.iter()
.filter(|(_, _, v)| matches!(v, NodeRoleV(Some(r)) if r.capacity.is_some()))
.map(|(n, _, _)| *n),
)
}
let storage_nodes_ok = storage_nodes.iter().filter(|x| node_up(x)).count();
// Determine the number of partitions that have:
// - a quorum of up nodes for all write sets (i.e. are available)
// - for which all nodes in all write sets are up (i.e. are fully healthy)
let partitions = layout.current().partitions().collect::<Vec<_>>();
let mut partitions_quorum = 0;
let mut partitions_all_ok = 0;
for (_, hash) in partitions.iter() {
let mut write_sets = layout
.versions
.iter()
.map(|x| x.nodes_of(hash, x.replication_factor));
let has_quorum = write_sets
.clone()
.all(|set| set.filter(|x| node_up(x)).count() >= quorum);
let all_ok = write_sets.all(|mut set| set.all(|x| node_up(&x)));
if has_quorum {
partitions_quorum += 1;
}
if all_ok {
partitions_all_ok += 1;
}
}
// Determine overall cluster status
let status =
if partitions_all_ok == partitions.len() && storage_nodes_ok == storage_nodes.len() {
ClusterHealthStatus::Healthy
} else if partitions_quorum == partitions.len() {
ClusterHealthStatus::Degraded
} else {
ClusterHealthStatus::Unavailable
};
ClusterHealth {
status,
known_nodes: nodes.len(),
connected_nodes,
storage_nodes: storage_nodes.len(),
storage_nodes_ok,
partitions: partitions.len(),
partitions_quorum,
partitions_all_ok,
}
}
// ---- INTERNALS ----
#[cfg(feature = "consul-discovery")]
async fn advertise_to_consul(self: Arc<Self>) {
let c = match &self.consul_discovery {
Some(c) => c,
_ => return,
};
let rpc_public_addr = match self.rpc_public_addr {
Some(addr) => addr,
None => {
warn!("Not advertising to Consul because rpc_public_addr is not defined in config file and could not be autodetected.");
return;
}
};
if let Err(e) = c
.publish_consul_service(
self.netapp.id,
&self.local_status.load_full().hostname.as_deref().unwrap(),
rpc_public_addr,
)
.await
{
error!("Error while publishing Consul service: {}", e);
}
}
#[cfg(feature = "kubernetes-discovery")]
async fn advertise_to_kubernetes(self: Arc<Self>) {
let k = match &self.kubernetes_discovery {
Some(k) => k,
_ => return,
};
let rpc_public_addr = match self.rpc_public_addr {
Some(addr) => addr,
None => {
warn!("Not advertising to Kubernetes because rpc_public_addr is not defined in config file and could not be autodetected.");
return;
}
};
if let Err(e) = publish_kubernetes_node(
k,
self.netapp.id,
&self.local_status.load_full().hostname.as_deref().unwrap(),
rpc_public_addr,
)
.await
{
error!("Error while publishing node to Kubernetes: {}", e);
}
}
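// Refresh this node's local status: re-read the layout digest and recompute
// disk usage statistics for the metadata and data directories.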
fn update_local_status(&self) {
let mut new_si: NodeStatus = self.local_status.load().as_ref().clone();
new_si.layout_digest = self.layout_manager.layout().digest();
new_si.update_disk_usage(&self.metadata_dir, &self.data_dir, &self.metrics);
self.local_status.swap(Arc::new(new_si));
}
// --- RPC HANDLERS ---
async fn handle_connect(&self, node: &str) -> Result<SystemRpc, Error> {
self.connect(node).await?;
Ok(SystemRpc::Ok)
}
fn handle_get_known_nodes(&self) -> SystemRpc {
let known_nodes = self.get_known_nodes();
SystemRpc::ReturnKnownNodes(known_nodes)
}
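// Handle a status advertisement from another node: verify that the replication
// factors are compatible, pass the layout digest on to the layout manager, and
// record the sender's status with a timestamp.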
async fn handle_advertise_status(
self: &Arc<Self>,
from: Uuid,
info: &NodeStatus,
) -> Result<SystemRpc, Error> {
let local_info = self.local_status.load();
if local_info.replication_factor < info.replication_factor {
error!("Some node have a higher replication factor ({}) than this one ({}). This is not supported and will lead to data corruption. Shutting down for safety.",
info.replication_factor,
local_info.replication_factor);
std::process::exit(1);
}
self.layout_manager
.handle_advertise_status(from, &info.layout_digest);
self.node_status
.write()
.unwrap()
.insert(from, (now_msec(), info.clone()));
Ok(SystemRpc::Ok)
}
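// Broadcast this node's status to all peers every STATUS_EXCHANGE_INTERVAL,
// until the stop signal is received.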
async fn status_exchange_loop(&self, mut stop_signal: watch::Receiver<bool>) {
while !*stop_signal.borrow() {
let restart_at = Instant::now() + STATUS_EXCHANGE_INTERVAL;
self.update_local_status();
let local_status: NodeStatus = self.local_status.load().as_ref().clone();
let _ = self
.rpc_helper()
.broadcast(
&self.system_endpoint,
SystemRpc::AdvertiseStatus(local_status),
RequestStrategy::with_priority(PRIO_HIGH)
.with_custom_timeout(STATUS_EXCHANGE_INTERVAL),
)
.await;
select! {
_ = tokio::time::sleep_until(restart_at.into()) => {},
_ = stop_signal.changed() => {},
}
}
}
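// Every DISCOVERY_INTERVAL, if the cluster is not fully connected (layout not
// configured, too few peers, or some expected nodes not up), try to (re)connect
// to peers gathered from the bootstrap list, the on-disk peer list and, if
// enabled, Consul/Kubernetes discovery; then persist the peer list and
// re-advertise this node to Consul/Kubernetes.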
async fn discovery_loop(self: &Arc<Self>, mut stop_signal: watch::Receiver<bool>) {
while !*stop_signal.borrow() {
let not_configured = self.cluster_layout().check().is_err();
let no_peers = self.fullmesh.get_peer_list().len() < self.replication_factor;
let expected_n_nodes = self.cluster_layout().all_nodes().len();
let bad_peers = self
.fullmesh
.get_peer_list()
.iter()
.filter(|p| p.is_up())
.count() != expected_n_nodes;
if not_configured || no_peers || bad_peers {
info!("Doing a bootstrap/discovery step (not_configured: {}, no_peers: {}, bad_peers: {})", not_configured, no_peers, bad_peers);
let mut ping_list = resolve_peers(&self.bootstrap_peers).await;
// Add peer list from list stored on disk
if let Ok(peers) = self.persist_peer_list.load_async().await {
ping_list.extend(peers.0.iter().map(|(id, addr)| ((*id).into(), *addr)))
}
// Fetch peer list from Consul
#[cfg(feature = "consul-discovery")]
if let Some(c) = &self.consul_discovery {
match c.get_consul_nodes().await {
Ok(node_list) => {
ping_list.extend(node_list);
}
Err(e) => {
warn!("Could not retrieve node list from Consul: {}", e);
}
}
}
// Fetch peer list from Kubernetes
#[cfg(feature = "kubernetes-discovery")]
if let Some(k) = &self.kubernetes_discovery {
if !k.skip_crd {
match create_kubernetes_crd().await {
Ok(()) => (),
Err(e) => {
error!("Failed to create kubernetes custom resource: {}", e)
}
};
}
match get_kubernetes_nodes(k).await {
Ok(node_list) => {
ping_list.extend(node_list);
}
Err(e) => {
warn!("Could not retrieve node list from Kubernetes: {}", e);
}
}
}
for (node_id, node_addr) in ping_list {
let self2 = self.clone();
tokio::spawn(async move {
if let Err(e) = self2.netapp.clone().try_connect(node_addr, node_id).await {
error!("{}", connect_error_message(node_addr, node_id, e));
}
});
}
}
if let Err(e) = self.save_peer_list().await {
warn!("Could not save peer list to file: {}", e);
}
#[cfg(feature = "consul-discovery")]
tokio::spawn(self.clone().advertise_to_consul());
#[cfg(feature = "kubernetes-discovery")]
tokio::spawn(self.clone().advertise_to_kubernetes());
select! {
_ = tokio::time::sleep(DISCOVERY_INTERVAL) => {},
_ = stop_signal.changed() => {},
}
}
}
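// Persist the current peer list to disk, merging it with previously saved
// entries so that no known peer ID is lost.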
async fn save_peer_list(&self) -> Result<(), Error> {
// Prepare new peer list to save to file
// It is a vec of tuples (node ID as Uuid, node SocketAddr)
let mut peer_list = self
.fullmesh
.get_peer_list()
.iter()
.map(|n| (n.id.into(), n.addr))
.collect::<Vec<_>>();
// Before doing it, we read the current peer list file (if it exists)
// and append it to the list we are about to save,
// so that no peer ID gets lost in the process.
if let Ok(mut prev_peer_list) = self.persist_peer_list.load_async().await {
prev_peer_list
.0
.retain(|(id, _ip)| peer_list.iter().all(|(id2, _ip2)| id2 != id));
peer_list.extend(prev_peer_list.0);
}
// Save new peer list to file
self.persist_peer_list
.save_async(&PeerList(peer_list))
.await
}
}
#[async_trait]
impl EndpointHandler<SystemRpc> for System {
async fn handle(self: &Arc<Self>, msg: &SystemRpc, from: NodeID) -> Result<SystemRpc, Error> {
match msg {
// ---- system functions -> System ----
SystemRpc::Connect(node) => self.handle_connect(node).await,
SystemRpc::AdvertiseStatus(adv) => self.handle_advertise_status(from.into(), adv).await,
SystemRpc::GetKnownNodes => Ok(self.handle_get_known_nodes()),
// ---- layout functions -> LayoutManager ----
SystemRpc::PullClusterLayout => Ok(self.layout_manager.handle_pull_cluster_layout()),
SystemRpc::AdvertiseClusterLayout(adv) => {
self.layout_manager
.handle_advertise_cluster_layout(adv)
.await
}
SystemRpc::PullClusterLayoutTrackers => {
Ok(self.layout_manager.handle_pull_cluster_layout_trackers())
}
SystemRpc::AdvertiseClusterLayoutTrackers(adv) => {
self.layout_manager
.handle_advertise_cluster_layout_trackers(adv)
.await
}
// ---- other -> Error ----
m => Err(Error::unexpected_rpc_message(m)),
2021-10-14 09:50:12 +00:00
}
}
}
impl NodeStatus {
fn initial(replication_factor: usize, layout_manager: &LayoutManager) -> Self {
NodeStatus {
hostname: Some(
gethostname::gethostname()
.into_string()
.unwrap_or_else(|_| "<invalid utf-8>".to_string()),
),
replication_factor,
layout_digest: layout_manager.layout().digest(),
meta_disk_avail: None,
data_disk_avail: None,
}
}
fn unknown() -> Self {
NodeStatus {
hostname: None,
replication_factor: 0,
layout_digest: Default::default(),
meta_disk_avail: None,
data_disk_avail: None,
}
}
fn update_disk_usage(
&mut self,
meta_dir: &Path,
data_dir: &DataDirEnum,
metrics: &SystemMetrics,
) {
use nix::sys::statvfs::statvfs;
let mount_avail = |path: &Path| match statvfs(path) {
Ok(x) => {
let avail = x.blocks_available() as u64 * x.fragment_size() as u64;
let total = x.blocks() as u64 * x.fragment_size() as u64;
Some((x.filesystem_id(), avail, total))
}
Err(_) => None,
};
self.meta_disk_avail = mount_avail(meta_dir).map(|(_, a, t)| (a, t));
self.data_disk_avail = match data_dir {
DataDirEnum::Single(dir) => mount_avail(dir).map(|(_, a, t)| (a, t)),
DataDirEnum::Multiple(dirs) => (|| {
// TODO: more precise calculation that takes into account
// how data is going to be spread among partitions
let mut mounts = HashMap::new();
for dir in dirs.iter() {
if dir.capacity.is_none() {
continue;
}
match mount_avail(&dir.path) {
Some((fsid, avail, total)) => {
mounts.insert(fsid, (avail, total));
}
None => return None,
}
}
Some(
mounts
.into_iter()
.fold((0, 0), |(x, y), (_, (a, b))| (x + a, y + b)),
)
})(),
};
if let Some((avail, total)) = self.meta_disk_avail {
metrics
.values
.meta_disk_avail
.store(avail, Ordering::Relaxed);
metrics
.values
.meta_disk_total
.store(total, Ordering::Relaxed);
}
if let Some((avail, total)) = self.data_disk_avail {
metrics
.values
.data_disk_avail
.store(avail, Ordering::Relaxed);
metrics
.values
.data_disk_total
.store(total, Ordering::Relaxed);
}
}
}
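/// Return the first IP address of the first non-loopback network interface
/// that is up, if any.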
fn get_default_ip() -> Option<IpAddr> {
pnet_datalink::interfaces()
.iter()
.find(|e| e.is_up() && !e.is_loopback() && !e.ips.is_empty())
.and_then(|e| e.ips.first())
.map(|a| a.ip())
}
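/// Determine the publicly announced RPC address: resolve `rpc_public_addr` from
/// the configuration if present, otherwise fall back to the autodetected default
/// IP combined with the RPC bind port.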
fn get_rpc_public_addr(config: &Config) -> Option<SocketAddr> {
match &config.rpc_public_addr {
Some(a_str) => {
use std::net::ToSocketAddrs;
match a_str.to_socket_addrs() {
Err(e) => {
error!(
"Cannot resolve rpc_public_addr {} from config file: {}.",
a_str, e
);
None
}
Ok(a) => {
let a = a.collect::<Vec<_>>();
if a.is_empty() {
error!("rpc_public_addr {} resolve to no known IP address", a_str);
}
if a.len() > 1 {
warn!("Multiple possible resolutions for rpc_public_addr: {:?}. Taking the first one.", a);
}
a.into_iter().next()
}
}
}
None => {
let addr = get_default_ip().map(|ip| SocketAddr::new(ip, config.rpc_bind_addr.port()));
if let Some(a) = addr {
warn!("Using autodetected rpc_public_addr: {}. Consider specifying it explicitly in configuration file if possible.", a);
}
addr
}
}
}
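/// Resolve a list of peer specifications (`<hex node ID>@<host>:<port>`) into
/// `(NodeID, SocketAddr)` pairs, skipping entries that cannot be parsed or resolved.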
async fn resolve_peers(peers: &[String]) -> Vec<(NodeID, SocketAddr)> {
let mut ret = vec![];
for peer in peers.iter() {
match parse_and_resolve_peer_addr_async(peer).await {
Some((pubkey, addrs)) => {
for ip in addrs {
ret.push((pubkey, ip));
}
}
None => {
warn!("Unable to parse and/or resolve peer hostname {}", peer);
}
}
}
ret
}
fn connect_error_message(
addr: SocketAddr,
pubkey: ed25519::PublicKey,
e: netapp::error::Error,
) -> String {
format!("Error establishing RPC connection to remote node: {}@{}.\nThis can happen if the remote node is not reachable on the network, but also if the two nodes are not configured with the same rpc_secret.\n{}", hex::encode(pubkey), addr, e)
}