// netapp/src/client.rs — client side of a netapp connection.

use std::collections::HashMap;
use std::pin::Pin;
use std::sync::atomic::{self, AtomicU32};
use std::sync::{Arc, Mutex};
use std::task::Poll;

use arc_swap::ArcSwapOption;
use async_trait::async_trait;
use bytes::Bytes;
use log::{debug, error, trace};

use futures::io::AsyncReadExt;
use futures::Stream;
use kuska_handshake::async_std::{handshake_client, BoxStream};
use tokio::select;
use tokio::sync::{mpsc, oneshot, watch};
use tokio_util::compat::*;
use tokio_unix_tcp::SocketAddr;
use tokio_unix_tcp::Stream as SocketStream;

#[cfg(feature = "telemetry")]
use opentelemetry::{
	trace::{FutureExt, Span, SpanKind, TraceContextExt, Tracer},
	Context, KeyValue,
};
#[cfg(feature = "telemetry")]
use opentelemetry_contrib::trace::propagator::binary::*;

use crate::error::*;
use crate::message::*;
use crate::netapp::*;
use crate::recv::*;
use crate::send::*;
use crate::stream::*;
use crate::util::*;
pub(crate) struct ClientConn {
pub(crate) remote_addr: SocketAddr,
pub(crate) peer_id: NodeID,
2020-12-02 19:12:24 +00:00
2022-09-01 13:54:11 +00:00
query_send: ArcSwapOption<mpsc::UnboundedSender<SendItem>>,
2021-10-12 15:59:46 +00:00
next_query_number: AtomicU32,
inflight: Mutex<HashMap<RequestID, oneshot::Sender<ByteStream>>>,
2020-12-02 12:30:47 +00:00
}
impl ClientConn {
pub(crate) async fn init(
netapp: Arc<NetApp>,
everywhere: support unix sockets This patch adds support for listening on and connecting to unix sockets. This requires having wrapper types for various tokio specific network abstractions while also supporting things like serialization and deserialization. Unfortionately i was unable to find a published crate fulfilling these requirements. For this reason I've published a crate myself. Called `tokio-unix-tcp`, it serves as a drop in replacement for Tokio's TCP and Unix network types. I plan to maintain this library outside the scope of this project as well, in general the code should be simple and stable enough however to not require maintainance going forward. As i said this crate aims to support the requirement mentioned above. In addition to this it also strives to be more correct about handling the different types of unix sockets, which the libraries i reviewed were weak at. A list of these crates can be found in the crate README under "Related work". --- The changes to netapp can be summarized as the following: - `std::net::SocketAddr` has been replaced by `tokio_unix_tcp::NamedSocketAddr` in most places. This enum encapsulates a IP address and port as well as a path in its variants and describes a concrete socket address netapp can bind or connect to. - In some places `tokio_unix_tcp::SocketAddr` is used instead of `tokio_unix_tcp::NamedSocketAddr` as mentioned above. This is due to the way unix sockets work: The remote peer of a client from the perspective of a server is not a concrete path but `unnamed`. They just share a file descriptor for the actual communication channel. The local address of the server is the actual file system path the server is listening on. In some cases netapp might be configured to connect to another peer using a unix socket and to not send a reachable IP address and port or unix socket path using the `HelloMessage`. As per the above (the client's remote address will be `unnamed`), we have no way of connecting back to that peer. 
This will currently cause the connection to be aborted by the server. - Listening on Unix sockets requires some additional handling like removing a previous file at the bind path and setting a correct mode (defaulting to `0o222` currently). This is handled by `tokio_unix_tcp`. --- I've tested these changes by including them in garage and running basic administration commands against a node and by running the unit tests here. Basalt peering is currently lacking a proper cost calculation for unix sockets - I'm sadly not familiar with this code.
2023-10-20 00:38:41 +00:00
socket: SocketStream,
peer_id: NodeID,
2020-12-02 12:30:47 +00:00
) -> Result<(), Error> {
let remote_addr = socket.peer_addr()?;
let mut socket = socket.compat();
2020-12-02 12:30:47 +00:00
// Do handshake to authenticate and prove our identity to server
2020-12-02 12:30:47 +00:00
let handshake = handshake_client(
&mut socket,
2020-12-02 12:30:47 +00:00
netapp.netid.clone(),
2021-10-12 11:18:24 +00:00
netapp.id,
2020-12-02 12:30:47 +00:00
netapp.privkey.clone(),
2021-10-12 11:18:24 +00:00
peer_id,
2020-12-02 12:30:47 +00:00
)
.await?;
debug!(
"Handshake complete (client) with {}@{}",
2023-01-31 22:57:33 +00:00
hex::encode(peer_id),
2020-12-02 12:30:47 +00:00
remote_addr
);
// Create BoxStream layer that encodes content
let (read, write) = socket.split();
let (mut read, write) =
2020-12-02 12:30:47 +00:00
BoxStream::from_handshake(read, write, handshake, 0x8000).split_read_write();
// Before doing anything, receive version tag and
// check they are running the same version as us
let mut their_version_tag = VersionTag::default();
read.read_exact(&mut their_version_tag[..]).await?;
if their_version_tag != netapp.version_tag {
let msg = format!(
2022-02-21 15:57:07 +00:00
"different version tags: {} (theirs) vs. {} (ours)",
hex::encode(their_version_tag),
hex::encode(netapp.version_tag)
);
2022-02-21 15:57:07 +00:00
error!("Cannot connect to {}: {}", hex::encode(&peer_id[..8]), msg);
return Err(Error::VersionMismatch(msg));
}
// Build and launch stuff that manages sending requests client-side
2020-12-02 12:30:47 +00:00
let (query_send, query_recv) = mpsc::unbounded_channel();
let (stop_recv_loop, stop_recv_loop_recv) = watch::channel(false);
2020-12-02 12:30:47 +00:00
let conn = Arc::new(ClientConn {
remote_addr,
2021-10-12 11:18:24 +00:00
peer_id,
2021-10-12 15:59:46 +00:00
next_query_number: AtomicU32::from(RequestID::default()),
query_send: ArcSwapOption::new(Some(Arc::new(query_send))),
inflight: Mutex::new(HashMap::new()),
2020-12-02 12:30:47 +00:00
});
2021-10-12 11:18:24 +00:00
netapp.connected_as_client(peer_id, conn.clone());
2020-12-02 12:30:47 +00:00
let debug_name = format!("CLI {}", hex::encode(&peer_id[..8]));
2020-12-02 12:30:47 +00:00
tokio::spawn(async move {
let debug_name_2 = debug_name.clone();
let send_future = tokio::spawn(conn.clone().send_loop(query_recv, write, debug_name_2));
2020-12-02 12:30:47 +00:00
let conn2 = conn.clone();
let recv_future = tokio::spawn(async move {
select! {
r = conn2.recv_loop(read, debug_name) => r,
_ = await_exit(stop_recv_loop_recv) => Ok(())
}
});
send_future.await.log_err("ClientConn send_loop");
2021-10-13 15:14:26 +00:00
// FIXME: should do here: wait for inflight requests to all have their response
stop_recv_loop
.send(true)
.log_err("ClientConn send true to stop_recv_loop");
recv_future.await.log_err("ClientConn recv_loop");
// Make sure we don't wait on any more requests that won't
// have a response
conn.inflight.lock().unwrap().clear();
2020-12-02 12:30:47 +00:00
netapp.disconnected_as_client(&peer_id, conn);
2020-12-02 12:30:47 +00:00
});
Ok(())
}
pub fn close(&self) {
self.query_send.store(None);
2020-12-02 12:30:47 +00:00
}
pub(crate) async fn call<T>(
2020-12-02 12:30:47 +00:00
self: Arc<Self>,
2022-07-21 18:22:56 +00:00
req: Req<T>,
2022-02-21 11:04:09 +00:00
path: &str,
2020-12-02 12:30:47 +00:00
prio: RequestPriority,
2022-07-21 18:22:56 +00:00
) -> Result<Resp<T>, Error>
2020-12-02 12:30:47 +00:00
where
T: Message,
{
let query_send = self.query_send.load_full().ok_or(Error::ConnectionClosed)?;
2020-12-02 12:30:47 +00:00
let id = self
.next_query_number
.fetch_add(1, atomic::Ordering::Relaxed);
2022-02-18 18:01:23 +00:00
cfg_if::cfg_if! {
if #[cfg(feature = "telemetry")] {
let tracer = opentelemetry::global::tracer("netapp");
2022-02-18 19:23:10 +00:00
let mut span = tracer.span_builder(format!("RPC >> {}", path))
2022-04-07 08:31:37 +00:00
.with_kind(SpanKind::Client)
.start(&tracer);
let propagator = BinaryPropagator::new();
2022-07-22 10:45:38 +00:00
let telemetry_id: Bytes = propagator.to_bytes(span.span_context()).to_vec().into();
2022-02-18 18:01:23 +00:00
} else {
2022-07-22 10:45:38 +00:00
let telemetry_id: Bytes = Bytes::new();
2022-02-18 18:01:23 +00:00
}
};
// Encode request
2022-07-22 10:45:38 +00:00
let req_enc = req.into_enc(prio, path.as_bytes().to_vec().into(), telemetry_id);
let req_msg_len = req_enc.msg.len();
let (req_stream, req_order) = req_enc.encode();
2020-12-02 12:30:47 +00:00
2022-02-18 18:01:23 +00:00
// Send request through
2020-12-02 12:30:47 +00:00
let (resp_send, resp_recv) = oneshot::channel();
let old = self.inflight.lock().unwrap().insert(id, resp_send);
if let Some(old_ch) = old {
error!(
"Too many inflight requests! RequestID collision. Interrupting previous request."
);
let _ = old_ch.send(Box::pin(futures::stream::once(async move {
Err(std::io::Error::new(
std::io::ErrorKind::Other,
"RequestID collision, too many inflight requests",
))
})));
}
2020-12-02 12:30:47 +00:00
2022-07-25 13:04:52 +00:00
debug!(
"request: query_send {}, path {}, prio {} (serialized message: {} bytes)",
id, path, prio, req_msg_len
2022-07-22 10:45:38 +00:00
);
2022-02-18 19:23:10 +00:00
#[cfg(feature = "telemetry")]
2022-07-22 10:45:38 +00:00
span.set_attribute(KeyValue::new("len_query_msg", req_msg_len as i64));
2022-02-18 19:23:10 +00:00
2022-09-01 13:54:11 +00:00
query_send.send(SendItem::Stream(id, prio, req_order, req_stream))?;
let canceller = CancelOnDrop::new(id, query_send.as_ref().clone());
2020-12-02 12:30:47 +00:00
cfg_if::cfg_if! {
if #[cfg(feature = "telemetry")] {
let stream = resp_recv
.with_context(Context::current_with_span(span))
.await?;
} else {
let stream = resp_recv.await?;
}
}
2022-09-01 13:54:11 +00:00
let stream = Box::pin(canceller.for_stream(stream));
let resp_enc = RespEnc::decode(stream).await?;
2022-07-25 13:04:52 +00:00
debug!("client: got response to request {} (path {})", id, path);
2022-07-22 10:45:38 +00:00
Resp::from_enc(resp_enc)
2020-12-02 12:30:47 +00:00
}
}
impl SendLoop for ClientConn {}
#[async_trait]
impl RecvLoop for ClientConn {
fn recv_handler(self: &Arc<Self>, id: RequestID, stream: ByteStream) {
trace!("ClientConn recv_handler {}", id);
2020-12-07 15:00:12 +00:00
let mut inflight = self.inflight.lock().unwrap();
if let Some(ch) = inflight.remove(&id) {
if ch.send(stream).is_err() {
debug!("Could not send request response, probably because request was interrupted. Dropping response.");
}
2022-09-01 13:54:11 +00:00
} else {
debug!("Got unexpected response to request {}, dropping it", id);
}
}
}
// ----
// Guard that sends a SendItem::Cancel for request `id` when dropped,
// unless it is defused with std::mem::forget (see CancelOnDropStream).
struct CancelOnDrop {
	// RequestID of the request to cancel.
	id: RequestID,
	// Channel to the send loop, used to emit the Cancel item.
	query_send: mpsc::UnboundedSender<SendItem>,
}
impl CancelOnDrop {
	/// Create a cancellation guard for request `id` on channel `query_send`.
	fn new(id: RequestID, query_send: mpsc::UnboundedSender<SendItem>) -> Self {
		Self { id, query_send }
	}

	/// Attach this guard to a response stream: the request is cancelled if
	/// the wrapped stream is dropped before reaching its end.
	fn for_stream(self, stream: ByteStream) -> CancelOnDropStream {
		CancelOnDropStream {
			cancel: Some(self),
			stream,
		}
	}
}
impl Drop for CancelOnDrop {
	fn drop(&mut self) {
		trace!("cancelling request {}", self.id);
		// Best-effort: if the send loop is already gone, there is
		// nothing left to cancel, so the error is ignored.
		let _ = self.query_send.send(SendItem::Cancel(self.id));
	}
}
// Response stream wrapper: forwards items from the inner stream, and
// holds a CancelOnDrop guard that fires if the stream is dropped before
// completion (the guard is defused once the stream ends normally).
#[pin_project::pin_project]
struct CancelOnDropStream {
	cancel: Option<CancelOnDrop>,
	#[pin]
	stream: ByteStream,
}
impl Stream for CancelOnDropStream {
	type Item = Packet;

	/// Delegate to the inner stream; when it completes normally
	/// (`Poll::Ready(None)`), defuse the cancellation guard with
	/// `mem::forget` so that Drop does NOT send a spurious Cancel
	/// for a request that was fully answered.
	fn poll_next(
		self: Pin<&mut Self>,
		cx: &mut std::task::Context<'_>,
	) -> Poll<Option<Self::Item>> {
		let this = self.project();
		let res = this.stream.poll_next(cx);
		if matches!(res, Poll::Ready(None)) {
			if let Some(c) = this.cancel.take() {
				// Intentional: skip CancelOnDrop::drop, the request finished.
				std::mem::forget(c)
			}
		}
		res
	}

	fn size_hint(&self) -> (usize, Option<usize>) {
		self.stream.size_hint()
	}
}