netapp/src/conn.rs

use std::collections::HashMap;
use std::net::SocketAddr;
use std::sync::atomic::{self, AtomicBool, AtomicU16};
use std::sync::{Arc, Mutex};

use bytes::Bytes;
use log::{debug, error, trace};

use tokio::io::split;
use tokio::net::TcpStream;
use tokio::sync::{mpsc, oneshot, watch};

use async_trait::async_trait;

use kuska_handshake::async_std::{
    handshake_client, handshake_server, BoxStream, TokioCompatExt, TokioCompatExtRead,
    TokioCompatExtWrite,
};

use crate::error::*;
use crate::message::*;
use crate::netapp::*;
use crate::proto::*;
use crate::util::*;
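
/// Server-side state for one accepted peer connection: the peer's identity
/// and address, a handle back to the owning `NetApp`, the channel used to
/// queue responses for the send loop, and a watch channel used to signal
/// shutdown to both loops.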
pub(crate) struct ServerConn {
    pub(crate) remote_addr: SocketAddr,
    pub(crate) peer_id: NodeID,

    netapp: Arc<NetApp>,

    resp_send: mpsc::UnboundedSender<Option<(RequestID, RequestPriority, Vec<u8>)>>,
    close_send: watch::Sender<bool>,
}

impl ServerConn {
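    /// Accepts an incoming TCP connection: performs the Kuska handshake as
    /// the server side, wraps the socket in a `BoxStream`, registers the
    /// connection with the `NetApp`, then runs the receive and send loops
    /// until the connection is closed or one of the loops exits.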
    pub(crate) async fn run(netapp: Arc<NetApp>, socket: TcpStream) -> Result<(), Error> {
        let mut asyncstd_socket = TokioCompatExt::wrap(socket);
        let handshake = handshake_server(
            &mut asyncstd_socket,
            netapp.netid.clone(),
            netapp.id.clone(),
            netapp.privkey.clone(),
        )
        .await?;
        let peer_id = handshake.peer_pk.clone();

        let tokio_socket = asyncstd_socket.into_inner();
        let remote_addr = tokio_socket.peer_addr()?;

        debug!(
            "Handshake complete (server) with {}@{}",
            hex::encode(&peer_id),
            remote_addr
        );

        let (read, write) = split(tokio_socket);
        let read = TokioCompatExtRead::wrap(read);
        let write = TokioCompatExtWrite::wrap(write);

        let (read, write) =
            BoxStream::from_handshake(read, write, handshake, 0x8000).split_read_write();

        let (resp_send, resp_recv) = mpsc::unbounded_channel();
        let (close_send, close_recv) = watch::channel(false);

        let conn = Arc::new(ServerConn {
            netapp: netapp.clone(),
            remote_addr,
            peer_id: peer_id.clone(),
            resp_send,
            close_send,
        });

        netapp.connected_as_server(peer_id.clone(), conn.clone());

        let conn2 = conn.clone();
        let conn3 = conn.clone();
        let close_recv2 = close_recv.clone();

        tokio::try_join!(
            async move {
                tokio::select!(
                    r = conn2.recv_loop(read) => r,
                    _ = await_exit(close_recv) => Ok(()),
                )
            },
            async move {
                tokio::select!(
                    r = conn3.send_loop(resp_recv, write) => r,
                    _ = await_exit(close_recv2) => Ok(()),
                )
            },
        )
        .map(|_| ())
        .log_err("ServerConn recv_loop/send_loop");

        netapp.disconnected_as_server(&peer_id, conn);

        Ok(())
    }
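
    /// Signals both the receive and send loops to stop by broadcasting `true`
    /// on the close watch channel.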
    pub fn close(&self) {
        self.close_send.broadcast(true).unwrap();
    }
}
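
// ServerConn reuses the default `send_loop` implementation provided by the
// `SendLoop` trait.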
impl SendLoop for ServerConn {}
#[async_trait]
impl RecvLoop for ServerConn {
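    // Handles a fully received request frame: the first byte is the request
    // priority, the next four bytes are the big-endian message kind, and the
    // rest is the serialized message. The matching handler (if any) is invoked
    // and its response is queued on `resp_send` for the send loop.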
    async fn recv_handler(self: Arc<Self>, id: RequestID, bytes: Vec<u8>) {
        trace!("ServerConn recv_handler {} ({} bytes)", id, bytes.len());

        let bytes: Bytes = bytes.into();
        let prio = bytes[0];
        let mut kind_bytes = [0u8; 4];
        kind_bytes.copy_from_slice(&bytes[1..5]);
        let kind = u32::from_be_bytes(kind_bytes);

        if let Some(handler) = self.netapp.msg_handlers.load().get(&kind) {
            let net_handler = &handler.net_handler;
            let resp = net_handler(self.peer_id.clone(), bytes.slice(5..)).await;
            self.resp_send
                .send(Some((id, prio, resp)))
                .log_err("ServerConn recv_handler send resp");
        }
    }
}
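
/// Client-side state for one outgoing peer connection: the peer's identity
/// and address, the channel used to queue outgoing requests for the send
/// loop, a counter allocating request IDs, the map of in-flight requests
/// awaiting a response, and the machinery used to shut the connection down.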
pub(crate) struct ClientConn {
    pub(crate) remote_addr: SocketAddr,
    pub(crate) peer_id: NodeID,

    query_send: mpsc::UnboundedSender<Option<(RequestID, RequestPriority, Vec<u8>)>>,

    next_query_number: AtomicU16,
    inflight: Mutex<HashMap<RequestID, oneshot::Sender<Vec<u8>>>>,
    must_exit: AtomicBool,
    stop_recv_loop: watch::Sender<bool>,
}

impl ClientConn {
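    /// Establishes an outgoing connection: performs the Kuska handshake as
    /// the client side against the expected `peer_id`, wraps the socket in a
    /// `BoxStream`, registers the connection with the `NetApp`, and spawns a
    /// background task running the send and receive loops.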
    pub(crate) async fn init(
        netapp: Arc<NetApp>,
        socket: TcpStream,
        peer_id: NodeID,
    ) -> Result<(), Error> {
        let mut asyncstd_socket = TokioCompatExt::wrap(socket);
        let handshake = handshake_client(
            &mut asyncstd_socket,
            netapp.netid.clone(),
            netapp.id.clone(),
            netapp.privkey.clone(),
            peer_id.clone(),
        )
        .await?;

        let tokio_socket = asyncstd_socket.into_inner();
        let remote_addr = tokio_socket.peer_addr()?;

        debug!(
            "Handshake complete (client) with {}@{}",
            hex::encode(&peer_id),
            remote_addr
        );

        let (read, write) = split(tokio_socket);
        let read = TokioCompatExtRead::wrap(read);
        let write = TokioCompatExtWrite::wrap(write);

        let (read, write) =
            BoxStream::from_handshake(read, write, handshake, 0x8000).split_read_write();

        let (query_send, query_recv) = mpsc::unbounded_channel();
        let (stop_recv_loop, stop_recv_loop_recv) = watch::channel(false);

        let conn = Arc::new(ClientConn {
            remote_addr,
            peer_id: peer_id.clone(),
            next_query_number: AtomicU16::from(0u16),
            query_send,
            inflight: Mutex::new(HashMap::new()),
            must_exit: AtomicBool::new(false),
            stop_recv_loop,
        });

        netapp.connected_as_client(peer_id.clone(), conn.clone());

        tokio::spawn(async move {
            let conn2 = conn.clone();
            let conn3 = conn.clone();
            tokio::try_join!(conn2.send_loop(query_recv, write), async move {
                tokio::select!(
                    r = conn3.recv_loop(read) => r,
                    _ = await_exit(stop_recv_loop_recv) => Ok(()),
                )
            })
            .map(|_| ())
            .log_err("ClientConn send_loop/recv_loop/dispatch_loop");

            netapp.disconnected_as_client(&peer_id, conn);
        });

        Ok(())
    }
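
    /// Initiates shutdown: marks `must_exit`, sends `None` on `query_send` to
    /// stop the send loop, and stops the receive loop immediately if no
    /// requests are still awaiting a response (otherwise the receive loop is
    /// stopped once the last in-flight response has been delivered).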
    pub fn close(&self) {
        self.must_exit.store(true, atomic::Ordering::SeqCst);
        self.query_send
            .send(None)
            .log_err("could not write None in query_send");
        if self.inflight.lock().unwrap().is_empty() {
            self.stop_recv_loop
                .broadcast(true)
                .log_err("could not write true to stop_recv_loop");
        }
    }
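
    /// Sends a request to the remote peer and awaits its response.
    ///
    /// A fresh `RequestID` is taken from `next_query_number`, the message is
    /// encoded as `[priority byte | 4-byte big-endian kind | MessagePack body]`
    /// and queued on `query_send`; the response arrives through a oneshot
    /// channel registered in `inflight` under that ID.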
    pub(crate) async fn request<T>(
        self: Arc<Self>,
        rq: T,
        prio: RequestPriority,
    ) -> Result<<T as Message>::Response, Error>
    where
        T: Message,
    {
        let id = self
            .next_query_number
            .fetch_add(1u16, atomic::Ordering::Relaxed);
        let mut bytes = vec![prio];
        bytes.extend_from_slice(&u32::to_be_bytes(T::KIND)[..]);
        bytes.extend_from_slice(&rmp_to_vec_all_named(&rq)?[..]);

        let (resp_send, resp_recv) = oneshot::channel();
        let old = self.inflight.lock().unwrap().insert(id, resp_send);
        if let Some(old_ch) = old {
            error!(
                "Too many inflight requests! RequestID collision. Interrupting previous request."
            );
            if old_ch.send(vec![]).is_err() {
                debug!("Could not send empty response to the colliding request, probably because it was interrupted. Dropping response.");
            }
        }

        trace!("request: query_send {}, {} bytes", id, bytes.len());
        self.query_send.send(Some((id, prio, bytes)))?;

        let resp = resp_recv.await?;
        rmp_serde::decode::from_read_ref::<_, Result<<T as Message>::Response, String>>(&resp[..])?
            .map_err(Error::Remote)
    }
}
impl SendLoop for ClientConn {}
#[async_trait]
impl RecvLoop for ClientConn {
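    // Handles a response frame: delivers the bytes to the oneshot channel of
    // the matching in-flight request, and stops the receive loop once all
    // in-flight requests are answered and a close was requested.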
    async fn recv_handler(self: Arc<Self>, id: RequestID, msg: Vec<u8>) {
        trace!("ClientConn recv_handler {} ({} bytes)", id, msg.len());

        let mut inflight = self.inflight.lock().unwrap();
        if let Some(ch) = inflight.remove(&id) {
            if ch.send(msg).is_err() {
                debug!("Could not send request response, probably because request was interrupted. Dropping response.");
            }
        }
        if inflight.is_empty() && self.must_exit.load(atomic::Ordering::SeqCst) {
            self.stop_recv_loop
                .broadcast(true)
                .log_err("could not write true to stop_recv_loop");
        }
    }
}