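//! Connection handling for netapp: `ServerConn` runs the server side of a
//! connection (receiving requests and dispatching them to registered message
//! handlers), while `ClientConn` runs the client side (sending requests and
//! matching responses to in-flight queries).
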
use std::collections::HashMap;
use std::net::SocketAddr;
use std::sync::atomic::{self, AtomicBool, AtomicU16};
use std::sync::{Arc, Mutex};

use bytes::Bytes;
use log::{debug, error, trace};

use sodiumoxide::crypto::sign::ed25519;
use tokio::io::split;
use tokio::net::TcpStream;
use tokio::sync::{mpsc, oneshot, watch};

use async_trait::async_trait;

use kuska_handshake::async_std::{
    handshake_client, handshake_server, BoxStream, TokioCompatExt, TokioCompatExtRead,
    TokioCompatExtWrite,
};

use crate::error::*;
use crate::message::*;
use crate::netapp::*;
use crate::proto::*;
use crate::util::*;

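/// The server side of a connection to a peer: receives requests, dispatches
/// them to the matching message handler, and queues the responses for sending.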
pub(crate) struct ServerConn {
    pub(crate) remote_addr: SocketAddr,
    pub(crate) peer_pk: ed25519::PublicKey,

    netapp: Arc<NetApp>,

    resp_send: mpsc::UnboundedSender<Option<(RequestID, RequestPriority, Vec<u8>)>>,
    close_send: watch::Sender<bool>,
}

impl ServerConn {
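    /// Handles an established incoming TCP connection: performs the secret
    /// handshake as server, registers the connection with the NetApp instance,
    /// then runs the receive and send loops until the peer disconnects or
    /// close() is called.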
    pub(crate) async fn run(netapp: Arc<NetApp>, socket: TcpStream) -> Result<(), Error> {
        // Wrap the tokio socket so that the async-std based kuska-handshake
        // crate can drive it, and perform the server side of the handshake.
        let mut asyncstd_socket = TokioCompatExt::wrap(socket);
        let handshake = handshake_server(
            &mut asyncstd_socket,
            netapp.netid.clone(),
            netapp.pubkey.clone(),
            netapp.privkey.clone(),
        )
        .await?;
        let peer_pk = handshake.peer_pk.clone();

        let tokio_socket = asyncstd_socket.into_inner();
        let remote_addr = tokio_socket.peer_addr().unwrap();

        debug!(
            "Handshake complete (server) with {}@{}",
            hex::encode(&peer_pk),
            remote_addr
        );

        let (read, write) = split(tokio_socket);

        let read = TokioCompatExtRead::wrap(read);
        let write = TokioCompatExtWrite::wrap(write);

        // Wrap the plain socket halves in an encrypted box stream
        // (0x8000 is the buffer size used for the stream).
        let (read, write) =
            BoxStream::from_handshake(read, write, handshake, 0x8000).split_read_write();

        let (resp_send, resp_recv) = mpsc::unbounded_channel();

        let (close_send, close_recv) = watch::channel(false);

        let conn = Arc::new(ServerConn {
            netapp: netapp.clone(),
            remote_addr,
            peer_pk: peer_pk.clone(),
            resp_send,
            close_send,
        });

        netapp.connected_as_server(peer_pk.clone(), conn.clone());

        // Run the receive and send loops concurrently; each of them also
        // watches the close channel so that both exit when close() is called.
        let conn2 = conn.clone();
        let conn3 = conn.clone();
        let close_recv2 = close_recv.clone();
        tokio::try_join!(
            async move {
                tokio::select!(
                    r = conn2.recv_loop(read) => r,
                    _ = await_exit(close_recv) => Ok(()),
                )
            },
            async move {
                tokio::select!(
                    r = conn3.send_loop(resp_recv, write) => r,
                    _ = await_exit(close_recv2) => Ok(()),
                )
            },
        )
        .map(|_| ())
        .log_err("ServerConn recv_loop/send_loop");

        netapp.disconnected_as_server(&peer_pk, conn);

        Ok(())
    }

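    /// Closes the connection by signaling both the receive and send loops
    /// to exit.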
    pub fn close(&self) {
        self.close_send.broadcast(true).unwrap();
    }
}

impl SendLoop for ServerConn {}

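// Wire format of a request, as decoded by recv_handler below: byte 0 is the
// request priority, bytes 1..5 are the message kind as a big-endian u32, and
// the remainder is the msgpack-encoded message body.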
#[async_trait]
impl RecvLoop for ServerConn {
    async fn recv_handler(self: Arc<Self>, id: RequestID, bytes: Vec<u8>) {
        trace!("ServerConn recv_handler {} ({} bytes)", id, bytes.len());

        let bytes: Bytes = bytes.into();

        // Byte 0: request priority, echoed back with the response.
        let prio = bytes[0];

        // Bytes 1..5: message kind, used to look up the registered handler.
        let mut kind_bytes = [0u8; 4];
        kind_bytes.copy_from_slice(&bytes[1..5]);
        let kind = u32::from_be_bytes(kind_bytes);

        // If no handler is registered for this message kind, the request is
        // dropped silently and no response is sent.
        if let Some(handler) = self.netapp.msg_handlers.load().get(&kind) {
            let net_handler = &handler.net_handler;
            let resp = net_handler(self.peer_pk.clone(), bytes.slice(5..)).await;
            self.resp_send
                .send(Some((id, prio, resp)))
                .log_err("ServerConn recv_handler send resp");
        }
    }
}

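/// The client side of a connection to a peer: sends requests tagged with a
/// 16-bit RequestID and matches incoming responses to in-flight requests by
/// that ID.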
pub(crate) struct ClientConn {
    pub(crate) remote_addr: SocketAddr,
    pub(crate) peer_pk: ed25519::PublicKey,

    query_send: mpsc::UnboundedSender<Option<(RequestID, RequestPriority, Vec<u8>)>>,

    next_query_number: AtomicU16,
    inflight: Mutex<HashMap<RequestID, oneshot::Sender<Vec<u8>>>>,
    must_exit: AtomicBool,
    stop_recv_loop: watch::Sender<bool>,
}

impl ClientConn {
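    /// Opens a client connection on an established TCP socket: performs the
    /// secret handshake as client against the expected remote public key,
    /// registers the connection, and spawns a task running the send and
    /// receive loops.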
    pub(crate) async fn init(
        netapp: Arc<NetApp>,
        socket: TcpStream,
        remote_pk: ed25519::PublicKey,
    ) -> Result<(), Error> {
        let mut asyncstd_socket = TokioCompatExt::wrap(socket);

        let handshake = handshake_client(
            &mut asyncstd_socket,
            netapp.netid.clone(),
            netapp.pubkey.clone(),
            netapp.privkey.clone(),
            remote_pk.clone(),
        )
        .await?;

        let tokio_socket = asyncstd_socket.into_inner();
        let remote_addr = tokio_socket.peer_addr().unwrap();

        debug!(
            "Handshake complete (client) with {}@{}",
            hex::encode(&remote_pk),
            remote_addr
        );

        let (read, write) = split(tokio_socket);

        let read = TokioCompatExtRead::wrap(read);
        let write = TokioCompatExtWrite::wrap(write);

        // Same box stream setup as on the server side.
        let (read, write) =
            BoxStream::from_handshake(read, write, handshake, 0x8000).split_read_write();

        let (query_send, query_recv) = mpsc::unbounded_channel();

        let (stop_recv_loop, stop_recv_loop_recv) = watch::channel(false);

        let conn = Arc::new(ClientConn {
            remote_addr,
            peer_pk: remote_pk.clone(),
            next_query_number: AtomicU16::from(0u16),
            query_send,
            inflight: Mutex::new(HashMap::new()),
            must_exit: AtomicBool::new(false),
            stop_recv_loop,
        });

        netapp.connected_as_client(remote_pk.clone(), conn.clone());

        tokio::spawn(async move {
            let conn2 = conn.clone();
            let conn3 = conn.clone();
            // The send loop exits when close() pushes None on query_send; the
            // recv loop is stopped through the stop_recv_loop channel once all
            // in-flight requests have been answered.
            tokio::try_join!(conn2.send_loop(query_recv, write), async move {
                tokio::select!(
                    r = conn3.recv_loop(read) => r,
                    _ = await_exit(stop_recv_loop_recv) => Ok(()),
                )
            })
            .map(|_| ())
            .log_err("ClientConn send_loop/recv_loop");

            netapp.disconnected_as_client(&remote_pk, conn);
        });

        Ok(())
    }

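    /// Initiates a graceful shutdown: the send loop stops once it reads the
    /// None pushed here, and the receive loop is stopped immediately if no
    /// request is in flight, or by recv_handler once the last response has
    /// arrived.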
    pub fn close(&self) {
        self.must_exit.store(true, atomic::Ordering::SeqCst);
        self.query_send
            .send(None)
            .log_err("could not write None in query_send");
        if self.inflight.lock().unwrap().is_empty() {
            self.stop_recv_loop
                .broadcast(true)
                .log_err("could not write true to stop_recv_loop");
        }
    }

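    /// Sends a request to the remote peer and awaits its response.
    ///
    /// The request is framed as one priority byte, the message kind `T::KIND`
    /// as a big-endian u32, then the msgpack-encoded body; this is the format
    /// decoded by `ServerConn::recv_handler`.
    ///
    /// Hypothetical usage sketch (`PingMessage` and `PRIO_NORMAL` are
    /// illustrative names, not defined in this file):
    ///
    /// ```ignore
    /// let resp = conn.request(PingMessage {}, PRIO_NORMAL).await?;
    /// ```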
    pub(crate) async fn request<T>(
        self: Arc<Self>,
        rq: T,
        prio: RequestPriority,
    ) -> Result<<T as Message>::Response, Error>
    where
        T: Message,
    {
        // Request IDs are 16 bits and wrap around, so a very slow request
        // may collide with a newer one; this case is handled below.
        let id = self
            .next_query_number
            .fetch_add(1u16, atomic::Ordering::Relaxed);
        let mut bytes = vec![prio];
        bytes.extend_from_slice(&u32::to_be_bytes(T::KIND)[..]);
        bytes.extend_from_slice(&rmp_to_vec_all_named(&rq)?[..]);

        let (resp_send, resp_recv) = oneshot::channel();
        let old = self.inflight.lock().unwrap().insert(id, resp_send);
        if let Some(old_ch) = old {
            error!(
                "Too many inflight requests! RequestID collision. Interrupting previous request."
            );
            if old_ch.send(vec![]).is_err() {
                debug!("Could not send empty response to the request whose ID collided, probably because it was interrupted. Dropping response.");
            }
        }

        trace!("request: query_send {}, {} bytes", id, bytes.len());
        self.query_send.send(Some((id, prio, bytes)))?;

        let resp = resp_recv.await?;

        // The response body is a msgpack-encoded Result: an error in the
        // remote handler is carried as a String and surfaced as Error::Remote.
        rmp_serde::decode::from_read_ref::<_, Result<<T as Message>::Response, String>>(&resp[..])?
            .map_err(Error::Remote)
    }
}

impl SendLoop for ClientConn {}

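// On the client side, recv_handler receives responses: the RequestID is used
// to find the matching in-flight request and forward it the response bytes.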
#[async_trait]
impl RecvLoop for ClientConn {
    async fn recv_handler(self: Arc<Self>, id: RequestID, msg: Vec<u8>) {
        trace!("ClientConn recv_handler {} ({} bytes)", id, msg.len());

        let mut inflight = self.inflight.lock().unwrap();
        if let Some(ch) = inflight.remove(&id) {
            if ch.send(msg).is_err() {
                debug!("Could not send request response, probably because request was interrupted. Dropping response.");
            }
        }

        // If close() was called while requests were still in flight, stop the
        // recv loop once the last response has been delivered.
        if inflight.is_empty() && self.must_exit.load(atomic::Ordering::SeqCst) {
            self.stop_recv_loop
                .broadcast(true)
                .log_err("could not write true to stop_recv_loop");
        }
    }
}