use std::collections::HashMap;
use std::net::SocketAddr;
use std::sync::atomic::{self, AtomicU32};
use std::sync::{Arc, Mutex};

use arc_swap::ArcSwapOption;
use log::{debug, error, trace};

use tokio::net::TcpStream;
use tokio::select;
use tokio::sync::{mpsc, oneshot, watch};
use tokio_util::compat::*;

use futures::io::AsyncReadExt;

use async_trait::async_trait;

use kuska_handshake::async_std::{handshake_client, BoxStream};

use crate::endpoint::*;
use crate::error::*;
use crate::netapp::*;
use crate::proto::*;
use crate::util::*;

pub(crate) struct ClientConn {
	pub(crate) remote_addr: SocketAddr,
	pub(crate) peer_id: NodeID,

	// Sender half of the channel consumed by the send loop;
	// set to None when the connection is closed.
	query_send: ArcSwapOption<mpsc::UnboundedSender<(RequestID, RequestPriority, Vec<u8>)>>,

	// Counter used to attribute a fresh RequestID to each outgoing call.
	next_query_number: AtomicU32,
	// Requests that have been sent and are still waiting for their response.
	inflight: Mutex<HashMap<RequestID, oneshot::Sender<Vec<u8>>>>,
}

impl ClientConn {
	pub(crate) async fn init(
		netapp: Arc<NetApp>,
		socket: TcpStream,
		peer_id: NodeID,
	) -> Result<(), Error> {
		let remote_addr = socket.peer_addr()?;
		let mut socket = socket.compat();

		// Perform the secret handshake as client to authenticate both peers.
		let handshake = handshake_client(
			&mut socket,
			netapp.netid.clone(),
			netapp.id,
			netapp.privkey.clone(),
			peer_id,
		)
		.await?;

		debug!(
			"Handshake complete (client) with {}@{}",
			hex::encode(&peer_id),
			remote_addr
		);

		let (read, write) = socket.split();

		// Wrap the socket halves in an encrypted box stream (0x8000-byte buffers).
		let (read, write) =
			BoxStream::from_handshake(read, write, handshake, 0x8000).split_read_write();

		let (query_send, query_recv) = mpsc::unbounded_channel();

		let (stop_recv_loop, stop_recv_loop_recv) = watch::channel(false);

		let conn = Arc::new(ClientConn {
			remote_addr,
			peer_id,
			next_query_number: AtomicU32::from(RequestID::default()),
			query_send: ArcSwapOption::new(Some(Arc::new(query_send))),
			inflight: Mutex::new(HashMap::new()),
		});

		netapp.connected_as_client(peer_id, conn.clone());

		tokio::spawn(async move {
			// Run the send loop until the query channel is closed,
			// then stop the receive loop and tear the connection down.
			let send_future = tokio::spawn(conn.clone().send_loop(query_recv, write));

			let conn2 = conn.clone();
			let recv_future = tokio::spawn(async move {
				select! {
					r = conn2.recv_loop(read) => r,
					_ = await_exit(stop_recv_loop_recv) => Ok(())
				}
			});

			send_future.await.log_err("ClientConn send_loop");

			// TODO here: wait for inflight requests to all have their response
			stop_recv_loop
				.send(true)
				.log_err("ClientConn send true to stop_recv_loop");

			recv_future.await.log_err("ClientConn recv_loop");

			// Make sure we don't wait on any more requests that won't
			// have a response
			conn.inflight.lock().unwrap().clear();

			netapp.disconnected_as_client(&peer_id, conn);
		});

		Ok(())
	}

	pub fn close(&self) {
		// Dropping the sender makes the send loop exit, which closes the connection.
		self.query_send.store(None);
	}

	pub(crate) async fn call<T>(
		self: Arc<Self>,
		rq: T,
		path: &str,
		prio: RequestPriority,
	) -> Result<<T as Message>::Response, Error>
	where
		T: Message,
	{
		let query_send = self.query_send.load_full().ok_or(Error::ConnectionClosed)?;

		let id = self
			.next_query_number
			.fetch_add(1, atomic::Ordering::Relaxed);

		// Request format: priority byte, path length byte, path bytes,
		// then the MessagePack-encoded request body.
		let mut bytes = vec![prio, path.as_bytes().len() as u8];
		bytes.extend_from_slice(path.as_bytes());
		bytes.extend_from_slice(&rmp_to_vec_all_named(&rq)?[..]);

		// Register the request as inflight before sending it, so that the
		// response cannot arrive before we are ready to receive it.
		let (resp_send, resp_recv) = oneshot::channel();
		let old = self.inflight.lock().unwrap().insert(id, resp_send);
		if let Some(old_ch) = old {
			error!(
				"Too many inflight requests! RequestID collision. Interrupting previous request."
			);
			if old_ch.send(vec![]).is_err() {
				debug!("Could not send empty response to collided request, probably because the request was interrupted. Dropping response.");
			}
		}

		trace!("request: query_send {}, {} bytes", id, bytes.len());

		query_send.send((id, prio, bytes))?;

		let resp = resp_recv.await?;

		if resp.is_empty() {
			return Err(Error::Message(
				"Response is 0 bytes, either a collision or a protocol error".into(),
			));
		}

		trace!("request response {}", id);

		// The first response byte is a status code: 0 means success,
		// anything else is a remote error code.
		let code = resp[0];
		if code == 0 {
			Ok(rmp_serde::decode::from_read_ref::<
				_,
				<T as Message>::Response,
			>(&resp[1..])?)
		} else {
			Err(Error::Remote(format!("Remote error code {}", code)))
		}
	}
}

impl SendLoop for ClientConn {}

#[async_trait]
impl RecvLoop for ClientConn {
	fn recv_handler(self: &Arc<Self>, id: RequestID, msg: Vec<u8>) {
		trace!("ClientConn recv_handler {} ({} bytes)", id, msg.len());
		// Forward the response to whoever is waiting for it in call().
		let mut inflight = self.inflight.lock().unwrap();
		if let Some(ch) = inflight.remove(&id) {
			if ch.send(msg).is_err() {
				debug!("Could not send request response, probably because request was interrupted. Dropping response.");
			}
		}
	}
}