// netapp/src/server.rs
use std::net::SocketAddr;
use std::sync::Arc;

use arc_swap::ArcSwapOption;
use async_trait::async_trait;
use log::*;

use futures::io::{AsyncReadExt, AsyncWriteExt};
use kuska_handshake::async_std::{handshake_server, BoxStream};
use tokio::net::TcpStream;
use tokio::select;
use tokio::sync::{mpsc, watch};
use tokio_util::compat::*;

#[cfg(feature = "telemetry")]
use opentelemetry::{
    trace::{FutureExt, Span, SpanKind, TraceContextExt, TraceId, Tracer},
    Context, KeyValue,
};
#[cfg(feature = "telemetry")]
use opentelemetry_contrib::trace::propagator::binary::*;
#[cfg(feature = "telemetry")]
use rand::{thread_rng, Rng};

use crate::error::*;
use crate::message::*;
use crate::netapp::*;
use crate::recv::*;
use crate::send::*;
use crate::stream::*;
use crate::util::*;
// The client and server connection structs (client.rs and server.rs)
// build upon the chunking mechanism, which is contained in the
// send.rs and recv.rs modules.
// Here, we just care about sending big messages without size limit.
// The format of these messages is described below.
// Chunking happens independently.

// Request message format (client -> server):
// - u8 priority
// - u8 path length
// - [u8; path length] path
// - [u8; *] data

// Response message format (server -> client):
// - u8 response code
// - [u8; *] response
pub(crate) struct ServerConn {
pub(crate) remote_addr: SocketAddr,
pub(crate) peer_id: NodeID,
netapp: Arc<NetApp>,
2022-07-21 15:34:53 +00:00
resp_send: ArcSwapOption<mpsc::UnboundedSender<(RequestID, RequestPriority, ByteStream)>>,
}
impl ServerConn {
pub(crate) async fn run(
netapp: Arc<NetApp>,
socket: TcpStream,
must_exit: watch::Receiver<bool>,
) -> Result<(), Error> {
let remote_addr = socket.peer_addr()?;
let mut socket = socket.compat();
// Do handshake to authenticate client
let handshake = handshake_server(
&mut socket,
netapp.netid.clone(),
netapp.id,
netapp.privkey.clone(),
)
.await?;
let peer_id = handshake.peer_pk;
debug!(
"Handshake complete (server) with {}@{}",
hex::encode(&peer_id),
remote_addr
);
// Create BoxStream layer that encodes content
let (read, write) = socket.split();
let (read, mut write) =
BoxStream::from_handshake(read, write, handshake, 0x8000).split_read_write();
// Before doing anything, send version tag, so that client
// can check and disconnect if version is wrong
write.write_all(&netapp.version_tag[..]).await?;
write.flush().await?;
// Build and launch stuff that handles requests server-side
let (resp_send, resp_recv) = mpsc::unbounded_channel();
let conn = Arc::new(ServerConn {
netapp: netapp.clone(),
remote_addr,
peer_id,
resp_send: ArcSwapOption::new(Some(Arc::new(resp_send))),
});
netapp.connected_as_server(peer_id, conn.clone());
let conn2 = conn.clone();
let recv_future = tokio::spawn(async move {
select! {
r = conn2.recv_loop(read) => r,
_ = await_exit(must_exit) => Ok(())
}
});
let send_future = tokio::spawn(conn.clone().send_loop(resp_recv, write));
recv_future.await.log_err("ServerConn recv_loop");
conn.resp_send.store(None);
send_future.await.log_err("ServerConn send_loop");
netapp.disconnected_as_server(&peer_id, conn);
Ok(())
}
2022-07-22 10:45:38 +00:00
async fn recv_handler_aux(self: &Arc<Self>, req_enc: ReqEnc) -> Result<RespEnc, Error> {
let path = String::from_utf8(req_enc.path.to_vec())?;
let handler_opt = {
let endpoints = self.netapp.endpoints.read().unwrap();
2022-07-22 10:45:38 +00:00
endpoints.get(&path[..]).map(|e| e.clone_endpoint())
};
if let Some(handler) = handler_opt {
2022-02-18 18:01:23 +00:00
cfg_if::cfg_if! {
if #[cfg(feature = "telemetry")] {
let tracer = opentelemetry::global::tracer("netapp");
2022-07-22 10:45:38 +00:00
let mut span = if !req_enc.telemetry_id.is_empty() {
let propagator = BinaryPropagator::new();
2022-07-22 10:45:38 +00:00
let context = propagator.from_bytes(req_enc.telemetry_id.to_vec());
let context = Context::new().with_remote_span_context(context);
tracer.span_builder(format!(">> RPC {}", path))
.with_kind(SpanKind::Server)
.start_with_context(&tracer, &context)
2022-02-18 18:01:23 +00:00
} else {
let mut rng = thread_rng();
let trace_id = TraceId::from_bytes(rng.gen());
tracer
.span_builder(format!(">> RPC {}", path))
.with_kind(SpanKind::Server)
.with_trace_id(trace_id)
.start(&tracer)
2022-02-18 18:01:23 +00:00
};
span.set_attribute(KeyValue::new("path", path.to_string()));
2022-07-22 10:45:38 +00:00
span.set_attribute(KeyValue::new("len_query_msg", req_enc.msg.len() as i64));
2022-02-18 18:01:23 +00:00
2022-07-22 10:45:38 +00:00
handler.handle(req_enc, self.peer_id)
2022-02-18 18:01:23 +00:00
.with_context(Context::current_with_span(span))
.await
} else {
2022-07-22 10:45:38 +00:00
handler.handle(req_enc, self.peer_id).await
2022-02-18 18:01:23 +00:00
}
}
} else {
Err(Error::NoHandler)
}
}
}
// ServerConn uses the default SendLoop implementation provided by the trait.
impl SendLoop for ServerConn {}
#[async_trait]
impl RecvLoop for ServerConn {
fn recv_handler(self: &Arc<Self>, id: RequestID, stream: ByteStream) {
let resp_send = self.resp_send.load_full().unwrap();
let self2 = self.clone();
tokio::spawn(async move {
2022-07-25 13:04:52 +00:00
debug!("server: recv_handler got {}", id);
let (prio, resp_enc) = match ReqEnc::decode(stream).await {
2022-07-22 10:45:38 +00:00
Ok(req_enc) => {
let prio = req_enc.prio;
let resp = self2.recv_handler_aux(req_enc).await;
(
prio,
match resp {
Ok(resp_enc) => resp_enc,
Err(e) => RespEnc::from_err(e),
},
)
}
2022-07-22 10:45:38 +00:00
Err(e) => (PRIO_NORMAL, RespEnc::from_err(e)),
2022-02-21 11:01:04 +00:00
};
2022-07-25 13:04:52 +00:00
debug!("server: sending response to {}", id);
resp_send
2022-07-22 10:45:38 +00:00
.send((id, prio, resp_enc.encode()))
.log_err("ServerConn recv_handler send resp bytes");
Ok::<_, Error>(())
});
}
}