2022-07-21 15:34:53 +00:00
|
|
|
use std::collections::VecDeque;
|
2022-06-05 13:33:43 +00:00
|
|
|
use std::pin::Pin;
|
2020-12-02 12:30:47 +00:00
|
|
|
use std::sync::Arc;
|
2022-06-05 13:33:43 +00:00
|
|
|
use std::task::{Context, Poll};
|
2020-12-02 12:30:47 +00:00
|
|
|
|
2022-07-21 15:34:53 +00:00
|
|
|
use async_trait::async_trait;
|
2022-07-21 17:05:51 +00:00
|
|
|
use bytes::Bytes;
|
2022-07-26 10:11:48 +00:00
|
|
|
use log::*;
|
2020-12-02 12:30:47 +00:00
|
|
|
|
2022-07-21 15:34:53 +00:00
|
|
|
use futures::AsyncWriteExt;
|
2021-10-25 07:27:57 +00:00
|
|
|
use kuska_handshake::async_std::BoxStreamWrite;
|
2020-12-07 12:35:24 +00:00
|
|
|
use tokio::sync::mpsc;
|
|
|
|
|
2020-12-02 12:30:47 +00:00
|
|
|
use crate::error::*;
|
2022-07-21 15:34:53 +00:00
|
|
|
use crate::message::*;
|
2022-07-22 10:45:38 +00:00
|
|
|
use crate::stream::*;
|
2020-12-02 12:30:47 +00:00
|
|
|
|
2021-10-12 16:13:07 +00:00
|
|
|
// Messages are sent in chunks
|
|
|
|
// Chunk format:
|
|
|
|
// - u32 BE: request id (same for request and response)
|
|
|
|
// - u16 BE: chunk length, possibly with CHUNK_HAS_CONTINUATION flag
|
|
|
|
// when this is not the last chunk of the message
|
|
|
|
// - [u8; chunk_length] chunk data
|
|
|
|
|
2021-10-12 15:59:46 +00:00
|
|
|
/// Id of a request/response pair; same value in every chunk of the message
/// (sent as u32 big-endian on the wire, see `RequestID::to_be_bytes` use below)
pub(crate) type RequestID = u32;
/// Chunk length word, also carrying the flag bits below
/// (sent as u16 big-endian on the wire)
pub(crate) type ChunkLength = u16;

/// Maximum payload size of a single chunk; small enough that the two
/// top bits of the length word stay free for the flags below
pub(crate) const MAX_CHUNK_LENGTH: ChunkLength = 0x3FF0;
/// Flag bit: the low bits of the length word are an error code,
/// not a chunk length
pub(crate) const ERROR_MARKER: ChunkLength = 0x4000;
/// Flag bit: more chunks of this message follow
pub(crate) const CHUNK_HAS_CONTINUATION: ChunkLength = 0x8000;
|
2020-12-02 12:30:47 +00:00
|
|
|
|
2022-07-22 11:23:42 +00:00
|
|
|
/// Queue of message streams waiting to be sent, grouped by priority level.
struct SendQueue {
    // One entry per priority level in use, kept sorted by ascending
    // priority value (see `push`); the poll loop scans levels from the
    // front, so numerically smaller priority values are served first.
    // Within a level, items are kept in a deque for round-robin sending.
    items: Vec<(u8, VecDeque<SendQueueItem>)>,
}
|
|
|
|
|
2020-12-02 12:30:47 +00:00
|
|
|
/// A single stream queued for sending.
struct SendQueueItem {
    // Request id echoed in the header of every chunk of this stream
    id: RequestID,
    // Priority level; selects the deque in `SendQueue::items`
    prio: RequestPriority,
    // Reader over the bytes still to be sent for this stream
    data: ByteStreamReader,
}
|
|
|
|
|
|
|
|
impl SendQueue {
|
|
|
|
fn new() -> Self {
|
|
|
|
Self {
|
2022-07-22 11:48:43 +00:00
|
|
|
items: Vec::with_capacity(64),
|
2020-12-02 12:30:47 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
fn push(&mut self, item: SendQueueItem) {
|
|
|
|
let prio = item.prio;
|
2021-10-13 15:12:13 +00:00
|
|
|
let pos_prio = match self.items.binary_search_by(|(p, _)| p.cmp(&prio)) {
|
|
|
|
Ok(i) => i,
|
|
|
|
Err(i) => {
|
|
|
|
self.items.insert(i, (prio, VecDeque::new()));
|
|
|
|
i
|
|
|
|
}
|
|
|
|
};
|
|
|
|
self.items[pos_prio].1.push_back(item);
|
2020-12-02 12:30:47 +00:00
|
|
|
}
|
2020-12-07 12:35:24 +00:00
|
|
|
fn is_empty(&self) -> bool {
|
|
|
|
self.items.iter().all(|(_k, v)| v.is_empty())
|
|
|
|
}
|
2022-06-19 15:44:07 +00:00
|
|
|
|
|
|
|
// this is like an async fn, but hand implemented
|
|
|
|
fn next_ready(&mut self) -> SendQueuePollNextReady<'_> {
|
|
|
|
SendQueuePollNextReady { queue: self }
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Future returned by `SendQueue::next_ready` (a hand-implemented async fn);
/// resolves to the next (request id, data frame) ready to be written.
struct SendQueuePollNextReady<'a> {
    queue: &'a mut SendQueue,
}
|
|
|
|
|
|
|
|
impl<'a> futures::Future for SendQueuePollNextReady<'a> {
|
2022-07-21 15:59:15 +00:00
|
|
|
type Output = (RequestID, DataFrame);
|
2022-06-19 15:44:07 +00:00
|
|
|
|
|
|
|
fn poll(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<Self::Output> {
|
2022-07-25 13:04:52 +00:00
|
|
|
for (i, (_prio, items_at_prio)) in self.queue.items.iter_mut().enumerate() {
|
2022-07-21 15:59:15 +00:00
|
|
|
let mut ready_item = None;
|
|
|
|
for (j, item) in items_at_prio.iter_mut().enumerate() {
|
2022-07-22 11:23:42 +00:00
|
|
|
let mut item_reader = item.data.read_exact_or_eos(MAX_CHUNK_LENGTH as usize);
|
|
|
|
match Pin::new(&mut item_reader).poll(ctx) {
|
2022-07-21 15:59:15 +00:00
|
|
|
Poll::Pending => (),
|
|
|
|
Poll::Ready(ready_v) => {
|
2022-07-25 13:04:52 +00:00
|
|
|
ready_item = Some((j, ready_v));
|
2022-07-21 15:59:15 +00:00
|
|
|
break;
|
2022-06-19 15:44:07 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2022-07-21 15:59:15 +00:00
|
|
|
|
2022-07-25 13:04:52 +00:00
|
|
|
if let Some((j, bytes_or_err)) = ready_item {
|
|
|
|
let item = items_at_prio.remove(j).unwrap();
|
|
|
|
let id = item.id;
|
|
|
|
let eos = item.data.eos();
|
|
|
|
|
2022-07-22 11:23:42 +00:00
|
|
|
let data_frame = match bytes_or_err {
|
2022-07-25 13:04:52 +00:00
|
|
|
Ok(bytes) => {
|
|
|
|
trace!(
|
|
|
|
"send queue poll next ready: id {} eos {:?} bytes {}",
|
|
|
|
id,
|
|
|
|
eos,
|
|
|
|
bytes.len()
|
|
|
|
);
|
|
|
|
DataFrame::Data(bytes, !eos)
|
|
|
|
}
|
2022-07-22 11:23:42 +00:00
|
|
|
Err(e) => DataFrame::Error(match e {
|
2022-07-25 13:04:52 +00:00
|
|
|
ReadExactError::Stream(code) => {
|
|
|
|
trace!(
|
|
|
|
"send queue poll next ready: id {} eos {:?} ERROR {}",
|
|
|
|
id,
|
|
|
|
eos,
|
|
|
|
code
|
|
|
|
);
|
|
|
|
code
|
|
|
|
}
|
2022-07-22 11:23:42 +00:00
|
|
|
_ => unreachable!(),
|
|
|
|
}),
|
|
|
|
};
|
2022-07-25 13:04:52 +00:00
|
|
|
|
|
|
|
if !eos && !matches!(data_frame, DataFrame::Error(_)) {
|
2022-07-21 15:59:15 +00:00
|
|
|
items_at_prio.push_back(item);
|
|
|
|
} else if items_at_prio.is_empty() {
|
|
|
|
self.queue.items.remove(i);
|
|
|
|
}
|
2022-07-25 13:04:52 +00:00
|
|
|
|
2022-07-22 11:23:42 +00:00
|
|
|
return Poll::Ready((id, data_frame));
|
2022-07-21 15:59:15 +00:00
|
|
|
}
|
2022-06-19 15:44:07 +00:00
|
|
|
}
|
2022-07-22 11:23:42 +00:00
|
|
|
// If the queue is empty, this futures is eternally pending.
|
|
|
|
// This is ok because we use it in a select with another future
|
|
|
|
// that can interrupt it.
|
2022-06-19 15:44:07 +00:00
|
|
|
Poll::Pending
|
|
|
|
}
|
2020-12-02 12:30:47 +00:00
|
|
|
}
|
|
|
|
|
2022-07-22 11:23:42 +00:00
|
|
|
/// One frame of a message as written on the wire, after the request id.
enum DataFrame {
    /// a fixed size buffer containing some data + a boolean indicating whether
    /// there may be more data coming from this stream. Can be used for some
    /// optimization. It's an error to set it to false if there is more data, but it is correct
    /// (albeit sub-optimal) to set it to true if there is nothing coming after
    Data(Bytes, bool),
    /// An error code automatically signals the end of the stream
    Error(u8),
}
|
|
|
|
|
|
|
|
impl DataFrame {
|
|
|
|
fn header(&self) -> [u8; 2] {
|
|
|
|
let header_u16 = match self {
|
|
|
|
DataFrame::Data(data, false) => data.len() as u16,
|
|
|
|
DataFrame::Data(data, true) => data.len() as u16 | CHUNK_HAS_CONTINUATION,
|
|
|
|
DataFrame::Error(e) => *e as u16 | ERROR_MARKER,
|
|
|
|
};
|
|
|
|
ChunkLength::to_be_bytes(header_u16)
|
|
|
|
}
|
|
|
|
|
|
|
|
fn data(&self) -> &[u8] {
|
|
|
|
match self {
|
|
|
|
DataFrame::Data(ref data, _) => &data[..],
|
|
|
|
DataFrame::Error(_) => &[],
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-02-21 11:01:04 +00:00
|
|
|
/// The SendLoop trait, which is implemented both by the client and the server
|
|
|
|
/// connection objects (ServerConna and ClientConn) adds a method `.send_loop()`
|
|
|
|
/// that takes a channel of messages to send and an asynchronous writer,
|
|
|
|
/// and sends messages from the channel to the async writer, putting them in a queue
|
|
|
|
/// before being sent and doing the round-robin sending strategy.
|
|
|
|
///
|
|
|
|
/// The `.send_loop()` exits when the sending end of the channel is closed,
|
|
|
|
/// or if there is an error at any time writing to the async writer.
|
2020-12-02 12:30:47 +00:00
|
|
|
#[async_trait]
|
|
|
|
pub(crate) trait SendLoop: Sync {
|
2020-12-07 17:07:55 +00:00
|
|
|
async fn send_loop<W>(
|
2020-12-02 12:30:47 +00:00
|
|
|
self: Arc<Self>,
|
2022-07-26 10:11:48 +00:00
|
|
|
msg_recv: mpsc::UnboundedReceiver<(RequestID, RequestPriority, ByteStream)>,
|
2021-10-25 07:27:57 +00:00
|
|
|
mut write: BoxStreamWrite<W>,
|
2020-12-07 17:07:55 +00:00
|
|
|
) -> Result<(), Error>
|
|
|
|
where
|
2021-10-12 12:51:28 +00:00
|
|
|
W: AsyncWriteExt + Unpin + Send + Sync,
|
2020-12-07 17:07:55 +00:00
|
|
|
{
|
2020-12-02 12:30:47 +00:00
|
|
|
let mut sending = SendQueue::new();
|
2022-07-26 10:11:48 +00:00
|
|
|
let mut msg_recv = Some(msg_recv);
|
|
|
|
while msg_recv.is_some() || !sending.is_empty() {
|
|
|
|
debug!(
|
|
|
|
"Sending: {:?}",
|
|
|
|
sending
|
|
|
|
.items
|
|
|
|
.iter()
|
|
|
|
.map(|(_, i)| i.iter().map(|x| x.id))
|
|
|
|
.flatten()
|
|
|
|
.collect::<Vec<_>>()
|
|
|
|
);
|
|
|
|
|
|
|
|
let recv_fut = async {
|
|
|
|
if let Some(chan) = &mut msg_recv {
|
|
|
|
chan.recv().await
|
|
|
|
} else {
|
|
|
|
futures::future::pending().await
|
|
|
|
}
|
|
|
|
};
|
2022-06-19 15:44:07 +00:00
|
|
|
let send_fut = sending.next_ready();
|
|
|
|
|
|
|
|
// recv_fut is cancellation-safe according to tokio doc,
|
|
|
|
// send_fut is cancellation-safe as implemented above?
|
2022-07-26 10:11:48 +00:00
|
|
|
tokio::select! {
|
|
|
|
sth = recv_fut => {
|
2022-06-19 15:44:07 +00:00
|
|
|
if let Some((id, prio, data)) = sth {
|
2022-07-25 13:04:52 +00:00
|
|
|
trace!("send_loop: add stream {} to send", id);
|
2022-06-19 15:44:07 +00:00
|
|
|
sending.push(SendQueueItem {
|
|
|
|
id,
|
|
|
|
prio,
|
2022-07-22 11:23:42 +00:00
|
|
|
data: ByteStreamReader::new(data),
|
2022-06-19 15:44:07 +00:00
|
|
|
});
|
|
|
|
} else {
|
2022-07-26 10:11:48 +00:00
|
|
|
msg_recv = None;
|
2022-06-19 15:44:07 +00:00
|
|
|
};
|
2022-06-05 13:33:43 +00:00
|
|
|
}
|
2022-07-26 10:11:48 +00:00
|
|
|
(id, data) = send_fut => {
|
2022-07-25 13:04:52 +00:00
|
|
|
trace!(
|
|
|
|
"send_loop: id {}, send {} bytes, header_size {}",
|
|
|
|
id,
|
|
|
|
data.data().len(),
|
|
|
|
hex::encode(data.header())
|
|
|
|
);
|
2020-12-02 12:30:47 +00:00
|
|
|
|
2022-06-19 15:44:07 +00:00
|
|
|
let header_id = RequestID::to_be_bytes(id);
|
|
|
|
write.write_all(&header_id[..]).await?;
|
2022-06-05 13:33:43 +00:00
|
|
|
|
2022-06-19 16:42:27 +00:00
|
|
|
write.write_all(&data.header()).await?;
|
|
|
|
write.write_all(data.data()).await?;
|
2022-06-19 15:44:07 +00:00
|
|
|
write.flush().await?;
|
2020-12-07 12:35:24 +00:00
|
|
|
}
|
2020-12-02 12:30:47 +00:00
|
|
|
}
|
|
|
|
}
|
2022-02-21 12:45:41 +00:00
|
|
|
|
2021-10-25 11:58:42 +00:00
|
|
|
let _ = write.goodbye().await;
|
2020-12-02 12:30:47 +00:00
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
}
|