use std::collections::{HashMap, VecDeque};
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};

use log::{trace, warn};

use futures::channel::mpsc::{unbounded, UnboundedReceiver, UnboundedSender};
use futures::Stream;
use futures::{AsyncReadExt, AsyncWriteExt};
use kuska_handshake::async_std::BoxStreamWrite;

use tokio::sync::mpsc;

use async_trait::async_trait;

use crate::error::*;
use crate::util::AssociatedStream;

/// Priority of a request (click to read more about priorities).
///
/// This priority value is used to prioritize messages
/// in the send queue of the client, and their responses in the send queue of the
/// server. Lower values mean higher priority.
///
/// This mechanism is useful for messages bigger than the maximum chunk size
/// (set by `MAX_CHUNK_LENGTH`, currently `0x3FF0` bytes), such as large file transfers.
/// In such a case, all of the messages in the send queue with the highest priority
/// will take turns to send individual chunks, in a round-robin fashion.
/// Once all highest priority messages are sent successfully, the messages with
/// the next highest priority will begin being sent in the same way.
///
/// The same priority value is given to a request and to its associated response.
pub type RequestPriority = u8;

/// Priority class: high
pub const PRIO_HIGH: RequestPriority = 0x20;
/// Priority class: normal
pub const PRIO_NORMAL: RequestPriority = 0x40;
/// Priority class: background
pub const PRIO_BACKGROUND: RequestPriority = 0x80;
/// Priority: primary among given class
pub const PRIO_PRIMARY: RequestPriority = 0x00;
/// Priority: secondary among given class (ex: `PRIO_HIGH | PRIO_SECONDARY`)
pub const PRIO_SECONDARY: RequestPriority = 0x01;
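
// Illustrative sketch (added example, not upstream code): a request priority combines one of
// the priority classes above with a primary/secondary bit, and lower numeric values are
// served first.
#[cfg(test)]
#[test]
fn example_priority_composition() {
	let prio: RequestPriority = PRIO_HIGH | PRIO_SECONDARY;
	assert_eq!(prio, 0x21);
	// high-class requests (0x2x) are served before normal-class ones (0x4x)
	assert!(prio < PRIO_NORMAL);
}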

// Messages are sent by chunks
// Chunk format:
// - u32 BE: request id (same for request and response)
// - u16 BE: chunk length, possibly with CHUNK_HAS_CONTINUATION flag
//   when this is not the last chunk of the message
// - [u8; chunk_length] chunk data

pub(crate) type RequestID = u32;
type ChunkLength = u16;
const MAX_CHUNK_LENGTH: ChunkLength = 0x3FF0;
const ERROR_MARKER: ChunkLength = 0x4000;
const CHUNK_HAS_CONTINUATION: ChunkLength = 0x8000;
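
// Illustrative sketch (added example, not upstream code): how a chunk length header combines
// the actual length with the CHUNK_HAS_CONTINUATION flag, mirroring what `send_loop` and
// `recv_loop` below do. The length value used here is arbitrary.
#[cfg(test)]
#[test]
fn example_chunk_length_header() {
	let len: ChunkLength = 0x1234;
	assert!(len <= MAX_CHUNK_LENGTH);
	// a chunk that is not the last one of its message carries the continuation flag
	let header = ChunkLength::to_be_bytes(len | CHUNK_HAS_CONTINUATION);
	// the receiving side splits the flag from the actual chunk length
	let size = ChunkLength::from_be_bytes(header);
	assert!((size & CHUNK_HAS_CONTINUATION) != 0);
	assert_eq!(size & !CHUNK_HAS_CONTINUATION, len);
}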

struct SendQueueItem {
	id: RequestID,
	prio: RequestPriority,
	data: DataReader,
}

pub(crate) enum Data {
	Full(Vec<u8>),
	Streaming(AssociatedStream),
}

#[pin_project::pin_project(project = DataReaderProj)]
enum DataReader {
	Full {
		#[pin]
		data: Vec<u8>,
		pos: usize,
	},
	Streaming {
		#[pin]
		reader: AssociatedStream,
		packet: Vec<u8>,
		pos: usize,
		buf: Vec<u8>,
		eos: bool,
	},
}

impl From<Data> for DataReader {
	fn from(data: Data) -> DataReader {
		match data {
			Data::Full(data) => DataReader::Full { data, pos: 0 },
			Data::Streaming(reader) => DataReader::Streaming {
				reader,
				packet: Vec::new(),
				pos: 0,
				buf: Vec::with_capacity(MAX_CHUNK_LENGTH as usize),
				eos: false,
			},
		}
	}
}

struct DataReaderItem {
	/// a fixed-size buffer containing some data, possibly padded with 0s
	data: [u8; MAX_CHUNK_LENGTH as usize],
	/// actual length of the data
	len: usize,
	/// whether there may be more data coming from this stream. Can be used for some
	/// optimization. It's an error to set it to false if there is more data, but it is correct
	/// (albeit sub-optimal) to set it to true if there is nothing coming after
	may_have_more: bool,
}

impl DataReaderItem {
	fn empty_last() -> Self {
		DataReaderItem {
			data: [0; MAX_CHUNK_LENGTH as usize],
			len: 0,
			may_have_more: false,
		}
	}
}

impl Stream for DataReader {
	type Item = DataReaderItem;

	fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
		match self.project() {
			DataReaderProj::Full { data, pos } => {
				let len = std::cmp::min(MAX_CHUNK_LENGTH as usize, data.len() - *pos);
				let end = *pos + len;

				if len == 0 {
					Poll::Ready(None)
				} else {
					let mut body = [0; MAX_CHUNK_LENGTH as usize];
					body[..len].copy_from_slice(&data[*pos..end]);
					*pos = end;
					Poll::Ready(Some(DataReaderItem {
						data: body,
						len,
						may_have_more: end < data.len(),
					}))
				}
			}
			DataReaderProj::Streaming {
				mut reader,
				packet,
				pos,
				buf,
				eos,
			} => {
				if *eos {
					// eos was reached at the previous call to poll_next, where a partial packet
					// was returned. Now return None.
					return Poll::Ready(None);
				}
				loop {
					let packet_left = packet.len() - *pos;
					let buf_left = MAX_CHUNK_LENGTH as usize - buf.len();
					let to_read = std::cmp::min(buf_left, packet_left);
					buf.extend_from_slice(&packet[*pos..*pos + to_read]);
					*pos += to_read;
					if buf.len() == MAX_CHUNK_LENGTH as usize {
						// we have a full buf, ready to send
						break;
					}

					// we don't have a full buf, and the packet is exhausted; try to receive more
					if let Some(p) = futures::ready!(reader.as_mut().poll_next(cx)) {
						*packet = p;
						*pos = 0;
					} else {
						*eos = true;
						break;
					}
				}

				let mut body = [0; MAX_CHUNK_LENGTH as usize];
				let len = buf.len();
				body[..len].copy_from_slice(buf);
				buf.clear();
				Poll::Ready(Some(DataReaderItem {
					data: body,
					len,
					may_have_more: !*eos,
				}))
			}
		}
	}
}

struct SendQueue {
	items: VecDeque<(u8, VecDeque<SendQueueItem>)>,
}

impl SendQueue {
	fn new() -> Self {
		Self {
			items: VecDeque::with_capacity(64),
		}
	}
	fn push(&mut self, item: SendQueueItem) {
		let prio = item.prio;
		let pos_prio = match self.items.binary_search_by(|(p, _)| p.cmp(&prio)) {
			Ok(i) => i,
			Err(i) => {
				self.items.insert(i, (prio, VecDeque::new()));
				i
			}
		};
		self.items[pos_prio].1.push_back(item);
	}
	// used only in tests. They should probably be rewritten
	#[allow(dead_code)]
	fn pop(&mut self) -> Option<SendQueueItem> {
		match self.items.pop_front() {
			None => None,
			Some((prio, mut items_at_prio)) => {
				let ret = items_at_prio.pop_front();
				if !items_at_prio.is_empty() {
					self.items.push_front((prio, items_at_prio));
				}
				ret.or_else(|| self.pop())
			}
		}
	}
	fn is_empty(&self) -> bool {
		self.items.iter().all(|(_k, v)| v.is_empty())
	}

	// this is like an async fn, but hand implemented
	fn next_ready(&mut self) -> SendQueuePollNextReady<'_> {
		SendQueuePollNextReady { queue: self }
	}
}

struct SendQueuePollNextReady<'a> {
	queue: &'a mut SendQueue,
}

impl<'a> futures::Future for SendQueuePollNextReady<'a> {
	type Output = (RequestID, DataReaderItem);

	fn poll(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<Self::Output> {
		for i in 0..self.queue.items.len() {
			let (_prio, items_at_prio) = &mut self.queue.items[i];

			for _ in 0..items_at_prio.len() {
				let mut item = items_at_prio.pop_front().unwrap();

				match Pin::new(&mut item.data).poll_next(ctx) {
					Poll::Pending => items_at_prio.push_back(item),
					Poll::Ready(Some(data)) => {
						let id = item.id;
						if data.may_have_more {
							self.queue.push(item);
						} else if items_at_prio.is_empty() {
							// this priority level is empty, remove it
							self.queue.items.remove(i);
						}
						return Poll::Ready((id, data));
					}
					Poll::Ready(None) => {
						if items_at_prio.is_empty() {
							// this priority level is empty, remove it
							self.queue.items.remove(i);
						}
						return Poll::Ready((item.id, DataReaderItem::empty_last()));
					}
				}
			}
		}
		// TODO what do we do if self.queue is empty? We won't get scheduled again.
		Poll::Pending
	}
}

/// The SendLoop trait, which is implemented both by the client and the server
/// connection objects (ServerConn and ClientConn), adds a method `.send_loop()`
/// that takes a channel of messages to send and an asynchronous writer,
/// and sends messages from the channel to the async writer, putting them in a queue
/// before they are sent and using the round-robin strategy described above.
///
/// `.send_loop()` exits when the sending end of the channel is closed,
/// or if there is an error at any time while writing to the async writer.
/// (An illustrative sketch of how messages are fed into this channel follows the trait.)
#[async_trait]
pub(crate) trait SendLoop: Sync {
	async fn send_loop<W>(
		self: Arc<Self>,
		mut msg_recv: mpsc::UnboundedReceiver<(RequestID, RequestPriority, Data)>,
		mut write: BoxStreamWrite<W>,
	) -> Result<(), Error>
	where
		W: AsyncWriteExt + Unpin + Send + Sync,
	{
		let mut sending = SendQueue::new();
		let mut should_exit = false;
		while !should_exit || !sending.is_empty() {
			let recv_fut = msg_recv.recv();
			futures::pin_mut!(recv_fut);
			let send_fut = sending.next_ready();

			// recv_fut is cancellation-safe according to the tokio docs,
			// send_fut is cancellation-safe as implemented above?
			use futures::future::Either;
			match futures::future::select(recv_fut, send_fut).await {
				Either::Left((sth, _send_fut)) => {
					if let Some((id, prio, data)) = sth {
						sending.push(SendQueueItem {
							id,
							prio,
							data: data.into(),
						});
					} else {
						should_exit = true;
					};
				}
				Either::Right(((id, data), _recv_fut)) => {
					trace!("send_loop: sending bytes for {}", id);

					let header_id = RequestID::to_be_bytes(id);
					write.write_all(&header_id[..]).await?;

					let body = &data.data[..data.len];

					let size_header = if data.may_have_more {
						ChunkLength::to_be_bytes(data.len as u16 | CHUNK_HAS_CONTINUATION)
					} else {
						ChunkLength::to_be_bytes(data.len as u16)
					};

					write.write_all(&size_header[..]).await?;
					write.write_all(body).await?;
					write.flush().await?;
				}
			}
		}

		let _ = write.goodbye().await;
		Ok(())
	}
}
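
// Illustrative sketch (added example, not upstream code): how a caller could hand a message
// to `send_loop` through the channel it consumes. The request id chosen here is arbitrary;
// `send_loop` itself then splits the body into chunks of at most MAX_CHUNK_LENGTH bytes.
#[cfg(test)]
#[test]
fn example_feed_send_loop_channel() {
	let (msg_send, _msg_recv) = mpsc::unbounded_channel::<(RequestID, RequestPriority, Data)>();
	assert!(msg_send
		.send((42, PRIO_NORMAL, Data::Full(b"hello world".to_vec())))
		.is_ok());
}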

struct ChannelPair {
	receiver: Option<UnboundedReceiver<Vec<u8>>>,
	sender: Option<UnboundedSender<Vec<u8>>>,
}

impl ChannelPair {
	fn take_receiver(&mut self) -> Option<UnboundedReceiver<Vec<u8>>> {
		self.receiver.take()
	}

	fn take_sender(&mut self) -> Option<UnboundedSender<Vec<u8>>> {
		self.sender.take()
	}

	fn ref_sender(&mut self) -> Option<&UnboundedSender<Vec<u8>>> {
		self.sender.as_ref()
	}

	fn insert_into(self, map: &mut HashMap<RequestID, ChannelPair>, index: RequestID) {
		if self.receiver.is_some() || self.sender.is_some() {
			map.insert(index, self);
		}
	}
}

impl Default for ChannelPair {
	fn default() -> Self {
		let (send, recv) = unbounded();
		ChannelPair {
			receiver: Some(recv),
			sender: Some(send),
		}
	}
}

/// The RecvLoop trait, which is implemented both by the client and the server
/// connection objects (ServerConn and ClientConn), adds a method `.recv_loop()`
/// and a prototype of a handler for received messages `.recv_handler()` that
/// must be filled by implementors. `.recv_loop()` receives messages in a loop
/// according to the protocol defined above: chunks of a message in the process of being
/// received are stored in a buffer, and when the last chunk of a message is received,
/// the full message is passed to the receive handler.
/// (The request id convention used to pair a message with its associated stream is
/// illustrated after this trait.)
#[async_trait]
pub(crate) trait RecvLoop: Sync + 'static {
	fn recv_handler(self: &Arc<Self>, id: RequestID, msg: Vec<u8>, stream: AssociatedStream);

	async fn recv_loop<R>(self: Arc<Self>, mut read: R) -> Result<(), Error>
	where
		R: AsyncReadExt + Unpin + Send + Sync,
	{
		let mut receiving: HashMap<RequestID, Vec<u8>> = HashMap::new();
		let mut streams: HashMap<RequestID, ChannelPair> = HashMap::new();
		loop {
			trace!("recv_loop: reading packet");
			let mut header_id = [0u8; RequestID::BITS as usize / 8];
			match read.read_exact(&mut header_id[..]).await {
				Ok(_) => (),
				Err(e) if e.kind() == std::io::ErrorKind::UnexpectedEof => break,
				Err(e) => return Err(e.into()),
			};
			let id = RequestID::from_be_bytes(header_id);
			trace!("recv_loop: got header id: {:04x}", id);

			let mut header_size = [0u8; ChunkLength::BITS as usize / 8];
			read.read_exact(&mut header_size[..]).await?;
			let size = ChunkLength::from_be_bytes(header_size);
			trace!("recv_loop: got header size: {:04x}", size);

			let has_cont = (size & CHUNK_HAS_CONTINUATION) != 0;
			let size = size & !CHUNK_HAS_CONTINUATION;

			let mut next_slice = vec![0; size as usize];
			read.read_exact(&mut next_slice[..]).await?;
			trace!("recv_loop: read {} bytes", next_slice.len());

			if id & 1 == 0 {
				// main stream
				let mut msg_bytes = receiving.remove(&id).unwrap_or_default();
				msg_bytes.extend_from_slice(&next_slice[..]);

				if has_cont {
					receiving.insert(id, msg_bytes);
				} else {
					let mut channel_pair = streams.remove(&(id | 1)).unwrap_or_default();

					if let Some(receiver) = channel_pair.take_receiver() {
						self.recv_handler(id, msg_bytes, Box::pin(receiver));
					} else {
						warn!("Couldn't take receiver part of stream")
					}

					channel_pair.insert_into(&mut streams, id | 1);
				}
			} else {
				// associated stream
				let mut channel_pair = streams.remove(&(id)).unwrap_or_default();

				// if we get an error, the receiving end is disconnected. We still need to
				// reach eos before dropping this sender
				if !next_slice.is_empty() {
					if let Some(sender) = channel_pair.ref_sender() {
						let _ = sender.unbounded_send(next_slice);
					} else {
						warn!("Couldn't take sending part of stream")
					}
				}

				if !has_cont {
					channel_pair.take_sender();
				}

				channel_pair.insert_into(&mut streams, id);
			}
		}
		Ok(())
	}
}
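
// Illustrative sketch (added example, not upstream code): the id convention `recv_loop` above
// relies on to pair a request with its associated stream: the message itself travels on an
// even id, and its associated-stream chunks use the same id with the lowest bit set.
#[cfg(test)]
#[test]
fn example_stream_id_parity() {
	let request_id: RequestID = 0x0004;
	assert_eq!(request_id & 1, 0); // even id: main message
	assert_eq!(request_id | 1, 0x0005); // odd id: associated stream of that message
}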

#[cfg(test)]
mod test {
	use super::*;

	#[test]
	fn test_priority_queue() {
		let i1 = SendQueueItem {
			id: 1,
			prio: PRIO_NORMAL,
			data: DataReader::Full {
				data: vec![],
				pos: 0,
			},
		};
		let i2 = SendQueueItem {
			id: 2,
			prio: PRIO_HIGH,
			data: DataReader::Full {
				data: vec![],
				pos: 0,
			},
		};
		let i2bis = SendQueueItem {
			id: 20,
			prio: PRIO_HIGH,
			data: DataReader::Full {
				data: vec![],
				pos: 0,
			},
		};
		let i3 = SendQueueItem {
			id: 3,
			prio: PRIO_HIGH | PRIO_SECONDARY,
			data: DataReader::Full {
				data: vec![],
				pos: 0,
			},
		};
		let i4 = SendQueueItem {
			id: 4,
			prio: PRIO_BACKGROUND | PRIO_SECONDARY,
			data: DataReader::Full {
				data: vec![],
				pos: 0,
			},
		};
		let i5 = SendQueueItem {
			id: 5,
			prio: PRIO_BACKGROUND | PRIO_PRIMARY,
			data: DataReader::Full {
				data: vec![],
				pos: 0,
			},
		};

		let mut q = SendQueue::new();

		q.push(i1); // 1
		let a = q.pop().unwrap(); // empty -> 1
		assert_eq!(a.id, 1);
		assert!(q.pop().is_none());

		q.push(a); // 1
		q.push(i2); // 2 1
		q.push(i2bis); // [2 20] 1
		let a = q.pop().unwrap(); // 20 1 -> 2
		assert_eq!(a.id, 2);
		let b = q.pop().unwrap(); // 1 -> 20
		assert_eq!(b.id, 20);
		let c = q.pop().unwrap(); // empty -> 1
		assert_eq!(c.id, 1);
		assert!(q.pop().is_none());

		q.push(a); // 2
		q.push(b); // [2 20]
		q.push(c); // [2 20] 1
		q.push(i3); // [2 20] 3 1
		q.push(i4); // [2 20] 3 1 4
		q.push(i5); // [2 20] 3 1 5 4

		let a = q.pop().unwrap(); // 20 3 1 5 4 -> 2
		assert_eq!(a.id, 2);
		q.push(a); // [20 2] 3 1 5 4

		let a = q.pop().unwrap(); // 2 3 1 5 4 -> 20
		assert_eq!(a.id, 20);
		let b = q.pop().unwrap(); // 3 1 5 4 -> 2
		assert_eq!(b.id, 2);
		q.push(b); // 2 3 1 5 4
		let b = q.pop().unwrap(); // 3 1 5 4 -> 2
		assert_eq!(b.id, 2);
		let c = q.pop().unwrap(); // 1 5 4 -> 3
		assert_eq!(c.id, 3);
		q.push(b); // 2 1 5 4
		let b = q.pop().unwrap(); // 1 5 4 -> 2
		assert_eq!(b.id, 2);
		let e = q.pop().unwrap(); // 5 4 -> 1
		assert_eq!(e.id, 1);
		let f = q.pop().unwrap(); // 4 -> 5
		assert_eq!(f.id, 5);
		let g = q.pop().unwrap(); // empty -> 4
		assert_eq!(g.id, 4);
		assert!(q.pop().is_none());
	}
}