use std::collections::{BTreeMap, HashMap, VecDeque};
use std::sync::Arc;

use log::trace;

use futures::{AsyncReadExt, AsyncWriteExt};
use tokio::sync::mpsc;

use async_trait::async_trait;

use crate::error::*;

/// Priority of a request (click to read more about priorities).
///
/// This priority value is used to prioritize messages
/// in the send queue of the client, and their responses in the send queue of the
/// server. Lower values mean higher priority.
///
/// This mechanism is useful for messages bigger than the maximum chunk size
/// (set at `0x4000` bytes), such as large file transfers.
/// In that case, all of the messages in the send queue with the highest priority
/// take turns sending individual chunks, in a round-robin fashion.
/// Once all of the highest-priority messages have been sent successfully, the
/// messages with the next highest priority are sent in the same way.
///
/// The same priority value is given to a request and to its associated response.
pub type RequestPriority = u8;

/// Priority class: high
pub const PRIO_HIGH: RequestPriority = 0x20;
/// Priority class: normal
pub const PRIO_NORMAL: RequestPriority = 0x40;
/// Priority class: background
pub const PRIO_BACKGROUND: RequestPriority = 0x80;
/// Priority: primary among given class
pub const PRIO_PRIMARY: RequestPriority = 0x00;
/// Priority: secondary among given class (ex: `PRIO_HIGH | PRIO_SECONDARY`)
pub const PRIO_SECONDARY: RequestPriority = 0x01;
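
// Illustrative combinations (an assumption about intended usage, derived from the
// constants above): a full RequestPriority is a class OR'ed with a rank.
//   PRIO_HIGH | PRIO_PRIMARY          == 0x20  (sent first)
//   PRIO_HIGH | PRIO_SECONDARY        == 0x21
//   PRIO_NORMAL | PRIO_PRIMARY        == 0x40
//   PRIO_BACKGROUND | PRIO_SECONDARY  == 0x81  (sent last)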

// Messages are sent by chunks
// Chunk format:
// - u32 BE: request id (same for request and response)
// - u16 BE: chunk length, possibly with CHUNK_HAS_CONTINUATION flag
//   when this is not the last chunk of the message
// - [u8; chunk_length] chunk data

pub(crate) type RequestID = u32;
type ChunkLength = u16;
const MAX_CHUNK_LENGTH: ChunkLength = 0x4000;
const CHUNK_HAS_CONTINUATION: ChunkLength = 0x8000;
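
// Minimal sketch of the chunk header layout described above, added purely for
// illustration; `encode_chunk_header` is a hypothetical helper and this test
// module is not part of the original file (the send loop below writes the two
// header fields directly).
#[cfg(test)]
mod chunk_format_example {
	use super::*;

	fn encode_chunk_header(id: RequestID, len: ChunkLength, has_continuation: bool) -> [u8; 6] {
		let mut header = [0u8; 6];
		// u32 BE request id
		header[..4].copy_from_slice(&id.to_be_bytes());
		// u16 BE chunk length, with the continuation flag OR'ed in when more chunks follow
		let len_field = if has_continuation {
			len | CHUNK_HAS_CONTINUATION
		} else {
			len
		};
		header[4..].copy_from_slice(&len_field.to_be_bytes());
		header
	}

	#[test]
	fn chunk_header_layout() {
		// Request id 0x12345678, 16-byte chunk, more chunks to follow.
		let header = encode_chunk_header(0x12345678, 0x0010, true);
		assert_eq!(header, [0x12, 0x34, 0x56, 0x78, 0x80, 0x10]);
	}
}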

struct SendQueueItem {
	id: RequestID,
	prio: RequestPriority,
	data: Vec<u8>,
	cursor: usize,
}

struct SendQueue {
	items: BTreeMap<u8, VecDeque<SendQueueItem>>,
}

impl SendQueue {
	fn new() -> Self {
		Self {
			items: BTreeMap::new(),
		}
	}
	fn push(&mut self, item: SendQueueItem) {
		let prio = item.prio;
		let mut items_at_prio = self
			.items
			.remove(&prio)
			.unwrap_or_else(|| VecDeque::with_capacity(4));
		items_at_prio.push_back(item);
		self.items.insert(prio, items_at_prio);
	}
	fn pop(&mut self) -> Option<SendQueueItem> {
		match self.items.pop_first() {
			None => None,
			Some((prio, mut items_at_prio)) => {
				let ret = items_at_prio.pop_front();
				if !items_at_prio.is_empty() {
					self.items.insert(prio, items_at_prio);
				}
				// If the queue at this priority level was empty, try the next level.
				ret.or_else(|| self.pop())
			}
		}
	}
	fn is_empty(&self) -> bool {
		self.items.iter().all(|(_k, v)| v.is_empty())
	}
}
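
// Behavioural sketch of SendQueue, added for illustration only (this test module
// is not part of the original file): items pop out lowest priority value first,
// FIFO within a single priority level.
#[cfg(test)]
mod send_queue_example {
	use super::*;

	fn item(id: RequestID, prio: RequestPriority) -> SendQueueItem {
		SendQueueItem {
			id,
			prio,
			data: vec![],
			cursor: 0,
		}
	}

	#[test]
	fn pops_in_priority_order() {
		let mut queue = SendQueue::new();
		queue.push(item(1, PRIO_NORMAL));
		queue.push(item(2, PRIO_HIGH | PRIO_SECONDARY));
		queue.push(item(3, PRIO_HIGH));
		// PRIO_HIGH (0x20) comes before PRIO_HIGH | PRIO_SECONDARY (0x21),
		// which comes before PRIO_NORMAL (0x40).
		assert_eq!(queue.pop().unwrap().id, 3);
		assert_eq!(queue.pop().unwrap().id, 2);
		assert_eq!(queue.pop().unwrap().id, 1);
		assert!(queue.pop().is_none());
		assert!(queue.is_empty());
	}
}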

#[async_trait]
pub(crate) trait SendLoop: Sync {
	async fn send_loop<W>(
		self: Arc<Self>,
		mut msg_recv: mpsc::UnboundedReceiver<Option<(RequestID, RequestPriority, Vec<u8>)>>,
		mut write: W,
	) -> Result<(), Error>
	where
		W: AsyncWriteExt + Unpin + Send + Sync,
	{
		let mut sending = SendQueue::new();
		let mut should_exit = false;
		while !should_exit || !sending.is_empty() {
			if let Ok(sth) = msg_recv.try_recv() {
				if let Some((id, prio, data)) = sth {
					trace!("send_loop: got {}, {} bytes", id, data.len());
					sending.push(SendQueueItem {
						id,
						prio,
						data,
						cursor: 0,
					});
				} else {
					// A None on the channel asks us to finish sending what is queued, then exit.
					should_exit = true;
				}
			} else if let Some(mut item) = sending.pop() {
				trace!(
					"send_loop: sending bytes for {} ({} bytes, {} already sent)",
					item.id,
					item.data.len(),
					item.cursor
				);
				let header_id = RequestID::to_be_bytes(item.id);
				write.write_all(&header_id[..]).await?;

				if item.data.len() - item.cursor > MAX_CHUNK_LENGTH as usize {
					let header_size =
						ChunkLength::to_be_bytes(MAX_CHUNK_LENGTH | CHUNK_HAS_CONTINUATION);
					write.write_all(&header_size[..]).await?;

					let new_cursor = item.cursor + MAX_CHUNK_LENGTH as usize;
					write.write_all(&item.data[item.cursor..new_cursor]).await?;
					item.cursor = new_cursor;

					// Re-queue the rest of the message behind its peers at the same
					// priority, so large messages take turns chunk by chunk.
					sending.push(item);
				} else {
					let send_len = (item.data.len() - item.cursor) as ChunkLength;

					let header_size = ChunkLength::to_be_bytes(send_len);
					write.write_all(&header_size[..]).await?;

					write.write_all(&item.data[item.cursor..]).await?;
				}
				write.flush().await?;
			} else {
				// Nothing queued: block until the next message arrives.
				let sth = msg_recv
					.recv()
					.await
					.ok_or_else(|| Error::Message("Connection closed.".into()))?;
				if let Some((id, prio, data)) = sth {
					trace!("send_loop: got {}, {} bytes", id, data.len());
					sending.push(SendQueueItem {
						id,
						prio,
						data,
						cursor: 0,
					});
				} else {
					should_exit = true;
				}
			}
		}
		Ok(())
	}
}
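
// Usage sketch for send_loop (hypothetical caller-side code, not part of this
// file); `this` is an Arc to a SendLoop implementor and `write_half` a writable
// half of the connection:
//
//   let (msg_send, msg_recv) = mpsc::unbounded_channel();
//   tokio::spawn(this.clone().send_loop(msg_recv, write_half));
//   // Queue a message for sending:
//   msg_send.send(Some((request_id, PRIO_NORMAL, payload)))?;
//   // Ask the loop to flush its queue and exit:
//   msg_send.send(None)?;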

#[async_trait]
pub(crate) trait RecvLoop: Sync + 'static {
	// Handles a fully reassembled message received on this connection.
	async fn recv_handler(self: Arc<Self>, id: RequestID, msg: Vec<u8>);

	async fn recv_loop<R>(self: Arc<Self>, mut read: R) -> Result<(), Error>
	where
		R: AsyncReadExt + Unpin + Send + Sync,
	{
		let mut receiving = HashMap::new();
		loop {
			trace!("recv_loop: reading packet");
			let mut header_id = [0u8; RequestID::BITS as usize / 8];
			read.read_exact(&mut header_id[..]).await?;
			let id = RequestID::from_be_bytes(header_id);
			trace!("recv_loop: got header id: {:04x}", id);

			let mut header_size = [0u8; ChunkLength::BITS as usize / 8];
			read.read_exact(&mut header_size[..]).await?;
			let size = ChunkLength::from_be_bytes(header_size);
			trace!("recv_loop: got header size: {:04x}", size);

			let has_cont = (size & CHUNK_HAS_CONTINUATION) != 0;
			let size = size & !CHUNK_HAS_CONTINUATION;

			let mut next_slice = vec![0; size as usize];
			read.read_exact(&mut next_slice[..]).await?;
			trace!("recv_loop: read {} bytes", next_slice.len());

			let mut msg_bytes: Vec<_> = receiving.remove(&id).unwrap_or_default();
			msg_bytes.extend_from_slice(&next_slice[..]);

			if has_cont {
				// More chunks to come: keep the partial message until they arrive.
				receiving.insert(id, msg_bytes);
			} else {
				tokio::spawn(self.clone().recv_handler(id, msg_bytes));
			}
		}
	}
}
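
// Wiring sketch (an assumption about how these traits are meant to be used
// together, not code from this file): a connection object `conn` implementing
// both SendLoop and RecvLoop typically splits its socket and runs one task per
// direction, e.g. with futures::AsyncReadExt::split():
//
//   let (read_half, write_half) = socket.split();
//   tokio::spawn(conn.clone().recv_loop(read_half));
//   tokio::spawn(conn.clone().send_loop(msg_recv, write_half));
//
// recv_loop keeps partial messages in `receiving`, keyed by RequestID, and hands
// a message to recv_handler once it reads a chunk without CHUNK_HAS_CONTINUATION.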