garage_api: Refactor BodyChunker for stream-composition
parent f7349f4005
commit 732b4d0b63
1 changed file with 22 additions and 16 deletions
@@ -1,8 +1,9 @@
 use std::collections::{BTreeMap, VecDeque};
 use std::sync::Arc;
 
-use futures::stream::*;
-use hyper::{Body, Request, Response};
+use futures::prelude::*;
+use hyper::body::{Body, Bytes};
+use hyper::{Request, Response};
 use md5::{digest::generic_array::*, Digest as Md5Digest, Md5};
 use sha2::Sha256;
 
@@ -44,7 +45,7 @@ pub async fn handle_put(
 	// Parse body of uploaded file
 	let body = req.into_body();
 
-	let mut chunker = BodyChunker::new(body, garage.config.block_size);
+	let mut chunker = StreamChunker::new(body, garage.config.block_size);
 	let first_block = chunker.next().await?.unwrap_or_default();
 
 	// If body is small enough, store it directly in the object table
@@ -178,13 +179,13 @@ fn ensure_checksum_matches(
 	Ok(())
 }
 
-async fn read_and_put_blocks(
+async fn read_and_put_blocks<E: Into<GarageError>, S: Stream<Item = Result<Bytes, E>> + Unpin>(
 	garage: &Garage,
 	version: &Version,
 	part_number: u64,
 	first_block: Vec<u8>,
 	first_block_hash: Hash,
-	chunker: &mut BodyChunker,
+	chunker: &mut StreamChunker<S, E>,
 ) -> Result<(u64, GenericArray<u8, typenum::U16>, Hash), Error> {
 	let mut md5hasher = Md5::new();
 	let mut sha256hasher = Sha256::new();
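The new read_and_put_blocks signature accepts any fallible byte stream whose error converts into the application error. A minimal sketch of that bound pattern, with a hypothetical count_bytes function and AppError type standing in for Garage's own (assumes the futures and bytes crates); the real function also hashes and stores each block:

use bytes::Bytes;
use futures::prelude::*;

#[derive(Debug)]
struct AppError;

// Generic over any stream of Result<Bytes, E> where E converts into AppError,
// mirroring the bound used above.
async fn count_bytes<E: Into<AppError>, S: Stream<Item = Result<Bytes, E>> + Unpin>(
	mut stream: S,
) -> Result<u64, AppError> {
	let mut total = 0u64;
	while let Some(chunk) = stream.next().await {
		match chunk {
			Ok(bytes) => total += bytes.len() as u64,
			// Convert the stream's error into AppError before propagating it.
			Err(e) => return Err(e.into()),
		}
	}
	Ok(total)
}

fn main() {
	let parts = vec![Bytes::from_static(b"abc"), Bytes::from_static(b"de")];
	let stream = futures::stream::iter(parts.into_iter().map(Ok::<_, AppError>));
	assert_eq!(futures::executor::block_on(count_bytes(stream)).unwrap(), 5);
}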
@@ -205,8 +206,11 @@ async fn read_and_put_blocks(
 		.rpc_put_block(first_block_hash, first_block);
 
 	loop {
-		let (_, _, next_block) =
-			futures::try_join!(put_curr_block, put_curr_version_block, chunker.next())?;
+		let (_, _, next_block) = futures::try_join!(
+			put_curr_block,
+			put_curr_version_block,
+			chunker.next().map_err(Into::into),
+		)?;
 		if let Some(block) = next_block {
 			md5hasher.update(&block[..]);
 			sha256hasher.update(&block[..]);
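Because the chunker's error type is now generic, it gets mapped at the call site: futures::try_join! requires all of its futures to share a single error type. A minimal sketch of that unification, using hypothetical AppError/StreamError types and stand-in async fns rather than Garage's GarageError and RPC futures:

use futures::prelude::*;

#[derive(Debug)]
struct AppError(&'static str);

#[derive(Debug)]
struct StreamError;

impl From<StreamError> for AppError {
	fn from(_: StreamError) -> Self {
		AppError("stream failed")
	}
}

async fn store_block() -> Result<(), AppError> {
	Ok(())
}

async fn next_chunk() -> Result<Option<Vec<u8>>, StreamError> {
	Ok(Some(vec![0u8; 4]))
}

fn main() {
	futures::executor::block_on(async {
		// Without the map_err this would not type-check: AppError != StreamError.
		let (_, chunk) = futures::try_join!(
			store_block(),
			next_chunk().map_err(AppError::from),
		)
		.unwrap();
		assert_eq!(chunk.unwrap().len(), 4);
	});
}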
@@ -266,25 +270,26 @@ async fn put_block_meta(
 	Ok(())
 }
 
-struct BodyChunker {
-	body: Body,
+struct StreamChunker<S: Stream<Item = Result<Bytes, E>>, E> {
+	stream: S,
 	read_all: bool,
 	block_size: usize,
 	buf: VecDeque<u8>,
 }
 
-impl BodyChunker {
-	fn new(body: Body, block_size: usize) -> Self {
+impl<S: Stream<Item = Result<Bytes, E>> + Unpin, E> StreamChunker<S, E> {
+	fn new(stream: S, block_size: usize) -> Self {
 		Self {
-			body,
+			stream,
 			read_all: false,
 			block_size,
 			buf: VecDeque::with_capacity(2 * block_size),
 		}
 	}
-	async fn next(&mut self) -> Result<Option<Vec<u8>>, GarageError> {
+
+	async fn next(&mut self) -> Result<Option<Vec<u8>>, E> {
 		while !self.read_all && self.buf.len() < self.block_size {
-			if let Some(block) = self.body.next().await {
+			if let Some(block) = self.stream.next().await {
 				let bytes = block?;
 				trace!("Body next: {} bytes", bytes.len());
 				self.buf.extend(&bytes[..]);
@@ -292,6 +297,7 @@ impl BodyChunker {
 				self.read_all = true;
 			}
 		}
+
 		if self.buf.is_empty() {
 			Ok(None)
 		} else if self.buf.len() <= self.block_size {
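With the bound S: Stream<Item = Result<Bytes, E>> + Unpin, the chunker is no longer tied to hyper::Body: any fallible byte stream can be chunked, which is what makes later stream composition possible. A simplified, self-contained sketch of the same pattern, driven by an in-memory stream (assumes the futures and bytes crates; the buffer-splitting tail of next() is condensed compared to the diff, and the generic error lives on the impl rather than the struct):

use std::collections::VecDeque;
use std::convert::Infallible;

use bytes::Bytes;
use futures::prelude::*;

struct Chunker<S> {
	stream: S,
	read_all: bool,
	block_size: usize,
	buf: VecDeque<u8>,
}

impl<S, E> Chunker<S>
where
	S: Stream<Item = Result<Bytes, E>> + Unpin,
{
	fn new(stream: S, block_size: usize) -> Self {
		Self {
			stream,
			read_all: false,
			block_size,
			buf: VecDeque::with_capacity(2 * block_size),
		}
	}

	async fn next(&mut self) -> Result<Option<Vec<u8>>, E> {
		// Pull from the underlying stream until a full block is buffered or it ends.
		while !self.read_all && self.buf.len() < self.block_size {
			match self.stream.next().await {
				Some(chunk) => self.buf.extend(&chunk?[..]),
				None => self.read_all = true,
			}
		}
		if self.buf.is_empty() {
			Ok(None)
		} else {
			// Hand back at most one block worth of bytes.
			let n = self.block_size.min(self.buf.len());
			Ok(Some(self.buf.drain(..n).collect()))
		}
	}
}

fn main() {
	// Any fallible byte stream works, not just an HTTP request body.
	let parts = vec![Bytes::from_static(b"hello "), Bytes::from_static(b"world")];
	let body = futures::stream::iter(parts.into_iter().map(Ok::<_, Infallible>));
	let mut chunker = Chunker::new(body, 4);

	futures::executor::block_on(async {
		while let Some(block) = chunker.next().await.unwrap() {
			println!("block of {} bytes", block.len());
		}
	});
}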
@@ -368,12 +374,12 @@ pub async fn handle_put_part(
 
 	// Read first chuck, and at the same time try to get object to see if it exists
 	let key = key.to_string();
-	let mut chunker = BodyChunker::new(req.into_body(), garage.config.block_size);
+	let mut chunker = StreamChunker::new(req.into_body(), garage.config.block_size);
 
 	let (object, version, first_block) = futures::try_join!(
 		garage.object_table.get(&bucket_id, &key),
 		garage.version_table.get(&version_uuid, &EmptyKey),
-		chunker.next()
+		chunker.next().map_err(GarageError::from),
 	)?;
 
 	// Check object is valid and multipart block can be accepted