remove size limitation in UploadPartCopy (#280)

This removes the 1 MiB minimum-size restriction on UploadPartCopy in s3_copy: copies whose total size was under 1 MiB were rejected with a BadRequest error.

This restriction doesn't seem to be documented anywhere (I could be wrong), and it causes some software to fail (such as #248).
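To make the failure mode concrete, here is a minimal sketch of the kind of request the old check rejected, written against the aws-sdk-s3 crate; the bucket and key names and the 4 KiB size are hypothetical, not taken from the linked issue:

use aws_sdk_s3::Client;

// Sketch only: copy a 4 KiB slice of an existing object as part 1 of a
// multipart upload. Before this change, garage rejected such a request with
// BadRequest: "Not enough data to copy: 4096 bytes (minimum: 1MB)".
async fn copy_small_part(client: &Client, upload_id: &str) -> Result<(), aws_sdk_s3::Error> {
    client
        .upload_part_copy()
        .bucket("dest-bucket")             // hypothetical destination
        .key("dest-key")
        .upload_id(upload_id)
        .part_number(1)
        .copy_source("source-bucket/source-key")
        .copy_source_range("bytes=0-4095") // 4 KiB, well under the old 1 MiB floor
        .send()
        .await?;
    Ok(())
}

Any UploadPartCopy whose copied range totalled fewer than 1024 * 1024 bytes hit the removed branch, regardless of which part of the multipart upload it was.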

Co-authored-by: Rob Landers <landers.robert@gmail.com>
Reviewed-on: #280
Co-authored-by: withinboredom <landers.robert@gmail.com>
Co-committed-by: withinboredom <landers.robert@gmail.com>
withinboredom 2022-04-19 12:49:43 +02:00 committed by Alex
parent 47e57518ec
commit a4f9f19ac3


@@ -268,7 +268,6 @@ pub async fn handle_upload_part_copy(
 	let mut blocks_to_copy = vec![];
 	let mut current_offset = 0;
-	let mut size_to_copy = 0;
 	for (_bk, block) in source_version.blocks.items().iter() {
 		let (block_begin, block_end) = (current_offset, current_offset + block.size);
@@ -289,10 +288,6 @@ pub async fn handle_upload_part_copy(
 				(Some(b), None) => Some(b as usize..block.size as usize),
 				(None, None) => None,
 			};
-			size_to_copy += range_to_copy
-				.as_ref()
-				.map(|x| x.len() as u64)
-				.unwrap_or(block.size);
 			blocks_to_copy.push((block.hash, range_to_copy));
 		}
@@ -300,13 +295,6 @@ pub async fn handle_upload_part_copy(
 		current_offset = block_end;
 	}
-	if size_to_copy < 1024 * 1024 {
-		return Err(Error::BadRequest(format!(
-			"Not enough data to copy: {} bytes (minimum: 1MB)",
-			size_to_copy
-		)));
-	}
 	// Now, actually copy the blocks
 	let mut md5hasher = Md5::new();