Merge pull request '*: apply clippy recommendations.' (#570) from jpds/garage:clippy-fixes into main

Reviewed-on: Deuxfleurs/garage#570
Alex committed 2023-05-11 09:33:03 +00:00
commit 375270afd1
13 changed files with 28 additions and 39 deletions

@@ -105,7 +105,7 @@ impl AdminApiServer {
         let bucket_id = self
             .garage
             .bucket_helper()
-            .resolve_global_bucket_name(&domain)
+            .resolve_global_bucket_name(domain)
             .await?
             .ok_or(HelperError::NoSuchBucket(domain.to_string()))?;
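
A note on the change above: this looks like clippy's `needless_borrow` lint, which also appears to account for the similar one-character changes further down (`req.headers()`, `open_tree(format!(...))`, `BASE64_STANDARD.encode(bytes)`, `std::fs::metadata(file_path)`, `IpAddr::from_str(forwarded_for_ip_str)`). Taking a reference that the compiler immediately auto-dereferences again adds nothing. A minimal standalone sketch, with a hypothetical `lookup` function standing in for the Garage helper:

    // Hypothetical function; resolve_global_bucket_name itself is not shown here.
    fn lookup(name: &str) -> usize {
        name.len()
    }

    fn main() {
        let domain: &str = "bucket.example.org";
        // `lookup(&domain)` would pass a `&&str` that is auto-dereferenced right back;
        // clippy::needless_borrow suggests passing `domain` directly.
        println!("{}", lookup(domain));
    }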

@@ -183,8 +183,8 @@ async fn bucket_info_results(
             }
         }),
         keys: relevant_keys
-            .into_iter()
-            .map(|(_, key)| {
+            .into_values()
+            .map(|key| {
                 let p = key.state.as_option().unwrap();
                 GetBucketInfoKey {
                     access_key_id: key.key_id,
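
The pattern above (repeated for `relevant_buckets` and for the K2V poll-range items below) matches clippy's `iter_kv_map` suggestion: when only the values of a map are wanted, `.into_values()` says so directly instead of `.into_iter().map(|(_, v)| v)`. A small self-contained sketch, assuming `relevant_keys` is an ordinary `HashMap`:

    use std::collections::HashMap;

    fn main() {
        let relevant_keys: HashMap<String, u32> =
            HashMap::from([("GK1".to_string(), 1), ("GK2".to_string(), 2)]);
        // Previously: relevant_keys.into_iter().map(|(_, v)| v).collect()
        let mut values: Vec<u32> = relevant_keys.into_values().collect();
        values.sort(); // HashMap iteration order is unspecified
        println!("{values:?}");
    }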

@@ -183,8 +183,8 @@ async fn key_info_results(garage: &Arc<Garage>, key: Key) -> Result<Response<Bod
             create_bucket: *key_state.allow_create_bucket.get(),
         },
         buckets: relevant_buckets
-            .into_iter()
-            .map(|(_, bucket)| {
+            .into_values()
+            .map(|bucket| {
                 let state = bucket.state.as_option().unwrap();
                 KeyInfoBucketResult {
                     id: hex::encode(bucket.id),

@@ -128,7 +128,7 @@ impl<A: ApiHandler> ApiServer<A> {
         let uri = req.uri().clone();
         if let Ok(forwarded_for_ip_addr) =
-            forwarded_headers::handle_forwarded_for_headers(&req.headers())
+            forwarded_headers::handle_forwarded_for_headers(req.headers())
         {
             info!(
                 "{} (via {}) {} {}",

@@ -282,8 +282,8 @@ pub(crate) async fn handle_poll_range(
     if let Some((items, seen_marker)) = resp {
         let resp = PollRangeResponse {
             items: items
-                .into_iter()
-                .map(|(_k, i)| ReadBatchResponseItem::from(i))
+                .into_values()
+                .map(ReadBatchResponseItem::from)
                 .collect::<Vec<_>>(),
             seen_marker,
         };
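
Besides switching to `.into_values()`, once the tuple is gone the closure only forwards its argument to `ReadBatchResponseItem::from`, so clippy's `redundant_closure` lint prefers passing the function itself. A tiny sketch of just that part:

    fn main() {
        let words = vec!["alpha", "beta"];
        // Previously: .map(|w| str::len(w)) -- clippy::redundant_closure prefers
        // the function reference.
        let lengths: Vec<usize> = words.into_iter().map(str::len).collect();
        println!("{lengths:?}");
    }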

@@ -443,7 +443,7 @@ fn body_from_blocks_range(
     // block.part_number, which is not the same in the case of a multipart upload)
     let mut blocks: Vec<(VersionBlock, u64)> = Vec::with_capacity(std::cmp::min(
         all_blocks.len(),
-        4 + ((end - begin) / std::cmp::max(all_blocks[0].1.size as u64, 1024)) as usize,
+        4 + ((end - begin) / std::cmp::max(all_blocks[0].1.size, 1024)) as usize,
     ));
     let mut block_offset: u64 = 0;
     for (_, b) in all_blocks.iter() {
@@ -454,7 +454,7 @@ fn body_from_blocks_range(
         if block_offset < end && block_offset + b.size > begin {
             blocks.push((*b, block_offset));
         }
-        block_offset += b.size as u64;
+        block_offset += b.size;
     }
     let order_stream = OrderTag::stream();
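
The two hunks above drop `as u64` casts, which is clippy's `unnecessary_cast` lint; the removed casts imply that `VersionBlock::size` is already a `u64`. A minimal sketch under that assumption (the struct below is a stand-in, not the real Garage type):

    // Stand-in type; the removed casts imply `size` is already u64.
    struct VersionBlock {
        size: u64,
    }

    fn main() {
        let blocks = [VersionBlock { size: 1024 }, VersionBlock { size: 512 }];
        let mut block_offset: u64 = 0;
        for b in &blocks {
            block_offset += b.size; // previously `b.size as u64`, flagged as unnecessary
        }
        println!("{block_offset}");
    }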

@@ -220,14 +220,12 @@ fn randomize_next_scrub_run_time(timestamp: u64) -> u64 {
     // Take SCRUB_INTERVAL and mix in a random interval of 10 days to attempt to
     // balance scrub load across different cluster nodes.
-    let next_run_timestamp = timestamp
+    timestamp
         + SCRUB_INTERVAL
             .saturating_add(Duration::from_secs(
                 rand::thread_rng().gen_range(0..3600 * 24 * 10),
             ))
-            .as_millis() as u64;
-    next_run_timestamp
+            .as_millis() as u64
 }

 impl Default for ScrubWorkerPersisted {
@@ -241,18 +239,14 @@ impl Default for ScrubWorkerPersisted {
     }
 }

+#[derive(Default)]
 enum ScrubWorkerState {
     Running(BlockStoreIterator),
     Paused(BlockStoreIterator, u64), // u64 = time when to resume scrub
+    #[default]
     Finished,
 }

-impl Default for ScrubWorkerState {
-    fn default() -> Self {
-        ScrubWorkerState::Finished
-    }
-}
-
 #[derive(Debug)]
 pub enum ScrubWorkerCommand {
     Start,
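
Two further lints appear to be at work here: `let_and_return` (binding a value only to return it on the next line) and `derivable_impls` (a hand-written `Default` impl that merely picks a variant can become `#[derive(Default)]` with a `#[default]` attribute, stable since Rust 1.62). The same `derivable_impls` rewrite shows up again for `EnumerationOrder` below. A compact sketch with hypothetical names:

    // clippy::let_and_return -- return the expression directly.
    fn next_run(base: u64, jitter: u64) -> u64 {
        base + jitter // previously: `let t = base + jitter; t`
    }

    // clippy::derivable_impls -- only the #[default] variant has to be fieldless,
    // so this works even when other variants carry data.
    #[derive(Default)]
    enum WorkerState {
        Running(u32),
        #[default]
        Finished,
    }

    fn main() {
        println!("{}", next_run(10, 5));
        let _ = WorkerState::Running(1);
        assert!(matches!(WorkerState::default(), WorkerState::Finished));
    }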

@@ -79,7 +79,7 @@ impl RangeSeenMarker {
         let bytes = nonversioned_encode(&self)?;
         let bytes = zstd::stream::encode_all(&mut &bytes[..], zstd::DEFAULT_COMPRESSION_LEVEL)?;
-        Ok(BASE64_STANDARD.encode(&bytes))
+        Ok(BASE64_STANDARD.encode(bytes))
     }

     /// Decode from msgpack+zstd+b64 representation, returns None on error.

@@ -44,22 +44,22 @@ pub struct TableData<F: TableSchema, R: TableReplication> {
 impl<F: TableSchema, R: TableReplication> TableData<F, R> {
     pub fn new(system: Arc<System>, instance: F, replication: R, db: &db::Db) -> Arc<Self> {
         let store = db
-            .open_tree(&format!("{}:table", F::TABLE_NAME))
+            .open_tree(format!("{}:table", F::TABLE_NAME))
             .expect("Unable to open DB tree");

         let merkle_tree = db
-            .open_tree(&format!("{}:merkle_tree", F::TABLE_NAME))
+            .open_tree(format!("{}:merkle_tree", F::TABLE_NAME))
             .expect("Unable to open DB Merkle tree tree");

         let merkle_todo = db
-            .open_tree(&format!("{}:merkle_todo", F::TABLE_NAME))
+            .open_tree(format!("{}:merkle_todo", F::TABLE_NAME))
             .expect("Unable to open DB Merkle TODO tree");

         let insert_queue = db
-            .open_tree(&format!("{}:insert_queue", F::TABLE_NAME))
+            .open_tree(format!("{}:insert_queue", F::TABLE_NAME))
             .expect("Unable to open insert queue DB tree");

         let gc_todo = db
-            .open_tree(&format!("{}:gc_todo_v2", F::TABLE_NAME))
+            .open_tree(format!("{}:gc_todo_v2", F::TABLE_NAME))
             .expect("Unable to open GC DB tree");
         let gc_todo = CountedTree::new(gc_todo).expect("Cannot count gc_todo_v2");
@@ -90,7 +90,7 @@ impl<F: TableSchema, R: TableReplication> TableData<F, R> {
     pub fn read_entry(&self, p: &F::P, s: &F::S) -> Result<Option<ByteBuf>, Error> {
         let tree_key = self.tree_key(p, s);
-        if let Some(bytes) = self.store.get(&tree_key)? {
+        if let Some(bytes) = self.store.get(tree_key)? {
             Ok(Some(ByteBuf::from(bytes.to_vec())))
         } else {
             Ok(None)
@@ -132,10 +132,10 @@ impl<F: TableSchema, R: TableReplication> TableData<F, R> {
         }
     }

-    fn read_range_aux<'a>(
+    fn read_range_aux(
         &self,
         partition_hash: Hash,
-        range: db::ValueIter<'a>,
+        range: db::ValueIter,
         filter: &Option<F::Filter>,
         limit: usize,
     ) -> Result<Vec<Arc<ByteBuf>>, Error> {
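
The `read_range_aux` signature change looks like clippy's `needless_lifetimes` lint: the explicit `'a` ties the argument to nothing else, so lifetime elision infers the same thing. (The `open_tree(&format!(...))` changes above are the needless-borrow pattern noted earlier, this time on a freshly built `String`.) A minimal sketch with a hypothetical iterator type standing in for `db::ValueIter`:

    // Hypothetical stand-in for db::ValueIter.
    struct ValueIter<'a> {
        data: &'a [u8],
    }

    // Previously: fn first_byte<'a>(range: ValueIter<'a>) -> Option<u8>
    fn first_byte(range: ValueIter) -> Option<u8> {
        range.data.first().copied()
    }

    fn main() {
        let bytes = [1u8, 2, 3];
        println!("{:?}", first_byte(ValueIter { data: &bytes }));
    }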

@@ -34,8 +34,9 @@ impl DeletedFilter {
     }
 }

-#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq)]
+#[derive(Clone, Copy, Debug, Default, Serialize, Deserialize, PartialEq, Eq)]
 pub enum EnumerationOrder {
+    #[default]
     Forward,
     Reverse,
 }
@@ -49,9 +50,3 @@ impl EnumerationOrder {
         }
     }
 }
-
-impl Default for EnumerationOrder {
-    fn default() -> Self {
-        EnumerationOrder::Forward
-    }
-}

@@ -223,7 +223,7 @@ fn secret_from_file(
     #[cfg(unix)]
     if std::env::var("GARAGE_ALLOW_WORLD_READABLE_SECRETS").as_deref() != Ok("true") {
         use std::os::unix::fs::MetadataExt;
-        let metadata = std::fs::metadata(&file_path)?;
+        let metadata = std::fs::metadata(file_path)?;
         if metadata.mode() & 0o077 != 0 {
             return Err(format!("File {} is world-readable! (mode: 0{:o}, expected 0600)\nRefusing to start until this is fixed, or environment variable GARAGE_ALLOW_WORLD_READABLE_SECRETS is set to true.", file_path, metadata.mode()).into());
         }

@@ -13,7 +13,7 @@ pub fn handle_forwarded_for_headers(headers: &HeaderMap<HeaderValue>) -> Result<
         .to_str()
         .ok_or_message("Error parsing X-Forwarded-For header")?;

-    let client_ip = IpAddr::from_str(&forwarded_for_ip_str)
+    let client_ip = IpAddr::from_str(forwarded_for_ip_str)
         .ok_or_message("Valid IP address not found in X-Forwarded-For header")?;

     Ok(client_ip.to_string())

@@ -106,7 +106,7 @@ impl WebServer {
         addr: SocketAddr,
     ) -> Result<Response<Body>, Infallible> {
         if let Ok(forwarded_for_ip_addr) =
-            forwarded_headers::handle_forwarded_for_headers(&req.headers())
+            forwarded_headers::handle_forwarded_for_headers(req.headers())
         {
             info!(
                 "{} (via {}) {} {}",