fix some clippy lints

Author: Alex, 2023-12-11 15:31:47 +01:00
Parent: e4f493b481
Commit: 85b5a6bcd1
Signed by: lx (GPG key ID: 0E496D15096376BE)
9 changed files with 19 additions and 22 deletions

@@ -94,7 +94,7 @@ pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<
         }
     }
-    let mut nodes = nodes.into_iter().map(|(_, v)| v).collect::<Vec<_>>();
+    let mut nodes = nodes.into_values().collect::<Vec<_>>();
     nodes.sort_by(|x, y| x.id.cmp(&y.id));
     let res = GetClusterStatusResponse {
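
Note: the hunk above replaces a manual key-discarding iterator with HashMap::into_values(), which is the rewrite clippy suggests for this pattern (likely its iter_kv_map lint). A minimal standalone sketch of the same change, using made-up data rather than Garage types:

    use std::collections::HashMap;

    fn main() {
        let mut nodes: HashMap<u64, String> = HashMap::new();
        nodes.insert(1, "node-a".to_string());
        nodes.insert(2, "node-b".to_string());

        // Lint-triggering form (consumes the map and throws each key away):
        //     let mut names = nodes.into_iter().map(|(_, v)| v).collect::<Vec<_>>();
        // Suggested form: into_values() yields the owned values directly.
        let mut names = nodes.into_values().collect::<Vec<_>>();
        names.sort();
        assert_eq!(names, vec!["node-a".to_string(), "node-b".to_string()]);
    }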

@@ -129,7 +129,7 @@ impl LayoutHelper {
     where
         F: FnOnce(&mut LayoutHistory) -> bool,
     {
-        let changed = f(&mut self.layout.as_mut().unwrap());
+        let changed = f(self.layout.as_mut().unwrap());
         if changed {
             *self = Self::new(
                 self.replication_mode,
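
Note: this change drops an extra &mut in front of an expression that is already a mutable reference; that double borrow is what clippy's needless_borrow lint flags. A small illustrative sketch with invented names (nothing here is Garage code):

    fn apply(update: impl FnOnce(&mut i32) -> bool) -> bool {
        let mut state: Option<i32> = Some(41);
        // `state.as_mut().unwrap()` is already a `&mut i32`; wrapping it in another
        // `&mut ...` only builds a `&mut &mut i32` that gets auto-reborrowed, so the
        // borrow is needless:
        //     let changed = update(&mut state.as_mut().unwrap());
        let changed = update(state.as_mut().unwrap());
        changed && state == Some(42)
    }

    fn main() {
        assert!(apply(|v| {
            *v += 1;
            true
        }));
    }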

@@ -42,8 +42,7 @@ impl LayoutHistory {
         let set = self
             .versions
             .iter()
-            .map(|x| x.all_nodes())
-            .flatten()
+            .flat_map(|x| x.all_nodes())
             .collect::<HashSet<_>>();
         set.into_iter().copied().collect::<Vec<_>>()
     }
@@ -56,8 +55,7 @@ impl LayoutHistory {
         let set = self
             .versions
             .iter()
-            .map(|x| x.nongateway_nodes())
-            .flatten()
+            .flat_map(|x| x.nongateway_nodes())
             .collect::<HashSet<_>>();
         set.into_iter().copied().collect::<Vec<_>>()
     }
@@ -94,7 +92,7 @@ impl LayoutHistory {
         let sync_ack_map_min = self
             .update_trackers
             .sync_ack_map
-            .min_among(&current_nodes, min_version);
+            .min_among(current_nodes, min_version);
         if self.min_stored() < sync_ack_map_min {
             let removed = self.versions.remove(0);
             info!(
@@ -144,7 +142,7 @@ impl LayoutHistory {
         let global_min = self
             .update_trackers
             .sync_map
-            .min_among(&all_nongateway_nodes, min_version);
+            .min_among(all_nongateway_nodes, min_version);
         // If the write quorums are equal to the total number of nodes,
         // i.e. no writes can succeed while they are not written to all nodes,
@@ -281,7 +279,7 @@ To know the correct value of the new layout version, invoke `garage layout show`
         let (new_version, msg) = self
             .current()
             .clone()
-            .calculate_next_version(&self.staging.get())?;
+            .calculate_next_version(self.staging.get())?;
         self.versions.push(new_version);
         self.cleanup_old_versions();
@@ -297,7 +295,7 @@ To know the correct value of the new layout version, invoke `garage layout show`
     pub fn revert_staged_changes(mut self) -> Result<Self, Error> {
         self.staging.update(LayoutStaging {
-            parameters: Lww::new(self.current().parameters.clone()),
+            parameters: Lww::new(self.current().parameters),
             roles: LwwMap::new(),
         });
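
Note: the last hunk drops a .clone() on parameters, presumably because that type is Copy, so cloning it is just a noisier copy (clippy's clone_on_copy lint). A tiny sketch of the situation, with a made-up stand-in type:

    #[derive(Clone, Copy, PartialEq, Debug)]
    struct Params {
        redundancy: u32,
    }

    fn main() {
        let current = Params { redundancy: 3 };

        // `Params` is Copy, so `.clone()` adds nothing; clippy suggests using
        // the value directly:
        //     let staged = current.clone();
        let staged = current;

        assert_eq!(staged, current);
    }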

@@ -357,7 +357,7 @@ mod v010 {
             update_trackers: UpdateTrackers {
                 ack_map: update_tracker.clone(),
                 sync_map: update_tracker.clone(),
-                sync_ack_map: update_tracker.clone(),
+                sync_ack_map: update_tracker,
             },
             staging: Lww::raw(previous.version, staging),
         }
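
Note: here the final use of update_tracker moves the value instead of cloning it one more time; the earlier .clone() calls stay because the value is still needed afterwards (clippy has a redundant_clone lint for this, though it normally has to be enabled explicitly). A minimal sketch of the same idea with invented types:

    #[derive(Clone, PartialEq, Debug)]
    struct Tracker(Vec<u64>);

    struct Trackers {
        ack: Tracker,
        sync: Tracker,
        sync_ack: Tracker,
    }

    fn main() {
        let tracker = Tracker(vec![1, 2, 3]);
        let trackers = Trackers {
            // These clones are required: `tracker` is still used below.
            ack: tracker.clone(),
            sync: tracker.clone(),
            // The last use can move the value instead of cloning once more.
            sync_ack: tracker,
        };
        assert_eq!(trackers.ack, trackers.sync);
        assert_eq!(trackers.sync, trackers.sync_ack);
    }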

@@ -137,19 +137,19 @@ impl LayoutVersion {
     // ===================== internal information extractors ======================
 
     pub(crate) fn expect_get_node_capacity(&self, uuid: &Uuid) -> u64 {
-        self.get_node_capacity(&uuid)
+        self.get_node_capacity(uuid)
             .expect("non-gateway node with zero capacity")
     }
 
     pub(crate) fn expect_get_node_zone(&self, uuid: &Uuid) -> &str {
-        self.get_node_zone(&uuid).expect("node without a zone")
+        self.get_node_zone(uuid).expect("node without a zone")
     }
 
     /// Returns the sum of capacities of non gateway nodes in the cluster
     fn get_total_capacity(&self) -> u64 {
         let mut total_capacity = 0;
         for uuid in self.nongateway_nodes() {
-            total_capacity += self.expect_get_node_capacity(&uuid);
+            total_capacity += self.expect_get_node_capacity(uuid);
         }
         total_capacity
     }
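
Note: in these methods uuid is already a &Uuid, so writing &uuid passes a &&Uuid that only gets auto-dereferenced again; the same applies to the loop variable yielded by nongateway_nodes(). A short sketch of the pattern with assumed, simplified types:

    // Stand-in for a lookup that takes a reference (hypothetical helper).
    fn capacity_of(id: &u64) -> u64 {
        id % 10
    }

    fn main() {
        let ids: Vec<u64> = vec![11, 22, 33];
        let mut total = 0;
        for id in ids.iter() {
            // `id` is already a `&u64`; `capacity_of(&id)` would pass a `&&u64`
            // and trigger clippy::needless_borrow. Pass it as-is instead.
            total += capacity_of(id);
        }
        assert_eq!(total, 1 + 2 + 3);
    }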

@@ -442,7 +442,7 @@ impl RpcHelper {
         // Send one request to each peer of the quorum sets
         let msg = msg.into_req().map_err(netapp::error::Error::from)?;
-        let requests = result_tracker.nodes.iter().map(|(peer, _)| {
+        let requests = result_tracker.nodes.keys().map(|peer| {
             let self2 = self.clone();
             let msg = msg.clone();
             let endpoint2 = endpoint.clone();
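
Note: as with into_values() earlier, iterating a map's entries only to discard the values is better written with .keys(); clippy suggests this rewrite as well, and the same pattern comes up again in the table sync hunk below. A standalone sketch with made-up data:

    use std::collections::HashMap;

    fn main() {
        let mut nodes: HashMap<u64, &'static str> = HashMap::new();
        nodes.insert(7, "ok");
        nodes.insert(9, "ok");

        // Instead of nodes.iter().map(|(peer, _)| peer), ask for the keys directly;
        // `.copied()` turns the borrowed `&u64` items into owned `u64`s when needed.
        let mut peers: Vec<u64> = nodes.keys().copied().collect();
        peers.sort();
        assert_eq!(peers, vec![7, 9]);
    }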

@@ -315,7 +315,7 @@ impl System {
             local_status: ArcSwap::new(Arc::new(local_status)),
             node_status: RwLock::new(HashMap::new()),
             netapp: netapp.clone(),
-            fullmesh: fullmesh.clone(),
+            fullmesh,
             system_endpoint,
             replication_mode,
             replication_factor,

@@ -123,15 +123,15 @@ impl<F: TableSchema, R: TableReplication> TableSyncer<F, R> {
         let mut sync_futures = result_tracker
             .nodes
-            .iter()
-            .map(|(node, _)| *node)
+            .keys()
+            .copied()
             .map(|node| {
                 let must_exit = must_exit.clone();
                 async move {
                     if node == my_id {
                         (node, Ok(()))
                     } else {
-                        (node, self.do_sync_with(&partition, node, must_exit).await)
+                        (node, self.do_sync_with(partition, node, must_exit).await)
                     }
                 }
             })
@@ -145,7 +145,7 @@ impl<F: TableSchema, R: TableReplication> TableSyncer<F, R> {
         }
         if result_tracker.too_many_failures() {
-            return Err(result_tracker.quorum_error());
+            Err(result_tracker.quorum_error())
         } else {
             Ok(())
         }
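
Note: the return keyword goes away because the if/else is already the tail expression of the function, so both branches simply evaluate to the function's result (clippy's needless_return lint). A compact sketch:

    fn check(failures: usize, quorum: usize) -> Result<(), String> {
        if failures >= quorum {
            // An explicit `return Err(...);` works too, but since this if/else is the
            // last expression of the function, clippy prefers letting the branch value
            // fall through as the return value.
            Err(format!("too many failures: {failures}"))
        } else {
            Ok(())
        }
    }

    fn main() {
        assert!(check(1, 3).is_ok());
        assert!(check(3, 3).is_err());
    }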

@@ -209,8 +209,7 @@ impl<F: TableSchema, R: TableReplication> Table<F, R> {
         // it takes part, to optimize the detection of a quorum.
         let mut write_sets = entries_vec
             .iter()
-            .map(|(wss, _)| wss.as_ref().iter().map(|ws| ws.as_slice()))
-            .flatten()
+            .flat_map(|(wss, _)| wss.as_ref().iter().map(|ws| ws.as_slice()))
             .collect::<Vec<&[Uuid]>>();
         write_sets.sort();
         write_sets.dedup();