fix some clippy lints
parent e4f493b481
commit 85b5a6bcd1
9 changed files with 19 additions and 22 deletions
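The fixes below follow a few recurring Clippy patterns: `map(..).flatten()` chains collapsed into `flat_map`, iteration over map entries replaced with `keys()` / `into_values()` where only one side is used, redundant `&` borrows and `.clone()` calls dropped, and a trailing `return` rewritten as a tail expression. A minimal standalone sketch of the same patterns (names and values are illustrative, not taken from the Garage code):

use std::collections::HashMap;

fn main() {
    let nodes: HashMap<u32, &str> = HashMap::from([(1, "alpha"), (2, "beta")]);

    // clippy::map_flatten: prefer flat_map over map(..).flatten()
    let letters: Vec<char> = nodes.values().flat_map(|name| name.chars()).collect();

    // prefer keys()/into_values() over iterating entries and discarding one side
    let ids: Vec<u32> = nodes.keys().copied().collect();
    let names: Vec<&str> = nodes.into_values().collect();

    // clippy::needless_return: end the function with a tail expression
    fn quorum_check(failures: usize, max_failures: usize) -> Result<(), String> {
        if failures > max_failures {
            Err(format!("too many failures: {failures}"))
        } else {
            Ok(())
        }
    }

    println!("{letters:?} {ids:?} {names:?} {:?}", quorum_check(0, 1));
}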

@@ -94,7 +94,7 @@ pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<
         }
     }
 
-    let mut nodes = nodes.into_iter().map(|(_, v)| v).collect::<Vec<_>>();
+    let mut nodes = nodes.into_values().collect::<Vec<_>>();
     nodes.sort_by(|x, y| x.id.cmp(&y.id));
 
     let res = GetClusterStatusResponse {

@@ -129,7 +129,7 @@ impl LayoutHelper {
     where
         F: FnOnce(&mut LayoutHistory) -> bool,
     {
-        let changed = f(&mut self.layout.as_mut().unwrap());
+        let changed = f(self.layout.as_mut().unwrap());
         if changed {
             *self = Self::new(
                 self.replication_mode,

@@ -42,8 +42,7 @@ impl LayoutHistory {
         let set = self
             .versions
             .iter()
-            .map(|x| x.all_nodes())
-            .flatten()
+            .flat_map(|x| x.all_nodes())
             .collect::<HashSet<_>>();
         set.into_iter().copied().collect::<Vec<_>>()
     }

@@ -56,8 +55,7 @@ impl LayoutHistory {
         let set = self
             .versions
             .iter()
-            .map(|x| x.nongateway_nodes())
-            .flatten()
+            .flat_map(|x| x.nongateway_nodes())
             .collect::<HashSet<_>>();
         set.into_iter().copied().collect::<Vec<_>>()
     }

@@ -94,7 +92,7 @@ impl LayoutHistory {
         let sync_ack_map_min = self
             .update_trackers
             .sync_ack_map
-            .min_among(&current_nodes, min_version);
+            .min_among(current_nodes, min_version);
         if self.min_stored() < sync_ack_map_min {
             let removed = self.versions.remove(0);
             info!(

@@ -144,7 +142,7 @@ impl LayoutHistory {
         let global_min = self
             .update_trackers
             .sync_map
-            .min_among(&all_nongateway_nodes, min_version);
+            .min_among(all_nongateway_nodes, min_version);
 
         // If the write quorums are equal to the total number of nodes,
         // i.e. no writes can succeed while they are not written to all nodes,

@@ -281,7 +279,7 @@ To know the correct value of the new layout version, invoke `garage layout show`
         let (new_version, msg) = self
             .current()
             .clone()
-            .calculate_next_version(&self.staging.get())?;
+            .calculate_next_version(self.staging.get())?;
 
         self.versions.push(new_version);
         self.cleanup_old_versions();

@@ -297,7 +295,7 @@ To know the correct value of the new layout version, invoke `garage layout show`
 
     pub fn revert_staged_changes(mut self) -> Result<Self, Error> {
         self.staging.update(LayoutStaging {
-            parameters: Lww::new(self.current().parameters.clone()),
+            parameters: Lww::new(self.current().parameters),
             roles: LwwMap::new(),
         });
 

@@ -357,7 +357,7 @@ mod v010 {
             update_trackers: UpdateTrackers {
                 ack_map: update_tracker.clone(),
                 sync_map: update_tracker.clone(),
-                sync_ack_map: update_tracker.clone(),
+                sync_ack_map: update_tracker,
             },
             staging: Lww::raw(previous.version, staging),
         }

@@ -137,19 +137,19 @@ impl LayoutVersion {
     // ===================== internal information extractors ======================
 
     pub(crate) fn expect_get_node_capacity(&self, uuid: &Uuid) -> u64 {
-        self.get_node_capacity(&uuid)
+        self.get_node_capacity(uuid)
             .expect("non-gateway node with zero capacity")
     }
 
     pub(crate) fn expect_get_node_zone(&self, uuid: &Uuid) -> &str {
-        self.get_node_zone(&uuid).expect("node without a zone")
+        self.get_node_zone(uuid).expect("node without a zone")
     }
 
     /// Returns the sum of capacities of non gateway nodes in the cluster
     fn get_total_capacity(&self) -> u64 {
         let mut total_capacity = 0;
         for uuid in self.nongateway_nodes() {
-            total_capacity += self.expect_get_node_capacity(&uuid);
+            total_capacity += self.expect_get_node_capacity(uuid);
         }
         total_capacity
     }

@@ -442,7 +442,7 @@ impl RpcHelper {
 
         // Send one request to each peer of the quorum sets
         let msg = msg.into_req().map_err(netapp::error::Error::from)?;
-        let requests = result_tracker.nodes.iter().map(|(peer, _)| {
+        let requests = result_tracker.nodes.keys().map(|peer| {
             let self2 = self.clone();
             let msg = msg.clone();
             let endpoint2 = endpoint.clone();

@@ -315,7 +315,7 @@ impl System {
             local_status: ArcSwap::new(Arc::new(local_status)),
             node_status: RwLock::new(HashMap::new()),
             netapp: netapp.clone(),
-            fullmesh: fullmesh.clone(),
+            fullmesh,
             system_endpoint,
             replication_mode,
             replication_factor,

@@ -123,15 +123,15 @@ impl<F: TableSchema, R: TableReplication> TableSyncer<F, R> {
 
         let mut sync_futures = result_tracker
             .nodes
-            .iter()
-            .map(|(node, _)| *node)
+            .keys()
+            .copied()
             .map(|node| {
                 let must_exit = must_exit.clone();
                 async move {
                     if node == my_id {
                         (node, Ok(()))
                     } else {
-                        (node, self.do_sync_with(&partition, node, must_exit).await)
+                        (node, self.do_sync_with(partition, node, must_exit).await)
                     }
                 }
             })

@@ -145,7 +145,7 @@ impl<F: TableSchema, R: TableReplication> TableSyncer<F, R> {
         }
 
         if result_tracker.too_many_failures() {
-            return Err(result_tracker.quorum_error());
+            Err(result_tracker.quorum_error())
         } else {
             Ok(())
         }

@@ -209,8 +209,7 @@ impl<F: TableSchema, R: TableReplication> Table<F, R> {
         // it takes part, to optimize the detection of a quorum.
         let mut write_sets = entries_vec
             .iter()
-            .map(|(wss, _)| wss.as_ref().iter().map(|ws| ws.as_slice()))
-            .flatten()
+            .flat_map(|(wss, _)| wss.as_ref().iter().map(|ws| ws.as_slice()))
             .collect::<Vec<&[Uuid]>>();
         write_sets.sort();
         write_sets.dedup();