From 948ff93cf10da1705766c2f0d256c316adcb806b Mon Sep 17 00:00:00 2001
From: Mendes
Date: Sun, 1 May 2022 16:05:39 +0200
Subject: [PATCH] Corrected the warnings and errors issued by cargo clippy

---
 src/rpc/layout.rs     |  26 ++++-----
 src/util/bipartite.rs | 119 +++++++++++++++++++-----------------------
 2 files changed, 64 insertions(+), 81 deletions(-)

diff --git a/src/rpc/layout.rs b/src/rpc/layout.rs
index ac31da72..d0ee3463 100644
--- a/src/rpc/layout.rs
+++ b/src/rpc/layout.rs
@@ -195,8 +195,8 @@ impl ClusterLayout {
 			.collect::<Vec<_>>();
 		//We create an indexing of the zones
 		let mut zone_id = HashMap::<String, usize>::new();
-		for i in 0..part_per_zone_vec.len() {
-			zone_id.insert(part_per_zone_vec[i].0.clone(), i);
+		for (i, ppz) in part_per_zone_vec.iter().enumerate() {
+			zone_id.insert(ppz.0.clone(), i);
 		}
 
 		//We compute a candidate for the new partition to zone
@@ -212,7 +212,7 @@ impl ClusterLayout {
 		let mut node_assignation = vec![vec![None; self.replication_factor]; nb_partitions];
 		//We will decrement part_per_nod to keep track of the number
 		//of partitions that we still have to associate.
-		let mut part_per_nod = part_per_nod.clone();
+		let mut part_per_nod = part_per_nod;
 
 		//We minimize the distance to the former assignation(if any)
 
@@ -265,7 +265,7 @@ impl ClusterLayout {
 							&& part_per_nod[*id] > 0
 					})
 					.collect();
-				assert!(possible_nodes.len() > 0);
+				assert!(!possible_nodes.is_empty());
 				//We randomly pick a node
 				if let Some(nod) = possible_nodes.choose(&mut rng) {
 					node_assignation[i][j] = Some(*nod);
@@ -277,12 +277,12 @@ impl ClusterLayout {
 
 		//We write the assignation in the 1D table
 		self.ring_assignation_data = Vec::<CompactNodeType>::new();
-		for i in 0..nb_partitions {
-			for j in 0..self.replication_factor {
-				if let Some(id) = node_assignation[i][j] {
+		for ass in node_assignation {
+			for nod in ass {
+				if let Some(id) = nod {
 					self.ring_assignation_data.push(id as CompactNodeType);
 				} else {
-					assert!(false)
+					panic!()
 				}
 			}
 		}
@@ -318,7 +318,7 @@ impl ClusterLayout {
 		self.node_id_vec = new_node_id_vec;
 		self.ring_assignation_data = vec![];
 
-		return node_assignation;
+		node_assignation
 	}
 
 	///This function compute the number of partition to assign to
@@ -345,7 +345,7 @@ impl ClusterLayout {
 		//Compute the optimal number of partitions per zone
 		let sum_capacities: u32 = zone_capacity.values().sum();
 
-		if sum_capacities <= 0 {
+		if sum_capacities == 0 {
 			println!("No storage capacity in the network.");
 			return None;
 		}
@@ -493,14 +493,10 @@ impl ClusterLayout {
 			.map(|id_nod| match self.node_role(id_nod) {
 				Some(NodeRole {
 					zone: _,
-					capacity,
+					capacity: Some(c),
 					tags: _,
 				}) => {
-					if let Some(c) = capacity {
 					*c
-					} else {
-						0
-					}
 				}
 				_ => 0,
 			})
diff --git a/src/util/bipartite.rs b/src/util/bipartite.rs
index ade831a4..1e1e9caa 100644
--- a/src/util/bipartite.rs
+++ b/src/util/bipartite.rs
@@ -31,8 +31,8 @@ struct WeightedEdge {
  * as possible to old_match.
  * */
 pub fn optimize_matching(
-	old_match: &Vec<Vec<usize>>,
-	new_match: &Vec<Vec<usize>>,
+	old_match: &[Vec<usize>],
+	new_match: &[Vec<usize>],
 	nb_right: usize,
 ) -> Vec<Vec<usize>> {
 	let nb_left = old_match.len();
@@ -72,16 +72,11 @@ pub fn optimize_matching(
 	//Discovering and flipping a cycle with positive weight in this
 	//graph will make the matching closer to old_match.
 	//We use Bellman Ford algorithm to discover positive cycles
-	loop {
-		if let Some(cycle) = positive_cycle(&edge_vec, nb_left, nb_right) {
-			for i in cycle {
-				//We flip the edges of the cycle.
-				(edge_vec[i].u, edge_vec[i].v) = (edge_vec[i].v, edge_vec[i].u);
-				edge_vec[i].w *= -1;
-			}
-		} else {
-			//If there is no cycle, we return the optimal matching.
-			break;
+	while let Some(cycle) = positive_cycle(&edge_vec, nb_left, nb_right) {
+		for i in cycle {
+			//We flip the edges of the cycle.
+			(edge_vec[i].u, edge_vec[i].v) = (edge_vec[i].v, edge_vec[i].u);
+			edge_vec[i].w *= -1;
 		}
 	}
 
@@ -97,7 +92,7 @@ pub fn optimize_matching(
 
 //This function finds a positive cycle in a bipartite wieghted graph.
 fn positive_cycle(
-	edge_vec: &Vec<WeightedEdge>,
+	edge_vec: &[WeightedEdge],
 	nb_left: usize,
 	nb_right: usize,
 ) -> Option<Vec<usize>> {
@@ -122,8 +117,7 @@ fn positive_cycle(
 	//the number of partitions can be much larger than the
 	//number of nodes, we optimize that.
 	for _ in 0..(2 * nb_side_min) {
-		for j in 0..edge_vec.len() {
-			let e = edge_vec[j];
+		for (j, e) in edge_vec.iter().enumerate() {
 			if weight[e.v] < weight[e.u] + e.w {
 				weight[e.v] = weight[e.u] + e.w;
 				prev[e.v] = j;
@@ -148,8 +142,7 @@ fn positive_cycle(
 			curr = edge_vec[prev[curr]].u;
 		}
 		//Now curr is on the cycle. We collect the edges ids.
-		let mut cycle = Vec::<usize>::new();
-		cycle.push(prev[curr]);
+		let mut cycle = vec![prev[curr]];
 		let mut cycle_vert = edge_vec[prev[curr]].u;
 		while cycle_vert != curr {
 			cycle.push(prev[cycle_vert]);
@@ -180,16 +173,16 @@ pub fn dinic_compute_matching(left_cap_vec: Vec<u32>, right_cap_vec: Vec<u32>) -
 
 	// 0 will be the source
 	graph.push(vec![ed; left_cap_vec.len()]);
-	for i in 0..left_cap_vec.len() {
-		graph[0][i].c = left_cap_vec[i] as i32;
+	for (i, c) in left_cap_vec.iter().enumerate() {
+		graph[0][i].c = *c as i32;
 		graph[0][i].v = i + 2;
 		graph[0][i].rev = 0;
 	}
 	//1 will be the sink
 	graph.push(vec![ed; right_cap_vec.len()]);
-	for i in 0..right_cap_vec.len() {
-		graph[1][i].c = right_cap_vec[i] as i32;
+	for (i, c) in right_cap_vec.iter().enumerate() {
+		graph[1][i].c = *c as i32;
 		graph[1][i].v = i + 2 + left_cap_vec.len();
 		graph[1][i].rev = 0;
 	}
 
@@ -267,58 +260,52 @@ pub fn dinic_compute_matching(left_cap_vec: Vec<u32>, right_cap_vec: Vec<u32>) -
 	let mut next_nbd = vec![0; nb_vertices];
 	let mut lifo = VecDeque::new();
 
-	let flow_upper_bound;
-	if let Some(x) = left_cap_vec.iter().max() {
-		flow_upper_bound = *x as i32;
+	let flow_upper_bound = if let Some(x) = left_cap_vec.iter().max() {
+		*x as i32
 	} else {
-		flow_upper_bound = 0;
-		assert!(false);
-	}
+		panic!();
+	};
 
 	lifo.push_back((0, flow_upper_bound));
 
-	loop {
-		if let Some((id_tmp, f_tmp)) = lifo.back() {
-			let id = *id_tmp;
-			let f = *f_tmp;
-			if id == 1 {
-				//The DFS reached the sink, we can add a
-				//residual flow.
-				lifo.pop_back();
-				while !lifo.is_empty() {
-					if let Some((id, _)) = lifo.pop_back() {
-						let nbd = next_nbd[id];
-						graph[id][nbd].flow += f;
-						let id_v = graph[id][nbd].v;
-						let nbd_v = graph[id][nbd].rev;
-						graph[id_v][nbd_v].flow -= f;
-					}
+	while let Some((id_tmp, f_tmp)) = lifo.back() {
+		let id = *id_tmp;
+		let f = *f_tmp;
+		if id == 1 {
+			//The DFS reached the sink, we can add a
+			//residual flow.
+			lifo.pop_back();
+			while !lifo.is_empty() {
+				if let Some((id, _)) = lifo.pop_back() {
+					let nbd = next_nbd[id];
+					graph[id][nbd].flow += f;
+					let id_v = graph[id][nbd].v;
+					let nbd_v = graph[id][nbd].rev;
+					graph[id_v][nbd_v].flow -= f;
 				}
-				lifo.push_back((0, flow_upper_bound));
-				continue;
 			}
-			//else we did not reach the sink
-			let nbd = next_nbd[id];
-			if nbd >= graph[id].len() {
-				//There is nothing to explore from id anymore
-				lifo.pop_back();
-				if let Some((parent, _)) = lifo.back() {
-					next_nbd[*parent] += 1;
-				}
-				continue;
-			}
-			//else we can try to send flow from id to its nbd
-			let new_flow = min(f, graph[id][nbd].c - graph[id][nbd].flow);
-			if level[graph[id][nbd].v] <= level[id] || new_flow == 0 {
-				//We cannot send flow to nbd.
-				next_nbd[id] += 1;
-				continue;
-			}
-			//otherwise, we send flow to nbd.
-			lifo.push_back((graph[id][nbd].v, new_flow));
-		} else {
-			break;
+			lifo.push_back((0, flow_upper_bound));
+			continue;
 		}
+		//else we did not reach the sink
+		let nbd = next_nbd[id];
+		if nbd >= graph[id].len() {
+			//There is nothing to explore from id anymore
+			lifo.pop_back();
+			if let Some((parent, _)) = lifo.back() {
+				next_nbd[*parent] += 1;
+			}
+			continue;
+		}
+		//else we can try to send flow from id to its nbd
+		let new_flow = min(f, graph[id][nbd].c - graph[id][nbd].flow);
+		if level[graph[id][nbd].v] <= level[id] || new_flow == 0 {
+			//We cannot send flow to nbd.
+			next_nbd[id] += 1;
+			continue;
+		}
+		//otherwise, we send flow to nbd.
+		lifo.push_back((graph[id][nbd].v, new_flow));
 	}
 }
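
Notes on the patterns used in this patch (not part of the commit itself):

The rewrite that repeats in both files replaces the `loop { if let ... else { break } }` shape with `while let`, which is what clippy's `while_let_loop` lint suggests. A minimal, self-contained sketch of the transformation; the VecDeque and its contents are illustrative, not taken from the patch:

    use std::collections::VecDeque;

    fn main() {
        let mut lifo: VecDeque<u32> = VecDeque::from(vec![1, 2, 3]);

        // Before: this shape trips clippy::while_let_loop.
        // loop {
        //     if let Some(x) = lifo.pop_back() {
        //         println!("{}", x);
        //     } else {
        //         break;
        //     }
        // }

        // After: the loop condition and the binding are merged,
        // and the explicit break disappears.
        while let Some(x) = lifo.pop_back() {
            println!("{}", x);
        }
    }

The same rewrite is applied twice above: to the positive-cycle loop in optimize_matching and to the DFS loop in dinic_compute_matching.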
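Two further lints drive the smaller edits: needless_range_loop (iterate with enumerate() instead of indexing over `0..len`) and ptr_arg (accept `&[T]` instead of `&Vec<T>`, since a slice is strictly more general). A sketch combining both under one roof; print_matching is a made-up helper, not a function from the patch:

    // Takes a slice of per-left-vertex match lists, as optimize_matching
    // now does, and walks it with enumerate() instead of indices.
    fn print_matching(matching: &[Vec<usize>]) {
        for (left, rights) in matching.iter().enumerate() {
            for right in rights {
                println!("{} -> {}", left, right);
            }
        }
    }

    fn main() {
        // A &Vec<Vec<usize>> coerces to &[Vec<usize>] at the call site,
        // so existing callers need no change.
        let matching = vec![vec![0, 2], vec![1]];
        print_matching(&matching);
    }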
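The layout.rs hunk at line 493 folds an inner `if let Some(c) = capacity` into the enclosing match by writing `capacity: Some(c)` directly in the pattern; a role whose capacity is None then falls through to the `_ => 0` arm instead of needing an explicit else branch. A sketch of the idea with a stand-in type (Role is hypothetical, not Garage's NodeRole):

    struct Role {
        capacity: Option<u32>,
    }

    fn capacity_or_zero(role: Option<&Role>) -> u32 {
        match role {
            // Nested pattern: matches only when the role exists
            // AND its capacity is Some.
            Some(Role { capacity: Some(c) }) => *c,
            // A missing role or a None capacity both land here.
            _ => 0,
        }
    }

    fn main() {
        assert_eq!(capacity_or_zero(Some(&Role { capacity: Some(5) })), 5);
        assert_eq!(capacity_or_zero(Some(&Role { capacity: None })), 0);
        assert_eq!(capacity_or_zero(None), 0);
    }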
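Finally, the flow_upper_bound rewrite works because `if let ... else` is an expression in Rust, so it can initialize the binding directly and the declare-then-assign dance (including the dead `flow_upper_bound = 0;` after `assert!(false)`) goes away. An equivalent, arguably tidier alternative, shown here only as a suggestion and not what the patch does, is expect() on the Option:

    fn main() {
        let left_cap_vec: Vec<u32> = vec![3, 1, 4];
        // max() returns None only for an empty vector, so this panics in
        // exactly the same situation as the patch's else { panic!(); }.
        let flow_upper_bound = *left_cap_vec.iter().max().expect("no capacities") as i32;
        assert_eq!(flow_upper_bound, 4);
    }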