forked from Deuxfleurs/garage

Allow `garage layout assign` to assign to several nodes at once

parent 7e0e2ffda2
commit 2814d41842

2 changed files with 56 additions and 46 deletions
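With this change, `garage layout assign` accepts one or more node ID prefixes in a single invocation and stages a role change for each matching node. As a hypothetical example (node ID prefixes made up; `-z` and `-c` are the zone and capacity flags referenced in the diff below):

	garage layout assign 1a2b 5e6f -z dc1 -c 10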
@@ -43,7 +43,11 @@ pub async fn cmd_assign_role(
 		resp => return Err(Error::Message(format!("Invalid RPC response: {:?}", resp))),
 	};

-	let added_node = find_matching_node(status.iter().map(|adv| adv.id), &args.node_id)?;
+	let added_nodes = args
+		.node_ids
+		.iter()
+		.map(|node_id| find_matching_node(status.iter().map(|adv| adv.id), node_id))
+		.collect::<Result<Vec<_>, _>>()?;

 	let mut layout = fetch_layout(rpc_cli, rpc_host).await?;
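The `added_nodes` expression above maps every requested prefix through the fallible `find_matching_node` lookup and collects into `Result<Vec<_>, _>`, so a single unknown or ambiguous prefix aborts the whole command before anything is staged. A minimal standalone sketch of that pattern, with a toy lookup and made-up node IDs (not Garage's actual helper):

// Toy stand-in for Garage's find_matching_node: resolve a hex prefix to a
// full node id, failing on no match or an ambiguous match.
fn find_matching_node<'a>(
	known: impl Iterator<Item = &'a str>,
	prefix: &str,
) -> Result<String, String> {
	let matches: Vec<&str> = known.filter(|id| id.starts_with(prefix)).collect();
	match matches.as_slice() {
		[id] => Ok(id.to_string()),
		[] => Err(format!("no node id matches prefix {}", prefix)),
		_ => Err(format!("prefix {} is ambiguous", prefix)),
	}
}

fn main() {
	let known = ["1a2b3c4d", "5e6f7a8b"];
	let requested = ["1a2b", "5e6f"];
	// collect::<Result<Vec<_>, _>> short-circuits on the first Err
	let resolved: Result<Vec<String>, String> = requested
		.iter()
		.map(|prefix| find_matching_node(known.iter().copied(), prefix))
		.collect();
	println!("{:?}", resolved); // Ok(["1a2b3c4d", "5e6f7a8b"])
}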
@@ -75,6 +79,7 @@ pub async fn cmd_assign_role(
 		return Err(Error::Message("Invalid capacity value: 0".into()));
 	}

+	for added_node in added_nodes {
 	let new_entry = match roles.get(&added_node) {
 		Some(NodeRoleV(Some(old))) => {
 			let capacity = match args.capacity {
@@ -85,10 +90,10 @@ pub async fn cmd_assign_role(
 			let tags = if args.tags.is_empty() {
 				old.tags.clone()
 			} else {
-				args.tags
+				args.tags.clone()
 			};
 			NodeRole {
-				zone: args.zone.unwrap_or_else(|| old.zone.to_string()),
+				zone: args.zone.clone().unwrap_or_else(|| old.zone.to_string()),
 				capacity,
 				tags,
 			}
@@ -101,9 +106,12 @@ pub async fn cmd_assign_role(
 				"Please specify a capacity with the -c flag, or set node explicitly as gateway with -g".into())),
 			};
 			NodeRole {
-				zone: args.zone.ok_or("Please specifiy a zone with the -z flag")?,
+				zone: args
+					.zone
+					.clone()
+					.ok_or("Please specifiy a zone with the -z flag")?,
 				capacity,
-				tags: args.tags,
+				tags: args.tags.clone(),
 			}
 		}
 	};
@@ -111,10 +119,11 @@ pub async fn cmd_assign_role(
 	layout
 		.staging
 		.merge(&roles.update_mutator(added_node, NodeRoleV(Some(new_entry))));
+	}

 	send_layout(rpc_cli, rpc_host, layout).await?;

-	println!("Role change is staged but not yet commited.");
+	println!("Role changes are staged but not yet commited.");
 	println!("Use `garage layout show` to view staged role changes,");
 	println!("and `garage layout apply` to enact staged changes.");
 	Ok(())
@@ -196,15 +205,6 @@ pub async fn cmd_apply_layout(
 ) -> Result<(), Error> {
 	let mut layout = fetch_layout(rpc_cli, rpc_host).await?;

-	layout.roles.merge(&layout.staging);
-
-	if !layout.calculate_partition_assignation() {
-		return Err(Error::Message("Could not calculate new assignation of partitions to nodes. This can happen if there are less nodes than the desired number of copies of your data (see the replication_mode configuration parameter).".into()));
-	}
-
-	layout.staging.clear();
-	layout.staging_hash = blake2sum(&rmp_to_vec_all_named(&layout.staging).unwrap()[..]);
-
 	match apply_opt.version {
 		None => {
 			println!("Please pass the --version flag to ensure that you are writing the correct version of the cluster layout.");
@@ -218,6 +218,15 @@ pub async fn cmd_apply_layout(
 		}
 	}

+	layout.roles.merge(&layout.staging);
+
+	if !layout.calculate_partition_assignation() {
+		return Err(Error::Message("Could not calculate new assignation of partitions to nodes. This can happen if there are less nodes than the desired number of copies of your data (see the replication_mode configuration parameter).".into()));
+	}
+
+	layout.staging.clear();
+	layout.staging_hash = blake2sum(&rmp_to_vec_all_named(&layout.staging).unwrap()[..]);
+
 	layout.version += 1;

 	send_layout(rpc_cli, rpc_host, layout).await?;
@@ -92,8 +92,9 @@ pub enum LayoutOperation {

 #[derive(StructOpt, Debug)]
 pub struct AssignRoleOpt {
-	/// Node to which to assign role (prefix of hexadecimal node id)
-	pub(crate) node_id: String,
+	/// Node(s) to which to assign role (prefix of hexadecimal node id)
+	#[structopt(required = true)]
+	pub(crate) node_ids: Vec<String>,

 	/// Location (zone or datacenter) of the node
 	#[structopt(short = "z", long = "zone")]
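For reference, a minimal standalone sketch (assuming structopt 0.3, outside the Garage codebase, with made-up example values) of how a required positional `Vec<String>` field like the new `node_ids` parses one or more values from the command line:

use structopt::StructOpt;

#[derive(StructOpt, Debug)]
struct AssignRoleOpt {
	/// Node(s) to which to assign role (prefix of hexadecimal node id)
	#[structopt(required = true)]
	node_ids: Vec<String>,

	/// Location (zone or datacenter) of the node
	#[structopt(short = "z", long = "zone")]
	zone: Option<String>,
}

fn main() {
	// `prog 1a2b 5e6f -z dc1` -> node_ids = ["1a2b", "5e6f"], zone = Some("dc1")
	// `prog` with no positional argument exits with an error because of required = true.
	let opt = AssignRoleOpt::from_args();
	println!("{:?}", opt);
}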