2022-11-07 20:12:11 +00:00
|
|
|
use bytesize::ByteSize;
|
|
|
|
|
2023-05-17 11:01:37 +00:00
|
|
|
use format_table::format_table;
|
2021-11-09 11:24:04 +00:00
|
|
|
use garage_util::crdt::Crdt;
|
|
|
|
use garage_util::error::*;
|
|
|
|
|
|
|
|
use garage_rpc::layout::*;
|
|
|
|
use garage_rpc::system::*;
|
|
|
|
use garage_rpc::*;
|
|
|
|
|
|
|
|
use crate::cli::*;
|
|
|
|
|
|
|
|
pub async fn cli_layout_command_dispatch(
|
|
|
|
cmd: LayoutOperation,
|
|
|
|
system_rpc_endpoint: &Endpoint<SystemRpc, ()>,
|
|
|
|
rpc_host: NodeID,
|
|
|
|
) -> Result<(), Error> {
|
|
|
|
match cmd {
|
2022-10-05 14:04:19 +00:00
|
|
|
LayoutOperation::Assign(assign_opt) => {
|
|
|
|
cmd_assign_role(system_rpc_endpoint, rpc_host, assign_opt).await
|
2021-11-09 11:24:04 +00:00
|
|
|
}
|
|
|
|
LayoutOperation::Remove(remove_opt) => {
|
|
|
|
cmd_remove_role(system_rpc_endpoint, rpc_host, remove_opt).await
|
|
|
|
}
|
|
|
|
LayoutOperation::Show => cmd_show_layout(system_rpc_endpoint, rpc_host).await,
|
|
|
|
LayoutOperation::Apply(apply_opt) => {
|
|
|
|
cmd_apply_layout(system_rpc_endpoint, rpc_host, apply_opt).await
|
|
|
|
}
|
|
|
|
LayoutOperation::Revert(revert_opt) => {
|
|
|
|
cmd_revert_layout(system_rpc_endpoint, rpc_host, revert_opt).await
|
|
|
|
}
|
2022-10-10 15:21:13 +00:00
|
|
|
LayoutOperation::Config(config_opt) => {
|
|
|
|
cmd_config_layout(system_rpc_endpoint, rpc_host, config_opt).await
|
|
|
|
}
|
2023-11-27 15:17:41 +00:00
|
|
|
LayoutOperation::History => cmd_layout_history(system_rpc_endpoint, rpc_host).await,
|
2023-12-07 10:50:00 +00:00
|
|
|
LayoutOperation::SkipDeadNodes(assume_sync_opt) => {
|
|
|
|
cmd_layout_skip_dead_nodes(system_rpc_endpoint, rpc_host, assume_sync_opt).await
|
2023-11-27 15:17:41 +00:00
|
|
|
}
|
2021-11-09 11:24:04 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
pub async fn cmd_assign_role(
|
|
|
|
rpc_cli: &Endpoint<SystemRpc, ()>,
|
|
|
|
rpc_host: NodeID,
|
|
|
|
args: AssignRoleOpt,
|
|
|
|
) -> Result<(), Error> {
|
|
|
|
let status = match rpc_cli
|
2022-07-22 13:20:00 +00:00
|
|
|
.call(&rpc_host, SystemRpc::GetKnownNodes, PRIO_NORMAL)
|
2021-11-09 11:24:04 +00:00
|
|
|
.await??
|
|
|
|
{
|
|
|
|
SystemRpc::ReturnKnownNodes(nodes) => nodes,
|
|
|
|
resp => return Err(Error::Message(format!("Invalid RPC response: {:?}", resp))),
|
|
|
|
};
|
|
|
|
|
2022-05-09 09:14:55 +00:00
|
|
|
let mut layout = fetch_layout(rpc_cli, rpc_host).await?;
|
2023-11-15 13:20:50 +00:00
|
|
|
let all_nodes = layout.get_all_nodes();
|
2022-05-09 09:14:55 +00:00
|
|
|
|
2022-03-16 13:43:04 +00:00
|
|
|
let added_nodes = args
|
|
|
|
.node_ids
|
|
|
|
.iter()
|
2022-05-09 09:14:55 +00:00
|
|
|
.map(|node_id| {
|
|
|
|
find_matching_node(
|
|
|
|
status
|
|
|
|
.iter()
|
|
|
|
.map(|adv| adv.id)
|
2023-11-14 12:06:16 +00:00
|
|
|
.chain(all_nodes.iter().cloned()),
|
2022-05-09 09:14:55 +00:00
|
|
|
node_id,
|
|
|
|
)
|
|
|
|
})
|
2022-03-16 13:43:04 +00:00
|
|
|
.collect::<Result<Vec<_>, _>>()?;
|
2021-11-09 11:24:04 +00:00
|
|
|
|
2023-11-08 18:28:36 +00:00
|
|
|
let mut roles = layout.current().roles.clone();
|
2023-11-09 10:19:43 +00:00
|
|
|
roles.merge(&layout.staging.get().roles);
|
2021-11-09 11:24:04 +00:00
|
|
|
|
|
|
|
for replaced in args.replace.iter() {
|
2023-11-14 12:06:16 +00:00
|
|
|
let replaced_node = find_matching_node(all_nodes.iter().cloned(), replaced)?;
|
2021-11-09 11:24:04 +00:00
|
|
|
match roles.get(&replaced_node) {
|
|
|
|
Some(NodeRoleV(Some(_))) => {
|
|
|
|
layout
|
2023-11-09 10:19:43 +00:00
|
|
|
.staging
|
|
|
|
.get_mut()
|
|
|
|
.roles
|
2021-11-09 11:24:04 +00:00
|
|
|
.merge(&roles.update_mutator(replaced_node, NodeRoleV(None)));
|
|
|
|
}
|
|
|
|
_ => {
|
|
|
|
return Err(Error::Message(format!(
|
|
|
|
"Cannot replace node {:?} as it is not currently in planned layout",
|
|
|
|
replaced_node
|
|
|
|
)));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if args.capacity.is_some() && args.gateway {
|
|
|
|
return Err(Error::Message(
|
|
|
|
"-c and -g are mutually exclusive, please configure node either with c>0 to act as a storage node or with -g to act as a gateway node".into()));
|
|
|
|
}
|
2022-11-07 20:12:11 +00:00
|
|
|
if args.capacity == Some(ByteSize::b(0)) {
|
2021-11-09 11:24:04 +00:00
|
|
|
return Err(Error::Message("Invalid capacity value: 0".into()));
|
|
|
|
}
|
|
|
|
|
2022-03-16 13:43:04 +00:00
|
|
|
for added_node in added_nodes {
|
|
|
|
let new_entry = match roles.get(&added_node) {
|
|
|
|
Some(NodeRoleV(Some(old))) => {
|
|
|
|
let capacity = match args.capacity {
|
2022-11-07 20:12:11 +00:00
|
|
|
Some(c) => Some(c.as_u64()),
|
2022-03-16 13:43:04 +00:00
|
|
|
None if args.gateway => None,
|
|
|
|
None => old.capacity,
|
|
|
|
};
|
|
|
|
let tags = if args.tags.is_empty() {
|
|
|
|
old.tags.clone()
|
|
|
|
} else {
|
|
|
|
args.tags.clone()
|
|
|
|
};
|
|
|
|
NodeRole {
|
|
|
|
zone: args.zone.clone().unwrap_or_else(|| old.zone.to_string()),
|
|
|
|
capacity,
|
|
|
|
tags,
|
|
|
|
}
|
2021-11-09 11:24:04 +00:00
|
|
|
}
|
2022-03-16 13:43:04 +00:00
|
|
|
_ => {
|
|
|
|
let capacity = match args.capacity {
|
2022-11-07 20:12:11 +00:00
|
|
|
Some(c) => Some(c.as_u64()),
|
2022-03-16 13:43:04 +00:00
|
|
|
None if args.gateway => None,
|
|
|
|
None => return Err(Error::Message(
|
|
|
|
"Please specify a capacity with the -c flag, or set node explicitly as gateway with -g".into())),
|
|
|
|
};
|
|
|
|
NodeRole {
|
|
|
|
zone: args
|
|
|
|
.zone
|
|
|
|
.clone()
|
|
|
|
.ok_or("Please specifiy a zone with the -z flag")?,
|
|
|
|
capacity,
|
|
|
|
tags: args.tags.clone(),
|
|
|
|
}
|
2021-11-09 11:24:04 +00:00
|
|
|
}
|
2022-03-16 13:43:04 +00:00
|
|
|
};
|
2021-11-09 11:24:04 +00:00
|
|
|
|
2022-03-16 13:43:04 +00:00
|
|
|
layout
|
2023-11-09 10:19:43 +00:00
|
|
|
.staging
|
|
|
|
.get_mut()
|
|
|
|
.roles
|
2022-03-16 13:43:04 +00:00
|
|
|
.merge(&roles.update_mutator(added_node, NodeRoleV(Some(new_entry))));
|
|
|
|
}
|
2021-11-09 11:24:04 +00:00
|
|
|
|
|
|
|
send_layout(rpc_cli, rpc_host, layout).await?;
|
|
|
|
|
2022-03-16 13:43:04 +00:00
|
|
|
println!("Role changes are staged but not yet commited.");
|
2021-11-09 11:24:04 +00:00
|
|
|
println!("Use `garage layout show` to view staged role changes,");
|
|
|
|
println!("and `garage layout apply` to enact staged changes.");
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
|
|
|
pub async fn cmd_remove_role(
|
|
|
|
rpc_cli: &Endpoint<SystemRpc, ()>,
|
|
|
|
rpc_host: NodeID,
|
|
|
|
args: RemoveRoleOpt,
|
|
|
|
) -> Result<(), Error> {
|
|
|
|
let mut layout = fetch_layout(rpc_cli, rpc_host).await?;
|
|
|
|
|
2023-11-08 18:28:36 +00:00
|
|
|
let mut roles = layout.current().roles.clone();
|
2023-11-09 10:19:43 +00:00
|
|
|
roles.merge(&layout.staging.get().roles);
|
2021-11-09 11:24:04 +00:00
|
|
|
|
|
|
|
let deleted_node =
|
|
|
|
find_matching_node(roles.items().iter().map(|(id, _, _)| *id), &args.node_id)?;
|
|
|
|
|
|
|
|
layout
|
2023-11-09 10:19:43 +00:00
|
|
|
.staging
|
|
|
|
.get_mut()
|
|
|
|
.roles
|
2021-11-09 11:24:04 +00:00
|
|
|
.merge(&roles.update_mutator(deleted_node, NodeRoleV(None)));
|
|
|
|
|
|
|
|
send_layout(rpc_cli, rpc_host, layout).await?;
|
|
|
|
|
|
|
|
println!("Role removal is staged but not yet commited.");
|
|
|
|
println!("Use `garage layout show` to view staged role changes,");
|
|
|
|
println!("and `garage layout apply` to enact staged changes.");
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
|
|
|
pub async fn cmd_show_layout(
|
|
|
|
rpc_cli: &Endpoint<SystemRpc, ()>,
|
|
|
|
rpc_host: NodeID,
|
|
|
|
) -> Result<(), Error> {
|
2022-10-11 15:17:13 +00:00
|
|
|
let layout = fetch_layout(rpc_cli, rpc_host).await?;
|
2021-11-09 11:24:04 +00:00
|
|
|
|
|
|
|
println!("==== CURRENT CLUSTER LAYOUT ====");
|
2023-11-08 18:28:36 +00:00
|
|
|
print_cluster_layout(layout.current(), "No nodes currently have a role in the cluster.\nSee `garage status` to view available nodes.");
|
2021-11-09 11:24:04 +00:00
|
|
|
println!();
|
2023-11-08 18:28:36 +00:00
|
|
|
println!(
|
|
|
|
"Current cluster layout version: {}",
|
|
|
|
layout.current().version
|
|
|
|
);
|
2021-11-09 11:24:04 +00:00
|
|
|
|
2022-10-11 15:17:13 +00:00
|
|
|
let has_role_changes = print_staging_role_changes(&layout);
|
2023-09-12 15:24:51 +00:00
|
|
|
if has_role_changes {
|
2023-11-08 18:28:36 +00:00
|
|
|
let v = layout.current().version;
|
2022-10-11 15:17:13 +00:00
|
|
|
let res_apply = layout.apply_staged_changes(Some(v + 1));
|
2021-11-09 11:24:04 +00:00
|
|
|
|
|
|
|
// this will print the stats of what partitions
|
|
|
|
// will move around when we apply
|
2022-10-11 15:17:13 +00:00
|
|
|
match res_apply {
|
|
|
|
Ok((layout, msg)) => {
|
|
|
|
println!();
|
|
|
|
println!("==== NEW CLUSTER LAYOUT AFTER APPLYING CHANGES ====");
|
2023-11-08 18:28:36 +00:00
|
|
|
print_cluster_layout(layout.current(), "No nodes have a role in the new layout.");
|
2022-10-11 15:17:13 +00:00
|
|
|
println!();
|
|
|
|
|
2022-10-10 15:21:13 +00:00
|
|
|
for line in msg.iter() {
|
|
|
|
println!("{}", line);
|
|
|
|
}
|
|
|
|
println!("To enact the staged role changes, type:");
|
|
|
|
println!();
|
2022-10-11 15:17:13 +00:00
|
|
|
println!(" garage layout apply --version {}", v + 1);
|
2022-10-10 15:21:13 +00:00
|
|
|
println!();
|
2023-11-09 10:19:43 +00:00
|
|
|
println!("You can also revert all proposed changes with: garage layout revert");
|
2022-10-10 15:21:13 +00:00
|
|
|
}
|
2022-11-08 15:15:45 +00:00
|
|
|
Err(e) => {
|
2023-01-05 11:09:25 +00:00
|
|
|
println!("Error while trying to compute the assignment: {}", e);
|
2022-10-10 15:21:13 +00:00
|
|
|
println!("This new layout cannot yet be applied.");
|
2023-11-09 10:19:43 +00:00
|
|
|
println!("You can also revert all proposed changes with: garage layout revert");
|
2022-10-10 15:21:13 +00:00
|
|
|
}
|
|
|
|
}
|
2021-11-09 11:24:04 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
|
|
|
pub async fn cmd_apply_layout(
|
|
|
|
rpc_cli: &Endpoint<SystemRpc, ()>,
|
|
|
|
rpc_host: NodeID,
|
|
|
|
apply_opt: ApplyLayoutOpt,
|
|
|
|
) -> Result<(), Error> {
|
2022-05-24 10:16:39 +00:00
|
|
|
let layout = fetch_layout(rpc_cli, rpc_host).await?;
|
2022-03-16 13:43:04 +00:00
|
|
|
|
2022-10-05 13:29:48 +00:00
|
|
|
let (layout, msg) = layout.apply_staged_changes(apply_opt.version)?;
|
2022-10-10 15:21:13 +00:00
|
|
|
for line in msg.iter() {
|
|
|
|
println!("{}", line);
|
|
|
|
}
|
2021-11-09 11:24:04 +00:00
|
|
|
|
|
|
|
send_layout(rpc_cli, rpc_host, layout).await?;
|
|
|
|
|
2023-01-05 11:09:25 +00:00
|
|
|
println!("New cluster layout with updated role assignment has been applied in cluster.");
|
2021-11-09 11:24:04 +00:00
|
|
|
println!("Data will now be moved around between nodes accordingly.");
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
|
|
|
pub async fn cmd_revert_layout(
|
|
|
|
rpc_cli: &Endpoint<SystemRpc, ()>,
|
|
|
|
rpc_host: NodeID,
|
|
|
|
revert_opt: RevertLayoutOpt,
|
|
|
|
) -> Result<(), Error> {
|
2023-11-09 10:19:43 +00:00
|
|
|
if !revert_opt.yes {
|
|
|
|
return Err(Error::Message(
|
|
|
|
"Please add the --yes flag to run the layout revert operation".into(),
|
|
|
|
));
|
|
|
|
}
|
|
|
|
|
2022-05-24 10:16:39 +00:00
|
|
|
let layout = fetch_layout(rpc_cli, rpc_host).await?;
|
2021-11-09 11:24:04 +00:00
|
|
|
|
2023-11-09 10:19:43 +00:00
|
|
|
let layout = layout.revert_staged_changes()?;
|
2021-11-09 11:24:04 +00:00
|
|
|
|
|
|
|
send_layout(rpc_cli, rpc_host, layout).await?;
|
|
|
|
|
|
|
|
println!("All proposed role changes in cluster layout have been canceled.");
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
2022-10-05 14:04:19 +00:00
|
|
|
pub async fn cmd_config_layout(
|
|
|
|
rpc_cli: &Endpoint<SystemRpc, ()>,
|
|
|
|
rpc_host: NodeID,
|
|
|
|
config_opt: ConfigLayoutOpt,
|
|
|
|
) -> Result<(), Error> {
|
|
|
|
let mut layout = fetch_layout(rpc_cli, rpc_host).await?;
|
2022-10-10 15:21:13 +00:00
|
|
|
|
2022-11-07 20:12:11 +00:00
|
|
|
let mut did_something = false;
|
2022-10-10 15:21:13 +00:00
|
|
|
match config_opt.redundancy {
|
|
|
|
None => (),
|
2023-09-18 09:57:36 +00:00
|
|
|
Some(r_str) => {
|
|
|
|
let r = r_str
|
|
|
|
.parse::<ZoneRedundancy>()
|
|
|
|
.ok_or_message("invalid zone redundancy value")?;
|
|
|
|
if let ZoneRedundancy::AtLeast(r_int) = r {
|
2023-11-08 18:28:36 +00:00
|
|
|
if r_int > layout.current().replication_factor {
|
2023-09-18 09:57:36 +00:00
|
|
|
return Err(Error::Message(format!(
|
|
|
|
"The zone redundancy must be smaller or equal to the \
|
|
|
|
replication factor ({}).",
|
2023-11-08 18:28:36 +00:00
|
|
|
layout.current().replication_factor
|
2023-09-18 09:57:36 +00:00
|
|
|
)));
|
|
|
|
} else if r_int < 1 {
|
|
|
|
return Err(Error::Message(
|
|
|
|
"The zone redundancy must be at least 1.".into(),
|
|
|
|
));
|
|
|
|
}
|
2022-10-10 15:21:13 +00:00
|
|
|
}
|
2023-09-18 09:57:36 +00:00
|
|
|
|
|
|
|
layout
|
2023-11-09 10:19:43 +00:00
|
|
|
.staging
|
|
|
|
.get_mut()
|
|
|
|
.parameters
|
2023-09-18 09:57:36 +00:00
|
|
|
.update(LayoutParameters { zone_redundancy: r });
|
2023-09-18 10:07:45 +00:00
|
|
|
println!("The zone redundancy parameter has been set to '{}'.", r);
|
2022-11-07 20:12:11 +00:00
|
|
|
did_something = true;
|
2022-10-10 15:21:13 +00:00
|
|
|
}
|
|
|
|
}
|
2022-10-05 14:04:19 +00:00
|
|
|
|
2022-11-07 20:12:11 +00:00
|
|
|
if !did_something {
|
|
|
|
return Err(Error::Message(
|
2023-09-18 09:57:36 +00:00
|
|
|
"Please specify an action for `garage layout config`".into(),
|
2022-11-07 20:12:11 +00:00
|
|
|
));
|
|
|
|
}
|
|
|
|
|
2022-10-05 14:04:19 +00:00
|
|
|
send_layout(rpc_cli, rpc_host, layout).await?;
|
2022-10-10 15:21:13 +00:00
|
|
|
Ok(())
|
2022-10-05 14:04:19 +00:00
|
|
|
}
|
|
|
|
|
2023-11-27 15:17:41 +00:00
|
|
|
/// Print the history of layout versions (live and old) together with the
/// per-node update trackers, and hint at `layout assume-sync` when some
/// nodes are lagging behind the current version.
pub async fn cmd_layout_history(
	rpc_cli: &Endpoint<SystemRpc, ()>,
	rpc_host: NodeID,
) -> Result<(), Error> {
	let layout = fetch_layout(rpc_cli, rpc_host).await?;
	// Oldest layout version whose data is still stored somewhere.
	let min_stored = layout.min_stored();

	println!("==== LAYOUT HISTORY ====");
	let mut table = vec!["Version\tStatus\tStorage nodes\tGateway nodes".to_string()];
	// Newest first: live versions, then archived ones.
	for ver in layout
		.versions
		.iter()
		.rev()
		.chain(layout.old_versions.iter().rev())
	{
		let status = if ver.version == layout.current().version {
			"current"
		} else if ver.version >= min_stored {
			// Superseded but still holding data that is being migrated away.
			"draining"
		} else {
			"historical"
		};
		// Storage nodes have a capacity; gateway nodes have a role but no
		// capacity.
		table.push(format!(
			"#{}\t{}\t{}\t{}",
			ver.version,
			status,
			ver.roles
				.items()
				.iter()
				.filter(|(_, _, x)| matches!(x, NodeRoleV(Some(c)) if c.capacity.is_some()))
				.count(),
			ver.roles
				.items()
				.iter()
				.filter(|(_, _, x)| matches!(x, NodeRoleV(Some(c)) if c.capacity.is_none()))
				.count(),
		));
	}
	format_table(table);

	println!();
	println!("==== UPDATE TRACKERS ====");
	println!("This is the internal data that Garage stores to know which nodes have what data.");
	println!();
	let mut table = vec!["Node\tAck\tSync\tSync_ack".to_string()];
	let all_nodes = layout.get_all_nodes();
	for node in all_nodes.iter() {
		table.push(format!(
			"{:?}\t#{}\t#{}\t#{}",
			node,
			layout.update_trackers.ack_map.get(node, min_stored),
			layout.update_trackers.sync_map.get(node, min_stored),
			layout.update_trackers.sync_ack_map.get(node, min_stored),
		));
	}
	// Sort data rows only; index 0 is the header.
	table[1..].sort();
	format_table(table);

	// More than one live version means some nodes have not yet caught up.
	if layout.versions.len() > 1 {
		println!();
		println!(
			"If some nodes are not catching up to the latest layout version in the update tracker,"
		);
		println!("it might be because they are offline or unable to complete a sync successfully.");
		println!(
			"You may force progress using `garage layout assume-sync --version {}`",
			layout.current().version
		);
	}

	Ok(())
}
|
|
|
|
|
2023-12-07 10:50:00 +00:00
|
|
|
pub async fn cmd_layout_skip_dead_nodes(
|
2023-11-27 15:17:41 +00:00
|
|
|
rpc_cli: &Endpoint<SystemRpc, ()>,
|
|
|
|
rpc_host: NodeID,
|
2023-12-07 10:50:00 +00:00
|
|
|
opt: SkipDeadNodesOpt,
|
2023-11-27 15:17:41 +00:00
|
|
|
) -> Result<(), Error> {
|
2023-12-07 10:50:00 +00:00
|
|
|
let status = fetch_status(rpc_cli, rpc_host).await?;
|
2023-11-27 15:17:41 +00:00
|
|
|
let mut layout = fetch_layout(rpc_cli, rpc_host).await?;
|
|
|
|
|
2023-12-07 10:50:00 +00:00
|
|
|
if layout.versions.len() == 1 {
|
|
|
|
return Err(Error::Message(
|
|
|
|
"This command cannot be called when there is only one live cluster layout version"
|
|
|
|
.into(),
|
|
|
|
));
|
|
|
|
}
|
|
|
|
|
2023-11-27 15:17:41 +00:00
|
|
|
let min_v = layout.min_stored();
|
|
|
|
if opt.version <= min_v || opt.version > layout.current().version {
|
|
|
|
return Err(Error::Message(format!(
|
|
|
|
"Invalid version, you may use the following version numbers: {}",
|
|
|
|
(min_v + 1..=layout.current().version)
|
|
|
|
.map(|x| x.to_string())
|
|
|
|
.collect::<Vec<_>>()
|
|
|
|
.join(" ")
|
|
|
|
)));
|
|
|
|
}
|
|
|
|
|
|
|
|
let all_nodes = layout.get_all_nodes();
|
|
|
|
for node in all_nodes.iter() {
|
2023-12-07 10:50:00 +00:00
|
|
|
if status.iter().any(|x| x.id == *node && x.is_up) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if layout.update_trackers.ack_map.set_max(*node, opt.version) {
|
|
|
|
println!("Increased the ACK tracker for node {:?}", node);
|
|
|
|
}
|
|
|
|
|
|
|
|
if opt.allow_missing_data {
|
|
|
|
if layout.update_trackers.sync_map.set_max(*node, opt.version) {
|
|
|
|
println!("Increased the SYNC tracker for node {:?}", node);
|
|
|
|
}
|
|
|
|
}
|
2023-11-27 15:17:41 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
send_layout(rpc_cli, rpc_host, layout).await?;
|
|
|
|
println!("Success.");
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
2021-11-09 11:24:04 +00:00
|
|
|
// --- utility ---
|
|
|
|
|
|
|
|
pub async fn fetch_layout(
|
|
|
|
rpc_cli: &Endpoint<SystemRpc, ()>,
|
|
|
|
rpc_host: NodeID,
|
2023-11-08 18:28:36 +00:00
|
|
|
) -> Result<LayoutHistory, Error> {
|
2021-11-09 11:24:04 +00:00
|
|
|
match rpc_cli
|
2022-07-22 13:20:00 +00:00
|
|
|
.call(&rpc_host, SystemRpc::PullClusterLayout, PRIO_NORMAL)
|
2021-11-09 11:24:04 +00:00
|
|
|
.await??
|
|
|
|
{
|
2023-11-09 13:12:05 +00:00
|
|
|
SystemRpc::AdvertiseClusterLayout(t) => Ok(t),
|
2021-11-09 11:24:04 +00:00
|
|
|
resp => Err(Error::Message(format!("Invalid RPC response: {:?}", resp))),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
pub async fn send_layout(
|
|
|
|
rpc_cli: &Endpoint<SystemRpc, ()>,
|
|
|
|
rpc_host: NodeID,
|
2023-11-15 14:40:44 +00:00
|
|
|
layout: LayoutHistory,
|
2021-11-09 11:24:04 +00:00
|
|
|
) -> Result<(), Error> {
|
|
|
|
rpc_cli
|
|
|
|
.call(
|
|
|
|
&rpc_host,
|
2023-11-09 13:12:05 +00:00
|
|
|
SystemRpc::AdvertiseClusterLayout(layout),
|
2021-11-09 11:24:04 +00:00
|
|
|
PRIO_NORMAL,
|
|
|
|
)
|
|
|
|
.await??;
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
2023-11-08 18:28:36 +00:00
|
|
|
pub fn print_cluster_layout(layout: &LayoutVersion, empty_msg: &str) {
|
2022-11-08 13:23:08 +00:00
|
|
|
let mut table = vec!["ID\tTags\tZone\tCapacity\tUsable capacity".to_string()];
|
2021-11-09 11:24:04 +00:00
|
|
|
for (id, _, role) in layout.roles.items().iter() {
|
|
|
|
let role = match &role.0 {
|
|
|
|
Some(r) => r,
|
|
|
|
_ => continue,
|
|
|
|
};
|
|
|
|
let tags = role.tags.join(",");
|
2022-10-11 15:17:13 +00:00
|
|
|
let usage = layout.get_node_usage(id).unwrap_or(0);
|
2022-11-08 13:23:08 +00:00
|
|
|
let capacity = layout.get_node_capacity(id).unwrap_or(0);
|
|
|
|
if capacity > 0 {
|
|
|
|
table.push(format!(
|
|
|
|
"{:?}\t{}\t{}\t{}\t{} ({:.1}%)",
|
|
|
|
id,
|
|
|
|
tags,
|
|
|
|
role.zone,
|
|
|
|
role.capacity_string(),
|
|
|
|
ByteSize::b(usage as u64 * layout.partition_size).to_string_as(false),
|
|
|
|
(100.0 * usage as f32 * layout.partition_size as f32) / (capacity as f32)
|
|
|
|
));
|
|
|
|
} else {
|
|
|
|
table.push(format!(
|
|
|
|
"{:?}\t{}\t{}\t{}",
|
|
|
|
id,
|
|
|
|
tags,
|
|
|
|
role.zone,
|
2022-11-08 15:15:45 +00:00
|
|
|
role.capacity_string()
|
2022-11-08 13:23:08 +00:00
|
|
|
));
|
|
|
|
};
|
2021-11-09 11:24:04 +00:00
|
|
|
}
|
2023-09-12 15:24:51 +00:00
|
|
|
if table.len() > 1 {
|
2021-11-09 11:24:04 +00:00
|
|
|
format_table(table);
|
2023-09-18 10:07:45 +00:00
|
|
|
println!();
|
|
|
|
println!("Zone redundancy: {}", layout.parameters.zone_redundancy);
|
2023-09-12 15:24:51 +00:00
|
|
|
} else {
|
|
|
|
println!("{}", empty_msg);
|
2022-10-11 15:17:13 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-11-08 18:28:36 +00:00
|
|
|
pub fn print_staging_role_changes(layout: &LayoutHistory) -> bool {
|
2023-11-09 10:19:43 +00:00
|
|
|
let staging = layout.staging.get();
|
|
|
|
let has_role_changes = staging
|
|
|
|
.roles
|
2022-05-09 09:14:55 +00:00
|
|
|
.items()
|
|
|
|
.iter()
|
2023-11-08 18:28:36 +00:00
|
|
|
.any(|(k, _, v)| layout.current().roles.get(k) != Some(v));
|
2023-11-09 10:19:43 +00:00
|
|
|
let has_layout_changes = *staging.parameters.get() != layout.current().parameters;
|
2022-05-09 09:14:55 +00:00
|
|
|
|
2023-09-12 15:24:51 +00:00
|
|
|
if has_role_changes || has_layout_changes {
|
2021-11-09 11:24:04 +00:00
|
|
|
println!();
|
|
|
|
println!("==== STAGED ROLE CHANGES ====");
|
2023-09-12 15:24:51 +00:00
|
|
|
if has_role_changes {
|
|
|
|
let mut table = vec!["ID\tTags\tZone\tCapacity".to_string()];
|
2023-11-09 10:19:43 +00:00
|
|
|
for (id, _, role) in staging.roles.items().iter() {
|
2023-11-08 18:28:36 +00:00
|
|
|
if layout.current().roles.get(id) == Some(role) {
|
2023-09-12 15:24:51 +00:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if let Some(role) = &role.0 {
|
|
|
|
let tags = role.tags.join(",");
|
|
|
|
table.push(format!(
|
|
|
|
"{:?}\t{}\t{}\t{}",
|
|
|
|
id,
|
|
|
|
tags,
|
|
|
|
role.zone,
|
|
|
|
role.capacity_string()
|
|
|
|
));
|
|
|
|
} else {
|
|
|
|
table.push(format!("{:?}\tREMOVED", id));
|
|
|
|
}
|
2021-11-09 11:24:04 +00:00
|
|
|
}
|
2023-09-12 15:24:51 +00:00
|
|
|
format_table(table);
|
|
|
|
println!();
|
|
|
|
}
|
|
|
|
if has_layout_changes {
|
|
|
|
println!(
|
|
|
|
"Zone redundancy: {}",
|
2023-11-09 10:19:43 +00:00
|
|
|
staging.parameters.get().zone_redundancy
|
2023-09-12 15:24:51 +00:00
|
|
|
);
|
2021-11-09 11:24:04 +00:00
|
|
|
}
|
|
|
|
true
|
|
|
|
} else {
|
|
|
|
false
|
|
|
|
}
|
|
|
|
}
|