admin api, cliv2: implement garage layout show using api functions

Alex 2025-03-06 17:56:22 +01:00
parent 913e6da41b
commit 004866caac
5 changed files with 198 additions and 194 deletions


@@ -1899,6 +1899,7 @@
"required": [
"version",
"roles",
"partitionSize",
"parameters",
"stagedRoleChanges"
],
@@ -1906,10 +1907,15 @@
"parameters": {
"$ref": "#/components/schemas/LayoutParameters"
},
"partitionSize": {
"type": "integer",
"format": "int64",
"minimum": 0
},
"roles": {
"type": "array",
"items": {
"$ref": "#/components/schemas/NodeRoleResp"
"$ref": "#/components/schemas/LayoutNodeRole"
}
},
"stagedParameters": {
@@ -2059,6 +2065,44 @@
}
}
},
"LayoutNodeRole": {
"type": "object",
"required": [
"id",
"zone",
"tags"
],
"properties": {
"capacity": {
"type": [
"integer",
"null"
],
"format": "int64",
"minimum": 0
},
"id": {
"type": "string"
},
"tags": {
"type": "array",
"items": {
"type": "string"
}
},
"usableCapacity": {
"type": [
"integer",
"null"
],
"format": "int64",
"minimum": 0
},
"zone": {
"type": "string"
}
}
},
"LayoutParameters": {
"type": "object",
"required": [
@@ -2853,6 +2897,36 @@
}
}
},
"NodeAssignedRole": {
"type": "object",
"required": [
"id",
"zone",
"tags"
],
"properties": {
"capacity": {
"type": [
"integer",
"null"
],
"format": "int64",
"minimum": 0
},
"id": {
"type": "string"
},
"tags": {
"type": "array",
"items": {
"type": "string"
}
},
"zone": {
"type": "string"
}
}
},
"NodeResp": {
"type": "object",
"required": [
@@ -2916,7 +2990,7 @@
"type": "null"
},
{
"$ref": "#/components/schemas/NodeRoleResp"
"$ref": "#/components/schemas/NodeAssignedRole"
}
]
}
@@ -2986,36 +3060,6 @@
}
]
},
"NodeRoleResp": {
"type": "object",
"required": [
"id",
"zone",
"tags"
],
"properties": {
"capacity": {
"type": [
"integer",
"null"
],
"format": "int64",
"minimum": 0
},
"id": {
"type": "string"
},
"tags": {
"type": "array",
"items": {
"type": "string"
}
},
"zone": {
"type": "string"
}
}
},
"PreviewClusterLayoutChangesResponse": {
"oneOf": [
{
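Taken together, the schema changes above add a required top-level partitionSize to the GetClusterLayout response and split the old NodeRoleResp into two schemas: LayoutNodeRole (used in the layout response, carrying the new optional usableCapacity) and NodeAssignedRole (referenced by NodeResp in the status response). A minimal sketch of a response matching the updated schema, built with serde_json; all values are hypothetical, and the zoneRedundancy spelling is assumed from the camelCase enum serialization used elsewhere in this API:

use serde_json::json;

fn main() {
    // Hypothetical GetClusterLayout response shaped like the updated schema:
    // `partitionSize` at the top level, `usableCapacity` on each role.
    let response = json!({
        "version": 12,
        "partitionSize": 1073741824u64,
        "parameters": { "zoneRedundancy": { "atLeast": 3 } },
        "roles": [{
            "id": "6a8e08af2aab1083", // hypothetical node id
            "zone": "dc1",
            "capacity": 100000000000u64,
            "usableCapacity": 96636764160u64,
            "tags": ["ssd"]
        }],
        "stagedRoleChanges": []
    });
    println!("{}", serde_json::to_string_pretty(&response).unwrap());
}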


@@ -174,7 +174,7 @@ pub struct GetClusterStatusResponse {
#[serde(rename_all = "camelCase")]
pub struct NodeResp {
pub id: String,
pub role: Option<NodeRoleResp>,
pub role: Option<NodeAssignedRole>,
#[schema(value_type = Option<String> )]
pub addr: Option<SocketAddr>,
pub hostname: Option<String>,
@@ -189,7 +189,7 @@ pub struct NodeResp {
#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
#[serde(rename_all = "camelCase")]
pub struct NodeRoleResp {
pub struct NodeAssignedRole {
pub id: String,
pub zone: String,
pub capacity: Option<u64>,
@@ -272,12 +272,23 @@ pub struct GetClusterLayoutRequest;
#[serde(rename_all = "camelCase")]
pub struct GetClusterLayoutResponse {
pub version: u64,
pub roles: Vec<NodeRoleResp>,
pub roles: Vec<LayoutNodeRole>,
pub partition_size: u64,
pub parameters: LayoutParameters,
pub staged_role_changes: Vec<NodeRoleChange>,
pub staged_parameters: Option<LayoutParameters>,
}
#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
#[serde(rename_all = "camelCase")]
pub struct LayoutNodeRole {
pub id: String,
pub zone: String,
pub capacity: Option<u64>,
pub usable_capacity: Option<u64>,
pub tags: Vec<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
#[serde(rename_all = "camelCase")]
pub struct NodeRoleChange {
@@ -306,13 +317,13 @@ pub enum NodeRoleChangeEnum {
},
}
#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
#[derive(Copy, Debug, Clone, Serialize, Deserialize, ToSchema)]
#[serde(rename_all = "camelCase")]
pub struct LayoutParameters {
pub zone_redundancy: ZoneRedundancy,
}
#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
#[derive(Copy, Debug, Clone, Serialize, Deserialize, ToSchema)]
#[serde(rename_all = "camelCase")]
pub enum ZoneRedundancy {
AtLeast(usize),
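Note the Copy derive added to LayoutParameters and ZoneRedundancy. It is what lets the handler in the next file copy the staged parameters out of the CRDT accessor with a plain dereference, as in (*layout.staging.get().parameters.get()).into(), rather than cloning. A minimal sketch of the pattern, using a simplified stand-in type:

// Simplified stand-in for LayoutParameters (the real type wraps ZoneRedundancy).
#[derive(Copy, Clone, Debug, PartialEq)]
struct Params {
    zone_redundancy: usize,
}

// With `Copy`, dereferencing a shared borrow yields an owned value; without
// the derive, `*p` would be rejected as a move out of a borrow.
fn snapshot(p: &Params) -> Params {
    *p
}

fn main() {
    let staged = Params { zone_redundancy: 3 };
    assert_eq!(snapshot(&staged), staged);
}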


@@ -55,7 +55,7 @@ impl RequestHandler for GetClusterStatusRequest {
for (id, _, role) in layout.current().roles.items().iter() {
if let layout::NodeRoleV(Some(r)) = role {
let role = NodeRoleResp {
let role = NodeAssignedRole {
id: hex::encode(id),
zone: r.zone.to_string(),
capacity: r.capacity,
@@ -182,16 +182,21 @@ impl RequestHandler for GetClusterLayoutRequest {
}
fn format_cluster_layout(layout: &layout::LayoutHistory) -> GetClusterLayoutResponse {
let roles = layout
.current()
let current = layout.current();
let roles = current
.roles
.items()
.iter()
.filter_map(|(k, _, v)| v.0.clone().map(|x| (k, x)))
.map(|(k, v)| NodeRoleResp {
.map(|(k, v)| LayoutNodeRole {
id: hex::encode(k),
zone: v.zone.clone(),
capacity: v.capacity,
usable_capacity: current
.get_node_usage(k)
.ok()
.map(|x| x as u64 * current.partition_size),
tags: v.tags.clone(),
})
.collect::<Vec<_>>();
@@ -202,7 +207,7 @@ fn format_cluster_layout(layout: &layout::LayoutHistory) -> GetClusterLayoutResp
.roles
.items()
.iter()
.filter(|(k, _, v)| layout.current().roles.get(k) != Some(v))
.filter(|(k, _, v)| current.roles.get(k) != Some(v))
.map(|(k, _, v)| match &v.0 {
None => NodeRoleChange {
id: hex::encode(k),
@@ -219,17 +224,17 @@ fn format_cluster_layout(layout: &layout::LayoutHistory) -> GetClusterLayoutResp
})
.collect::<Vec<_>>();
let staged_parameters = if *layout.staging.get().parameters.get() != layout.current().parameters
{
let staged_parameters = if *layout.staging.get().parameters.get() != current.parameters {
Some((*layout.staging.get().parameters.get()).into())
} else {
None
};
GetClusterLayoutResponse {
version: layout.current().version,
version: current.version,
roles,
parameters: layout.current().parameters.into(),
partition_size: current.partition_size,
parameters: current.parameters.into(),
staged_role_changes,
staged_parameters,
}
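The new usable_capacity field is computed above as get_node_usage(k), the number of partition replicas the layout assigns to the node, multiplied by partition_size, so declared capacity that the layout algorithm could not carve into whole partitions is excluded. A sketch of the arithmetic (function name hypothetical):

// Sketch: a node assigned `partitions` partition replicas of
// `partition_size` bytes each can use at most their product,
// regardless of its declared capacity.
fn usable_capacity(partitions: u64, partition_size: u64) -> u64 {
    partitions * partition_size
}

fn main() {
    let partition_size = 1u64 << 30; // hypothetical: 1 GiB partitions
    let declared = 100_000_000_000u64; // hypothetical declared capacity
    let usable = usable_capacity(90, partition_size);
    println!(
        "usable: {} bytes ({:.1}% of declared)",
        usable,
        100.0 * usable as f64 / declared as f64
    );
}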


@@ -1,5 +1,3 @@
use bytesize::ByteSize;
use format_table::format_table;
use garage_util::error::*;
@@ -9,54 +7,6 @@ use garage_rpc::*;
use crate::cli::structs::*;
pub async fn cmd_show_layout(
rpc_cli: &Endpoint<SystemRpc, ()>,
rpc_host: NodeID,
) -> Result<(), Error> {
let layout = fetch_layout(rpc_cli, rpc_host).await?;
println!("==== CURRENT CLUSTER LAYOUT ====");
print_cluster_layout(layout.current(), "No nodes currently have a role in the cluster.\nSee `garage status` to view available nodes.");
println!();
println!(
"Current cluster layout version: {}",
layout.current().version
);
let has_role_changes = print_staging_role_changes(&layout);
if has_role_changes {
let v = layout.current().version;
let res_apply = layout.apply_staged_changes(Some(v + 1));
// this will print the stats of what partitions
// will move around when we apply
match res_apply {
Ok((layout, msg)) => {
println!();
println!("==== NEW CLUSTER LAYOUT AFTER APPLYING CHANGES ====");
print_cluster_layout(layout.current(), "No nodes have a role in the new layout.");
println!();
for line in msg.iter() {
println!("{}", line);
}
println!("To enact the staged role changes, type:");
println!();
println!(" garage layout apply --version {}", v + 1);
println!();
println!("You can also revert all proposed changes with: garage layout revert");
}
Err(e) => {
println!("Error while trying to compute the assignment: {}", e);
println!("This new layout cannot yet be applied.");
println!("You can also revert all proposed changes with: garage layout revert");
}
}
}
Ok(())
}
pub async fn cmd_layout_history(
rpc_cli: &Endpoint<SystemRpc, ()>,
rpc_host: NodeID,
@@ -252,88 +202,3 @@ pub async fn send_layout(
.await??;
Ok(())
}
pub fn print_cluster_layout(layout: &LayoutVersion, empty_msg: &str) {
let mut table = vec!["ID\tTags\tZone\tCapacity\tUsable capacity".to_string()];
for (id, _, role) in layout.roles.items().iter() {
let role = match &role.0 {
Some(r) => r,
_ => continue,
};
let tags = role.tags.join(",");
let usage = layout.get_node_usage(id).unwrap_or(0);
let capacity = layout.get_node_capacity(id).unwrap_or(0);
if capacity > 0 {
table.push(format!(
"{:?}\t{}\t{}\t{}\t{} ({:.1}%)",
id,
tags,
role.zone,
role.capacity_string(),
ByteSize::b(usage as u64 * layout.partition_size).to_string_as(false),
(100.0 * usage as f32 * layout.partition_size as f32) / (capacity as f32)
));
} else {
table.push(format!(
"{:?}\t{}\t{}\t{}",
id,
tags,
role.zone,
role.capacity_string()
));
};
}
if table.len() > 1 {
format_table(table);
println!();
println!("Zone redundancy: {}", layout.parameters.zone_redundancy);
} else {
println!("{}", empty_msg);
}
}
pub fn print_staging_role_changes(layout: &LayoutHistory) -> bool {
let staging = layout.staging.get();
let has_role_changes = staging
.roles
.items()
.iter()
.any(|(k, _, v)| layout.current().roles.get(k) != Some(v));
let has_layout_changes = *staging.parameters.get() != layout.current().parameters;
if has_role_changes || has_layout_changes {
println!();
println!("==== STAGED ROLE CHANGES ====");
if has_role_changes {
let mut table = vec!["ID\tTags\tZone\tCapacity".to_string()];
for (id, _, role) in staging.roles.items().iter() {
if layout.current().roles.get(id) == Some(role) {
continue;
}
if let Some(role) = &role.0 {
let tags = role.tags.join(",");
table.push(format!(
"{:?}\t{}\t{}\t{}",
id,
tags,
role.zone,
role.capacity_string()
));
} else {
table.push(format!("{:?}\tREMOVED", id));
}
}
format_table(table);
println!();
}
if has_layout_changes {
println!(
"Zone redundancy: {}",
staging.parameters.get().zone_redundancy
);
}
true
} else {
false
}
}


@@ -13,6 +13,7 @@ use crate::cli_v2::*;
impl Cli {
pub async fn layout_command_dispatch(&self, cmd: LayoutOperation) -> Result<(), Error> {
match cmd {
LayoutOperation::Show => self.cmd_show_layout().await,
LayoutOperation::Assign(assign_opt) => self.cmd_assign_role(assign_opt).await,
LayoutOperation::Remove(remove_opt) => self.cmd_remove_role(remove_opt).await,
LayoutOperation::Config(config_opt) => self.cmd_config_layout(config_opt).await,
@@ -20,9 +21,6 @@ impl Cli {
LayoutOperation::Revert(revert_opt) => self.cmd_revert_layout(revert_opt).await,
// TODO
LayoutOperation::Show => {
cli_v1::cmd_show_layout(&self.system_rpc_endpoint, self.rpc_host).await
}
LayoutOperation::History => {
cli_v1::cmd_layout_history(&self.system_rpc_endpoint, self.rpc_host).await
}
@@ -37,6 +35,50 @@ impl Cli {
}
}
pub async fn cmd_show_layout(&self) -> Result<(), Error> {
let layout = self.api_request(GetClusterLayoutRequest).await?;
println!("==== CURRENT CLUSTER LAYOUT ====");
print_cluster_layout(&layout, "No nodes currently have a role in the cluster.\nSee `garage status` to view available nodes.");
println!();
println!("Current cluster layout version: {}", layout.version);
let has_role_changes = print_staging_role_changes(&layout);
if has_role_changes {
let res_apply = self.api_request(PreviewClusterLayoutChangesRequest).await?;
// this will print the stats of what partitions
// will move around when we apply
match res_apply {
PreviewClusterLayoutChangesResponse::Success {
message,
new_layout,
} => {
println!();
println!("==== NEW CLUSTER LAYOUT AFTER APPLYING CHANGES ====");
print_cluster_layout(&new_layout, "No nodes have a role in the new layout.");
println!();
for line in message.iter() {
println!("{}", line);
}
println!("To enact the staged role changes, type:");
println!();
println!(" garage layout apply --version {}", new_layout.version);
println!();
println!("You can also revert all proposed changes with: garage layout revert");
}
PreviewClusterLayoutChangesResponse::Error { error } => {
println!("Error while trying to compute the assignment: {}", error);
println!("This new layout cannot yet be applied.");
println!("You can also revert all proposed changes with: garage layout revert");
}
}
}
Ok(())
}
pub async fn cmd_assign_role(&self, opt: AssignRoleOpt) -> Result<(), Error> {
let status = self.api_request(GetClusterStatusRequest).await?;
let layout = self.api_request(GetClusterLayoutRequest).await?;
@@ -218,7 +260,7 @@ pub fn capacity_string(v: Option<u64>) -> String {
pub fn get_staged_or_current_role(
id: &str,
layout: &GetClusterLayoutResponse,
) -> Option<NodeRoleResp> {
) -> Option<NodeAssignedRole> {
for node in layout.staged_role_changes.iter() {
if node.id == id {
return match &node.action {
@@ -227,7 +269,7 @@ pub fn get_staged_or_current_role(
zone,
capacity,
tags,
} => Some(NodeRoleResp {
} => Some(NodeAssignedRole {
id: id.to_string(),
zone: zone.to_string(),
capacity: *capacity,
@@ -239,7 +281,12 @@ pub fn get_staged_or_current_role(
for node in layout.roles.iter() {
if node.id == id {
return Some(node.clone());
return Some(NodeAssignedRole {
id: node.id.clone(),
zone: node.zone.clone(),
capacity: node.capacity,
tags: node.tags.clone(),
});
}
}
@@ -267,11 +314,46 @@ pub fn find_matching_node<'a>(
}
}
pub fn print_cluster_layout(layout: &GetClusterLayoutResponse, empty_msg: &str) {
let mut table = vec!["ID\tTags\tZone\tCapacity\tUsable capacity".to_string()];
for role in layout.roles.iter() {
let tags = role.tags.join(",");
if let (Some(capacity), Some(usable_capacity)) = (role.capacity, role.usable_capacity) {
table.push(format!(
"{:.16}\t{}\t{}\t{}\t{} ({:.1}%)",
role.id,
tags,
role.zone,
capacity_string(role.capacity),
ByteSize::b(usable_capacity).to_string_as(false),
(100.0 * usable_capacity as f32) / (capacity as f32)
));
} else {
table.push(format!(
"{:.16}\t{}\t{}\t{}",
role.id,
tags,
role.zone,
capacity_string(role.capacity),
));
};
}
if table.len() > 1 {
format_table(table);
println!();
println!(
"Zone redundancy: {}",
Into::<layout::ZoneRedundancy>::into(layout.parameters.zone_redundancy)
);
} else {
println!("{}", empty_msg);
}
}
pub fn print_staging_role_changes(layout: &GetClusterLayoutResponse) -> bool {
let has_role_changes = !layout.staged_role_changes.is_empty();
// TODO!! Layout parameters
let has_layout_changes = false;
let has_layout_changes = layout.staged_parameters.is_some();
if has_role_changes || has_layout_changes {
println!();
@@ -302,15 +384,12 @@ pub fn print_staging_role_changes(layout: &GetClusterLayoutResponse) -> bool {
format_table(table);
println!();
}
//TODO
/*
if has_layout_changes {
if let Some(p) = layout.staged_parameters.as_ref() {
println!(
"Zone redundancy: {}",
staging.parameters.get().zone_redundancy
Into::<layout::ZoneRedundancy>::into(p.zone_redundancy)
);
}
*/
true
} else {
false
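With the usable-capacity column added by print_cluster_layout above, the table printed by garage layout show looks roughly as follows; node ids are truncated to 16 characters by the {:.16} format, and all names, sizes, and the redundancy line here are hypothetical:

==== CURRENT CLUSTER LAYOUT ====
ID                Tags  Zone  Capacity  Usable capacity
6a8e08af2aab1083  ssd   dc1   100.0 GB  96.6 GB (96.6%)
db9b358ff4a08c4e  hdd   dc2   100.0 GB  96.6 GB (96.6%)

Zone redundancy: maximum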