admin api: management of layout parameters through admin api
parent 6b19d7628e
commit e4881e62f1
6 changed files with 204 additions and 73 deletions
@@ -1875,15 +1875,29 @@
       "required": [
         "version",
         "roles",
+        "parameters",
         "stagedRoleChanges"
       ],
       "properties": {
+        "parameters": {
+          "$ref": "#/components/schemas/LayoutParameters"
+        },
         "roles": {
           "type": "array",
           "items": {
             "$ref": "#/components/schemas/NodeRoleResp"
           }
         },
+        "stagedParameters": {
+          "oneOf": [
+            {
+              "type": "null"
+            },
+            {
+              "$ref": "#/components/schemas/LayoutParameters"
+            }
+          ]
+        },
         "stagedRoleChanges": {
           "type": "array",
           "items": {

@@ -2021,6 +2035,17 @@
           }
         }
       },
+      "LayoutParameters": {
+        "type": "object",
+        "required": [
+          "zoneRedundancy"
+        ],
+        "properties": {
+          "zoneRedundancy": {
+            "$ref": "#/components/schemas/ZoneRedundancy"
+          }
+        }
+      },
       "ListBucketsResponse": {
         "type": "array",
         "items": {

@@ -3109,9 +3134,24 @@
         }
       },
       "UpdateClusterLayoutRequest": {
-        "type": "array",
-        "items": {
-          "$ref": "#/components/schemas/NodeRoleChange"
+        "type": "object",
+        "properties": {
+          "parameters": {
+            "oneOf": [
+              {
+                "type": "null"
+              },
+              {
+                "$ref": "#/components/schemas/LayoutParameters"
+              }
+            ]
+          },
+          "roles": {
+            "type": "array",
+            "items": {
+              "$ref": "#/components/schemas/NodeRoleChange"
+            }
+          }
         }
       },
       "UpdateClusterLayoutResponse": {

@@ -3289,6 +3329,28 @@
             ]
           }
         ]
+      },
+      "ZoneRedundancy": {
+        "oneOf": [
+          {
+            "type": "object",
+            "required": [
+              "atLeast"
+            ],
+            "properties": {
+              "atLeast": {
+                "type": "integer",
+                "minimum": 0
+              }
+            }
+          },
+          {
+            "type": "string",
+            "enum": [
+              "maximum"
+            ]
+          }
+        ]
       }
     },
     "securitySchemes": {
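The schema hunks above turn `UpdateClusterLayoutRequest` from a bare array of role changes into an object with two optional members, `roles` and `parameters`, and introduce the `LayoutParameters` / `ZoneRedundancy` schemas. A minimal sketch of request bodies that would validate against the updated schema, built with `serde_json`; the node id, zone, capacity and tags are placeholder values, not values taken from this commit:

```rust
use serde_json::json;

fn main() {
    // Stage role changes only; `parameters` may be omitted (or set to null).
    let stage_roles = json!({
        "roles": [
            {
                "id": "1d65d732c1b1d9a6",
                "zone": "dc1",
                "capacity": 100000000000u64,
                "tags": ["ssd"]
            }
        ]
    });

    // Stage only a layout-parameter change: a zone redundancy of at least 2.
    let at_least_two = json!({
        "parameters": { "zoneRedundancy": { "atLeast": 2 } }
    });

    // The other accepted form of ZoneRedundancy is the plain string "maximum".
    let maximum = json!({
        "parameters": { "zoneRedundancy": "maximum" }
    });

    for body in [stage_roles, at_least_two, maximum] {
        println!("{}", serde_json::to_string_pretty(&body).unwrap());
    }
}
```

Because both members are optional, the CLI changes later in this commit can send role-only or parameter-only updates through the same endpoint.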
@@ -180,9 +180,9 @@ pub struct NodeResp {
     pub is_up: bool,
     pub last_seen_secs_ago: Option<u64>,
     pub draining: bool,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(default, skip_serializing_if = "Option::is_none")]
     pub data_partition: Option<FreeSpaceResp>,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(default, skip_serializing_if = "Option::is_none")]
     pub metadata_partition: Option<FreeSpaceResp>,
 }
 

@@ -272,7 +272,9 @@ pub struct GetClusterLayoutRequest;
 pub struct GetClusterLayoutResponse {
     pub version: u64,
     pub roles: Vec<NodeRoleResp>,
+    pub parameters: LayoutParameters,
     pub staged_role_changes: Vec<NodeRoleChange>,
+    pub staged_parameters: Option<LayoutParameters>,
 }
 
 #[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]

@@ -303,10 +305,28 @@ pub enum NodeRoleChangeEnum {
     },
 }
 
+#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct LayoutParameters {
+    pub zone_redundancy: ZoneRedundancy,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
+#[serde(rename_all = "camelCase")]
+pub enum ZoneRedundancy {
+    AtLeast(usize),
+    Maximum,
+}
+
 // ---- UpdateClusterLayout ----
 
 #[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
-pub struct UpdateClusterLayoutRequest(pub Vec<NodeRoleChange>);
+pub struct UpdateClusterLayoutRequest {
+    #[serde(default)]
+    pub roles: Vec<NodeRoleChange>,
+    #[serde(default)]
+    pub parameters: Option<LayoutParameters>,
+}
 
 #[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
 pub struct UpdateClusterLayoutResponse(pub GetClusterLayoutResponse);

@@ -367,7 +387,7 @@ pub struct GetKeyInfoRequest {
 pub struct GetKeyInfoResponse {
     pub name: String,
     pub access_key_id: String,
-    #[serde(skip_serializing_if = "is_default")]
+    #[serde(default, skip_serializing_if = "is_default")]
     pub secret_access_key: Option<String>,
     pub permissions: KeyPerm,
     pub buckets: Vec<KeyInfoBucketResponse>,
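The `#[serde(default)]` attributes on the new `UpdateClusterLayoutRequest` fields are what make both members optional on the wire. A self-contained sketch of that behaviour, with `Vec<serde_json::Value>` standing in for `Vec<NodeRoleChange>` and the other types mirroring the derives added above:

```rust
use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct LayoutParameters {
    zone_redundancy: ZoneRedundancy,
}

#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
enum ZoneRedundancy {
    AtLeast(usize),
    Maximum,
}

#[derive(Debug, Serialize, Deserialize)]
struct UpdateClusterLayoutRequest {
    #[serde(default)]
    roles: Vec<serde_json::Value>, // stand-in for Vec<NodeRoleChange>
    #[serde(default)]
    parameters: Option<LayoutParameters>,
}

fn main() {
    // Parameter-only body: `roles` falls back to its default (an empty Vec).
    let req: UpdateClusterLayoutRequest =
        serde_json::from_str(r#"{ "parameters": { "zoneRedundancy": { "atLeast": 2 } } }"#)
            .unwrap();
    assert!(req.roles.is_empty());
    assert!(matches!(
        req.parameters.map(|p| p.zone_redundancy),
        Some(ZoneRedundancy::AtLeast(2))
    ));

    // Role-only body: `parameters` defaults to None.
    let req: UpdateClusterLayoutRequest =
        serde_json::from_str(r#"{ "roles": [ { "id": "xxxx", "remove": true } ] }"#).unwrap();
    assert!(req.parameters.is_none());

    // The unit variant `Maximum` round-trips as the plain string "maximum",
    // matching the `oneOf` in the OpenAPI schema above.
    let p = LayoutParameters { zone_redundancy: ZoneRedundancy::Maximum };
    assert_eq!(
        serde_json::to_string(&p).unwrap(),
        r#"{"zoneRedundancy":"maximum"}"#
    );
}
```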
@@ -218,10 +218,19 @@ fn format_cluster_layout(layout: &layout::LayoutHistory) -> GetClusterLayoutResp
         })
         .collect::<Vec<_>>();
 
+    let staged_parameters = if *layout.staging.get().parameters.get() != layout.current().parameters
+    {
+        Some((*layout.staging.get().parameters.get()).into())
+    } else {
+        None
+    };
+
     GetClusterLayoutResponse {
         version: layout.current().version,
         roles,
+        parameters: layout.current().parameters.into(),
         staged_role_changes,
+        staged_parameters,
     }
 }
 

@@ -242,7 +251,7 @@ impl RequestHandler for UpdateClusterLayoutRequest {
         let mut roles = layout.current().roles.clone();
         roles.merge(&layout.staging.get().roles);
 
-        for change in self.0 {
+        for change in self.roles {
             let node = hex::decode(&change.id).ok_or_bad_request("Invalid node identifier")?;
             let node = Uuid::try_from(&node).ok_or_bad_request("Invalid node identifier")?;
 

@@ -252,11 +261,16 @@ impl RequestHandler for UpdateClusterLayoutRequest {
                     zone,
                     capacity,
                     tags,
-                } => Some(layout::NodeRole {
-                    zone,
-                    capacity,
-                    tags,
-                }),
+                } => {
+                    if matches!(capacity, Some(cap) if cap < 1024) {
+                        return Err(Error::bad_request("Capacity should be at least 1K (1024)"));
+                    }
+                    Some(layout::NodeRole {
+                        zone,
+                        capacity,
+                        tags,
+                    })
+                }
                 _ => return Err(Error::bad_request("Invalid layout change")),
             };
 

@@ -267,6 +281,22 @@ impl RequestHandler for UpdateClusterLayoutRequest {
                 .merge(&roles.update_mutator(node, layout::NodeRoleV(new_role)));
         }
 
+        if let Some(param) = self.parameters {
+            if let ZoneRedundancy::AtLeast(r_int) = param.zone_redundancy {
+                if r_int > layout.current().replication_factor {
+                    return Err(Error::bad_request(format!(
+                        "The zone redundancy must be smaller or equal to the replication factor ({}).",
+                        layout.current().replication_factor
+                    )));
+                } else if r_int < 1 {
+                    return Err(Error::bad_request(
+                        "The zone redundancy must be at least 1.",
+                    ));
+                }
+            }
+            layout.staging.get_mut().parameters.update(param.into());
+        }
+
         garage
             .system
             .layout_manager

@@ -322,3 +352,39 @@ impl RequestHandler for RevertClusterLayoutRequest {
         Ok(RevertClusterLayoutResponse(res))
     }
 }
+
+// ----
+
+impl From<layout::ZoneRedundancy> for ZoneRedundancy {
+    fn from(x: layout::ZoneRedundancy) -> Self {
+        match x {
+            layout::ZoneRedundancy::Maximum => ZoneRedundancy::Maximum,
+            layout::ZoneRedundancy::AtLeast(x) => ZoneRedundancy::AtLeast(x),
+        }
+    }
+}
+
+impl Into<layout::ZoneRedundancy> for ZoneRedundancy {
+    fn into(self) -> layout::ZoneRedundancy {
+        match self {
+            ZoneRedundancy::Maximum => layout::ZoneRedundancy::Maximum,
+            ZoneRedundancy::AtLeast(x) => layout::ZoneRedundancy::AtLeast(x),
+        }
+    }
+}
+
+impl From<layout::LayoutParameters> for LayoutParameters {
+    fn from(x: layout::LayoutParameters) -> Self {
+        LayoutParameters {
+            zone_redundancy: x.zone_redundancy.into(),
+        }
+    }
+}
+
+impl Into<layout::LayoutParameters> for LayoutParameters {
+    fn into(self) -> layout::LayoutParameters {
+        layout::LayoutParameters {
+            zone_redundancy: self.zone_redundancy.into(),
+        }
+    }
+}
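The handler above rejects out-of-range `atLeast` values before staging the new parameters. A standalone restatement of that check, with illustrative names only (the real handler returns `Error::bad_request` and reads the factor from `layout.current()`):

```rust
// Sketch of the validation rule applied to a requested zone redundancy.
// The `Maximum` form needs no bound check.
#[derive(Debug, Clone, Copy)]
enum ZoneRedundancy {
    AtLeast(usize),
    Maximum,
}

fn check_zone_redundancy(param: ZoneRedundancy, replication_factor: usize) -> Result<(), String> {
    if let ZoneRedundancy::AtLeast(r) = param {
        if r > replication_factor {
            return Err(format!(
                "The zone redundancy must be smaller or equal to the replication factor ({}).",
                replication_factor
            ));
        } else if r < 1 {
            return Err("The zone redundancy must be at least 1.".to_string());
        }
    }
    Ok(())
}

fn main() {
    assert!(check_zone_redundancy(ZoneRedundancy::AtLeast(2), 3).is_ok());
    assert!(check_zone_redundancy(ZoneRedundancy::AtLeast(4), 3).is_err());
    assert!(check_zone_redundancy(ZoneRedundancy::AtLeast(0), 3).is_err());
    assert!(check_zone_redundancy(ZoneRedundancy::Maximum, 3).is_ok());
}
```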
@@ -108,10 +108,7 @@ impl AdminApiRequest {
             Endpoint::GetClusterLayout => {
                 Ok(AdminApiRequest::GetClusterLayout(GetClusterLayoutRequest))
             }
-            Endpoint::UpdateClusterLayout => {
-                let updates = parse_json_body::<UpdateClusterLayoutRequest, _, Error>(req).await?;
-                Ok(AdminApiRequest::UpdateClusterLayout(updates))
-            }
+            // UpdateClusterLayout semantics changed
             Endpoint::ApplyClusterLayout => {
                 let param = parse_json_body::<ApplyClusterLayoutRequest, _, Error>(req).await?;
                 Ok(AdminApiRequest::ApplyClusterLayout(param))
@@ -57,54 +57,6 @@ pub async fn cmd_show_layout(
     Ok(())
 }
 
-pub async fn cmd_config_layout(
-    rpc_cli: &Endpoint<SystemRpc, ()>,
-    rpc_host: NodeID,
-    config_opt: ConfigLayoutOpt,
-) -> Result<(), Error> {
-    let mut layout = fetch_layout(rpc_cli, rpc_host).await?;
-
-    let mut did_something = false;
-    match config_opt.redundancy {
-        None => (),
-        Some(r_str) => {
-            let r = r_str
-                .parse::<ZoneRedundancy>()
-                .ok_or_message("invalid zone redundancy value")?;
-            if let ZoneRedundancy::AtLeast(r_int) = r {
-                if r_int > layout.current().replication_factor {
-                    return Err(Error::Message(format!(
-                        "The zone redundancy must be smaller or equal to the \
-                        replication factor ({}).",
-                        layout.current().replication_factor
-                    )));
-                } else if r_int < 1 {
-                    return Err(Error::Message(
-                        "The zone redundancy must be at least 1.".into(),
-                    ));
-                }
-            }
-
-            layout
-                .staging
-                .get_mut()
-                .parameters
-                .update(LayoutParameters { zone_redundancy: r });
-            println!("The zone redundancy parameter has been set to '{}'.", r);
-            did_something = true;
-        }
-    }
-
-    if !did_something {
-        return Err(Error::Message(
-            "Please specify an action for `garage layout config`".into(),
-        ));
-    }
-
-    send_layout(rpc_cli, rpc_host, layout).await?;
-    Ok(())
-}
-
 pub async fn cmd_layout_history(
     rpc_cli: &Endpoint<SystemRpc, ()>,
     rpc_host: NodeID,
@@ -4,6 +4,7 @@ use format_table::format_table;
 use garage_util::error::*;
 
 use garage_api_admin::api::*;
+use garage_rpc::layout;
 
 use crate::cli::layout as cli_v1;
 use crate::cli::structs::*;

@@ -14,6 +15,7 @@ impl Cli {
         match cmd {
             LayoutOperation::Assign(assign_opt) => self.cmd_assign_role(assign_opt).await,
             LayoutOperation::Remove(remove_opt) => self.cmd_remove_role(remove_opt).await,
+            LayoutOperation::Config(config_opt) => self.cmd_config_layout(config_opt).await,
             LayoutOperation::Apply(apply_opt) => self.cmd_apply_layout(apply_opt).await,
             LayoutOperation::Revert(revert_opt) => self.cmd_revert_layout(revert_opt).await,
 

@@ -21,10 +23,6 @@ impl Cli {
             LayoutOperation::Show => {
                 cli_v1::cmd_show_layout(&self.system_rpc_endpoint, self.rpc_host).await
             }
-            LayoutOperation::Config(config_opt) => {
-                cli_v1::cmd_config_layout(&self.system_rpc_endpoint, self.rpc_host, config_opt)
-                    .await
-            }
             LayoutOperation::History => {
                 cli_v1::cmd_layout_history(&self.system_rpc_endpoint, self.rpc_host).await
             }

@@ -100,8 +98,11 @@ impl Cli {
             });
         }
 
-        self.api_request(UpdateClusterLayoutRequest(actions))
-            .await?;
+        self.api_request(UpdateClusterLayoutRequest {
+            roles: actions,
+            parameters: None,
+        })
+        .await?;
 
         println!("Role changes are staged but not yet committed.");
         println!("Use `garage layout show` to view staged role changes,");

@@ -126,8 +127,11 @@ impl Cli {
             action: NodeRoleChangeEnum::Remove { remove: true },
         }];
 
-        self.api_request(UpdateClusterLayoutRequest(actions))
-            .await?;
+        self.api_request(UpdateClusterLayoutRequest {
+            roles: actions,
+            parameters: None,
+        })
+        .await?;
 
         println!("Role removal is staged but not yet committed.");
         println!("Use `garage layout show` to view staged role changes,");

@@ -135,6 +139,36 @@ impl Cli {
         Ok(())
     }
 
+    pub async fn cmd_config_layout(&self, config_opt: ConfigLayoutOpt) -> Result<(), Error> {
+        let mut did_something = false;
+        match config_opt.redundancy {
+            None => (),
+            Some(r_str) => {
+                let r = r_str
+                    .parse::<layout::ZoneRedundancy>()
+                    .ok_or_message("invalid zone redundancy value")?;
+
+                self.api_request(UpdateClusterLayoutRequest {
+                    roles: vec![],
+                    parameters: Some(LayoutParameters {
+                        zone_redundancy: r.into(),
+                    }),
+                })
+                .await?;
+                println!("The zone redundancy parameter has been set to '{}'.", r);
+                did_something = true;
+            }
+        }
+
+        if !did_something {
+            return Err(Error::Message(
+                "Please specify an action for `garage layout config`".into(),
+            ));
+        }
+
+        Ok(())
+    }
+
     pub async fn cmd_apply_layout(&self, apply_opt: ApplyLayoutOpt) -> Result<(), Error> {
         let missing_version_error = r#"
 Please pass the new layout version number to ensure that you are writing the correct version of the cluster layout.
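The new `cmd_config_layout` above no longer edits the layout over RPC: it parses the user-supplied redundancy string with `layout::ZoneRedundancy`'s `FromStr`, then sends a parameter-only `UpdateClusterLayoutRequest` through the admin API, leaving validation to the server-side handler. For context on the parsing step only, here is a rough, hypothetical stand-in for such a `FromStr`; it is not the actual `garage_rpc::layout` implementation:

```rust
use std::str::FromStr;

#[derive(Debug, Clone, Copy, PartialEq)]
enum ZoneRedundancy {
    AtLeast(usize),
    Maximum,
}

impl FromStr for ZoneRedundancy {
    type Err = String;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s.trim() {
            // The literal "maximum" selects the maximum-spread mode...
            "maximum" => Ok(ZoneRedundancy::Maximum),
            // ...anything else is expected to be a positive integer.
            n => n
                .parse::<usize>()
                .map(ZoneRedundancy::AtLeast)
                .map_err(|_| "invalid zone redundancy value".to_string()),
        }
    }
}

fn main() {
    assert_eq!("maximum".parse::<ZoneRedundancy>(), Ok(ZoneRedundancy::Maximum));
    assert_eq!("2".parse::<ZoneRedundancy>(), Ok(ZoneRedundancy::AtLeast(2)));
    assert!("two".parse::<ZoneRedundancy>().is_err());
}
```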