Compare commits: main...refactor-a

39 commits:

cf30ae8b99
783c1e1c70
bb36360c0b
e3d1571247
de7bbe8be0
8e61d149f2
3018dfd663
bb5e0c210f
3192088aac
5a89350b38
3caea5fc06
ebc0e9319e
f8c6a8373d
076ce04fe5
f37d5d2b08
819f4f0050
69ddaafc60
145130481e
6ed78abb5c
19454c1679
1c03941b19
4f0b923c4f
420bbc162d
12ea4cda5f
5fefbd94e9
ba810b2e81
f8ed3fdbc4
2daeb89834
4cb45bd398
d5ad797ad7
a99925e0ed
f538dc34d3
ed58f8b0fe
5037b97dd4
af1a530834
c99bfe69ea
831f2b0207
c1eb1610ba
5560a963e0

41 changed files with 5253 additions and 2659 deletions
Cargo.lock (generated): 1 line changed

@@ -1402,6 +1402,7 @@ dependencies = [
+ "nom",
  "opentelemetry",
  "opentelemetry-prometheus",
  "paste",
  "percent-encoding",
  "pin-project",
  "prometheus",
Cargo.nix

@@ -35,7 +35,7 @@ args@{
   ignoreLockHash,
 }:
 let
-  nixifiedLockHash = "d13a40f6a67a6a1075dbb5a948d7bfceea51958a0b5b6182ad56a9e39ab4dfd0";
+  nixifiedLockHash = "cc8c069ebe713e8225c166aa2bba5cc6e5016f007c6e7b7af36dd49452c859cc";
   workspaceSrc = if args.workspaceSrc == null then ./. else args.workspaceSrc;
   currentLockHash = builtins.hashFile "sha256" (workspaceSrc + /Cargo.lock);
   lockHashIgnored = if ignoreLockHash

@@ -2042,6 +2042,7 @@ in
+      nom = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".nom."7.1.3" { inherit profileName; }).out;
       opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
       ${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" then "opentelemetry_prometheus" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry-prometheus."0.10.0" { inherit profileName; }).out;
       paste = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".paste."1.0.14" { profileName = "__noProfile"; }).out;
       percent_encoding = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".percent-encoding."2.3.1" { inherit profileName; }).out;
       pin_project = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".pin-project."1.1.4" { inherit profileName; }).out;
       ${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/prometheus" then "prometheus" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".prometheus."0.13.3" { inherit profileName; }).out;
Cargo.toml

@@ -62,6 +62,7 @@ mktemp = "0.5"
 nix = { version = "0.29", default-features = false, features = ["fs"] }
+nom = "7.1"
 parse_duration = "2.1"
 paste = "1.0"
 pin-project = "1.0.12"
 pnet_datalink = "0.34"
 rand = "0.8"
doc/api/garage-admin-v2.html (new file): 24 lines

@@ -0,0 +1,24 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <title>Garage Adminstration API v0</title>
+    <!-- needed for adaptive design -->
+    <meta charset="utf-8"/>
+    <meta name="viewport" content="width=device-width, initial-scale=1">
+    <link href="./css/redoc.css" rel="stylesheet">
+
+    <!--
+    Redoc doesn't change outer page styles
+    -->
+    <style>
+      body {
+        margin: 0;
+        padding: 0;
+      }
+    </style>
+  </head>
+  <body>
+    <redoc spec-url='./garage-admin-v2.yml'></redoc>
+    <script src="./redoc.standalone.js"> </script>
+  </body>
+</html>
doc/api/garage-admin-v2.yml (new file): 1336 lines

(File diff suppressed because it is too large.)
doc/book/reference-manual/admin-api.md

@@ -13,8 +13,9 @@ We will bump the version numbers prefixed to each API endpoint each time the syn
 or semantics change, meaning that code that relies on these endpoints will break
 when changes are introduced.
 
-The Garage administration API was introduced in version 0.7.2, this document
-does not apply to older versions of Garage.
+The Garage administration API was introduced in version 0.7.2, and was
+changed several times.
+This document applies only to the Garage v2 API (starting with Garage v2.0.0).
 
 
 ## Access control

@@ -52,11 +53,18 @@ Returns an HTTP status 200 if the node is ready to answer user's requests,
 and an HTTP status 503 (Service Unavailable) if there are some partitions
 for which a quorum of nodes is not available.
 A simple textual message is also returned in a body with content-type `text/plain`.
-See `/v1/health` for an API that also returns JSON output.
+See `/v2/GetClusterHealth` for an API that also returns JSON output.
+
+### Other special endpoints
+
+#### CheckDomain `GET /check?domain=<domain>`
+
+Checks whether this Garage cluster serves a website for domain `<domain>`.
+Returns HTTP 200 Ok if yes, or HTTP 4xx if no website is available for this domain.
 
 ### Cluster operations
 
-#### GetClusterStatus `GET /v1/status`
+#### GetClusterStatus `GET /v2/GetClusterStatus`
 
 Returns the cluster's current status in JSON, including:
 

@@ -70,7 +78,7 @@ Example response body:
 ```json
 {
   "node": "b10c110e4e854e5aa3f4637681befac755154b20059ec163254ddbfae86b09df",
-  "garageVersion": "v1.0.1",
+  "garageVersion": "v2.0.0",
   "garageFeatures": [
     "k2v",
     "lmdb",

@@ -169,7 +177,7 @@ Example response body:
 }
 ```
 
-#### GetClusterHealth `GET /v1/health`
+#### GetClusterHealth `GET /v2/GetClusterHealth`
 
 Returns the cluster's current health in JSON format, with the following variables:
 

@@ -202,7 +210,7 @@ Example response body:
 }
 ```
 
-#### ConnectClusterNodes `POST /v1/connect`
+#### ConnectClusterNodes `POST /v2/ConnectClusterNodes`
 
 Instructs this Garage node to connect to other Garage nodes at specified addresses.
 

@@ -232,7 +240,7 @@ Example response:
 ]
 ```
 
-#### GetClusterLayout `GET /v1/layout`
+#### GetClusterLayout `GET /v2/GetClusterLayout`
 
 Returns the cluster's current layout in JSON, including:
 

@@ -293,7 +301,7 @@ Example response body:
 }
 ```
 
-#### UpdateClusterLayout `POST /v1/layout`
+#### UpdateClusterLayout `POST /v2/UpdateClusterLayout`
 
 Send modifications to the cluster layout. These modifications will
 be included in the staged role changes, visible in subsequent calls

@@ -330,7 +338,7 @@ This returns the new cluster layout with the proposed staged changes,
 as returned by GetClusterLayout.
 
 
-#### ApplyClusterLayout `POST /v1/layout/apply`
+#### ApplyClusterLayout `POST /v2/ApplyClusterLayout`
 
 Applies to the cluster the layout changes currently registered as
 staged layout changes.

@@ -350,7 +358,7 @@ existing layout in the cluster.
 This returns the message describing all the calculations done to compute the new
 layout, as well as the description of the layout as returned by GetClusterLayout.
 
-#### RevertClusterLayout `POST /v1/layout/revert`
+#### RevertClusterLayout `POST /v2/RevertClusterLayout`
 
 Clears all of the staged layout changes.
 

@@ -374,7 +382,7 @@ as returned by GetClusterLayout.
 
 ### Access key operations
 
-#### ListKeys `GET /v1/key`
+#### ListKeys `GET /v2/ListKeys`
 
 Returns all API access keys in the cluster.
 

@@ -393,8 +401,8 @@ Example response:
 ]
 ```
 
-#### GetKeyInfo `GET /v1/key?id=<acces key id>`
-#### GetKeyInfo `GET /v1/key?search=<pattern>`
+#### GetKeyInfo `GET /v2/GetKeyInfo?id=<acces key id>`
+#### GetKeyInfo `GET /v2/GetKeyInfo?search=<pattern>`
 
 Returns information about the requested API access key.
 

@@ -468,7 +476,7 @@ Example response:
 }
 ```
 
-#### CreateKey `POST /v1/key`
+#### CreateKey `POST /v2/CreateKey`
 
 Creates a new API access key.
 

@@ -483,7 +491,7 @@ Request body format:
 This returns the key info, including the created secret key,
 in the same format as the result of GetKeyInfo.
 
-#### ImportKey `POST /v1/key/import`
+#### ImportKey `POST /v2/ImportKey`
 
 Imports an existing API key.
 This will check that the imported key is in the valid format, i.e.

@@ -501,7 +509,7 @@ Request body format:
 
 This returns the key info in the same format as the result of GetKeyInfo.
 
-#### UpdateKey `POST /v1/key?id=<acces key id>`
+#### UpdateKey `POST /v2/UpdateKey?id=<acces key id>`
 
 Updates information about the specified API access key.
 

@@ -523,14 +531,14 @@ The possible flags in `allow` and `deny` are: `createBucket`.
 
 This returns the key info in the same format as the result of GetKeyInfo.
 
-#### DeleteKey `DELETE /v1/key?id=<acces key id>`
+#### DeleteKey `POST /v2/DeleteKey?id=<acces key id>`
 
 Deletes an API access key.
 
 
 ### Bucket operations
 
-#### ListBuckets `GET /v1/bucket`
+#### ListBuckets `GET /v2/ListBuckets`
 
 Returns all storage buckets in the cluster.
 

@@ -572,8 +580,8 @@ Example response:
 ]
 ```
 
-#### GetBucketInfo `GET /v1/bucket?id=<bucket id>`
-#### GetBucketInfo `GET /v1/bucket?globalAlias=<alias>`
+#### GetBucketInfo `GET /v2/GetBucketInfo?id=<bucket id>`
+#### GetBucketInfo `GET /v2/GetBucketInfo?globalAlias=<alias>`
 
 Returns information about the requested storage bucket.
 

@@ -616,7 +624,7 @@ Example response:
 }
 ```
 
-#### CreateBucket `POST /v1/bucket`
+#### CreateBucket `POST /v2/CreateBucket`
 
 Creates a new storage bucket.
 

@@ -656,7 +664,7 @@ or no alias at all.
 Technically, you can also specify both `globalAlias` and `localAlias` and that would create
 two aliases, but I don't see why you would want to do that.
 
-#### UpdateBucket `PUT /v1/bucket?id=<bucket id>`
+#### UpdateBucket `POST /v2/UpdateBucket?id=<bucket id>`
 
 Updates configuration of the given bucket.
 

@@ -688,16 +696,38 @@ In `quotas`: new values of `maxSize` and `maxObjects` must both be specified, or
 to remove the quotas. An absent value will be considered the same as a `null`. It is not possible
 to change only one of the two quotas.
 
-#### DeleteBucket `DELETE /v1/bucket?id=<bucket id>`
+#### DeleteBucket `POST /v2/DeleteBucket?id=<bucket id>`
 
 Deletes a storage bucket. A bucket cannot be deleted if it is not empty.
 
 Warning: this will delete all aliases associated with the bucket!
 
+#### CleanupIncompleteUploads `POST /v2/CleanupIncompleteUploads`
+
+Cleanup all incomplete uploads in a bucket that are older than a specified number
+of seconds.
+
+Request body format:
+
+```json
+{
+  "bucketId": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b",
+  "olderThanSecs": 3600
+}
+```
+
+Response format
+
+```json
+{
+  "uploadsDeleted": 12
+}
+```
+
+
 ### Operations on permissions for keys on buckets
 
-#### BucketAllowKey `POST /v1/bucket/allow`
+#### AllowBucketKey `POST /v2/AllowBucketKey`
 
 Allows a key to do read/write/owner operations on a bucket.
 

@@ -718,7 +748,7 @@ Request body format:
 Flags in `permissions` which have the value `true` will be activated.
 Other flags will remain unchanged.
 
-#### BucketDenyKey `POST /v1/bucket/deny`
+#### DenyBucketKey `POST /v2/DenyBucketKey`
 
 Denies a key from doing read/write/owner operations on a bucket.
 

@@ -742,19 +772,35 @@ Other flags will remain unchanged.
 
 ### Operations on bucket aliases
 
-#### GlobalAliasBucket `PUT /v1/bucket/alias/global?id=<bucket id>&alias=<global alias>`
+#### AddBucketAlias `POST /v2/AddBucketAlias`
 
-Empty body. Creates a global alias for a bucket.
+Creates an alias for a bucket in the namespace of a specific access key.
+To create a global alias, specify the `globalAlias` field.
+To create a local alias, specify the `localAlias` and `accessKeyId` fields.
 
-#### GlobalUnaliasBucket `DELETE /v1/bucket/alias/global?id=<bucket id>&alias=<global alias>`
+Request body format:
 
-Removes a global alias for a bucket.
+```json
+{
+  "bucketId": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b",
+  "globalAlias": "my-bucket"
+}
+```
 
-#### LocalAliasBucket `PUT /v1/bucket/alias/local?id=<bucket id>&accessKeyId=<access key ID>&alias=<local alias>`
+or:
 
-Empty body. Creates a local alias for a bucket in the namespace of a specific access key.
+```json
+{
+  "bucketId": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b",
+  "accessKeyId": "GK31c2f218a2e44f485b94239e",
+  "localAlias": "my-bucket"
+}
+```
 
-#### LocalUnaliasBucket `DELETE /v1/bucket/alias/local?id=<bucket id>&accessKeyId<access key ID>&alias=<local alias>`
+#### RemoveBucketAlias `POST /v2/RemoveBucketAlias`
 
-Removes a local alias for a bucket in the namespace of a specific access key.
+Removes an alias for a bucket in the namespace of a specific access key.
+To remove a global alias, specify the `globalAlias` field.
+To remove a local alias, specify the `localAlias` and `accessKeyId` fields.
+
+Request body format: same as AddBucketAlias.
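The v2 endpoints documented above are plain HTTP calls with JSON bodies behind a bearer token. As a quick illustration (not part of this changeset), here is a minimal client sketch in Rust using the `reqwest` and `tokio` crates with their `json` features enabled; the `localhost:3903` address and the `ADMIN_TOKEN` environment variable are assumptions made for the example:

```rust
use serde_json::json;

// Hypothetical client sketch for the v2 admin API described above.
// Assumes the admin endpoint is reachable on localhost:3903 and that
// ADMIN_TOKEN holds the cluster's admin bearer token.
#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    let client = reqwest::Client::new();
    let res = client
        .post("http://localhost:3903/v2/CleanupIncompleteUploads")
        .bearer_auth(std::env::var("ADMIN_TOKEN").unwrap_or_default())
        .json(&json!({
            "bucketId": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b",
            "olderThanSecs": 3600
        }))
        .send()
        .await?;
    // Expected response shape per the documentation: {"uploadsDeleted": <count>}
    println!("{}", res.text().await?);
    Ok(())
}
```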
src/api/Cargo.toml

@@ -38,6 +38,7 @@ idna.workspace = true
 tracing.workspace = true
 md-5.workspace = true
+nom.workspace = true
 paste.workspace = true
 pin-project.workspace = true
 sha1.workspace = true
 sha2.workspace = true
src/api/admin/api.rs (new file): 620 lines

@@ -0,0 +1,620 @@
+use std::collections::HashMap;
+use std::convert::TryFrom;
+use std::net::SocketAddr;
+use std::sync::Arc;
+
+use async_trait::async_trait;
+use paste::paste;
+use serde::{Deserialize, Serialize};
+
+use garage_rpc::*;
+
+use garage_model::garage::Garage;
+
+use crate::admin::api_server::{AdminRpc, AdminRpcResponse};
+use crate::admin::error::Error;
+use crate::admin::macros::*;
+use crate::admin::{Admin, RequestHandler};
+use crate::common_error::CommonErrorDerivative;
+use crate::helpers::is_default;
+
+// This generates the following:
+//
+// - An enum AdminApiRequest that contains a variant for all endpoints
+//
+// - An enum AdminApiResponse that contains a variant for all non-special endpoints.
+//   This enum is serialized in api_server.rs, without the enum tag,
+//   which gives directly the JSON response corresponding to the API call.
+//   This enum does not implement Deserialize as its meaning can be ambiguous.
+//
+// - An enum TaggedAdminApiResponse that contains the same variants, but
+//   serializes as a tagged enum. This allows it to be transmitted through
+//   Garage RPC and deserialized correctly upon receival.
+//   Conversion from untagged to tagged can be done using the `.tagged()` method.
+//
+// - AdminApiRequest::name() that returns the name of the endpoint
+//
+// - impl EndpointHandler for AdminApiHandler, that uses the impl EndpointHandler
+//   of each request type below for non-special endpoints
+admin_endpoints![
+    // Special endpoints of the Admin API
+    @special Options,
+    @special CheckDomain,
+    @special Health,
+    @special Metrics,
+
+    // Cluster operations
+    GetClusterStatus,
+    GetClusterHealth,
+    ConnectClusterNodes,
+    GetClusterLayout,
+    UpdateClusterLayout,
+    ApplyClusterLayout,
+    RevertClusterLayout,
+
+    // Access key operations
+    ListKeys,
+    GetKeyInfo,
+    CreateKey,
+    ImportKey,
+    UpdateKey,
+    DeleteKey,
+
+    // Bucket operations
+    ListBuckets,
+    GetBucketInfo,
+    CreateBucket,
+    UpdateBucket,
+    DeleteBucket,
+    CleanupIncompleteUploads,
+
+    // Operations on permissions for keys on buckets
+    AllowBucketKey,
+    DenyBucketKey,
+
+    // Operations on bucket aliases
+    AddBucketAlias,
+    RemoveBucketAlias,
+
+    // Worker operations
+    GetWorkerVariable,
+    SetWorkerVariable,
+];
+
+local_admin_endpoints![GetWorkerVariable, SetWorkerVariable,];
+
+// **********************************************
+// Special endpoints
+//
+// These endpoints don't have associated *Response structs
+// because they directly produce an http::Response
+// **********************************************
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct OptionsRequest;
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct CheckDomainRequest {
+    pub domain: String,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct HealthRequest;
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct MetricsRequest;
+
+// **********************************************
+// Cluster operations
+// **********************************************
+
+// ---- GetClusterStatus ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct GetClusterStatusRequest;
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct GetClusterStatusResponse {
+    pub node: String,
+    pub garage_version: String,
+    pub garage_features: Option<Vec<String>>,
+    pub rust_version: String,
+    pub db_engine: String,
+    pub layout_version: u64,
+    pub nodes: Vec<NodeResp>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+#[serde(rename_all = "camelCase")]
+pub struct NodeResp {
+    pub id: String,
+    pub role: Option<NodeRoleResp>,
+    pub addr: Option<SocketAddr>,
+    pub hostname: Option<String>,
+    pub is_up: bool,
+    pub last_seen_secs_ago: Option<u64>,
+    pub draining: bool,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub data_partition: Option<FreeSpaceResp>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub metadata_partition: Option<FreeSpaceResp>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct NodeRoleResp {
+    pub id: String,
+    pub zone: String,
+    pub capacity: Option<u64>,
+    pub tags: Vec<String>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct FreeSpaceResp {
+    pub available: u64,
+    pub total: u64,
+}
+
+// ---- GetClusterHealth ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct GetClusterHealthRequest;
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct GetClusterHealthResponse {
+    pub status: String,
+    pub known_nodes: usize,
+    pub connected_nodes: usize,
+    pub storage_nodes: usize,
+    pub storage_nodes_ok: usize,
+    pub partitions: usize,
+    pub partitions_quorum: usize,
+    pub partitions_all_ok: usize,
+}
+
+// ---- ConnectClusterNodes ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ConnectClusterNodesRequest(pub Vec<String>);
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ConnectClusterNodesResponse(pub Vec<ConnectNodeResponse>);
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct ConnectNodeResponse {
+    pub success: bool,
+    pub error: Option<String>,
+}
+
+// ---- GetClusterLayout ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct GetClusterLayoutRequest;
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct GetClusterLayoutResponse {
+    pub version: u64,
+    pub roles: Vec<NodeRoleResp>,
+    pub staged_role_changes: Vec<NodeRoleChange>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct NodeRoleChange {
+    pub id: String,
+    #[serde(flatten)]
+    pub action: NodeRoleChangeEnum,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(untagged)]
+pub enum NodeRoleChangeEnum {
+    #[serde(rename_all = "camelCase")]
+    Remove { remove: bool },
+    #[serde(rename_all = "camelCase")]
+    Update {
+        zone: String,
+        capacity: Option<u64>,
+        tags: Vec<String>,
+    },
+}
+
+// ---- UpdateClusterLayout ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct UpdateClusterLayoutRequest(pub Vec<NodeRoleChange>);
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct UpdateClusterLayoutResponse(pub GetClusterLayoutResponse);
+
+// ---- ApplyClusterLayout ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct ApplyClusterLayoutRequest {
+    pub version: u64,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct ApplyClusterLayoutResponse {
+    pub message: Vec<String>,
+    pub layout: GetClusterLayoutResponse,
+}
+
+// ---- RevertClusterLayout ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct RevertClusterLayoutRequest;
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct RevertClusterLayoutResponse(pub GetClusterLayoutResponse);
+
+// **********************************************
+// Access key operations
+// **********************************************
+
+// ---- ListKeys ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ListKeysRequest;
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ListKeysResponse(pub Vec<ListKeysResponseItem>);
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct ListKeysResponseItem {
+    pub id: String,
+    pub name: String,
+}
+
+// ---- GetKeyInfo ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct GetKeyInfoRequest {
+    pub id: Option<String>,
+    pub search: Option<String>,
+    pub show_secret_key: bool,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct GetKeyInfoResponse {
+    pub name: String,
+    pub access_key_id: String,
+    #[serde(skip_serializing_if = "is_default")]
+    pub secret_access_key: Option<String>,
+    pub permissions: KeyPerm,
+    pub buckets: Vec<KeyInfoBucketResponse>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct KeyPerm {
+    #[serde(default)]
+    pub create_bucket: bool,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct KeyInfoBucketResponse {
+    pub id: String,
+    pub global_aliases: Vec<String>,
+    pub local_aliases: Vec<String>,
+    pub permissions: ApiBucketKeyPerm,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+#[serde(rename_all = "camelCase")]
+pub struct ApiBucketKeyPerm {
+    #[serde(default)]
+    pub read: bool,
+    #[serde(default)]
+    pub write: bool,
+    #[serde(default)]
+    pub owner: bool,
+}
+
+// ---- CreateKey ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct CreateKeyRequest {
+    pub name: Option<String>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct CreateKeyResponse(pub GetKeyInfoResponse);
+
+// ---- ImportKey ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct ImportKeyRequest {
+    pub access_key_id: String,
+    pub secret_access_key: String,
+    pub name: Option<String>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ImportKeyResponse(pub GetKeyInfoResponse);
+
+// ---- UpdateKey ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct UpdateKeyRequest {
+    pub id: String,
+    pub body: UpdateKeyRequestBody,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct UpdateKeyResponse(pub GetKeyInfoResponse);
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct UpdateKeyRequestBody {
+    pub name: Option<String>,
+    pub allow: Option<KeyPerm>,
+    pub deny: Option<KeyPerm>,
+}
+
+// ---- DeleteKey ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct DeleteKeyRequest {
+    pub id: String,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct DeleteKeyResponse;
+
+// **********************************************
+// Bucket operations
+// **********************************************
+
+// ---- ListBuckets ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ListBucketsRequest;
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ListBucketsResponse(pub Vec<ListBucketsResponseItem>);
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct ListBucketsResponseItem {
+    pub id: String,
+    pub global_aliases: Vec<String>,
+    pub local_aliases: Vec<BucketLocalAlias>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct BucketLocalAlias {
+    pub access_key_id: String,
+    pub alias: String,
+}
+
+// ---- GetBucketInfo ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct GetBucketInfoRequest {
+    pub id: Option<String>,
+    pub global_alias: Option<String>,
+    pub search: Option<String>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct GetBucketInfoResponse {
+    pub id: String,
+    pub global_aliases: Vec<String>,
+    pub website_access: bool,
+    #[serde(default)]
+    pub website_config: Option<GetBucketInfoWebsiteResponse>,
+    pub keys: Vec<GetBucketInfoKey>,
+    pub objects: i64,
+    pub bytes: i64,
+    pub unfinished_uploads: i64,
+    pub unfinished_multipart_uploads: i64,
+    pub unfinished_multipart_upload_parts: i64,
+    pub unfinished_multipart_upload_bytes: i64,
+    pub quotas: ApiBucketQuotas,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct GetBucketInfoWebsiteResponse {
+    pub index_document: String,
+    pub error_document: Option<String>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct GetBucketInfoKey {
+    pub access_key_id: String,
+    pub name: String,
+    pub permissions: ApiBucketKeyPerm,
+    pub bucket_local_aliases: Vec<String>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct ApiBucketQuotas {
+    pub max_size: Option<u64>,
+    pub max_objects: Option<u64>,
+}
+
+// ---- CreateBucket ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct CreateBucketRequest {
+    pub global_alias: Option<String>,
+    pub local_alias: Option<CreateBucketLocalAlias>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct CreateBucketResponse(pub GetBucketInfoResponse);
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct CreateBucketLocalAlias {
+    pub access_key_id: String,
+    pub alias: String,
+    #[serde(default)]
+    pub allow: ApiBucketKeyPerm,
+}
+
+// ---- UpdateBucket ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct UpdateBucketRequest {
+    pub id: String,
+    pub body: UpdateBucketRequestBody,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct UpdateBucketResponse(pub GetBucketInfoResponse);
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct UpdateBucketRequestBody {
+    pub website_access: Option<UpdateBucketWebsiteAccess>,
+    pub quotas: Option<ApiBucketQuotas>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct UpdateBucketWebsiteAccess {
+    pub enabled: bool,
+    pub index_document: Option<String>,
+    pub error_document: Option<String>,
+}
+
+// ---- DeleteBucket ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct DeleteBucketRequest {
+    pub id: String,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct DeleteBucketResponse;
+
+// ---- CleanupIncompleteUploads ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct CleanupIncompleteUploadsRequest {
+    pub bucket_id: String,
+    pub older_than_secs: u64,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct CleanupIncompleteUploadsResponse {
+    pub uploads_deleted: u64,
+}
+
+// **********************************************
+// Operations on permissions for keys on buckets
+// **********************************************
+
+// ---- AllowBucketKey ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct AllowBucketKeyRequest(pub BucketKeyPermChangeRequest);
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct AllowBucketKeyResponse(pub GetBucketInfoResponse);
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct BucketKeyPermChangeRequest {
+    pub bucket_id: String,
+    pub access_key_id: String,
+    pub permissions: ApiBucketKeyPerm,
+}
+
+// ---- DenyBucketKey ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct DenyBucketKeyRequest(pub BucketKeyPermChangeRequest);
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct DenyBucketKeyResponse(pub GetBucketInfoResponse);
+
+// **********************************************
+// Operations on bucket aliases
+// **********************************************
+
+// ---- AddBucketAlias ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct AddBucketAliasRequest {
+    pub bucket_id: String,
+    #[serde(flatten)]
+    pub alias: BucketAliasEnum,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct AddBucketAliasResponse(pub GetBucketInfoResponse);
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(untagged)]
+pub enum BucketAliasEnum {
+    #[serde(rename_all = "camelCase")]
+    Global { global_alias: String },
+    #[serde(rename_all = "camelCase")]
+    Local {
+        local_alias: String,
+        access_key_id: String,
+    },
+}
+
+// ---- RemoveBucketAlias ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct RemoveBucketAliasRequest {
+    pub bucket_id: String,
+    #[serde(flatten)]
+    pub alias: BucketAliasEnum,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct RemoveBucketAliasResponse(pub GetBucketInfoResponse);
+
+// **********************************************
+// Worker operations
+// **********************************************
+
+// ---- GetWorkerVariable ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct LocalGetWorkerVariableRequest {
+    pub variable: Option<String>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct LocalGetWorkerVariableResponse(pub HashMap<String, String>);
+
+// ---- SetWorkerVariable ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct LocalSetWorkerVariableRequest {
+    pub variable: String,
+    pub value: String,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct LocalSetWorkerVariableResponse {
+    pub variable: String,
+    pub value: String,
+}
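One detail worth noting in the types above: `AddBucketAliasRequest` flattens an untagged `BucketAliasEnum`, which is what makes the two request-body shapes documented earlier (global vs. local alias) parse into a single struct. A self-contained sketch of that round-trip (not part of the diff), re-declaring just these two types with `serde_json`:

```rust
// Standalone sketch: shows how the flattened, untagged enum in
// AddBucketAliasRequest serializes to the JSON documented above.
use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct AddBucketAliasRequest {
    bucket_id: String,
    #[serde(flatten)]
    alias: BucketAliasEnum,
}

#[derive(Debug, Serialize, Deserialize)]
#[serde(untagged)]
enum BucketAliasEnum {
    #[serde(rename_all = "camelCase")]
    Global { global_alias: String },
    #[serde(rename_all = "camelCase")]
    Local { local_alias: String, access_key_id: String },
}

fn main() {
    let req = AddBucketAliasRequest {
        bucket_id: "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b".into(),
        alias: BucketAliasEnum::Global { global_alias: "my-bucket".into() },
    };
    // Prints: {"bucketId":"e6a14cd6...","globalAlias":"my-bucket"},
    // exactly the first request-body shape from the documentation.
    println!("{}", serde_json::to_string(&req).unwrap());
}
```

Deserialization works in the other direction for free: a body carrying `localAlias` and `accessKeyId` fails to match the `Global` variant and falls through to `Local`, which is why one endpoint can accept both shapes.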
src/api/admin/api_server.rs

@@ -1,11 +1,12 @@
-use std::collections::HashMap;
+use std::borrow::Cow;
 use std::sync::Arc;
 
 use argon2::password_hash::PasswordHash;
 use async_trait::async_trait;
 
-use http::header::{ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN, ALLOW};
+use http::header::{HeaderValue, ACCESS_CONTROL_ALLOW_ORIGIN, AUTHORIZATION};
 use hyper::{body::Incoming as IncomingBody, Request, Response, StatusCode};
+use serde::{Deserialize, Serialize};
 use tokio::sync::watch;
 
 use opentelemetry::trace::SpanRef;

@@ -16,20 +17,84 @@ use opentelemetry_prometheus::PrometheusExporter;
 use prometheus::{Encoder, TextEncoder};
 
 use garage_model::garage::Garage;
-use garage_rpc::system::ClusterHealthStatus;
+use garage_rpc::{Endpoint as RpcEndpoint, *};
+use garage_util::background::BackgroundRunner;
 use garage_util::error::Error as GarageError;
 use garage_util::socket_address::UnixOrTCPSocketAddress;
 
 use crate::generic_server::*;
 
-use crate::admin::bucket::*;
-use crate::admin::cluster::*;
+use crate::admin::api::*;
 use crate::admin::error::*;
-use crate::admin::key::*;
 use crate::admin::router_v0;
-use crate::admin::router_v1::{Authorization, Endpoint};
+use crate::admin::router_v1;
+use crate::admin::Authorization;
+use crate::admin::RequestHandler;
 use crate::helpers::*;
+
+// ---- FOR RPC ----
+
+pub const ADMIN_RPC_PATH: &str = "garage_api/admin/rpc.rs/Rpc";
+
+#[derive(Debug, Serialize, Deserialize)]
+pub enum AdminRpc {
+    Proxy(AdminApiRequest),
+    Internal(LocalAdminApiRequest),
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+pub enum AdminRpcResponse {
+    ProxyApiOkResponse(TaggedAdminApiResponse),
+    InternalApiOkResponse(LocalAdminApiResponse),
+    ApiErrorResponse {
+        http_code: u16,
+        error_code: String,
+        message: String,
+    },
+}
+
+impl Rpc for AdminRpc {
+    type Response = Result<AdminRpcResponse, GarageError>;
+}
+
+#[async_trait]
+impl EndpointHandler<AdminRpc> for AdminApiServer {
+    async fn handle(
+        self: &Arc<Self>,
+        message: &AdminRpc,
+        _from: NodeID,
+    ) -> Result<AdminRpcResponse, GarageError> {
+        match message {
+            AdminRpc::Proxy(req) => {
+                info!("Proxied admin API request: {}", req.name());
+                let res = req.clone().handle(&self.garage, &self).await;
+                match res {
+                    Ok(res) => Ok(AdminRpcResponse::ProxyApiOkResponse(res.tagged())),
+                    Err(e) => Ok(AdminRpcResponse::ApiErrorResponse {
+                        http_code: e.http_status_code().as_u16(),
+                        error_code: e.code().to_string(),
+                        message: e.to_string(),
+                    }),
+                }
+            }
+            AdminRpc::Internal(req) => {
+                info!("Internal admin API request: {}", req.name());
+                let res = req.clone().handle(&self.garage, &self).await;
+                match res {
+                    Ok(res) => Ok(AdminRpcResponse::InternalApiOkResponse(res)),
+                    Err(e) => Ok(AdminRpcResponse::ApiErrorResponse {
+                        http_code: e.http_status_code().as_u16(),
+                        error_code: e.code().to_string(),
+                        message: e.to_string(),
+                    }),
+                }
+            }
+        }
+    }
+}
+
+// ---- FOR HTTP ----
 
 pub type ResBody = BoxBody<Error>;
 
 pub struct AdminApiServer {

@@ -38,27 +103,41 @@ pub struct AdminApiServer {
     exporter: PrometheusExporter,
     metrics_token: Option<String>,
     admin_token: Option<String>,
+    pub(crate) background: Arc<BackgroundRunner>,
+    pub(crate) endpoint: Arc<RpcEndpoint<AdminRpc, Self>>,
+}
+
+pub enum HttpEndpoint {
+    Old(router_v1::Endpoint),
+    New(String),
 }
 
 impl AdminApiServer {
     pub fn new(
         garage: Arc<Garage>,
+        background: Arc<BackgroundRunner>,
         #[cfg(feature = "metrics")] exporter: PrometheusExporter,
-    ) -> Self {
+    ) -> Arc<Self> {
         let cfg = &garage.config.admin;
         let metrics_token = cfg.metrics_token.as_deref().map(hash_bearer_token);
         let admin_token = cfg.admin_token.as_deref().map(hash_bearer_token);
-        Self {
+
+        let endpoint = garage.system.netapp.endpoint(ADMIN_RPC_PATH.into());
+        let admin = Arc::new(Self {
             garage,
             #[cfg(feature = "metrics")]
             exporter,
             metrics_token,
            admin_token,
-        }
+            background,
+            endpoint,
+        });
+        admin.endpoint.set_handler(admin.clone());
+        admin
     }
 
     pub async fn run(
-        self,
+        self: Arc<Self>,
         bind_addr: UnixOrTCPSocketAddress,
         must_exit: watch::Receiver<bool>,
     ) -> Result<(), GarageError> {

@@ -68,130 +147,6 @@ impl AdminApiServer {
         .await
     }
 
-    fn handle_options(&self, _req: &Request<IncomingBody>) -> Result<Response<ResBody>, Error> {
-        Ok(Response::builder()
-            .status(StatusCode::NO_CONTENT)
-            .header(ALLOW, "OPTIONS, GET, POST")
-            .header(ACCESS_CONTROL_ALLOW_METHODS, "OPTIONS, GET, POST")
-            .header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
-            .body(empty_body())?)
-    }
-
-    async fn handle_check_domain(
-        &self,
-        req: Request<IncomingBody>,
-    ) -> Result<Response<ResBody>, Error> {
-        let query_params: HashMap<String, String> = req
-            .uri()
-            .query()
-            .map(|v| {
-                url::form_urlencoded::parse(v.as_bytes())
-                    .into_owned()
-                    .collect()
-            })
-            .unwrap_or_else(HashMap::new);
-
-        let has_domain_key = query_params.contains_key("domain");
-
-        if !has_domain_key {
-            return Err(Error::bad_request("No domain query string found"));
-        }
-
-        let domain = query_params
-            .get("domain")
-            .ok_or_internal_error("Could not parse domain query string")?;
-
-        if self.check_domain(domain).await? {
-            Ok(Response::builder()
-                .status(StatusCode::OK)
-                .body(string_body(format!(
-                    "Domain '{domain}' is managed by Garage"
-                )))?)
-        } else {
-            Err(Error::bad_request(format!(
-                "Domain '{domain}' is not managed by Garage"
-            )))
-        }
-    }
-
-    async fn check_domain(&self, domain: &str) -> Result<bool, Error> {
-        // Resolve bucket from domain name, inferring if the website must be activated for the
-        // domain to be valid.
-        let (bucket_name, must_check_website) = if let Some(bname) = self
-            .garage
-            .config
-            .s3_api
-            .root_domain
-            .as_ref()
-            .and_then(|rd| host_to_bucket(domain, rd))
-        {
-            (bname.to_string(), false)
-        } else if let Some(bname) = self
-            .garage
-            .config
-            .s3_web
-            .as_ref()
-            .and_then(|sw| host_to_bucket(domain, sw.root_domain.as_str()))
-        {
-            (bname.to_string(), true)
-        } else {
-            (domain.to_string(), true)
-        };
-
-        let bucket_id = match self
-            .garage
-            .bucket_helper()
-            .resolve_global_bucket_name(&bucket_name)
-            .await?
-        {
-            Some(bucket_id) => bucket_id,
-            None => return Ok(false),
-        };
-
-        if !must_check_website {
-            return Ok(true);
-        }
-
-        let bucket = self
-            .garage
-            .bucket_helper()
-            .get_existing_bucket(bucket_id)
-            .await?;
-
-        let bucket_state = bucket.state.as_option().unwrap();
-        let bucket_website_config = bucket_state.website_config.get();
-
-        match bucket_website_config {
-            Some(_v) => Ok(true),
-            None => Ok(false),
-        }
-    }
-
-    fn handle_health(&self) -> Result<Response<ResBody>, Error> {
-        let health = self.garage.system.health();
-
-        let (status, status_str) = match health.status {
-            ClusterHealthStatus::Healthy => (StatusCode::OK, "Garage is fully operational"),
-            ClusterHealthStatus::Degraded => (
-                StatusCode::OK,
-                "Garage is operational but some storage nodes are unavailable",
-            ),
-            ClusterHealthStatus::Unavailable => (
-                StatusCode::SERVICE_UNAVAILABLE,
-                "Quorum is not available for some/all partitions, reads and writes will fail",
-            ),
-        };
-        let status_str = format!(
-            "{}\nConsult the full health check API endpoint at /v1/health for more details\n",
-            status_str
-        );
-
-        Ok(Response::builder()
-            .status(status)
-            .header(http::header::CONTENT_TYPE, "text/plain")
-            .body(string_body(status_str))?)
-    }
-
     fn handle_metrics(&self) -> Result<Response<ResBody>, Error> {
         #[cfg(feature = "metrics")]
         {

@@ -222,29 +177,40 @@ impl AdminApiServer {
 }
 
 #[async_trait]
-impl ApiHandler for AdminApiServer {
+impl ApiHandler for Arc<AdminApiServer> {
     const API_NAME: &'static str = "admin";
     const API_NAME_DISPLAY: &'static str = "Admin";
 
-    type Endpoint = Endpoint;
+    type Endpoint = HttpEndpoint;
     type Error = Error;
 
-    fn parse_endpoint(&self, req: &Request<IncomingBody>) -> Result<Endpoint, Error> {
+    fn parse_endpoint(&self, req: &Request<IncomingBody>) -> Result<HttpEndpoint, Error> {
         if req.uri().path().starts_with("/v0/") {
             let endpoint_v0 = router_v0::Endpoint::from_request(req)?;
-            Endpoint::from_v0(endpoint_v0)
+            let endpoint_v1 = router_v1::Endpoint::from_v0(endpoint_v0)?;
+            Ok(HttpEndpoint::Old(endpoint_v1))
+        } else if req.uri().path().starts_with("/v1/") {
+            let endpoint_v1 = router_v1::Endpoint::from_request(req)?;
+            Ok(HttpEndpoint::Old(endpoint_v1))
         } else {
-            Endpoint::from_request(req)
+            Ok(HttpEndpoint::New(req.uri().path().to_string()))
         }
     }
 
     async fn handle(
         &self,
         req: Request<IncomingBody>,
-        endpoint: Endpoint,
+        endpoint: HttpEndpoint,
     ) -> Result<Response<ResBody>, Error> {
+        let auth_header = req.headers().get(AUTHORIZATION).cloned();
+
+        let request = match endpoint {
+            HttpEndpoint::Old(endpoint_v1) => AdminApiRequest::from_v1(endpoint_v1, req).await?,
+            HttpEndpoint::New(_) => AdminApiRequest::from_request(req).await?,
+        };
+
         let required_auth_hash =
-            match endpoint.authorization_type() {
+            match request.authorization_type() {
                 Authorization::None => None,
                 Authorization::MetricsToken => self.metrics_token.as_deref(),
                 Authorization::AdminToken => match self.admin_token.as_deref() {

@@ -256,7 +222,7 @@ impl ApiHandler for AdminApiServer {
         };
 
         if let Some(password_hash) = required_auth_hash {
-            match req.headers().get("Authorization") {
+            match auth_header {
                None => return Err(Error::forbidden("Authorization token must be provided")),
                Some(authorization) => {
                    verify_bearer_token(&authorization, password_hash)?;

@@ -264,72 +230,28 @@ impl ApiHandler for AdminApiServer {
             }
         }
 
-        match endpoint {
-            Endpoint::Options => self.handle_options(&req),
-            Endpoint::CheckDomain => self.handle_check_domain(req).await,
-            Endpoint::Health => self.handle_health(),
-            Endpoint::Metrics => self.handle_metrics(),
-            Endpoint::GetClusterStatus => handle_get_cluster_status(&self.garage).await,
-            Endpoint::GetClusterHealth => handle_get_cluster_health(&self.garage).await,
-            Endpoint::ConnectClusterNodes => handle_connect_cluster_nodes(&self.garage, req).await,
-            // Layout
-            Endpoint::GetClusterLayout => handle_get_cluster_layout(&self.garage).await,
-            Endpoint::UpdateClusterLayout => handle_update_cluster_layout(&self.garage, req).await,
-            Endpoint::ApplyClusterLayout => handle_apply_cluster_layout(&self.garage, req).await,
-            Endpoint::RevertClusterLayout => handle_revert_cluster_layout(&self.garage).await,
-            // Keys
-            Endpoint::ListKeys => handle_list_keys(&self.garage).await,
-            Endpoint::GetKeyInfo {
-                id,
-                search,
-                show_secret_key,
-            } => {
-                let show_secret_key = show_secret_key.map(|x| x == "true").unwrap_or(false);
-                handle_get_key_info(&self.garage, id, search, show_secret_key).await
+        match request {
+            AdminApiRequest::Options(req) => req.handle(&self.garage, &self).await,
+            AdminApiRequest::CheckDomain(req) => req.handle(&self.garage, &self).await,
+            AdminApiRequest::Health(req) => req.handle(&self.garage, &self).await,
+            AdminApiRequest::Metrics(_req) => self.handle_metrics(),
+            req => {
+                let res = req.handle(&self.garage, &self).await?;
+                let mut res = json_ok_response(&res)?;
+                res.headers_mut()
+                    .insert(ACCESS_CONTROL_ALLOW_ORIGIN, HeaderValue::from_static("*"));
+                Ok(res)
             }
-            Endpoint::CreateKey => handle_create_key(&self.garage, req).await,
-            Endpoint::ImportKey => handle_import_key(&self.garage, req).await,
-            Endpoint::UpdateKey { id } => handle_update_key(&self.garage, id, req).await,
-            Endpoint::DeleteKey { id } => handle_delete_key(&self.garage, id).await,
-            // Buckets
-            Endpoint::ListBuckets => handle_list_buckets(&self.garage).await,
-            Endpoint::GetBucketInfo { id, global_alias } => {
-                handle_get_bucket_info(&self.garage, id, global_alias).await
-            }
-            Endpoint::CreateBucket => handle_create_bucket(&self.garage, req).await,
-            Endpoint::DeleteBucket { id } => handle_delete_bucket(&self.garage, id).await,
-            Endpoint::UpdateBucket { id } => handle_update_bucket(&self.garage, id, req).await,
-            // Bucket-key permissions
-            Endpoint::BucketAllowKey => {
-                handle_bucket_change_key_perm(&self.garage, req, true).await
-            }
-            Endpoint::BucketDenyKey => {
-                handle_bucket_change_key_perm(&self.garage, req, false).await
-            }
-            // Bucket aliasing
-            Endpoint::GlobalAliasBucket { id, alias } => {
-                handle_global_alias_bucket(&self.garage, id, alias).await
-            }
-            Endpoint::GlobalUnaliasBucket { id, alias } => {
-                handle_global_unalias_bucket(&self.garage, id, alias).await
-            }
-            Endpoint::LocalAliasBucket {
-                id,
-                access_key_id,
-                alias,
-            } => handle_local_alias_bucket(&self.garage, id, access_key_id, alias).await,
-            Endpoint::LocalUnaliasBucket {
-                id,
-                access_key_id,
-                alias,
-            } => handle_local_unalias_bucket(&self.garage, id, access_key_id, alias).await,
         }
     }
 }
 
-impl ApiEndpoint for Endpoint {
-    fn name(&self) -> &'static str {
-        Endpoint::name(self)
+impl ApiEndpoint for HttpEndpoint {
+    fn name(&self) -> Cow<'static, str> {
+        match self {
+            Self::Old(endpoint_v1) => Cow::Borrowed(endpoint_v1.name()),
+            Self::New(path) => Cow::Owned(path.clone()),
+        }
     }
 
     fn add_span_attributes(&self, _span: SpanRef<'_>) {}
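The parse_endpoint change above encodes a simple versioning rule: /v0/ paths are upgraded to v1 endpoints, /v1/ paths are parsed by the legacy router, and everything else is treated as a v2 endpoint addressed by name. A toy sketch of that rule in isolation (illustrative only, not Garage code):

```rust
// Illustrative only: the routing rule used by parse_endpoint above,
// reduced to a pure function over the request path.
fn route_version(path: &str) -> &'static str {
    if path.starts_with("/v0/") {
        "legacy: converted v0 -> v1, then handled via AdminApiRequest::from_v1"
    } else if path.starts_with("/v1/") {
        "legacy: parsed by router_v1, then handled via AdminApiRequest::from_v1"
    } else {
        "v2: endpoint matched by name, e.g. /v2/GetClusterStatus"
    }
}

fn main() {
    for p in ["/v0/status", "/v1/health", "/v2/GetClusterHealth"] {
        println!("{p} -> {}", route_version(p));
    }
}
```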
@ -1,8 +1,8 @@
|
|||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use hyper::{body::Incoming as IncomingBody, Request, Response, StatusCode};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use async_trait::async_trait;
|
||||
|
||||
use garage_util::crdt::*;
|
||||
use garage_util::data::*;
|
||||
|
@ -17,102 +17,99 @@ use garage_model::permission::*;
|
|||
use garage_model::s3::mpu_table;
|
||||
use garage_model::s3::object_table::*;
|
||||
|
||||
use crate::admin::api_server::ResBody;
|
||||
use crate::admin::api::*;
|
||||
use crate::admin::error::*;
|
||||
use crate::admin::key::ApiBucketKeyPerm;
|
||||
use crate::admin::{Admin, RequestHandler};
|
||||
use crate::common_error::CommonError;
|
||||
use crate::helpers::*;
|
||||
|
||||
pub async fn handle_list_buckets(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
|
||||
let buckets = garage
|
||||
.bucket_table
|
||||
.get_range(
|
||||
&EmptyKey,
|
||||
None,
|
||||
Some(DeletedFilter::NotDeleted),
|
||||
10000,
|
||||
EnumerationOrder::Forward,
|
||||
)
|
||||
.await?;
|
||||
#[async_trait]
|
||||
impl RequestHandler for ListBucketsRequest {
|
||||
type Response = ListBucketsResponse;
|
||||
|
||||
let res = buckets
|
||||
.into_iter()
|
||||
.map(|b| {
|
||||
let state = b.state.as_option().unwrap();
|
||||
ListBucketResultItem {
|
||||
id: hex::encode(b.id),
|
||||
global_aliases: state
|
||||
.aliases
|
||||
.items()
|
||||
.iter()
|
||||
.filter(|(_, _, a)| *a)
|
||||
.map(|(n, _, _)| n.to_string())
|
||||
.collect::<Vec<_>>(),
|
||||
local_aliases: state
|
||||
.local_aliases
|
||||
.items()
|
||||
.iter()
|
||||
.filter(|(_, _, a)| *a)
|
||||
.map(|((k, n), _, _)| BucketLocalAlias {
|
||||
access_key_id: k.to_string(),
|
||||
alias: n.to_string(),
|
||||
})
|
||||
.collect::<Vec<_>>(),
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<ListBucketsResponse, Error> {
|
||||
let buckets = garage
|
||||
.bucket_table
|
||||
.get_range(
|
||||
&EmptyKey,
|
||||
None,
|
||||
Some(DeletedFilter::NotDeleted),
|
||||
10000,
|
||||
EnumerationOrder::Forward,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let res = buckets
|
||||
.into_iter()
|
||||
.map(|b| {
|
||||
let state = b.state.as_option().unwrap();
|
||||
ListBucketsResponseItem {
|
||||
id: hex::encode(b.id),
|
||||
global_aliases: state
|
||||
.aliases
|
||||
.items()
|
||||
.iter()
|
||||
.filter(|(_, _, a)| *a)
|
||||
.map(|(n, _, _)| n.to_string())
|
||||
.collect::<Vec<_>>(),
|
||||
local_aliases: state
|
||||
.local_aliases
|
||||
.items()
|
||||
.iter()
|
||||
.filter(|(_, _, a)| *a)
|
||||
.map(|((k, n), _, _)| BucketLocalAlias {
|
||||
access_key_id: k.to_string(),
|
||||
alias: n.to_string(),
|
||||
})
|
||||
.collect::<Vec<_>>(),
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
Ok(ListBucketsResponse(res))
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl RequestHandler for GetBucketInfoRequest {
|
||||
type Response = GetBucketInfoResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<GetBucketInfoResponse, Error> {
|
||||
let bucket_id = match (self.id, self.global_alias, self.search) {
|
||||
(Some(id), None, None) => parse_bucket_id(&id)?,
|
||||
(None, Some(ga), None) => garage
|
||||
.bucket_helper()
|
||||
.resolve_global_bucket_name(&ga)
|
||||
.await?
|
||||
.ok_or_else(|| HelperError::NoSuchBucket(ga.to_string()))?,
|
||||
(None, None, Some(search)) => {
|
||||
garage
|
||||
.bucket_helper()
|
||||
.admin_get_existing_matching_bucket(&search)
|
||||
.await?
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
_ => {
|
||||
return Err(Error::bad_request(
|
||||
"Either id, globalAlias or search must be provided (but not several of them)",
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
Ok(json_ok_response(&res)?)
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct ListBucketResultItem {
|
||||
id: String,
|
||||
global_aliases: Vec<String>,
|
||||
local_aliases: Vec<BucketLocalAlias>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct BucketLocalAlias {
|
||||
access_key_id: String,
|
||||
alias: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct ApiBucketQuotas {
|
||||
max_size: Option<u64>,
|
||||
max_objects: Option<u64>,
|
||||
}
|
||||
|
||||
pub async fn handle_get_bucket_info(
|
||||
garage: &Arc<Garage>,
|
||||
id: Option<String>,
|
||||
global_alias: Option<String>,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
let bucket_id = match (id, global_alias) {
|
||||
(Some(id), None) => parse_bucket_id(&id)?,
|
||||
(None, Some(ga)) => garage
|
||||
.bucket_helper()
|
||||
.resolve_global_bucket_name(&ga)
|
||||
.await?
|
||||
.ok_or_else(|| HelperError::NoSuchBucket(ga.to_string()))?,
|
||||
_ => {
|
||||
return Err(Error::bad_request(
|
||||
"Either id or globalAlias must be provided (but not both)",
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
bucket_info_results(garage, bucket_id).await
|
||||
bucket_info_results(garage, bucket_id).await
|
||||
}
|
||||
}
|
||||
|
||||
async fn bucket_info_results(
|
||||
garage: &Arc<Garage>,
|
||||
bucket_id: Uuid,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
) -> Result<GetBucketInfoResponse, Error> {
|
||||
let bucket = garage
|
||||
.bucket_helper()
|
||||
.get_existing_bucket(bucket_id)
|
||||
|
@@ -175,301 +172,301 @@ async fn bucket_info_results
	let state = bucket.state.as_option().unwrap();

	let quotas = state.quotas.get();
	let res = GetBucketInfoResponse {
		id: hex::encode(bucket.id),
		global_aliases: state
			.aliases
			.items()
			.iter()
			.filter(|(_, _, a)| *a)
			.map(|(n, _, _)| n.to_string())
			.collect::<Vec<_>>(),
		website_access: state.website_config.get().is_some(),
		website_config: state.website_config.get().clone().map(|wsc| {
			GetBucketInfoWebsiteResponse {
				index_document: wsc.index_document,
				error_document: wsc.error_document,
			}
		}),
		keys: relevant_keys
			.into_values()
			.map(|key| {
				let p = key.state.as_option().unwrap();
				GetBucketInfoKey {
					access_key_id: key.key_id,
					name: p.name.get().to_string(),
					permissions: p
						.authorized_buckets
						.get(&bucket.id)
						.map(|p| ApiBucketKeyPerm {
							read: p.allow_read,
							write: p.allow_write,
							owner: p.allow_owner,
						})
						.unwrap_or_default(),
					bucket_local_aliases: p
						.local_aliases
						.items()
						.iter()
						.filter(|(_, _, b)| *b == Some(bucket.id))
						.map(|(n, _, _)| n.to_string())
						.collect::<Vec<_>>(),
				}
			})
			.collect::<Vec<_>>(),
		objects: *counters.get(OBJECTS).unwrap_or(&0),
		bytes: *counters.get(BYTES).unwrap_or(&0),
		unfinished_uploads: *counters.get(UNFINISHED_UPLOADS).unwrap_or(&0),
		unfinished_multipart_uploads: *mpu_counters.get(mpu_table::UPLOADS).unwrap_or(&0),
		unfinished_multipart_upload_parts: *mpu_counters.get(mpu_table::PARTS).unwrap_or(&0),
		unfinished_multipart_upload_bytes: *mpu_counters.get(mpu_table::BYTES).unwrap_or(&0),
		quotas: ApiBucketQuotas {
			max_size: quotas.max_size,
			max_objects: quotas.max_objects,
		},
	};

	Ok(res)
}

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct GetBucketInfoResult {
	id: String,
	global_aliases: Vec<String>,
	website_access: bool,
	#[serde(default)]
	website_config: Option<GetBucketInfoWebsiteResult>,
	keys: Vec<GetBucketInfoKey>,
	objects: i64,
	bytes: i64,
	unfinished_uploads: i64,
	unfinished_multipart_uploads: i64,
	unfinished_multipart_upload_parts: i64,
	unfinished_multipart_upload_bytes: i64,
	quotas: ApiBucketQuotas,
}
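
For orientation, the JSON produced by this struct looks roughly as follows (values invented for illustration; field order follows the declaration):

	// {
	//   "id": "<hex bucket id>",
	//   "globalAliases": ["my-bucket"],
	//   "websiteAccess": false,
	//   "websiteConfig": null,
	//   "keys": [ ... ],
	//   "objects": 42,
	//   "bytes": 1337,
	//   "unfinishedUploads": 0,
	//   "unfinishedMultipartUploads": 0,
	//   "unfinishedMultipartUploadParts": 0,
	//   "unfinishedMultipartUploadBytes": 0,
	//   "quotas": { "maxSize": null, "maxObjects": null }
	// }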
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct GetBucketInfoWebsiteResult {
	index_document: String,
	error_document: Option<String>,
}

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct GetBucketInfoKey {
	access_key_id: String,
	name: String,
	permissions: ApiBucketKeyPerm,
	bucket_local_aliases: Vec<String>,
}

#[async_trait]
impl RequestHandler for CreateBucketRequest {
	type Response = CreateBucketResponse;

	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<CreateBucketResponse, Error> {
		let helper = garage.locked_helper().await;

		if let Some(ga) = &self.global_alias {
			if !is_valid_bucket_name(ga) {
				return Err(Error::bad_request(format!(
					"{}: {}",
					ga, INVALID_BUCKET_NAME_MESSAGE
				)));
			}

			if let Some(alias) = garage.bucket_alias_table.get(&EmptyKey, ga).await? {
				if alias.state.get().is_some() {
					return Err(CommonError::BucketAlreadyExists.into());
				}
			}
		}

		if let Some(la) = &self.local_alias {
			if !is_valid_bucket_name(&la.alias) {
				return Err(Error::bad_request(format!(
					"{}: {}",
					la.alias, INVALID_BUCKET_NAME_MESSAGE
				)));
			}

			let key = helper.key().get_existing_key(&la.access_key_id).await?;
			let state = key.state.as_option().unwrap();
			if matches!(state.local_aliases.get(&la.alias), Some(_)) {
				return Err(Error::bad_request("Local alias already exists"));
			}
		}

		let bucket = Bucket::new();
		garage.bucket_table.insert(&bucket).await?;

		if let Some(ga) = &self.global_alias {
			helper.set_global_bucket_alias(bucket.id, ga).await?;
		}

		if let Some(la) = &self.local_alias {
			helper
				.set_local_bucket_alias(bucket.id, &la.access_key_id, &la.alias)
				.await?;

			if la.allow.read || la.allow.write || la.allow.owner {
				helper
					.set_bucket_key_permissions(
						bucket.id,
						&la.access_key_id,
						BucketKeyPerm {
							timestamp: now_msec(),
							allow_read: la.allow.read,
							allow_write: la.allow.write,
							allow_owner: la.allow.owner,
						},
					)
					.await?;
			}
		}

		Ok(CreateBucketResponse(
			bucket_info_results(garage, bucket.id).await?,
		))
	}
}

#[async_trait]
impl RequestHandler for DeleteBucketRequest {
	type Response = DeleteBucketResponse;

	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<DeleteBucketResponse, Error> {
		let helper = garage.locked_helper().await;

		let bucket_id = parse_bucket_id(&self.id)?;

		let mut bucket = helper.bucket().get_existing_bucket(bucket_id).await?;
		let state = bucket.state.as_option().unwrap();

		// Check bucket is empty
		if !helper.bucket().is_bucket_empty(bucket_id).await? {
			return Err(CommonError::BucketNotEmpty.into());
		}

		// --- done checking, now commit ---
		// 1. delete authorization from keys that had access
		for (key_id, perm) in bucket.authorized_keys() {
			if perm.is_any() {
				helper
					.set_bucket_key_permissions(bucket.id, key_id, BucketKeyPerm::NO_PERMISSIONS)
					.await?;
			}
		}
		// 2. delete all local aliases
		for ((key_id, alias), _, active) in state.local_aliases.items().iter() {
			if *active {
				helper
					.unset_local_bucket_alias(bucket.id, key_id, alias)
					.await?;
			}
		}
		// 3. delete all global aliases
		for (alias, _, active) in state.aliases.items().iter() {
			if *active {
				helper.purge_global_bucket_alias(bucket.id, alias).await?;
			}
		}

		// 4. delete bucket
		bucket.state = Deletable::delete();
		garage.bucket_table.insert(&bucket).await?;

		Ok(DeleteBucketResponse)
	}
}

#[async_trait]
impl RequestHandler for UpdateBucketRequest {
	type Response = UpdateBucketResponse;

	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<UpdateBucketResponse, Error> {
		let bucket_id = parse_bucket_id(&self.id)?;

		let mut bucket = garage
			.bucket_helper()
			.get_existing_bucket(bucket_id)
			.await?;

		let state = bucket.state.as_option_mut().unwrap();

		if let Some(wa) = self.body.website_access {
			if wa.enabled {
				state.website_config.update(Some(WebsiteConfig {
					index_document: wa.index_document.ok_or_bad_request(
						"Please specify indexDocument when enabling website access.",
					)?,
					error_document: wa.error_document,
				}));
			} else {
				if wa.index_document.is_some() || wa.error_document.is_some() {
					return Err(Error::bad_request(
						"Cannot specify indexDocument or errorDocument when disabling website access.",
					));
				}
				state.website_config.update(None);
			}
		}

		if let Some(q) = self.body.quotas {
			state.quotas.update(BucketQuotas {
				max_size: q.max_size,
				max_objects: q.max_objects,
			});
		}

		garage.bucket_table.insert(&bucket).await?;

		Ok(UpdateBucketResponse(
			bucket_info_results(garage, bucket_id).await?,
		))
	}
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct CreateBucketRequest {
	global_alias: Option<String>,
	local_alias: Option<CreateBucketLocalAlias>,
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct CreateBucketLocalAlias {
	access_key_id: String,
	alias: String,
	#[serde(default)]
	allow: ApiBucketKeyPerm,
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct UpdateBucketRequest {
	website_access: Option<UpdateBucketWebsiteAccess>,
	quotas: Option<ApiBucketQuotas>,
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct UpdateBucketWebsiteAccess {
	enabled: bool,
	index_document: Option<String>,
	error_document: Option<String>,
}

#[async_trait]
impl RequestHandler for CleanupIncompleteUploadsRequest {
	type Response = CleanupIncompleteUploadsResponse;

	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<CleanupIncompleteUploadsResponse, Error> {
		let duration = Duration::from_secs(self.older_than_secs);

		let bucket_id = parse_bucket_id(&self.bucket_id)?;

		let count = garage
			.bucket_helper()
			.cleanup_incomplete_uploads(&bucket_id, duration)
			.await?;

		Ok(CleanupIncompleteUploadsResponse {
			uploads_deleted: count as u64,
		})
	}
}
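
Caller-side, the request type pairs a bucket with a retention cutoff. A hypothetical construction (field values invented; the field names follow the handler above):

	let req = CleanupIncompleteUploadsRequest {
		bucket_id: "<hex-encoded bucket id>".to_string(),
		older_than_secs: 3600, // delete uploads started more than an hour ago
	};
	// `req.handle(&garage, &admin).await?` then yields the number of uploads deleted.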

// ---- BUCKET/KEY PERMISSIONS ----

#[async_trait]
impl RequestHandler for AllowBucketKeyRequest {
	type Response = AllowBucketKeyResponse;

	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<AllowBucketKeyResponse, Error> {
		let res = handle_bucket_change_key_perm(garage, self.0, true).await?;
		Ok(AllowBucketKeyResponse(res))
	}
}

#[async_trait]
impl RequestHandler for DenyBucketKeyRequest {
	type Response = DenyBucketKeyResponse;

	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<DenyBucketKeyResponse, Error> {
		let res = handle_bucket_change_key_perm(garage, self.0, false).await?;
		Ok(DenyBucketKeyResponse(res))
	}
}
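
The `self.0` accesses above imply that the allow/deny requests are thin newtype wrappers around the shared payload, along these lines (a sketch; the actual definitions live in api.rs, which is not shown in this diff):

	pub struct AllowBucketKeyRequest(pub BucketKeyPermChangeRequest);
	pub struct DenyBucketKeyRequest(pub BucketKeyPermChangeRequest);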

pub async fn handle_bucket_change_key_perm(
	garage: &Arc<Garage>,
	req: BucketKeyPermChangeRequest,
	new_perm_flag: bool,
) -> Result<GetBucketInfoResponse, Error> {
	let helper = garage.locked_helper().await;

	let bucket_id = parse_bucket_id(&req.bucket_id)?;

@@ -502,76 +499,76 @@ pub async fn handle_bucket_change_key_perm
	bucket_info_results(garage, bucket.id).await
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct BucketKeyPermChangeRequest {
	bucket_id: String,
	access_key_id: String,
	permissions: ApiBucketKeyPerm,
}

// ---- BUCKET ALIASES ----

pub async fn handle_global_alias_bucket(
	garage: &Arc<Garage>,
	bucket_id: String,
	alias: String,
) -> Result<Response<ResBody>, Error> {
	let bucket_id = parse_bucket_id(&bucket_id)?;

	let helper = garage.locked_helper().await;

	helper.set_global_bucket_alias(bucket_id, &alias).await?;

	bucket_info_results(garage, bucket_id).await
}

pub async fn handle_global_unalias_bucket(
	garage: &Arc<Garage>,
	bucket_id: String,
	alias: String,
) -> Result<Response<ResBody>, Error> {
	let bucket_id = parse_bucket_id(&bucket_id)?;

	let helper = garage.locked_helper().await;

	helper.unset_global_bucket_alias(bucket_id, &alias).await?;

	bucket_info_results(garage, bucket_id).await
}

pub async fn handle_local_alias_bucket(
	garage: &Arc<Garage>,
	bucket_id: String,
	access_key_id: String,
	alias: String,
) -> Result<Response<ResBody>, Error> {
	let bucket_id = parse_bucket_id(&bucket_id)?;

	let helper = garage.locked_helper().await;

	helper
		.set_local_bucket_alias(bucket_id, &access_key_id, &alias)
		.await?;

	bucket_info_results(garage, bucket_id).await
}

pub async fn handle_local_unalias_bucket(
	garage: &Arc<Garage>,
	bucket_id: String,
	access_key_id: String,
	alias: String,
) -> Result<Response<ResBody>, Error> {
	let bucket_id = parse_bucket_id(&bucket_id)?;

	let helper = garage.locked_helper().await;

	helper
		.unset_local_bucket_alias(bucket_id, &access_key_id, &alias)
		.await?;

	bucket_info_results(garage, bucket_id).await
}

#[async_trait]
impl RequestHandler for AddBucketAliasRequest {
	type Response = AddBucketAliasResponse;

	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<AddBucketAliasResponse, Error> {
		let bucket_id = parse_bucket_id(&self.bucket_id)?;

		let helper = garage.locked_helper().await;

		match self.alias {
			BucketAliasEnum::Global { global_alias } => {
				helper
					.set_global_bucket_alias(bucket_id, &global_alias)
					.await?;
			}
			BucketAliasEnum::Local {
				local_alias,
				access_key_id,
			} => {
				helper
					.set_local_bucket_alias(bucket_id, &access_key_id, &local_alias)
					.await?;
			}
		}

		Ok(AddBucketAliasResponse(
			bucket_info_results(garage, bucket_id).await?,
		))
	}
}

#[async_trait]
impl RequestHandler for RemoveBucketAliasRequest {
	type Response = RemoveBucketAliasResponse;

	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<RemoveBucketAliasResponse, Error> {
		let bucket_id = parse_bucket_id(&self.bucket_id)?;

		let helper = garage.locked_helper().await;

		match self.alias {
			BucketAliasEnum::Global { global_alias } => {
				helper
					.unset_global_bucket_alias(bucket_id, &global_alias)
					.await?;
			}
			BucketAliasEnum::Local {
				local_alias,
				access_key_id,
			} => {
				helper
					.unset_local_bucket_alias(bucket_id, &access_key_id, &local_alias)
					.await?;
			}
		}

		Ok(RemoveBucketAliasResponse(
			bucket_info_results(garage, bucket_id).await?,
		))
	}
}

// ---- HELPER ----
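
The helper section is truncated at this hunk boundary, but from its call sites, parse_bucket_id plausibly has the following shape (an assumption, mirroring the node-id parsing used in cluster.rs below; the actual helper may differ):

	fn parse_bucket_id(id: &str) -> Result<Uuid, Error> {
		// Bucket ids are hex-encoded in the API, raw bytes internally.
		let id_bytes = hex::decode(id).ok_or_bad_request("Invalid bucket id")?;
		Ok(Uuid::try_from(&id_bytes).ok_or_bad_request("Invalid bucket id")?)
	}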
@@ -1,9 +1,7 @@
use std::collections::HashMap;
use std::net::SocketAddr;
use std::sync::Arc;

use hyper::{body::Incoming as IncomingBody, Request, Response};
use serde::{Deserialize, Serialize};
use async_trait::async_trait;

use garage_util::crdt::*;
use garage_util::data::*;

@@ -12,157 +10,186 @@ use garage_rpc::layout;

use garage_model::garage::Garage;

use crate::admin::api_server::ResBody;
use crate::admin::api::*;
use crate::admin::error::*;
use crate::helpers::{json_ok_response, parse_json_body};
use crate::admin::{Admin, RequestHandler};

#[async_trait]
impl RequestHandler for GetClusterStatusRequest {
	type Response = GetClusterStatusResponse;

	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<GetClusterStatusResponse, Error> {
		let layout = garage.system.cluster_layout();
		let mut nodes = garage
			.system
			.get_known_nodes()
			.into_iter()
			.map(|i| {
				(
					i.id,
					NodeResp {
						id: hex::encode(i.id),
						addr: i.addr,
						hostname: i.status.hostname,
						is_up: i.is_up,
						last_seen_secs_ago: i.last_seen_secs_ago,
						data_partition: i.status.data_disk_avail.map(|(avail, total)| {
							FreeSpaceResp {
								available: avail,
								total,
							}
						}),
						metadata_partition: i.status.meta_disk_avail.map(|(avail, total)| {
							FreeSpaceResp {
								available: avail,
								total,
							}
						}),
						..Default::default()
					},
				)
			})
			.collect::<HashMap<_, _>>();

		for (id, _, role) in layout.current().roles.items().iter() {
			if let layout::NodeRoleV(Some(r)) = role {
				let role = NodeRoleResp {
					id: hex::encode(id),
					zone: r.zone.to_string(),
					capacity: r.capacity,
					tags: r.tags.clone(),
				};
				match nodes.get_mut(id) {
					None => {
						nodes.insert(
							*id,
							NodeResp {
								id: hex::encode(id),
								role: Some(role),
								..Default::default()
							},
						);
					}
					Some(n) => {
						n.role = Some(role);
					}
				}
			}
		}

		for ver in layout.versions().iter().rev().skip(1) {
			for (id, _, role) in ver.roles.items().iter() {
				if let layout::NodeRoleV(Some(r)) = role {
					if r.capacity.is_some() {
						if let Some(n) = nodes.get_mut(id) {
							if n.role.is_none() {
								n.draining = true;
							}
						} else {
							nodes.insert(
								*id,
								NodeResp {
									id: hex::encode(id),
									draining: true,
									..Default::default()
								},
							);
						}
					}
				}
			}
		}

		let mut nodes = nodes.into_values().collect::<Vec<_>>();
		nodes.sort_by(|x, y| x.id.cmp(&y.id));

		Ok(GetClusterStatusResponse {
			node: hex::encode(garage.system.id),
			garage_version: garage_util::version::garage_version().to_string(),
			garage_features: garage_util::version::garage_features()
				.map(|features| features.iter().map(ToString::to_string).collect()),
			rust_version: garage_util::version::rust_version().to_string(),
			db_engine: garage.db.engine(),
			layout_version: layout.current().version,
			nodes,
		})
	}
}

#[async_trait]
impl RequestHandler for GetClusterHealthRequest {
	type Response = GetClusterHealthResponse;

	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<GetClusterHealthResponse, Error> {
		use garage_rpc::system::ClusterHealthStatus;
		let health = garage.system.health();
		let health = GetClusterHealthResponse {
			status: match health.status {
				ClusterHealthStatus::Healthy => "healthy",
				ClusterHealthStatus::Degraded => "degraded",
				ClusterHealthStatus::Unavailable => "unavailable",
			}
			.to_string(),
			known_nodes: health.known_nodes,
			connected_nodes: health.connected_nodes,
			storage_nodes: health.storage_nodes,
			storage_nodes_ok: health.storage_nodes_ok,
			partitions: health.partitions,
			partitions_quorum: health.partitions_quorum,
			partitions_all_ok: health.partitions_all_ok,
		};
		Ok(health)
	}
}

#[async_trait]
impl RequestHandler for ConnectClusterNodesRequest {
	type Response = ConnectClusterNodesResponse;

	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<ConnectClusterNodesResponse, Error> {
		let res = futures::future::join_all(self.0.iter().map(|node| garage.system.connect(node)))
			.await
			.into_iter()
			.map(|r| match r {
				Ok(()) => ConnectNodeResponse {
					success: true,
					error: None,
				},
				Err(e) => ConnectNodeResponse {
					success: false,
					error: Some(format!("{}", e)),
				},
			})
			.collect::<Vec<_>>();
		Ok(ConnectClusterNodesResponse(res))
	}
}

#[async_trait]
impl RequestHandler for GetClusterLayoutRequest {
	type Response = GetClusterLayoutResponse;

	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<GetClusterLayoutResponse, Error> {
		Ok(format_cluster_layout(
			garage.system.cluster_layout().inner(),
		))
	}
}

fn format_cluster_layout(layout: &layout::LayoutHistory) -> GetClusterLayoutResponse {
@@ -212,199 +239,101 @@ fn format_cluster_layout(layout: &layout::LayoutHistory) -> GetClusterLayoutResponse

// ----

#[derive(Debug, Clone, Copy, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct ClusterHealth {
	status: &'static str,
	known_nodes: usize,
	connected_nodes: usize,
	storage_nodes: usize,
	storage_nodes_ok: usize,
	partitions: usize,
	partitions_quorum: usize,
	partitions_all_ok: usize,
}

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct GetClusterStatusResponse {
	node: String,
	garage_version: &'static str,
	garage_features: Option<&'static [&'static str]>,
	rust_version: &'static str,
	db_engine: String,
	layout_version: u64,
	nodes: Vec<NodeResp>,
}

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct ApplyClusterLayoutResponse {
	message: Vec<String>,
	layout: GetClusterLayoutResponse,
}

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct ConnectClusterNodesResponse {
	success: bool,
	error: Option<String>,
}

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct GetClusterLayoutResponse {
	version: u64,
	roles: Vec<NodeRoleResp>,
	staged_role_changes: Vec<NodeRoleChange>,
}

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct NodeRoleResp {
	id: String,
	zone: String,
	capacity: Option<u64>,
	tags: Vec<String>,
}

#[derive(Serialize, Default)]
#[serde(rename_all = "camelCase")]
struct FreeSpaceResp {
	available: u64,
	total: u64,
}

#[derive(Serialize, Default)]
#[serde(rename_all = "camelCase")]
struct NodeResp {
	id: String,
	role: Option<NodeRoleResp>,
	addr: Option<SocketAddr>,
	hostname: Option<String>,
	is_up: bool,
	last_seen_secs_ago: Option<u64>,
	draining: bool,
	#[serde(skip_serializing_if = "Option::is_none")]
	data_partition: Option<FreeSpaceResp>,
	#[serde(skip_serializing_if = "Option::is_none")]
	metadata_partition: Option<FreeSpaceResp>,
}
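
One serialization detail worth noting: the two partition fields use skip_serializing_if, so nodes that reported no disk statistics simply omit those keys. Illustrative output for such a node (id invented):

	// { "id": "6a8e...", "role": null, "addr": null, "hostname": null,
	//   "isUp": false, "lastSeenSecsAgo": null, "draining": false }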
// ---- update functions ----

#[async_trait]
impl RequestHandler for UpdateClusterLayoutRequest {
	type Response = UpdateClusterLayoutResponse;

	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<UpdateClusterLayoutResponse, Error> {
		let mut layout = garage.system.cluster_layout().inner().clone();

		let mut roles = layout.current().roles.clone();
		roles.merge(&layout.staging.get().roles);

		for change in self.0 {
			let node = hex::decode(&change.id).ok_or_bad_request("Invalid node identifier")?;
			let node = Uuid::try_from(&node).ok_or_bad_request("Invalid node identifier")?;

			let new_role = match change.action {
				NodeRoleChangeEnum::Remove { remove: true } => None,
				NodeRoleChangeEnum::Update {
					zone,
					capacity,
					tags,
				} => Some(layout::NodeRole {
					zone,
					capacity,
					tags,
				}),
				_ => return Err(Error::bad_request("Invalid layout change")),
			};

			layout
				.staging
				.get_mut()
				.roles
				.merge(&roles.update_mutator(node, layout::NodeRoleV(new_role)));
		}

		garage
			.system
			.layout_manager
			.update_cluster_layout(&layout)
			.await?;

		let res = format_cluster_layout(&layout);
		Ok(UpdateClusterLayoutResponse(res))
	}
}

#[async_trait]
impl RequestHandler for ApplyClusterLayoutRequest {
	type Response = ApplyClusterLayoutResponse;

	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<ApplyClusterLayoutResponse, Error> {
		let layout = garage.system.cluster_layout().inner().clone();
		let (layout, msg) = layout.apply_staged_changes(Some(self.version))?;

		garage
			.system
			.layout_manager
			.update_cluster_layout(&layout)
			.await?;

		Ok(ApplyClusterLayoutResponse {
			message: msg,
			layout: format_cluster_layout(&layout),
		})
	}
}

#[async_trait]
impl RequestHandler for RevertClusterLayoutRequest {
	type Response = RevertClusterLayoutResponse;

	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<RevertClusterLayoutResponse, Error> {
		let layout = garage.system.cluster_layout().inner().clone();
		let layout = layout.revert_staged_changes()?;
		garage
			.system
			.layout_manager
			.update_cluster_layout(&layout)
			.await?;

		let res = format_cluster_layout(&layout);
		Ok(RevertClusterLayoutResponse(res))
	}
}

// ----

type UpdateClusterLayoutRequest = Vec<NodeRoleChange>;

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct ApplyLayoutRequest {
	version: u64,
}

// ----

#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct NodeRoleChange {
	id: String,
	#[serde(flatten)]
	action: NodeRoleChangeEnum,
}

#[derive(Serialize, Deserialize)]
#[serde(untagged)]
enum NodeRoleChangeEnum {
	#[serde(rename_all = "camelCase")]
	Remove { remove: bool },
	#[serde(rename_all = "camelCase")]
	Update {
		zone: String,
		capacity: Option<u64>,
		tags: Vec<String>,
	},
}
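
Given the serde attributes on NodeRoleChange (flattened action, untagged enum, camelCase), a staged-changes payload deserializes from JSON shaped like this (node ids invented):

	// [
	//   { "id": "<node id>", "zone": "dc1", "capacity": 100000000000, "tags": ["ssd"] },
	//   { "id": "<node id>", "remove": true }
	// ]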
@@ -56,7 +56,7 @@ impl From<HelperError> for Error {
impl CommonErrorDerivative for Error {}

impl Error {
	fn code(&self) -> &'static str {
	pub fn code(&self) -> &'static str {
		match self {
			Error::Common(c) => c.aws_code(),
			Error::NoSuchAccessKey(_) => "NoSuchAccessKey",
@@ -1,172 +1,176 @@
use std::collections::HashMap;
use std::sync::Arc;

use hyper::{body::Incoming as IncomingBody, Request, Response, StatusCode};
use serde::{Deserialize, Serialize};
use async_trait::async_trait;

use garage_table::*;

use garage_model::garage::Garage;
use garage_model::key_table::*;

use crate::admin::api_server::ResBody;
use crate::admin::api::*;
use crate::admin::error::*;
use crate::helpers::*;
use crate::admin::{Admin, RequestHandler};

#[async_trait]
impl RequestHandler for ListKeysRequest {
	type Response = ListKeysResponse;

	async fn handle(self, garage: &Arc<Garage>, _admin: &Admin) -> Result<ListKeysResponse, Error> {
		let res = garage
			.key_table
			.get_range(
				&EmptyKey,
				None,
				Some(KeyFilter::Deleted(DeletedFilter::NotDeleted)),
				10000,
				EnumerationOrder::Forward,
			)
			.await?
			.iter()
			.map(|k| ListKeysResponseItem {
				id: k.key_id.to_string(),
				name: k.params().unwrap().name.get().clone(),
			})
			.collect::<Vec<_>>();

		Ok(ListKeysResponse(res))
	}
}

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct ListKeyResultItem {
	id: String,
	name: String,
}

#[async_trait]
impl RequestHandler for GetKeyInfoRequest {
	type Response = GetKeyInfoResponse;

	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<GetKeyInfoResponse, Error> {
		let key = match (self.id, self.search) {
			(Some(id), None) => garage.key_helper().get_existing_key(&id).await?,
			(None, Some(search)) => {
				garage
					.key_helper()
					.get_existing_matching_key(&search)
					.await?
			}
			_ => {
				return Err(Error::bad_request(
					"Either id or search must be provided (but not both)",
				));
			}
		};

		Ok(key_info_results(garage, key, self.show_secret_key).await?)
	}
}

#[async_trait]
impl RequestHandler for CreateKeyRequest {
	type Response = CreateKeyResponse;

	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<CreateKeyResponse, Error> {
		let key = Key::new(self.name.as_deref().unwrap_or("Unnamed key"));
		garage.key_table.insert(&key).await?;

		Ok(CreateKeyResponse(
			key_info_results(garage, key, true).await?,
		))
	}
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct CreateKeyRequest {
	name: Option<String>,
}

#[async_trait]
impl RequestHandler for ImportKeyRequest {
	type Response = ImportKeyResponse;

	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<ImportKeyResponse, Error> {
		let prev_key = garage.key_table.get(&EmptyKey, &self.access_key_id).await?;
		if prev_key.is_some() {
			return Err(Error::KeyAlreadyExists(self.access_key_id.to_string()));
		}

		let imported_key = Key::import(
			&self.access_key_id,
			&self.secret_access_key,
			self.name.as_deref().unwrap_or("Imported key"),
		)
		.ok_or_bad_request("Invalid key format")?;
		garage.key_table.insert(&imported_key).await?;

		Ok(ImportKeyResponse(
			key_info_results(garage, imported_key, false).await?,
		))
	}
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct ImportKeyRequest {
	access_key_id: String,
	secret_access_key: String,
	name: Option<String>,
}

#[async_trait]
impl RequestHandler for UpdateKeyRequest {
	type Response = UpdateKeyResponse;

	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<UpdateKeyResponse, Error> {
		let mut key = garage.key_helper().get_existing_key(&self.id).await?;

		let key_state = key.state.as_option_mut().unwrap();

		if let Some(new_name) = self.body.name {
			key_state.name.update(new_name);
		}
		if let Some(allow) = self.body.allow {
			if allow.create_bucket {
				key_state.allow_create_bucket.update(true);
			}
		}
		if let Some(deny) = self.body.deny {
			if deny.create_bucket {
				key_state.allow_create_bucket.update(false);
			}
		}

		garage.key_table.insert(&key).await?;

		Ok(UpdateKeyResponse(
			key_info_results(garage, key, false).await?,
		))
	}
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct UpdateKeyRequest {
	name: Option<String>,
	allow: Option<KeyPerm>,
	deny: Option<KeyPerm>,
}

#[async_trait]
impl RequestHandler for DeleteKeyRequest {
	type Response = DeleteKeyResponse;

	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<DeleteKeyResponse, Error> {
		let helper = garage.locked_helper().await;

		let mut key = helper.key().get_existing_key(&self.id).await?;

		helper.delete_key(&mut key).await?;

		Ok(DeleteKeyResponse)
	}
}

async fn key_info_results(
	garage: &Arc<Garage>,
	key: Key,
	show_secret: bool,
) -> Result<GetKeyInfoResponse, Error> {
	let mut relevant_buckets = HashMap::new();

	let key_state = key.state.as_option().unwrap();

@@ -192,7 +196,7 @@ async fn key_info_results
		}
	}

	let res = GetKeyInfoResponse {
		name: key_state.name.get().clone(),
		access_key_id: key.key_id.clone(),
		secret_access_key: if show_secret {

@@ -207,7 +211,7 @@ async fn key_info_results
		.into_values()
		.map(|bucket| {
			let state = bucket.state.as_option().unwrap();
			KeyInfoBucketResponse {
				id: hex::encode(bucket.id),
				global_aliases: state
					.aliases

@@ -237,43 +241,5 @@ async fn key_info_results
			.collect::<Vec<_>>(),
	};

	Ok(res)
}

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct GetKeyInfoResult {
	name: String,
	access_key_id: String,
	#[serde(skip_serializing_if = "is_default")]
	secret_access_key: Option<String>,
	permissions: KeyPerm,
	buckets: Vec<KeyInfoBucketResult>,
}

#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct KeyPerm {
	#[serde(default)]
	create_bucket: bool,
}

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct KeyInfoBucketResult {
	id: String,
	global_aliases: Vec<String>,
	local_aliases: Vec<String>,
	permissions: ApiBucketKeyPerm,
}

#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
pub(crate) struct ApiBucketKeyPerm {
	#[serde(default)]
	pub(crate) read: bool,
	#[serde(default)]
	pub(crate) write: bool,
	#[serde(default)]
	pub(crate) owner: bool,
}
230	src/api/admin/macros.rs	Normal file
@@ -0,0 +1,230 @@
macro_rules! admin_endpoints {
	[
		$(@special $special_endpoint:ident,)*
		$($endpoint:ident,)*
	] => {
		paste! {
			#[derive(Debug, Clone, Serialize, Deserialize)]
			pub enum AdminApiRequest {
				$(
					$special_endpoint( [<$special_endpoint Request>] ),
				)*
				$(
					$endpoint( [<$endpoint Request>] ),
				)*
			}

			#[derive(Debug, Clone, Serialize)]
			#[serde(untagged)]
			pub enum AdminApiResponse {
				$(
					$endpoint( [<$endpoint Response>] ),
				)*
			}

			#[derive(Debug, Clone, Serialize, Deserialize)]
			pub enum TaggedAdminApiResponse {
				$(
					$endpoint( [<$endpoint Response>] ),
				)*
			}

			impl AdminApiRequest {
				pub fn name(&self) -> &'static str {
					match self {
						$(
							Self::$special_endpoint(_) => stringify!($special_endpoint),
						)*
						$(
							Self::$endpoint(_) => stringify!($endpoint),
						)*
					}
				}
			}

			impl AdminApiResponse {
				pub fn tagged(self) -> TaggedAdminApiResponse {
					match self {
						$(
							Self::$endpoint(res) => TaggedAdminApiResponse::$endpoint(res),
						)*
					}
				}
			}

			$(
				impl From< [< $endpoint Request >] > for AdminApiRequest {
					fn from(req: [< $endpoint Request >]) -> AdminApiRequest {
						AdminApiRequest::$endpoint(req)
					}
				}

				impl TryFrom<TaggedAdminApiResponse> for [< $endpoint Response >] {
					type Error = TaggedAdminApiResponse;
					fn try_from(resp: TaggedAdminApiResponse) -> Result< [< $endpoint Response >], TaggedAdminApiResponse> {
						match resp {
							TaggedAdminApiResponse::$endpoint(v) => Ok(v),
							x => Err(x),
						}
					}
				}
			)*

			#[async_trait]
			impl RequestHandler for AdminApiRequest {
				type Response = AdminApiResponse;

				async fn handle(self, garage: &Arc<Garage>, admin: &Admin) -> Result<AdminApiResponse, Error> {
					Ok(match self {
						$(
							AdminApiRequest::$special_endpoint(_) => panic!(
								concat!(stringify!($special_endpoint), " needs to go through a special handler")
							),
						)*
						$(
							AdminApiRequest::$endpoint(req) => AdminApiResponse::$endpoint(req.handle(garage, admin).await?),
						)*
					})
				}
			}
		}
	};
}
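
A hypothetical invocation, to make the expansion concrete (the real endpoint list lives in api.rs and is much longer; the `@special` entries match the ones routed specially in router_v2.rs below):

	admin_endpoints![
		@special Options,
		@special CheckDomain,
		@special Health,
		ListBuckets,
		GetBucketInfo,
	];
	// This expands to the AdminApiRequest/AdminApiResponse enums with one variant
	// per endpoint, the From/TryFrom conversions, and the dispatching
	// RequestHandler impl shown above.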
|
||||
|
macro_rules! local_admin_endpoints {
	[
		$($endpoint:ident,)*
	] => {
		paste! {
			#[derive(Debug, Clone, Serialize, Deserialize)]
			pub enum LocalAdminApiRequest {
				$(
					$endpoint( [<Local $endpoint Request>] ),
				)*
			}

			#[derive(Debug, Clone, Serialize, Deserialize)]
			pub enum LocalAdminApiResponse {
				$(
					$endpoint( [<Local $endpoint Response>] ),
				)*
			}

			$(
				#[derive(Debug, Clone, Serialize, Deserialize)]
				pub struct [< $endpoint Request >] {
					pub node: String,
					pub body: [< Local $endpoint Request >],
				}

				pub type [< $endpoint RequestBody >] = [< Local $endpoint Request >];

				#[derive(Debug, Clone, Serialize, Deserialize)]
				pub struct [< $endpoint Response >] {
					pub success: HashMap<String, [< Local $endpoint Response >] >,
					pub error: HashMap<String, String>,
				}

				impl From< [< Local $endpoint Request >] > for LocalAdminApiRequest {
					fn from(req: [< Local $endpoint Request >]) -> LocalAdminApiRequest {
						LocalAdminApiRequest::$endpoint(req)
					}
				}

				impl TryFrom<LocalAdminApiResponse> for [< Local $endpoint Response >] {
					type Error = LocalAdminApiResponse;
					fn try_from(resp: LocalAdminApiResponse) -> Result< [< Local $endpoint Response >], LocalAdminApiResponse> {
						match resp {
							LocalAdminApiResponse::$endpoint(v) => Ok(v),
							x => Err(x),
						}
					}
				}

				#[async_trait]
				impl RequestHandler for [< $endpoint Request >] {
					type Response = [< $endpoint Response >];

					async fn handle(self, garage: &Arc<Garage>, admin: &Admin) -> Result<Self::Response, Error> {
						let to = match self.node.as_str() {
							"*" => garage.system.cluster_layout().all_nodes().to_vec(),
							id => {
								let nodes = garage.system.cluster_layout().all_nodes()
									.iter()
									.filter(|x| hex::encode(x).starts_with(id))
									.cloned()
									.collect::<Vec<_>>();
								if nodes.len() != 1 {
									return Err(Error::bad_request(format!("Zero or multiple nodes matching {}: {:?}", id, nodes)));
								}
								nodes
							}
						};

						let resps = garage.system.rpc_helper().call_many(&admin.endpoint,
							&to,
							AdminRpc::Internal(self.body.into()),
							RequestStrategy::with_priority(PRIO_NORMAL),
						).await?;

						let mut ret = [< $endpoint Response >] {
							success: HashMap::new(),
							error: HashMap::new(),
						};
						for (node, resp) in resps {
							match resp {
								Ok(AdminRpcResponse::InternalApiOkResponse(r)) => {
									match [< Local $endpoint Response >]::try_from(r) {
										Ok(r) => {
											ret.success.insert(hex::encode(node), r);
										}
										Err(_) => {
											ret.error.insert(hex::encode(node), "returned invalid value".to_string());
										}
									}
								}
								Ok(AdminRpcResponse::ApiErrorResponse{error_code, http_code, message}) => {
									ret.error.insert(hex::encode(node), format!("{} ({}): {}", error_code, http_code, message));
								}
								Ok(_) => {
									ret.error.insert(hex::encode(node), "returned invalid value".to_string());
								}
								Err(e) => {
									ret.error.insert(hex::encode(node), e.to_string());
								}
							}
						}

						Ok(ret)
					}
				}
			)*

			impl LocalAdminApiRequest {
				pub fn name(&self) -> &'static str {
					match self {
						$(
							Self::$endpoint(_) => stringify!($endpoint),
						)*
					}
				}
			}

			#[async_trait]
			impl RequestHandler for LocalAdminApiRequest {
				type Response = LocalAdminApiResponse;

				async fn handle(self, garage: &Arc<Garage>, admin: &Admin) -> Result<LocalAdminApiResponse, Error> {
					Ok(match self {
						$(
							LocalAdminApiRequest::$endpoint(req) => LocalAdminApiResponse::$endpoint(req.handle(garage, admin).await?),
						)*
					})
				}
			}
		}
	};
}

pub(crate) use admin_endpoints;
pub(crate) use local_admin_endpoints;
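To make the expansion above concrete, a hand-written sketch of an invocation and of the identifiers that paste!'s [< ... >] concatenation produces (the endpoint list here is illustrative; the real list is supplied by the caller of this macro):

// Sketch only: assumes the per-node types LocalGetWorkerVariableRequest /
// LocalGetWorkerVariableResponse etc. are already defined.
local_admin_endpoints![
	GetWorkerVariable,
	SetWorkerVariable,
];
// For `GetWorkerVariable`, [<Local $endpoint Request>] yields the identifier
// LocalGetWorkerVariableRequest, and the macro emits:
//   - GetWorkerVariableRequest  { node: String, body: LocalGetWorkerVariableRequest }
//   - GetWorkerVariableResponse { success: HashMap<..>, error: HashMap<..> }
//   - From/TryFrom conversions, plus a RequestHandler impl that fans the
//     request out over RPC to the node(s) selected by `node`.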
@ -1,8 +1,40 @@
pub mod api_server;
mod error;
mod macros;

pub mod api;
mod router_v0;
mod router_v1;
mod router_v2;

mod bucket;
mod cluster;
mod key;
mod special;

mod worker;

use std::sync::Arc;

use async_trait::async_trait;

use garage_model::garage::Garage;

pub use api_server::AdminApiServer as Admin;

pub enum Authorization {
	None,
	MetricsToken,
	AdminToken,
}

#[async_trait]
pub trait RequestHandler {
	type Response;

	async fn handle(
		self,
		garage: &Arc<Garage>,
		admin: &Admin,
	) -> Result<Self::Response, error::Error>;
}
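For reference, a minimal sketch of an endpoint implementing this trait; the PingRequest/PingResponse types are invented for illustration, and the sketch assumes this module's imports are in scope:

struct PingRequest;
struct PingResponse {
	pub ok: bool,
}

#[async_trait]
impl RequestHandler for PingRequest {
	type Response = PingResponse;

	// A trivial handler: real endpoints inspect `garage` (cluster state,
	// tables) and `admin` (the RPC endpoint) to do their work.
	async fn handle(
		self,
		_garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<PingResponse, error::Error> {
		Ok(PingResponse { ok: true })
	}
}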
@ -6,12 +6,6 @@ use crate::admin::error::*;
 use crate::admin::router_v0;
 use crate::router_macros::*;

-pub enum Authorization {
-	None,
-	MetricsToken,
-	AdminToken,
-}
-
 router_match! {@func

 /// List of all Admin API endpoints.

@ -210,15 +204,6 @@ impl Endpoint {
 			))),
 		}
 	}
-	/// Get the kind of authorization which is required to perform the operation.
-	pub fn authorization_type(&self) -> Authorization {
-		match self {
-			Self::Health => Authorization::None,
-			Self::CheckDomain => Authorization::None,
-			Self::Metrics => Authorization::MetricsToken,
-			_ => Authorization::AdminToken,
-		}
-	}
 }

 generateQueryParameters! {
src/api/admin/router_v2.rs (new file, 254 lines)
@ -0,0 +1,254 @@
use std::borrow::Cow;

use hyper::body::Incoming as IncomingBody;
use hyper::{Method, Request};
use paste::paste;

use crate::admin::api::*;
use crate::admin::error::*;
use crate::admin::router_v1;
use crate::admin::Authorization;
use crate::helpers::*;
use crate::router_macros::*;

impl AdminApiRequest {
	/// Determine which admin API endpoint a request is for,
	/// parsing its parameters from the query string and/or the request body.
	pub async fn from_request(req: Request<IncomingBody>) -> Result<Self, Error> {
		let uri = req.uri().clone();
		let path = uri.path();
		let query = uri.query();

		let method = req.method().clone();

		let mut query = QueryParameters::from_query(query.unwrap_or_default())?;

		let res = router_match!(@gen_path_parser_v2 (&method, path, "/v2/", query, req) [
			@special OPTIONS _ => Options (),
			@special GET "/check" => CheckDomain (query::domain),
			@special GET "/health" => Health (),
			@special GET "/metrics" => Metrics (),
			// Cluster endpoints
			GET GetClusterStatus (),
			GET GetClusterHealth (),
			POST ConnectClusterNodes (body),
			// Layout endpoints
			GET GetClusterLayout (),
			POST UpdateClusterLayout (body),
			POST ApplyClusterLayout (body),
			POST RevertClusterLayout (),
			// API key endpoints
			GET GetKeyInfo (query_opt::id, query_opt::search, parse_default(false)::show_secret_key),
			POST UpdateKey (body_field, query::id),
			POST CreateKey (body),
			POST ImportKey (body),
			POST DeleteKey (query::id),
			GET ListKeys (),
			// Bucket endpoints
			GET GetBucketInfo (query_opt::id, query_opt::global_alias, query_opt::search),
			GET ListBuckets (),
			POST CreateBucket (body),
			POST DeleteBucket (query::id),
			POST UpdateBucket (body_field, query::id),
			POST CleanupIncompleteUploads (body),
			// Bucket-key permissions
			POST AllowBucketKey (body),
			POST DenyBucketKey (body),
			// Bucket aliases
			POST AddBucketAlias (body),
			POST RemoveBucketAlias (body),
			// Worker APIs
			POST GetWorkerVariable (body_field, query::node),
		]);

		if let Some(message) = query.nonempty_message() {
			debug!("Unused query parameter: {}", message)
		}

		Ok(res)
	}

	/// Some endpoints work exactly the same in their v2/ version as they did in their v1/ version.
	/// For these endpoints, we can convert a v1/ call to its equivalent as if it was made using
	/// its v2/ URL.
	pub async fn from_v1(
		v1_endpoint: router_v1::Endpoint,
		req: Request<IncomingBody>,
	) -> Result<Self, Error> {
		use router_v1::Endpoint;

		match v1_endpoint {
			Endpoint::GetClusterStatus => {
				Ok(AdminApiRequest::GetClusterStatus(GetClusterStatusRequest))
			}
			Endpoint::GetClusterHealth => {
				Ok(AdminApiRequest::GetClusterHealth(GetClusterHealthRequest))
			}
			Endpoint::ConnectClusterNodes => {
				let req = parse_json_body::<ConnectClusterNodesRequest, _, Error>(req).await?;
				Ok(AdminApiRequest::ConnectClusterNodes(req))
			}

			// Layout
			Endpoint::GetClusterLayout => {
				Ok(AdminApiRequest::GetClusterLayout(GetClusterLayoutRequest))
			}
			Endpoint::UpdateClusterLayout => {
				let updates = parse_json_body::<UpdateClusterLayoutRequest, _, Error>(req).await?;
				Ok(AdminApiRequest::UpdateClusterLayout(updates))
			}
			Endpoint::ApplyClusterLayout => {
				let param = parse_json_body::<ApplyClusterLayoutRequest, _, Error>(req).await?;
				Ok(AdminApiRequest::ApplyClusterLayout(param))
			}
			Endpoint::RevertClusterLayout => Ok(AdminApiRequest::RevertClusterLayout(
				RevertClusterLayoutRequest,
			)),

			// Keys
			Endpoint::ListKeys => Ok(AdminApiRequest::ListKeys(ListKeysRequest)),
			Endpoint::GetKeyInfo {
				id,
				search,
				show_secret_key,
			} => {
				let show_secret_key = show_secret_key.map(|x| x == "true").unwrap_or(false);
				Ok(AdminApiRequest::GetKeyInfo(GetKeyInfoRequest {
					id,
					search,
					show_secret_key,
				}))
			}
			Endpoint::CreateKey => {
				let req = parse_json_body::<CreateKeyRequest, _, Error>(req).await?;
				Ok(AdminApiRequest::CreateKey(req))
			}
			Endpoint::ImportKey => {
				let req = parse_json_body::<ImportKeyRequest, _, Error>(req).await?;
				Ok(AdminApiRequest::ImportKey(req))
			}
			Endpoint::UpdateKey { id } => {
				let body = parse_json_body::<UpdateKeyRequestBody, _, Error>(req).await?;
				Ok(AdminApiRequest::UpdateKey(UpdateKeyRequest { id, body }))
			}

			// DeleteKey semantics changed:
			// - in v1/ : HTTP DELETE => HTTP 204 No Content
			// - in v2/ : HTTP POST => HTTP 200 Ok
			// Endpoint::DeleteKey { id } => Ok(AdminApiRequest::DeleteKey(DeleteKeyRequest { id })),

			// Buckets
			Endpoint::ListBuckets => Ok(AdminApiRequest::ListBuckets(ListBucketsRequest)),
			Endpoint::GetBucketInfo { id, global_alias } => {
				Ok(AdminApiRequest::GetBucketInfo(GetBucketInfoRequest {
					id,
					global_alias,
					search: None,
				}))
			}
			Endpoint::CreateBucket => {
				let req = parse_json_body::<CreateBucketRequest, _, Error>(req).await?;
				Ok(AdminApiRequest::CreateBucket(req))
			}

			// DeleteBucket semantics changed:
			// - in v1/ : HTTP DELETE => HTTP 204 No Content
			// - in v2/ : HTTP POST => HTTP 200 Ok
			// Endpoint::DeleteBucket { id } => {
			//     Ok(AdminApiRequest::DeleteBucket(DeleteBucketRequest { id }))
			// }
			Endpoint::UpdateBucket { id } => {
				let body = parse_json_body::<UpdateBucketRequestBody, _, Error>(req).await?;
				Ok(AdminApiRequest::UpdateBucket(UpdateBucketRequest {
					id,
					body,
				}))
			}

			// Bucket-key permissions
			Endpoint::BucketAllowKey => {
				let req = parse_json_body::<BucketKeyPermChangeRequest, _, Error>(req).await?;
				Ok(AdminApiRequest::AllowBucketKey(AllowBucketKeyRequest(req)))
			}
			Endpoint::BucketDenyKey => {
				let req = parse_json_body::<BucketKeyPermChangeRequest, _, Error>(req).await?;
				Ok(AdminApiRequest::DenyBucketKey(DenyBucketKeyRequest(req)))
			}
			// Bucket aliasing
			Endpoint::GlobalAliasBucket { id, alias } => {
				Ok(AdminApiRequest::AddBucketAlias(AddBucketAliasRequest {
					bucket_id: id,
					alias: BucketAliasEnum::Global {
						global_alias: alias,
					},
				}))
			}
			Endpoint::GlobalUnaliasBucket { id, alias } => Ok(AdminApiRequest::RemoveBucketAlias(
				RemoveBucketAliasRequest {
					bucket_id: id,
					alias: BucketAliasEnum::Global {
						global_alias: alias,
					},
				},
			)),
			Endpoint::LocalAliasBucket {
				id,
				access_key_id,
				alias,
			} => Ok(AdminApiRequest::AddBucketAlias(AddBucketAliasRequest {
				bucket_id: id,
				alias: BucketAliasEnum::Local {
					local_alias: alias,
					access_key_id,
				},
			})),
			Endpoint::LocalUnaliasBucket {
				id,
				access_key_id,
				alias,
			} => Ok(AdminApiRequest::RemoveBucketAlias(
				RemoveBucketAliasRequest {
					bucket_id: id,
					alias: BucketAliasEnum::Local {
						local_alias: alias,
						access_key_id,
					},
				},
			)),

			// For endpoints that have a different body content syntax, issue a
			// deprecation warning
			_ => Err(Error::bad_request(format!(
				"v1/ endpoint is no longer supported: {}",
				v1_endpoint.name()
			))),
		}
	}

	/// Get the kind of authorization which is required to perform the operation.
	pub fn authorization_type(&self) -> Authorization {
		match self {
			Self::Options(_) => Authorization::None,
			Self::Health(_) => Authorization::None,
			Self::CheckDomain(_) => Authorization::None,
			Self::Metrics(_) => Authorization::MetricsToken,
			_ => Authorization::AdminToken,
		}
	}
}

generateQueryParameters! {
	keywords: [],
	fields: [
		"node" => node,
		"domain" => domain,
		"format" => format,
		"id" => id,
		"search" => search,
		"globalAlias" => global_alias,
		"alias" => alias,
		"accessKeyId" => access_key_id,
		"showSecretKey" => show_secret_key
	]
}
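To make the new URL scheme concrete, a small self-contained sketch of the convention encoded above: the endpoint name appears verbatim after /v2/, and parameters are camelCase query fields or a JSON body (port 3903 is Garage's default admin port; adjust to your configuration):

fn main() {
	let base = "http://localhost:3903";
	// GET endpoints take query parameters, e.g. GetKeyInfo with its
	// query_opt::search and parse_default(false)::show_secret_key fields:
	let get_key = format!("{}/v2/GetKeyInfo?search=my-key&showSecretKey=true", base);
	// POST endpoints marked (body) expect a JSON request body instead:
	let create_bucket = format!("{}/v2/CreateBucket", base);
	println!("GET  {}\nPOST {} (with JSON body)", get_key, create_bucket);
}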
src/api/admin/special.rs (new file, 144 lines)
@ -0,0 +1,144 @@
use std::sync::Arc;

use async_trait::async_trait;

use http::header::{
	ACCESS_CONTROL_ALLOW_HEADERS, ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN, ALLOW,
};
use hyper::{Response, StatusCode};

use garage_model::garage::Garage;
use garage_rpc::system::ClusterHealthStatus;

use crate::admin::api::{CheckDomainRequest, HealthRequest, OptionsRequest};
use crate::admin::api_server::ResBody;
use crate::admin::error::*;
use crate::admin::{Admin, RequestHandler};
use crate::helpers::*;

#[async_trait]
impl RequestHandler for OptionsRequest {
	type Response = Response<ResBody>;

	async fn handle(
		self,
		_garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<Response<ResBody>, Error> {
		Ok(Response::builder()
			.status(StatusCode::OK)
			.header(ALLOW, "OPTIONS,GET,POST")
			.header(ACCESS_CONTROL_ALLOW_METHODS, "OPTIONS,GET,POST")
			.header(ACCESS_CONTROL_ALLOW_HEADERS, "authorization,content-type")
			.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
			.body(empty_body())?)
	}
}

#[async_trait]
impl RequestHandler for CheckDomainRequest {
	type Response = Response<ResBody>;

	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<Response<ResBody>, Error> {
		if check_domain(garage, &self.domain).await? {
			Ok(Response::builder()
				.status(StatusCode::OK)
				.body(string_body(format!(
					"Domain '{}' is managed by Garage",
					self.domain
				)))?)
		} else {
			Err(Error::bad_request(format!(
				"Domain '{}' is not managed by Garage",
				self.domain
			)))
		}
	}
}

async fn check_domain(garage: &Arc<Garage>, domain: &str) -> Result<bool, Error> {
	// Resolve bucket from domain name, inferring if the website must be activated for the
	// domain to be valid.
	let (bucket_name, must_check_website) = if let Some(bname) = garage
		.config
		.s3_api
		.root_domain
		.as_ref()
		.and_then(|rd| host_to_bucket(domain, rd))
	{
		(bname.to_string(), false)
	} else if let Some(bname) = garage
		.config
		.s3_web
		.as_ref()
		.and_then(|sw| host_to_bucket(domain, sw.root_domain.as_str()))
	{
		(bname.to_string(), true)
	} else {
		(domain.to_string(), true)
	};

	let bucket_id = match garage
		.bucket_helper()
		.resolve_global_bucket_name(&bucket_name)
		.await?
	{
		Some(bucket_id) => bucket_id,
		None => return Ok(false),
	};

	if !must_check_website {
		return Ok(true);
	}

	let bucket = garage
		.bucket_helper()
		.get_existing_bucket(bucket_id)
		.await?;

	let bucket_state = bucket.state.as_option().unwrap();
	let bucket_website_config = bucket_state.website_config.get();

	match bucket_website_config {
		Some(_v) => Ok(true),
		None => Ok(false),
	}
}

#[async_trait]
impl RequestHandler for HealthRequest {
	type Response = Response<ResBody>;

	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<Response<ResBody>, Error> {
		let health = garage.system.health();

		let (status, status_str) = match health.status {
			ClusterHealthStatus::Healthy => (StatusCode::OK, "Garage is fully operational"),
			ClusterHealthStatus::Degraded => (
				StatusCode::OK,
				"Garage is operational but some storage nodes are unavailable",
			),
			ClusterHealthStatus::Unavailable => (
				StatusCode::SERVICE_UNAVAILABLE,
				"Quorum is not available for some/all partitions, reads and writes will fail",
			),
		};
		let status_str = format!(
			"{}\nConsult the full health check API endpoint at /v2/GetClusterHealth for more details\n",
			status_str
		);

		Ok(Response::builder()
			.status(status)
			.header(http::header::CONTENT_TYPE, "text/plain")
			.body(string_body(status_str))?)
	}
}
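The status mapping above is worth restating: only a lost quorum produces a non-2xx code, so an external probe of /health keeps routing traffic to a degraded-but-functional cluster. A standalone sketch of that mapping:

enum Health {
	Healthy,
	Degraded,
	Unavailable,
}

// Same policy as the HealthRequest handler above: Degraded still answers 200 OK.
fn http_status(h: Health) -> u16 {
	match h {
		Health::Healthy | Health::Degraded => 200,
		Health::Unavailable => 503, // SERVICE_UNAVAILABLE
	}
}

fn main() {
	assert_eq!(http_status(Health::Degraded), 200);
}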
src/api/admin/worker.rs (new file, 50 lines)
@ -0,0 +1,50 @@
use std::collections::HashMap;
use std::sync::Arc;

use async_trait::async_trait;

use garage_model::garage::Garage;

use crate::admin::api::*;
use crate::admin::error::Error;
use crate::admin::{Admin, RequestHandler};

#[async_trait]
impl RequestHandler for LocalGetWorkerVariableRequest {
	type Response = LocalGetWorkerVariableResponse;

	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<LocalGetWorkerVariableResponse, Error> {
		let mut res = HashMap::new();
		if let Some(k) = self.variable {
			res.insert(k.clone(), garage.bg_vars.get(&k)?);
		} else {
			let vars = garage.bg_vars.get_all();
			for (k, v) in vars.iter() {
				res.insert(k.to_string(), v.to_string());
			}
		}
		Ok(LocalGetWorkerVariableResponse(res))
	}
}

#[async_trait]
impl RequestHandler for LocalSetWorkerVariableRequest {
	type Response = LocalSetWorkerVariableResponse;

	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<LocalSetWorkerVariableResponse, Error> {
		garage.bg_vars.set(&self.variable, &self.value)?;

		Ok(LocalSetWorkerVariableResponse {
			variable: self.variable,
			value: self.value,
		})
	}
}
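Combined with the local_admin_endpoints! macro, these handlers become reachable as POST /v2/GetWorkerVariable?node=... A hand-written sketch of the generated request/response shapes; the field names follow the macro, but the serialized casing depends on serde attributes in api.rs and is an assumption here:

use std::collections::HashMap;

// What the macro generates for GetWorkerVariable, spelled out by hand:
#[allow(dead_code)]
struct GetWorkerVariableRequest {
	node: String,                        // "*" or a hex node-id prefix, from ?node=
	body: LocalGetWorkerVariableRequest, // the JSON request body
}
#[allow(dead_code)]
struct LocalGetWorkerVariableRequest {
	variable: Option<String>, // None => return all background variables
}
#[allow(dead_code)]
struct GetWorkerVariableResponse {
	success: HashMap<String, HashMap<String, String>>, // per-node variable maps
	error: HashMap<String, String>,                    // per-node error strings
}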
@ -1,3 +1,4 @@
+use std::borrow::Cow;
 use std::convert::Infallible;
 use std::fs::{self, Permissions};
 use std::os::unix::fs::PermissionsExt;

@ -37,7 +38,7 @@ use garage_util::socket_address::UnixOrTCPSocketAddress;
 use crate::helpers::{BoxBody, ErrorBody};

 pub(crate) trait ApiEndpoint: Send + Sync + 'static {
-	fn name(&self) -> &'static str;
+	fn name(&self) -> Cow<'static, str>;
 	fn add_span_attributes(&self, span: SpanRef<'_>);
 }
@ -1,3 +1,4 @@
+use std::borrow::Cow;
 use std::sync::Arc;

 use async_trait::async_trait;

@ -181,8 +182,8 @@ impl ApiHandler for K2VApiServer {
 }

 impl ApiEndpoint for K2VApiEndpoint {
-	fn name(&self) -> &'static str {
-		self.endpoint.name()
+	fn name(&self) -> Cow<'static, str> {
+		Cow::Borrowed(self.endpoint.name())
 	}

 	fn add_span_attributes(&self, span: SpanRef<'_>) {
@ -44,6 +44,68 @@ macro_rules! router_match {
 		}
 	}
 }};
+(@gen_path_parser_v2 ($method:expr, $reqpath:expr, $pathprefix:literal, $query:expr, $req:expr)
+[
+	$(@special $spec_meth:ident $spec_path:pat => $spec_api:ident $spec_params:tt,)*
+	$($meth:ident $api:ident $params:tt,)*
+]) => {{
+	{
+		#[allow(unused_parens)]
+		match ($method, $reqpath) {
+			$(
+				(&Method::$spec_meth, $spec_path) => AdminApiRequest::$spec_api (
+					router_match!(@@gen_parse_request $spec_api, $spec_params, $query, $req)
+				),
+			)*
+			$(
+				(&Method::$meth, concat!($pathprefix, stringify!($api)))
+					=> AdminApiRequest::$api (
+					router_match!(@@gen_parse_request $api, $params, $query, $req)
+				),
+			)*
+			(m, p) => {
+				return Err(Error::bad_request(format!(
+					"Unknown API endpoint: {} {}",
+					m, p
+				)))
+			}
+		}
+	}
+}};
+(@@gen_parse_request $api:ident, (), $query: expr, $req:expr) => {{
+	paste!(
+		[< $api Request >]
+	)
+}};
+(@@gen_parse_request $api:ident, (body), $query: expr, $req:expr) => {{
+	paste!({
+		parse_json_body::< [<$api Request>], _, Error>($req).await?
+	})
+}};
+(@@gen_parse_request $api:ident, (body_field, $($conv:ident $(($conv_arg:expr))? :: $param:ident),*), $query: expr, $req:expr)
+	=>
+{{
+	paste!({
+		let body = parse_json_body::< [<$api RequestBody>], _, Error>($req).await?;
+		[< $api Request >] {
+			body,
+			$(
+				$param: router_match!(@@parse_param $query, $conv $(($conv_arg))?, $param),
+			)+
+		}
+	})
+}};
+(@@gen_parse_request $api:ident, ($($conv:ident $(($conv_arg:expr))? :: $param:ident),*), $query: expr, $req:expr)
+	=>
+{{
+	paste!({
+		[< $api Request >] {
+			$(
+				$param: router_match!(@@parse_param $query, $conv $(($conv_arg))?, $param),
+			)+
+		}
+	})
+}};
 (@gen_parser ($keyword:expr, $key:ident, $query:expr, $header:expr),
 	key: [$($kw_k:ident $(if $required_k:ident)? $(header $header_k:expr)? => $api_k:ident $(($($conv_k:ident :: $param_k:ident),*))?,)*],
 	no_key: [$($kw_nk:ident $(if $required_nk:ident)? $(if_header $header_nk:expr)? => $api_nk:ident $(($($conv_nk:ident :: $param_nk:ident),*))?,)*]) => {{

@ -84,7 +146,10 @@ macro_rules! router_match {
 }};
 (@@parse_param $query:expr, query, $param:ident) => {{
 	// extract mandatory query parameter
-	$query.$param.take().ok_or_bad_request("Missing argument for endpoint")?.into_owned()
+	$query.$param.take()
+		.ok_or_bad_request(
+			format!("Missing argument `{}` for endpoint", stringify!($param))
+		)?.into_owned()
 }};
 (@@parse_param $query:expr, opt_parse, $param:ident) => {{
 	// extract and parse optional query parameter

@ -98,10 +163,22 @@ macro_rules! router_match {
 (@@parse_param $query:expr, parse, $param:ident) => {{
 	// extract and parse mandatory query parameter
 	// both missing and un-parseable parameters are reported as errors
-	$query.$param.take().ok_or_bad_request("Missing argument for endpoint")?
+	$query.$param.take()
+		.ok_or_bad_request(
+			format!("Missing argument `{}` for endpoint", stringify!($param))
+		)?
 		.parse()
 		.map_err(|_| Error::bad_request("Failed to parse query parameter"))?
 }};
+(@@parse_param $query:expr, parse_default($default:expr), $param:ident) => {{
+	// extract and parse optional query parameter,
+	// using the provided value as default if the parameter is missing
+	$query.$param.take().map(|x| x
+		.parse()
+		.map_err(|_| Error::bad_request("Failed to parse query parameter")))
+		.transpose()?
+		.unwrap_or($default)
+}};
 (@func
 	$(#[$doc:meta])*
 	pub enum Endpoint {

@ -185,6 +262,7 @@ macro_rules! generateQueryParameters {
 		},
 	)*
 	$(
+		// FIXME: remove if !v.is_empty() ?
 		$f_param => if !v.is_empty() {
 			if res.$f_name.replace(v).is_some() {
 				return Err(Error::bad_request(format!(
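A hand-expanded approximation of what @gen_path_parser_v2 emits for two of the router_v2.rs entries, with Garage's request types replaced by strings so the sketch stands alone:

fn route(method: &str, path: &str) -> Result<&'static str, String> {
	match (method, path) {
		// `GET GetClusterStatus ()` matches the literal path "/v2/" + endpoint
		// name and constructs the unit request struct:
		("GET", "/v2/GetClusterStatus") => Ok("GetClusterStatusRequest"),
		// `POST CreateBucket (body)` instead parses the JSON request body:
		("POST", "/v2/CreateBucket") => Ok("parse_json_body::<CreateBucketRequest>(..)"),
		(m, p) => Err(format!("Unknown API endpoint: {} {}", m, p)),
	}
}

fn main() {
	assert!(route("GET", "/v2/GetClusterStatus").is_ok());
	assert!(route("DELETE", "/v2/CreateBucket").is_err());
}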
@ -1,3 +1,4 @@
+use std::borrow::Cow;
 use std::sync::Arc;

 use async_trait::async_trait;

@ -356,8 +357,8 @@ impl ApiHandler for S3ApiServer {
 }

 impl ApiEndpoint for S3ApiEndpoint {
-	fn name(&self) -> &'static str {
-		self.endpoint.name()
+	fn name(&self) -> Cow<'static, str> {
+		Cow::Borrowed(self.endpoint.name())
 	}

 	fn add_span_attributes(&self, span: SpanRef<'_>) {
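The same one-line change is mirrored in each ApiEndpoint implementor. Returning Cow<'static, str> lets the existing routers keep handing out borrowed static names at zero cost, while implementors with computed names can return owned strings; a minimal illustration:

use std::borrow::Cow;

fn static_name() -> Cow<'static, str> {
	Cow::Borrowed("GetObject") // no allocation, same as the old &'static str
}

fn dynamic_name(prefix: &str, api: &str) -> Cow<'static, str> {
	Cow::Owned(format!("{}{}", prefix, api)) // owned when composed at runtime
}

fn main() {
	assert_eq!(static_name(), "GetObject");
	assert_eq!(dynamic_name("Local", "GetWorkerVariable"), "LocalGetWorkerVariable");
}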
@ -1,500 +0,0 @@ (entire file deleted)
use std::collections::HashMap;
use std::fmt::Write;

use garage_util::crdt::*;
use garage_util::time::*;

use garage_table::*;

use garage_model::bucket_alias_table::*;
use garage_model::bucket_table::*;
use garage_model::helper::error::{Error, OkOrBadRequest};
use garage_model::permission::*;

use crate::cli::*;

use super::*;

impl AdminRpcHandler {
	pub(super) async fn handle_bucket_cmd(&self, cmd: &BucketOperation) -> Result<AdminRpc, Error> {
		match cmd {
			BucketOperation::List => self.handle_list_buckets().await,
			BucketOperation::Info(query) => self.handle_bucket_info(query).await,
			BucketOperation::Create(query) => self.handle_create_bucket(&query.name).await,
			BucketOperation::Delete(query) => self.handle_delete_bucket(query).await,
			BucketOperation::Alias(query) => self.handle_alias_bucket(query).await,
			BucketOperation::Unalias(query) => self.handle_unalias_bucket(query).await,
			BucketOperation::Allow(query) => self.handle_bucket_allow(query).await,
			BucketOperation::Deny(query) => self.handle_bucket_deny(query).await,
			BucketOperation::Website(query) => self.handle_bucket_website(query).await,
			BucketOperation::SetQuotas(query) => self.handle_bucket_set_quotas(query).await,
			BucketOperation::CleanupIncompleteUploads(query) => {
				self.handle_bucket_cleanup_incomplete_uploads(query).await
			}
		}
	}

	async fn handle_list_buckets(&self) -> Result<AdminRpc, Error> {
		let buckets = self
			.garage
			.bucket_table
			.get_range(
				&EmptyKey,
				None,
				Some(DeletedFilter::NotDeleted),
				10000,
				EnumerationOrder::Forward,
			)
			.await?;

		Ok(AdminRpc::BucketList(buckets))
	}

	async fn handle_bucket_info(&self, query: &BucketOpt) -> Result<AdminRpc, Error> {
		let bucket_id = self
			.garage
			.bucket_helper()
			.admin_get_existing_matching_bucket(&query.name)
			.await?;

		let bucket = self
			.garage
			.bucket_helper()
			.get_existing_bucket(bucket_id)
			.await?;

		let counters = self
			.garage
			.object_counter_table
			.table
			.get(&bucket_id, &EmptyKey)
			.await?
			.map(|x| x.filtered_values(&self.garage.system.cluster_layout()))
			.unwrap_or_default();

		let mpu_counters = self
			.garage
			.mpu_counter_table
			.table
			.get(&bucket_id, &EmptyKey)
			.await?
			.map(|x| x.filtered_values(&self.garage.system.cluster_layout()))
			.unwrap_or_default();

		let mut relevant_keys = HashMap::new();
		for (k, _) in bucket
			.state
			.as_option()
			.unwrap()
			.authorized_keys
			.items()
			.iter()
		{
			if let Some(key) = self
				.garage
				.key_table
				.get(&EmptyKey, k)
				.await?
				.filter(|k| !k.is_deleted())
			{
				relevant_keys.insert(k.clone(), key);
			}
		}
		for ((k, _), _, _) in bucket
			.state
			.as_option()
			.unwrap()
			.local_aliases
			.items()
			.iter()
		{
			if relevant_keys.contains_key(k) {
				continue;
			}
			if let Some(key) = self.garage.key_table.get(&EmptyKey, k).await? {
				relevant_keys.insert(k.clone(), key);
			}
		}

		Ok(AdminRpc::BucketInfo {
			bucket,
			relevant_keys,
			counters,
			mpu_counters,
		})
	}

	#[allow(clippy::ptr_arg)]
	async fn handle_create_bucket(&self, name: &String) -> Result<AdminRpc, Error> {
		if !is_valid_bucket_name(name) {
			return Err(Error::BadRequest(format!(
				"{}: {}",
				name, INVALID_BUCKET_NAME_MESSAGE
			)));
		}

		let helper = self.garage.locked_helper().await;

		if let Some(alias) = self.garage.bucket_alias_table.get(&EmptyKey, name).await? {
			if alias.state.get().is_some() {
				return Err(Error::BadRequest(format!("Bucket {} already exists", name)));
			}
		}

		// ---- done checking, now commit ----

		let bucket = Bucket::new();
		self.garage.bucket_table.insert(&bucket).await?;

		helper.set_global_bucket_alias(bucket.id, name).await?;

		Ok(AdminRpc::Ok(format!("Bucket {} was created.", name)))
	}

	async fn handle_delete_bucket(&self, query: &DeleteBucketOpt) -> Result<AdminRpc, Error> {
		let helper = self.garage.locked_helper().await;

		let bucket_id = helper
			.bucket()
			.admin_get_existing_matching_bucket(&query.name)
			.await?;

		// Get the alias, but keep in mind here that the bucket name
		// given in parameter can also be directly the bucket's ID.
		// In that case bucket_alias will be None, and
		// we can still delete the bucket if it has zero aliases
		// (a condition which we try to prevent but that could still happen somehow).
		// We just won't try to delete an alias entry because there isn't one.
		let bucket_alias = self
			.garage
			.bucket_alias_table
			.get(&EmptyKey, &query.name)
			.await?;

		// Check bucket doesn't have other aliases
		let mut bucket = helper.bucket().get_existing_bucket(bucket_id).await?;
		let bucket_state = bucket.state.as_option().unwrap();
		if bucket_state
			.aliases
			.items()
			.iter()
			.filter(|(_, _, active)| *active)
			.any(|(name, _, _)| name != &query.name)
		{
			return Err(Error::BadRequest(format!("Bucket {} still has other global aliases. Use `bucket unalias` to delete them one by one.", query.name)));
		}
		if bucket_state
			.local_aliases
			.items()
			.iter()
			.any(|(_, _, active)| *active)
		{
			return Err(Error::BadRequest(format!("Bucket {} still has other local aliases. Use `bucket unalias` to delete them one by one.", query.name)));
		}

		// Check bucket is empty
		if !helper.bucket().is_bucket_empty(bucket_id).await? {
			return Err(Error::BadRequest(format!(
				"Bucket {} is not empty",
				query.name
			)));
		}

		if !query.yes {
			return Err(Error::BadRequest(
				"Add --yes flag to really perform this operation".to_string(),
			));
		}

		// --- done checking, now commit ---
		// 1. delete authorization from keys that had access
		for (key_id, _) in bucket.authorized_keys() {
			helper
				.set_bucket_key_permissions(bucket.id, key_id, BucketKeyPerm::NO_PERMISSIONS)
				.await?;
		}

		// 2. delete bucket alias
		if bucket_alias.is_some() {
			helper
				.purge_global_bucket_alias(bucket_id, &query.name)
				.await?;
		}

		// 3. delete bucket
		bucket.state = Deletable::delete();
		self.garage.bucket_table.insert(&bucket).await?;

		Ok(AdminRpc::Ok(format!("Bucket {} was deleted.", query.name)))
	}

	async fn handle_alias_bucket(&self, query: &AliasBucketOpt) -> Result<AdminRpc, Error> {
		let helper = self.garage.locked_helper().await;

		let bucket_id = helper
			.bucket()
			.admin_get_existing_matching_bucket(&query.existing_bucket)
			.await?;

		if let Some(key_pattern) = &query.local {
			let key = helper.key().get_existing_matching_key(key_pattern).await?;

			helper
				.set_local_bucket_alias(bucket_id, &key.key_id, &query.new_name)
				.await?;
			Ok(AdminRpc::Ok(format!(
				"Alias {} now points to bucket {:?} in namespace of key {}",
				query.new_name, bucket_id, key.key_id
			)))
		} else {
			helper
				.set_global_bucket_alias(bucket_id, &query.new_name)
				.await?;
			Ok(AdminRpc::Ok(format!(
				"Alias {} now points to bucket {:?}",
				query.new_name, bucket_id
			)))
		}
	}

	async fn handle_unalias_bucket(&self, query: &UnaliasBucketOpt) -> Result<AdminRpc, Error> {
		let helper = self.garage.locked_helper().await;

		if let Some(key_pattern) = &query.local {
			let key = helper.key().get_existing_matching_key(key_pattern).await?;

			let bucket_id = key
				.state
				.as_option()
				.unwrap()
				.local_aliases
				.get(&query.name)
				.cloned()
				.flatten()
				.ok_or_bad_request("Bucket not found")?;

			helper
				.unset_local_bucket_alias(bucket_id, &key.key_id, &query.name)
				.await?;

			Ok(AdminRpc::Ok(format!(
				"Alias {} no longer points to bucket {:?} in namespace of key {}",
				&query.name, bucket_id, key.key_id
			)))
		} else {
			let bucket_id = helper
				.bucket()
				.resolve_global_bucket_name(&query.name)
				.await?
				.ok_or_bad_request("Bucket not found")?;

			helper
				.unset_global_bucket_alias(bucket_id, &query.name)
				.await?;

			Ok(AdminRpc::Ok(format!(
				"Alias {} no longer points to bucket {:?}",
				&query.name, bucket_id
			)))
		}
	}

	async fn handle_bucket_allow(&self, query: &PermBucketOpt) -> Result<AdminRpc, Error> {
		let helper = self.garage.locked_helper().await;

		let bucket_id = helper
			.bucket()
			.admin_get_existing_matching_bucket(&query.bucket)
			.await?;
		let key = helper
			.key()
			.get_existing_matching_key(&query.key_pattern)
			.await?;

		let allow_read = query.read || key.allow_read(&bucket_id);
		let allow_write = query.write || key.allow_write(&bucket_id);
		let allow_owner = query.owner || key.allow_owner(&bucket_id);

		helper
			.set_bucket_key_permissions(
				bucket_id,
				&key.key_id,
				BucketKeyPerm {
					timestamp: now_msec(),
					allow_read,
					allow_write,
					allow_owner,
				},
			)
			.await?;

		Ok(AdminRpc::Ok(format!(
			"New permissions for {} on {}: read {}, write {}, owner {}.",
			&key.key_id, &query.bucket, allow_read, allow_write, allow_owner
		)))
	}

	async fn handle_bucket_deny(&self, query: &PermBucketOpt) -> Result<AdminRpc, Error> {
		let helper = self.garage.locked_helper().await;

		let bucket_id = helper
			.bucket()
			.admin_get_existing_matching_bucket(&query.bucket)
			.await?;
		let key = helper
			.key()
			.get_existing_matching_key(&query.key_pattern)
			.await?;

		let allow_read = !query.read && key.allow_read(&bucket_id);
		let allow_write = !query.write && key.allow_write(&bucket_id);
		let allow_owner = !query.owner && key.allow_owner(&bucket_id);

		helper
			.set_bucket_key_permissions(
				bucket_id,
				&key.key_id,
				BucketKeyPerm {
					timestamp: now_msec(),
					allow_read,
					allow_write,
					allow_owner,
				},
			)
			.await?;

		Ok(AdminRpc::Ok(format!(
			"New permissions for {} on {}: read {}, write {}, owner {}.",
			&key.key_id, &query.bucket, allow_read, allow_write, allow_owner
		)))
	}

	async fn handle_bucket_website(&self, query: &WebsiteOpt) -> Result<AdminRpc, Error> {
		let bucket_id = self
			.garage
			.bucket_helper()
			.admin_get_existing_matching_bucket(&query.bucket)
			.await?;

		let mut bucket = self
			.garage
			.bucket_helper()
			.get_existing_bucket(bucket_id)
			.await?;
		let bucket_state = bucket.state.as_option_mut().unwrap();

		if !(query.allow ^ query.deny) {
			return Err(Error::BadRequest(
				"You must specify exactly one flag, either --allow or --deny".to_string(),
			));
		}

		let website = if query.allow {
			Some(WebsiteConfig {
				index_document: query.index_document.clone(),
				error_document: query.error_document.clone(),
			})
		} else {
			None
		};

		bucket_state.website_config.update(website);
		self.garage.bucket_table.insert(&bucket).await?;

		let msg = if query.allow {
			format!("Website access allowed for {}", &query.bucket)
		} else {
			format!("Website access denied for {}", &query.bucket)
		};

		Ok(AdminRpc::Ok(msg))
	}

	async fn handle_bucket_set_quotas(&self, query: &SetQuotasOpt) -> Result<AdminRpc, Error> {
		let bucket_id = self
			.garage
			.bucket_helper()
			.admin_get_existing_matching_bucket(&query.bucket)
			.await?;

		let mut bucket = self
			.garage
			.bucket_helper()
			.get_existing_bucket(bucket_id)
			.await?;
		let bucket_state = bucket.state.as_option_mut().unwrap();

		if query.max_size.is_none() && query.max_objects.is_none() {
			return Err(Error::BadRequest(
				"You must specify either --max-size or --max-objects (or both) for this command to do something.".to_string(),
			));
		}

		let mut quotas = bucket_state.quotas.get().clone();

		match query.max_size.as_ref().map(String::as_ref) {
			Some("none") => quotas.max_size = None,
			Some(v) => {
				let bs = v
					.parse::<bytesize::ByteSize>()
					.ok_or_bad_request(format!("Invalid size specified: {}", v))?;
				quotas.max_size = Some(bs.as_u64());
			}
			_ => (),
		}

		match query.max_objects.as_ref().map(String::as_ref) {
			Some("none") => quotas.max_objects = None,
			Some(v) => {
				let mo = v
					.parse::<u64>()
					.ok_or_bad_request(format!("Invalid number specified: {}", v))?;
				quotas.max_objects = Some(mo);
			}
			_ => (),
		}

		bucket_state.quotas.update(quotas);
		self.garage.bucket_table.insert(&bucket).await?;

		Ok(AdminRpc::Ok(format!(
			"Quotas updated for {}",
			&query.bucket
		)))
	}

	async fn handle_bucket_cleanup_incomplete_uploads(
		&self,
		query: &CleanupIncompleteUploadsOpt,
	) -> Result<AdminRpc, Error> {
		let mut bucket_ids = vec![];
		for b in query.buckets.iter() {
			bucket_ids.push(
				self.garage
					.bucket_helper()
					.admin_get_existing_matching_bucket(b)
					.await?,
			);
		}

		let duration = parse_duration::parse::parse(&query.older_than)
			.ok_or_bad_request("Invalid duration passed for --older-than parameter")?;

		let mut ret = String::new();
		for bucket in bucket_ids {
			let count = self
				.garage
				.bucket_helper()
				.cleanup_incomplete_uploads(&bucket, duration)
				.await?;
			writeln!(
				&mut ret,
				"Bucket {:?}: {} incomplete uploads aborted",
				bucket, count
			)
			.unwrap();
		}

		Ok(AdminRpc::Ok(ret))
	}
}
@ -1,161 +0,0 @@ (entire file deleted)
use std::collections::HashMap;

use garage_table::*;

use garage_model::helper::error::*;
use garage_model::key_table::*;

use crate::cli::*;

use super::*;

impl AdminRpcHandler {
	pub(super) async fn handle_key_cmd(&self, cmd: &KeyOperation) -> Result<AdminRpc, Error> {
		match cmd {
			KeyOperation::List => self.handle_list_keys().await,
			KeyOperation::Info(query) => self.handle_key_info(query).await,
			KeyOperation::Create(query) => self.handle_create_key(query).await,
			KeyOperation::Rename(query) => self.handle_rename_key(query).await,
			KeyOperation::Delete(query) => self.handle_delete_key(query).await,
			KeyOperation::Allow(query) => self.handle_allow_key(query).await,
			KeyOperation::Deny(query) => self.handle_deny_key(query).await,
			KeyOperation::Import(query) => self.handle_import_key(query).await,
		}
	}

	async fn handle_list_keys(&self) -> Result<AdminRpc, Error> {
		let key_ids = self
			.garage
			.key_table
			.get_range(
				&EmptyKey,
				None,
				Some(KeyFilter::Deleted(DeletedFilter::NotDeleted)),
				10000,
				EnumerationOrder::Forward,
			)
			.await?
			.iter()
			.map(|k| (k.key_id.to_string(), k.params().unwrap().name.get().clone()))
			.collect::<Vec<_>>();
		Ok(AdminRpc::KeyList(key_ids))
	}

	async fn handle_key_info(&self, query: &KeyInfoOpt) -> Result<AdminRpc, Error> {
		let mut key = self
			.garage
			.key_helper()
			.get_existing_matching_key(&query.key_pattern)
			.await?;

		if !query.show_secret {
			key.state.as_option_mut().unwrap().secret_key = "(redacted)".into();
		}

		self.key_info_result(key).await
	}

	async fn handle_create_key(&self, query: &KeyNewOpt) -> Result<AdminRpc, Error> {
		let key = Key::new(&query.name);
		self.garage.key_table.insert(&key).await?;
		self.key_info_result(key).await
	}

	async fn handle_rename_key(&self, query: &KeyRenameOpt) -> Result<AdminRpc, Error> {
		let mut key = self
			.garage
			.key_helper()
			.get_existing_matching_key(&query.key_pattern)
			.await?;
		key.params_mut()
			.unwrap()
			.name
			.update(query.new_name.clone());
		self.garage.key_table.insert(&key).await?;
		self.key_info_result(key).await
	}

	async fn handle_delete_key(&self, query: &KeyDeleteOpt) -> Result<AdminRpc, Error> {
		let helper = self.garage.locked_helper().await;

		let mut key = helper
			.key()
			.get_existing_matching_key(&query.key_pattern)
			.await?;

		if !query.yes {
			return Err(Error::BadRequest(
				"Add --yes flag to really perform this operation".to_string(),
			));
		}

		helper.delete_key(&mut key).await?;

		Ok(AdminRpc::Ok(format!(
			"Key {} was deleted successfully.",
			key.key_id
		)))
	}

	async fn handle_allow_key(&self, query: &KeyPermOpt) -> Result<AdminRpc, Error> {
		let mut key = self
			.garage
			.key_helper()
			.get_existing_matching_key(&query.key_pattern)
			.await?;
		if query.create_bucket {
			key.params_mut().unwrap().allow_create_bucket.update(true);
		}
		self.garage.key_table.insert(&key).await?;
		self.key_info_result(key).await
	}

	async fn handle_deny_key(&self, query: &KeyPermOpt) -> Result<AdminRpc, Error> {
		let mut key = self
			.garage
			.key_helper()
			.get_existing_matching_key(&query.key_pattern)
			.await?;
		if query.create_bucket {
			key.params_mut().unwrap().allow_create_bucket.update(false);
		}
		self.garage.key_table.insert(&key).await?;
		self.key_info_result(key).await
	}

	async fn handle_import_key(&self, query: &KeyImportOpt) -> Result<AdminRpc, Error> {
		if !query.yes {
			return Err(Error::BadRequest("This command is intended to re-import keys that were previously generated by Garage. If you want to create a new key, use `garage key new` instead. Add the --yes flag if you really want to re-import a key.".to_string()));
		}

		let prev_key = self.garage.key_table.get(&EmptyKey, &query.key_id).await?;
		if prev_key.is_some() {
			return Err(Error::BadRequest(format!("Key {} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. Sorry.", query.key_id)));
		}

		let imported_key = Key::import(&query.key_id, &query.secret_key, &query.name)
			.ok_or_bad_request("Invalid key format")?;
		self.garage.key_table.insert(&imported_key).await?;

		self.key_info_result(imported_key).await
	}

	async fn key_info_result(&self, key: Key) -> Result<AdminRpc, Error> {
		let mut relevant_buckets = HashMap::new();

		for (id, _) in key
			.state
			.as_option()
			.unwrap()
			.authorized_buckets
			.items()
			.iter()
		{
			if let Some(b) = self.garage.bucket_table.get(&EmptyKey, id).await? {
				relevant_buckets.insert(*id, b);
			}
		}

		Ok(AdminRpc::KeyInfo(key, relevant_buckets))
	}
}
@ -1,6 +1,4 @@
 mod block;
-mod bucket;
-mod key;

 use std::collections::HashMap;
 use std::fmt::Write;

@ -23,10 +21,8 @@ use garage_rpc::*;

 use garage_block::manager::BlockResyncErrorInfo;

-use garage_model::bucket_table::*;
 use garage_model::garage::Garage;
 use garage_model::helper::error::{Error, OkOrBadRequest};
-use garage_model::key_table::*;
 use garage_model::s3::mpu_table::MultipartUpload;
 use garage_model::s3::version_table::Version;

@ -38,8 +34,6 @@ pub const ADMIN_RPC_PATH: &str = "garage/admin_rpc.rs/Rpc";
 #[derive(Debug, Serialize, Deserialize)]
 #[allow(clippy::large_enum_variant)]
 pub enum AdminRpc {
-	BucketOperation(BucketOperation),
-	KeyOperation(KeyOperation),
 	LaunchRepair(RepairOpt),
 	Stats(StatsOpt),
 	Worker(WorkerOperation),

@ -48,20 +42,10 @@ pub enum AdminRpc {

 	// Replies
 	Ok(String),
-	BucketList(Vec<Bucket>),
-	BucketInfo {
-		bucket: Bucket,
-		relevant_keys: HashMap<String, Key>,
-		counters: HashMap<String, i64>,
-		mpu_counters: HashMap<String, i64>,
-	},
-	KeyList(Vec<(String, String)>),
-	KeyInfo(Key, HashMap<Uuid, Bucket>),
 	WorkerList(
 		HashMap<usize, garage_util::background::WorkerInfo>,
 		WorkerListOpt,
 	),
 	WorkerVars(Vec<(Uuid, String, String)>),
 	WorkerInfo(usize, garage_util::background::WorkerInfo),
 	BlockErrorList(Vec<BlockResyncErrorInfo>),
 	BlockInfo {

@ -369,101 +353,7 @@ impl AdminRpcHandler {
 				.clone();
 				Ok(AdminRpc::WorkerInfo(*tid, info))
 			}
-			WorkerOperation::Get {
-				all_nodes,
-				variable,
-			} => self.handle_get_var(*all_nodes, variable).await,
-			WorkerOperation::Set {
-				all_nodes,
-				variable,
-				value,
-			} => self.handle_set_var(*all_nodes, variable, value).await,
+			_ => unreachable!(),
 		}
 	}
-
-	async fn handle_get_var(
-		&self,
-		all_nodes: bool,
-		variable: &Option<String>,
-	) -> Result<AdminRpc, Error> {
-		if all_nodes {
-			let mut ret = vec![];
-			let all_nodes = self.garage.system.cluster_layout().all_nodes().to_vec();
-			for node in all_nodes.iter() {
-				let node = (*node).into();
-				match self
-					.endpoint
-					.call(
-						&node,
-						AdminRpc::Worker(WorkerOperation::Get {
-							all_nodes: false,
-							variable: variable.clone(),
-						}),
-						PRIO_NORMAL,
-					)
-					.await??
-				{
-					AdminRpc::WorkerVars(v) => ret.extend(v),
-					m => return Err(GarageError::unexpected_rpc_message(m).into()),
-				}
-			}
-			Ok(AdminRpc::WorkerVars(ret))
-		} else {
-			#[allow(clippy::collapsible_else_if)]
-			if let Some(v) = variable {
-				Ok(AdminRpc::WorkerVars(vec![(
-					self.garage.system.id,
-					v.clone(),
-					self.garage.bg_vars.get(v)?,
-				)]))
-			} else {
-				let mut vars = self.garage.bg_vars.get_all();
-				vars.sort();
-				Ok(AdminRpc::WorkerVars(
-					vars.into_iter()
-						.map(|(k, v)| (self.garage.system.id, k.to_string(), v))
-						.collect(),
-				))
-			}
-		}
-	}
-
-	async fn handle_set_var(
-		&self,
-		all_nodes: bool,
-		variable: &str,
-		value: &str,
-	) -> Result<AdminRpc, Error> {
-		if all_nodes {
-			let mut ret = vec![];
-			let all_nodes = self.garage.system.cluster_layout().all_nodes().to_vec();
-			for node in all_nodes.iter() {
-				let node = (*node).into();
-				match self
-					.endpoint
-					.call(
-						&node,
-						AdminRpc::Worker(WorkerOperation::Set {
-							all_nodes: false,
-							variable: variable.to_string(),
-							value: value.to_string(),
-						}),
-						PRIO_NORMAL,
-					)
-					.await??
-				{
-					AdminRpc::WorkerVars(v) => ret.extend(v),
-					m => return Err(GarageError::unexpected_rpc_message(m).into()),
-				}
-			}
-			Ok(AdminRpc::WorkerVars(ret))
-		} else {
-			self.garage.bg_vars.set(variable, value)?;
-			Ok(AdminRpc::WorkerVars(vec![(
-				self.garage.system.id,
-				variable.to_string(),
-				value.to_string(),
-			)]))
-		}
-	}

@ -513,8 +403,6 @@ impl EndpointHandler<AdminRpc> for AdminRpcHandler {
 		_from: NodeID,
 	) -> Result<AdminRpc, Error> {
 		match message {
-			AdminRpc::BucketOperation(bo) => self.handle_bucket_cmd(bo).await,
-			AdminRpc::KeyOperation(ko) => self.handle_key_cmd(ko).await,
 			AdminRpc::LaunchRepair(opt) => self.handle_launch_repair(opt.clone()).await,
 			AdminRpc::Stats(opt) => self.handle_stats(opt.clone()).await,
 			AdminRpc::Worker(wo) => self.handle_worker_cmd(wo).await,
@ -1,10 +1,5 @@
use std::collections::{HashMap, HashSet};
use std::time::Duration;

use format_table::format_table;
use garage_util::error::*;

use garage_rpc::layout::*;
use garage_rpc::system::*;
use garage_rpc::*;

@ -13,204 +8,6 @@ use garage_model::helper::error::Error as HelperError;
 use crate::admin::*;
 use crate::cli::*;

-pub async fn cli_command_dispatch(
-	cmd: Command,
-	system_rpc_endpoint: &Endpoint<SystemRpc, ()>,
-	admin_rpc_endpoint: &Endpoint<AdminRpc, ()>,
-	rpc_host: NodeID,
-) -> Result<(), HelperError> {
-	match cmd {
-		Command::Status => Ok(cmd_status(system_rpc_endpoint, rpc_host).await?),
-		Command::Node(NodeOperation::Connect(connect_opt)) => {
-			Ok(cmd_connect(system_rpc_endpoint, rpc_host, connect_opt).await?)
-		}
-		Command::Layout(layout_opt) => {
-			Ok(cli_layout_command_dispatch(layout_opt, system_rpc_endpoint, rpc_host).await?)
-		}
-		Command::Bucket(bo) => {
-			cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::BucketOperation(bo)).await
-		}
-		Command::Key(ko) => {
-			cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::KeyOperation(ko)).await
-		}
-		Command::Repair(ro) => {
-			cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::LaunchRepair(ro)).await
-		}
-		Command::Stats(so) => cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::Stats(so)).await,
-		Command::Worker(wo) => cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::Worker(wo)).await,
-		Command::Block(bo) => {
-			cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::BlockOperation(bo)).await
-		}
-		Command::Meta(mo) => {
-			cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::MetaOperation(mo)).await
-		}
-		_ => unreachable!(),
-	}
-}
-
-pub async fn cmd_status(rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID) -> Result<(), Error> {
-	let status = fetch_status(rpc_cli, rpc_host).await?;
-	let layout = fetch_layout(rpc_cli, rpc_host).await?;
-
-	println!("==== HEALTHY NODES ====");
-	let mut healthy_nodes =
-		vec!["ID\tHostname\tAddress\tTags\tZone\tCapacity\tDataAvail".to_string()];
-	for adv in status.iter().filter(|adv| adv.is_up) {
-		let host = adv.status.hostname.as_deref().unwrap_or("?");
-		let addr = match adv.addr {
-			Some(addr) => addr.to_string(),
-			None => "N/A".to_string(),
-		};
-		if let Some(NodeRoleV(Some(cfg))) = layout.current().roles.get(&adv.id) {
-			let data_avail = match &adv.status.data_disk_avail {
-				_ if cfg.capacity.is_none() => "N/A".into(),
-				Some((avail, total)) => {
-					let pct = (*avail as f64) / (*total as f64) * 100.;
-					let avail = bytesize::ByteSize::b(*avail);
-					format!("{} ({:.1}%)", avail, pct)
-				}
-				None => "?".into(),
-			};
-			healthy_nodes.push(format!(
-				"{id:?}\t{host}\t{addr}\t[{tags}]\t{zone}\t{capacity}\t{data_avail}",
-				id = adv.id,
-				host = host,
-				addr = addr,
-				tags = cfg.tags.join(","),
-				zone = cfg.zone,
-				capacity = cfg.capacity_string(),
-				data_avail = data_avail,
-			));
-		} else {
-			let prev_role = layout
-				.versions
-				.iter()
-				.rev()
-				.find_map(|x| match x.roles.get(&adv.id) {
-					Some(NodeRoleV(Some(cfg))) => Some(cfg),
-					_ => None,
-				});
-			if let Some(cfg) = prev_role {
-				healthy_nodes.push(format!(
-					"{id:?}\t{host}\t{addr}\t[{tags}]\t{zone}\tdraining metadata...",
-					id = adv.id,
-					host = host,
-					addr = addr,
-					tags = cfg.tags.join(","),
-					zone = cfg.zone,
-				));
-			} else {
-				let new_role = match layout.staging.get().roles.get(&adv.id) {
-					Some(NodeRoleV(Some(_))) => "pending...",
-					_ => "NO ROLE ASSIGNED",
-				};
-				healthy_nodes.push(format!(
-					"{id:?}\t{h}\t{addr}\t\t\t{new_role}",
-					id = adv.id,
-					h = host,
-					addr = addr,
-					new_role = new_role,
-				));
-			}
-		}
-	}
-	format_table(healthy_nodes);
-
-	// Determine which nodes are unhealthy and print that to stdout
-	let status_map = status
-		.iter()
-		.map(|adv| (adv.id, adv))
-		.collect::<HashMap<_, _>>();
-
-	let tf = timeago::Formatter::new();
-	let mut drain_msg = false;
-	let mut failed_nodes = vec!["ID\tHostname\tTags\tZone\tCapacity\tLast seen".to_string()];
-	let mut listed = HashSet::new();
-	for ver in layout.versions.iter().rev() {
-		for (node, _, role) in ver.roles.items().iter() {
-			let cfg = match role {
-				NodeRoleV(Some(role)) if role.capacity.is_some() => role,
-				_ => continue,
-			};
-
-			if listed.contains(node) {
-				continue;
-			}
-			listed.insert(*node);
-
-			let adv = status_map.get(node);
-			if adv.map(|x| x.is_up).unwrap_or(false) {
-				continue;
-			}
-
-			// Node is in a layout version, is not a gateway node, and is not up:
-			// it is in a failed state, add proper line to the output
-			let (host, last_seen) = match adv {
-				Some(adv) => (
-					adv.status.hostname.as_deref().unwrap_or("?"),
-					adv.last_seen_secs_ago
-						.map(|s| tf.convert(Duration::from_secs(s)))
-						.unwrap_or_else(|| "never seen".into()),
-				),
-				None => ("??", "never seen".into()),
-			};
-			let capacity = if ver.version == layout.current().version {
-				cfg.capacity_string()
-			} else {
-				drain_msg = true;
-				"draining metadata...".to_string()
-			};
-			failed_nodes.push(format!(
-				"{id:?}\t{host}\t[{tags}]\t{zone}\t{capacity}\t{last_seen}",
-				id = node,
-				host = host,
-				tags = cfg.tags.join(","),
-				zone = cfg.zone,
-				capacity = capacity,
-				last_seen = last_seen,
-			));
-		}
-	}
-
-	if failed_nodes.len() > 1 {
-		println!("\n==== FAILED NODES ====");
-		format_table(failed_nodes);
-		if drain_msg {
-			println!();
-			println!("Your cluster is expecting to drain data from nodes that are currently unavailable.");
-			println!("If these nodes are definitely dead, please review the layout history with");
-			println!(
-				"`garage layout history` and use `garage layout skip-dead-nodes` to force progress."
-			);
-		}
-	}
-
-	if print_staging_role_changes(&layout) {
-		println!();
-		println!("Please use `garage layout show` to check the proposed new layout and apply it.");
-		println!();
-	}
-
-	Ok(())
-}
-
-pub async fn cmd_connect(
-	rpc_cli: &Endpoint<SystemRpc, ()>,
-	rpc_host: NodeID,
-	args: ConnectNodeOpt,
-) -> Result<(), Error> {
-	match rpc_cli
-		.call(&rpc_host, SystemRpc::Connect(args.node), PRIO_NORMAL)
-		.await??
-	{
-		SystemRpc::Ok => {
-			println!("Success.");
-			Ok(())
-		}
-		m => Err(Error::unexpected_rpc_message(m)),
-	}
-}
-
 pub async fn cmd_admin(
 	rpc_cli: &Endpoint<AdminRpc, ()>,
 	rpc_host: NodeID,

@ -220,29 +17,9 @@ pub async fn cmd_admin(
 		AdminRpc::Ok(msg) => {
 			println!("{}", msg);
 		}
-		AdminRpc::BucketList(bl) => {
-			print_bucket_list(bl);
-		}
-		AdminRpc::BucketInfo {
-			bucket,
-			relevant_keys,
-			counters,
-			mpu_counters,
-		} => {
-			print_bucket_info(&bucket, &relevant_keys, &counters, &mpu_counters);
-		}
-		AdminRpc::KeyList(kl) => {
-			print_key_list(kl);
-		}
-		AdminRpc::KeyInfo(key, rb) => {
-			print_key_info(&key, &rb);
-		}
 		AdminRpc::WorkerList(wi, wlo) => {
 			print_worker_list(wi, wlo);
 		}
 		AdminRpc::WorkerVars(wv) => {
 			print_worker_vars(wv);
 		}
 		AdminRpc::WorkerInfo(tid, wi) => {
 			print_worker_info(tid, wi);
 		}
@ -1,7 +1,6 @@
 use bytesize::ByteSize;

 use format_table::format_table;
-use garage_util::crdt::Crdt;
 use garage_util::error::*;

 use garage_rpc::layout::*;

@ -10,174 +9,6 @@ use garage_rpc::*;

 use crate::cli::*;

-pub async fn cli_layout_command_dispatch(
-	cmd: LayoutOperation,
-	system_rpc_endpoint: &Endpoint<SystemRpc, ()>,
-	rpc_host: NodeID,
-) -> Result<(), Error> {
-	match cmd {
-		LayoutOperation::Assign(assign_opt) => {
-			cmd_assign_role(system_rpc_endpoint, rpc_host, assign_opt).await
-		}
-		LayoutOperation::Remove(remove_opt) => {
-			cmd_remove_role(system_rpc_endpoint, rpc_host, remove_opt).await
-		}
-		LayoutOperation::Show => cmd_show_layout(system_rpc_endpoint, rpc_host).await,
-		LayoutOperation::Apply(apply_opt) => {
-			cmd_apply_layout(system_rpc_endpoint, rpc_host, apply_opt).await
-		}
-		LayoutOperation::Revert(revert_opt) => {
-			cmd_revert_layout(system_rpc_endpoint, rpc_host, revert_opt).await
-		}
-		LayoutOperation::Config(config_opt) => {
-			cmd_config_layout(system_rpc_endpoint, rpc_host, config_opt).await
-		}
-		LayoutOperation::History => cmd_layout_history(system_rpc_endpoint, rpc_host).await,
-		LayoutOperation::SkipDeadNodes(assume_sync_opt) => {
-			cmd_layout_skip_dead_nodes(system_rpc_endpoint, rpc_host, assume_sync_opt).await
-		}
-	}
-}
-
-pub async fn cmd_assign_role(
-	rpc_cli: &Endpoint<SystemRpc, ()>,
-	rpc_host: NodeID,
-	args: AssignRoleOpt,
-) -> Result<(), Error> {
-	let status = match rpc_cli
-		.call(&rpc_host, SystemRpc::GetKnownNodes, PRIO_NORMAL)
-		.await??
-	{
-		SystemRpc::ReturnKnownNodes(nodes) => nodes,
-		resp => return Err(Error::Message(format!("Invalid RPC response: {:?}", resp))),
-	};
-
-	let mut layout = fetch_layout(rpc_cli, rpc_host).await?;
-	let all_nodes = layout.get_all_nodes();
-
-	let added_nodes = args
-		.node_ids
-		.iter()
-		.map(|node_id| {
-			find_matching_node(
-				status
-					.iter()
-					.map(|adv| adv.id)
-					.chain(all_nodes.iter().cloned()),
-				node_id,
-			)
-		})
-		.collect::<Result<Vec<_>, _>>()?;
-
-	let mut roles = layout.current().roles.clone();
-	roles.merge(&layout.staging.get().roles);
-
-	for replaced in args.replace.iter() {
-		let replaced_node = find_matching_node(all_nodes.iter().cloned(), replaced)?;
-		match roles.get(&replaced_node) {
-			Some(NodeRoleV(Some(_))) => {
-				layout
-					.staging
-					.get_mut()
-					.roles
-					.merge(&roles.update_mutator(replaced_node, NodeRoleV(None)));
-			}
-			_ => {
-				return Err(Error::Message(format!(
-					"Cannot replace node {:?} as it is not currently in planned layout",
-					replaced_node
-				)));
-			}
-		}
-	}
-
-	if args.capacity.is_some() && args.gateway {
-		return Err(Error::Message(
-			"-c and -g are mutually exclusive, please configure node either with c>0 to act as a storage node or with -g to act as a gateway node".into()));
-	}
-	if args.capacity == Some(ByteSize::b(0)) {
-		return Err(Error::Message("Invalid capacity value: 0".into()));
-	}
-
-	for added_node in added_nodes {
-		let new_entry = match roles.get(&added_node) {
-			Some(NodeRoleV(Some(old))) => {
-				let capacity = match args.capacity {
-					Some(c) => Some(c.as_u64()),
-					None if args.gateway => None,
-					None => old.capacity,
-				};
-				let tags = if args.tags.is_empty() {
-					old.tags.clone()
||||
} else {
|
||||
args.tags.clone()
|
||||
};
|
||||
NodeRole {
|
||||
zone: args.zone.clone().unwrap_or_else(|| old.zone.to_string()),
|
||||
capacity,
|
||||
tags,
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
let capacity = match args.capacity {
|
||||
Some(c) => Some(c.as_u64()),
|
||||
None if args.gateway => None,
|
||||
None => return Err(Error::Message(
|
||||
"Please specify a capacity with the -c flag, or set node explicitly as gateway with -g".into())),
|
||||
};
|
||||
NodeRole {
|
||||
zone: args
|
||||
.zone
|
||||
.clone()
|
||||
.ok_or("Please specify a zone with the -z flag")?,
|
||||
capacity,
|
||||
tags: args.tags.clone(),
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
layout
|
||||
.staging
|
||||
.get_mut()
|
||||
.roles
|
||||
.merge(&roles.update_mutator(added_node, NodeRoleV(Some(new_entry))));
|
||||
}
|
||||
|
||||
send_layout(rpc_cli, rpc_host, layout).await?;
|
||||
|
||||
println!("Role changes are staged but not yet committed.");
|
||||
println!("Use `garage layout show` to view staged role changes,");
|
||||
println!("and `garage layout apply` to enact staged changes.");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn cmd_remove_role(
|
||||
rpc_cli: &Endpoint<SystemRpc, ()>,
|
||||
rpc_host: NodeID,
|
||||
args: RemoveRoleOpt,
|
||||
) -> Result<(), Error> {
|
||||
let mut layout = fetch_layout(rpc_cli, rpc_host).await?;
|
||||
|
||||
let mut roles = layout.current().roles.clone();
|
||||
roles.merge(&layout.staging.get().roles);
|
||||
|
||||
let deleted_node =
|
||||
find_matching_node(roles.items().iter().map(|(id, _, _)| *id), &args.node_id)?;
|
||||
|
||||
layout
|
||||
.staging
|
||||
.get_mut()
|
||||
.roles
|
||||
.merge(&roles.update_mutator(deleted_node, NodeRoleV(None)));
|
||||
|
||||
send_layout(rpc_cli, rpc_host, layout).await?;
|
||||
|
||||
println!("Role removal is staged but not yet committed.");
|
||||
println!("Use `garage layout show` to view staged role changes,");
|
||||
println!("and `garage layout apply` to enact staged changes.");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn cmd_show_layout(
|
||||
rpc_cli: &Endpoint<SystemRpc, ()>,
|
||||
rpc_host: NodeID,
|
||||
|
@ -226,47 +57,6 @@ pub async fn cmd_show_layout(
|
|||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn cmd_apply_layout(
|
||||
rpc_cli: &Endpoint<SystemRpc, ()>,
|
||||
rpc_host: NodeID,
|
||||
apply_opt: ApplyLayoutOpt,
|
||||
) -> Result<(), Error> {
|
||||
let layout = fetch_layout(rpc_cli, rpc_host).await?;
|
||||
|
||||
let (layout, msg) = layout.apply_staged_changes(apply_opt.version)?;
|
||||
for line in msg.iter() {
|
||||
println!("{}", line);
|
||||
}
|
||||
|
||||
send_layout(rpc_cli, rpc_host, layout).await?;
|
||||
|
||||
println!("New cluster layout with updated role assignment has been applied in cluster.");
|
||||
println!("Data will now be moved around between nodes accordingly.");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn cmd_revert_layout(
|
||||
rpc_cli: &Endpoint<SystemRpc, ()>,
|
||||
rpc_host: NodeID,
|
||||
revert_opt: RevertLayoutOpt,
|
||||
) -> Result<(), Error> {
|
||||
if !revert_opt.yes {
|
||||
return Err(Error::Message(
|
||||
"Please add the --yes flag to run the layout revert operation".into(),
|
||||
));
|
||||
}
|
||||
|
||||
let layout = fetch_layout(rpc_cli, rpc_host).await?;
|
||||
|
||||
let layout = layout.revert_staged_changes()?;
|
||||
|
||||
send_layout(rpc_cli, rpc_host, layout).await?;
|
||||
|
||||
println!("All proposed role changes in cluster layout have been canceled.");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn cmd_config_layout(
|
||||
rpc_cli: &Endpoint<SystemRpc, ()>,
|
||||
rpc_host: NodeID,
|
||||
|
|
|
@ -8,6 +8,5 @@ pub(crate) mod convert_db;
|
|||
|
||||
pub(crate) use cmd::*;
|
||||
pub(crate) use init::*;
|
||||
pub(crate) use layout::*;
|
||||
pub(crate) use structs::*;
|
||||
pub(crate) use util::*;
|
||||
|
|
|
@ -3,257 +3,16 @@ use std::time::Duration;
|
|||
|
||||
use format_table::format_table;
|
||||
use garage_util::background::*;
|
||||
use garage_util::crdt::*;
|
||||
use garage_util::data::*;
|
||||
use garage_util::error::*;
|
||||
use garage_util::time::*;
|
||||
|
||||
use garage_block::manager::BlockResyncErrorInfo;
|
||||
|
||||
use garage_model::bucket_table::*;
|
||||
use garage_model::key_table::*;
|
||||
use garage_model::s3::mpu_table::{self, MultipartUpload};
|
||||
use garage_model::s3::object_table;
|
||||
use garage_model::s3::mpu_table::MultipartUpload;
|
||||
use garage_model::s3::version_table::*;
|
||||
|
||||
use crate::cli::structs::WorkerListOpt;
|
||||
|
||||
pub fn print_bucket_list(bl: Vec<Bucket>) {
|
||||
println!("List of buckets:");
|
||||
|
||||
let mut table = vec![];
|
||||
for bucket in bl {
|
||||
let aliases = bucket
|
||||
.aliases()
|
||||
.iter()
|
||||
.filter(|(_, _, active)| *active)
|
||||
.map(|(name, _, _)| name.to_string())
|
||||
.collect::<Vec<_>>();
|
||||
let local_aliases_n = match &bucket
|
||||
.local_aliases()
|
||||
.iter()
|
||||
.filter(|(_, _, active)| *active)
|
||||
.collect::<Vec<_>>()[..]
|
||||
{
|
||||
[] => "".into(),
|
||||
[((k, n), _, _)] => format!("{}:{}", k, n),
|
||||
s => format!("[{} local aliases]", s.len()),
|
||||
};
|
||||
|
||||
table.push(format!(
|
||||
"\t{}\t{}\t{}",
|
||||
aliases.join(","),
|
||||
local_aliases_n,
|
||||
hex::encode(bucket.id),
|
||||
));
|
||||
}
|
||||
format_table(table);
|
||||
}
|
||||
|
||||
pub fn print_key_list(kl: Vec<(String, String)>) {
|
||||
println!("List of keys:");
|
||||
let mut table = vec![];
|
||||
for key in kl {
|
||||
table.push(format!("\t{}\t{}", key.0, key.1));
|
||||
}
|
||||
format_table(table);
|
||||
}
|
||||
|
||||
pub fn print_key_info(key: &Key, relevant_buckets: &HashMap<Uuid, Bucket>) {
|
||||
let bucket_global_aliases = |b: &Uuid| {
|
||||
if let Some(bucket) = relevant_buckets.get(b) {
|
||||
if let Some(p) = bucket.state.as_option() {
|
||||
return p
|
||||
.aliases
|
||||
.items()
|
||||
.iter()
|
||||
.filter(|(_, _, active)| *active)
|
||||
.map(|(a, _, _)| a.clone())
|
||||
.collect::<Vec<_>>()
|
||||
.join(", ");
|
||||
}
|
||||
}
|
||||
|
||||
"".to_string()
|
||||
};
|
||||
|
||||
match &key.state {
|
||||
Deletable::Present(p) => {
|
||||
println!("Key name: {}", p.name.get());
|
||||
println!("Key ID: {}", key.key_id);
|
||||
println!("Secret key: {}", p.secret_key);
|
||||
println!("Can create buckets: {}", p.allow_create_bucket.get());
|
||||
println!("\nKey-specific bucket aliases:");
|
||||
let mut table = vec![];
|
||||
for (alias_name, _, alias) in p.local_aliases.items().iter() {
|
||||
if let Some(bucket_id) = alias {
|
||||
table.push(format!(
|
||||
"\t{}\t{}\t{}",
|
||||
alias_name,
|
||||
bucket_global_aliases(bucket_id),
|
||||
hex::encode(bucket_id)
|
||||
));
|
||||
}
|
||||
}
|
||||
format_table(table);
|
||||
|
||||
println!("\nAuthorized buckets:");
|
||||
let mut table = vec![];
|
||||
for (bucket_id, perm) in p.authorized_buckets.items().iter() {
|
||||
if !perm.is_any() {
|
||||
continue;
|
||||
}
|
||||
let rflag = if perm.allow_read { "R" } else { " " };
|
||||
let wflag = if perm.allow_write { "W" } else { " " };
|
||||
let oflag = if perm.allow_owner { "O" } else { " " };
|
||||
let local_aliases = p
|
||||
.local_aliases
|
||||
.items()
|
||||
.iter()
|
||||
.filter(|(_, _, a)| *a == Some(*bucket_id))
|
||||
.map(|(a, _, _)| a.clone())
|
||||
.collect::<Vec<_>>()
|
||||
.join(", ");
|
||||
table.push(format!(
|
||||
"\t{}{}{}\t{}\t{}\t{:?}",
|
||||
rflag,
|
||||
wflag,
|
||||
oflag,
|
||||
bucket_global_aliases(bucket_id),
|
||||
local_aliases,
|
||||
bucket_id
|
||||
));
|
||||
}
|
||||
format_table(table);
|
||||
}
|
||||
Deletable::Deleted => {
|
||||
println!("Key {} is deleted.", key.key_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn print_bucket_info(
|
||||
bucket: &Bucket,
|
||||
relevant_keys: &HashMap<String, Key>,
|
||||
counters: &HashMap<String, i64>,
|
||||
mpu_counters: &HashMap<String, i64>,
|
||||
) {
|
||||
let key_name = |k| {
|
||||
relevant_keys
|
||||
.get(k)
|
||||
.map(|k| k.params().unwrap().name.get().as_str())
|
||||
.unwrap_or("<deleted>")
|
||||
};
|
||||
|
||||
println!("Bucket: {}", hex::encode(bucket.id));
|
||||
match &bucket.state {
|
||||
Deletable::Deleted => println!("Bucket is deleted."),
|
||||
Deletable::Present(p) => {
|
||||
let size =
|
||||
bytesize::ByteSize::b(*counters.get(object_table::BYTES).unwrap_or(&0) as u64);
|
||||
println!(
|
||||
"\nSize: {} ({})",
|
||||
size.to_string_as(true),
|
||||
size.to_string_as(false)
|
||||
);
|
||||
println!(
|
||||
"Objects: {}",
|
||||
*counters.get(object_table::OBJECTS).unwrap_or(&0)
|
||||
);
|
||||
println!(
|
||||
"Unfinished uploads (multipart and non-multipart): {}",
|
||||
*counters.get(object_table::UNFINISHED_UPLOADS).unwrap_or(&0)
|
||||
);
|
||||
println!(
|
||||
"Unfinished multipart uploads: {}",
|
||||
*mpu_counters.get(mpu_table::UPLOADS).unwrap_or(&0)
|
||||
);
|
||||
let mpu_size =
|
||||
bytesize::ByteSize::b(*mpu_counters.get(mpu_table::BYTES).unwrap_or(&0) as u64);
|
||||
println!(
|
||||
"Size of unfinished multipart uploads: {} ({})",
|
||||
mpu_size.to_string_as(true),
|
||||
mpu_size.to_string_as(false),
|
||||
);
|
||||
|
||||
println!("\nWebsite access: {}", p.website_config.get().is_some());
|
||||
|
||||
let quotas = p.quotas.get();
|
||||
if quotas.max_size.is_some() || quotas.max_objects.is_some() {
|
||||
println!("\nQuotas:");
|
||||
if let Some(ms) = quotas.max_size {
|
||||
let ms = bytesize::ByteSize::b(ms);
|
||||
println!(
|
||||
" maximum size: {} ({})",
|
||||
ms.to_string_as(true),
|
||||
ms.to_string_as(false)
|
||||
);
|
||||
}
|
||||
if let Some(mo) = quotas.max_objects {
|
||||
println!(" maximum number of objects: {}", mo);
|
||||
}
|
||||
}
|
||||
|
||||
println!("\nGlobal aliases:");
|
||||
for (alias, _, active) in p.aliases.items().iter() {
|
||||
if *active {
|
||||
println!(" {}", alias);
|
||||
}
|
||||
}
|
||||
|
||||
println!("\nKey-specific aliases:");
|
||||
let mut table = vec![];
|
||||
for ((key_id, alias), _, active) in p.local_aliases.items().iter() {
|
||||
if *active {
|
||||
table.push(format!("\t{} ({})\t{}", key_id, key_name(key_id), alias));
|
||||
}
|
||||
}
|
||||
format_table(table);
|
||||
|
||||
println!("\nAuthorized keys:");
|
||||
let mut table = vec![];
|
||||
for (k, perm) in p.authorized_keys.items().iter() {
|
||||
if !perm.is_any() {
|
||||
continue;
|
||||
}
|
||||
let rflag = if perm.allow_read { "R" } else { " " };
|
||||
let wflag = if perm.allow_write { "W" } else { " " };
|
||||
let oflag = if perm.allow_owner { "O" } else { " " };
|
||||
table.push(format!(
|
||||
"\t{}{}{}\t{}\t{}",
|
||||
rflag,
|
||||
wflag,
|
||||
oflag,
|
||||
k,
|
||||
key_name(k)
|
||||
));
|
||||
}
|
||||
format_table(table);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
pub fn find_matching_node(
|
||||
cand: impl std::iter::Iterator<Item = Uuid>,
|
||||
pattern: &str,
|
||||
) -> Result<Uuid, Error> {
|
||||
let mut candidates = vec![];
|
||||
for c in cand {
|
||||
if hex::encode(c).starts_with(pattern) && !candidates.contains(&c) {
|
||||
candidates.push(c);
|
||||
}
|
||||
}
|
||||
if candidates.len() != 1 {
|
||||
Err(Error::Message(format!(
|
||||
"{} nodes match '{}'",
|
||||
candidates.len(),
|
||||
pattern,
|
||||
)))
|
||||
} else {
|
||||
Ok(candidates[0])
|
||||
}
|
||||
}
|
||||
|
||||
pub fn print_worker_list(wi: HashMap<usize, WorkerInfo>, wlo: WorkerListOpt) {
|
||||
let mut wi = wi.into_iter().collect::<Vec<_>>();
|
||||
wi.sort_by_key(|(tid, info)| {
|
||||
|
@ -367,14 +126,6 @@ pub fn print_worker_info(tid: usize, info: WorkerInfo) {
|
|||
format_table(table);
|
||||
}
|
||||
|
||||
pub fn print_worker_vars(wv: Vec<(Uuid, String, String)>) {
|
||||
let table = wv
|
||||
.into_iter()
|
||||
.map(|(n, k, v)| format!("{:?}\t{}\t{}", n, k, v))
|
||||
.collect::<Vec<_>>();
|
||||
format_table(table);
|
||||
}
|
||||
|
||||
pub fn print_block_error_list(el: Vec<BlockResyncErrorInfo>) {
|
||||
let now = now_msec();
|
||||
let tf = timeago::Formatter::new();
|
||||
|
|
549
src/garage/cli_v2/bucket.rs
Normal file
549
src/garage/cli_v2/bucket.rs
Normal file
|
@ -0,0 +1,549 @@
|
|||
//use bytesize::ByteSize;
|
||||
use format_table::format_table;
|
||||
|
||||
use garage_util::error::*;
|
||||
|
||||
use garage_api::admin::api::*;
|
||||
|
||||
use crate::cli::structs::*;
|
||||
use crate::cli_v2::*;
|
||||
|
||||
impl Cli {
|
||||
pub async fn cmd_bucket(&self, cmd: BucketOperation) -> Result<(), Error> {
|
||||
match cmd {
|
||||
BucketOperation::List => self.cmd_list_buckets().await,
|
||||
BucketOperation::Info(query) => self.cmd_bucket_info(query).await,
|
||||
BucketOperation::Create(query) => self.cmd_create_bucket(query).await,
|
||||
BucketOperation::Delete(query) => self.cmd_delete_bucket(query).await,
|
||||
BucketOperation::Alias(query) => self.cmd_alias_bucket(query).await,
|
||||
BucketOperation::Unalias(query) => self.cmd_unalias_bucket(query).await,
|
||||
BucketOperation::Allow(query) => self.cmd_bucket_allow(query).await,
|
||||
BucketOperation::Deny(query) => self.cmd_bucket_deny(query).await,
|
||||
BucketOperation::Website(query) => self.cmd_bucket_website(query).await,
|
||||
BucketOperation::SetQuotas(query) => self.cmd_bucket_set_quotas(query).await,
|
||||
BucketOperation::CleanupIncompleteUploads(query) => {
|
||||
self.cmd_cleanup_incomplete_uploads(query).await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn cmd_list_buckets(&self) -> Result<(), Error> {
|
||||
let buckets = self.api_request(ListBucketsRequest).await?;
|
||||
|
||||
println!("List of buckets:");
|
||||
|
||||
let mut table = vec![];
|
||||
for bucket in buckets.0.iter() {
|
||||
let local_aliases_n = match &bucket.local_aliases[..] {
|
||||
[] => "".into(),
|
||||
[alias] => format!("{}:{}", alias.access_key_id, alias.alias),
|
||||
s => format!("[{} local aliases]", s.len()),
|
||||
};
|
||||
|
||||
table.push(format!(
|
||||
"\t{}\t{}\t{}",
|
||||
bucket.global_aliases.join(","),
|
||||
local_aliases_n,
|
||||
bucket.id,
|
||||
));
|
||||
}
|
||||
format_table(table);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn cmd_bucket_info(&self, opt: BucketOpt) -> Result<(), Error> {
|
||||
let bucket = self
|
||||
.api_request(GetBucketInfoRequest {
|
||||
id: None,
|
||||
global_alias: None,
|
||||
search: Some(opt.name),
|
||||
})
|
||||
.await?;
|
||||
|
||||
println!("Bucket: {}", bucket.id);
|
||||
|
||||
let size = bytesize::ByteSize::b(bucket.bytes as u64);
|
||||
println!(
|
||||
"\nSize: {} ({})",
|
||||
size.to_string_as(true),
|
||||
size.to_string_as(false)
|
||||
);
|
||||
println!("Objects: {}", bucket.objects);
|
||||
println!(
|
||||
"Unfinished uploads (multipart and non-multipart): {}",
|
||||
bucket.unfinished_uploads,
|
||||
);
|
||||
println!(
|
||||
"Unfinished multipart uploads: {}",
|
||||
bucket.unfinished_multipart_uploads
|
||||
);
|
||||
let mpu_size = bytesize::ByteSize::b(bucket.unfinished_multipart_uploads as u64);
|
||||
println!(
|
||||
"Size of unfinished multipart uploads: {} ({})",
|
||||
mpu_size.to_string_as(true),
|
||||
mpu_size.to_string_as(false),
|
||||
);
|
||||
|
||||
println!("\nWebsite access: {}", bucket.website_access);
|
||||
|
||||
if bucket.quotas.max_size.is_some() || bucket.quotas.max_objects.is_some() {
|
||||
println!("\nQuotas:");
|
||||
if let Some(ms) = bucket.quotas.max_size {
|
||||
let ms = bytesize::ByteSize::b(ms);
|
||||
println!(
|
||||
" maximum size: {} ({})",
|
||||
ms.to_string_as(true),
|
||||
ms.to_string_as(false)
|
||||
);
|
||||
}
|
||||
if let Some(mo) = bucket.quotas.max_objects {
|
||||
println!(" maximum number of objects: {}", mo);
|
||||
}
|
||||
}
|
||||
|
||||
println!("\nGlobal aliases:");
|
||||
for alias in bucket.global_aliases {
|
||||
println!(" {}", alias);
|
||||
}
|
||||
|
||||
println!("\nKey-specific aliases:");
|
||||
let mut table = vec![];
|
||||
for key in bucket.keys.iter() {
|
||||
for alias in key.bucket_local_aliases.iter() {
|
||||
table.push(format!("\t{} ({})\t{}", key.access_key_id, key.name, alias));
|
||||
}
|
||||
}
|
||||
format_table(table);
|
||||
|
||||
println!("\nAuthorized keys:");
|
||||
let mut table = vec![];
|
||||
for key in bucket.keys.iter() {
|
||||
if !(key.permissions.read || key.permissions.write || key.permissions.owner) {
|
||||
continue;
|
||||
}
|
||||
let rflag = if key.permissions.read { "R" } else { " " };
|
||||
let wflag = if key.permissions.write { "W" } else { " " };
|
||||
let oflag = if key.permissions.owner { "O" } else { " " };
|
||||
table.push(format!(
|
||||
"\t{}{}{}\t{}\t{}",
|
||||
rflag, wflag, oflag, key.access_key_id, key.name
|
||||
));
|
||||
}
|
||||
format_table(table);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn cmd_create_bucket(&self, opt: BucketOpt) -> Result<(), Error> {
|
||||
self.api_request(CreateBucketRequest {
|
||||
global_alias: Some(opt.name.clone()),
|
||||
local_alias: None,
|
||||
})
|
||||
.await?;
|
||||
|
||||
println!("Bucket {} was created.", opt.name);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn cmd_delete_bucket(&self, opt: DeleteBucketOpt) -> Result<(), Error> {
|
||||
let bucket = self
|
||||
.api_request(GetBucketInfoRequest {
|
||||
id: None,
|
||||
global_alias: None,
|
||||
search: Some(opt.name.clone()),
|
||||
})
|
||||
.await?;
|
||||
|
||||
// CLI-only checks: the bucket must not have other aliases
|
||||
if bucket
|
||||
.global_aliases
|
||||
.iter()
|
||||
.find(|a| **a != opt.name)
|
||||
.is_some()
|
||||
{
|
||||
return Err(Error::Message(format!("Bucket {} still has other global aliases. Use `bucket unalias` to delete them one by one.", opt.name)));
|
||||
}
|
||||
|
||||
if bucket
|
||||
.keys
|
||||
.iter()
|
||||
.any(|k| !k.bucket_local_aliases.is_empty())
|
||||
{
|
||||
return Err(Error::Message(format!("Bucket {} still has other local aliases. Use `bucket unalias` to delete them one by one.", opt.name)));
|
||||
}
|
||||
|
||||
if !opt.yes {
|
||||
println!("About to delete bucket {}.", bucket.id);
|
||||
return Err(Error::Message(
|
||||
"Add --yes flag to really perform this operation".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
self.api_request(DeleteBucketRequest {
|
||||
id: bucket.id.clone(),
|
||||
})
|
||||
.await?;
|
||||
|
||||
println!("Bucket {} has been deleted.", bucket.id);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn cmd_alias_bucket(&self, opt: AliasBucketOpt) -> Result<(), Error> {
|
||||
let bucket = self
|
||||
.api_request(GetBucketInfoRequest {
|
||||
id: None,
|
||||
global_alias: None,
|
||||
search: Some(opt.existing_bucket.clone()),
|
||||
})
|
||||
.await?;
|
||||
|
||||
if let Some(key_pat) = &opt.local {
|
||||
let key = self
|
||||
.api_request(GetKeyInfoRequest {
|
||||
search: Some(key_pat.clone()),
|
||||
id: None,
|
||||
show_secret_key: false,
|
||||
})
|
||||
.await?;
|
||||
|
||||
self.api_request(AddBucketAliasRequest {
|
||||
bucket_id: bucket.id.clone(),
|
||||
alias: BucketAliasEnum::Local {
|
||||
local_alias: opt.new_name.clone(),
|
||||
access_key_id: key.access_key_id.clone(),
|
||||
},
|
||||
})
|
||||
.await?;
|
||||
|
||||
println!(
|
||||
"Alias {} now points to bucket {:.16} in namespace of key {}",
|
||||
opt.new_name, bucket.id, key.access_key_id
|
||||
)
|
||||
} else {
|
||||
self.api_request(AddBucketAliasRequest {
|
||||
bucket_id: bucket.id.clone(),
|
||||
alias: BucketAliasEnum::Global {
|
||||
global_alias: opt.new_name.clone(),
|
||||
},
|
||||
})
|
||||
.await?;
|
||||
|
||||
println!(
|
||||
"Alias {} now points to bucket {:.16}",
|
||||
opt.new_name, bucket.id
|
||||
)
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn cmd_unalias_bucket(&self, opt: UnaliasBucketOpt) -> Result<(), Error> {
|
||||
if let Some(key_pat) = &opt.local {
|
||||
let key = self
|
||||
.api_request(GetKeyInfoRequest {
|
||||
search: Some(key_pat.clone()),
|
||||
id: None,
|
||||
show_secret_key: false,
|
||||
})
|
||||
.await?;
|
||||
|
||||
let bucket = key
|
||||
.buckets
|
||||
.iter()
|
||||
.find(|x| x.local_aliases.contains(&opt.name))
|
||||
.ok_or_message(format!(
|
||||
"No bucket called {} in namespace of key {}",
|
||||
opt.name, key.access_key_id
|
||||
))?;
|
||||
|
||||
self.api_request(RemoveBucketAliasRequest {
|
||||
bucket_id: bucket.id.clone(),
|
||||
alias: BucketAliasEnum::Local {
|
||||
access_key_id: key.access_key_id.clone(),
|
||||
local_alias: opt.name.clone(),
|
||||
},
|
||||
})
|
||||
.await?;
|
||||
|
||||
println!(
|
||||
"Alias {} no longer points to bucket {:.16} in namespace of key {}",
|
||||
&opt.name, bucket.id, key.access_key_id
|
||||
)
|
||||
} else {
|
||||
let bucket = self
|
||||
.api_request(GetBucketInfoRequest {
|
||||
id: None,
|
||||
global_alias: Some(opt.name.clone()),
|
||||
search: None,
|
||||
})
|
||||
.await?;
|
||||
|
||||
self.api_request(RemoveBucketAliasRequest {
|
||||
bucket_id: bucket.id.clone(),
|
||||
alias: BucketAliasEnum::Global {
|
||||
global_alias: opt.name.clone(),
|
||||
},
|
||||
})
|
||||
.await?;
|
||||
|
||||
println!(
|
||||
"Alias {} no longer points to bucket {:.16}",
|
||||
opt.name, bucket.id
|
||||
)
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn cmd_bucket_allow(&self, opt: PermBucketOpt) -> Result<(), Error> {
|
||||
let bucket = self
|
||||
.api_request(GetBucketInfoRequest {
|
||||
id: None,
|
||||
global_alias: None,
|
||||
search: Some(opt.bucket.clone()),
|
||||
})
|
||||
.await?;
|
||||
|
||||
let key = self
|
||||
.api_request(GetKeyInfoRequest {
|
||||
id: None,
|
||||
search: Some(opt.key_pattern.clone()),
|
||||
show_secret_key: false,
|
||||
})
|
||||
.await?;
|
||||
|
||||
self.api_request(AllowBucketKeyRequest(BucketKeyPermChangeRequest {
|
||||
bucket_id: bucket.id.clone(),
|
||||
access_key_id: key.access_key_id.clone(),
|
||||
permissions: ApiBucketKeyPerm {
|
||||
read: opt.read,
|
||||
write: opt.write,
|
||||
owner: opt.owner,
|
||||
},
|
||||
}))
|
||||
.await?;
|
||||
|
||||
let new_bucket = self
|
||||
.api_request(GetBucketInfoRequest {
|
||||
id: Some(bucket.id),
|
||||
global_alias: None,
|
||||
search: None,
|
||||
})
|
||||
.await?;
|
||||
|
||||
if let Some(new_key) = new_bucket
|
||||
.keys
|
||||
.iter()
|
||||
.find(|k| k.access_key_id == key.access_key_id)
|
||||
{
|
||||
println!(
|
||||
"New permissions for key {} on bucket {:.16}:\n read {}\n write {}\n owner {}",
|
||||
key.access_key_id,
|
||||
new_bucket.id,
|
||||
new_key.permissions.read,
|
||||
new_key.permissions.write,
|
||||
new_key.permissions.owner
|
||||
);
|
||||
} else {
|
||||
println!(
|
||||
"Access key {} has no permissions on bucket {:.16}",
|
||||
key.access_key_id, new_bucket.id
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn cmd_bucket_deny(&self, opt: PermBucketOpt) -> Result<(), Error> {
|
||||
let bucket = self
|
||||
.api_request(GetBucketInfoRequest {
|
||||
id: None,
|
||||
global_alias: None,
|
||||
search: Some(opt.bucket.clone()),
|
||||
})
|
||||
.await?;
|
||||
|
||||
let key = self
|
||||
.api_request(GetKeyInfoRequest {
|
||||
id: None,
|
||||
search: Some(opt.key_pattern.clone()),
|
||||
show_secret_key: false,
|
||||
})
|
||||
.await?;
|
||||
|
||||
self.api_request(DenyBucketKeyRequest(BucketKeyPermChangeRequest {
|
||||
bucket_id: bucket.id.clone(),
|
||||
access_key_id: key.access_key_id.clone(),
|
||||
permissions: ApiBucketKeyPerm {
|
||||
read: opt.read,
|
||||
write: opt.write,
|
||||
owner: opt.owner,
|
||||
},
|
||||
}))
|
||||
.await?;
|
||||
|
||||
let new_bucket = self
|
||||
.api_request(GetBucketInfoRequest {
|
||||
id: Some(bucket.id),
|
||||
global_alias: None,
|
||||
search: None,
|
||||
})
|
||||
.await?;
|
||||
|
||||
if let Some(new_key) = new_bucket
|
||||
.keys
|
||||
.iter()
|
||||
.find(|k| k.access_key_id == key.access_key_id)
|
||||
{
|
||||
println!(
|
||||
"New permissions for key {} on bucket {:.16}:\n read {}\n write {}\n owner {}",
|
||||
key.access_key_id,
|
||||
new_bucket.id,
|
||||
new_key.permissions.read,
|
||||
new_key.permissions.write,
|
||||
new_key.permissions.owner
|
||||
);
|
||||
} else {
|
||||
println!(
|
||||
"Access key {} no longer has permissions on bucket {:.16}",
|
||||
key.access_key_id, new_bucket.id
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn cmd_bucket_website(&self, opt: WebsiteOpt) -> Result<(), Error> {
|
||||
let bucket = self
|
||||
.api_request(GetBucketInfoRequest {
|
||||
id: None,
|
||||
global_alias: None,
|
||||
search: Some(opt.bucket.clone()),
|
||||
})
|
||||
.await?;
|
||||
|
||||
if !(opt.allow ^ opt.deny) {
|
||||
return Err(Error::Message(
|
||||
"You must specify exactly one flag, either --allow or --deny".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
let wa = if opt.allow {
|
||||
UpdateBucketWebsiteAccess {
|
||||
enabled: true,
|
||||
index_document: Some(opt.index_document.clone()),
|
||||
error_document: opt
|
||||
.error_document
|
||||
.or(bucket.website_config.and_then(|x| x.error_document.clone())),
|
||||
}
|
||||
} else {
|
||||
UpdateBucketWebsiteAccess {
|
||||
enabled: false,
|
||||
index_document: None,
|
||||
error_document: None,
|
||||
}
|
||||
};
|
||||
|
||||
self.api_request(UpdateBucketRequest {
|
||||
id: bucket.id,
|
||||
body: UpdateBucketRequestBody {
|
||||
website_access: Some(wa),
|
||||
quotas: None,
|
||||
},
|
||||
})
|
||||
.await?;
|
||||
|
||||
if opt.allow {
|
||||
println!("Website access allowed for {}", &opt.bucket);
|
||||
} else {
|
||||
println!("Website access denied for {}", &opt.bucket);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn cmd_bucket_set_quotas(&self, opt: SetQuotasOpt) -> Result<(), Error> {
|
||||
let bucket = self
|
||||
.api_request(GetBucketInfoRequest {
|
||||
id: None,
|
||||
global_alias: None,
|
||||
search: Some(opt.bucket.clone()),
|
||||
})
|
||||
.await?;
|
||||
|
||||
if opt.max_size.is_none() && opt.max_objects.is_none() {
|
||||
return Err(Error::Message(
|
||||
"You must specify either --max-size or --max-objects (or both) for this command to do something.".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
let new_quotas = ApiBucketQuotas {
|
||||
max_size: match opt.max_size.as_deref() {
|
||||
Some("none") => None,
|
||||
Some(v) => Some(
|
||||
v.parse::<bytesize::ByteSize>()
|
||||
.ok_or_message(format!("Invalid size specified: {}", v))?
|
||||
.as_u64(),
|
||||
),
|
||||
None => bucket.quotas.max_size,
|
||||
},
|
||||
max_objects: match opt.max_objects.as_deref() {
|
||||
Some("none") => None,
|
||||
Some(v) => Some(
|
||||
v.parse::<u64>()
|
||||
.ok_or_message(format!("Invalid number: {}", v))?,
|
||||
),
|
||||
None => bucket.quotas.max_objects,
|
||||
},
|
||||
};
|
||||
|
||||
self.api_request(UpdateBucketRequest {
|
||||
id: bucket.id.clone(),
|
||||
body: UpdateBucketRequestBody {
|
||||
website_access: None,
|
||||
quotas: Some(new_quotas),
|
||||
},
|
||||
})
|
||||
.await?;
|
||||
|
||||
println!("Quotas updated for bucket {:.16}", bucket.id);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn cmd_cleanup_incomplete_uploads(
|
||||
&self,
|
||||
opt: CleanupIncompleteUploadsOpt,
|
||||
) -> Result<(), Error> {
|
||||
let older_than = parse_duration::parse::parse(&opt.older_than)
|
||||
.ok_or_message("Invalid duration passed for --older-than parameter")?;
|
||||
|
||||
for b in opt.buckets.iter() {
|
||||
let bucket = self
|
||||
.api_request(GetBucketInfoRequest {
|
||||
id: None,
|
||||
global_alias: None,
|
||||
search: Some(b.clone()),
|
||||
})
|
||||
.await?;
|
||||
|
||||
let res = self
|
||||
.api_request(CleanupIncompleteUploadsRequest {
|
||||
bucket_id: bucket.id.clone(),
|
||||
older_than_secs: older_than.as_secs(),
|
||||
})
|
||||
.await?;
|
||||
|
||||
if res.uploads_deleted > 0 {
|
||||
println!("{:.16}: {} uploads deleted", bucket.id, res.uploads_deleted);
|
||||
} else {
|
||||
println!("{:.16}: no uploads deleted", bucket.id);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
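Note on `cmd_bucket_set_quotas` above: the quota arguments are tri-state. The literal string `none` clears a quota, any other value is parsed as a new limit, and omitting the flag keeps the bucket's current setting. A minimal standalone sketch of that rule (hypothetical helper for illustration, not part of the patch; only the `bytesize` crate is assumed):

// Tri-state quota rule: Some("none") clears the quota, Some(v) parses
// a new value, None leaves the currently configured quota untouched.
fn parse_max_size(arg: Option<&str>, current: Option<u64>) -> Result<Option<u64>, String> {
	match arg {
		Some("none") => Ok(None),
		Some(v) => v
			.parse::<bytesize::ByteSize>()
			.map(|s| Some(s.as_u64()))
			.map_err(|_| format!("Invalid size specified: {}", v)),
		None => Ok(current),
	}
}

So `--max-size none` maps to `Ok(None)`, while leaving the flag out returns `current` unchanged, which is why the command refuses to run when neither `--max-size` nor `--max-objects` is given.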
158
src/garage/cli_v2/cluster.rs
Normal file

@ -0,0 +1,158 @@
use format_table::format_table;

use garage_util::error::*;

use garage_api::admin::api::*;

use crate::cli::structs::*;
use crate::cli_v2::layout::*;
use crate::cli_v2::*;

impl Cli {
	pub async fn cmd_status(&self) -> Result<(), Error> {
		let status = self.api_request(GetClusterStatusRequest).await?;
		let layout = self.api_request(GetClusterLayoutRequest).await?;

		println!("==== HEALTHY NODES ====");

		let mut healthy_nodes =
			vec!["ID\tHostname\tAddress\tTags\tZone\tCapacity\tDataAvail".to_string()];

		for adv in status.nodes.iter().filter(|adv| adv.is_up) {
			let host = adv.hostname.as_deref().unwrap_or("?");
			let addr = match adv.addr {
				Some(addr) => addr.to_string(),
				None => "N/A".to_string(),
			};
			if let Some(cfg) = &adv.role {
				let data_avail = match &adv.data_partition {
					_ if cfg.capacity.is_none() => "N/A".into(),
					Some(FreeSpaceResp { available, total }) => {
						let pct = (*available as f64) / (*total as f64) * 100.;
						let avail_str = bytesize::ByteSize::b(*available);
						format!("{} ({:.1}%)", avail_str, pct)
					}
					None => "?".into(),
				};
				healthy_nodes.push(format!(
					"{id:.16}\t{host}\t{addr}\t[{tags}]\t{zone}\t{capacity}\t{data_avail}",
					id = adv.id,
					host = host,
					addr = addr,
					tags = cfg.tags.join(","),
					zone = cfg.zone,
					capacity = capacity_string(cfg.capacity),
					data_avail = data_avail,
				));
			} else {
				let status = match layout.staged_role_changes.iter().find(|x| x.id == adv.id) {
					Some(NodeRoleChange {
						action: NodeRoleChangeEnum::Update { .. },
						..
					}) => "pending...",
					_ if adv.draining => "draining metadata..",
					_ => "NO ROLE ASSIGNED",
				};
				healthy_nodes.push(format!(
					"{id:.16}\t{h}\t{addr}\t\t\t{status}",
					id = adv.id,
					h = host,
					addr = addr,
					status = status,
				));
			}
		}
		format_table(healthy_nodes);

		let tf = timeago::Formatter::new();
		let mut drain_msg = false;
		let mut failed_nodes = vec!["ID\tHostname\tTags\tZone\tCapacity\tLast seen".to_string()];
		for adv in status.nodes.iter().filter(|x| !x.is_up) {
			let node = &adv.id;

			let host = adv.hostname.as_deref().unwrap_or("?");
			let last_seen = adv
				.last_seen_secs_ago
				.map(|s| tf.convert(Duration::from_secs(s)))
				.unwrap_or_else(|| "never seen".into());

			if let Some(cfg) = &adv.role {
				let capacity = capacity_string(cfg.capacity);

				failed_nodes.push(format!(
					"{id:.16}\t{host}\t[{tags}]\t{zone}\t{capacity}\t{last_seen}",
					id = node,
					host = host,
					tags = cfg.tags.join(","),
					zone = cfg.zone,
					capacity = capacity,
					last_seen = last_seen,
				));
			} else {
				let status = match layout.staged_role_changes.iter().find(|x| x.id == adv.id) {
					Some(NodeRoleChange {
						action: NodeRoleChangeEnum::Update { .. },
						..
					}) => "pending...",
					_ if adv.draining => {
						drain_msg = true;
						"draining metadata.."
					}
					_ => unreachable!(),
				};

				failed_nodes.push(format!(
					"{id:.16}\t{host}\t\t\t{status}\t{last_seen}",
					id = node,
					host = host,
					status = status,
					last_seen = last_seen,
				));
			}
		}

		if failed_nodes.len() > 1 {
			println!("\n==== FAILED NODES ====");
			format_table(failed_nodes);
			if drain_msg {
				println!();
				println!("Your cluster is expecting to drain data from nodes that are currently unavailable.");
				println!(
					"If these nodes are definitely dead, please review the layout history with"
				);
				println!(
					"`garage layout history` and use `garage layout skip-dead-nodes` to force progress."
				);
			}
		}

		if print_staging_role_changes(&layout) {
			println!();
			println!(
				"Please use `garage layout show` to check the proposed new layout and apply it."
			);
			println!();
		}

		Ok(())
	}

	pub async fn cmd_connect(&self, opt: ConnectNodeOpt) -> Result<(), Error> {
		let res = self
			.api_request(ConnectClusterNodesRequest(vec![opt.node]))
			.await?;
		if res.0.len() != 1 {
			return Err(Error::Message(format!("unexpected response: {:?}", res)));
		}
		let res = res.0.into_iter().next().unwrap();
		if res.success {
			println!("Success.");
			Ok(())
		} else {
			Err(Error::Message(format!(
				"Failure: {}",
				res.error.unwrap_or_default()
			)))
		}
	}
}
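The DataAvail column in `cmd_status` above is derived from a `FreeSpaceResp`: the free byte count plus the free fraction of the data partition, rounded to one decimal. A self-contained sketch of the same formatting, assuming only the `bytesize` crate (note that a zero `total` is not guarded against here, just as in the code above):

// Format available disk space as e.g. "12.3 GB (45.6%)", as in cmd_status.
fn format_data_avail(available: u64, total: u64) -> String {
	let pct = (available as f64) / (total as f64) * 100.;
	format!("{} ({:.1}%)", bytesize::ByteSize::b(available), pct)
}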
227
src/garage/cli_v2/key.rs
Normal file

@ -0,0 +1,227 @@
use format_table::format_table;

use garage_util::error::*;

use garage_api::admin::api::*;

use crate::cli::structs::*;
use crate::cli_v2::*;

impl Cli {
	pub async fn cmd_key(&self, cmd: KeyOperation) -> Result<(), Error> {
		match cmd {
			KeyOperation::List => self.cmd_list_keys().await,
			KeyOperation::Info(query) => self.cmd_key_info(query).await,
			KeyOperation::Create(query) => self.cmd_create_key(query).await,
			KeyOperation::Rename(query) => self.cmd_rename_key(query).await,
			KeyOperation::Delete(query) => self.cmd_delete_key(query).await,
			KeyOperation::Allow(query) => self.cmd_allow_key(query).await,
			KeyOperation::Deny(query) => self.cmd_deny_key(query).await,
			KeyOperation::Import(query) => self.cmd_import_key(query).await,
		}
	}

	pub async fn cmd_list_keys(&self) -> Result<(), Error> {
		let keys = self.api_request(ListKeysRequest).await?;

		println!("List of keys:");
		let mut table = vec![];
		for key in keys.0.iter() {
			table.push(format!("\t{}\t{}", key.id, key.name));
		}
		format_table(table);

		Ok(())
	}

	pub async fn cmd_key_info(&self, opt: KeyInfoOpt) -> Result<(), Error> {
		let key = self
			.api_request(GetKeyInfoRequest {
				id: None,
				search: Some(opt.key_pattern),
				show_secret_key: opt.show_secret,
			})
			.await?;

		print_key_info(&key);

		Ok(())
	}

	pub async fn cmd_create_key(&self, opt: KeyNewOpt) -> Result<(), Error> {
		let key = self
			.api_request(CreateKeyRequest {
				name: Some(opt.name),
			})
			.await?;

		print_key_info(&key.0);

		Ok(())
	}

	pub async fn cmd_rename_key(&self, opt: KeyRenameOpt) -> Result<(), Error> {
		let key = self
			.api_request(GetKeyInfoRequest {
				id: None,
				search: Some(opt.key_pattern),
				show_secret_key: false,
			})
			.await?;

		let new_key = self
			.api_request(UpdateKeyRequest {
				id: key.access_key_id,
				body: UpdateKeyRequestBody {
					name: Some(opt.new_name),
					allow: None,
					deny: None,
				},
			})
			.await?;

		print_key_info(&new_key.0);

		Ok(())
	}

	pub async fn cmd_delete_key(&self, opt: KeyDeleteOpt) -> Result<(), Error> {
		let key = self
			.api_request(GetKeyInfoRequest {
				id: None,
				search: Some(opt.key_pattern),
				show_secret_key: false,
			})
			.await?;

		if !opt.yes {
			println!("About to delete key {}...", key.access_key_id);
			return Err(Error::Message(
				"Add --yes flag to really perform this operation".to_string(),
			));
		}

		self.api_request(DeleteKeyRequest {
			id: key.access_key_id.clone(),
		})
		.await?;

		println!("Access key {} has been deleted.", key.access_key_id);

		Ok(())
	}

	pub async fn cmd_allow_key(&self, opt: KeyPermOpt) -> Result<(), Error> {
		let key = self
			.api_request(GetKeyInfoRequest {
				id: None,
				search: Some(opt.key_pattern),
				show_secret_key: false,
			})
			.await?;

		let new_key = self
			.api_request(UpdateKeyRequest {
				id: key.access_key_id,
				body: UpdateKeyRequestBody {
					name: None,
					allow: Some(KeyPerm {
						create_bucket: opt.create_bucket,
					}),
					deny: None,
				},
			})
			.await?;

		print_key_info(&new_key.0);

		Ok(())
	}

	pub async fn cmd_deny_key(&self, opt: KeyPermOpt) -> Result<(), Error> {
		let key = self
			.api_request(GetKeyInfoRequest {
				id: None,
				search: Some(opt.key_pattern),
				show_secret_key: false,
			})
			.await?;

		let new_key = self
			.api_request(UpdateKeyRequest {
				id: key.access_key_id,
				body: UpdateKeyRequestBody {
					name: None,
					allow: None,
					deny: Some(KeyPerm {
						create_bucket: opt.create_bucket,
					}),
				},
			})
			.await?;

		print_key_info(&new_key.0);

		Ok(())
	}

	pub async fn cmd_import_key(&self, opt: KeyImportOpt) -> Result<(), Error> {
		if !opt.yes {
			return Err(Error::Message("This command is intended to re-import keys that were previously generated by Garage. If you want to create a new key, use `garage key new` instead. Add the --yes flag if you really want to re-import a key.".to_string()));
		}

		let new_key = self
			.api_request(ImportKeyRequest {
				name: Some(opt.name),
				access_key_id: opt.key_id,
				secret_access_key: opt.secret_key,
			})
			.await?;

		print_key_info(&new_key.0);

		Ok(())
	}
}

fn print_key_info(key: &GetKeyInfoResponse) {
	println!("Key name: {}", key.name);
	println!("Key ID: {}", key.access_key_id);
	println!(
		"Secret key: {}",
		key.secret_access_key.as_deref().unwrap_or("(redacted)")
	);
	println!("Can create buckets: {}", key.permissions.create_bucket);

	println!("\nKey-specific bucket aliases:");
	let mut table = vec![];
	for bucket in key.buckets.iter() {
		for la in bucket.local_aliases.iter() {
			table.push(format!(
				"\t{}\t{}\t{}",
				la,
				bucket.global_aliases.join(","),
				bucket.id
			));
		}
	}
	format_table(table);

	println!("\nAuthorized buckets:");
	let mut table = vec![];
	for bucket in key.buckets.iter() {
		let rflag = if bucket.permissions.read { "R" } else { " " };
		let wflag = if bucket.permissions.write { "W" } else { " " };
		let oflag = if bucket.permissions.owner { "O" } else { " " };
		table.push(format!(
			"\t{}{}{}\t{}\t{}\t{:.16}",
			rflag,
			wflag,
			oflag,
			bucket.global_aliases.join(","),
			bucket.local_aliases.join(","),
			bucket.id
		));
	}
	format_table(table);
}
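Both `print_key_info` here and the bucket commands build their permission column the same way: one character per granted flag, with a space as placeholder so the columns stay aligned under `format_table`. The pattern, factored into a hypothetical helper for illustration (not part of the patch):

// Build the "RWO" permission column: each flag prints as its letter
// when granted, or as a space otherwise, keeping the column width fixed.
fn perm_flags(read: bool, write: bool, owner: bool) -> String {
	format!(
		"{}{}{}",
		if read { "R" } else { " " },
		if write { "W" } else { " " },
		if owner { "O" } else { " " },
	)
}

For example, `perm_flags(true, false, true)` yields `"R O"`, so read-only and owner keys still line up in the same three-character slot.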
284
src/garage/cli_v2/layout.rs
Normal file

@ -0,0 +1,284 @@
use bytesize::ByteSize;
use format_table::format_table;

use garage_util::error::*;

use garage_api::admin::api::*;

use crate::cli::layout as cli_v1;
use crate::cli::structs::*;
use crate::cli_v2::*;

impl Cli {
	pub async fn layout_command_dispatch(&self, cmd: LayoutOperation) -> Result<(), Error> {
		match cmd {
			LayoutOperation::Assign(assign_opt) => self.cmd_assign_role(assign_opt).await,
			LayoutOperation::Remove(remove_opt) => self.cmd_remove_role(remove_opt).await,
			LayoutOperation::Apply(apply_opt) => self.cmd_apply_layout(apply_opt).await,
			LayoutOperation::Revert(revert_opt) => self.cmd_revert_layout(revert_opt).await,

			// TODO
			LayoutOperation::Show => {
				cli_v1::cmd_show_layout(&self.system_rpc_endpoint, self.rpc_host).await
			}
			LayoutOperation::Config(config_opt) => {
				cli_v1::cmd_config_layout(&self.system_rpc_endpoint, self.rpc_host, config_opt)
					.await
			}
			LayoutOperation::History => {
				cli_v1::cmd_layout_history(&self.system_rpc_endpoint, self.rpc_host).await
			}
			LayoutOperation::SkipDeadNodes(assume_sync_opt) => {
				cli_v1::cmd_layout_skip_dead_nodes(
					&self.system_rpc_endpoint,
					self.rpc_host,
					assume_sync_opt,
				)
				.await
			}
		}
	}

	pub async fn cmd_assign_role(&self, opt: AssignRoleOpt) -> Result<(), Error> {
		let status = self.api_request(GetClusterStatusRequest).await?;
		let layout = self.api_request(GetClusterLayoutRequest).await?;

		let all_node_ids_iter = status
			.nodes
			.iter()
			.map(|x| x.id.as_str())
			.chain(layout.roles.iter().map(|x| x.id.as_str()));

		let mut actions = vec![];

		for node in opt.replace.iter() {
			let id = find_matching_node(all_node_ids_iter.clone(), &node)?;

			actions.push(NodeRoleChange {
				id,
				action: NodeRoleChangeEnum::Remove { remove: true },
			});
		}

		for node in opt.node_ids.iter() {
			let id = find_matching_node(all_node_ids_iter.clone(), &node)?;

			let current = get_staged_or_current_role(&id, &layout);

			let zone = opt
				.zone
				.clone()
				.or_else(|| current.as_ref().map(|c| c.zone.clone()))
				.ok_or_message("Please specify a zone with the -z flag")?;

			let capacity = if opt.gateway {
				if opt.capacity.is_some() {
					return Err(Error::Message("Please specify only -c or -g".into()));
				}
				None
			} else if let Some(cap) = opt.capacity {
				Some(cap.as_u64())
			} else {
				current.as_ref().ok_or_message("Please specify a capacity with the -c flag, or set node explicitly as gateway with -g")?.capacity
			};

			let tags = if !opt.tags.is_empty() {
				opt.tags.clone()
			} else if let Some(cur) = current.as_ref() {
				cur.tags.clone()
			} else {
				vec![]
			};

			actions.push(NodeRoleChange {
				id,
				action: NodeRoleChangeEnum::Update {
					zone,
					capacity,
					tags,
				},
			});
		}

		self.api_request(UpdateClusterLayoutRequest(actions))
			.await?;

		println!("Role changes are staged but not yet committed.");
		println!("Use `garage layout show` to view staged role changes,");
		println!("and `garage layout apply` to enact staged changes.");
		Ok(())
	}

	pub async fn cmd_remove_role(&self, opt: RemoveRoleOpt) -> Result<(), Error> {
		let status = self.api_request(GetClusterStatusRequest).await?;
		let layout = self.api_request(GetClusterLayoutRequest).await?;

		let all_node_ids_iter = status
			.nodes
			.iter()
			.map(|x| x.id.as_str())
			.chain(layout.roles.iter().map(|x| x.id.as_str()));

		let id = find_matching_node(all_node_ids_iter.clone(), &opt.node_id)?;

		let actions = vec![NodeRoleChange {
			id,
			action: NodeRoleChangeEnum::Remove { remove: true },
		}];

		self.api_request(UpdateClusterLayoutRequest(actions))
			.await?;

		println!("Role removal is staged but not yet committed.");
		println!("Use `garage layout show` to view staged role changes,");
		println!("and `garage layout apply` to enact staged changes.");
		Ok(())
	}

	pub async fn cmd_apply_layout(&self, apply_opt: ApplyLayoutOpt) -> Result<(), Error> {
		let missing_version_error = r#"
Please pass the new layout version number to ensure that you are writing the correct version of the cluster layout.
To know the correct value of the new layout version, invoke `garage layout show` and review the proposed changes.
"#;

		let req = ApplyClusterLayoutRequest {
			version: apply_opt.version.ok_or_message(missing_version_error)?,
		};
		let res = self.api_request(req).await?;

		for line in res.message.iter() {
			println!("{}", line);
		}

		println!("New cluster layout with updated role assignment has been applied in cluster.");
		println!("Data will now be moved around between nodes accordingly.");

		Ok(())
	}

	pub async fn cmd_revert_layout(&self, revert_opt: RevertLayoutOpt) -> Result<(), Error> {
		if !revert_opt.yes {
			return Err(Error::Message(
				"Please add the --yes flag to run the layout revert operation".into(),
			));
		}

		self.api_request(RevertClusterLayoutRequest).await?;

		println!("All proposed role changes in cluster layout have been canceled.");
		Ok(())
	}
}

// --------------------------
// ---- helper functions ----
// --------------------------

pub fn capacity_string(v: Option<u64>) -> String {
	match v {
		Some(c) => ByteSize::b(c).to_string_as(false),
		None => "gateway".to_string(),
	}
}

pub fn get_staged_or_current_role(
	id: &str,
	layout: &GetClusterLayoutResponse,
) -> Option<NodeRoleResp> {
	for node in layout.staged_role_changes.iter() {
		if node.id == id {
			return match &node.action {
				NodeRoleChangeEnum::Remove { .. } => None,
				NodeRoleChangeEnum::Update {
					zone,
					capacity,
					tags,
				} => Some(NodeRoleResp {
					id: id.to_string(),
					zone: zone.to_string(),
					capacity: *capacity,
					tags: tags.clone(),
				}),
			};
		}
	}

	for node in layout.roles.iter() {
		if node.id == id {
			return Some(node.clone());
		}
	}

	None
}

pub fn find_matching_node<'a>(
	cand: impl std::iter::Iterator<Item = &'a str>,
	pattern: &'a str,
) -> Result<String, Error> {
	let mut candidates = vec![];
	for c in cand {
		if c.starts_with(pattern) && !candidates.contains(&c) {
			candidates.push(c);
		}
	}
	if candidates.len() != 1 {
		Err(Error::Message(format!(
			"{} nodes match '{}'",
			candidates.len(),
			pattern,
		)))
	} else {
		Ok(candidates[0].to_string())
	}
}

pub fn print_staging_role_changes(layout: &GetClusterLayoutResponse) -> bool {
	let has_role_changes = !layout.staged_role_changes.is_empty();

	// TODO!! Layout parameters
	let has_layout_changes = false;

	if has_role_changes || has_layout_changes {
		println!();
		println!("==== STAGED ROLE CHANGES ====");
		if has_role_changes {
			let mut table = vec!["ID\tTags\tZone\tCapacity".to_string()];
			for change in layout.staged_role_changes.iter() {
				match &change.action {
					NodeRoleChangeEnum::Update {
						tags,
						zone,
						capacity,
					} => {
						let tags = tags.join(",");
						table.push(format!(
							"{:.16}\t{}\t{}\t{}",
							change.id,
							tags,
							zone,
							capacity_string(*capacity),
						));
					}
					NodeRoleChangeEnum::Remove { .. } => {
						table.push(format!("{:.16}\tREMOVED", change.id));
					}
				}
			}
			format_table(table);
			println!();
		}
		//TODO
		/*
		if has_layout_changes {
			println!(
				"Zone redundancy: {}",
				staging.parameters.get().zone_redundancy
			);
		}
		*/
		true
	} else {
		false
	}
}
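`find_matching_node` above implements prefix resolution: a pattern is accepted only when it selects exactly one candidate, so both zero matches and an ambiguous prefix are errors. A standalone sketch of the same rule (hypothetical names, slices instead of iterators, plain `String` errors):

// Prefix resolution as in find_matching_node: the pattern must select
// exactly one candidate; zero or several matches are rejected.
fn resolve_prefix<'a>(cands: &[&'a str], pattern: &str) -> Result<&'a str, String> {
	let matches: Vec<&str> = cands
		.iter()
		.copied()
		.filter(|c| c.starts_with(pattern))
		.collect();
	match matches.as_slice() {
		&[single] => Ok(single),
		_ => Err(format!("{} nodes match '{}'", matches.len(), pattern)),
	}
}

With hypothetical ids `["cafe01", "caffe2"]`, the pattern `"ca"` is rejected as ambiguous (2 matches) while `"cafe"` resolves to the first id; the same behavior lets operators abbreviate node ids on the command line.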
106
src/garage/cli_v2/mod.rs
Normal file

@ -0,0 +1,106 @@
pub mod bucket;
pub mod cluster;
pub mod key;
pub mod layout;

pub mod worker;

use std::convert::TryFrom;
use std::sync::Arc;
use std::time::Duration;

use garage_util::error::*;

use garage_rpc::system::*;
use garage_rpc::*;

use garage_api::admin::api::*;
use garage_api::admin::api_server::{AdminRpc as ProxyRpc, AdminRpcResponse as ProxyRpcResponse};
use garage_api::admin::RequestHandler as AdminApiEndpoint;

use crate::admin::*;
use crate::cli as cli_v1;
use crate::cli::structs::*;
use crate::cli::Command;

pub struct Cli {
	pub system_rpc_endpoint: Arc<Endpoint<SystemRpc, ()>>,
	pub admin_rpc_endpoint: Arc<Endpoint<AdminRpc, ()>>,
	pub proxy_rpc_endpoint: Arc<Endpoint<ProxyRpc, ()>>,
	pub rpc_host: NodeID,
}

impl Cli {
	pub async fn handle(&self, cmd: Command) -> Result<(), Error> {
		match cmd {
			Command::Status => self.cmd_status().await,
			Command::Node(NodeOperation::Connect(connect_opt)) => {
				self.cmd_connect(connect_opt).await
			}
			Command::Layout(layout_opt) => self.layout_command_dispatch(layout_opt).await,
			Command::Bucket(bo) => self.cmd_bucket(bo).await,
			Command::Key(ko) => self.cmd_key(ko).await,
			Command::Worker(wo) => self.cmd_worker(wo).await,

			// TODO
			Command::Repair(ro) => cli_v1::cmd_admin(
				&self.admin_rpc_endpoint,
				self.rpc_host,
				AdminRpc::LaunchRepair(ro),
			)
			.await
			.ok_or_message("cli_v1"),
			Command::Stats(so) => {
				cli_v1::cmd_admin(&self.admin_rpc_endpoint, self.rpc_host, AdminRpc::Stats(so))
					.await
					.ok_or_message("cli_v1")
			}
			Command::Block(bo) => cli_v1::cmd_admin(
				&self.admin_rpc_endpoint,
				self.rpc_host,
				AdminRpc::BlockOperation(bo),
			)
			.await
			.ok_or_message("cli_v1"),
			Command::Meta(mo) => cli_v1::cmd_admin(
				&self.admin_rpc_endpoint,
				self.rpc_host,
				AdminRpc::MetaOperation(mo),
			)
			.await
			.ok_or_message("cli_v1"),

			_ => unreachable!(),
		}
	}

	pub async fn api_request<T>(&self, req: T) -> Result<<T as AdminApiEndpoint>::Response, Error>
	where
		T: AdminApiEndpoint,
		AdminApiRequest: From<T>,
		<T as AdminApiEndpoint>::Response: TryFrom<TaggedAdminApiResponse>,
	{
		let req = AdminApiRequest::from(req);
		let req_name = req.name();
		match self
			.proxy_rpc_endpoint
			.call(&self.rpc_host, ProxyRpc::Proxy(req), PRIO_NORMAL)
			.await??
		{
			ProxyRpcResponse::ProxyApiOkResponse(resp) => {
				<T as AdminApiEndpoint>::Response::try_from(resp).map_err(|_| {
					Error::Message(format!("{} returned unexpected response", req_name))
				})
			}
			ProxyRpcResponse::ApiErrorResponse {
				http_code,
				error_code,
				message,
			} => Err(Error::Message(format!(
				"{} returned {} ({}): {}",
				req_name, error_code, http_code, message
			))),
			m => Err(Error::unexpected_rpc_message(m)),
		}
	}
}
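`api_request` is the pivot of the cli_v2 design: a command builds a typed request, the request is wrapped in `AdminApiRequest` and proxied over RPC to a node running the admin API, and the tagged response is converted back into the matching typed response via `TryFrom`. A hedged illustration of what a caller looks like (hypothetical free function; the request type and the `nodes`, `id`, `is_up` fields are the ones used by `cmd_status` above):

// Illustrative only: fetch cluster status through the RPC proxy and
// print one line per known node, exactly like a cli_v2 command would.
async fn example_status(cli: &Cli) -> Result<(), Error> {
	let status = cli.api_request(GetClusterStatusRequest).await?;
	for node in status.nodes.iter() {
		println!("{:.16} up={}", node.id, node.is_up);
	}
	Ok(())
}

The design choice here is that the CLI never talks to the admin HTTP endpoint directly; it reuses the same request/response types and lets any reachable node execute the call, which is why every `cmd_*` method reduces to one or more `api_request` calls plus formatting.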
89
src/garage/cli_v2/worker.rs
Normal file

@ -0,0 +1,89 @@
//use bytesize::ByteSize;
use format_table::format_table;

use garage_util::error::*;

use garage_api::admin::api::*;

use crate::cli::structs::*;
use crate::cli_v2::*;

impl Cli {
	pub async fn cmd_worker(&self, cmd: WorkerOperation) -> Result<(), Error> {
		match cmd {
			WorkerOperation::Get {
				all_nodes,
				variable,
			} => self.cmd_get_var(all_nodes, variable).await,
			WorkerOperation::Set {
				all_nodes,
				variable,
				value,
			} => self.cmd_set_var(all_nodes, variable, value).await,
			wo => cli_v1::cmd_admin(
				&self.admin_rpc_endpoint,
				self.rpc_host,
				AdminRpc::Worker(wo),
			)
			.await
			.ok_or_message("cli_v1"),
		}
	}

	pub async fn cmd_get_var(&self, all: bool, var: Option<String>) -> Result<(), Error> {
		let res = self
			.api_request(GetWorkerVariableRequest {
				node: if all {
					"*".to_string()
				} else {
					hex::encode(self.rpc_host)
				},
				body: LocalGetWorkerVariableRequest { variable: var },
			})
			.await?;

		let mut table = vec![];
		for (node, vars) in res.success.iter() {
			for (key, val) in vars.0.iter() {
				table.push(format!("{:.16}\t{}\t{}", node, key, val));
			}
		}
		format_table(table);

		for (node, err) in res.error.iter() {
			eprintln!("{:.16}: error: {}", node, err);
		}

		Ok(())
	}

	pub async fn cmd_set_var(
		&self,
		all: bool,
		variable: String,
		value: String,
	) -> Result<(), Error> {
		let res = self
			.api_request(SetWorkerVariableRequest {
				node: if all {
					"*".to_string()
				} else {
					hex::encode(self.rpc_host)
				},
				body: LocalSetWorkerVariableRequest { variable, value },
			})
			.await?;

		let mut table = vec![];
		for (node, kv) in res.success.iter() {
			table.push(format!("{:.16}\t{}\t{}", node, kv.variable, kv.value));
		}
		format_table(table);

		for (node, err) in res.error.iter() {
			eprintln!("{:.16}: error: {}", node, err);
		}

		Ok(())
	}
}
|
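One formatting detail in the table code above is easy to miss: in Rust, a precision specifier applied to a string argument truncates it, so `{:.16}` prints only the first 16 characters of the node identifier. A tiny sketch (the node ID below is made up):

    fn main() {
        // Precision on a string argument truncates it, so a long hex node ID
        // is shown as a 16-character prefix in the worker-variable table.
        let node_id = "1d2e3f4a5b6c7d8e9f0a1b2c3d4e5f60"; // made-up node ID
        println!("{:.16}\tresync-tranquility\t2", node_id);
        // prints: 1d2e3f4a5b6c7d8e	resync-tranquility	2
    }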
@ -6,6 +6,7 @@ extern crate tracing;

mod admin;
mod cli;
mod cli_v2;
mod repair;
mod secrets;
mod server;

@ -34,7 +35,7 @@ use garage_util::error::*;

use garage_rpc::system::*;
use garage_rpc::*;

use garage_model::helper::error::Error as HelperError;
use garage_api::admin::api_server::{AdminRpc as ProxyRpc, ADMIN_RPC_PATH as PROXY_RPC_PATH};

use admin::*;
use cli::*;

@ -283,11 +284,14 @@ async fn cli_command(opt: Opt) -> Result<(), Error> {

    let system_rpc_endpoint = netapp.endpoint::<SystemRpc, ()>(SYSTEM_RPC_PATH.into());
    let admin_rpc_endpoint = netapp.endpoint::<AdminRpc, ()>(ADMIN_RPC_PATH.into());
    let proxy_rpc_endpoint = netapp.endpoint::<ProxyRpc, ()>(PROXY_RPC_PATH.into());

    match cli_command_dispatch(opt.cmd, &system_rpc_endpoint, &admin_rpc_endpoint, id).await {
        Err(HelperError::Internal(i)) => Err(Error::Message(format!("Internal error: {}", i))),
        Err(HelperError::BadRequest(b)) => Err(Error::Message(b)),
        Err(e) => Err(Error::Message(format!("{}", e))),
        Ok(x) => Ok(x),
    }
    let cli = cli_v2::Cli {
        system_rpc_endpoint,
        admin_rpc_endpoint,
        proxy_rpc_endpoint,
        rpc_host: id,
    };

    cli.handle(opt.cmd).await
}
@ -1,4 +1,5 @@

use std::path::PathBuf;
use std::sync::Arc;

use tokio::sync::watch;

@ -64,8 +65,9 @@ pub async fn run_server(config_file: PathBuf, secrets: Secrets) -> Result<(), Er

    }

    info!("Initialize Admin API server and metrics collector...");
    let admin_server = AdminApiServer::new(
    let admin_server: Arc<AdminApiServer> = AdminApiServer::new(
        garage.clone(),
        background.clone(),
        #[cfg(feature = "metrics")]
        metrics_exporter,
    );
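The changed return type (`Arc<AdminApiServer>` instead of a bare value) suggests the constructor now hands out a shared handle, consistent with the admin server also backing the new admin-RPC proxy seen earlier. A generic sketch of that ownership pattern, with made-up types:

    use std::sync::Arc;

    struct AdminServer {
        name: &'static str,
    }

    impl AdminServer {
        // Returning Arc<Self> lets every consumer hold a cheap clone of the
        // same server, instead of one component owning it exclusively.
        fn new(name: &'static str) -> Arc<Self> {
            Arc::new(AdminServer { name })
        }
    }

    fn main() {
        let server = AdminServer::new("admin");
        let for_http = Arc::clone(&server); // handle for the HTTP listener
        let for_rpc = Arc::clone(&server); // handle for the RPC proxy endpoint
        println!("{} {} {}", server.name, for_http.name, for_rpc.name);
    }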
@ -427,12 +427,18 @@ async fn test_website_check_domain() {

        res_body,
        json!({
            "code": "InvalidRequest",
            "message": "Bad request: No domain query string found",
            "message": "Bad request: Missing argument `domain` for endpoint",
            "region": "garage-integ-test",
            "path": "/check",
        })
    );

    // FIXME: Edge case with empty domain
    // Currently, empty domain is interpreted as an absent parameter
    // due to logic in router_macros.rs, so this test fails.
    // Maybe we want empty parameters to be acceptable? But that might
    // break a lot of S3 stuff.
    /*
    let admin_req = || {
        Request::builder()
            .method("GET")

@ -456,6 +462,7 @@ async fn test_website_check_domain() {

            "path": "/check",
        })
    );
    */

    let admin_req = || {
        Request::builder()
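The commented-out test documents a real parsing subtlety: if query-string extraction maps an empty value to `None`, then `?domain=` becomes indistinguishable from no `domain` parameter at all. A small model of that behaviour (this is an illustration, not the actual router_macros.rs logic):

    // Minimal model of the edge case described in the FIXME above.
    fn extract_domain(query: &str) -> Option<String> {
        query
            .split('&')
            .filter_map(|kv| kv.split_once('='))
            .find(|(k, _)| *k == "domain")
            .map(|(_, v)| v.to_string())
            .filter(|v| !v.is_empty()) // <- this step collapses "" into None
    }

    fn main() {
        assert_eq!(extract_domain("domain=example.com"), Some("example.com".into()));
        assert_eq!(extract_domain("domain="), None); // empty behaves like absent
        assert_eq!(extract_domain(""), None);
        println!("ok");
    }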
@ -73,41 +73,48 @@ impl<'a> BucketHelper<'a> {

        pattern: &String,
    ) -> Result<Uuid, Error> {
        if let Some(uuid) = self.resolve_global_bucket_name(pattern).await? {
            return Ok(uuid);
        } else if pattern.len() >= 2 {
            let hexdec = pattern
                .get(..pattern.len() & !1)
                .and_then(|x| hex::decode(x).ok());
            if let Some(hex) = hexdec {
                let mut start = [0u8; 32];
                start
                    .as_mut_slice()
                    .get_mut(..hex.len())
                    .ok_or_bad_request("invalid length")?
                    .copy_from_slice(&hex);
                let mut candidates = self
                    .0
                    .bucket_table
                    .get_range(
                        &EmptyKey,
                        Some(start.into()),
                        Some(DeletedFilter::NotDeleted),
                        10,
                        EnumerationOrder::Forward,
                    )
                    .await?
                    .into_iter()
                    .collect::<Vec<_>>();
                candidates.retain(|x| hex::encode(x.id).starts_with(pattern));
                if candidates.len() == 1 {
                    return Ok(candidates.into_iter().next().unwrap().id);
                }
            Ok(uuid)
        } else {
            let hexdec = if pattern.len() >= 2 {
                pattern
                    .get(..pattern.len() & !1)
                    .and_then(|x| hex::decode(x).ok())
            } else {
                None
            };
            let hex = hexdec.ok_or_else(|| Error::NoSuchBucket(pattern.clone()))?;

            let mut start = [0u8; 32];
            start
                .as_mut_slice()
                .get_mut(..hex.len())
                .ok_or_bad_request("invalid length")?
                .copy_from_slice(&hex);
            let mut candidates = self
                .0
                .bucket_table
                .get_range(
                    &EmptyKey,
                    Some(start.into()),
                    Some(DeletedFilter::NotDeleted),
                    10,
                    EnumerationOrder::Forward,
                )
                .await?
                .into_iter()
                .collect::<Vec<_>>();
            candidates.retain(|x| hex::encode(x.id).starts_with(pattern));
            if candidates.is_empty() {
                Err(Error::NoSuchBucket(pattern.clone()))
            } else if candidates.len() == 1 {
                Ok(candidates.into_iter().next().unwrap().id)
            } else {
                Err(Error::BadRequest(format!(
                    "Several matching buckets: {}",
                    pattern
                )))
            }
        }
        Err(Error::BadRequest(format!(
            "Bucket not found / several matching buckets: {}",
            pattern
        )))
    }

    /// Returns a Bucket if it is present in bucket table,
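Two details carry the prefix search above: `pattern.len() & !1` rounds the pattern down to an even number of characters so it hex-decodes cleanly, and the decoded bytes are written into a zeroed 32-byte array, which is the smallest possible ID with that prefix and therefore a valid start key for the forward range scan. A standalone sketch of just that computation (it uses the `hex` crate, like the helper itself; the pattern value is arbitrary):

    fn range_start(pattern: &str) -> Option<[u8; 32]> {
        // Round down to an even length: "e7a1c" -> "e7a1".
        let even = pattern.get(..pattern.len() & !1)?;
        let bytes = hex::decode(even).ok()?;
        // 32 bytes = 64 hex chars; longer prefixes cannot fit.
        if bytes.len() > 32 {
            return None;
        }
        // Zero-padding gives the smallest 32-byte ID with this prefix,
        // i.e. the start key for a forward range scan.
        let mut start = [0u8; 32];
        start[..bytes.len()].copy_from_slice(&bytes);
        Some(start)
    }

    fn main() {
        let start = range_start("e7a1c9").unwrap();
        // Scan forward from `start`, then keep only IDs whose hex encoding
        // actually starts with the full (possibly odd-length) pattern.
        println!("{}", hex::encode(start));
    }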
@ -43,13 +43,10 @@ impl TableReplication for TableFullReplication {

    }
    fn write_quorum(&self) -> usize {
        let nmembers = self.system.cluster_layout().current().all_nodes().len();

        let max_faults = if nmembers > 1 { 1 } else { 0 };

        if nmembers > max_faults {
            nmembers - max_faults
        } else {
        if nmembers < 3 {
            1
        } else {
            nmembers.div_euclid(2) + 1
        }
    }
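The effect of the quorum change is clearest with concrete numbers: the old rule demanded `n - 1` acknowledgements whenever the layout had more than one node, while the new rule needs only a strict majority once there are at least 3 nodes. A quick side-by-side, where `n` stands for the node count from the current layout:

    fn old_write_quorum(n: usize) -> usize {
        // Old rule: tolerate at most one fault once there is more than one node.
        let max_faults = if n > 1 { 1 } else { 0 };
        if n > max_faults { n - max_faults } else { 1 }
    }

    fn new_write_quorum(n: usize) -> usize {
        // New rule: majority quorum, with a floor of 1 for tiny clusters.
        if n < 3 { 1 } else { n.div_euclid(2) + 1 }
    }

    fn main() {
        println!(" n  old  new");
        for n in 1..=7 {
            println!("{:>2} {:>4} {:>4}", n, old_write_quorum(n), new_write_quorum(n));
        }
        // n=1: 1/1, n=2: 1/1, n=3: 2/2, n=4: 3/3, n=5: 4/3, n=6: 5/4, n=7: 6/4
    }

For a 5-node fully-replicated table this drops the write quorum from 4 to 3, so writes keep succeeding with two nodes down instead of one.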