Compare commits
61 commits: main...1686a/s3-r
Commits (SHA1):

62a3003cca
3151695011
f034e834fa
bf0f792418
61f3de6496
71655c1e89
7c8fc04b96
f914db057a
406b6da163
9f468b4439
97be7b38fa
6a1079c412
b1629dd355
d405a9f839
7b9c047b11
10bbb26b30
89ff9f5576
bdaf55ab3f
e96014ca60
568c4954e9
fe937c2901
3192088aac
5a89350b38
3caea5fc06
ebc0e9319e
f8c6a8373d
076ce04fe5
f37d5d2b08
819f4f0050
69ddaafc60
145130481e
6ed78abb5c
19454c1679
1c03941b19
4f0b923c4f
420bbc162d
12ea4cda5f
5fefbd94e9
ba810b2e81
f8ed3fdbc4
2daeb89834
4cb45bd398
d5ad797ad7
a99925e0ed
f538dc34d3
ed58f8b0fe
5037b97dd4
af1a530834
c99bfe69ea
831f2b0207
c1eb1610ba
5560a963e0
2aaba39ddc
47467df83e
9b7fea4cb0
44ce6ae5b4
22487ceddf
6ccfbb2986
c939d2a936
65e9dde8c9
c9b733a4a6
118 changed files with 9037 additions and 6393 deletions
1055  Cargo.lock (generated)
File diff suppressed because it is too large.
34  Cargo.toml

```diff
@@ -24,18 +24,18 @@ default-members = ["src/garage"]

 # Internal Garage crates
 format_table = { version = "0.1.1", path = "src/format-table" }
-garage_api_common = { version = "1.1.0", path = "src/api/common" }
-garage_api_admin = { version = "1.1.0", path = "src/api/admin" }
-garage_api_s3 = { version = "1.1.0", path = "src/api/s3" }
-garage_api_k2v = { version = "1.1.0", path = "src/api/k2v" }
-garage_block = { version = "1.1.0", path = "src/block" }
-garage_db = { version = "1.1.0", path = "src/db", default-features = false }
-garage_model = { version = "1.1.0", path = "src/model", default-features = false }
-garage_net = { version = "1.1.0", path = "src/net" }
-garage_rpc = { version = "1.1.0", path = "src/rpc" }
-garage_table = { version = "1.1.0", path = "src/table" }
-garage_util = { version = "1.1.0", path = "src/util" }
-garage_web = { version = "1.1.0", path = "src/web" }
+garage_api_common = { version = "1.0.1", path = "src/api/common" }
+garage_api_admin = { version = "1.0.1", path = "src/api/admin" }
+garage_api_s3 = { version = "1.0.1", path = "src/api/s3" }
+garage_api_k2v = { version = "1.0.1", path = "src/api/k2v" }
+garage_block = { version = "1.0.1", path = "src/block" }
+garage_db = { version = "1.0.1", path = "src/db", default-features = false }
+garage_model = { version = "1.0.1", path = "src/model", default-features = false }
+garage_net = { version = "1.0.1", path = "src/net" }
+garage_rpc = { version = "1.0.1", path = "src/rpc" }
+garage_table = { version = "1.0.1", path = "src/table" }
+garage_util = { version = "1.0.1", path = "src/util" }
+garage_web = { version = "1.0.1", path = "src/web" }
 k2v-client = { version = "0.0.4", path = "src/k2v-client" }

 # External crates from crates.io

@@ -67,6 +67,7 @@ mktemp = "0.5"
 nix = { version = "0.29", default-features = false, features = ["fs"] }
 nom = "7.1"
 parse_duration = "2.1"
+paste = "1.0"
 pin-project = "1.0.12"
 pnet_datalink = "0.34"
 rand = "0.8"

@@ -132,8 +133,8 @@ opentelemetry-contrib = "0.9"
 prometheus = "0.13"

 # used by the k2v-client crate only
-aws-sigv4 = { version = "1.1", default-features = false }
-hyper-rustls = { version = "0.26", default-features = false, features = ["http1", "http2", "ring", "rustls-native-certs"] }
+aws-sigv4 = { version = "1.1" }
+hyper-rustls = { version = "0.26", features = ["http2"] }
 log = "0.4"
 thiserror = "1.0"

@@ -141,9 +142,8 @@ thiserror = "1.0"
 assert-json-diff = "2.0"
 rustc_version = "0.4.0"
 static_init = "1.0"
-aws-smithy-runtime = { version = "1.8", default-features = false, features = ["tls-rustls"] }
-aws-sdk-config = { version = "1.62", default-features = false }
-aws-sdk-s3 = { version = "1.79", default-features = false, features = ["rt-tokio"] }
+aws-sdk-config = "1.13"
+aws-sdk-s3 = "1.14"

 [profile.dev]
 #lto = "thin" # disabled for now, adds 2-4 min to each CI build
```
4  Makefile

```diff
@@ -2,7 +2,9 @@

 all:
 	clear
-	cargo build
+	cargo build \
+		--config 'target.x86_64-unknown-linux-gnu.linker="clang"' \
+		--config 'target.x86_64-unknown-linux-gnu.rustflags=["-C", "link-arg=-fuse-ld=mold"]' \

 # ----
```
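The two `--config` flags added above switch the default build to clang with the mold linker, which mainly speeds up link times. Whether mold was actually used can be checked on the produced binary; a minimal sketch, assuming clang and mold are installed on an x86_64 Linux host:

```bash
# Build with the same overrides as the Makefile target above
cargo build \
	--config 'target.x86_64-unknown-linux-gnu.linker="clang"' \
	--config 'target.x86_64-unknown-linux-gnu.rustflags=["-C", "link-arg=-fuse-ld=mold"]'

# mold normally records itself in the .comment section of binaries it links
readelf -p .comment target/debug/garage | grep -i mold
```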
```diff
@@ -687,7 +687,7 @@ paths:
       operationId: "GetBucketInfo"
       summary: "Get a bucket"
       description: |
-        Given a bucket identifier (`id`) or a global alias (`globalAlias`), get its information.
+        Given a bucket identifier (`id`) or a global alias (`alias`), get its information.
         It includes its aliases, its web configuration, keys that have some permissions
         on it, some statistics (number of objects, size), number of dangling multipart uploads,
         and its quotas (if any).

@@ -701,7 +701,7 @@ paths:
           example: "b4018dc61b27ccb5c64ec1b24f53454bbbd180697c758c4d47a22a8921864a87"
           schema:
             type: string
-        - name: globalAlias
+        - name: alias
          in: query
          description: |
            The exact global alias of one of the existing buckets.
```
24  doc/api/garage-admin-v2.html (new file)

```diff
@@ -0,0 +1,24 @@
+<!DOCTYPE html>
+<html>
+<head>
+    <title>Garage Adminstration API v0</title>
+    <!-- needed for adaptive design -->
+    <meta charset="utf-8"/>
+    <meta name="viewport" content="width=device-width, initial-scale=1">
+    <link href="./css/redoc.css" rel="stylesheet">
+
+    <!--
+    Redoc doesn't change outer page styles
+    -->
+    <style>
+      body {
+        margin: 0;
+        padding: 0;
+      }
+    </style>
+</head>
+<body>
+    <redoc spec-url='./garage-admin-v2.yml'></redoc>
+    <script src="./redoc.standalone.js"> </script>
+</body>
+</html>
```
1336  doc/api/garage-admin-v2.yml (new file)
File diff suppressed because it is too large.
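The Redoc page above renders `garage-admin-v2.yml` client-side, so it can be previewed by serving the `doc/api` directory with any static file server; a sketch using Python's built-in server (the port is arbitrary):

```bash
# Serve the API docs locally, then browse to
# http://localhost:8080/garage-admin-v2.html
cd doc/api
python3 -m http.server 8080
```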
````diff
@@ -86,62 +86,3 @@ helm delete --namespace garage garage
 ```
 
 Note that this will leave behind custom CRD `garagenodes.deuxfleurs.fr`, which must be removed manually if desired.
-
-## Increase PVC size on running Garage instances
-
-Since the Garage Helm chart creates the data and meta PVC based on `StatefulSet` templates, increasing the PVC size can be a bit tricky.
-
-### Confirm the `StorageClass` used for Garage supports volume expansion
-
-Confirm the storage class used for garage.
-
-```bash
-kubectl -n garage get pvc
-NAME            STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS     VOLUMEATTRIBUTESCLASS   AGE
-data-garage-0   Bound    pvc-080360c9-8ce3-4acf-8579-1701e57b7f3f   30Gi       RWO            longhorn-local   <unset>                 77d
-data-garage-1   Bound    pvc-ab8ba697-6030-4fc7-ab3c-0d6df9e3dbc0   30Gi       RWO            longhorn-local   <unset>                 5d8h
-data-garage-2   Bound    pvc-3ab37551-0231-4604-986d-136d0fd950ec   30Gi       RWO            longhorn-local   <unset>                 5d5h
-meta-garage-0   Bound    pvc-3b457302-3023-4169-846e-c928c5f2ea65   3Gi        RWO            longhorn-local   <unset>                 77d
-meta-garage-1   Bound    pvc-49ace2b9-5c85-42df-9247-51c4cf64b460   3Gi        RWO            longhorn-local   <unset>                 5d8h
-meta-garage-2   Bound    pvc-99e2e50f-42b4-4128-ae2f-b52629259723   3Gi        RWO            longhorn-local   <unset>                 5d5h
-```
-
-In this case, the storage class is `longhorn-local`. Now, check if `ALLOWVOLUMEEXPANSION` is true for the used `StorageClass`.
-
-```bash
-kubectl get storageclasses.storage.k8s.io longhorn-local
-NAME             PROVISIONER          RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
-longhorn-local   driver.longhorn.io   Delete          Immediate           true                   103d
-```
-
-If your `StorageClass` does not support volume expansion, double check if you can enable it. Otherwise, your only real option is to spin up a new Garage cluster with increased size and migrate all data over.
-
-If your `StorageClass` supports expansion, you are free to continue.
-
-### Increase the size of the PVCs
-
-Increase the size of all PVCs to your desired size.
-
-```bash
-kubectl -n garage edit pvc data-garage-0
-kubectl -n garage edit pvc data-garage-1
-kubectl -n garage edit pvc data-garage-2
-kubectl -n garage edit pvc meta-garage-0
-kubectl -n garage edit pvc meta-garage-1
-kubectl -n garage edit pvc meta-garage-2
-```
-
-### Increase the size of the `StatefulSet` PVC template
-
-This is an optional step, but if not done, future instances of Garage will be created with the original size from the template.
-
-```bash
-kubectl -n garage delete sts --cascade=orphan garage
-statefulset.apps "garage" deleted
-```
-
-This will remove the Garage `StatefulSet` but leave the pods running. It may seem destructive but needs to be done this way since edits to the size of PVC templates are prohibited.
-
-### Redeploy the `StatefulSet`
-
-Now the size of future PVCs can be increased, and the Garage Helm chart can be upgraded. The new `StatefulSet` should take ownership of the orphaned pods again.
````
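For reference, the interactive `kubectl edit` steps in the guide above can also be scripted; a minimal sketch using `kubectl patch`, assuming the PVC names from the example output and a target size of 50Gi (both placeholders):

```bash
# Grow each Garage data PVC to 50Gi; the StorageClass must allow volume expansion
for pvc in data-garage-0 data-garage-1 data-garage-2; do
	kubectl -n garage patch pvc "$pvc" --type merge \
		-p '{"spec":{"resources":{"requests":{"storage":"50Gi"}}}}'
done
```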
````diff
@@ -96,14 +96,14 @@ to store 2 TB of data in total.
 ## Get a Docker image
 
 Our docker image is currently named `dxflrs/garage` and is stored on the [Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).
-We encourage you to use a fixed tag (eg. `v1.1.0`) and not the `latest` tag.
-For this example, we will use the latest published version at the time of the writing which is `v1.1.0` but it's up to you
+We encourage you to use a fixed tag (eg. `v1.0.1`) and not the `latest` tag.
+For this example, we will use the latest published version at the time of the writing which is `v1.0.1` but it's up to you
 to check [the most recent versions on the Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).
 
 For example:
 
 ```
-sudo docker pull dxflrs/garage:v1.1.0
+sudo docker pull dxflrs/garage:v1.0.1
 ```
 
 ## Deploying and configuring Garage

@@ -171,7 +171,7 @@ docker run \
   -v /etc/garage.toml:/etc/garage.toml \
   -v /var/lib/garage/meta:/var/lib/garage/meta \
   -v /var/lib/garage/data:/var/lib/garage/data \
-  dxflrs/garage:v1.1.0
+  dxflrs/garage:v1.0.1
 ```
 
 With this command line, Garage should be started automatically at each boot.

@@ -185,7 +185,7 @@ If you want to use `docker-compose`, you may use the following `docker-compose.yml`
 version: "3"
 services:
   garage:
-    image: dxflrs/garage:v1.1.0
+    image: dxflrs/garage:v1.0.1
     network_mode: "host"
     restart: unless-stopped
     volumes:
````
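Once the container is up, the node can be checked from inside it; a sketch assuming the container was started with `--name garaged` (the name is a placeholder; `/garage` is the binary path inside the image):

```bash
# Ask the running node for its status; it should list itself once up
sudo docker exec -ti garaged /garage status
```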
```diff
@@ -71,7 +71,7 @@ The entire procedure would look something like this:
 
 2. Take each node offline individually to back up its metadata folder, bring them back online once the backup is done.
    You can do all of the nodes in a single zone at once as that won't impact global cluster availability.
-   Do not try to manually copy the metadata folder of a running node.
+   Do not try to make a backup of the metadata folder of a running node.
 
    **Since Garage v0.9.4,** you can use the `garage meta snapshot --all` command
    to take a simultaneous snapshot of the metadata database files of all your
```
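The snapshot variant mentioned in the context above is a one-liner issued on any connected node (Garage v0.9.4 or later):

```bash
# Snapshot the metadata DB of every node in the cluster at once
garage meta snapshot --all
```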
````diff
@@ -132,7 +132,7 @@ docker run \
   -v /etc/garage.toml:/path/to/garage.toml \
   -v /var/lib/garage/meta:/path/to/garage/meta \
   -v /var/lib/garage/data:/path/to/garage/data \
-  dxflrs/garage:v1.1.0
+  dxflrs/garage:v0.9.4
 ```
 
 Under Linux, you can substitute `--network host` for `-p 3900:3900 -p 3901:3901 -p 3902:3902 -p 3903:3903`
````
```diff
@@ -75,7 +75,6 @@ root_domain = ".s3.garage"
 [s3_web]
 bind_addr = "[::]:3902"
 root_domain = ".web.garage"
-add_host_to_metrics = true
 
 [admin]
 api_bind_addr = "0.0.0.0:3903"

@@ -139,7 +138,6 @@ The `[s3_api]` section:
 [`s3_region`](#s3_region).
 
 The `[s3_web]` section:
-[`add_host_to_metrics`](#web_add_host_to_metrics),
 [`bind_addr`](#web_bind_addr),
 [`root_domain`](#web_root_domain).

@@ -154,7 +152,7 @@ The `[admin]` section:
 The following configuration parameter must be specified as an environment
 variable, it does not exist in the configuration file:
 
-- `GARAGE_LOG_TO_SYSLOG` (since `v0.9.4`): set this to `1` or `true` to make the
+- `GARAGE_LOG_TO_SYSLOG` (since v0.9.4): set this to `1` or `true` to make the
   Garage daemon send its logs to `syslog` (using the libc `syslog` function)
   instead of printing to stderr.

@@ -300,7 +298,7 @@ data_dir = [
 See [the dedicated documentation page](@/documentation/operations/multi-hdd.md)
 on how to operate Garage in such a setup.
 
-#### `metadata_snapshots_dir` (since `v1.1.0`) {#metadata_snapshots_dir}
+#### `metadata_snapshots_dir` (since Garage `v1.0.2`) {#metadata_snapshots_dir}
 
 The directory in which Garage will store metadata snapshots when it
 performs a snapshot of the metadata database, either when instructed to do

@@ -416,7 +414,7 @@ at the cost of a moderate drop in write performance.
 Similarly to `metatada_fsync`, this is likely not necessary
 if geographical replication is used.
 
-#### `metadata_auto_snapshot_interval` (since `v0.9.4`) {#metadata_auto_snapshot_interval}
+#### `metadata_auto_snapshot_interval` (since Garage v0.9.4) {#metadata_auto_snapshot_interval}
 
 If this value is set, Garage will automatically take a snapshot of the metadata
 DB file at a regular interval and save it in the metadata directory.

@@ -453,7 +451,7 @@ you should delete it from the data directory and then call `garage repair
 blocks` on the node to ensure that it re-obtains a copy from another node on
 the network.
 
-#### `use_local_tz` (since `v1.1.0`) {#use_local_tz}
+#### `use_local_tz` {#use_local_tz}
 
 By default, Garage runs the lifecycle worker every day at midnight in UTC. Set the
 `use_local_tz` configuration value to `true` if you want Garage to run the

@@ -475,7 +473,7 @@ files will remain available. This however means that chunks from existing files
 will not be deduplicated with chunks from newly uploaded files, meaning you
 might use more storage space that is optimally possible.
 
-#### `block_ram_buffer_max` (since `v0.9.4`) {#block_ram_buffer_max}
+#### `block_ram_buffer_max` (since v0.9.4) {#block_ram_buffer_max}
 
 A limit on the total size of data blocks kept in RAM by S3 API nodes awaiting
 to be sent to storage nodes asynchronously.

@@ -562,7 +560,7 @@ the node, even in the case of a NAT: the NAT should be configured to forward the
 port number to the same internal port nubmer. This means that if you have several nodes running
 behind a NAT, they should each use a different RPC port number.
 
-#### `rpc_bind_outgoing` (since `v0.9.2`) {#rpc_bind_outgoing}
+#### `rpc_bind_outgoing`(since v0.9.2) {#rpc_bind_outgoing}
 
 If enabled, pre-bind all sockets for outgoing connections to the same IP address
 used for listening (the IP address specified in `rpc_bind_addr`) before

@@ -746,13 +744,6 @@ For instance, if `root_domain` is `web.garage.eu`, a bucket called `deuxfleurs.fr`
 will be accessible either with hostname `deuxfleurs.fr.web.garage.eu`
 or with hostname `deuxfleurs.fr`.
 
-#### `add_host_to_metrics` {#web_add_host_to_metrics}
-
-Whether to include the requested domain name (HTTP `Host` header) in the
-Prometheus metrics of the web endpoint. This is disabled by default as the
-number of possible values is not bounded and can be a source of cardinality
-explosion in the exported metrics.
-
 
 ### The `[admin]` section
```
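Since `GARAGE_LOG_TO_SYSLOG` only exists as an environment variable, it is set on the daemon's environment rather than in `garage.toml`; a minimal sketch (the config path is a placeholder):

```bash
# Send Garage logs to syslog instead of stderr (Garage v0.9.4+)
GARAGE_LOG_TO_SYSLOG=1 garage -c /etc/garage.toml server
```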
````diff
@@ -13,8 +13,9 @@ We will bump the version numbers prefixed to each API endpoint each time the syntax
 or semantics change, meaning that code that relies on these endpoints will break
 when changes are introduced.
 
-The Garage administration API was introduced in version 0.7.2, this document
-does not apply to older versions of Garage.
+The Garage administration API was introduced in version 0.7.2, and was
+changed several times.
+This document applies only to the Garage v2 API (starting with Garage v2.0.0).
 
 
 ## Access control

@@ -52,11 +53,18 @@ Returns an HTTP status 200 if the node is ready to answer user's requests,
 and an HTTP status 503 (Service Unavailable) if there are some partitions
 for which a quorum of nodes is not available.
 A simple textual message is also returned in a body with content-type `text/plain`.
-See `/v1/health` for an API that also returns JSON output.
+See `/v2/GetClusterHealth` for an API that also returns JSON output.
+
+### Other special endpoints
+
+#### CheckDomain `GET /check?domain=<domain>`
+
+Checks whether this Garage cluster serves a website for domain `<domain>`.
+Returns HTTP 200 Ok if yes, or HTTP 4xx if no website is available for this domain.
 
 ### Cluster operations
 
-#### GetClusterStatus `GET /v1/status`
+#### GetClusterStatus `GET /v2/GetClusterStatus`
 
 Returns the cluster's current status in JSON, including:

@@ -70,7 +78,7 @@ Example response body:
 ```json
 {
   "node": "b10c110e4e854e5aa3f4637681befac755154b20059ec163254ddbfae86b09df",
-  "garageVersion": "v1.1.0",
+  "garageVersion": "v2.0.0",
   "garageFeatures": [
     "k2v",
     "lmdb",

@@ -169,7 +177,7 @@ Example response body:
 }
 ```
 
-#### GetClusterHealth `GET /v1/health`
+#### GetClusterHealth `GET /v2/GetClusterHealth`
 
 Returns the cluster's current health in JSON format, with the following variables:

@@ -202,7 +210,7 @@ Example response body:
 }
 ```
 
-#### ConnectClusterNodes `POST /v1/connect`
+#### ConnectClusterNodes `POST /v2/ConnectClusterNodes`
 
 Instructs this Garage node to connect to other Garage nodes at specified addresses.

@@ -232,7 +240,7 @@ Example response:
 ]
 ```
 
-#### GetClusterLayout `GET /v1/layout`
+#### GetClusterLayout `GET /v2/GetClusterLayout`
 
 Returns the cluster's current layout in JSON, including:

@@ -293,7 +301,7 @@ Example response body:
 }
 ```
 
-#### UpdateClusterLayout `POST /v1/layout`
+#### UpdateClusterLayout `POST /v2/UpdateClusterLayout`
 
 Send modifications to the cluster layout. These modifications will
 be included in the staged role changes, visible in subsequent calls

@@ -330,7 +338,7 @@ This returns the new cluster layout with the proposed staged changes,
 as returned by GetClusterLayout.
 
 
-#### ApplyClusterLayout `POST /v1/layout/apply`
+#### ApplyClusterLayout `POST /v2/ApplyClusterLayout`
 
 Applies to the cluster the layout changes currently registered as
 staged layout changes.

@@ -350,7 +358,7 @@ existing layout in the cluster.
 This returns the message describing all the calculations done to compute the new
 layout, as well as the description of the layout as returned by GetClusterLayout.
 
-#### RevertClusterLayout `POST /v1/layout/revert`
+#### RevertClusterLayout `POST /v2/RevertClusterLayout`
 
 Clears all of the staged layout changes.

@@ -374,7 +382,7 @@ as returned by GetClusterLayout.
 
 ### Access key operations
 
-#### ListKeys `GET /v1/key`
+#### ListKeys `GET /v2/ListKeys`
 
 Returns all API access keys in the cluster.

@@ -393,8 +401,8 @@ Example response:
 ]
 ```
 
-#### GetKeyInfo `GET /v1/key?id=<acces key id>`
-#### GetKeyInfo `GET /v1/key?search=<pattern>`
+#### GetKeyInfo `GET /v2/GetKeyInfo?id=<acces key id>`
+#### GetKeyInfo `GET /v2/GetKeyInfo?search=<pattern>`
 
 Returns information about the requested API access key.

@@ -468,7 +476,7 @@ Example response:
 }
 ```
 
-#### CreateKey `POST /v1/key`
+#### CreateKey `POST /v2/CreateKey`
 
 Creates a new API access key.

@@ -483,7 +491,7 @@ Request body format:
 This returns the key info, including the created secret key,
 in the same format as the result of GetKeyInfo.
 
-#### ImportKey `POST /v1/key/import`
+#### ImportKey `POST /v2/ImportKey`
 
 Imports an existing API key.
 This will check that the imported key is in the valid format, i.e.

@@ -501,7 +509,7 @@ Request body format:
 
 This returns the key info in the same format as the result of GetKeyInfo.
 
-#### UpdateKey `POST /v1/key?id=<acces key id>`
+#### UpdateKey `POST /v2/UpdateKey?id=<acces key id>`
 
 Updates information about the specified API access key.

@@ -523,14 +531,14 @@ The possible flags in `allow` and `deny` are: `createBucket`.
 
 This returns the key info in the same format as the result of GetKeyInfo.
 
-#### DeleteKey `DELETE /v1/key?id=<acces key id>`
+#### DeleteKey `POST /v2/DeleteKey?id=<acces key id>`
 
 Deletes an API access key.
 
 
 ### Bucket operations
 
-#### ListBuckets `GET /v1/bucket`
+#### ListBuckets `GET /v2/ListBuckets`
 
 Returns all storage buckets in the cluster.

@@ -572,8 +580,8 @@ Example response:
 ]
 ```
 
-#### GetBucketInfo `GET /v1/bucket?id=<bucket id>`
-#### GetBucketInfo `GET /v1/bucket?globalAlias=<alias>`
+#### GetBucketInfo `GET /v2/GetBucketInfo?id=<bucket id>`
+#### GetBucketInfo `GET /v2/GetBucketInfo?globalAlias=<alias>`
 
 Returns information about the requested storage bucket.

@@ -616,7 +624,7 @@ Example response:
 }
 ```
 
-#### CreateBucket `POST /v1/bucket`
+#### CreateBucket `POST /v2/CreateBucket`
 
 Creates a new storage bucket.

@@ -656,7 +664,7 @@ or no alias at all.
 Technically, you can also specify both `globalAlias` and `localAlias` and that would create
 two aliases, but I don't see why you would want to do that.
 
-#### UpdateBucket `PUT /v1/bucket?id=<bucket id>`
+#### UpdateBucket `POST /v2/UpdateBucket?id=<bucket id>`
 
 Updates configuration of the given bucket.

@@ -688,16 +696,38 @@ In `quotas`: new values of `maxSize` and `maxObjects` must both be specified, or
 to remove the quotas. An absent value will be considered the same as a `null`. It is not possible
 to change only one of the two quotas.
 
-#### DeleteBucket `DELETE /v1/bucket?id=<bucket id>`
+#### DeleteBucket `POST /v2/DeleteBucket?id=<bucket id>`
 
 Deletes a storage bucket. A bucket cannot be deleted if it is not empty.
 
 Warning: this will delete all aliases associated with the bucket!
 
+#### CleanupIncompleteUploads `POST /v2/CleanupIncompleteUploads`
+
+Cleanup all incomplete uploads in a bucket that are older than a specified number
+of seconds.
+
+Request body format:
+
+```json
+{
+  "bucketId": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b",
+  "olderThanSecs": 3600
+}
+```
+
+Response format
+
+```json
+{
+  "uploadsDeleted": 12
+}
+```
+
 
 ### Operations on permissions for keys on buckets
 
-#### BucketAllowKey `POST /v1/bucket/allow`
+#### AllowBucketKey `POST /v2/AllowBucketKey`
 
 Allows a key to do read/write/owner operations on a bucket.

@@ -718,7 +748,7 @@ Request body format:
 Flags in `permissions` which have the value `true` will be activated.
 Other flags will remain unchanged.
 
-#### BucketDenyKey `POST /v1/bucket/deny`
+#### DenyBucketKey `POST /v2/DenyBucketKey`
 
 Denies a key from doing read/write/owner operations on a bucket.

@@ -742,19 +772,35 @@ Other flags will remain unchanged.
 
 ### Operations on bucket aliases
 
-#### GlobalAliasBucket `PUT /v1/bucket/alias/global?id=<bucket id>&alias=<global alias>`
-
-Empty body. Creates a global alias for a bucket.
-
-#### GlobalUnaliasBucket `DELETE /v1/bucket/alias/global?id=<bucket id>&alias=<global alias>`
-
-Removes a global alias for a bucket.
-
-#### LocalAliasBucket `PUT /v1/bucket/alias/local?id=<bucket id>&accessKeyId=<access key ID>&alias=<local alias>`
-
-Empty body. Creates a local alias for a bucket in the namespace of a specific access key.
-
-#### LocalUnaliasBucket `DELETE /v1/bucket/alias/local?id=<bucket id>&accessKeyId<access key ID>&alias=<local alias>`
-
-Removes a local alias for a bucket in the namespace of a specific access key.
+#### AddBucketAlias `POST /v2/AddBucketAlias`
+
+Creates an alias for a bucket in the namespace of a specific access key.
+To create a global alias, specify the `globalAlias` field.
+To create a local alias, specify the `localAlias` and `accessKeyId` fields.
+
+Request body format:
+
+```json
+{
+  "bucketId": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b",
+  "globalAlias": "my-bucket"
+}
+```
+
+or:
+
+```json
+{
+  "bucketId": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b",
+  "accessKeyId": "GK31c2f218a2e44f485b94239e",
+  "localAlias": "my-bucket"
+}
+```
+
+#### RemoveBucketAlias `POST /v2/RemoveBucketAlias`
+
+Removes an alias for a bucket in the namespace of a specific access key.
+To remove a global alias, specify the `globalAlias` field.
+To remove a local alias, specify the `localAlias` and `accessKeyId` fields.
+
+Request body format: same as AddBucketAlias.
````
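The v2 endpoints above are plain HTTP calls authenticated with a bearer token; a hedged sketch with `curl`, assuming the admin API listens on `localhost:3903` and the admin token is in `$TOKEN` (both placeholders):

```bash
# JSON health summary (replaces GET /v1/health)
curl -s -H "Authorization: Bearer $TOKEN" \
	http://localhost:3903/v2/GetClusterHealth

# POST endpoints take a JSON body, e.g. CleanupIncompleteUploads
curl -s -X POST -H "Authorization: Bearer $TOKEN" \
	-H "Content-Type: application/json" \
	-d '{"bucketId": "<bucket id>", "olderThanSecs": 3600}' \
	http://localhost:3903/v2/CleanupIncompleteUploads
```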
@ -11,7 +11,7 @@ PATH="${GARAGE_DEBUG}:${GARAGE_RELEASE}:${NIX_RELEASE}:$PATH"
|
|||
FANCYCOLORS=("41m" "42m" "44m" "45m" "100m" "104m")
|
||||
|
||||
export RUST_BACKTRACE=1
|
||||
export RUST_LOG=garage=info,garage_api_common=debug,garage_api_s3=debug
|
||||
export RUST_LOG=garage=info,garage_api=debug
|
||||
MAIN_LABEL="\e[${FANCYCOLORS[0]}[main]\e[49m"
|
||||
|
||||
if [ -z "$GARAGE_BIN" ]; then
|
||||
|
|
```diff
@@ -1,3 +1,3 @@
 # Garage helm3 chart
 
-Documentation is located [here](https://garagehq.deuxfleurs.fr/documentation/cookbook/kubernetes/).
+Documentation is located [here](/doc/book/cookbook/kubernetes.md).
```
```diff
@@ -15,10 +15,10 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.7.0
+version: 0.6.0
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "v1.1.0"
+appVersion: "v1.0.1"
```
```diff
@@ -1,22 +0,0 @@
-{{- if eq .Values.deployment.kind "StatefulSet" -}}
-apiVersion: v1
-kind: Service
-metadata:
-  name: {{ include "garage.fullname" . }}-headless
-  labels:
-    {{- include "garage.labels" . | nindent 4 }}
-spec:
-  type: ClusterIP
-  clusterIP: None
-  ports:
-    - port: {{ .Values.service.s3.api.port }}
-      targetPort: 3900
-      protocol: TCP
-      name: s3-api
-    - port: {{ .Values.service.s3.web.port }}
-      targetPort: 3902
-      protocol: TCP
-      name: s3-web
-  selector:
-    {{- include "garage.selectorLabels" . | nindent 4 }}
-{{- end }}
```
```diff
@@ -10,11 +10,12 @@ spec:
     {{- include "garage.selectorLabels" . | nindent 6 }}
   {{- if eq .Values.deployment.kind "StatefulSet" }}
   replicas: {{ .Values.deployment.replicaCount }}
-  serviceName: {{ include "garage.fullname" . }}-headless
+  serviceName: {{ include "garage.fullname" . }}
   podManagementPolicy: {{ .Values.deployment.podManagementPolicy }}
   {{- end }}
   template:
     metadata:
+      annotations:
+        checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
       {{- with .Values.podAnnotations }}
```
```diff
@@ -1,12 +1,12 @@
 [package]
 name = "garage_api_admin"
-version = "1.1.0"
+version = "1.0.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
 description = "Admin API server crate for the Garage object store"
 repository = "https://git.deuxfleurs.fr/Deuxfleurs/garage"
-readme = "../../../README.md"
+readme = "../../README.md"
 
 [lib]
 path = "lib.rs"

@@ -14,7 +14,9 @@ path = "lib.rs"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
+format_table.workspace = true
 garage_model.workspace = true
+garage_block.workspace = true
 garage_table.workspace = true
 garage_util.workspace = true
 garage_rpc.workspace = true

@@ -22,8 +24,10 @@ garage_api_common.workspace = true
 
 argon2.workspace = true
 async-trait.workspace = true
+bytesize.workspace = true
 err-derive.workspace = true
 hex.workspace = true
+paste.workspace = true
 tracing.workspace = true
 
 futures.workspace = true
```
876  src/api/admin/api.rs (new file)
@ -0,0 +1,876 @@
|
|||
use std::collections::HashMap;
|
||||
use std::convert::TryFrom;
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::Arc;
|
||||
|
||||
use paste::paste;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use garage_rpc::*;
|
||||
|
||||
use garage_model::garage::Garage;
|
||||
|
||||
use garage_api_common::common_error::CommonErrorDerivative;
|
||||
use garage_api_common::helpers::is_default;
|
||||
|
||||
use crate::api_server::{AdminRpc, AdminRpcResponse};
|
||||
use crate::error::Error;
|
||||
use crate::macros::*;
|
||||
use crate::{Admin, RequestHandler};
|
||||
|
||||
// This generates the following:
|
||||
//
|
||||
// - An enum AdminApiRequest that contains a variant for all endpoints
|
||||
//
|
||||
// - An enum AdminApiResponse that contains a variant for all non-special endpoints.
|
||||
// This enum is serialized in api_server.rs, without the enum tag,
|
||||
// which gives directly the JSON response corresponding to the API call.
|
||||
// This enum does not implement Deserialize as its meaning can be ambiguous.
|
||||
//
|
||||
// - An enum TaggedAdminApiResponse that contains the same variants, but
|
||||
// serializes as a tagged enum. This allows it to be transmitted through
|
||||
// Garage RPC and deserialized correctly upon receival.
|
||||
// Conversion from untagged to tagged can be done using the `.tagged()` method.
|
||||
//
|
||||
// - AdminApiRequest::name() that returns the name of the endpoint
|
||||
//
|
||||
// - impl EndpointHandler for AdminApiHandler, that uses the impl EndpointHandler
|
||||
// of each request type below for non-special endpoints
|
||||
admin_endpoints![
|
||||
// Special endpoints of the Admin API
|
||||
@special Options,
|
||||
@special CheckDomain,
|
||||
@special Health,
|
||||
@special Metrics,
|
||||
|
||||
// Cluster operations
|
||||
GetClusterStatus,
|
||||
GetClusterHealth,
|
||||
ConnectClusterNodes,
|
||||
GetClusterLayout,
|
||||
UpdateClusterLayout,
|
||||
ApplyClusterLayout,
|
||||
RevertClusterLayout,
|
||||
|
||||
// Access key operations
|
||||
ListKeys,
|
||||
GetKeyInfo,
|
||||
CreateKey,
|
||||
ImportKey,
|
||||
UpdateKey,
|
||||
DeleteKey,
|
||||
|
||||
// Bucket operations
|
||||
ListBuckets,
|
||||
GetBucketInfo,
|
||||
CreateBucket,
|
||||
UpdateBucket,
|
||||
DeleteBucket,
|
||||
CleanupIncompleteUploads,
|
||||
|
||||
// Operations on permissions for keys on buckets
|
||||
AllowBucketKey,
|
||||
DenyBucketKey,
|
||||
|
||||
// Operations on bucket aliases
|
||||
AddBucketAlias,
|
||||
RemoveBucketAlias,
|
||||
|
||||
// Node operations
|
||||
CreateMetadataSnapshot,
|
||||
GetNodeStatistics,
|
||||
GetClusterStatistics,
|
||||
LaunchRepairOperation,
|
||||
|
||||
// Worker operations
|
||||
ListWorkers,
|
||||
GetWorkerInfo,
|
||||
GetWorkerVariable,
|
||||
SetWorkerVariable,
|
||||
|
||||
// Block operations
|
||||
ListBlockErrors,
|
||||
GetBlockInfo,
|
||||
RetryBlockResync,
|
||||
PurgeBlocks,
|
||||
];
|
||||
|
||||
local_admin_endpoints![
|
||||
// Node operations
|
||||
CreateMetadataSnapshot,
|
||||
GetNodeStatistics,
|
||||
LaunchRepairOperation,
|
||||
// Background workers
|
||||
ListWorkers,
|
||||
GetWorkerInfo,
|
||||
GetWorkerVariable,
|
||||
SetWorkerVariable,
|
||||
// Block operations
|
||||
ListBlockErrors,
|
||||
GetBlockInfo,
|
||||
RetryBlockResync,
|
||||
PurgeBlocks,
|
||||
];
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct MultiRequest<RB> {
|
||||
pub node: String,
|
||||
pub body: RB,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct MultiResponse<RB> {
|
||||
pub success: HashMap<String, RB>,
|
||||
pub error: HashMap<String, String>,
|
||||
}
|
||||
|
||||
// **********************************************
|
||||
// Special endpoints
|
||||
//
|
||||
// These endpoints don't have associated *Response structs
|
||||
// because they directly produce an http::Response
|
||||
// **********************************************
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct OptionsRequest;
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct CheckDomainRequest {
|
||||
pub domain: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct HealthRequest;
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct MetricsRequest;
|
||||
|
||||
// **********************************************
|
||||
// Cluster operations
|
||||
// **********************************************
|
||||
|
||||
// ---- GetClusterStatus ----
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct GetClusterStatusRequest;
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct GetClusterStatusResponse {
|
||||
pub node: String,
|
||||
pub garage_version: String,
|
||||
pub garage_features: Option<Vec<String>>,
|
||||
pub rust_version: String,
|
||||
pub db_engine: String,
|
||||
pub layout_version: u64,
|
||||
pub nodes: Vec<NodeResp>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct NodeResp {
|
||||
pub id: String,
|
||||
pub role: Option<NodeRoleResp>,
|
||||
pub addr: Option<SocketAddr>,
|
||||
pub hostname: Option<String>,
|
||||
pub is_up: bool,
|
||||
pub last_seen_secs_ago: Option<u64>,
|
||||
pub draining: bool,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub data_partition: Option<FreeSpaceResp>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub metadata_partition: Option<FreeSpaceResp>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct NodeRoleResp {
|
||||
pub id: String,
|
||||
pub zone: String,
|
||||
pub capacity: Option<u64>,
|
||||
pub tags: Vec<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct FreeSpaceResp {
|
||||
pub available: u64,
|
||||
pub total: u64,
|
||||
}
|
||||
|
||||
// ---- GetClusterHealth ----
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct GetClusterHealthRequest;
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct GetClusterHealthResponse {
|
||||
pub status: String,
|
||||
pub known_nodes: usize,
|
||||
pub connected_nodes: usize,
|
||||
pub storage_nodes: usize,
|
||||
pub storage_nodes_ok: usize,
|
||||
pub partitions: usize,
|
||||
pub partitions_quorum: usize,
|
||||
pub partitions_all_ok: usize,
|
||||
}
|
||||
|
||||
// ---- ConnectClusterNodes ----
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ConnectClusterNodesRequest(pub Vec<String>);
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ConnectClusterNodesResponse(pub Vec<ConnectNodeResponse>);
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ConnectNodeResponse {
|
||||
pub success: bool,
|
||||
pub error: Option<String>,
|
||||
}
|
||||
|
||||
// ---- GetClusterLayout ----
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct GetClusterLayoutRequest;
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct GetClusterLayoutResponse {
|
||||
pub version: u64,
|
||||
pub roles: Vec<NodeRoleResp>,
|
||||
pub staged_role_changes: Vec<NodeRoleChange>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct NodeRoleChange {
|
||||
pub id: String,
|
||||
#[serde(flatten)]
|
||||
pub action: NodeRoleChangeEnum,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(untagged)]
|
||||
pub enum NodeRoleChangeEnum {
|
||||
#[serde(rename_all = "camelCase")]
|
||||
Remove { remove: bool },
|
||||
#[serde(rename_all = "camelCase")]
|
||||
Update {
|
||||
zone: String,
|
||||
capacity: Option<u64>,
|
||||
tags: Vec<String>,
|
||||
},
|
||||
}
|
||||
|
||||
// ---- UpdateClusterLayout ----
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct UpdateClusterLayoutRequest(pub Vec<NodeRoleChange>);
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct UpdateClusterLayoutResponse(pub GetClusterLayoutResponse);
|
||||
|
||||
// ---- ApplyClusterLayout ----
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ApplyClusterLayoutRequest {
|
||||
pub version: u64,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ApplyClusterLayoutResponse {
|
||||
pub message: Vec<String>,
|
||||
pub layout: GetClusterLayoutResponse,
|
||||
}
|
||||
|
||||
// ---- RevertClusterLayout ----
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct RevertClusterLayoutRequest;
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct RevertClusterLayoutResponse(pub GetClusterLayoutResponse);
|
||||
|
||||
// **********************************************
|
||||
// Access key operations
|
||||
// **********************************************
|
||||
|
||||
// ---- ListKeys ----
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ListKeysRequest;
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ListKeysResponse(pub Vec<ListKeysResponseItem>);
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ListKeysResponseItem {
|
||||
pub id: String,
|
||||
pub name: String,
|
||||
}
|
||||
|
||||
// ---- GetKeyInfo ----
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct GetKeyInfoRequest {
|
||||
pub id: Option<String>,
|
||||
pub search: Option<String>,
|
||||
pub show_secret_key: bool,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct GetKeyInfoResponse {
|
||||
pub name: String,
|
||||
pub access_key_id: String,
|
||||
#[serde(skip_serializing_if = "is_default")]
|
||||
pub secret_access_key: Option<String>,
|
||||
pub permissions: KeyPerm,
|
||||
pub buckets: Vec<KeyInfoBucketResponse>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct KeyPerm {
|
||||
#[serde(default)]
|
||||
pub create_bucket: bool,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct KeyInfoBucketResponse {
|
||||
pub id: String,
|
||||
pub global_aliases: Vec<String>,
|
||||
pub local_aliases: Vec<String>,
|
||||
pub permissions: ApiBucketKeyPerm,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ApiBucketKeyPerm {
|
||||
#[serde(default)]
|
||||
pub read: bool,
|
||||
#[serde(default)]
|
||||
pub write: bool,
|
||||
#[serde(default)]
|
||||
pub owner: bool,
|
||||
}
|
||||
|
||||
// ---- CreateKey ----
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CreateKeyRequest {
|
||||
pub name: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct CreateKeyResponse(pub GetKeyInfoResponse);
|
||||
|
||||
// ---- ImportKey ----
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ImportKeyRequest {
|
||||
pub access_key_id: String,
|
||||
pub secret_access_key: String,
|
||||
pub name: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ImportKeyResponse(pub GetKeyInfoResponse);
|
||||
|
||||
// ---- UpdateKey ----
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct UpdateKeyRequest {
|
||||
pub id: String,
|
||||
pub body: UpdateKeyRequestBody,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct UpdateKeyResponse(pub GetKeyInfoResponse);
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UpdateKeyRequestBody {
|
||||
pub name: Option<String>,
|
||||
pub allow: Option<KeyPerm>,
|
||||
pub deny: Option<KeyPerm>,
|
||||
}
|
||||
|
||||
// ---- DeleteKey ----
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct DeleteKeyRequest {
|
||||
pub id: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct DeleteKeyResponse;
|
||||
|
||||
// **********************************************
|
||||
// Bucket operations
|
||||
// **********************************************
|
||||
|
||||
// ---- ListBuckets ----
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ListBucketsRequest;
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ListBucketsResponse(pub Vec<ListBucketsResponseItem>);
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ListBucketsResponseItem {
|
||||
pub id: String,
|
||||
pub global_aliases: Vec<String>,
|
||||
pub local_aliases: Vec<BucketLocalAlias>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct BucketLocalAlias {
|
||||
pub access_key_id: String,
|
||||
pub alias: String,
|
||||
}
|
||||
|
||||
// ---- GetBucketInfo ----
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct GetBucketInfoRequest {
|
||||
pub id: Option<String>,
|
||||
pub global_alias: Option<String>,
|
||||
pub search: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct GetBucketInfoResponse {
|
||||
pub id: String,
|
||||
pub global_aliases: Vec<String>,
|
||||
pub website_access: bool,
|
||||
#[serde(default)]
|
||||
pub website_config: Option<GetBucketInfoWebsiteResponse>,
|
||||
pub keys: Vec<GetBucketInfoKey>,
|
||||
pub objects: i64,
|
||||
pub bytes: i64,
|
||||
pub unfinished_uploads: i64,
|
||||
pub unfinished_multipart_uploads: i64,
|
||||
pub unfinished_multipart_upload_parts: i64,
|
||||
pub unfinished_multipart_upload_bytes: i64,
|
||||
pub quotas: ApiBucketQuotas,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct GetBucketInfoWebsiteResponse {
|
||||
pub index_document: String,
|
||||
pub error_document: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct GetBucketInfoKey {
|
||||
pub access_key_id: String,
|
||||
pub name: String,
|
||||
pub permissions: ApiBucketKeyPerm,
|
||||
pub bucket_local_aliases: Vec<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ApiBucketQuotas {
|
||||
pub max_size: Option<u64>,
|
||||
pub max_objects: Option<u64>,
|
||||
}
|
||||
|
||||
// ---- CreateBucket ----
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CreateBucketRequest {
|
||||
pub global_alias: Option<String>,
|
||||
pub local_alias: Option<CreateBucketLocalAlias>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct CreateBucketResponse(pub GetBucketInfoResponse);
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CreateBucketLocalAlias {
|
||||
pub access_key_id: String,
|
||||
pub alias: String,
|
||||
#[serde(default)]
|
||||
pub allow: ApiBucketKeyPerm,
|
||||
}
|
||||
|
||||
// ---- UpdateBucket ----
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct UpdateBucketRequest {
|
||||
pub id: String,
|
||||
pub body: UpdateBucketRequestBody,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct UpdateBucketResponse(pub GetBucketInfoResponse);
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UpdateBucketRequestBody {
|
||||
pub website_access: Option<UpdateBucketWebsiteAccess>,
|
||||
pub quotas: Option<ApiBucketQuotas>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UpdateBucketWebsiteAccess {
|
||||
pub enabled: bool,
|
||||
pub index_document: Option<String>,
|
||||
pub error_document: Option<String>,
|
||||
}
|
||||
|
||||
// ---- DeleteBucket ----
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct DeleteBucketRequest {
|
||||
pub id: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct DeleteBucketResponse;
|
||||
|
||||
// ---- CleanupIncompleteUploads ----
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct CleanupIncompleteUploadsRequest {
|
||||
pub bucket_id: String,
|
||||
pub older_than_secs: u64,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct CleanupIncompleteUploadsResponse {
|
||||
pub uploads_deleted: u64,
|
||||
}
|
||||
|
||||
// **********************************************
|
||||
// Operations on permissions for keys on buckets
|
||||
// **********************************************
|
||||
|
||||
// ---- AllowBucketKey ----
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct AllowBucketKeyRequest(pub BucketKeyPermChangeRequest);
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct AllowBucketKeyResponse(pub GetBucketInfoResponse);
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct BucketKeyPermChangeRequest {
|
||||
pub bucket_id: String,
|
||||
pub access_key_id: String,
|
||||
pub permissions: ApiBucketKeyPerm,
|
||||
}
|
||||
|
||||
// ---- DenyBucketKey ----
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct DenyBucketKeyRequest(pub BucketKeyPermChangeRequest);
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct DenyBucketKeyResponse(pub GetBucketInfoResponse);
|
||||
|
||||
// **********************************************
|
||||
// Operations on bucket aliases
|
||||
// **********************************************
|
||||
|
||||
// ---- AddBucketAlias ----
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct AddBucketAliasRequest {
|
||||
pub bucket_id: String,
|
||||
#[serde(flatten)]
|
||||
pub alias: BucketAliasEnum,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct AddBucketAliasResponse(pub GetBucketInfoResponse);
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(untagged)]
|
||||
pub enum BucketAliasEnum {
|
||||
#[serde(rename_all = "camelCase")]
|
||||
Global { global_alias: String },
|
||||
#[serde(rename_all = "camelCase")]
|
||||
Local {
|
||||
local_alias: String,
|
||||
access_key_id: String,
|
||||
},
|
||||
}
|
||||
|
||||
// ---- RemoveBucketAlias ----
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RemoveBucketAliasRequest {
|
||||
pub bucket_id: String,
|
||||
#[serde(flatten)]
|
||||
pub alias: BucketAliasEnum,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct RemoveBucketAliasResponse(pub GetBucketInfoResponse);
|
||||
|
||||
// **********************************************
|
||||
// Node operations
|
||||
// **********************************************
|
||||
|
||||
// ---- CreateMetadataSnapshot ----
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
|
||||
pub struct LocalCreateMetadataSnapshotRequest;
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct LocalCreateMetadataSnapshotResponse;
|
||||
|
||||
// ---- GetNodeStatistics ----
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
|
||||
pub struct LocalGetNodeStatisticsRequest;
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct LocalGetNodeStatisticsResponse {
|
||||
pub freeform: String,
|
||||
}
|
||||
|
||||
// ---- GetClusterStatistics ----
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
|
||||
pub struct GetClusterStatisticsRequest;
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct GetClusterStatisticsResponse {
|
||||
pub freeform: String,
|
||||
}
|
||||
|
||||
// ---- LaunchRepairOperation ----
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct LocalLaunchRepairOperationRequest {
|
||||
pub repair_type: RepairType,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub enum RepairType {
|
||||
Tables,
|
||||
Blocks,
|
||||
Versions,
|
||||
MultipartUploads,
|
||||
BlockRefs,
|
||||
BlockRc,
|
||||
Rebalance,
|
||||
Scrub(ScrubCommand),
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum ScrubCommand {
    Start,
    Pause,
    Resume,
    Cancel,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LocalLaunchRepairOperationResponse;

// **********************************************
// Worker operations
// **********************************************

// ---- GetWorkerList ----

#[derive(Debug, Clone, Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
pub struct LocalListWorkersRequest {
    #[serde(default)]
    pub busy_only: bool,
    #[serde(default)]
    pub error_only: bool,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LocalListWorkersResponse(pub Vec<WorkerInfoResp>);

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct WorkerInfoResp {
    pub id: u64,
    pub name: String,
    pub state: WorkerStateResp,
    pub errors: u64,
    pub consecutive_errors: u64,
    pub last_error: Option<WorkerLastError>,
    pub tranquility: Option<u32>,
    pub progress: Option<String>,
    pub queue_length: Option<u64>,
    pub persistent_errors: Option<u64>,
    pub freeform: Vec<String>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum WorkerStateResp {
    Busy,
    Throttled { duration_secs: f32 },
    Idle,
    Done,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct WorkerLastError {
    pub message: String,
    pub secs_ago: u64,
}

// ---- GetWorkerInfo ----

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LocalGetWorkerInfoRequest {
    pub id: u64,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LocalGetWorkerInfoResponse(pub WorkerInfoResp);

// ---- GetWorkerVariable ----

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LocalGetWorkerVariableRequest {
    pub variable: Option<String>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LocalGetWorkerVariableResponse(pub HashMap<String, String>);

// ---- SetWorkerVariable ----

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LocalSetWorkerVariableRequest {
    pub variable: String,
    pub value: String,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LocalSetWorkerVariableResponse {
    pub variable: String,
    pub value: String,
}

// **********************************************
// Block operations
// **********************************************

// ---- ListBlockErrors ----

#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct LocalListBlockErrorsRequest;

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LocalListBlockErrorsResponse(pub Vec<BlockError>);

#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct BlockError {
    pub block_hash: String,
    pub refcount: u64,
    pub error_count: u64,
    pub last_try_secs_ago: u64,
    pub next_try_in_secs: u64,
}

// ---- GetBlockInfo ----

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct LocalGetBlockInfoRequest {
    pub block_hash: String,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct LocalGetBlockInfoResponse {
    pub block_hash: String,
    pub refcount: u64,
    pub versions: Vec<BlockVersion>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct BlockVersion {
    pub version_id: String,
    pub deleted: bool,
    pub garbage_collected: bool,
    pub backlink: Option<BlockVersionBacklink>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum BlockVersionBacklink {
    Object {
        bucket_id: String,
        key: String,
    },
    Upload {
        upload_id: String,
        upload_deleted: bool,
        upload_garbage_collected: bool,
        bucket_id: Option<String>,
        key: Option<String>,
    },
}

// ---- RetryBlockResync ----

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum LocalRetryBlockResyncRequest {
    #[serde(rename_all = "camelCase")]
    All { all: bool },
    #[serde(rename_all = "camelCase")]
    Blocks { block_hashes: Vec<String> },
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct LocalRetryBlockResyncResponse {
    pub count: u64,
}

// ---- PurgeBlocks ----

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct LocalPurgeBlocksRequest(pub Vec<String>);

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct LocalPurgeBlocksResponse {
    pub blocks_purged: u64,
    pub objects_deleted: u64,
    pub uploads_deleted: u64,
    pub versions_deleted: u64,
}
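// Since `LocalRetryBlockResyncRequest` is `#[serde(untagged)]`, its two
// variants are told apart purely by the shape of the JSON body. A minimal
// sketch of the accepted payloads (hypothetical test module, assuming
// `serde_json` is available as a dev-dependency):
#[cfg(test)]
mod retry_block_resync_request_shape {
    use super::*;

    #[test]
    fn accepts_both_shapes() {
        // `{"all": true}` deserializes to the `All` variant...
        let all: LocalRetryBlockResyncRequest =
            serde_json::from_str(r#"{"all": true}"#).unwrap();
        assert!(matches!(all, LocalRetryBlockResyncRequest::All { all: true }));

        // ...while `{"blockHashes": [...]}` (camelCase per the serde rename)
        // deserializes to the `Blocks` variant.
        let blocks: LocalRetryBlockResyncRequest =
            serde_json::from_str(r#"{"blockHashes": ["1234abcd"]}"#).unwrap();
        assert!(matches!(blocks, LocalRetryBlockResyncRequest::Blocks { .. }));
    }
}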
@@ -1,248 +1,162 @@
use std::collections::HashMap;
use std::borrow::Cow;
use std::sync::Arc;

use argon2::password_hash::PasswordHash;

use http::header::{ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN, ALLOW};
use hyper::{body::Incoming as IncomingBody, Request, Response, StatusCode};
use http::header::{HeaderValue, ACCESS_CONTROL_ALLOW_ORIGIN, AUTHORIZATION};
use hyper::{body::Incoming as IncomingBody, Request, Response};
use serde::{Deserialize, Serialize};
use tokio::sync::watch;

use opentelemetry::trace::SpanRef;

#[cfg(feature = "metrics")]
use opentelemetry_prometheus::PrometheusExporter;
#[cfg(feature = "metrics")]
use prometheus::{Encoder, TextEncoder};

use garage_model::garage::Garage;
use garage_rpc::system::ClusterHealthStatus;
use garage_rpc::{Endpoint as RpcEndpoint, *};
use garage_util::background::BackgroundRunner;
use garage_util::error::Error as GarageError;
use garage_util::socket_address::UnixOrTCPSocketAddress;

use garage_api_common::generic_server::*;
use garage_api_common::helpers::*;

use crate::bucket::*;
use crate::cluster::*;
use crate::api::*;
use crate::error::*;
use crate::key::*;
use crate::router_v0;
use crate::router_v1::{Authorization, Endpoint};
use crate::router_v1;
use crate::Authorization;
use crate::RequestHandler;

// ---- FOR RPC ----

pub const ADMIN_RPC_PATH: &str = "garage_api/admin/rpc.rs/Rpc";
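// Admin RPC messages: `Proxy` wraps a cluster-level `AdminApiRequest` that
// the receiving node executes and answers with a tagged response, while
// `Internal` wraps a node-local `LocalAdminApiRequest` executed on the
// receiving node itself (see the `EndpointHandler` impl below).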
#[derive(Debug, Serialize, Deserialize)]
pub enum AdminRpc {
    Proxy(AdminApiRequest),
    Internal(LocalAdminApiRequest),
}

#[derive(Debug, Serialize, Deserialize)]
pub enum AdminRpcResponse {
    ProxyApiOkResponse(TaggedAdminApiResponse),
    InternalApiOkResponse(LocalAdminApiResponse),
    ApiErrorResponse {
        http_code: u16,
        error_code: String,
        message: String,
    },
}

impl Rpc for AdminRpc {
    type Response = Result<AdminRpcResponse, GarageError>;
}

impl EndpointHandler<AdminRpc> for AdminApiServer {
    async fn handle(
        self: &Arc<Self>,
        message: &AdminRpc,
        _from: NodeID,
    ) -> Result<AdminRpcResponse, GarageError> {
        match message {
            AdminRpc::Proxy(req) => {
                info!("Proxied admin API request: {}", req.name());
                let res = req.clone().handle(&self.garage, &self).await;
                match res {
                    Ok(res) => Ok(AdminRpcResponse::ProxyApiOkResponse(res.tagged())),
                    Err(e) => Ok(AdminRpcResponse::ApiErrorResponse {
                        http_code: e.http_status_code().as_u16(),
                        error_code: e.code().to_string(),
                        message: e.to_string(),
                    }),
                }
            }
            AdminRpc::Internal(req) => {
                info!("Internal admin API request: {}", req.name());
                let res = req.clone().handle(&self.garage, &self).await;
                match res {
                    Ok(res) => Ok(AdminRpcResponse::InternalApiOkResponse(res)),
                    Err(e) => Ok(AdminRpcResponse::ApiErrorResponse {
                        http_code: e.http_status_code().as_u16(),
                        error_code: e.code().to_string(),
                        message: e.to_string(),
                    }),
                }
            }
        }
    }
}

// ---- FOR HTTP ----

pub type ResBody = BoxBody<Error>;

pub struct AdminApiServer {
    garage: Arc<Garage>,
    #[cfg(feature = "metrics")]
    exporter: PrometheusExporter,
    pub(crate) exporter: PrometheusExporter,
    metrics_token: Option<String>,
    admin_token: Option<String>,
    pub(crate) background: Arc<BackgroundRunner>,
    pub(crate) endpoint: Arc<RpcEndpoint<AdminRpc, Self>>,
}

pub enum HttpEndpoint {
    Old(router_v1::Endpoint),
    New(String),
}

impl AdminApiServer {
    pub fn new(
        garage: Arc<Garage>,
        background: Arc<BackgroundRunner>,
        #[cfg(feature = "metrics")] exporter: PrometheusExporter,
    ) -> Self {
    ) -> Arc<Self> {
        let cfg = &garage.config.admin;
        let metrics_token = cfg.metrics_token.as_deref().map(hash_bearer_token);
        let admin_token = cfg.admin_token.as_deref().map(hash_bearer_token);
        Self {

        let endpoint = garage.system.netapp.endpoint(ADMIN_RPC_PATH.into());
        let admin = Arc::new(Self {
            garage,
            #[cfg(feature = "metrics")]
            exporter,
            metrics_token,
            admin_token,
        }
            background,
            endpoint,
        });
        admin.endpoint.set_handler(admin.clone());
        admin
    }

    pub async fn run(
        self,
        self: Arc<Self>,
        bind_addr: UnixOrTCPSocketAddress,
        must_exit: watch::Receiver<bool>,
    ) -> Result<(), GarageError> {
        let region = self.garage.config.s3_api.s3_region.clone();
        ApiServer::new(region, self)
        ApiServer::new(region, ArcAdminApiServer(self))
            .run_server(bind_addr, Some(0o220), must_exit)
            .await
    }

    fn handle_options(&self, _req: &Request<IncomingBody>) -> Result<Response<ResBody>, Error> {
        Ok(Response::builder()
            .status(StatusCode::NO_CONTENT)
            .header(ALLOW, "OPTIONS, GET, POST")
            .header(ACCESS_CONTROL_ALLOW_METHODS, "OPTIONS, GET, POST")
            .header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
            .body(empty_body())?)
    }

    async fn handle_check_domain(
    async fn handle_http_api(
        &self,
        req: Request<IncomingBody>,
        endpoint: HttpEndpoint,
    ) -> Result<Response<ResBody>, Error> {
        let query_params: HashMap<String, String> = req
            .uri()
            .query()
            .map(|v| {
                url::form_urlencoded::parse(v.as_bytes())
                    .into_owned()
                    .collect()
            })
            .unwrap_or_else(HashMap::new);
        let auth_header = req.headers().get(AUTHORIZATION).cloned();

        let has_domain_key = query_params.contains_key("domain");

        if !has_domain_key {
            return Err(Error::bad_request("No domain query string found"));
        }

        let domain = query_params
            .get("domain")
            .ok_or_internal_error("Could not parse domain query string")?;

        if self.check_domain(domain).await? {
            Ok(Response::builder()
                .status(StatusCode::OK)
                .body(string_body(format!(
                    "Domain '{domain}' is managed by Garage"
                )))?)
        } else {
            Err(Error::bad_request(format!(
                "Domain '{domain}' is not managed by Garage"
            )))
        }
    }

    async fn check_domain(&self, domain: &str) -> Result<bool, Error> {
        // Resolve bucket from domain name, inferring if the website must be activated for the
        // domain to be valid.
        let (bucket_name, must_check_website) = if let Some(bname) = self
            .garage
            .config
            .s3_api
            .root_domain
            .as_ref()
            .and_then(|rd| host_to_bucket(domain, rd))
        {
            (bname.to_string(), false)
        } else if let Some(bname) = self
            .garage
            .config
            .s3_web
            .as_ref()
            .and_then(|sw| host_to_bucket(domain, sw.root_domain.as_str()))
        {
            (bname.to_string(), true)
        } else {
            (domain.to_string(), true)
        let request = match endpoint {
            HttpEndpoint::Old(endpoint_v1) => AdminApiRequest::from_v1(endpoint_v1, req).await?,
            HttpEndpoint::New(_) => AdminApiRequest::from_request(req).await?,
        };

        let bucket_id = match self
            .garage
            .bucket_helper()
            .resolve_global_bucket_name(&bucket_name)
            .await?
        {
            Some(bucket_id) => bucket_id,
            None => return Ok(false),
        };

        if !must_check_website {
            return Ok(true);
        }

        let bucket = self
            .garage
            .bucket_helper()
            .get_existing_bucket(bucket_id)
            .await?;

        let bucket_state = bucket.state.as_option().unwrap();
        let bucket_website_config = bucket_state.website_config.get();

        match bucket_website_config {
            Some(_v) => Ok(true),
            None => Ok(false),
        }
    }

    fn handle_health(&self) -> Result<Response<ResBody>, Error> {
        let health = self.garage.system.health();

        let (status, status_str) = match health.status {
            ClusterHealthStatus::Healthy => (StatusCode::OK, "Garage is fully operational"),
            ClusterHealthStatus::Degraded => (
                StatusCode::OK,
                "Garage is operational but some storage nodes are unavailable",
            ),
            ClusterHealthStatus::Unavailable => (
                StatusCode::SERVICE_UNAVAILABLE,
                "Quorum is not available for some/all partitions, reads and writes will fail",
            ),
        };
        let status_str = format!(
            "{}\nConsult the full health check API endpoint at /v1/health for more details\n",
            status_str
        );

        Ok(Response::builder()
            .status(status)
            .header(http::header::CONTENT_TYPE, "text/plain")
            .body(string_body(status_str))?)
    }

    fn handle_metrics(&self) -> Result<Response<ResBody>, Error> {
        #[cfg(feature = "metrics")]
        {
            use opentelemetry::trace::Tracer;

            let mut buffer = vec![];
            let encoder = TextEncoder::new();

            let tracer = opentelemetry::global::tracer("garage");
            let metric_families = tracer.in_span("admin/gather_metrics", |_| {
                self.exporter.registry().gather()
            });

            encoder
                .encode(&metric_families, &mut buffer)
                .ok_or_internal_error("Could not serialize metrics")?;

            Ok(Response::builder()
                .status(StatusCode::OK)
                .header(http::header::CONTENT_TYPE, encoder.format_type())
                .body(bytes_body(buffer.into()))?)
        }
        #[cfg(not(feature = "metrics"))]
        Err(Error::bad_request(
            "Garage was built without the metrics feature".to_string(),
        ))
    }
}

impl ApiHandler for AdminApiServer {
    const API_NAME: &'static str = "admin";
    const API_NAME_DISPLAY: &'static str = "Admin";

    type Endpoint = Endpoint;
    type Error = Error;

    fn parse_endpoint(&self, req: &Request<IncomingBody>) -> Result<Endpoint, Error> {
        if req.uri().path().starts_with("/v0/") {
            let endpoint_v0 = router_v0::Endpoint::from_request(req)?;
            Endpoint::from_v0(endpoint_v0)
        } else {
            Endpoint::from_request(req)
        }
    }

    async fn handle(
        &self,
        req: Request<IncomingBody>,
        endpoint: Endpoint,
    ) -> Result<Response<ResBody>, Error> {
        let required_auth_hash =
            match endpoint.authorization_type() {
            match request.authorization_type() {
                Authorization::None => None,
                Authorization::MetricsToken => self.metrics_token.as_deref(),
                Authorization::AdminToken => match self.admin_token.as_deref() {

@@ -254,7 +168,7 @@ impl ApiHandler for AdminApiServer {
            };

        if let Some(password_hash) = required_auth_hash {
            match req.headers().get("Authorization") {
            match auth_header {
                None => return Err(Error::forbidden("Authorization token must be provided")),
                Some(authorization) => {
                    verify_bearer_token(&authorization, password_hash)?;

@@ -262,72 +176,59 @@ impl ApiHandler for AdminApiServer {
            }
        }

        match endpoint {
            Endpoint::Options => self.handle_options(&req),
            Endpoint::CheckDomain => self.handle_check_domain(req).await,
            Endpoint::Health => self.handle_health(),
            Endpoint::Metrics => self.handle_metrics(),
            Endpoint::GetClusterStatus => handle_get_cluster_status(&self.garage).await,
            Endpoint::GetClusterHealth => handle_get_cluster_health(&self.garage).await,
            Endpoint::ConnectClusterNodes => handle_connect_cluster_nodes(&self.garage, req).await,
            // Layout
            Endpoint::GetClusterLayout => handle_get_cluster_layout(&self.garage).await,
            Endpoint::UpdateClusterLayout => handle_update_cluster_layout(&self.garage, req).await,
            Endpoint::ApplyClusterLayout => handle_apply_cluster_layout(&self.garage, req).await,
            Endpoint::RevertClusterLayout => handle_revert_cluster_layout(&self.garage).await,
            // Keys
            Endpoint::ListKeys => handle_list_keys(&self.garage).await,
            Endpoint::GetKeyInfo {
                id,
                search,
                show_secret_key,
            } => {
                let show_secret_key = show_secret_key.map(|x| x == "true").unwrap_or(false);
                handle_get_key_info(&self.garage, id, search, show_secret_key).await
        match request {
            AdminApiRequest::Options(req) => req.handle(&self.garage, &self).await,
            AdminApiRequest::CheckDomain(req) => req.handle(&self.garage, &self).await,
            AdminApiRequest::Health(req) => req.handle(&self.garage, &self).await,
            AdminApiRequest::Metrics(req) => req.handle(&self.garage, &self).await,
            req => {
                let res = req.handle(&self.garage, &self).await?;
                let mut res = json_ok_response(&res)?;
                res.headers_mut()
                    .insert(ACCESS_CONTROL_ALLOW_ORIGIN, HeaderValue::from_static("*"));
                Ok(res)
            }
            Endpoint::CreateKey => handle_create_key(&self.garage, req).await,
            Endpoint::ImportKey => handle_import_key(&self.garage, req).await,
            Endpoint::UpdateKey { id } => handle_update_key(&self.garage, id, req).await,
            Endpoint::DeleteKey { id } => handle_delete_key(&self.garage, id).await,
            // Buckets
            Endpoint::ListBuckets => handle_list_buckets(&self.garage).await,
            Endpoint::GetBucketInfo { id, global_alias } => {
                handle_get_bucket_info(&self.garage, id, global_alias).await
            }
            Endpoint::CreateBucket => handle_create_bucket(&self.garage, req).await,
            Endpoint::DeleteBucket { id } => handle_delete_bucket(&self.garage, id).await,
            Endpoint::UpdateBucket { id } => handle_update_bucket(&self.garage, id, req).await,
            // Bucket-key permissions
            Endpoint::BucketAllowKey => {
                handle_bucket_change_key_perm(&self.garage, req, true).await
            }
            Endpoint::BucketDenyKey => {
                handle_bucket_change_key_perm(&self.garage, req, false).await
            }
            // Bucket aliasing
            Endpoint::GlobalAliasBucket { id, alias } => {
                handle_global_alias_bucket(&self.garage, id, alias).await
            }
            Endpoint::GlobalUnaliasBucket { id, alias } => {
                handle_global_unalias_bucket(&self.garage, id, alias).await
            }
            Endpoint::LocalAliasBucket {
                id,
                access_key_id,
                alias,
            } => handle_local_alias_bucket(&self.garage, id, access_key_id, alias).await,
            Endpoint::LocalUnaliasBucket {
                id,
                access_key_id,
                alias,
            } => handle_local_unalias_bucket(&self.garage, id, access_key_id, alias).await,
        }
    }
}

impl ApiEndpoint for Endpoint {
    fn name(&self) -> &'static str {
        Endpoint::name(self)
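// A plausible reading of this newtype: the generic HTTP server consumes its
// handler by value, while `AdminApiServer` itself is shared as an `Arc` with
// the RPC endpoint, so the `ApiHandler` impl lives on the wrapper and
// delegates to the inner `Arc<AdminApiServer>`.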
struct ArcAdminApiServer(Arc<AdminApiServer>);

impl ApiHandler for ArcAdminApiServer {
    const API_NAME: &'static str = "admin";
    const API_NAME_DISPLAY: &'static str = "Admin";

    type Endpoint = HttpEndpoint;
    type Error = Error;
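    // Version routing: legacy /v0/ endpoints are upgraded to their /v1/
    // equivalents, /v1/ endpoints are parsed directly, and any other path is
    // handed as-is to the new request parser.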
    fn parse_endpoint(&self, req: &Request<IncomingBody>) -> Result<HttpEndpoint, Error> {
        if req.uri().path().starts_with("/v0/") {
            let endpoint_v0 = router_v0::Endpoint::from_request(req)?;
            let endpoint_v1 = router_v1::Endpoint::from_v0(endpoint_v0)?;
            Ok(HttpEndpoint::Old(endpoint_v1))
        } else if req.uri().path().starts_with("/v1/") {
            let endpoint_v1 = router_v1::Endpoint::from_request(req)?;
            Ok(HttpEndpoint::Old(endpoint_v1))
        } else {
            Ok(HttpEndpoint::New(req.uri().path().to_string()))
        }
    }

    async fn handle(
        &self,
        req: Request<IncomingBody>,
        endpoint: HttpEndpoint,
    ) -> Result<Response<ResBody>, Error> {
        self.0.handle_http_api(req, endpoint).await
    }
}

impl ApiEndpoint for HttpEndpoint {
    fn name(&self) -> Cow<'static, str> {
        match self {
            Self::Old(endpoint_v1) => Cow::Borrowed(endpoint_v1.name()),
            Self::New(path) => Cow::Owned(path.clone()),
        }
    }

    fn add_span_attributes(&self, _span: SpanRef<'_>) {}
274 src/api/admin/block.rs Normal file
@@ -0,0 +1,274 @@
use std::sync::Arc;

use garage_util::data::*;
use garage_util::error::Error as GarageError;
use garage_util::time::now_msec;

use garage_table::EmptyKey;

use garage_model::garage::Garage;
use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*;

use garage_api_common::common_error::CommonErrorDerivative;

use crate::api::*;
use crate::error::*;
use crate::{Admin, RequestHandler};

impl RequestHandler for LocalListBlockErrorsRequest {
    type Response = LocalListBlockErrorsResponse;

    async fn handle(
        self,
        garage: &Arc<Garage>,
        _admin: &Admin,
    ) -> Result<LocalListBlockErrorsResponse, Error> {
        let errors = garage.block_manager.list_resync_errors()?;
        let now = now_msec();
        let errors = errors
            .into_iter()
            .map(|e| BlockError {
                block_hash: hex::encode(&e.hash),
                refcount: e.refcount,
                error_count: e.error_count,
                last_try_secs_ago: now.saturating_sub(e.last_try) / 1000,
                next_try_in_secs: e.next_try.saturating_sub(now) / 1000,
            })
            .collect();
        Ok(LocalListBlockErrorsResponse(errors))
    }
}

impl RequestHandler for LocalGetBlockInfoRequest {
    type Response = LocalGetBlockInfoResponse;

    async fn handle(
        self,
        garage: &Arc<Garage>,
        _admin: &Admin,
    ) -> Result<LocalGetBlockInfoResponse, Error> {
        let hash = find_block_hash_by_prefix(garage, &self.block_hash)?;
        let refcount = garage.block_manager.get_block_rc(&hash)?;
        let block_refs = garage
            .block_ref_table
            .get_range(&hash, None, None, 10000, Default::default())
            .await?;
        let mut versions = vec![];
        for br in block_refs {
            if let Some(v) = garage.version_table.get(&br.version, &EmptyKey).await? {
                let bl = match &v.backlink {
                    VersionBacklink::MultipartUpload { upload_id } => {
                        if let Some(u) = garage.mpu_table.get(upload_id, &EmptyKey).await? {
                            BlockVersionBacklink::Upload {
                                upload_id: hex::encode(&upload_id),
                                upload_deleted: u.deleted.get(),
                                upload_garbage_collected: false,
                                bucket_id: Some(hex::encode(&u.bucket_id)),
                                key: Some(u.key.to_string()),
                            }
                        } else {
                            BlockVersionBacklink::Upload {
                                upload_id: hex::encode(&upload_id),
                                upload_deleted: true,
                                upload_garbage_collected: true,
                                bucket_id: None,
                                key: None,
                            }
                        }
                    }
                    VersionBacklink::Object { bucket_id, key } => BlockVersionBacklink::Object {
                        bucket_id: hex::encode(&bucket_id),
                        key: key.to_string(),
                    },
                };
                versions.push(BlockVersion {
                    version_id: hex::encode(&br.version),
                    deleted: v.deleted.get(),
                    garbage_collected: false,
                    backlink: Some(bl),
                });
            } else {
                versions.push(BlockVersion {
                    version_id: hex::encode(&br.version),
                    deleted: true,
                    garbage_collected: true,
                    backlink: None,
                });
            }
        }
        Ok(LocalGetBlockInfoResponse {
            block_hash: hex::encode(&hash),
            refcount,
            versions,
        })
    }
}

impl RequestHandler for LocalRetryBlockResyncRequest {
    type Response = LocalRetryBlockResyncResponse;

    async fn handle(
        self,
        garage: &Arc<Garage>,
        _admin: &Admin,
    ) -> Result<LocalRetryBlockResyncResponse, Error> {
        match self {
            Self::All { all: true } => {
                let blocks = garage.block_manager.list_resync_errors()?;
                for b in blocks.iter() {
                    garage.block_manager.resync.clear_backoff(&b.hash)?;
                }
                Ok(LocalRetryBlockResyncResponse {
                    count: blocks.len() as u64,
                })
            }
            Self::All { all: false } => Err(Error::bad_request("nonsense")),
            Self::Blocks { block_hashes } => {
                for hash in block_hashes.iter() {
                    let hash = hex::decode(hash).ok_or_bad_request("invalid hash")?;
                    let hash = Hash::try_from(&hash).ok_or_bad_request("invalid hash")?;
                    garage.block_manager.resync.clear_backoff(&hash)?;
                }
                Ok(LocalRetryBlockResyncResponse {
                    count: block_hashes.len() as u64,
                })
            }
        }
    }
}

impl RequestHandler for LocalPurgeBlocksRequest {
    type Response = LocalPurgeBlocksResponse;

    async fn handle(
        self,
        garage: &Arc<Garage>,
        _admin: &Admin,
    ) -> Result<LocalPurgeBlocksResponse, Error> {
        let mut obj_dels = 0;
        let mut mpu_dels = 0;
        let mut ver_dels = 0;

        for hash in self.0.iter() {
            let hash = hex::decode(hash).ok_or_bad_request("invalid hash")?;
            let hash = Hash::try_from(&hash).ok_or_bad_request("invalid hash")?;
            let block_refs = garage
                .block_ref_table
                .get_range(&hash, None, None, 10000, Default::default())
                .await?;

            for br in block_refs {
                if let Some(version) = garage.version_table.get(&br.version, &EmptyKey).await? {
                    handle_block_purge_version_backlink(
                        garage,
                        &version,
                        &mut obj_dels,
                        &mut mpu_dels,
                    )
                    .await?;

                    if !version.deleted.get() {
                        let deleted_version = Version::new(version.uuid, version.backlink, true);
                        garage.version_table.insert(&deleted_version).await?;
                        ver_dels += 1;
                    }
                }
            }
        }

        Ok(LocalPurgeBlocksResponse {
            blocks_purged: self.0.len() as u64,
            versions_deleted: ver_dels,
            objects_deleted: obj_dels,
            uploads_deleted: mpu_dels,
        })
    }
}

fn find_block_hash_by_prefix(garage: &Arc<Garage>, prefix: &str) -> Result<Hash, Error> {
    if prefix.len() < 4 {
        return Err(Error::bad_request(
            "Please specify at least 4 characters of the block hash",
        ));
    }
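    // Note: `prefix.len() & !1` rounds the prefix length down to an even
    // number of hex digits so the prefix can be decoded into whole bytes for
    // the range scan; the full (possibly odd-length) prefix is then matched
    // against the hex-encoded hash below.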
    let prefix_bin = hex::decode(&prefix[..prefix.len() & !1]).ok_or_bad_request("invalid hash")?;

    let iter = garage
        .block_ref_table
        .data
        .store
        .range(&prefix_bin[..]..)
        .map_err(GarageError::from)?;
    let mut found = None;
    for item in iter {
        let (k, _v) = item.map_err(GarageError::from)?;
        let hash = Hash::try_from(&k[..32]).unwrap();
        if &hash.as_slice()[..prefix_bin.len()] != prefix_bin {
            break;
        }
        if hex::encode(hash.as_slice()).starts_with(prefix) {
            match &found {
                Some(x) if *x == hash => (),
                Some(_) => {
                    return Err(Error::bad_request(format!(
                        "Several blocks match prefix `{}`",
                        prefix
                    )));
                }
                None => {
                    found = Some(hash);
                }
            }
        }
    }

    found.ok_or_else(|| Error::NoSuchBlock(prefix.to_string()))
}
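// Follows the backlink of a version being purged: the owning multipart
// upload, if any, is marked as deleted, and if this version is the latest
// complete version of its object, the object is overwritten with a delete
// marker.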
async fn handle_block_purge_version_backlink(
    garage: &Arc<Garage>,
    version: &Version,
    obj_dels: &mut u64,
    mpu_dels: &mut u64,
) -> Result<(), Error> {
    let (bucket_id, key, ov_id) = match &version.backlink {
        VersionBacklink::Object { bucket_id, key } => (*bucket_id, key.clone(), version.uuid),
        VersionBacklink::MultipartUpload { upload_id } => {
            if let Some(mut mpu) = garage.mpu_table.get(upload_id, &EmptyKey).await? {
                if !mpu.deleted.get() {
                    mpu.parts.clear();
                    mpu.deleted.set();
                    garage.mpu_table.insert(&mpu).await?;
                    *mpu_dels += 1;
                }
                (mpu.bucket_id, mpu.key.clone(), *upload_id)
            } else {
                return Ok(());
            }
        }
    };

    if let Some(object) = garage.object_table.get(&bucket_id, &key).await? {
        let ov = object.versions().iter().rev().find(|v| v.is_complete());
        if let Some(ov) = ov {
            if ov.uuid == ov_id {
                let del_uuid = gen_uuid();
                let deleted_object = Object::new(
                    bucket_id,
                    key,
                    vec![ObjectVersion {
                        uuid: del_uuid,
                        timestamp: ov.timestamp + 1,
                        state: ObjectVersionState::Complete(ObjectVersionData::DeleteMarker),
                    }],
                );
                garage.object_table.insert(&deleted_object).await?;
                *obj_dels += 1;
            }
        }
    }

    Ok(())
}
@@ -1,8 +1,6 @@
use std::collections::HashMap;
use std::sync::Arc;

use hyper::{body::Incoming as IncomingBody, Request, Response, StatusCode};
use serde::{Deserialize, Serialize};
use std::time::Duration;

use garage_util::crdt::*;
use garage_util::data::*;

@@ -18,102 +16,97 @@ use garage_model::s3::mpu_table;
use garage_model::s3::object_table::*;

use garage_api_common::common_error::CommonError;
use garage_api_common::helpers::*;

use crate::api_server::ResBody;
use crate::api::*;
use crate::error::*;
use crate::key::ApiBucketKeyPerm;
use crate::{Admin, RequestHandler};

pub async fn handle_list_buckets(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
    let buckets = garage
        .bucket_table
        .get_range(
            &EmptyKey,
            None,
            Some(DeletedFilter::NotDeleted),
            10000,
            EnumerationOrder::Forward,
        )
        .await?;
impl RequestHandler for ListBucketsRequest {
    type Response = ListBucketsResponse;

    let res = buckets
        .into_iter()
        .map(|b| {
            let state = b.state.as_option().unwrap();
            ListBucketResultItem {
                id: hex::encode(b.id),
                global_aliases: state
                    .aliases
                    .items()
                    .iter()
                    .filter(|(_, _, a)| *a)
                    .map(|(n, _, _)| n.to_string())
                    .collect::<Vec<_>>(),
                local_aliases: state
                    .local_aliases
                    .items()
                    .iter()
                    .filter(|(_, _, a)| *a)
                    .map(|((k, n), _, _)| BucketLocalAlias {
                        access_key_id: k.to_string(),
                        alias: n.to_string(),
                    })
                    .collect::<Vec<_>>(),
    async fn handle(
        self,
        garage: &Arc<Garage>,
        _admin: &Admin,
    ) -> Result<ListBucketsResponse, Error> {
        let buckets = garage
            .bucket_table
            .get_range(
                &EmptyKey,
                None,
                Some(DeletedFilter::NotDeleted),
                10000,
                EnumerationOrder::Forward,
            )
            .await?;

        let res = buckets
            .into_iter()
            .map(|b| {
                let state = b.state.as_option().unwrap();
                ListBucketsResponseItem {
                    id: hex::encode(b.id),
                    global_aliases: state
                        .aliases
                        .items()
                        .iter()
                        .filter(|(_, _, a)| *a)
                        .map(|(n, _, _)| n.to_string())
                        .collect::<Vec<_>>(),
                    local_aliases: state
                        .local_aliases
                        .items()
                        .iter()
                        .filter(|(_, _, a)| *a)
                        .map(|((k, n), _, _)| BucketLocalAlias {
                            access_key_id: k.to_string(),
                            alias: n.to_string(),
                        })
                        .collect::<Vec<_>>(),
                }
            })
            .collect::<Vec<_>>();

        Ok(ListBucketsResponse(res))
    }
}

impl RequestHandler for GetBucketInfoRequest {
    type Response = GetBucketInfoResponse;

    async fn handle(
        self,
        garage: &Arc<Garage>,
        _admin: &Admin,
    ) -> Result<GetBucketInfoResponse, Error> {
        let bucket_id = match (self.id, self.global_alias, self.search) {
            (Some(id), None, None) => parse_bucket_id(&id)?,
            (None, Some(ga), None) => garage
                .bucket_helper()
                .resolve_global_bucket_name(&ga)
                .await?
                .ok_or_else(|| HelperError::NoSuchBucket(ga.to_string()))?,
            (None, None, Some(search)) => {
                garage
                    .bucket_helper()
                    .admin_get_existing_matching_bucket(&search)
                    .await?
            }
        })
        .collect::<Vec<_>>();
            _ => {
                return Err(Error::bad_request(
                    "Either id, globalAlias or search must be provided (but not several of them)",
                ));
            }
        };

    Ok(json_ok_response(&res)?)
}

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct ListBucketResultItem {
    id: String,
    global_aliases: Vec<String>,
    local_aliases: Vec<BucketLocalAlias>,
}

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct BucketLocalAlias {
    access_key_id: String,
    alias: String,
}

#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct ApiBucketQuotas {
    max_size: Option<u64>,
    max_objects: Option<u64>,
}

pub async fn handle_get_bucket_info(
    garage: &Arc<Garage>,
    id: Option<String>,
    global_alias: Option<String>,
) -> Result<Response<ResBody>, Error> {
    let bucket_id = match (id, global_alias) {
        (Some(id), None) => parse_bucket_id(&id)?,
        (None, Some(ga)) => garage
            .bucket_helper()
            .resolve_global_bucket_name(&ga)
            .await?
            .ok_or_else(|| HelperError::NoSuchBucket(ga.to_string()))?,
        _ => {
            return Err(Error::bad_request(
                "Either id or globalAlias must be provided (but not both)",
            ));
        }
    };

    bucket_info_results(garage, bucket_id).await
        bucket_info_results(garage, bucket_id).await
    }
}
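// Shared helper: GetBucketInfo, CreateBucket, UpdateBucket and the alias and
// permission handlers all return the full bucket description produced here.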
async fn bucket_info_results(
    garage: &Arc<Garage>,
    bucket_id: Uuid,
) -> Result<Response<ResBody>, Error> {
) -> Result<GetBucketInfoResponse, Error> {
    let bucket = garage
        .bucket_helper()
        .get_existing_bucket(bucket_id)

@@ -176,301 +169,297 @@ async fn bucket_info_results(
    let state = bucket.state.as_option().unwrap();

    let quotas = state.quotas.get();
    let res =
        GetBucketInfoResult {
            id: hex::encode(bucket.id),
            global_aliases: state
                .aliases
                .items()
                .iter()
                .filter(|(_, _, a)| *a)
                .map(|(n, _, _)| n.to_string())
                .collect::<Vec<_>>(),
            website_access: state.website_config.get().is_some(),
            website_config: state.website_config.get().clone().map(|wsc| {
                GetBucketInfoWebsiteResult {
                    index_document: wsc.index_document,
                    error_document: wsc.error_document,
    let res = GetBucketInfoResponse {
        id: hex::encode(bucket.id),
        global_aliases: state
            .aliases
            .items()
            .iter()
            .filter(|(_, _, a)| *a)
            .map(|(n, _, _)| n.to_string())
            .collect::<Vec<_>>(),
        website_access: state.website_config.get().is_some(),
        website_config: state.website_config.get().clone().map(|wsc| {
            GetBucketInfoWebsiteResponse {
                index_document: wsc.index_document,
                error_document: wsc.error_document,
            }
        }),
        keys: relevant_keys
            .into_values()
            .map(|key| {
                let p = key.state.as_option().unwrap();
                GetBucketInfoKey {
                    access_key_id: key.key_id,
                    name: p.name.get().to_string(),
                    permissions: p
                        .authorized_buckets
                        .get(&bucket.id)
                        .map(|p| ApiBucketKeyPerm {
                            read: p.allow_read,
                            write: p.allow_write,
                            owner: p.allow_owner,
                        })
                        .unwrap_or_default(),
                    bucket_local_aliases: p
                        .local_aliases
                        .items()
                        .iter()
                        .filter(|(_, _, b)| *b == Some(bucket.id))
                        .map(|(n, _, _)| n.to_string())
                        .collect::<Vec<_>>(),
                }
            }),
            keys: relevant_keys
                .into_values()
                .map(|key| {
                    let p = key.state.as_option().unwrap();
                    GetBucketInfoKey {
                        access_key_id: key.key_id,
                        name: p.name.get().to_string(),
                        permissions: p
                            .authorized_buckets
                            .get(&bucket.id)
                            .map(|p| ApiBucketKeyPerm {
                                read: p.allow_read,
                                write: p.allow_write,
                                owner: p.allow_owner,
                            })
                            .unwrap_or_default(),
                        bucket_local_aliases: p
                            .local_aliases
                            .items()
                            .iter()
                            .filter(|(_, _, b)| *b == Some(bucket.id))
                            .map(|(n, _, _)| n.to_string())
                            .collect::<Vec<_>>(),
                    }
                })
                .collect::<Vec<_>>(),
            objects: *counters.get(OBJECTS).unwrap_or(&0),
            bytes: *counters.get(BYTES).unwrap_or(&0),
            unfinished_uploads: *counters.get(UNFINISHED_UPLOADS).unwrap_or(&0),
            unfinished_multipart_uploads: *mpu_counters.get(mpu_table::UPLOADS).unwrap_or(&0),
            unfinished_multipart_upload_parts: *mpu_counters.get(mpu_table::PARTS).unwrap_or(&0),
            unfinished_multipart_upload_bytes: *mpu_counters.get(mpu_table::BYTES).unwrap_or(&0),
            quotas: ApiBucketQuotas {
                max_size: quotas.max_size,
                max_objects: quotas.max_objects,
            },
        };
            })
            .collect::<Vec<_>>(),
        objects: *counters.get(OBJECTS).unwrap_or(&0),
        bytes: *counters.get(BYTES).unwrap_or(&0),
        unfinished_uploads: *counters.get(UNFINISHED_UPLOADS).unwrap_or(&0),
        unfinished_multipart_uploads: *mpu_counters.get(mpu_table::UPLOADS).unwrap_or(&0),
        unfinished_multipart_upload_parts: *mpu_counters.get(mpu_table::PARTS).unwrap_or(&0),
        unfinished_multipart_upload_bytes: *mpu_counters.get(mpu_table::BYTES).unwrap_or(&0),
        quotas: ApiBucketQuotas {
            max_size: quotas.max_size,
            max_objects: quotas.max_objects,
        },
    };

    Ok(json_ok_response(&res)?)
    Ok(res)
}

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct GetBucketInfoResult {
    id: String,
    global_aliases: Vec<String>,
    website_access: bool,
    #[serde(default)]
    website_config: Option<GetBucketInfoWebsiteResult>,
    keys: Vec<GetBucketInfoKey>,
    objects: i64,
    bytes: i64,
    unfinished_uploads: i64,
    unfinished_multipart_uploads: i64,
    unfinished_multipart_upload_parts: i64,
    unfinished_multipart_upload_bytes: i64,
    quotas: ApiBucketQuotas,
}
impl RequestHandler for CreateBucketRequest {
    type Response = CreateBucketResponse;

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct GetBucketInfoWebsiteResult {
    index_document: String,
    error_document: Option<String>,
}
    async fn handle(
        self,
        garage: &Arc<Garage>,
        _admin: &Admin,
    ) -> Result<CreateBucketResponse, Error> {
        let helper = garage.locked_helper().await;

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct GetBucketInfoKey {
    access_key_id: String,
    name: String,
    permissions: ApiBucketKeyPerm,
    bucket_local_aliases: Vec<String>,
}
        if let Some(ga) = &self.global_alias {
            if !is_valid_bucket_name(ga) {
                return Err(Error::bad_request(format!(
                    "{}: {}",
                    ga, INVALID_BUCKET_NAME_MESSAGE
                )));
            }

pub async fn handle_create_bucket(
    garage: &Arc<Garage>,
    req: Request<IncomingBody>,
) -> Result<Response<ResBody>, Error> {
    let req = parse_json_body::<CreateBucketRequest, _, Error>(req).await?;

    let helper = garage.locked_helper().await;

    if let Some(ga) = &req.global_alias {
        if !is_valid_bucket_name(ga) {
            return Err(Error::bad_request(format!(
                "{}: {}",
                ga, INVALID_BUCKET_NAME_MESSAGE
            )));
        }

        if let Some(alias) = garage.bucket_alias_table.get(&EmptyKey, ga).await? {
            if alias.state.get().is_some() {
                return Err(CommonError::BucketAlreadyExists.into());
            if let Some(alias) = garage.bucket_alias_table.get(&EmptyKey, ga).await? {
                if alias.state.get().is_some() {
                    return Err(CommonError::BucketAlreadyExists.into());
                }
            }
        }
    }

    if let Some(la) = &req.local_alias {
        if !is_valid_bucket_name(&la.alias) {
            return Err(Error::bad_request(format!(
                "{}: {}",
                la.alias, INVALID_BUCKET_NAME_MESSAGE
            )));
        if let Some(la) = &self.local_alias {
            if !is_valid_bucket_name(&la.alias) {
                return Err(Error::bad_request(format!(
                    "{}: {}",
                    la.alias, INVALID_BUCKET_NAME_MESSAGE
                )));
            }

            let key = helper.key().get_existing_key(&la.access_key_id).await?;
            let state = key.state.as_option().unwrap();
            if matches!(state.local_aliases.get(&la.alias), Some(_)) {
                return Err(Error::bad_request("Local alias already exists"));
            }
        }

        let key = helper.key().get_existing_key(&la.access_key_id).await?;
        let state = key.state.as_option().unwrap();
        if matches!(state.local_aliases.get(&la.alias), Some(_)) {
            return Err(Error::bad_request("Local alias already exists"));
        let bucket = Bucket::new();
        garage.bucket_table.insert(&bucket).await?;

        if let Some(ga) = &self.global_alias {
            helper.set_global_bucket_alias(bucket.id, ga).await?;
        }

        if let Some(la) = &self.local_alias {
            helper
                .set_local_bucket_alias(bucket.id, &la.access_key_id, &la.alias)
                .await?;

            if la.allow.read || la.allow.write || la.allow.owner {
                helper
                    .set_bucket_key_permissions(
                        bucket.id,
                        &la.access_key_id,
                        BucketKeyPerm {
                            timestamp: now_msec(),
                            allow_read: la.allow.read,
                            allow_write: la.allow.write,
                            allow_owner: la.allow.owner,
                        },
                    )
                    .await?;
            }
        }

        Ok(CreateBucketResponse(
            bucket_info_results(garage, bucket.id).await?,
        ))
    }
}

    let bucket = Bucket::new();
    garage.bucket_table.insert(&bucket).await?;
impl RequestHandler for DeleteBucketRequest {
    type Response = DeleteBucketResponse;

    if let Some(ga) = &req.global_alias {
        helper.set_global_bucket_alias(bucket.id, ga).await?;
    async fn handle(
        self,
        garage: &Arc<Garage>,
        _admin: &Admin,
    ) -> Result<DeleteBucketResponse, Error> {
        let helper = garage.locked_helper().await;

        let bucket_id = parse_bucket_id(&self.id)?;

        let mut bucket = helper.bucket().get_existing_bucket(bucket_id).await?;
        let state = bucket.state.as_option().unwrap();

        // Check bucket is empty
        if !helper.bucket().is_bucket_empty(bucket_id).await? {
            return Err(CommonError::BucketNotEmpty.into());
        }

        // --- done checking, now commit ---
        // 1. delete authorization from keys that had access
        for (key_id, perm) in bucket.authorized_keys() {
            if perm.is_any() {
                helper
                    .set_bucket_key_permissions(bucket.id, key_id, BucketKeyPerm::NO_PERMISSIONS)
                    .await?;
            }
        }
        // 2. delete all local aliases
        for ((key_id, alias), _, active) in state.local_aliases.items().iter() {
            if *active {
                helper
                    .unset_local_bucket_alias(bucket.id, key_id, alias)
                    .await?;
            }
        }
        // 3. delete all global aliases
        for (alias, _, active) in state.aliases.items().iter() {
            if *active {
                helper.purge_global_bucket_alias(bucket.id, alias).await?;
            }
        }

        // 4. delete bucket
        bucket.state = Deletable::delete();
        garage.bucket_table.insert(&bucket).await?;

        Ok(DeleteBucketResponse)
    }
}

    if let Some(la) = &req.local_alias {
        helper
            .set_local_bucket_alias(bucket.id, &la.access_key_id, &la.alias)
impl RequestHandler for UpdateBucketRequest {
    type Response = UpdateBucketResponse;

    async fn handle(
        self,
        garage: &Arc<Garage>,
        _admin: &Admin,
    ) -> Result<UpdateBucketResponse, Error> {
        let bucket_id = parse_bucket_id(&self.id)?;

        let mut bucket = garage
            .bucket_helper()
            .get_existing_bucket(bucket_id)
            .await?;

        if la.allow.read || la.allow.write || la.allow.owner {
            helper
                .set_bucket_key_permissions(
                    bucket.id,
                    &la.access_key_id,
                    BucketKeyPerm {
                        timestamp: now_msec(),
                        allow_read: la.allow.read,
                        allow_write: la.allow.write,
                        allow_owner: la.allow.owner,
                    },
                )
                .await?;
        }
    }
        let state = bucket.state.as_option_mut().unwrap();

    bucket_info_results(garage, bucket.id).await
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct CreateBucketRequest {
    global_alias: Option<String>,
    local_alias: Option<CreateBucketLocalAlias>,
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct CreateBucketLocalAlias {
    access_key_id: String,
    alias: String,
    #[serde(default)]
    allow: ApiBucketKeyPerm,
}

pub async fn handle_delete_bucket(
    garage: &Arc<Garage>,
    id: String,
) -> Result<Response<ResBody>, Error> {
    let helper = garage.locked_helper().await;

    let bucket_id = parse_bucket_id(&id)?;

    let mut bucket = helper.bucket().get_existing_bucket(bucket_id).await?;
    let state = bucket.state.as_option().unwrap();

    // Check bucket is empty
    if !helper.bucket().is_bucket_empty(bucket_id).await? {
        return Err(CommonError::BucketNotEmpty.into());
    }

    // --- done checking, now commit ---
    // 1. delete authorization from keys that had access
    for (key_id, perm) in bucket.authorized_keys() {
        if perm.is_any() {
            helper
                .set_bucket_key_permissions(bucket.id, key_id, BucketKeyPerm::NO_PERMISSIONS)
                .await?;
        }
    }
    // 2. delete all local aliases
    for ((key_id, alias), _, active) in state.local_aliases.items().iter() {
        if *active {
            helper
                .unset_local_bucket_alias(bucket.id, key_id, alias)
                .await?;
        }
    }
    // 3. delete all global aliases
    for (alias, _, active) in state.aliases.items().iter() {
        if *active {
            helper.purge_global_bucket_alias(bucket.id, alias).await?;
        }
    }

    // 4. delete bucket
    bucket.state = Deletable::delete();
    garage.bucket_table.insert(&bucket).await?;

    Ok(Response::builder()
        .status(StatusCode::NO_CONTENT)
        .body(empty_body())?)
}

pub async fn handle_update_bucket(
    garage: &Arc<Garage>,
    id: String,
    req: Request<IncomingBody>,
) -> Result<Response<ResBody>, Error> {
    let req = parse_json_body::<UpdateBucketRequest, _, Error>(req).await?;
    let bucket_id = parse_bucket_id(&id)?;

    let mut bucket = garage
        .bucket_helper()
        .get_existing_bucket(bucket_id)
        .await?;

    let state = bucket.state.as_option_mut().unwrap();

    if let Some(wa) = req.website_access {
        if wa.enabled {
            state.website_config.update(Some(WebsiteConfig {
                index_document: wa.index_document.ok_or_bad_request(
                    "Please specify indexDocument when enabling website access.",
                )?,
                error_document: wa.error_document,
            }));
        } else {
            if wa.index_document.is_some() || wa.error_document.is_some() {
                return Err(Error::bad_request(
                    "Cannot specify indexDocument or errorDocument when disabling website access.",
                ));
        if let Some(wa) = self.body.website_access {
            if wa.enabled {
                state.website_config.update(Some(WebsiteConfig {
                    index_document: wa.index_document.ok_or_bad_request(
                        "Please specify indexDocument when enabling website access.",
                    )?,
                    error_document: wa.error_document,
                    redirect_all: None,
                    routing_rules: Vec::new(),
                }));
            } else {
                if wa.index_document.is_some() || wa.error_document.is_some() {
                    return Err(Error::bad_request(
                        "Cannot specify indexDocument or errorDocument when disabling website access.",
                    ));
                }
                state.website_config.update(None);
            }
            state.website_config.update(None);
        }

        if let Some(q) = self.body.quotas {
            state.quotas.update(BucketQuotas {
                max_size: q.max_size,
                max_objects: q.max_objects,
            });
        }

        garage.bucket_table.insert(&bucket).await?;

        Ok(UpdateBucketResponse(
            bucket_info_results(garage, bucket_id).await?,
        ))
    }

    if let Some(q) = req.quotas {
        state.quotas.update(BucketQuotas {
            max_size: q.max_size,
            max_objects: q.max_objects,
        });
    }

    garage.bucket_table.insert(&bucket).await?;

    bucket_info_results(garage, bucket_id).await
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct UpdateBucketRequest {
    website_access: Option<UpdateBucketWebsiteAccess>,
    quotas: Option<ApiBucketQuotas>,
}
impl RequestHandler for CleanupIncompleteUploadsRequest {
    type Response = CleanupIncompleteUploadsResponse;

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct UpdateBucketWebsiteAccess {
    enabled: bool,
    index_document: Option<String>,
    error_document: Option<String>,
    async fn handle(
        self,
        garage: &Arc<Garage>,
        _admin: &Admin,
    ) -> Result<CleanupIncompleteUploadsResponse, Error> {
        let duration = Duration::from_secs(self.older_than_secs);

        let bucket_id = parse_bucket_id(&self.bucket_id)?;

        let count = garage
            .bucket_helper()
            .cleanup_incomplete_uploads(&bucket_id, duration)
            .await?;

        Ok(CleanupIncompleteUploadsResponse {
            uploads_deleted: count as u64,
        })
    }
}

// ---- BUCKET/KEY PERMISSIONS ----

impl RequestHandler for AllowBucketKeyRequest {
    type Response = AllowBucketKeyResponse;

    async fn handle(
        self,
        garage: &Arc<Garage>,
        _admin: &Admin,
    ) -> Result<AllowBucketKeyResponse, Error> {
        let res = handle_bucket_change_key_perm(garage, self.0, true).await?;
        Ok(AllowBucketKeyResponse(res))
    }
}

impl RequestHandler for DenyBucketKeyRequest {
    type Response = DenyBucketKeyResponse;

    async fn handle(
        self,
        garage: &Arc<Garage>,
        _admin: &Admin,
    ) -> Result<DenyBucketKeyResponse, Error> {
        let res = handle_bucket_change_key_perm(garage, self.0, false).await?;
        Ok(DenyBucketKeyResponse(res))
    }
}

pub async fn handle_bucket_change_key_perm(
    garage: &Arc<Garage>,
    req: Request<IncomingBody>,
    req: BucketKeyPermChangeRequest,
    new_perm_flag: bool,
) -> Result<Response<ResBody>, Error> {
    let req = parse_json_body::<BucketKeyPermChangeRequest, _, Error>(req).await?;

) -> Result<GetBucketInfoResponse, Error> {
    let helper = garage.locked_helper().await;

    let bucket_id = parse_bucket_id(&req.bucket_id)?;

@@ -503,76 +492,74 @@ pub async fn handle_bucket_change_key_perm(
    bucket_info_results(garage, bucket.id).await
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct BucketKeyPermChangeRequest {
    bucket_id: String,
    access_key_id: String,
    permissions: ApiBucketKeyPerm,
}

// ---- BUCKET ALIASES ----

pub async fn handle_global_alias_bucket(
    garage: &Arc<Garage>,
    bucket_id: String,
    alias: String,
) -> Result<Response<ResBody>, Error> {
    let bucket_id = parse_bucket_id(&bucket_id)?;
impl RequestHandler for AddBucketAliasRequest {
    type Response = AddBucketAliasResponse;

    let helper = garage.locked_helper().await;
    async fn handle(
        self,
        garage: &Arc<Garage>,
        _admin: &Admin,
    ) -> Result<AddBucketAliasResponse, Error> {
        let bucket_id = parse_bucket_id(&self.bucket_id)?;

    helper.set_global_bucket_alias(bucket_id, &alias).await?;
        let helper = garage.locked_helper().await;

    bucket_info_results(garage, bucket_id).await
        match self.alias {
            BucketAliasEnum::Global { global_alias } => {
                helper
                    .set_global_bucket_alias(bucket_id, &global_alias)
                    .await?;
            }
            BucketAliasEnum::Local {
                local_alias,
                access_key_id,
            } => {
                helper
                    .set_local_bucket_alias(bucket_id, &access_key_id, &local_alias)
                    .await?;
            }
        }

        Ok(AddBucketAliasResponse(
            bucket_info_results(garage, bucket_id).await?,
        ))
    }
}

pub async fn handle_global_unalias_bucket(
    garage: &Arc<Garage>,
    bucket_id: String,
    alias: String,
) -> Result<Response<ResBody>, Error> {
    let bucket_id = parse_bucket_id(&bucket_id)?;
impl RequestHandler for RemoveBucketAliasRequest {
    type Response = RemoveBucketAliasResponse;

    let helper = garage.locked_helper().await;
    async fn handle(
        self,
        garage: &Arc<Garage>,
        _admin: &Admin,
    ) -> Result<RemoveBucketAliasResponse, Error> {
        let bucket_id = parse_bucket_id(&self.bucket_id)?;

    helper.unset_global_bucket_alias(bucket_id, &alias).await?;
        let helper = garage.locked_helper().await;

    bucket_info_results(garage, bucket_id).await
}
        match self.alias {
            BucketAliasEnum::Global { global_alias } => {
                helper
                    .unset_global_bucket_alias(bucket_id, &global_alias)
                    .await?;
            }
            BucketAliasEnum::Local {
                local_alias,
                access_key_id,
            } => {
                helper
                    .unset_local_bucket_alias(bucket_id, &access_key_id, &local_alias)
                    .await?;
            }
        }

pub async fn handle_local_alias_bucket(
    garage: &Arc<Garage>,
    bucket_id: String,
    access_key_id: String,
    alias: String,
) -> Result<Response<ResBody>, Error> {
    let bucket_id = parse_bucket_id(&bucket_id)?;

    let helper = garage.locked_helper().await;

    helper
        .set_local_bucket_alias(bucket_id, &access_key_id, &alias)
        .await?;

    bucket_info_results(garage, bucket_id).await
}

pub async fn handle_local_unalias_bucket(
    garage: &Arc<Garage>,
    bucket_id: String,
    access_key_id: String,
    alias: String,
) -> Result<Response<ResBody>, Error> {
    let bucket_id = parse_bucket_id(&bucket_id)?;

    let helper = garage.locked_helper().await;

    helper
        .unset_local_bucket_alias(bucket_id, &access_key_id, &alias)
        .await?;

    bucket_info_results(garage, bucket_id).await
        Ok(RemoveBucketAliasResponse(
            bucket_info_results(garage, bucket_id).await?,
        ))
    }
}

// ---- HELPER ----
@@ -1,10 +1,6 @@
use std::collections::HashMap;
use std::net::SocketAddr;
use std::sync::Arc;

use hyper::{body::Incoming as IncomingBody, Request, Response};
use serde::{Deserialize, Serialize};

use garage_util::crdt::*;
use garage_util::data::*;

@@ -12,158 +8,182 @@ use garage_rpc::layout;

use garage_model::garage::Garage;

use garage_api_common::helpers::{json_ok_response, parse_json_body};

use crate::api_server::ResBody;
use crate::api::*;
use crate::error::*;
use crate::{Admin, RequestHandler};

pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
    let layout = garage.system.cluster_layout();
    let mut nodes = garage
        .system
        .get_known_nodes()
        .into_iter()
        .map(|i| {
            (
                i.id,
                NodeResp {
                    id: hex::encode(i.id),
                    addr: i.addr,
                    hostname: i.status.hostname,
                    is_up: i.is_up,
                    last_seen_secs_ago: i.last_seen_secs_ago,
                    data_partition: i
                        .status
                        .data_disk_avail
                        .map(|(avail, total)| FreeSpaceResp {
                            available: avail,
                            total,
impl RequestHandler for GetClusterStatusRequest {
    type Response = GetClusterStatusResponse;

    async fn handle(
        self,
        garage: &Arc<Garage>,
        _admin: &Admin,
    ) -> Result<GetClusterStatusResponse, Error> {
        let layout = garage.system.cluster_layout();
        let mut nodes = garage
            .system
            .get_known_nodes()
            .into_iter()
            .map(|i| {
                (
                    i.id,
                    NodeResp {
                        id: hex::encode(i.id),
                        addr: i.addr,
                        hostname: i.status.hostname,
                        is_up: i.is_up,
                        last_seen_secs_ago: i.last_seen_secs_ago,
                        data_partition: i.status.data_disk_avail.map(|(avail, total)| {
                            FreeSpaceResp {
                                available: avail,
                                total,
                            }
                        }),
                        metadata_partition: i.status.meta_disk_avail.map(|(avail, total)| {
                            FreeSpaceResp {
                                available: avail,
                                total,
                            }
                        }),
                        ..Default::default()
                    },
                )
            })
            .collect::<HashMap<_, _>>();
                    metadata_partition: i.status.meta_disk_avail.map(|(avail, total)| {
                        FreeSpaceResp {
                            available: avail,
                            total,
                        }
                    }),
                    ..Default::default()
                },
            )
        })
        .collect::<HashMap<_, _>>();

        for (id, _, role) in layout.current().roles.items().iter() {
            if let layout::NodeRoleV(Some(r)) = role {
                let role = NodeRoleResp {
                    id: hex::encode(id),
                    zone: r.zone.to_string(),
                    capacity: r.capacity,
                    tags: r.tags.clone(),
                };
                match nodes.get_mut(id) {
                    None => {
                        nodes.insert(
                            *id,
                            NodeResp {
                                id: hex::encode(id),
                                role: Some(role),
                                ..Default::default()
                            },
                        );
                    }
                    Some(n) => {
                        n.role = Some(role);
                    }
                }
            }
        }

    for ver in layout.versions().iter().rev().skip(1) {
        for (id, _, role) in ver.roles.items().iter() {
        for (id, _, role) in layout.current().roles.items().iter() {
            if let layout::NodeRoleV(Some(r)) = role {
                if r.capacity.is_some() {
                    if let Some(n) = nodes.get_mut(id) {
                        if n.role.is_none() {
                            n.draining = true;
                        }
                    } else {
                let role = NodeRoleResp {
                    id: hex::encode(id),
                    zone: r.zone.to_string(),
                    capacity: r.capacity,
                    tags: r.tags.clone(),
                };
                match nodes.get_mut(id) {
                    None => {
                        nodes.insert(
                            *id,
                            NodeResp {
                                id: hex::encode(id),
                                draining: true,
                                role: Some(role),
                                ..Default::default()
                            },
                        );
                    }
                    Some(n) => {
                        n.role = Some(role);
                    }
                }
            }
        }
    }

    let mut nodes = nodes.into_values().collect::<Vec<_>>();
    nodes.sort_by(|x, y| x.id.cmp(&y.id));
for ver in layout.versions().iter().rev().skip(1) {
|
||||
for (id, _, role) in ver.roles.items().iter() {
|
||||
if let layout::NodeRoleV(Some(r)) = role {
|
||||
if r.capacity.is_some() {
|
||||
if let Some(n) = nodes.get_mut(id) {
|
||||
if n.role.is_none() {
|
||||
n.draining = true;
|
||||
}
|
||||
} else {
|
||||
nodes.insert(
|
||||
*id,
|
||||
NodeResp {
|
||||
id: hex::encode(id),
|
||||
draining: true,
|
||||
..Default::default()
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let res = GetClusterStatusResponse {
|
||||
node: hex::encode(garage.system.id),
|
||||
garage_version: garage_util::version::garage_version(),
|
||||
garage_features: garage_util::version::garage_features(),
|
||||
rust_version: garage_util::version::rust_version(),
|
||||
db_engine: garage.db.engine(),
|
||||
layout_version: layout.current().version,
|
||||
nodes,
|
||||
};
|
||||
let mut nodes = nodes.into_values().collect::<Vec<_>>();
|
||||
nodes.sort_by(|x, y| x.id.cmp(&y.id));
|
||||
|
||||
Ok(json_ok_response(&res)?)
|
||||
}
|
||||
|
||||
pub async fn handle_get_cluster_health(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
|
||||
use garage_rpc::system::ClusterHealthStatus;
|
||||
let health = garage.system.health();
|
||||
let health = ClusterHealth {
|
||||
status: match health.status {
|
||||
ClusterHealthStatus::Healthy => "healthy",
|
||||
ClusterHealthStatus::Degraded => "degraded",
|
||||
ClusterHealthStatus::Unavailable => "unavailable",
|
||||
},
|
||||
known_nodes: health.known_nodes,
|
||||
connected_nodes: health.connected_nodes,
|
||||
storage_nodes: health.storage_nodes,
|
||||
storage_nodes_ok: health.storage_nodes_ok,
|
||||
partitions: health.partitions,
|
||||
partitions_quorum: health.partitions_quorum,
|
||||
partitions_all_ok: health.partitions_all_ok,
|
||||
};
|
||||
Ok(json_ok_response(&health)?)
|
||||
}
|
||||
|
||||
pub async fn handle_connect_cluster_nodes(
|
||||
garage: &Arc<Garage>,
|
||||
req: Request<IncomingBody>,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
let req = parse_json_body::<Vec<String>, _, Error>(req).await?;
|
||||
|
||||
let res = futures::future::join_all(req.iter().map(|node| garage.system.connect(node)))
|
||||
.await
|
||||
.into_iter()
|
||||
.map(|r| match r {
|
||||
Ok(()) => ConnectClusterNodesResponse {
|
||||
success: true,
|
||||
error: None,
|
||||
},
|
||||
Err(e) => ConnectClusterNodesResponse {
|
||||
success: false,
|
||||
error: Some(format!("{}", e)),
|
||||
},
|
||||
Ok(GetClusterStatusResponse {
|
||||
node: hex::encode(garage.system.id),
|
||||
garage_version: garage_util::version::garage_version().to_string(),
|
||||
garage_features: garage_util::version::garage_features()
|
||||
.map(|features| features.iter().map(ToString::to_string).collect()),
|
||||
rust_version: garage_util::version::rust_version().to_string(),
|
||||
db_engine: garage.db.engine(),
|
||||
layout_version: layout.current().version,
|
||||
nodes,
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
Ok(json_ok_response(&res)?)
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn handle_get_cluster_layout(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
|
||||
let res = format_cluster_layout(garage.system.cluster_layout().inner());
|
||||
impl RequestHandler for GetClusterHealthRequest {
|
||||
type Response = GetClusterHealthResponse;
|
||||
|
||||
Ok(json_ok_response(&res)?)
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<GetClusterHealthResponse, Error> {
|
||||
use garage_rpc::system::ClusterHealthStatus;
|
||||
let health = garage.system.health();
|
||||
let health = GetClusterHealthResponse {
|
||||
status: match health.status {
|
||||
ClusterHealthStatus::Healthy => "healthy",
|
||||
ClusterHealthStatus::Degraded => "degraded",
|
||||
ClusterHealthStatus::Unavailable => "unavailable",
|
||||
}
|
||||
.to_string(),
|
||||
known_nodes: health.known_nodes,
|
||||
connected_nodes: health.connected_nodes,
|
||||
storage_nodes: health.storage_nodes,
|
||||
storage_nodes_ok: health.storage_nodes_ok,
|
||||
partitions: health.partitions,
|
||||
partitions_quorum: health.partitions_quorum,
|
||||
partitions_all_ok: health.partitions_all_ok,
|
||||
};
|
||||
Ok(health)
|
||||
}
|
||||
}
|
||||
|
||||
impl RequestHandler for ConnectClusterNodesRequest {
|
||||
type Response = ConnectClusterNodesResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<ConnectClusterNodesResponse, Error> {
|
||||
let res = futures::future::join_all(self.0.iter().map(|node| garage.system.connect(node)))
|
||||
.await
|
||||
.into_iter()
|
||||
.map(|r| match r {
|
||||
Ok(()) => ConnectNodeResponse {
|
||||
success: true,
|
||||
error: None,
|
||||
},
|
||||
Err(e) => ConnectNodeResponse {
|
||||
success: false,
|
||||
error: Some(format!("{}", e)),
|
||||
},
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
Ok(ConnectClusterNodesResponse(res))
|
||||
}
|
||||
}
|
||||
|
||||
impl RequestHandler for GetClusterLayoutRequest {
|
||||
type Response = GetClusterLayoutResponse;
|
||||
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<GetClusterLayoutResponse, Error> {
|
||||
Ok(format_cluster_layout(
|
||||
garage.system.cluster_layout().inner(),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
fn format_cluster_layout(layout: &layout::LayoutHistory) -> GetClusterLayoutResponse {
|
||||
|
@ -213,199 +233,98 @@ fn format_cluster_layout(layout: &layout::LayoutHistory) -> GetClusterLayoutResp
|
|||
|
||||
// ----
|
||||
|
||||
#[derive(Debug, Clone, Copy, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ClusterHealth {
|
||||
status: &'static str,
|
||||
known_nodes: usize,
|
||||
connected_nodes: usize,
|
||||
storage_nodes: usize,
|
||||
storage_nodes_ok: usize,
|
||||
partitions: usize,
|
||||
partitions_quorum: usize,
|
||||
partitions_all_ok: usize,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct GetClusterStatusResponse {
|
||||
node: String,
|
||||
garage_version: &'static str,
|
||||
garage_features: Option<&'static [&'static str]>,
|
||||
rust_version: &'static str,
|
||||
db_engine: String,
|
||||
layout_version: u64,
|
||||
nodes: Vec<NodeResp>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct ApplyClusterLayoutResponse {
|
||||
message: Vec<String>,
|
||||
layout: GetClusterLayoutResponse,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct ConnectClusterNodesResponse {
|
||||
success: bool,
|
||||
error: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct GetClusterLayoutResponse {
|
||||
version: u64,
|
||||
roles: Vec<NodeRoleResp>,
|
||||
staged_role_changes: Vec<NodeRoleChange>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct NodeRoleResp {
|
||||
id: String,
|
||||
zone: String,
|
||||
capacity: Option<u64>,
|
||||
tags: Vec<String>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Default)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct FreeSpaceResp {
|
||||
available: u64,
|
||||
total: u64,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Default)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct NodeResp {
|
||||
id: String,
|
||||
role: Option<NodeRoleResp>,
|
||||
addr: Option<SocketAddr>,
|
||||
hostname: Option<String>,
|
||||
is_up: bool,
|
||||
last_seen_secs_ago: Option<u64>,
|
||||
draining: bool,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
data_partition: Option<FreeSpaceResp>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
metadata_partition: Option<FreeSpaceResp>,
|
||||
}
|
||||
|
||||
// ---- update functions ----
|
||||
|
||||
pub async fn handle_update_cluster_layout(
|
||||
garage: &Arc<Garage>,
|
||||
req: Request<IncomingBody>,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
let updates = parse_json_body::<UpdateClusterLayoutRequest, _, Error>(req).await?;
|
||||
impl RequestHandler for UpdateClusterLayoutRequest {
|
||||
type Response = UpdateClusterLayoutResponse;
|
||||
|
||||
let mut layout = garage.system.cluster_layout().inner().clone();
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<UpdateClusterLayoutResponse, Error> {
|
||||
let mut layout = garage.system.cluster_layout().inner().clone();
|
||||
|
||||
let mut roles = layout.current().roles.clone();
|
||||
roles.merge(&layout.staging.get().roles);
|
||||
let mut roles = layout.current().roles.clone();
|
||||
roles.merge(&layout.staging.get().roles);
|
||||
|
||||
for change in updates {
|
||||
let node = hex::decode(&change.id).ok_or_bad_request("Invalid node identifier")?;
|
||||
let node = Uuid::try_from(&node).ok_or_bad_request("Invalid node identifier")?;
|
||||
for change in self.0 {
|
||||
let node = hex::decode(&change.id).ok_or_bad_request("Invalid node identifier")?;
|
||||
let node = Uuid::try_from(&node).ok_or_bad_request("Invalid node identifier")?;
|
||||
|
||||
let new_role = match change.action {
|
||||
NodeRoleChangeEnum::Remove { remove: true } => None,
|
||||
NodeRoleChangeEnum::Update {
|
||||
zone,
|
||||
capacity,
|
||||
tags,
|
||||
} => Some(layout::NodeRole {
|
||||
zone,
|
||||
capacity,
|
||||
tags,
|
||||
}),
|
||||
_ => return Err(Error::bad_request("Invalid layout change")),
|
||||
};
|
||||
let new_role = match change.action {
|
||||
NodeRoleChangeEnum::Remove { remove: true } => None,
|
||||
NodeRoleChangeEnum::Update {
|
||||
zone,
|
||||
capacity,
|
||||
tags,
|
||||
} => Some(layout::NodeRole {
|
||||
zone,
|
||||
capacity,
|
||||
tags,
|
||||
}),
|
||||
_ => return Err(Error::bad_request("Invalid layout change")),
|
||||
};
|
||||
|
||||
layout
|
||||
.staging
|
||||
.get_mut()
|
||||
.roles
|
||||
.merge(&roles.update_mutator(node, layout::NodeRoleV(new_role)));
|
||||
layout
|
||||
.staging
|
||||
.get_mut()
|
||||
.roles
|
||||
.merge(&roles.update_mutator(node, layout::NodeRoleV(new_role)));
|
||||
}
|
||||
|
||||
garage
|
||||
.system
|
||||
.layout_manager
|
||||
.update_cluster_layout(&layout)
|
||||
.await?;
|
||||
|
||||
let res = format_cluster_layout(&layout);
|
||||
Ok(UpdateClusterLayoutResponse(res))
|
||||
}
|
||||
|
||||
garage
|
||||
.system
|
||||
.layout_manager
|
||||
.update_cluster_layout(&layout)
|
||||
.await?;
|
||||
|
||||
let res = format_cluster_layout(&layout);
|
||||
Ok(json_ok_response(&res)?)
|
||||
}
|
||||
|
||||
pub async fn handle_apply_cluster_layout(
|
||||
garage: &Arc<Garage>,
|
||||
req: Request<IncomingBody>,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
let param = parse_json_body::<ApplyLayoutRequest, _, Error>(req).await?;
|
||||
impl RequestHandler for ApplyClusterLayoutRequest {
|
||||
type Response = ApplyClusterLayoutResponse;
|
||||
|
||||
let layout = garage.system.cluster_layout().inner().clone();
|
||||
let (layout, msg) = layout.apply_staged_changes(Some(param.version))?;
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<ApplyClusterLayoutResponse, Error> {
|
||||
let layout = garage.system.cluster_layout().inner().clone();
|
||||
let (layout, msg) = layout.apply_staged_changes(Some(self.version))?;
|
||||
|
||||
garage
|
||||
.system
|
||||
.layout_manager
|
||||
.update_cluster_layout(&layout)
|
||||
.await?;
|
||||
garage
|
||||
.system
|
||||
.layout_manager
|
||||
.update_cluster_layout(&layout)
|
||||
.await?;
|
||||
|
||||
let res = ApplyClusterLayoutResponse {
|
||||
message: msg,
|
||||
layout: format_cluster_layout(&layout),
|
||||
};
|
||||
Ok(json_ok_response(&res)?)
|
||||
Ok(ApplyClusterLayoutResponse {
|
||||
message: msg,
|
||||
layout: format_cluster_layout(&layout),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn handle_revert_cluster_layout(
|
||||
garage: &Arc<Garage>,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
let layout = garage.system.cluster_layout().inner().clone();
|
||||
let layout = layout.revert_staged_changes()?;
|
||||
garage
|
||||
.system
|
||||
.layout_manager
|
||||
.update_cluster_layout(&layout)
|
||||
.await?;
|
||||
impl RequestHandler for RevertClusterLayoutRequest {
|
||||
type Response = RevertClusterLayoutResponse;
|
||||
|
||||
let res = format_cluster_layout(&layout);
|
||||
Ok(json_ok_response(&res)?)
|
||||
}
|
||||
|
||||
// ----
|
||||
|
||||
type UpdateClusterLayoutRequest = Vec<NodeRoleChange>;
|
||||
|
||||
#[derive(Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct ApplyLayoutRequest {
|
||||
version: u64,
|
||||
}
|
||||
|
||||
// ----
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct NodeRoleChange {
|
||||
id: String,
|
||||
#[serde(flatten)]
|
||||
action: NodeRoleChangeEnum,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(untagged)]
|
||||
enum NodeRoleChangeEnum {
|
||||
#[serde(rename_all = "camelCase")]
|
||||
Remove { remove: bool },
|
||||
#[serde(rename_all = "camelCase")]
|
||||
Update {
|
||||
zone: String,
|
||||
capacity: Option<u64>,
|
||||
tags: Vec<String>,
|
||||
},
|
||||
async fn handle(
|
||||
self,
|
||||
garage: &Arc<Garage>,
|
||||
_admin: &Admin,
|
||||
) -> Result<RevertClusterLayoutResponse, Error> {
|
||||
let layout = garage.system.cluster_layout().inner().clone();
|
||||
let layout = layout.revert_staged_changes()?;
|
||||
garage
|
||||
.system
|
||||
.layout_manager
|
||||
.update_cluster_layout(&layout)
|
||||
.await?;
|
||||
|
||||
let res = format_cluster_layout(&layout);
|
||||
Ok(RevertClusterLayoutResponse(res))
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -25,6 +25,14 @@ pub enum Error {
	#[error(display = "Access key not found: {}", _0)]
	NoSuchAccessKey(String),

	/// The requested block does not exist
	#[error(display = "Block not found: {}", _0)]
	NoSuchBlock(String),

	/// The requested worker does not exist
	#[error(display = "Worker not found: {}", _0)]
	NoSuchWorker(u64),

	/// In Import key, the key already exists
	#[error(
		display = "Key {} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. Sorry.",

@@ -49,10 +57,12 @@ impl From<HelperError> for Error {
}

impl Error {
	fn code(&self) -> &'static str {
	pub fn code(&self) -> &'static str {
		match self {
			Error::Common(c) => c.aws_code(),
			Error::NoSuchAccessKey(_) => "NoSuchAccessKey",
			Error::NoSuchWorker(_) => "NoSuchWorker",
			Error::NoSuchBlock(_) => "NoSuchBlock",
			Error::KeyAlreadyExists(_) => "KeyAlreadyExists",
		}
	}

@@ -63,7 +73,9 @@ impl ApiError for Error {
	fn http_status_code(&self) -> StatusCode {
		match self {
			Error::Common(c) => c.http_status_code(),
			Error::NoSuchAccessKey(_) => StatusCode::NOT_FOUND,
			Error::NoSuchAccessKey(_) | Error::NoSuchWorker(_) | Error::NoSuchBlock(_) => {
				StatusCode::NOT_FOUND
			}
			Error::KeyAlreadyExists(_) => StatusCode::CONFLICT,
		}
	}
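
A note for readers of this hunk: per the `code()` and `http_status_code()` impls above, the two new variants both map to 404. A minimal sketch of the resulting behavior, assuming `ApiError` is in scope (variant and method names are taken from this diff, the surrounding assertions are illustrative only):

	// Hypothetical check, not part of the patch:
	let err = Error::NoSuchWorker(42);
	assert_eq!(err.code(), "NoSuchWorker");
	assert_eq!(err.http_status_code(), StatusCode::NOT_FOUND);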
@@ -1,173 +1,168 @@
use std::collections::HashMap;
use std::sync::Arc;

use hyper::{body::Incoming as IncomingBody, Request, Response, StatusCode};
use serde::{Deserialize, Serialize};

use garage_table::*;

use garage_model::garage::Garage;
use garage_model::key_table::*;

use garage_api_common::helpers::*;

use crate::api_server::ResBody;
use crate::api::*;
use crate::error::*;
use crate::{Admin, RequestHandler};

pub async fn handle_list_keys(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
	let res = garage
		.key_table
		.get_range(
			&EmptyKey,
			None,
			Some(KeyFilter::Deleted(DeletedFilter::NotDeleted)),
			10000,
			EnumerationOrder::Forward,
		)
		.await?
		.iter()
		.map(|k| ListKeyResultItem {
			id: k.key_id.to_string(),
			name: k.params().unwrap().name.get().clone(),
		})
		.collect::<Vec<_>>();
impl RequestHandler for ListKeysRequest {
	type Response = ListKeysResponse;

	Ok(json_ok_response(&res)?)
}

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct ListKeyResultItem {
	id: String,
	name: String,
}

pub async fn handle_get_key_info(
	garage: &Arc<Garage>,
	id: Option<String>,
	search: Option<String>,
	show_secret_key: bool,
) -> Result<Response<ResBody>, Error> {
	let key = if let Some(id) = id {
		garage.key_helper().get_existing_key(&id).await?
	} else if let Some(search) = search {
		garage
			.key_helper()
			.get_existing_matching_key(&search)
	async fn handle(self, garage: &Arc<Garage>, _admin: &Admin) -> Result<ListKeysResponse, Error> {
		let res = garage
			.key_table
			.get_range(
				&EmptyKey,
				None,
				Some(KeyFilter::Deleted(DeletedFilter::NotDeleted)),
				10000,
				EnumerationOrder::Forward,
			)
			.await?
	} else {
		unreachable!();
	};
			.iter()
			.map(|k| ListKeysResponseItem {
				id: k.key_id.to_string(),
				name: k.params().unwrap().name.get().clone(),
			})
			.collect::<Vec<_>>();

	key_info_results(garage, key, show_secret_key).await
}

pub async fn handle_create_key(
	garage: &Arc<Garage>,
	req: Request<IncomingBody>,
) -> Result<Response<ResBody>, Error> {
	let req = parse_json_body::<CreateKeyRequest, _, Error>(req).await?;

	let key = Key::new(req.name.as_deref().unwrap_or("Unnamed key"));
	garage.key_table.insert(&key).await?;

	key_info_results(garage, key, true).await
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct CreateKeyRequest {
	name: Option<String>,
}

pub async fn handle_import_key(
	garage: &Arc<Garage>,
	req: Request<IncomingBody>,
) -> Result<Response<ResBody>, Error> {
	let req = parse_json_body::<ImportKeyRequest, _, Error>(req).await?;

	let prev_key = garage.key_table.get(&EmptyKey, &req.access_key_id).await?;
	if prev_key.is_some() {
		return Err(Error::KeyAlreadyExists(req.access_key_id.to_string()));
		Ok(ListKeysResponse(res))
	}

	let imported_key = Key::import(
		&req.access_key_id,
		&req.secret_access_key,
		req.name.as_deref().unwrap_or("Imported key"),
	)
	.ok_or_bad_request("Invalid key format")?;
	garage.key_table.insert(&imported_key).await?;

	key_info_results(garage, imported_key, false).await
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct ImportKeyRequest {
	access_key_id: String,
	secret_access_key: String,
	name: Option<String>,
}
impl RequestHandler for GetKeyInfoRequest {
	type Response = GetKeyInfoResponse;

pub async fn handle_update_key(
	garage: &Arc<Garage>,
	id: String,
	req: Request<IncomingBody>,
) -> Result<Response<ResBody>, Error> {
	let req = parse_json_body::<UpdateKeyRequest, _, Error>(req).await?;
	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<GetKeyInfoResponse, Error> {
		let key = match (self.id, self.search) {
			(Some(id), None) => garage.key_helper().get_existing_key(&id).await?,
			(None, Some(search)) => {
				garage
					.key_helper()
					.get_existing_matching_key(&search)
					.await?
			}
			_ => {
				return Err(Error::bad_request(
					"Either id or search must be provided (but not both)",
				));
			}
		};

	let mut key = garage.key_helper().get_existing_key(&id).await?;

	let key_state = key.state.as_option_mut().unwrap();

	if let Some(new_name) = req.name {
		key_state.name.update(new_name);
		Ok(key_info_results(garage, key, self.show_secret_key).await?)
	}
	if let Some(allow) = req.allow {
		if allow.create_bucket {
			key_state.allow_create_bucket.update(true);
		}

impl RequestHandler for CreateKeyRequest {
	type Response = CreateKeyResponse;

	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<CreateKeyResponse, Error> {
		let key = Key::new(self.name.as_deref().unwrap_or("Unnamed key"));
		garage.key_table.insert(&key).await?;

		Ok(CreateKeyResponse(
			key_info_results(garage, key, true).await?,
		))
	}
}

impl RequestHandler for ImportKeyRequest {
	type Response = ImportKeyResponse;

	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<ImportKeyResponse, Error> {
		let prev_key = garage.key_table.get(&EmptyKey, &self.access_key_id).await?;
		if prev_key.is_some() {
			return Err(Error::KeyAlreadyExists(self.access_key_id.to_string()));
		}

		let imported_key = Key::import(
			&self.access_key_id,
			&self.secret_access_key,
			self.name.as_deref().unwrap_or("Imported key"),
		)
		.ok_or_bad_request("Invalid key format")?;
		garage.key_table.insert(&imported_key).await?;

		Ok(ImportKeyResponse(
			key_info_results(garage, imported_key, false).await?,
		))
	}
	if let Some(deny) = req.deny {
		if deny.create_bucket {
			key_state.allow_create_bucket.update(false);
		}

impl RequestHandler for UpdateKeyRequest {
	type Response = UpdateKeyResponse;

	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<UpdateKeyResponse, Error> {
		let mut key = garage.key_helper().get_existing_key(&self.id).await?;

		let key_state = key.state.as_option_mut().unwrap();

		if let Some(new_name) = self.body.name {
			key_state.name.update(new_name);
		}
		if let Some(allow) = self.body.allow {
			if allow.create_bucket {
				key_state.allow_create_bucket.update(true);
			}
		}
		if let Some(deny) = self.body.deny {
			if deny.create_bucket {
				key_state.allow_create_bucket.update(false);
			}
		}

		garage.key_table.insert(&key).await?;

		Ok(UpdateKeyResponse(
			key_info_results(garage, key, false).await?,
		))
	}

	garage.key_table.insert(&key).await?;

	key_info_results(garage, key, false).await
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct UpdateKeyRequest {
	name: Option<String>,
	allow: Option<KeyPerm>,
	deny: Option<KeyPerm>,
}
impl RequestHandler for DeleteKeyRequest {
	type Response = DeleteKeyResponse;

pub async fn handle_delete_key(
	garage: &Arc<Garage>,
	id: String,
) -> Result<Response<ResBody>, Error> {
	let helper = garage.locked_helper().await;
	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<DeleteKeyResponse, Error> {
		let helper = garage.locked_helper().await;

	let mut key = helper.key().get_existing_key(&id).await?;
		let mut key = helper.key().get_existing_key(&self.id).await?;

	helper.delete_key(&mut key).await?;
		helper.delete_key(&mut key).await?;

	Ok(Response::builder()
		.status(StatusCode::NO_CONTENT)
		.body(empty_body())?)
		Ok(DeleteKeyResponse)
	}
}

async fn key_info_results(
	garage: &Arc<Garage>,
	key: Key,
	show_secret: bool,
) -> Result<Response<ResBody>, Error> {
) -> Result<GetKeyInfoResponse, Error> {
	let mut relevant_buckets = HashMap::new();

	let key_state = key.state.as_option().unwrap();

@@ -193,7 +188,7 @@ async fn key_info_results(
		}
	}

	let res = GetKeyInfoResult {
	let res = GetKeyInfoResponse {
		name: key_state.name.get().clone(),
		access_key_id: key.key_id.clone(),
		secret_access_key: if show_secret {

@@ -208,7 +203,7 @@ async fn key_info_results(
		.into_values()
		.map(|bucket| {
			let state = bucket.state.as_option().unwrap();
			KeyInfoBucketResult {
			KeyInfoBucketResponse {
				id: hex::encode(bucket.id),
				global_aliases: state
					.aliases

@@ -238,43 +233,5 @@ async fn key_info_results(
			.collect::<Vec<_>>(),
	};

	Ok(json_ok_response(&res)?)
}

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct GetKeyInfoResult {
	name: String,
	access_key_id: String,
	#[serde(skip_serializing_if = "is_default")]
	secret_access_key: Option<String>,
	permissions: KeyPerm,
	buckets: Vec<KeyInfoBucketResult>,
}

#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct KeyPerm {
	#[serde(default)]
	create_bucket: bool,
}

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct KeyInfoBucketResult {
	id: String,
	global_aliases: Vec<String>,
	local_aliases: Vec<String>,
	permissions: ApiBucketKeyPerm,
}

#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
pub(crate) struct ApiBucketKeyPerm {
	#[serde(default)]
	pub(crate) read: bool,
	#[serde(default)]
	pub(crate) write: bool,
	#[serde(default)]
	pub(crate) owner: bool,
	Ok(res)
}
@@ -3,9 +3,41 @@ extern crate tracing;

pub mod api_server;
mod error;
mod macros;

pub mod api;
mod router_v0;
mod router_v1;
mod router_v2;

mod bucket;
mod cluster;
mod key;
mod special;

mod block;
mod node;
mod repair;
mod worker;

use std::sync::Arc;

use garage_model::garage::Garage;

pub use api_server::AdminApiServer as Admin;

pub enum Authorization {
	None,
	MetricsToken,
	AdminToken,
}

pub trait RequestHandler {
	type Response;

	fn handle(
		self,
		garage: &Arc<Garage>,
		admin: &Admin,
	) -> impl std::future::Future<Output = Result<Self::Response, error::Error>> + Send;
}
219 src/api/admin/macros.rs Normal file
@@ -0,0 +1,219 @@
macro_rules! admin_endpoints {
	[
		$(@special $special_endpoint:ident,)*
		$($endpoint:ident,)*
	] => {
		paste! {
			#[derive(Debug, Clone, Serialize, Deserialize)]
			pub enum AdminApiRequest {
				$(
					$special_endpoint( [<$special_endpoint Request>] ),
				)*
				$(
					$endpoint( [<$endpoint Request>] ),
				)*
			}

			#[derive(Debug, Clone, Serialize)]
			#[serde(untagged)]
			pub enum AdminApiResponse {
				$(
					$endpoint( [<$endpoint Response>] ),
				)*
			}

			#[derive(Debug, Clone, Serialize, Deserialize)]
			pub enum TaggedAdminApiResponse {
				$(
					$endpoint( [<$endpoint Response>] ),
				)*
			}

			impl AdminApiRequest {
				pub fn name(&self) -> &'static str {
					match self {
						$(
							Self::$special_endpoint(_) => stringify!($special_endpoint),
						)*
						$(
							Self::$endpoint(_) => stringify!($endpoint),
						)*
					}
				}
			}

			impl AdminApiResponse {
				pub fn tagged(self) -> TaggedAdminApiResponse {
					match self {
						$(
							Self::$endpoint(res) => TaggedAdminApiResponse::$endpoint(res),
						)*
					}
				}
			}

			$(
				impl From< [< $endpoint Request >] > for AdminApiRequest {
					fn from(req: [< $endpoint Request >]) -> AdminApiRequest {
						AdminApiRequest::$endpoint(req)
					}
				}

				impl TryFrom<TaggedAdminApiResponse> for [< $endpoint Response >] {
					type Error = TaggedAdminApiResponse;
					fn try_from(resp: TaggedAdminApiResponse) -> Result< [< $endpoint Response >], TaggedAdminApiResponse> {
						match resp {
							TaggedAdminApiResponse::$endpoint(v) => Ok(v),
							x => Err(x),
						}
					}
				}
			)*

			impl RequestHandler for AdminApiRequest {
				type Response = AdminApiResponse;

				async fn handle(self, garage: &Arc<Garage>, admin: &Admin) -> Result<AdminApiResponse, Error> {
					Ok(match self {
						$(
							AdminApiRequest::$special_endpoint(_) => panic!(
								concat!(stringify!($special_endpoint), " needs to go through a special handler")
							),
						)*
						$(
							AdminApiRequest::$endpoint(req) => AdminApiResponse::$endpoint(req.handle(garage, admin).await?),
						)*
					})
				}
			}
		}
	};
}

macro_rules! local_admin_endpoints {
	[
		$($endpoint:ident,)*
	] => {
		paste! {
			#[derive(Debug, Clone, Serialize, Deserialize)]
			pub enum LocalAdminApiRequest {
				$(
					$endpoint( [<Local $endpoint Request>] ),
				)*
			}

			#[derive(Debug, Clone, Serialize, Deserialize)]
			pub enum LocalAdminApiResponse {
				$(
					$endpoint( [<Local $endpoint Response>] ),
				)*
			}

			$(
				pub type [< $endpoint Request >] = MultiRequest< [< Local $endpoint Request >] >;

				pub type [< $endpoint RequestBody >] = [< Local $endpoint Request >];

				pub type [< $endpoint Response >] = MultiResponse< [< Local $endpoint Response >] >;

				impl From< [< Local $endpoint Request >] > for LocalAdminApiRequest {
					fn from(req: [< Local $endpoint Request >]) -> LocalAdminApiRequest {
						LocalAdminApiRequest::$endpoint(req)
					}
				}

				impl TryFrom<LocalAdminApiResponse> for [< Local $endpoint Response >] {
					type Error = LocalAdminApiResponse;
					fn try_from(resp: LocalAdminApiResponse) -> Result< [< Local $endpoint Response >], LocalAdminApiResponse> {
						match resp {
							LocalAdminApiResponse::$endpoint(v) => Ok(v),
							x => Err(x),
						}
					}
				}

				impl RequestHandler for [< $endpoint Request >] {
					type Response = [< $endpoint Response >];

					async fn handle(self, garage: &Arc<Garage>, admin: &Admin) -> Result<Self::Response, Error> {
						let to = match self.node.as_str() {
							"*" => garage.system.cluster_layout().all_nodes().to_vec(),
							id => {
								let nodes = garage.system.cluster_layout().all_nodes()
									.iter()
									.filter(|x| hex::encode(x).starts_with(id))
									.cloned()
									.collect::<Vec<_>>();
								if nodes.len() != 1 {
									return Err(Error::bad_request(format!("Zero or multiple nodes matching {}: {:?}", id, nodes)));
								}
								nodes
							}
						};

						let resps = garage.system.rpc_helper().call_many(&admin.endpoint,
							&to,
							AdminRpc::Internal(self.body.into()),
							RequestStrategy::with_priority(PRIO_NORMAL),
						).await?;

						let mut ret = [< $endpoint Response >] {
							success: HashMap::new(),
							error: HashMap::new(),
						};
						for (node, resp) in resps {
							match resp {
								Ok(AdminRpcResponse::InternalApiOkResponse(r)) => {
									match [< Local $endpoint Response >]::try_from(r) {
										Ok(r) => {
											ret.success.insert(hex::encode(node), r);
										}
										Err(_) => {
											ret.error.insert(hex::encode(node), "returned invalid value".to_string());
										}
									}
								}
								Ok(AdminRpcResponse::ApiErrorResponse{error_code, http_code, message}) => {
									ret.error.insert(hex::encode(node), format!("{} ({}): {}", error_code, http_code, message));
								}
								Ok(_) => {
									ret.error.insert(hex::encode(node), "returned invalid value".to_string());
								}
								Err(e) => {
									ret.error.insert(hex::encode(node), e.to_string());
								}
							}
						}

						Ok(ret)
					}
				}
			)*

			impl LocalAdminApiRequest {
				pub fn name(&self) -> &'static str {
					match self {
						$(
							Self::$endpoint(_) => stringify!($endpoint),
						)*
					}
				}
			}

			impl RequestHandler for LocalAdminApiRequest {
				type Response = LocalAdminApiResponse;

				async fn handle(self, garage: &Arc<Garage>, admin: &Admin) -> Result<LocalAdminApiResponse, Error> {
					Ok(match self {
						$(
							LocalAdminApiRequest::$endpoint(req) => LocalAdminApiResponse::$endpoint(req.handle(garage, admin).await?),
						)*
					})
				}
			}
		}
	};
}

pub(crate) use admin_endpoints;
pub(crate) use local_admin_endpoints;
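
For orientation: these macros are presumably invoked once from the `api` module (declared as `pub mod api;` in lib.rs above) with the full endpoint lists. A hedged sketch of what such an invocation would look like, inferred from the macro shapes and the router_v2 endpoint listing further down, not from the actual contents of src/api/admin/api.rs:

	// Hypothetical invocation (endpoint names taken from router_v2.rs below):
	admin_endpoints![
		@special Options,
		@special CheckDomain,
		@special Health,
		@special Metrics,
		GetClusterStatus,
		GetClusterHealth,
		ConnectClusterNodes,
		// ... one ident per endpoint; expands to AdminApiRequest/Response enums
	];

	local_admin_endpoints![
		CreateMetadataSnapshot,
		GetNodeStatistics,
		LaunchRepairOperation,
		// ... expands to MultiRequest/MultiResponse wrappers per node-local endpoint
	];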
216 src/api/admin/node.rs Normal file
@@ -0,0 +1,216 @@
use std::collections::HashMap;
use std::fmt::Write;
use std::sync::Arc;

use format_table::format_table_to_string;

use garage_util::data::*;
use garage_util::error::Error as GarageError;

use garage_table::replication::*;
use garage_table::*;

use garage_rpc::layout::PARTITION_BITS;

use garage_model::garage::Garage;

use crate::api::*;
use crate::error::Error;
use crate::{Admin, RequestHandler};

impl RequestHandler for LocalCreateMetadataSnapshotRequest {
	type Response = LocalCreateMetadataSnapshotResponse;

	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<LocalCreateMetadataSnapshotResponse, Error> {
		garage_model::snapshot::async_snapshot_metadata(garage).await?;
		Ok(LocalCreateMetadataSnapshotResponse)
	}
}

impl RequestHandler for LocalGetNodeStatisticsRequest {
	type Response = LocalGetNodeStatisticsResponse;

	// FIXME: return this as a JSON struct instead of text
	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<LocalGetNodeStatisticsResponse, Error> {
		let mut ret = String::new();
		writeln!(
			&mut ret,
			"Garage version: {} [features: {}]\nRust compiler version: {}",
			garage_util::version::garage_version(),
			garage_util::version::garage_features()
				.map(|list| list.join(", "))
				.unwrap_or_else(|| "(unknown)".into()),
			garage_util::version::rust_version(),
		)
		.unwrap();

		writeln!(&mut ret, "\nDatabase engine: {}", garage.db.engine()).unwrap();

		// Gather table statistics
		let mut table = vec!["  Table\tItems\tMklItems\tMklTodo\tGcTodo".into()];
		table.push(gather_table_stats(&garage.bucket_table)?);
		table.push(gather_table_stats(&garage.key_table)?);
		table.push(gather_table_stats(&garage.object_table)?);
		table.push(gather_table_stats(&garage.version_table)?);
		table.push(gather_table_stats(&garage.block_ref_table)?);
		write!(
			&mut ret,
			"\nTable stats:\n{}",
			format_table_to_string(table)
		)
		.unwrap();

		// Gather block manager statistics
		writeln!(&mut ret, "\nBlock manager stats:").unwrap();
		let rc_len = garage.block_manager.rc_len()?.to_string();

		writeln!(
			&mut ret,
			"  number of RC entries (~= number of blocks): {}",
			rc_len
		)
		.unwrap();
		writeln!(
			&mut ret,
			"  resync queue length: {}",
			garage.block_manager.resync.queue_len()?
		)
		.unwrap();
		writeln!(
			&mut ret,
			"  blocks with resync errors: {}",
			garage.block_manager.resync.errors_len()?
		)
		.unwrap();

		Ok(LocalGetNodeStatisticsResponse { freeform: ret })
	}
}

impl RequestHandler for GetClusterStatisticsRequest {
	type Response = GetClusterStatisticsResponse;

	// FIXME: return this as a JSON struct instead of text
	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<GetClusterStatisticsResponse, Error> {
		let mut ret = String::new();

		// Gather storage node and free space statistics for current nodes
		let layout = &garage.system.cluster_layout();
		let mut node_partition_count = HashMap::<Uuid, u64>::new();
		for short_id in layout.current().ring_assignment_data.iter() {
			let id = layout.current().node_id_vec[*short_id as usize];
			*node_partition_count.entry(id).or_default() += 1;
		}
		let node_info = garage
			.system
			.get_known_nodes()
			.into_iter()
			.map(|n| (n.id, n))
			.collect::<HashMap<_, _>>();

		let mut table = vec!["  ID\tHostname\tZone\tCapacity\tPart.\tDataAvail\tMetaAvail".into()];
		for (id, parts) in node_partition_count.iter() {
			let info = node_info.get(id);
			let status = info.map(|x| &x.status);
			let role = layout.current().roles.get(id).and_then(|x| x.0.as_ref());
			let hostname = status.and_then(|x| x.hostname.as_deref()).unwrap_or("?");
			let zone = role.map(|x| x.zone.as_str()).unwrap_or("?");
			let capacity = role
				.map(|x| x.capacity_string())
				.unwrap_or_else(|| "?".into());
			let avail_str = |x| match x {
				Some((avail, total)) => {
					let pct = (avail as f64) / (total as f64) * 100.;
					let avail = bytesize::ByteSize::b(avail);
					let total = bytesize::ByteSize::b(total);
					format!("{}/{} ({:.1}%)", avail, total, pct)
				}
				None => "?".into(),
			};
			let data_avail = avail_str(status.and_then(|x| x.data_disk_avail));
			let meta_avail = avail_str(status.and_then(|x| x.meta_disk_avail));
			table.push(format!(
				"  {:?}\t{}\t{}\t{}\t{}\t{}\t{}",
				id, hostname, zone, capacity, parts, data_avail, meta_avail
			));
		}
		write!(
			&mut ret,
			"Storage nodes:\n{}",
			format_table_to_string(table)
		)
		.unwrap();

		let meta_part_avail = node_partition_count
			.iter()
			.filter_map(|(id, parts)| {
				node_info
					.get(id)
					.and_then(|x| x.status.meta_disk_avail)
					.map(|c| c.0 / *parts)
			})
			.collect::<Vec<_>>();
		let data_part_avail = node_partition_count
			.iter()
			.filter_map(|(id, parts)| {
				node_info
					.get(id)
					.and_then(|x| x.status.data_disk_avail)
					.map(|c| c.0 / *parts)
			})
			.collect::<Vec<_>>();
		if !meta_part_avail.is_empty() && !data_part_avail.is_empty() {
			let meta_avail =
				bytesize::ByteSize(meta_part_avail.iter().min().unwrap() * (1 << PARTITION_BITS));
			let data_avail =
				bytesize::ByteSize(data_part_avail.iter().min().unwrap() * (1 << PARTITION_BITS));
			writeln!(
				&mut ret,
				"\nEstimated available storage space cluster-wide (might be lower in practice):"
			)
			.unwrap();
			if meta_part_avail.len() < node_partition_count.len()
				|| data_part_avail.len() < node_partition_count.len()
			{
				writeln!(&mut ret, "  data: < {}", data_avail).unwrap();
				writeln!(&mut ret, "  metadata: < {}", meta_avail).unwrap();
				writeln!(&mut ret, "A precise estimate could not be given as information is missing for some storage nodes.").unwrap();
			} else {
				writeln!(&mut ret, "  data: {}", data_avail).unwrap();
				writeln!(&mut ret, "  metadata: {}", meta_avail).unwrap();
			}
		}

		Ok(GetClusterStatisticsResponse { freeform: ret })
	}
}

fn gather_table_stats<F, R>(t: &Arc<Table<F, R>>) -> Result<String, Error>
where
	F: TableSchema + 'static,
	R: TableReplication + 'static,
{
	let data_len = t.data.store.len().map_err(GarageError::from)?.to_string();
	let mkl_len = t.merkle_updater.merkle_tree_len()?.to_string();

	Ok(format!(
		"  {}\t{}\t{}\t{}\t{}",
		F::TABLE_NAME,
		data_len,
		mkl_len,
		t.merkle_updater.todo_len()?,
		t.data.gc_todo_len()?
	))
}
@@ -5,6 +5,14 @@ use std::time::Duration;
use async_trait::async_trait;
use tokio::sync::watch;

use garage_util::background::*;
use garage_util::data::*;
use garage_util::error::{Error as GarageError, OkOrMessage};
use garage_util::migrate::Migrate;

use garage_table::replication::*;
use garage_table::*;

use garage_block::manager::BlockManager;
use garage_block::repair::ScrubWorkerCommand;

@@ -14,82 +22,76 @@ use garage_model::s3::mpu_table::*;
use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*;

use garage_table::replication::*;
use garage_table::*;

use garage_util::background::*;
use garage_util::data::*;
use garage_util::error::Error;
use garage_util::migrate::Migrate;

use crate::*;
use crate::api::*;
use crate::error::Error;
use crate::{Admin, RequestHandler};

const RC_REPAIR_ITER_COUNT: usize = 64;

pub async fn launch_online_repair(
	garage: &Arc<Garage>,
	bg: &BackgroundRunner,
	opt: RepairOpt,
) -> Result<(), Error> {
	match opt.what {
		RepairWhat::Tables => {
			info!("Launching a full sync of tables");
			garage.bucket_table.syncer.add_full_sync()?;
			garage.object_table.syncer.add_full_sync()?;
			garage.version_table.syncer.add_full_sync()?;
			garage.block_ref_table.syncer.add_full_sync()?;
			garage.key_table.syncer.add_full_sync()?;
		}
		RepairWhat::Versions => {
			info!("Repairing the versions table");
			bg.spawn_worker(TableRepairWorker::new(garage.clone(), RepairVersions));
		}
		RepairWhat::MultipartUploads => {
			info!("Repairing the multipart uploads table");
			bg.spawn_worker(TableRepairWorker::new(garage.clone(), RepairMpu));
		}
		RepairWhat::BlockRefs => {
			info!("Repairing the block refs table");
			bg.spawn_worker(TableRepairWorker::new(garage.clone(), RepairBlockRefs));
		}
		RepairWhat::BlockRc => {
			info!("Repairing the block reference counters");
			bg.spawn_worker(BlockRcRepair::new(
				garage.block_manager.clone(),
				garage.block_ref_table.clone(),
			));
		}
		RepairWhat::Blocks => {
			info!("Repairing the stored blocks");
			bg.spawn_worker(garage_block::repair::RepairWorker::new(
				garage.block_manager.clone(),
			));
		}
		RepairWhat::Scrub { cmd } => {
			let cmd = match cmd {
				ScrubCmd::Start => ScrubWorkerCommand::Start,
				ScrubCmd::Pause => ScrubWorkerCommand::Pause(Duration::from_secs(3600 * 24)),
				ScrubCmd::Resume => ScrubWorkerCommand::Resume,
				ScrubCmd::Cancel => ScrubWorkerCommand::Cancel,
				ScrubCmd::SetTranquility { tranquility } => {
					garage
						.block_manager
						.scrub_persister
						.set_with(|x| x.tranquility = tranquility)?;
					return Ok(());
				}
			};
			info!("Sending command to scrub worker: {:?}", cmd);
			garage.block_manager.send_scrub_command(cmd).await?;
		}
		RepairWhat::Rebalance => {
			info!("Rebalancing the stored blocks among storage locations");
			bg.spawn_worker(garage_block::repair::RebalanceWorker::new(
				garage.block_manager.clone(),
			));
impl RequestHandler for LocalLaunchRepairOperationRequest {
	type Response = LocalLaunchRepairOperationResponse;

	async fn handle(
		self,
		garage: &Arc<Garage>,
		admin: &Admin,
	) -> Result<LocalLaunchRepairOperationResponse, Error> {
		let bg = &admin.background;
		match self.repair_type {
			RepairType::Tables => {
				info!("Launching a full sync of tables");
				garage.bucket_table.syncer.add_full_sync()?;
				garage.object_table.syncer.add_full_sync()?;
				garage.version_table.syncer.add_full_sync()?;
				garage.block_ref_table.syncer.add_full_sync()?;
				garage.key_table.syncer.add_full_sync()?;
			}
			RepairType::Versions => {
				info!("Repairing the versions table");
				bg.spawn_worker(TableRepairWorker::new(garage.clone(), RepairVersions));
			}
			RepairType::MultipartUploads => {
				info!("Repairing the multipart uploads table");
				bg.spawn_worker(TableRepairWorker::new(garage.clone(), RepairMpu));
			}
			RepairType::BlockRefs => {
				info!("Repairing the block refs table");
				bg.spawn_worker(TableRepairWorker::new(garage.clone(), RepairBlockRefs));
			}
			RepairType::BlockRc => {
				info!("Repairing the block reference counters");
				bg.spawn_worker(BlockRcRepair::new(
					garage.block_manager.clone(),
					garage.block_ref_table.clone(),
				));
			}
			RepairType::Blocks => {
				info!("Repairing the stored blocks");
				bg.spawn_worker(garage_block::repair::RepairWorker::new(
					garage.block_manager.clone(),
				));
			}
			RepairType::Scrub(cmd) => {
				let cmd = match cmd {
					ScrubCommand::Start => ScrubWorkerCommand::Start,
					ScrubCommand::Pause => {
						ScrubWorkerCommand::Pause(Duration::from_secs(3600 * 24))
					}
					ScrubCommand::Resume => ScrubWorkerCommand::Resume,
					ScrubCommand::Cancel => ScrubWorkerCommand::Cancel,
				};
				info!("Sending command to scrub worker: {:?}", cmd);
				garage.block_manager.send_scrub_command(cmd).await?;
			}
			RepairType::Rebalance => {
				info!("Rebalancing the stored blocks among storage locations");
				bg.spawn_worker(garage_block::repair::RebalanceWorker::new(
					garage.block_manager.clone(),
				));
			}
		}
		Ok(LocalLaunchRepairOperationResponse)
	}
	Ok(())
}

// ----

@@ -103,7 +105,7 @@ trait TableRepair: Send + Sync + 'static {
		&mut self,
		garage: &Garage,
		entry: <<Self as TableRepair>::T as TableSchema>::E,
	) -> impl Future<Output = Result<bool, Error>> + Send;
	) -> impl Future<Output = Result<bool, GarageError>> + Send;
}

struct TableRepairWorker<T: TableRepair> {

@@ -139,7 +141,10 @@ impl<R: TableRepair> Worker for TableRepairWorker<R> {
	}
}

	async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
	async fn work(
		&mut self,
		_must_exit: &mut watch::Receiver<bool>,
	) -> Result<WorkerState, GarageError> {
		let (item_bytes, next_pos) = match R::table(&self.garage).data.store.get_gt(&self.pos)? {
			Some((k, v)) => (v, k),
			None => {

@@ -181,7 +186,7 @@ impl TableRepair for RepairVersions {
		&garage.version_table
	}

	async fn process(&mut self, garage: &Garage, version: Version) -> Result<bool, Error> {
	async fn process(&mut self, garage: &Garage, version: Version) -> Result<bool, GarageError> {
		if !version.deleted.get() {
			let ref_exists = match &version.backlink {
				VersionBacklink::Object { bucket_id, key } => garage

@@ -227,7 +232,11 @@ impl TableRepair for RepairBlockRefs {
		&garage.block_ref_table
	}

	async fn process(&mut self, garage: &Garage, mut block_ref: BlockRef) -> Result<bool, Error> {
	async fn process(
		&mut self,
		garage: &Garage,
		mut block_ref: BlockRef,
	) -> Result<bool, GarageError> {
		if !block_ref.deleted.get() {
			let ref_exists = garage
				.version_table

@@ -262,7 +271,11 @@ impl TableRepair for RepairMpu {
		&garage.mpu_table
	}

	async fn process(&mut self, garage: &Garage, mut mpu: MultipartUpload) -> Result<bool, Error> {
	async fn process(
		&mut self,
		garage: &Garage,
		mut mpu: MultipartUpload,
	) -> Result<bool, GarageError> {
		if !mpu.deleted.get() {
			let ref_exists = garage
				.object_table

@@ -329,7 +342,10 @@ impl Worker for BlockRcRepair {
	}
}

	async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
	async fn work(
		&mut self,
		_must_exit: &mut watch::Receiver<bool>,
	) -> Result<WorkerState, GarageError> {
		for _i in 0..RC_REPAIR_ITER_COUNT {
			let next1 = self
				.block_manager
@@ -7,12 +7,6 @@ use garage_api_common::router_macros::*;
use crate::error::*;
use crate::router_v0;

pub enum Authorization {
	None,
	MetricsToken,
	AdminToken,
}

router_match! {@func

/// List of all Admin API endpoints.

@@ -211,15 +205,6 @@ impl Endpoint {
			))),
		}
	}
	/// Get the kind of authorization which is required to perform the operation.
	pub fn authorization_type(&self) -> Authorization {
		match self {
			Self::Health => Authorization::None,
			Self::CheckDomain => Authorization::None,
			Self::Metrics => Authorization::MetricsToken,
			_ => Authorization::AdminToken,
		}
	}
}

generateQueryParameters! {
268 src/api/admin/router_v2.rs Normal file
@@ -0,0 +1,268 @@
|
|||
use std::borrow::Cow;
|
||||
|
||||
use hyper::body::Incoming as IncomingBody;
|
||||
use hyper::{Method, Request};
|
||||
use paste::paste;
|
||||
|
||||
use garage_api_common::helpers::*;
|
||||
use garage_api_common::router_macros::*;
|
||||
|
||||
use crate::api::*;
|
||||
use crate::error::*;
|
||||
use crate::router_v1;
|
||||
use crate::Authorization;
|
||||
|
||||
impl AdminApiRequest {
|
||||
/// Determine which S3 endpoint a request is for using the request, and a bucket which was
|
||||
/// possibly extracted from the Host header.
|
||||
/// Returns Self plus bucket name, if endpoint is not Endpoint::ListBuckets
|
||||
pub async fn from_request(req: Request<IncomingBody>) -> Result<Self, Error> {
|
||||
let uri = req.uri().clone();
|
||||
let path = uri.path();
|
||||
let query = uri.query();
|
||||
|
||||
let method = req.method().clone();
|
||||
|
||||
let mut query = QueryParameters::from_query(query.unwrap_or_default())?;
|
||||
|
||||
let res = router_match!(@gen_path_parser_v2 (&method, path, "/v2/", query, req) [
|
||||
@special OPTIONS _ => Options (),
|
||||
@special GET "/check" => CheckDomain (query::domain),
|
||||
@special GET "/health" => Health (),
|
||||
@special GET "/metrics" => Metrics (),
|
||||
// Cluster endpoints
|
||||
GET GetClusterStatus (),
|
||||
GET GetClusterHealth (),
|
||||
POST ConnectClusterNodes (body),
|
||||
// Layout endpoints
|
||||
GET GetClusterLayout (),
|
||||
POST UpdateClusterLayout (body),
|
||||
POST ApplyClusterLayout (body),
|
||||
POST RevertClusterLayout (),
|
||||
// API key endpoints
|
||||
GET GetKeyInfo (query_opt::id, query_opt::search, parse_default(false)::show_secret_key),
|
||||
POST UpdateKey (body_field, query::id),
|
||||
POST CreateKey (body),
|
||||
POST ImportKey (body),
|
||||
POST DeleteKey (query::id),
|
||||
GET ListKeys (),
|
||||
// Bucket endpoints
|
||||
GET GetBucketInfo (query_opt::id, query_opt::global_alias, query_opt::search),
|
||||
GET ListBuckets (),
|
||||
POST CreateBucket (body),
|
||||
POST DeleteBucket (query::id),
|
||||
POST UpdateBucket (body_field, query::id),
|
||||
POST CleanupIncompleteUploads (body),
|
||||
// Bucket-key permissions
|
||||
POST AllowBucketKey (body),
|
||||
POST DenyBucketKey (body),
|
||||
// Bucket aliases
|
||||
POST AddBucketAlias (body),
|
||||
POST RemoveBucketAlias (body),
|
||||
// Node APIs
|
||||
POST CreateMetadataSnapshot (default::body, query::node),
|
||||
GET GetNodeStatistics (default::body, query::node),
|
||||
GET GetClusterStatistics (),
|
||||
POST LaunchRepairOperation (body_field, query::node),
|
||||
// Worker APIs
|
||||
POST ListWorkers (body_field, query::node),
|
||||
POST GetWorkerInfo (body_field, query::node),
|
||||
POST GetWorkerVariable (body_field, query::node),
|
||||
POST SetWorkerVariable (body_field, query::node),
|
||||
// Block APIs
|
||||
GET ListBlockErrors (default::body, query::node),
|
||||
POST GetBlockInfo (body_field, query::node),
|
||||
POST RetryBlockResync (body_field, query::node),
|
||||
POST PurgeBlocks (body_field, query::node),
|
||||
]);
|
||||
|
||||
if let Some(message) = query.nonempty_message() {
|
||||
debug!("Unused query parameter: {}", message)
|
||||
}
|
||||
|
||||
Ok(res)
|
||||
}

	/// Some endpoints work exactly the same in their v2/ version as they did in their v1/ version.
	/// For these endpoints, we can convert a v1/ call to its equivalent as if it was made using
	/// its v2/ URL.
	pub async fn from_v1(
		v1_endpoint: router_v1::Endpoint,
		req: Request<IncomingBody>,
	) -> Result<Self, Error> {
		use router_v1::Endpoint;

		match v1_endpoint {
			Endpoint::GetClusterStatus => {
				Ok(AdminApiRequest::GetClusterStatus(GetClusterStatusRequest))
			}
			Endpoint::GetClusterHealth => {
				Ok(AdminApiRequest::GetClusterHealth(GetClusterHealthRequest))
			}
			Endpoint::ConnectClusterNodes => {
				let req = parse_json_body::<ConnectClusterNodesRequest, _, Error>(req).await?;
				Ok(AdminApiRequest::ConnectClusterNodes(req))
			}

			// Layout
			Endpoint::GetClusterLayout => {
				Ok(AdminApiRequest::GetClusterLayout(GetClusterLayoutRequest))
			}
			Endpoint::UpdateClusterLayout => {
				let updates = parse_json_body::<UpdateClusterLayoutRequest, _, Error>(req).await?;
				Ok(AdminApiRequest::UpdateClusterLayout(updates))
			}
			Endpoint::ApplyClusterLayout => {
				let param = parse_json_body::<ApplyClusterLayoutRequest, _, Error>(req).await?;
				Ok(AdminApiRequest::ApplyClusterLayout(param))
			}
			Endpoint::RevertClusterLayout => Ok(AdminApiRequest::RevertClusterLayout(
				RevertClusterLayoutRequest,
			)),

			// Keys
			Endpoint::ListKeys => Ok(AdminApiRequest::ListKeys(ListKeysRequest)),
			Endpoint::GetKeyInfo {
				id,
				search,
				show_secret_key,
			} => {
				let show_secret_key = show_secret_key.map(|x| x == "true").unwrap_or(false);
				Ok(AdminApiRequest::GetKeyInfo(GetKeyInfoRequest {
					id,
					search,
					show_secret_key,
				}))
			}
			Endpoint::CreateKey => {
				let req = parse_json_body::<CreateKeyRequest, _, Error>(req).await?;
				Ok(AdminApiRequest::CreateKey(req))
			}
			Endpoint::ImportKey => {
				let req = parse_json_body::<ImportKeyRequest, _, Error>(req).await?;
				Ok(AdminApiRequest::ImportKey(req))
			}
			Endpoint::UpdateKey { id } => {
				let body = parse_json_body::<UpdateKeyRequestBody, _, Error>(req).await?;
				Ok(AdminApiRequest::UpdateKey(UpdateKeyRequest { id, body }))
			}

			// DeleteKey semantics changed:
			// - in v1/ : HTTP DELETE => HTTP 204 No Content
			// - in v2/ : HTTP POST => HTTP 200 Ok
			// Endpoint::DeleteKey { id } => Ok(AdminApiRequest::DeleteKey(DeleteKeyRequest { id })),

			// Buckets
			Endpoint::ListBuckets => Ok(AdminApiRequest::ListBuckets(ListBucketsRequest)),
			Endpoint::GetBucketInfo { id, global_alias } => {
				Ok(AdminApiRequest::GetBucketInfo(GetBucketInfoRequest {
					id,
					global_alias,
					search: None,
				}))
			}
			Endpoint::CreateBucket => {
				let req = parse_json_body::<CreateBucketRequest, _, Error>(req).await?;
				Ok(AdminApiRequest::CreateBucket(req))
			}

			// DeleteBucket semantics changed:
			// - in v1/ : HTTP DELETE => HTTP 204 No Content
			// - in v2/ : HTTP POST => HTTP 200 Ok
			// Endpoint::DeleteBucket { id } => {
			// 	Ok(AdminApiRequest::DeleteBucket(DeleteBucketRequest { id }))
			// }
			Endpoint::UpdateBucket { id } => {
				let body = parse_json_body::<UpdateBucketRequestBody, _, Error>(req).await?;
				Ok(AdminApiRequest::UpdateBucket(UpdateBucketRequest {
					id,
					body,
				}))
			}

			// Bucket-key permissions
			Endpoint::BucketAllowKey => {
				let req = parse_json_body::<BucketKeyPermChangeRequest, _, Error>(req).await?;
				Ok(AdminApiRequest::AllowBucketKey(AllowBucketKeyRequest(req)))
			}
			Endpoint::BucketDenyKey => {
				let req = parse_json_body::<BucketKeyPermChangeRequest, _, Error>(req).await?;
				Ok(AdminApiRequest::DenyBucketKey(DenyBucketKeyRequest(req)))
			}
			// Bucket aliasing
			Endpoint::GlobalAliasBucket { id, alias } => {
				Ok(AdminApiRequest::AddBucketAlias(AddBucketAliasRequest {
					bucket_id: id,
					alias: BucketAliasEnum::Global {
						global_alias: alias,
					},
				}))
			}
			Endpoint::GlobalUnaliasBucket { id, alias } => Ok(AdminApiRequest::RemoveBucketAlias(
				RemoveBucketAliasRequest {
					bucket_id: id,
					alias: BucketAliasEnum::Global {
						global_alias: alias,
					},
				},
			)),
			Endpoint::LocalAliasBucket {
				id,
				access_key_id,
				alias,
			} => Ok(AdminApiRequest::AddBucketAlias(AddBucketAliasRequest {
				bucket_id: id,
				alias: BucketAliasEnum::Local {
					local_alias: alias,
					access_key_id,
				},
			})),
			Endpoint::LocalUnaliasBucket {
				id,
				access_key_id,
				alias,
			} => Ok(AdminApiRequest::RemoveBucketAlias(
				RemoveBucketAliasRequest {
					bucket_id: id,
					alias: BucketAliasEnum::Local {
						local_alias: alias,
						access_key_id,
					},
				},
			)),

			// For endpoints that have different body content syntax, issue
			// a deprecation warning
			_ => Err(Error::bad_request(format!(
				"v1/ endpoint is no longer supported: {}",
				v1_endpoint.name()
			))),
		}
	}

	/// Get the kind of authorization which is required to perform the operation.
	pub fn authorization_type(&self) -> Authorization {
		match self {
			Self::Options(_) => Authorization::None,
			Self::Health(_) => Authorization::None,
			Self::CheckDomain(_) => Authorization::None,
			Self::Metrics(_) => Authorization::MetricsToken,
			_ => Authorization::AdminToken,
		}
	}
}

generateQueryParameters! {
	keywords: [],
	fields: [
		"node" => node,
		"domain" => domain,
		"format" => format,
		"id" => id,
		"search" => search,
		"globalAlias" => global_alias,
		"alias" => alias,
		"accessKeyId" => access_key_id,
		"showSecretKey" => show_secret_key
	]
}
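
Roughly what this invocation expands to (a sketch of the assumed shape, not the actual macro output): one optional field per accepted parameter, plus a from_query() constructor that fills them from the URL query string, rejects duplicates, and records unknown parameters for the "Unused query parameter" warning above.

	use std::borrow::Cow;

	#[derive(Default)]
	struct QueryParameters<'a> {
		node: Option<Cow<'a, str>>,
		domain: Option<Cow<'a, str>>,
		format: Option<Cow<'a, str>>,
		id: Option<Cow<'a, str>>,
		search: Option<Cow<'a, str>>,
		global_alias: Option<Cow<'a, str>>,
		alias: Option<Cow<'a, str>>,
		access_key_id: Option<Cow<'a, str>>,
		show_secret_key: Option<Cow<'a, str>>,
	}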

179	src/api/admin/special.rs	Normal file

@@ -0,0 +1,179 @@
use std::sync::Arc;

use http::header::{
	ACCESS_CONTROL_ALLOW_HEADERS, ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN, ALLOW,
};
use hyper::{Response, StatusCode};

#[cfg(feature = "metrics")]
use prometheus::{Encoder, TextEncoder};

use garage_model::garage::Garage;
use garage_rpc::system::ClusterHealthStatus;

use garage_api_common::helpers::*;

use crate::api::{CheckDomainRequest, HealthRequest, MetricsRequest, OptionsRequest};
use crate::api_server::ResBody;
use crate::error::*;
use crate::{Admin, RequestHandler};

impl RequestHandler for OptionsRequest {
	type Response = Response<ResBody>;

	async fn handle(
		self,
		_garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<Response<ResBody>, Error> {
		Ok(Response::builder()
			.status(StatusCode::OK)
			.header(ALLOW, "OPTIONS,GET,POST")
			.header(ACCESS_CONTROL_ALLOW_METHODS, "OPTIONS,GET,POST")
			.header(ACCESS_CONTROL_ALLOW_HEADERS, "authorization,content-type")
			.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
			.body(empty_body())?)
	}
}

impl RequestHandler for MetricsRequest {
	type Response = Response<ResBody>;

	async fn handle(
		self,
		_garage: &Arc<Garage>,
		admin: &Admin,
	) -> Result<Response<ResBody>, Error> {
		#[cfg(feature = "metrics")]
		{
			use opentelemetry::trace::Tracer;

			let mut buffer = vec![];
			let encoder = TextEncoder::new();

			let tracer = opentelemetry::global::tracer("garage");
			let metric_families = tracer.in_span("admin/gather_metrics", |_| {
				admin.exporter.registry().gather()
			});

			encoder
				.encode(&metric_families, &mut buffer)
				.ok_or_internal_error("Could not serialize metrics")?;

			Ok(Response::builder()
				.status(StatusCode::OK)
				.header(http::header::CONTENT_TYPE, encoder.format_type())
				.body(bytes_body(buffer.into()))?)
		}
		#[cfg(not(feature = "metrics"))]
		Err(Error::bad_request(
			"Garage was built without the metrics feature".to_string(),
		))
	}
}

impl RequestHandler for HealthRequest {
	type Response = Response<ResBody>;

	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<Response<ResBody>, Error> {
		let health = garage.system.health();

		let (status, status_str) = match health.status {
			ClusterHealthStatus::Healthy => (StatusCode::OK, "Garage is fully operational"),
			ClusterHealthStatus::Degraded => (
				StatusCode::OK,
				"Garage is operational but some storage nodes are unavailable",
			),
			ClusterHealthStatus::Unavailable => (
				StatusCode::SERVICE_UNAVAILABLE,
				"Quorum is not available for some/all partitions, reads and writes will fail",
			),
		};
		let status_str = format!(
			"{}\nConsult the full health check API endpoint at /v2/GetClusterHealth for more details\n",
			status_str
		);

		Ok(Response::builder()
			.status(status)
			.header(http::header::CONTENT_TYPE, "text/plain")
			.body(string_body(status_str))?)
	}
}

impl RequestHandler for CheckDomainRequest {
	type Response = Response<ResBody>;

	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<Response<ResBody>, Error> {
		if check_domain(garage, &self.domain).await? {
			Ok(Response::builder()
				.status(StatusCode::OK)
				.body(string_body(format!(
					"Domain '{}' is managed by Garage",
					self.domain
				)))?)
		} else {
			Err(Error::bad_request(format!(
				"Domain '{}' is not managed by Garage",
				self.domain
			)))
		}
	}
}

async fn check_domain(garage: &Arc<Garage>, domain: &str) -> Result<bool, Error> {
	// Resolve bucket from domain name, inferring if the website must be activated for the
	// domain to be valid.
	let (bucket_name, must_check_website) = if let Some(bname) = garage
		.config
		.s3_api
		.root_domain
		.as_ref()
		.and_then(|rd| host_to_bucket(domain, rd))
	{
		(bname.to_string(), false)
	} else if let Some(bname) = garage
		.config
		.s3_web
		.as_ref()
		.and_then(|sw| host_to_bucket(domain, sw.root_domain.as_str()))
	{
		(bname.to_string(), true)
	} else {
		(domain.to_string(), true)
	};

	let bucket_id = match garage
		.bucket_helper()
		.resolve_global_bucket_name(&bucket_name)
		.await?
	{
		Some(bucket_id) => bucket_id,
		None => return Ok(false),
	};

	if !must_check_website {
		return Ok(true);
	}

	let bucket = garage
		.bucket_helper()
		.get_existing_bucket(bucket_id)
		.await?;

	let bucket_state = bucket.state.as_option().unwrap();
	let bucket_website_config = bucket_state.website_config.get();

	match bucket_website_config {
		Some(_v) => Ok(true),
		None => Ok(false),
	}
}
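
The resolution above tries the S3 vhost root domain first (website not required), then the web root domain, then falls back to treating the whole domain as a bucket name (website required). A minimal stand-in for the assumed behaviour of host_to_bucket, with illustrative root-domain values (the real helper is more thorough, e.g. about ports and exact matches):

	fn host_to_bucket<'a>(host: &'a str, root: &str) -> Option<&'a str> {
		// Strip the root-domain suffix; what remains is the bucket name.
		host.strip_suffix(root).filter(|b| !b.is_empty())
	}

	fn main() {
		assert_eq!(
			host_to_bucket("my-bucket.s3.garage.example", ".s3.garage.example"),
			Some("my-bucket")
		);
		// Not under the root domain: no vhost-style bucket.
		assert_eq!(host_to_bucket("static.example.com", ".s3.garage.example"), None);
	}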

118	src/api/admin/worker.rs	Normal file

@@ -0,0 +1,118 @@
use std::collections::HashMap;
use std::sync::Arc;

use garage_util::background::*;
use garage_util::time::now_msec;

use garage_model::garage::Garage;

use crate::api::*;
use crate::error::Error;
use crate::{Admin, RequestHandler};

impl RequestHandler for LocalListWorkersRequest {
	type Response = LocalListWorkersResponse;

	async fn handle(
		self,
		_garage: &Arc<Garage>,
		admin: &Admin,
	) -> Result<LocalListWorkersResponse, Error> {
		let workers = admin.background.get_worker_info();
		let info = workers
			.into_iter()
			.filter(|(_, w)| {
				(!self.busy_only
					|| matches!(w.state, WorkerState::Busy | WorkerState::Throttled(_)))
					&& (!self.error_only || w.errors > 0)
			})
			.map(|(id, w)| worker_info_to_api(id as u64, w))
			.collect::<Vec<_>>();
		Ok(LocalListWorkersResponse(info))
	}
}

impl RequestHandler for LocalGetWorkerInfoRequest {
	type Response = LocalGetWorkerInfoResponse;

	async fn handle(
		self,
		_garage: &Arc<Garage>,
		admin: &Admin,
	) -> Result<LocalGetWorkerInfoResponse, Error> {
		let info = admin
			.background
			.get_worker_info()
			.get(&(self.id as usize))
			.ok_or(Error::NoSuchWorker(self.id))?
			.clone();
		Ok(LocalGetWorkerInfoResponse(worker_info_to_api(
			self.id, info,
		)))
	}
}

impl RequestHandler for LocalGetWorkerVariableRequest {
	type Response = LocalGetWorkerVariableResponse;

	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<LocalGetWorkerVariableResponse, Error> {
		let mut res = HashMap::new();
		if let Some(k) = self.variable {
			res.insert(k.clone(), garage.bg_vars.get(&k)?);
		} else {
			let vars = garage.bg_vars.get_all();
			for (k, v) in vars.iter() {
				res.insert(k.to_string(), v.to_string());
			}
		}
		Ok(LocalGetWorkerVariableResponse(res))
	}
}

impl RequestHandler for LocalSetWorkerVariableRequest {
	type Response = LocalSetWorkerVariableResponse;

	async fn handle(
		self,
		garage: &Arc<Garage>,
		_admin: &Admin,
	) -> Result<LocalSetWorkerVariableResponse, Error> {
		garage.bg_vars.set(&self.variable, &self.value)?;

		Ok(LocalSetWorkerVariableResponse {
			variable: self.variable,
			value: self.value,
		})
	}
}

// ---- helper functions ----

fn worker_info_to_api(id: u64, info: WorkerInfo) -> WorkerInfoResp {
	WorkerInfoResp {
		id,
		name: info.name,
		state: match info.state {
			WorkerState::Busy => WorkerStateResp::Busy,
			WorkerState::Throttled(t) => WorkerStateResp::Throttled { duration_secs: t },
			WorkerState::Idle => WorkerStateResp::Idle,
			WorkerState::Done => WorkerStateResp::Done,
		},
		errors: info.errors as u64,
		consecutive_errors: info.consecutive_errors as u64,
		last_error: info.last_error.map(|(message, t)| WorkerLastError {
			message,
			secs_ago: now_msec().saturating_sub(t) / 1000,
		}),

		tranquility: info.status.tranquility,
		progress: info.status.progress,
		queue_length: info.status.queue_length,
		persistent_errors: info.status.persistent_errors,
		freeform: info.status.freeform,
	}
}
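
The worker filter in LocalListWorkersRequest::handle keeps an entry only if it passes both optional criteria. The predicate, extracted into a self-contained function for illustration (names assumed):

	fn keep(busy_only: bool, error_only: bool, is_busy: bool, errors: u64) -> bool {
		// Mirrors the .filter() closure: each flag, when set, adds a requirement.
		(!busy_only || is_busy) && (!error_only || errors > 0)
	}

	fn main() {
		assert!(keep(false, false, false, 0)); // no filters: keep everything
		assert!(!keep(true, false, false, 0)); // busy_only drops idle workers
		assert!(keep(true, true, true, 3)); // busy and erroring passes both
	}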

src/api/common/Cargo.toml

@@ -1,12 +1,12 @@
[package]
name = "garage_api_common"
version = "1.1.0"
version = "1.0.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
description = "Common functions for the API server crates for the Garage object store"
repository = "https://git.deuxfleurs.fr/Deuxfleurs/garage"
readme = "../../../README.md"
readme = "../../README.md"

[lib]
path = "lib.rs"

@@ -18,21 +18,16 @@ garage_model.workspace = true
garage_table.workspace = true
garage_util.workspace = true

base64.workspace = true
bytes.workspace = true
chrono.workspace = true
crc32fast.workspace = true
crc32c.workspace = true
crypto-common.workspace = true
err-derive.workspace = true
hex.workspace = true
hmac.workspace = true
md-5.workspace = true
idna.workspace = true
tracing.workspace = true
nom.workspace = true
pin-project.workspace = true
sha1.workspace = true
sha2.workspace = true

futures.workspace = true

src/api/common/cors.rs

@@ -14,9 +14,9 @@ use crate::common_error::{
};
use crate::helpers::*;

pub fn find_matching_cors_rule<'a, B>(
pub fn find_matching_cors_rule<'a>(
	bucket_params: &'a BucketParams,
	req: &Request<B>,
	req: &Request<impl Body>,
) -> Result<Option<&'a GarageCorsRule>, CommonError> {
	if let Some(cors_config) = bucket_params.cors_config.get() {
		if let Some(origin) = req.headers().get("Origin") {

@@ -132,8 +132,8 @@ pub async fn handle_options_api(
	}
}

pub fn handle_options_for_bucket<B>(
	req: &Request<B>,
pub fn handle_options_for_bucket(
	req: &Request<IncomingBody>,
	bucket_params: &BucketParams,
) -> Result<Response<EmptyBody>, CommonError> {
	let origin = req

src/api/common/generic_server.rs

@@ -1,3 +1,4 @@
use std::borrow::Cow;
use std::convert::Infallible;
use std::fs::{self, Permissions};
use std::os::unix::fs::PermissionsExt;

@@ -35,7 +36,7 @@ use garage_util::socket_address::UnixOrTCPSocketAddress;
use crate::helpers::{BoxBody, ErrorBody};

pub trait ApiEndpoint: Send + Sync + 'static {
	fn name(&self) -> &'static str;
	fn name(&self) -> Cow<'static, str>;
	fn add_span_attributes(&self, span: SpanRef<'_>);
}

src/api/common/router_macros.rs

@@ -45,6 +45,68 @@ macro_rules! router_match {
			}
		}
	}};
	(@gen_path_parser_v2 ($method:expr, $reqpath:expr, $pathprefix:literal, $query:expr, $req:expr)
	[
		$(@special $spec_meth:ident $spec_path:pat => $spec_api:ident $spec_params:tt,)*
		$($meth:ident $api:ident $params:tt,)*
	]) => {{
		{
			#[allow(unused_parens)]
			match ($method, $reqpath) {
				$(
					(&Method::$spec_meth, $spec_path) => AdminApiRequest::$spec_api (
						router_match!(@@gen_parse_request $spec_api, $spec_params, $query, $req)
					),
				)*
				$(
					(&Method::$meth, concat!($pathprefix, stringify!($api)))
						=> AdminApiRequest::$api (
						router_match!(@@gen_parse_request $api, $params, $query, $req)
					),
				)*
				(m, p) => {
					return Err(Error::bad_request(format!(
						"Unknown API endpoint: {} {}",
						m, p
					)))
				}
			}
		}
	}};
	(@@gen_parse_request $api:ident, (), $query: expr, $req:expr) => {{
		paste!(
			[< $api Request >]
		)
	}};
	(@@gen_parse_request $api:ident, (body), $query: expr, $req:expr) => {{
		paste!({
			parse_json_body::< [<$api Request>], _, Error>($req).await?
		})
	}};
	(@@gen_parse_request $api:ident, (body_field, $($conv:ident $(($conv_arg:expr))? :: $param:ident),*), $query: expr, $req:expr)
	=>
	{{
		paste!({
			let body = parse_json_body::< [<$api RequestBody>], _, Error>($req).await?;
			[< $api Request >] {
				body,
				$(
					$param: router_match!(@@parse_param $query, $conv $(($conv_arg))?, $param),
				)+
			}
		})
	}};
	(@@gen_parse_request $api:ident, ($($conv:ident $(($conv_arg:expr))? :: $param:ident),*), $query: expr, $req:expr)
	=>
	{{
		paste!({
			[< $api Request >] {
				$(
					$param: router_match!(@@parse_param $query, $conv $(($conv_arg))?, $param),
				)+
			}
		})
	}};
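
These parser arms lean on the `paste` crate (added to the workspace dependencies in this branch) to splice endpoint identifiers into type names at expansion time. A minimal, self-contained sketch of that trick, with an assumed request type:

	use paste::paste;

	struct GetKeyInfoRequest {
		id: Option<String>,
	}

	macro_rules! request_type {
		($api:ident { $($field:ident : $value:expr),* }) => {
			paste! {
				// [<$api Request>] concatenates the identifier with the
				// `Request` suffix, so GetKeyInfo selects GetKeyInfoRequest.
				[<$api Request>] { $($field: $value),* }
			}
		};
	}

	fn main() {
		let req = request_type!(GetKeyInfo { id: Some("GK123".into()) });
		assert_eq!(req.id.as_deref(), Some("GK123"));
	}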
	(@gen_parser ($keyword:expr, $key:ident, $query:expr, $header:expr),
	key: [$($kw_k:ident $(if $required_k:ident)? $(header $header_k:expr)? => $api_k:ident $(($($conv_k:ident :: $param_k:ident),*))?,)*],
	no_key: [$($kw_nk:ident $(if $required_nk:ident)? $(if_header $header_nk:expr)? => $api_nk:ident $(($($conv_nk:ident :: $param_nk:ident),*))?,)*]) => {{

@@ -79,13 +141,19 @@ macro_rules! router_match {
		}
	}};

	(@@parse_param $query:expr, default, $param:ident) => {{
		Default::default()
	}};
	(@@parse_param $query:expr, query_opt, $param:ident) => {{
		// extract optional query parameter
		$query.$param.take().map(|param| param.into_owned())
	}};
	(@@parse_param $query:expr, query, $param:ident) => {{
		// extract mandatory query parameter
		$query.$param.take().ok_or_bad_request("Missing argument for endpoint")?.into_owned()
		$query.$param.take()
			.ok_or_bad_request(
				format!("Missing argument `{}` for endpoint", stringify!($param))
			)?.into_owned()
	}};
	(@@parse_param $query:expr, opt_parse, $param:ident) => {{
		// extract and parse optional query parameter

@@ -99,10 +167,22 @@ macro_rules! router_match {
	(@@parse_param $query:expr, parse, $param:ident) => {{
		// extract and parse mandatory query parameter
		// both missing and un-parseable parameters are reported as errors
		$query.$param.take().ok_or_bad_request("Missing argument for endpoint")?
		$query.$param.take()
			.ok_or_bad_request(
				format!("Missing argument `{}` for endpoint", stringify!($param))
			)?
			.parse()
			.map_err(|_| Error::bad_request("Failed to parse query parameter"))?
	}};
	(@@parse_param $query:expr, parse_default($default:expr), $param:ident) => {{
		// extract and parse optional query parameter
		// using provided value as default if parameter is missing
		$query.$param.take().map(|x| x
			.parse()
			.map_err(|_| Error::bad_request("Failed to parse query parameter")))
			.transpose()?
			.unwrap_or($default)
	}};
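
This parse_default arm is what backs `parse_default(false)::show_secret_key` in the v2 router above: absent means the default, present means parsed, unparseable means HTTP 400. The same behaviour as a standalone function, for illustration:

	fn parse_default<T: std::str::FromStr>(raw: Option<&str>, default: T) -> Result<T, String> {
		raw.map(|x| {
			x.parse()
				.map_err(|_| "Failed to parse query parameter".to_string())
		})
		.transpose()
		.map(|v| v.unwrap_or(default))
	}

	fn main() {
		assert_eq!(parse_default::<bool>(None, false), Ok(false)); // absent => default
		assert_eq!(parse_default::<bool>(Some("true"), false), Ok(true)); // parsed
		assert!(parse_default::<bool>(Some("yes"), false).is_err()); // => 400
	}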
	(@func
	$(#[$doc:meta])*
	pub enum Endpoint {

@@ -187,6 +267,7 @@ macro_rules! generateQueryParameters {
				},
			)*
			$(
				// FIXME: remove if !v.is_empty() ?
				$f_param => if !v.is_empty() {
					if res.$f_name.replace(v).is_some() {
						return Err(Error::bad_request(format!(

src/api/common/signature/body.rs

@@ -1,135 +0,0 @@
use std::sync::Mutex;

use futures::prelude::*;
use futures::stream::BoxStream;
use http_body_util::{BodyExt, StreamBody};
use hyper::body::{Bytes, Frame};
use serde::Deserialize;
use tokio::sync::mpsc;
use tokio::task;

use super::*;

use crate::signature::checksum::*;

pub struct ReqBody {
	// why need mutex to be sync??
	pub(crate) stream: Mutex<BoxStream<'static, Result<Frame<Bytes>, Error>>>,
	pub(crate) checksummer: Checksummer,
	pub(crate) expected_checksums: ExpectedChecksums,
	pub(crate) trailer_algorithm: Option<ChecksumAlgorithm>,
}

pub type StreamingChecksumReceiver = task::JoinHandle<Result<Checksums, Error>>;

impl ReqBody {
	pub fn add_expected_checksums(&mut self, more: ExpectedChecksums) {
		if more.md5.is_some() {
			self.expected_checksums.md5 = more.md5;
		}
		if more.sha256.is_some() {
			self.expected_checksums.sha256 = more.sha256;
		}
		if more.extra.is_some() {
			self.expected_checksums.extra = more.extra;
		}
		self.checksummer.add_expected(&self.expected_checksums);
	}

	pub fn add_md5(&mut self) {
		self.checksummer.add_md5();
	}

	// ============ non-streaming =============

	pub async fn json<T: for<'a> Deserialize<'a>>(self) -> Result<T, Error> {
		let body = self.collect().await?;
		let resp: T = serde_json::from_slice(&body).ok_or_bad_request("Invalid JSON")?;
		Ok(resp)
	}

	pub async fn collect(self) -> Result<Bytes, Error> {
		self.collect_with_checksums().await.map(|(b, _)| b)
	}

	pub async fn collect_with_checksums(mut self) -> Result<(Bytes, Checksums), Error> {
		let stream: BoxStream<_> = self.stream.into_inner().unwrap();
		let bytes = BodyExt::collect(StreamBody::new(stream)).await?.to_bytes();

		self.checksummer.update(&bytes);
		let checksums = self.checksummer.finalize();
		checksums.verify(&self.expected_checksums)?;

		Ok((bytes, checksums))
	}

	// ============ streaming =============

	pub fn streaming_with_checksums(
		self,
	) -> (
		BoxStream<'static, Result<Bytes, Error>>,
		StreamingChecksumReceiver,
	) {
		let Self {
			stream,
			mut checksummer,
			mut expected_checksums,
			trailer_algorithm,
		} = self;

		let (frame_tx, mut frame_rx) = mpsc::channel::<Frame<Bytes>>(5);

		let join_checksums = tokio::spawn(async move {
			while let Some(frame) = frame_rx.recv().await {
				match frame.into_data() {
					Ok(data) => {
						checksummer = tokio::task::spawn_blocking(move || {
							checksummer.update(&data);
							checksummer
						})
						.await
						.unwrap()
					}
					Err(frame) => {
						let trailers = frame.into_trailers().unwrap();
						let algo = trailer_algorithm.unwrap();
						expected_checksums.extra = Some(extract_checksum_value(&trailers, algo)?);
						break;
					}
				}
			}

			if trailer_algorithm.is_some() && expected_checksums.extra.is_none() {
				return Err(Error::bad_request("trailing checksum was not sent"));
			}

			let checksums = checksummer.finalize();
			checksums.verify(&expected_checksums)?;

			Ok(checksums)
		});

		let stream: BoxStream<_> = stream.into_inner().unwrap();
		let stream = stream.filter_map(move |x| {
			let frame_tx = frame_tx.clone();
			async move {
				match x {
					Err(e) => Some(Err(e)),
					Ok(frame) => {
						if frame.is_data() {
							let data = frame.data_ref().unwrap().clone();
							let _ = frame_tx.send(frame).await;
							Some(Ok(data))
						} else {
							let _ = frame_tx.send(frame).await;
							None
						}
					}
				}
			}
		});

		(stream.boxed(), join_checksums)
	}
}

src/api/common/signature/error.rs

@@ -18,10 +18,6 @@ pub enum Error {
	/// The request contained an invalid UTF-8 sequence in its path or in other parameters
	#[error(display = "Invalid UTF-8: {}", _0)]
	InvalidUtf8Str(#[error(source)] std::str::Utf8Error),

	/// The provided digest (checksum) value was invalid
	#[error(display = "Invalid digest: {}", _0)]
	InvalidDigest(String),
}

impl<T> From<T> for Error

src/api/common/signature/mod.rs

@@ -2,7 +2,6 @@ use chrono::{DateTime, Utc};
use hmac::{Hmac, Mac};
use sha2::Sha256;

use hyper::header::HeaderName;
use hyper::{body::Incoming as IncomingBody, Request};

use garage_model::garage::Garage;

@@ -11,8 +10,6 @@ use garage_util::data::{sha256sum, Hash};

use error::*;

pub mod body;
pub mod checksum;
pub mod error;
pub mod payload;
pub mod streaming;

@@ -20,73 +17,36 @@ pub mod streaming;
pub const SHORT_DATE: &str = "%Y%m%d";
pub const LONG_DATETIME: &str = "%Y%m%dT%H%M%SZ";

// ---- Constants used in AWSv4 signatures ----

pub const X_AMZ_ALGORITHM: HeaderName = HeaderName::from_static("x-amz-algorithm");
pub const X_AMZ_CREDENTIAL: HeaderName = HeaderName::from_static("x-amz-credential");
pub const X_AMZ_DATE: HeaderName = HeaderName::from_static("x-amz-date");
pub const X_AMZ_EXPIRES: HeaderName = HeaderName::from_static("x-amz-expires");
pub const X_AMZ_SIGNEDHEADERS: HeaderName = HeaderName::from_static("x-amz-signedheaders");
pub const X_AMZ_SIGNATURE: HeaderName = HeaderName::from_static("x-amz-signature");
pub const X_AMZ_CONTENT_SHA256: HeaderName = HeaderName::from_static("x-amz-content-sha256");
pub const X_AMZ_TRAILER: HeaderName = HeaderName::from_static("x-amz-trailer");

/// Result of `sha256("")`
pub(crate) const EMPTY_STRING_HEX_DIGEST: &str =
	"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855";

// Signature calculation algorithm
pub const AWS4_HMAC_SHA256: &str = "AWS4-HMAC-SHA256";
type HmacSha256 = Hmac<Sha256>;

// Possible values for x-amz-content-sha256, in addition to the actual sha256
pub const UNSIGNED_PAYLOAD: &str = "UNSIGNED-PAYLOAD";
pub const STREAMING_UNSIGNED_PAYLOAD_TRAILER: &str = "STREAMING-UNSIGNED-PAYLOAD-TRAILER";
pub const STREAMING_AWS4_HMAC_SHA256_PAYLOAD: &str = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD";

// Used in the computation of StringToSign
pub const AWS4_HMAC_SHA256_PAYLOAD: &str = "AWS4-HMAC-SHA256-PAYLOAD";

// ---- enums to describe stuff going on in signature calculation ----

#[derive(Debug)]
pub enum ContentSha256Header {
	UnsignedPayload,
	Sha256Checksum(Hash),
	StreamingPayload { trailer: bool, signed: bool },
}

// ---- top-level functions ----

pub struct VerifiedRequest {
	pub request: Request<streaming::ReqBody>,
	pub access_key: Key,
	pub content_sha256_header: ContentSha256Header,
}

pub async fn verify_request(
	garage: &Garage,
	mut req: Request<IncomingBody>,
	service: &'static str,
) -> Result<VerifiedRequest, Error> {
	let checked_signature = payload::check_payload_signature(&garage, &mut req, service).await?;
) -> Result<(Request<streaming::ReqBody>, Key, Option<Hash>), Error> {
	let (api_key, mut content_sha256) =
		payload::check_payload_signature(&garage, &mut req, service).await?;
	let api_key =
		api_key.ok_or_else(|| Error::forbidden("Garage does not support anonymous access yet"))?;

	let request = streaming::parse_streaming_body(
	let req = streaming::parse_streaming_body(
		&api_key,
		req,
		&checked_signature,
		&mut content_sha256,
		&garage.config.s3_api.s3_region,
		service,
	)?;

	let access_key = checked_signature
		.key
		.ok_or_else(|| Error::forbidden("Garage does not support anonymous access yet"))?;
	Ok((req, api_key, content_sha256))
}

	Ok(VerifiedRequest {
		request,
		access_key,
		content_sha256_header: checked_signature.content_sha256_header,
	})
pub fn verify_signed_content(expected_sha256: Hash, body: &[u8]) -> Result<(), Error> {
	if expected_sha256 != sha256sum(body) {
		return Err(Error::bad_request(
			"Request content hash does not match signed hash".to_string(),
		));
	}
	Ok(())
}

pub fn signing_hmac(

src/api/common/signature/payload.rs

@@ -13,9 +13,23 @@ use garage_util::data::Hash;
use garage_model::garage::Garage;
use garage_model::key_table::*;

use super::*;
use super::LONG_DATETIME;
use super::{compute_scope, signing_hmac};

use crate::encoding::uri_encode;
use crate::signature::error::*;

pub const X_AMZ_ALGORITHM: HeaderName = HeaderName::from_static("x-amz-algorithm");
pub const X_AMZ_CREDENTIAL: HeaderName = HeaderName::from_static("x-amz-credential");
pub const X_AMZ_DATE: HeaderName = HeaderName::from_static("x-amz-date");
pub const X_AMZ_EXPIRES: HeaderName = HeaderName::from_static("x-amz-expires");
pub const X_AMZ_SIGNEDHEADERS: HeaderName = HeaderName::from_static("x-amz-signedheaders");
pub const X_AMZ_SIGNATURE: HeaderName = HeaderName::from_static("x-amz-signature");
pub const X_AMZ_CONTENT_SH256: HeaderName = HeaderName::from_static("x-amz-content-sha256");

pub const AWS4_HMAC_SHA256: &str = "AWS4-HMAC-SHA256";
pub const UNSIGNED_PAYLOAD: &str = "UNSIGNED-PAYLOAD";
pub const STREAMING_AWS4_HMAC_SHA256_PAYLOAD: &str = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD";

pub type QueryMap = HeaderMap<QueryValue>;
pub struct QueryValue {

@@ -25,18 +39,11 @@ pub struct QueryValue {
	value: String,
}

#[derive(Debug)]
pub struct CheckedSignature {
	pub key: Option<Key>,
	pub content_sha256_header: ContentSha256Header,
	pub signature_header: Option<String>,
}

pub async fn check_payload_signature(
	garage: &Garage,
	request: &mut Request<IncomingBody>,
	service: &'static str,
) -> Result<CheckedSignature, Error> {
) -> Result<(Option<Key>, Option<Hash>), Error> {
	let query = parse_query_map(request.uri())?;

	if query.contains_key(&X_AMZ_ALGORITHM) {

@@ -50,46 +57,17 @@ pub async fn check_payload_signature(
		// Unsigned (anonymous) request
		let content_sha256 = request
			.headers()
			.get(X_AMZ_CONTENT_SHA256)
			.map(|x| x.to_str())
			.transpose()?;
		Ok(CheckedSignature {
			key: None,
			content_sha256_header: parse_x_amz_content_sha256(content_sha256)?,
			signature_header: None,
		})
	}
}

fn parse_x_amz_content_sha256(header: Option<&str>) -> Result<ContentSha256Header, Error> {
	let header = match header {
		Some(x) => x,
		None => return Ok(ContentSha256Header::UnsignedPayload),
	};
	if header == UNSIGNED_PAYLOAD {
		Ok(ContentSha256Header::UnsignedPayload)
	} else if let Some(rest) = header.strip_prefix("STREAMING-") {
		let (trailer, algo) = if let Some(rest2) = rest.strip_suffix("-TRAILER") {
			(true, rest2)
			.get("x-amz-content-sha256")
			.filter(|c| c.as_bytes() != UNSIGNED_PAYLOAD.as_bytes());
		if let Some(content_sha256) = content_sha256 {
			let sha256 = hex::decode(content_sha256)
				.ok()
				.and_then(|bytes| Hash::try_from(&bytes))
				.ok_or_bad_request("Invalid content sha256 hash")?;
			Ok((None, Some(sha256)))
		} else {
			(false, rest)
		};
		let signed = match algo {
			AWS4_HMAC_SHA256_PAYLOAD => true,
			UNSIGNED_PAYLOAD => false,
			_ => {
				return Err(Error::bad_request(
					"invalid or unsupported x-amz-content-sha256",
				))
			}
		};
		Ok(ContentSha256Header::StreamingPayload { trailer, signed })
	} else {
		let sha256 = hex::decode(header)
			.ok()
			.and_then(|bytes| Hash::try_from(&bytes))
			.ok_or_bad_request("Invalid content sha256 hash")?;
		Ok(ContentSha256Header::Sha256Checksum(sha256))
			Ok((None, None))
		}
	}
}

@@ -98,7 +76,7 @@ async fn check_standard_signature(
	service: &'static str,
	request: &Request<IncomingBody>,
	query: QueryMap,
) -> Result<CheckedSignature, Error> {
) -> Result<(Option<Key>, Option<Hash>), Error> {
	let authorization = Authorization::parse_header(request.headers())?;

	// Verify that all necessary request headers are included in signed_headers

@@ -130,13 +108,18 @@ async fn check_standard_signature(

	let key = verify_v4(garage, service, &authorization, string_to_sign.as_bytes()).await?;

	let content_sha256_header = parse_x_amz_content_sha256(Some(&authorization.content_sha256))?;
	let content_sha256 = if authorization.content_sha256 == UNSIGNED_PAYLOAD {
		None
	} else if authorization.content_sha256 == STREAMING_AWS4_HMAC_SHA256_PAYLOAD {
		let bytes = hex::decode(authorization.signature).ok_or_bad_request("Invalid signature")?;
		Some(Hash::try_from(&bytes).ok_or_bad_request("Invalid signature")?)
	} else {
		let bytes = hex::decode(authorization.content_sha256)
			.ok_or_bad_request("Invalid content sha256 hash")?;
		Some(Hash::try_from(&bytes).ok_or_bad_request("Invalid content sha256 hash")?)
	};

	Ok(CheckedSignature {
		key: Some(key),
		content_sha256_header,
		signature_header: Some(authorization.signature),
	})
	Ok((Some(key), content_sha256))
}

async fn check_presigned_signature(

@@ -144,7 +127,7 @@ async fn check_presigned_signature(
	service: &'static str,
	request: &mut Request<IncomingBody>,
	mut query: QueryMap,
) -> Result<CheckedSignature, Error> {
) -> Result<(Option<Key>, Option<Hash>), Error> {
	let algorithm = query.get(&X_AMZ_ALGORITHM).unwrap();
	let authorization = Authorization::parse_presigned(&algorithm.value, &query)?;

@@ -210,11 +193,7 @@ async fn check_presigned_signature(

	// Presigned URLs always use UNSIGNED-PAYLOAD,
	// so there is no sha256 hash to return.
	Ok(CheckedSignature {
		key: Some(key),
		content_sha256_header: ContentSha256Header::UnsignedPayload,
		signature_header: Some(authorization.signature),
	})
	Ok((Some(key), None))
}

pub fn parse_query_map(uri: &http::uri::Uri) -> Result<QueryMap, Error> {

@@ -463,7 +442,7 @@ impl Authorization {
		.to_string();

	let content_sha256 = headers
		.get(X_AMZ_CONTENT_SHA256)
		.get(X_AMZ_CONTENT_SH256)
		.ok_or_bad_request("Missing X-Amz-Content-Sha256 field")?;

	let date = headers

src/api/common/signature/streaming.rs

@@ -1,157 +1,84 @@
use std::pin::Pin;
use std::sync::Mutex;

use chrono::{DateTime, NaiveDateTime, TimeZone, Utc};
use futures::prelude::*;
use futures::task;
use garage_model::key_table::Key;
use hmac::Mac;
use http::header::{HeaderMap, HeaderValue, CONTENT_ENCODING};
use hyper::body::{Bytes, Frame, Incoming as IncomingBody};
use http_body_util::StreamBody;
use hyper::body::{Bytes, Incoming as IncomingBody};
use hyper::Request;

use garage_util::data::Hash;

use super::*;
use super::{compute_scope, sha256sum, HmacSha256, LONG_DATETIME};

use crate::helpers::body_stream;
use crate::signature::checksum::*;
use crate::signature::payload::CheckedSignature;
use crate::helpers::*;
use crate::signature::error::*;
use crate::signature::payload::{
	STREAMING_AWS4_HMAC_SHA256_PAYLOAD, X_AMZ_CONTENT_SH256, X_AMZ_DATE,
};

pub use crate::signature::body::ReqBody;
pub const AWS4_HMAC_SHA256_PAYLOAD: &str = "AWS4-HMAC-SHA256-PAYLOAD";

pub type ReqBody = BoxBody<Error>;

pub fn parse_streaming_body(
	mut req: Request<IncomingBody>,
	checked_signature: &CheckedSignature,
	api_key: &Key,
	req: Request<IncomingBody>,
	content_sha256: &mut Option<Hash>,
	region: &str,
	service: &str,
) -> Result<Request<ReqBody>, Error> {
	debug!(
		"Content signature mode: {:?}",
		checked_signature.content_sha256_header
	);
	match req.headers().get(X_AMZ_CONTENT_SH256) {
		Some(header) if header == STREAMING_AWS4_HMAC_SHA256_PAYLOAD => {
			let signature = content_sha256
				.take()
				.ok_or_bad_request("No signature provided")?;

	match checked_signature.content_sha256_header {
		ContentSha256Header::StreamingPayload { signed, trailer } => {
			// Sanity checks
			if !signed && !trailer {
				return Err(Error::bad_request(
					"STREAMING-UNSIGNED-PAYLOAD without trailer is not a valid combination",
				));
			}
			let secret_key = &api_key
				.state
				.as_option()
				.ok_or_internal_error("Deleted key state")?
				.secret_key;

			// Remove the aws-chunked component in the content-encoding: header
			// Note: this header is not properly sent by minio client, so don't fail
			// if it is absent from the request.
			if let Some(content_encoding) = req.headers_mut().remove(CONTENT_ENCODING) {
				if let Some(rest) = content_encoding.as_bytes().strip_prefix(b"aws-chunked,") {
					req.headers_mut()
						.insert(CONTENT_ENCODING, HeaderValue::from_bytes(rest).unwrap());
				} else if content_encoding != "aws-chunked" {
					return Err(Error::bad_request(
						"content-encoding does not contain aws-chunked for STREAMING-*-PAYLOAD",
					));
				}
			}
			let date = req
				.headers()
				.get(X_AMZ_DATE)
				.ok_or_bad_request("Missing X-Amz-Date field")?
				.to_str()?;
			let date: NaiveDateTime = NaiveDateTime::parse_from_str(date, LONG_DATETIME)
				.ok_or_bad_request("Invalid date")?;
			let date: DateTime<Utc> = Utc.from_utc_datetime(&date);

			// If trailer header is announced, add the calculation of the requested checksum
			let mut checksummer = Checksummer::init(&Default::default(), false);
			let trailer_algorithm = if trailer {
				let algo = Some(
					request_trailer_checksum_algorithm(req.headers())?
						.ok_or_bad_request("Missing x-amz-trailer header")?,
				);
				checksummer = checksummer.add(algo);
				algo
			} else {
				None
			};

			// For signed variants, determine signing parameters
			let sign_params = if signed {
				let signature = checked_signature
					.signature_header
					.clone()
					.ok_or_bad_request("No signature provided")?;
				let signature = hex::decode(signature)
					.ok()
					.and_then(|bytes| Hash::try_from(&bytes))
					.ok_or_bad_request("Invalid signature")?;

				let secret_key = checked_signature
					.key
					.as_ref()
					.ok_or_bad_request("Cannot sign streaming payload without signing key")?
					.state
					.as_option()
					.ok_or_internal_error("Deleted key state")?
					.secret_key
					.to_string();

				let date = req
					.headers()
					.get(X_AMZ_DATE)
					.ok_or_bad_request("Missing X-Amz-Date field")?
					.to_str()?;
				let date: NaiveDateTime = NaiveDateTime::parse_from_str(date, LONG_DATETIME)
					.ok_or_bad_request("Invalid date")?;
				let date: DateTime<Utc> = Utc.from_utc_datetime(&date);

				let scope = compute_scope(&date, region, service);
				let signing_hmac =
					crate::signature::signing_hmac(&date, &secret_key, region, service)
						.ok_or_internal_error("Unable to build signing HMAC")?;

				Some(SignParams {
					datetime: date,
					scope,
					signing_hmac,
					previous_signature: signature,
				})
			} else {
				None
			};
			let scope = compute_scope(&date, region, service);
			let signing_hmac = crate::signature::signing_hmac(&date, secret_key, region, service)
				.ok_or_internal_error("Unable to build signing HMAC")?;

			Ok(req.map(move |body| {
				let stream = body_stream::<_, Error>(body);

				let signed_payload_stream =
					StreamingPayloadStream::new(stream, sign_params, trailer).map_err(Error::from);
				ReqBody {
					stream: Mutex::new(signed_payload_stream.boxed()),
					checksummer,
					expected_checksums: Default::default(),
					trailer_algorithm,
				}
				SignedPayloadStream::new(stream, signing_hmac, date, &scope, signature)
					.map(|x| x.map(hyper::body::Frame::data))
					.map_err(Error::from);
				ReqBody::new(StreamBody::new(signed_payload_stream))
			}))
		}
		_ => Ok(req.map(|body| {
			let expected_checksums = ExpectedChecksums {
				sha256: match &checked_signature.content_sha256_header {
					ContentSha256Header::Sha256Checksum(sha256) => Some(*sha256),
					_ => None,
				},
				..Default::default()
			};
			let checksummer = Checksummer::init(&expected_checksums, false);

			let stream = http_body_util::BodyStream::new(body).map_err(Error::from);
			ReqBody {
				stream: Mutex::new(stream.boxed()),
				checksummer,
				expected_checksums,
				trailer_algorithm: None,
			}
		})),
		_ => Ok(req.map(|body| ReqBody::new(http_body_util::BodyExt::map_err(body, Error::from)))),
	}
}

/// Result of `sha256("")`
const EMPTY_STRING_HEX_DIGEST: &str =
	"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855";

fn compute_streaming_payload_signature(
	signing_hmac: &HmacSha256,
	date: DateTime<Utc>,
	scope: &str,
	previous_signature: Hash,
	content_sha256: Hash,
) -> Result<Hash, StreamingPayloadError> {
) -> Result<Hash, Error> {
	let string_to_sign = [
		AWS4_HMAC_SHA256_PAYLOAD,
		&date.format(LONG_DATETIME).to_string(),

@@ -165,49 +92,12 @@ fn compute_streaming_payload_signature(
	let mut hmac = signing_hmac.clone();
	hmac.update(string_to_sign.as_bytes());

	Hash::try_from(&hmac.finalize().into_bytes())
		.ok_or_else(|| StreamingPayloadError::Message("Could not build signature".into()))
}

fn compute_streaming_trailer_signature(
	signing_hmac: &HmacSha256,
	date: DateTime<Utc>,
	scope: &str,
	previous_signature: Hash,
	trailer_sha256: Hash,
) -> Result<Hash, StreamingPayloadError> {
	let string_to_sign = [
		AWS4_HMAC_SHA256_PAYLOAD,
		&date.format(LONG_DATETIME).to_string(),
		scope,
		&hex::encode(previous_signature),
		&hex::encode(trailer_sha256),
	]
	.join("\n");

	let mut hmac = signing_hmac.clone();
	hmac.update(string_to_sign.as_bytes());

	Hash::try_from(&hmac.finalize().into_bytes())
		.ok_or_else(|| StreamingPayloadError::Message("Could not build signature".into()))
	Ok(Hash::try_from(&hmac.finalize().into_bytes()).ok_or_internal_error("Invalid signature")?)
}

mod payload {
	use http::{HeaderName, HeaderValue};

	use garage_util::data::Hash;

	use nom::bytes::streaming::{tag, take_while};
	use nom::character::streaming::hex_digit1;
	use nom::combinator::{map_res, opt};
	use nom::number::streaming::hex_u32;

	macro_rules! try_parse {
		($expr:expr) => {
			$expr.map_err(|e| e.map(Error::Parser))?
		};
	}

	pub enum Error<I> {
		Parser(nom::error::Error<I>),
		BadSignature,

@@ -223,13 +113,24 @@ mod payload {
	}

	#[derive(Debug, Clone)]
	pub struct ChunkHeader {
	pub struct Header {
		pub size: usize,
		pub signature: Option<Hash>,
		pub signature: Hash,
	}

	impl ChunkHeader {
		pub fn parse_signed(input: &[u8]) -> nom::IResult<&[u8], Self, Error<&[u8]>> {
	impl Header {
		pub fn parse(input: &[u8]) -> nom::IResult<&[u8], Self, Error<&[u8]>> {
			use nom::bytes::streaming::tag;
			use nom::character::streaming::hex_digit1;
			use nom::combinator::map_res;
			use nom::number::streaming::hex_u32;

			macro_rules! try_parse {
				($expr:expr) => {
					$expr.map_err(|e| e.map(Error::Parser))?
				};
			}

			let (input, size) = try_parse!(hex_u32(input));
			let (input, _) = try_parse!(tag(";")(input));

@@ -239,172 +140,96 @@ mod payload {

			let (input, _) = try_parse!(tag("\r\n")(input));

			let header = ChunkHeader {
			let header = Header {
				size: size as usize,
				signature: Some(signature),
				signature,
			};

			Ok((input, header))
		}

		pub fn parse_unsigned(input: &[u8]) -> nom::IResult<&[u8], Self, Error<&[u8]>> {
			let (input, size) = try_parse!(hex_u32(input));
			let (input, _) = try_parse!(tag("\r\n")(input));

			let header = ChunkHeader {
				size: size as usize,
				signature: None,
			};

			Ok((input, header))
		}
	}
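
For reference, the aws-chunked framing accepted by these parsers (ChunkHeader/Header above, TrailerChunk below) looks like this on the wire; sizes and values are assumed examples, not taken from the diff:

	// signed chunk:    "400;chunk-signature=<64 hex chars>\r\n" + 0x400 payload bytes + "\r\n"
	// unsigned chunk:  "400\r\n" + 0x400 payload bytes + "\r\n"
	// final chunk:     "0\r\n", optionally followed by a trailer such as
	//                  "x-amz-checksum-crc32:<base64 digest>\n\r\n" and, in the
	//                  signed variant, "x-amz-trailer-signature:<64 hex chars>\r\n"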
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct TrailerChunk {
|
||||
pub header_name: HeaderName,
|
||||
pub header_value: HeaderValue,
|
||||
pub signature: Option<Hash>,
|
||||
}
|
||||
|
||||
impl TrailerChunk {
|
||||
fn parse_content(input: &[u8]) -> nom::IResult<&[u8], Self, Error<&[u8]>> {
|
||||
let (input, header_name) = try_parse!(map_res(
|
||||
take_while(|c: u8| c.is_ascii_alphanumeric() || c == b'-'),
|
||||
HeaderName::from_bytes
|
||||
)(input));
|
||||
let (input, _) = try_parse!(tag(b":")(input));
|
||||
let (input, header_value) = try_parse!(map_res(
|
||||
take_while(|c: u8| c.is_ascii_alphanumeric() || b"+/=".contains(&c)),
|
||||
HeaderValue::from_bytes
|
||||
)(input));
|
||||
|
||||
// Possible '\n' after the header value, depends on clients
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
|
||||
let (input, _) = try_parse!(opt(tag(b"\n"))(input));
|
||||
|
||||
let (input, _) = try_parse!(tag(b"\r\n")(input));
|
||||
|
||||
Ok((
|
||||
input,
|
||||
TrailerChunk {
|
||||
header_name,
|
||||
header_value,
|
||||
signature: None,
|
||||
},
|
||||
))
|
||||
}
|
||||
pub fn parse_signed(input: &[u8]) -> nom::IResult<&[u8], Self, Error<&[u8]>> {
|
||||
let (input, trailer) = Self::parse_content(input)?;
|
||||
|
||||
let (input, _) = try_parse!(tag(b"x-amz-trailer-signature:")(input));
|
||||
let (input, data) = try_parse!(map_res(hex_digit1, hex::decode)(input));
|
||||
let signature = Hash::try_from(&data).ok_or(nom::Err::Failure(Error::BadSignature))?;
|
||||
let (input, _) = try_parse!(tag(b"\r\n")(input));
|
||||
|
||||
Ok((
|
||||
input,
|
||||
TrailerChunk {
|
||||
signature: Some(signature),
|
||||
..trailer
|
||||
},
|
||||
))
|
||||
}
|
||||
pub fn parse_unsigned(input: &[u8]) -> nom::IResult<&[u8], Self, Error<&[u8]>> {
|
||||
let (input, trailer) = Self::parse_content(input)?;
|
||||
let (input, _) = try_parse!(tag(b"\r\n")(input));
|
||||
|
||||
Ok((input, trailer))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum StreamingPayloadError {
|
||||
pub enum SignedPayloadStreamError {
|
||||
Stream(Error),
|
||||
InvalidSignature,
|
||||
Message(String),
|
||||
}
|
||||
|
||||
impl StreamingPayloadError {
|
||||
impl SignedPayloadStreamError {
|
||||
fn message(msg: &str) -> Self {
|
||||
StreamingPayloadError::Message(msg.into())
|
||||
SignedPayloadStreamError::Message(msg.into())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<StreamingPayloadError> for Error {
|
||||
fn from(err: StreamingPayloadError) -> Self {
|
||||
impl From<SignedPayloadStreamError> for Error {
|
||||
fn from(err: SignedPayloadStreamError) -> Self {
|
||||
match err {
|
||||
StreamingPayloadError::Stream(e) => e,
|
||||
StreamingPayloadError::InvalidSignature => {
|
||||
SignedPayloadStreamError::Stream(e) => e,
|
||||
SignedPayloadStreamError::InvalidSignature => {
|
||||
Error::bad_request("Invalid payload signature")
|
||||
}
|
||||
StreamingPayloadError::Message(e) => {
|
||||
SignedPayloadStreamError::Message(e) => {
|
||||
Error::bad_request(format!("Chunk format error: {}", e))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<I> From<payload::Error<I>> for StreamingPayloadError {
|
||||
impl<I> From<payload::Error<I>> for SignedPayloadStreamError {
|
||||
fn from(err: payload::Error<I>) -> Self {
|
||||
Self::message(err.description())
|
||||
}
|
||||
}
|
||||
|
||||
impl<I> From<nom::error::Error<I>> for StreamingPayloadError {
|
||||
impl<I> From<nom::error::Error<I>> for SignedPayloadStreamError {
|
||||
fn from(err: nom::error::Error<I>) -> Self {
|
||||
Self::message(err.code.description())
|
||||
}
|
||||
}
|
||||
|
||||
enum StreamingPayloadChunk {
|
||||
Chunk {
|
||||
header: payload::ChunkHeader,
|
||||
data: Bytes,
|
||||
},
|
||||
Trailer(payload::TrailerChunk),
|
||||
}
|
||||
|
||||
struct SignParams {
|
||||
datetime: DateTime<Utc>,
|
||||
scope: String,
|
||||
signing_hmac: HmacSha256,
|
||||
previous_signature: Hash,
|
||||
struct SignedPayload {
|
||||
header: payload::Header,
|
||||
data: Bytes,
|
||||
}
|
||||
|
||||
#[pin_project::pin_project]
|
||||
pub struct StreamingPayloadStream<S>
|
||||
pub struct SignedPayloadStream<S>
|
||||
where
|
||||
S: Stream<Item = Result<Bytes, Error>>,
|
||||
{
|
||||
#[pin]
|
||||
stream: S,
|
||||
buf: bytes::BytesMut,
|
||||
signing: Option<SignParams>,
|
||||
has_trailer: bool,
|
||||
done: bool,
|
||||
datetime: DateTime<Utc>,
|
||||
scope: String,
|
||||
signing_hmac: HmacSha256,
|
||||
previous_signature: Hash,
|
||||
}
|
||||
|
||||
impl<S> StreamingPayloadStream<S>
|
||||
impl<S> SignedPayloadStream<S>
|
||||
where
|
||||
S: Stream<Item = Result<Bytes, Error>>,
|
||||
{
|
||||
fn new(stream: S, signing: Option<SignParams>, has_trailer: bool) -> Self {
|
||||
pub fn new(
|
||||
stream: S,
|
||||
signing_hmac: HmacSha256,
|
||||
datetime: DateTime<Utc>,
|
||||
scope: &str,
|
||||
seed_signature: Hash,
|
||||
) -> Self {
|
||||
Self {
|
||||
stream,
|
||||
buf: bytes::BytesMut::new(),
|
||||
signing,
|
||||
has_trailer,
|
||||
done: false,
|
||||
datetime,
|
||||
scope: scope.into(),
|
||||
signing_hmac,
|
||||
previous_signature: seed_signature,
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_next(
|
||||
input: &[u8],
|
||||
is_signed: bool,
|
||||
has_trailer: bool,
|
||||
) -> nom::IResult<&[u8], StreamingPayloadChunk, StreamingPayloadError> {
|
||||
fn parse_next(input: &[u8]) -> nom::IResult<&[u8], SignedPayload, SignedPayloadStreamError> {
|
||||
use nom::bytes::streaming::{tag, take};
|
||||
|
||||
macro_rules! try_parse {
|
||||
|
@ -413,30 +238,17 @@ where
|
|||
};
|
||||
}
|
||||
|
||||
let (input, header) = if is_signed {
|
||||
try_parse!(payload::ChunkHeader::parse_signed(input))
|
||||
} else {
|
||||
try_parse!(payload::ChunkHeader::parse_unsigned(input))
|
||||
};
|
||||
let (input, header) = try_parse!(payload::Header::parse(input));
|
||||
|
||||
// 0-sized chunk is the last
|
||||
if header.size == 0 {
|
||||
if has_trailer {
|
||||
let (input, trailer) = if is_signed {
|
||||
try_parse!(payload::TrailerChunk::parse_signed(input))
|
||||
} else {
|
||||
try_parse!(payload::TrailerChunk::parse_unsigned(input))
|
||||
};
|
||||
return Ok((input, StreamingPayloadChunk::Trailer(trailer)));
|
||||
} else {
|
||||
return Ok((
|
||||
input,
|
||||
StreamingPayloadChunk::Chunk {
|
||||
header,
|
||||
data: Bytes::new(),
|
||||
},
|
||||
));
|
||||
}
|
||||
return Ok((
|
||||
input,
|
||||
SignedPayload {
|
||||
header,
|
||||
data: Bytes::new(),
|
||||
},
|
||||
));
|
||||
}
|
||||
|
||||
let (input, data) = try_parse!(take::<_, _, nom::error::Error<_>>(header.size)(input));
|
||||
|
@@ -444,15 +256,15 @@ where

        let data = Bytes::from(data.to_vec());

        Ok((input, StreamingPayloadChunk::Chunk { header, data }))
        Ok((input, SignedPayload { header, data }))
    }
}

impl<S> Stream for StreamingPayloadStream<S>
impl<S> Stream for SignedPayloadStream<S>
where
    S: Stream<Item = Result<Bytes, Error>> + Unpin,
{
    type Item = Result<Frame<Bytes>, StreamingPayloadError>;
    type Item = Result<Bytes, SignedPayloadStreamError>;

    fn poll_next(
        self: Pin<&mut Self>,

@@ -462,105 +274,56 @@ where

        let mut this = self.project();

        if *this.done {
            return Poll::Ready(None);
        }

        loop {
            let (input, payload) =
                match Self::parse_next(this.buf, this.signing.is_some(), *this.has_trailer) {
                    Ok(res) => res,
                    Err(nom::Err::Incomplete(_)) => {
                        match futures::ready!(this.stream.as_mut().poll_next(cx)) {
                            Some(Ok(bytes)) => {
                                this.buf.extend(bytes);
                                continue;
                            }
                            Some(Err(e)) => {
                                return Poll::Ready(Some(Err(StreamingPayloadError::Stream(e))))
                            }
                            None => {
                                return Poll::Ready(Some(Err(StreamingPayloadError::message(
                                    "Unexpected EOF",
                                ))));
                            }
                        }
            let (input, payload) = match Self::parse_next(this.buf) {
                Ok(res) => res,
                Err(nom::Err::Incomplete(_)) => {
                    match futures::ready!(this.stream.as_mut().poll_next(cx)) {
                        Some(Ok(bytes)) => {
                            this.buf.extend(bytes);
                            continue;
                        }
                        Some(Err(e)) => {
                            return Poll::Ready(Some(Err(SignedPayloadStreamError::Stream(e))))
                        }
                        None => {
                            return Poll::Ready(Some(Err(SignedPayloadStreamError::message(
                                "Unexpected EOF",
                            ))));
                        }
                    }
                }
                Err(nom::Err::Error(e)) | Err(nom::Err::Failure(e)) => {
                    return Poll::Ready(Some(Err(e)))
                }
            };

            match payload {
                StreamingPayloadChunk::Chunk { data, header } => {
                    if let Some(signing) = this.signing.as_mut() {
                        let data_sha256sum = sha256sum(&data);

                        let expected_signature = compute_streaming_payload_signature(
                            &signing.signing_hmac,
                            signing.datetime,
                            &signing.scope,
                            signing.previous_signature,
                            data_sha256sum,
                        )?;

                        if header.signature.unwrap() != expected_signature {
                            return Poll::Ready(Some(Err(StreamingPayloadError::InvalidSignature)));
                        }

                        signing.previous_signature = header.signature.unwrap();
                    }

                    *this.buf = input.into();

                    // 0-sized chunk is the last
                    if data.is_empty() {
                        // if there was a trailer, it would have been returned by the parser
                        assert!(!*this.has_trailer);
                        *this.done = true;
                        return Poll::Ready(None);
                    }

                    return Poll::Ready(Some(Ok(Frame::data(data))));
                }
                StreamingPayloadChunk::Trailer(trailer) => {
                    trace!(
                        "In StreamingPayloadStream::poll_next: got trailer {:?}",
                        trailer
                    );

                    if let Some(signing) = this.signing.as_mut() {
                        let data = [
                            trailer.header_name.as_ref(),
                            &b":"[..],
                            trailer.header_value.as_ref(),
                            &b"\n"[..],
                        ]
                        .concat();
                        let trailer_sha256sum = sha256sum(&data);

                        let expected_signature = compute_streaming_trailer_signature(
                            &signing.signing_hmac,
                            signing.datetime,
                            &signing.scope,
                            signing.previous_signature,
                            trailer_sha256sum,
                        )?;

                        if trailer.signature.unwrap() != expected_signature {
                            return Poll::Ready(Some(Err(StreamingPayloadError::InvalidSignature)));
                        }
                    }

                    *this.buf = input.into();
                    *this.done = true;

                    let mut trailers_map = HeaderMap::new();
                    trailers_map.insert(trailer.header_name, trailer.header_value);

                    return Poll::Ready(Some(Ok(Frame::trailers(trailers_map))));
                Err(nom::Err::Error(e)) | Err(nom::Err::Failure(e)) => {
                    return Poll::Ready(Some(Err(e)))
                }
            };

            // 0-sized chunk is the last
            if payload.data.is_empty() {
                return Poll::Ready(None);
            }

            let data_sha256sum = sha256sum(&payload.data);

            let expected_signature = compute_streaming_payload_signature(
                this.signing_hmac,
                *this.datetime,
                this.scope,
                *this.previous_signature,
                data_sha256sum,
            )
            .map_err(|e| {
                SignedPayloadStreamError::Message(format!("Could not build signature: {}", e))
            })?;

            if payload.header.signature != expected_signature {
                return Poll::Ready(Some(Err(SignedPayloadStreamError::InvalidSignature)));
            }

            *this.buf = input.into();
            *this.previous_signature = payload.header.signature;

            return Poll::Ready(Some(Ok(payload.data)));
        }
    }
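Note: compute_streaming_payload_signature above chains chunk signatures as specified for SigV4 streaming uploads — each chunk signs the previous signature together with the SHA-256 of its own body. A self-contained sketch of that string-to-sign, assuming the hmac, sha2, and hex crates; this illustrates the public spec, it is not Garage's helper:

    use hmac::{Hmac, Mac};
    use sha2::{Digest, Sha256};

    type HmacSha256 = Hmac<Sha256>;

    /// Compute one link of the chunk-signature chain.
    fn chunk_signature(
        signing_key: &[u8],
        datetime: &str,           // e.g. "20240101T000000Z"
        scope: &str,              // e.g. "20240101/garage/s3/aws4_request"
        previous_signature: &str, // seed signature for the first chunk
        data: &[u8],
    ) -> String {
        let empty_sha = hex::encode(Sha256::digest(b""));
        let data_sha = hex::encode(Sha256::digest(data));
        // Layout per the public SigV4 streaming specification.
        let string_to_sign = format!(
            "AWS4-HMAC-SHA256-PAYLOAD\n{datetime}\n{scope}\n{previous_signature}\n{empty_sha}\n{data_sha}"
        );
        let mut mac = HmacSha256::new_from_slice(signing_key).expect("any key length is valid");
        mac.update(string_to_sign.as_bytes());
        hex::encode(mac.finalize().into_bytes())
    }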
@@ -573,7 +336,7 @@ where
mod tests {
    use futures::prelude::*;

    use super::{SignParams, StreamingPayloadError, StreamingPayloadStream};
    use super::{SignedPayloadStream, SignedPayloadStreamError};

    #[tokio::test]
    async fn test_interrupted_signed_payload_stream() {

@@ -595,20 +358,12 @@ mod tests {

        let seed_signature = Hash::default();

        let mut stream = StreamingPayloadStream::new(
            body,
            Some(SignParams {
                signing_hmac,
                datetime,
                scope,
                previous_signature: seed_signature,
            }),
            false,
        );
        let mut stream =
            SignedPayloadStream::new(body, signing_hmac, datetime, &scope, seed_signature);

        assert!(stream.try_next().await.is_err());
        match stream.try_next().await {
            Err(StreamingPayloadError::Message(msg)) if msg == "Unexpected EOF" => {}
            Err(SignedPayloadStreamError::Message(msg)) if msg == "Unexpected EOF" => {}
            item => panic!(
                "Unexpected result, expected early EOF error, got {:?}",
                item
@@ -1,12 +1,12 @@
[package]
name = "garage_api_k2v"
version = "1.1.0"
version = "1.0.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
description = "K2V API server crate for the Garage object store"
repository = "https://git.deuxfleurs.fr/Deuxfleurs/garage"
readme = "../../../README.md"
readme = "../../README.md"

[lib]
path = "lib.rs"
@@ -1,3 +1,4 @@
use std::borrow::Cow;
use std::sync::Arc;

use hyper::{body::Incoming as IncomingBody, Method, Request, Response};

@@ -81,9 +82,7 @@ impl ApiHandler for K2VApiServer {
            return Ok(options_res.map(|_empty_body: EmptyBody| empty_body()));
        }

        let verified_request = verify_request(&garage, req, "k2v").await?;
        let req = verified_request.request;
        let api_key = verified_request.access_key;
        let (req, api_key, _content_sha256) = verify_request(&garage, req, "k2v").await?;

        let bucket_id = garage
            .bucket_helper()

@@ -179,8 +178,8 @@ impl ApiHandler for K2VApiServer {
}

impl ApiEndpoint for K2VApiEndpoint {
    fn name(&self) -> &'static str {
        self.endpoint.name()
    fn name(&self) -> Cow<'static, str> {
        Cow::Borrowed(self.endpoint.name())
    }

    fn add_span_attributes(&self, span: SpanRef<'_>) {
@@ -20,7 +20,7 @@ pub async fn handle_insert_batch(
    let ReqCtx {
        garage, bucket_id, ..
    } = &ctx;
    let items = req.into_body().json::<Vec<InsertBatchItem>>().await?;
    let items = parse_json_body::<Vec<InsertBatchItem>, _, Error>(req).await?;

    let mut items2 = vec![];
    for it in items {

@@ -47,7 +47,7 @@ pub async fn handle_read_batch(
    ctx: ReqCtx,
    req: Request<ReqBody>,
) -> Result<Response<ResBody>, Error> {
    let queries = req.into_body().json::<Vec<ReadBatchQuery>>().await?;
    let queries = parse_json_body::<Vec<ReadBatchQuery>, _, Error>(req).await?;

    let resp_results = futures::future::join_all(
        queries

@@ -141,7 +141,7 @@ pub async fn handle_delete_batch(
    ctx: ReqCtx,
    req: Request<ReqBody>,
) -> Result<Response<ResBody>, Error> {
    let queries = req.into_body().json::<Vec<DeleteBatchQuery>>().await?;
    let queries = parse_json_body::<Vec<DeleteBatchQuery>, _, Error>(req).await?;

    let resp_results = futures::future::join_all(
        queries

@@ -262,7 +262,7 @@ pub(crate) async fn handle_poll_range(
    } = ctx;
    use garage_model::k2v::sub::PollRange;

    let query = req.into_body().json::<PollRangeQuery>().await?;
    let query = parse_json_body::<PollRangeQuery, _, Error>(req).await?;

    let timeout_msec = query.timeout.unwrap_or(300).clamp(1, 600) * 1000;
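Note: parse_json_body replaces the req.into_body().json::<T>() helper from the other side of this diff. A minimal sketch of what such a helper has to do, assuming hyper 1.x and http_body_util — the helper name and exact signature in garage_api_common may differ:

    use http_body_util::BodyExt;
    use hyper::{body::Incoming, Request};
    use serde::de::DeserializeOwned;

    /// Buffer the whole request body, then deserialize it as JSON.
    async fn parse_json_body<T: DeserializeOwned>(req: Request<Incoming>) -> Result<T, String> {
        let bytes = req
            .into_body()
            .collect()
            .await
            .map_err(|e| e.to_string())?
            .to_bytes();
        serde_json::from_slice(&bytes).map_err(|e| e.to_string())
    }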
@@ -23,10 +23,6 @@ pub enum Error {
    #[error(display = "Authorization header malformed, unexpected scope: {}", _0)]
    AuthorizationHeaderMalformed(String),

    /// The provided digest (checksum) value was invalid
    #[error(display = "Invalid digest: {}", _0)]
    InvalidDigest(String),

    /// The requested object doesn't exist
    #[error(display = "Key not found")]
    NoSuchKey,

@@ -58,7 +54,6 @@ impl From<SignatureError> for Error {
                Self::AuthorizationHeaderMalformed(c)
            }
            SignatureError::InvalidUtf8Str(i) => Self::InvalidUtf8Str(i),
            SignatureError::InvalidDigest(d) => Self::InvalidDigest(d),
        }
    }
}

@@ -76,7 +71,6 @@ impl Error {
            Error::InvalidBase64(_) => "InvalidBase64",
            Error::InvalidUtf8Str(_) => "InvalidUtf8String",
            Error::InvalidCausalityToken => "CausalityToken",
            Error::InvalidDigest(_) => "InvalidDigest",
        }
    }
}

@@ -91,7 +85,6 @@ impl ApiError for Error {
            Error::AuthorizationHeaderMalformed(_)
            | Error::InvalidBase64(_)
            | Error::InvalidUtf8Str(_)
            | Error::InvalidDigest(_)
            | Error::InvalidCausalityToken => StatusCode::BAD_REQUEST,
        }
    }
@@ -144,7 +144,9 @@ pub async fn handle_insert_item(
        .map(parse_causality_token)
        .transpose()?;

    let body = req.into_body().collect().await?;
    let body = http_body_util::BodyExt::collect(req.into_body())
        .await?
        .to_bytes();

    let value = DvvsValue::Value(body.to_vec());
@@ -1,12 +1,12 @@
[package]
name = "garage_api_s3"
version = "1.1.0"
version = "1.0.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
description = "S3 API server crate for the Garage object store"
repository = "https://git.deuxfleurs.fr/Deuxfleurs/garage"
readme = "../../../README.md"
readme = "../../README.md"

[lib]
path = "lib.rs"
@@ -1,3 +1,4 @@
use std::borrow::Cow;
use std::sync::Arc;

use hyper::header;

@@ -121,9 +122,7 @@ impl ApiHandler for S3ApiServer {
            return Ok(options_res.map(|_empty_body: EmptyBody| empty_body()));
        }

        let verified_request = verify_request(&garage, req, "s3").await?;
        let req = verified_request.request;
        let api_key = verified_request.access_key;
        let (req, api_key, content_sha256) = verify_request(&garage, req, "s3").await?;

        let bucket_name = match bucket_name {
            None => {

@@ -136,7 +135,14 @@ impl ApiHandler for S3ApiServer {

        // Special code path for CreateBucket API endpoint
        if let Endpoint::CreateBucket {} = endpoint {
            return handle_create_bucket(&garage, req, &api_key.key_id, bucket_name).await;
            return handle_create_bucket(
                &garage,
                req,
                content_sha256,
                &api_key.key_id,
                bucket_name,
            )
            .await;
        }

        let bucket_id = garage

@@ -174,7 +180,7 @@ impl ApiHandler for S3ApiServer {
        let resp = match endpoint {
            Endpoint::HeadObject {
                key, part_number, ..
            } => handle_head(ctx, &req.map(|_| ()), &key, part_number).await,
            } => handle_head(ctx, &req, &key, part_number).await,
            Endpoint::GetObject {
                key,
                part_number,

@@ -194,20 +200,20 @@ impl ApiHandler for S3ApiServer {
                    response_content_type,
                    response_expires,
                };
                handle_get(ctx, &req.map(|_| ()), &key, part_number, overrides).await
                handle_get(ctx, &req, &key, part_number, overrides).await
            }
            Endpoint::UploadPart {
                key,
                part_number,
                upload_id,
            } => handle_put_part(ctx, req, &key, part_number, &upload_id).await,
            } => handle_put_part(ctx, req, &key, part_number, &upload_id, content_sha256).await,
            Endpoint::CopyObject { key } => handle_copy(ctx, &req, &key).await,
            Endpoint::UploadPartCopy {
                key,
                part_number,
                upload_id,
            } => handle_upload_part_copy(ctx, &req, &key, part_number, &upload_id).await,
            Endpoint::PutObject { key } => handle_put(ctx, req, &key).await,
            Endpoint::PutObject { key } => handle_put(ctx, req, &key, content_sha256).await,
            Endpoint::AbortMultipartUpload { key, upload_id } => {
                handle_abort_multipart_upload(ctx, &key, &upload_id).await
            }

@@ -216,7 +222,7 @@ impl ApiHandler for S3ApiServer {
                handle_create_multipart_upload(ctx, &req, &key).await
            }
            Endpoint::CompleteMultipartUpload { key, upload_id } => {
                handle_complete_multipart_upload(ctx, req, &key, &upload_id).await
                handle_complete_multipart_upload(ctx, req, &key, &upload_id, content_sha256).await
            }
            Endpoint::CreateBucket {} => unreachable!(),
            Endpoint::HeadBucket {} => {

@@ -312,6 +318,7 @@ impl ApiHandler for S3ApiServer {
            } => {
                let query = ListPartsQuery {
                    bucket_name: ctx.bucket_name.clone(),
                    bucket_id,
                    key,
                    upload_id,
                    part_number_marker: part_number_marker.map(|p| p.min(10000)),

@@ -319,15 +326,17 @@ impl ApiHandler for S3ApiServer {
                };
                handle_list_parts(ctx, req, &query).await
            }
            Endpoint::DeleteObjects {} => handle_delete_objects(ctx, req).await,
            Endpoint::DeleteObjects {} => handle_delete_objects(ctx, req, content_sha256).await,
            Endpoint::GetBucketWebsite {} => handle_get_website(ctx).await,
            Endpoint::PutBucketWebsite {} => handle_put_website(ctx, req).await,
            Endpoint::PutBucketWebsite {} => handle_put_website(ctx, req, content_sha256).await,
            Endpoint::DeleteBucketWebsite {} => handle_delete_website(ctx).await,
            Endpoint::GetBucketCors {} => handle_get_cors(ctx).await,
            Endpoint::PutBucketCors {} => handle_put_cors(ctx, req).await,
            Endpoint::PutBucketCors {} => handle_put_cors(ctx, req, content_sha256).await,
            Endpoint::DeleteBucketCors {} => handle_delete_cors(ctx).await,
            Endpoint::GetBucketLifecycleConfiguration {} => handle_get_lifecycle(ctx).await,
            Endpoint::PutBucketLifecycleConfiguration {} => handle_put_lifecycle(ctx, req).await,
            Endpoint::PutBucketLifecycleConfiguration {} => {
                handle_put_lifecycle(ctx, req, content_sha256).await
            }
            Endpoint::DeleteBucketLifecycle {} => handle_delete_lifecycle(ctx).await,
            endpoint => Err(Error::NotImplemented(endpoint.name().to_owned())),
        };

@@ -345,8 +354,8 @@ impl ApiHandler for S3ApiServer {
}

impl ApiEndpoint for S3ApiEndpoint {
    fn name(&self) -> &'static str {
        self.endpoint.name()
    fn name(&self) -> Cow<'static, str> {
        Cow::Borrowed(self.endpoint.name())
    }

    fn add_span_attributes(&self, span: SpanRef<'_>) {
@@ -1,5 +1,6 @@
use std::collections::HashMap;

use http_body_util::BodyExt;
use hyper::{Request, Response, StatusCode};

use garage_model::bucket_alias_table::*;

@@ -9,10 +10,12 @@ use garage_model::key_table::Key;
use garage_model::permission::BucketKeyPerm;
use garage_table::util::*;
use garage_util::crdt::*;
use garage_util::data::*;
use garage_util::time::*;

use garage_api_common::common_error::CommonError;
use garage_api_common::helpers::*;
use garage_api_common::signature::verify_signed_content;

use crate::api_server::{ReqBody, ResBody};
use crate::error::*;

@@ -119,10 +122,15 @@ pub async fn handle_list_buckets(
pub async fn handle_create_bucket(
    garage: &Garage,
    req: Request<ReqBody>,
    content_sha256: Option<Hash>,
    api_key_id: &String,
    bucket_name: String,
) -> Result<Response<ResBody>, Error> {
    let body = req.into_body().collect().await?;
    let body = BodyExt::collect(req.into_body()).await?.to_bytes();

    if let Some(content_sha256) = content_sha256 {
        verify_signed_content(content_sha256, &body[..])?;
    }

    let cmd =
        parse_create_bucket_xml(&body[..]).ok_or_bad_request("Invalid create bucket XML query")?;
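Note: the verify_signed_content calls introduced throughout this diff guard non-streaming request bodies. A sketch of the assumed semantics — the x-amz-content-sha256 value pinned by the request signature must equal the SHA-256 of the body actually received; Garage's real function works on its own Hash type and error enum:

    use sha2::{Digest, Sha256};

    fn verify_signed_content(expected_sha256: [u8; 32], body: &[u8]) -> Result<(), String> {
        let actual: [u8; 32] = Sha256::digest(body).into();
        if actual == expected_sha256 {
            Ok(())
        } else {
            Err("content sha256 checksum does not match signed value".into())
        }
    }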
@@ -11,12 +11,11 @@ use sha2::Sha256;
use http::{HeaderMap, HeaderName, HeaderValue};

use garage_util::data::*;
use garage_util::error::OkOrMessage;

use super::*;
use garage_model::s3::object_table::*;

pub use garage_model::s3::object_table::{ChecksumAlgorithm, ChecksumValue};

pub const CONTENT_MD5: HeaderName = HeaderName::from_static("content-md5");
use crate::error::*;

pub const X_AMZ_CHECKSUM_ALGORITHM: HeaderName =
    HeaderName::from_static("x-amz-checksum-algorithm");

@@ -32,8 +31,8 @@ pub type Md5Checksum = [u8; 16];
pub type Sha1Checksum = [u8; 20];
pub type Sha256Checksum = [u8; 32];

#[derive(Debug, Default, Clone)]
pub struct ExpectedChecksums {
#[derive(Debug, Default)]
pub(crate) struct ExpectedChecksums {
    // base64-encoded md5 (content-md5 header)
    pub md5: Option<String>,
    // content_sha256 (as a Hash / FixedBytes32)

@@ -42,7 +41,7 @@ pub struct ExpectedChecksums {
    pub extra: Option<ChecksumValue>,
}

pub struct Checksummer {
pub(crate) struct Checksummer {
    pub crc32: Option<Crc32>,
    pub crc32c: Option<Crc32c>,
    pub md5: Option<Md5>,

@@ -51,7 +50,7 @@ pub struct Checksummer {
}

#[derive(Default)]
pub struct Checksums {
pub(crate) struct Checksums {
    pub crc32: Option<Crc32Checksum>,
    pub crc32c: Option<Crc32cChecksum>,
    pub md5: Option<Md5Checksum>,

@@ -60,48 +59,34 @@ pub struct Checksums {
}

impl Checksummer {
    pub fn new() -> Self {
        Self {
    pub(crate) fn init(expected: &ExpectedChecksums, require_md5: bool) -> Self {
        let mut ret = Self {
            crc32: None,
            crc32c: None,
            md5: None,
            sha1: None,
            sha256: None,
        }
    }
        };

    pub fn init(expected: &ExpectedChecksums, add_md5: bool) -> Self {
        let mut ret = Self::new();
        ret.add_expected(expected);
        if add_md5 {
            ret.add_md5();
        if expected.md5.is_some() || require_md5 {
            ret.md5 = Some(Md5::new());
        }
        if expected.sha256.is_some() || matches!(&expected.extra, Some(ChecksumValue::Sha256(_))) {
            ret.sha256 = Some(Sha256::new());
        }
        if matches!(&expected.extra, Some(ChecksumValue::Crc32(_))) {
            ret.crc32 = Some(Crc32::new());
        }
        if matches!(&expected.extra, Some(ChecksumValue::Crc32c(_))) {
            ret.crc32c = Some(Crc32c::default());
        }
        if matches!(&expected.extra, Some(ChecksumValue::Sha1(_))) {
            ret.sha1 = Some(Sha1::new());
        }
        ret
    }

    pub fn add_md5(&mut self) {
        self.md5 = Some(Md5::new());
    }

    pub fn add_expected(&mut self, expected: &ExpectedChecksums) {
        if expected.md5.is_some() {
            self.md5 = Some(Md5::new());
        }
        if expected.sha256.is_some() || matches!(&expected.extra, Some(ChecksumValue::Sha256(_))) {
            self.sha256 = Some(Sha256::new());
        }
        if matches!(&expected.extra, Some(ChecksumValue::Crc32(_))) {
            self.crc32 = Some(Crc32::new());
        }
        if matches!(&expected.extra, Some(ChecksumValue::Crc32c(_))) {
            self.crc32c = Some(Crc32c::default());
        }
        if matches!(&expected.extra, Some(ChecksumValue::Sha1(_))) {
            self.sha1 = Some(Sha1::new());
        }
    }

    pub fn add(mut self, algo: Option<ChecksumAlgorithm>) -> Self {
    pub(crate) fn add(mut self, algo: Option<ChecksumAlgorithm>) -> Self {
        match algo {
            Some(ChecksumAlgorithm::Crc32) => {
                self.crc32 = Some(Crc32::new());

@@ -120,7 +105,7 @@ impl Checksummer {
        self
    }

    pub fn update(&mut self, bytes: &[u8]) {
    pub(crate) fn update(&mut self, bytes: &[u8]) {
        if let Some(crc32) = &mut self.crc32 {
            crc32.update(bytes);
        }

@@ -138,7 +123,7 @@ impl Checksummer {
        }
    }

    pub fn finalize(self) -> Checksums {
    pub(crate) fn finalize(self) -> Checksums {
        Checksums {
            crc32: self.crc32.map(|x| u32::to_be_bytes(x.finalize())),
            crc32c: self

@@ -198,56 +183,153 @@ impl Checksums {

// ----

pub fn parse_checksum_algorithm(algo: &str) -> Result<ChecksumAlgorithm, Error> {
    match algo {
        "CRC32" => Ok(ChecksumAlgorithm::Crc32),
        "CRC32C" => Ok(ChecksumAlgorithm::Crc32c),
        "SHA1" => Ok(ChecksumAlgorithm::Sha1),
        "SHA256" => Ok(ChecksumAlgorithm::Sha256),
        _ => Err(Error::bad_request("invalid checksum algorithm")),
#[derive(Default)]
pub(crate) struct MultipartChecksummer {
    pub md5: Md5,
    pub extra: Option<MultipartExtraChecksummer>,
}

pub(crate) enum MultipartExtraChecksummer {
    Crc32(Crc32),
    Crc32c(Crc32c),
    Sha1(Sha1),
    Sha256(Sha256),
}

impl MultipartChecksummer {
    pub(crate) fn init(algo: Option<ChecksumAlgorithm>) -> Self {
        Self {
            md5: Md5::new(),
            extra: match algo {
                None => None,
                Some(ChecksumAlgorithm::Crc32) => {
                    Some(MultipartExtraChecksummer::Crc32(Crc32::new()))
                }
                Some(ChecksumAlgorithm::Crc32c) => {
                    Some(MultipartExtraChecksummer::Crc32c(Crc32c::default()))
                }
                Some(ChecksumAlgorithm::Sha1) => Some(MultipartExtraChecksummer::Sha1(Sha1::new())),
                Some(ChecksumAlgorithm::Sha256) => {
                    Some(MultipartExtraChecksummer::Sha256(Sha256::new()))
                }
            },
        }
    }

    pub(crate) fn update(
        &mut self,
        etag: &str,
        checksum: Option<ChecksumValue>,
    ) -> Result<(), Error> {
        self.md5
            .update(&hex::decode(&etag).ok_or_message("invalid etag hex")?);
        match (&mut self.extra, checksum) {
            (None, _) => (),
            (
                Some(MultipartExtraChecksummer::Crc32(ref mut crc32)),
                Some(ChecksumValue::Crc32(x)),
            ) => {
                crc32.update(&x);
            }
            (
                Some(MultipartExtraChecksummer::Crc32c(ref mut crc32c)),
                Some(ChecksumValue::Crc32c(x)),
            ) => {
                crc32c.write(&x);
            }
            (Some(MultipartExtraChecksummer::Sha1(ref mut sha1)), Some(ChecksumValue::Sha1(x))) => {
                sha1.update(&x);
            }
            (
                Some(MultipartExtraChecksummer::Sha256(ref mut sha256)),
                Some(ChecksumValue::Sha256(x)),
            ) => {
                sha256.update(&x);
            }
            (Some(_), b) => {
                return Err(Error::internal_error(format!(
                    "part checksum was not computed correctly, got: {:?}",
                    b
                )))
            }
        }
        Ok(())
    }

    pub(crate) fn finalize(self) -> (Md5Checksum, Option<ChecksumValue>) {
        let md5 = self.md5.finalize()[..].try_into().unwrap();
        let extra = match self.extra {
            None => None,
            Some(MultipartExtraChecksummer::Crc32(crc32)) => {
                Some(ChecksumValue::Crc32(u32::to_be_bytes(crc32.finalize())))
            }
            Some(MultipartExtraChecksummer::Crc32c(crc32c)) => Some(ChecksumValue::Crc32c(
                u32::to_be_bytes(u32::try_from(crc32c.finish()).unwrap()),
            )),
            Some(MultipartExtraChecksummer::Sha1(sha1)) => {
                Some(ChecksumValue::Sha1(sha1.finalize()[..].try_into().unwrap()))
            }
            Some(MultipartExtraChecksummer::Sha256(sha256)) => Some(ChecksumValue::Sha256(
                sha256.finalize()[..].try_into().unwrap(),
            )),
        };
        (md5, extra)
    }
}

// ----

/// Extract the value of the x-amz-checksum-algorithm header
pub fn request_checksum_algorithm(
pub(crate) fn request_checksum_algorithm(
    headers: &HeaderMap<HeaderValue>,
) -> Result<Option<ChecksumAlgorithm>, Error> {
    match headers.get(X_AMZ_CHECKSUM_ALGORITHM) {
        None => Ok(None),
        Some(x) => parse_checksum_algorithm(x.to_str()?).map(Some),
    }
}

pub fn request_trailer_checksum_algorithm(
    headers: &HeaderMap<HeaderValue>,
) -> Result<Option<ChecksumAlgorithm>, Error> {
    match headers.get(X_AMZ_TRAILER).map(|x| x.to_str()).transpose()? {
        None => Ok(None),
        Some(x) if x == X_AMZ_CHECKSUM_CRC32 => Ok(Some(ChecksumAlgorithm::Crc32)),
        Some(x) if x == X_AMZ_CHECKSUM_CRC32C => Ok(Some(ChecksumAlgorithm::Crc32c)),
        Some(x) if x == X_AMZ_CHECKSUM_SHA1 => Ok(Some(ChecksumAlgorithm::Sha1)),
        Some(x) if x == X_AMZ_CHECKSUM_SHA256 => Ok(Some(ChecksumAlgorithm::Sha256)),
        Some(x) if x == "CRC32" => Ok(Some(ChecksumAlgorithm::Crc32)),
        Some(x) if x == "CRC32C" => Ok(Some(ChecksumAlgorithm::Crc32c)),
        Some(x) if x == "SHA1" => Ok(Some(ChecksumAlgorithm::Sha1)),
        Some(x) if x == "SHA256" => Ok(Some(ChecksumAlgorithm::Sha256)),
        _ => Err(Error::bad_request("invalid checksum algorithm")),
    }
}

/// Extract the value of any of the x-amz-checksum-* headers
pub fn request_checksum_value(
pub(crate) fn request_checksum_value(
    headers: &HeaderMap<HeaderValue>,
) -> Result<Option<ChecksumValue>, Error> {
    let mut ret = vec![];

    if headers.contains_key(X_AMZ_CHECKSUM_CRC32) {
        ret.push(extract_checksum_value(headers, ChecksumAlgorithm::Crc32)?);
    if let Some(crc32_str) = headers.get(X_AMZ_CHECKSUM_CRC32) {
        let crc32 = BASE64_STANDARD
            .decode(&crc32_str)
            .ok()
            .and_then(|x| x.try_into().ok())
            .ok_or_bad_request("invalid x-amz-checksum-crc32 header")?;
        ret.push(ChecksumValue::Crc32(crc32))
    }
    if headers.contains_key(X_AMZ_CHECKSUM_CRC32C) {
        ret.push(extract_checksum_value(headers, ChecksumAlgorithm::Crc32c)?);
    if let Some(crc32c_str) = headers.get(X_AMZ_CHECKSUM_CRC32C) {
        let crc32c = BASE64_STANDARD
            .decode(&crc32c_str)
            .ok()
            .and_then(|x| x.try_into().ok())
            .ok_or_bad_request("invalid x-amz-checksum-crc32c header")?;
        ret.push(ChecksumValue::Crc32c(crc32c))
    }
    if headers.contains_key(X_AMZ_CHECKSUM_SHA1) {
        ret.push(extract_checksum_value(headers, ChecksumAlgorithm::Sha1)?);
    if let Some(sha1_str) = headers.get(X_AMZ_CHECKSUM_SHA1) {
        let sha1 = BASE64_STANDARD
            .decode(&sha1_str)
            .ok()
            .and_then(|x| x.try_into().ok())
            .ok_or_bad_request("invalid x-amz-checksum-sha1 header")?;
        ret.push(ChecksumValue::Sha1(sha1))
    }
    if headers.contains_key(X_AMZ_CHECKSUM_SHA256) {
        ret.push(extract_checksum_value(headers, ChecksumAlgorithm::Sha256)?);
    if let Some(sha256_str) = headers.get(X_AMZ_CHECKSUM_SHA256) {
        let sha256 = BASE64_STANDARD
            .decode(&sha256_str)
            .ok()
            .and_then(|x| x.try_into().ok())
            .ok_or_bad_request("invalid x-amz-checksum-sha256 header")?;
        ret.push(ChecksumValue::Sha256(sha256))
    }

    if ret.len() > 1 {

@@ -260,47 +342,48 @@ pub fn request_checksum_value(

/// Checks for the presence of x-amz-checksum-algorithm
/// if so extract the corresponding x-amz-checksum-* value
pub fn extract_checksum_value(
pub(crate) fn request_checksum_algorithm_value(
    headers: &HeaderMap<HeaderValue>,
    algo: ChecksumAlgorithm,
) -> Result<ChecksumValue, Error> {
    match algo {
        ChecksumAlgorithm::Crc32 => {
) -> Result<Option<ChecksumValue>, Error> {
    match headers.get(X_AMZ_CHECKSUM_ALGORITHM) {
        Some(x) if x == "CRC32" => {
            let crc32 = headers
                .get(X_AMZ_CHECKSUM_CRC32)
                .and_then(|x| BASE64_STANDARD.decode(&x).ok())
                .and_then(|x| x.try_into().ok())
                .ok_or_bad_request("invalid x-amz-checksum-crc32 header")?;
            Ok(ChecksumValue::Crc32(crc32))
            Ok(Some(ChecksumValue::Crc32(crc32)))
        }
        ChecksumAlgorithm::Crc32c => {
        Some(x) if x == "CRC32C" => {
            let crc32c = headers
                .get(X_AMZ_CHECKSUM_CRC32C)
                .and_then(|x| BASE64_STANDARD.decode(&x).ok())
                .and_then(|x| x.try_into().ok())
                .ok_or_bad_request("invalid x-amz-checksum-crc32c header")?;
            Ok(ChecksumValue::Crc32c(crc32c))
            Ok(Some(ChecksumValue::Crc32c(crc32c)))
        }
        ChecksumAlgorithm::Sha1 => {
        Some(x) if x == "SHA1" => {
            let sha1 = headers
                .get(X_AMZ_CHECKSUM_SHA1)
                .and_then(|x| BASE64_STANDARD.decode(&x).ok())
                .and_then(|x| x.try_into().ok())
                .ok_or_bad_request("invalid x-amz-checksum-sha1 header")?;
            Ok(ChecksumValue::Sha1(sha1))
            Ok(Some(ChecksumValue::Sha1(sha1)))
        }
        ChecksumAlgorithm::Sha256 => {
        Some(x) if x == "SHA256" => {
            let sha256 = headers
                .get(X_AMZ_CHECKSUM_SHA256)
                .and_then(|x| BASE64_STANDARD.decode(&x).ok())
                .and_then(|x| x.try_into().ok())
                .ok_or_bad_request("invalid x-amz-checksum-sha256 header")?;
            Ok(ChecksumValue::Sha256(sha256))
            Ok(Some(ChecksumValue::Sha256(sha256)))
        }
        Some(_) => Err(Error::bad_request("invalid x-amz-checksum-algorithm")),
        None => Ok(None),
    }
}

pub fn add_checksum_response_headers(
pub(crate) fn add_checksum_response_headers(
    checksum: &Option<ChecksumValue>,
    mut resp: http::response::Builder,
) -> http::response::Builder {
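Note: MultipartChecksummer::update above feeds the raw bytes of each part's hex ETag into an MD5. That is the standard S3 multipart ETag rule: the whole-object ETag is the MD5 of the concatenated per-part MD5 digests, suffixed with the part count. A standalone sketch using the md-5 and hex crates — illustrative, not Garage code:

    use md5::{Digest, Md5};

    fn multipart_etag(part_etags: &[&str]) -> Result<String, hex::FromHexError> {
        let mut md5 = Md5::new();
        for etag in part_etags {
            // Each part ETag is the hex MD5 of that part's data.
            md5.update(hex::decode(etag.trim_matches('"'))?);
        }
        Ok(format!("{}-{}", hex::encode(md5.finalize()), part_etags.len()))
    }

    fn main() {
        let etag = multipart_etag(&[
            "5d41402abc4b2a76b9719d911017c592", // md5("hello")
            "7d793037a0760186574b0282f2f435e7", // md5("world")
        ])
        .unwrap();
        assert!(etag.ends_with("-2"));
    }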
@@ -1,9 +1,9 @@
use std::pin::Pin;
use std::time::{Duration, SystemTime, UNIX_EPOCH};

use futures::{stream, stream::Stream, StreamExt, TryStreamExt};

use bytes::Bytes;
use http::header::HeaderName;
use hyper::{Request, Response};
use serde::Serialize;

@@ -21,25 +21,16 @@ use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*;

use garage_api_common::helpers::*;
use garage_api_common::signature::checksum::*;

use crate::api_server::{ReqBody, ResBody};
use crate::checksum::*;
use crate::encryption::EncryptionParams;
use crate::error::*;
use crate::get::{full_object_byte_stream, PreconditionHeaders};
use crate::get::full_object_byte_stream;
use crate::multipart;
use crate::put::{extract_metadata_headers, save_stream, ChecksumMode, SaveStreamResult};
use crate::put::{get_headers, save_stream, ChecksumMode, SaveStreamResult};
use crate::xml::{self as s3_xml, xmlns_tag};

pub const X_AMZ_COPY_SOURCE_IF_MATCH: HeaderName =
    HeaderName::from_static("x-amz-copy-source-if-match");
pub const X_AMZ_COPY_SOURCE_IF_NONE_MATCH: HeaderName =
    HeaderName::from_static("x-amz-copy-source-if-none-match");
pub const X_AMZ_COPY_SOURCE_IF_MODIFIED_SINCE: HeaderName =
    HeaderName::from_static("x-amz-copy-source-if-modified-since");
pub const X_AMZ_COPY_SOURCE_IF_UNMODIFIED_SINCE: HeaderName =
    HeaderName::from_static("x-amz-copy-source-if-unmodified-since");

// -------- CopyObject ---------

pub async fn handle_copy(

@@ -47,7 +38,7 @@ pub async fn handle_copy(
    req: &Request<ReqBody>,
    dest_key: &str,
) -> Result<Response<ResBody>, Error> {
    let copy_precondition = PreconditionHeaders::parse_copy_source(req)?;
    let copy_precondition = CopyPreconditionHeaders::parse(req)?;

    let checksum_algorithm = request_checksum_algorithm(req.headers())?;

@@ -57,7 +48,7 @@ pub async fn handle_copy(
        extract_source_info(&source_object)?;

    // Check precondition, e.g. x-amz-copy-source-if-match
    copy_precondition.check_copy_source(source_version, &source_version_meta.etag)?;
    copy_precondition.check(source_version, &source_version_meta.etag)?;

    // Determine encryption parameters
    let (source_encryption, source_object_meta_inner) =

@@ -82,7 +73,7 @@ pub async fn handle_copy(
    let dest_object_meta = ObjectVersionMetaInner {
        headers: match req.headers().get("x-amz-metadata-directive") {
            Some(v) if v == hyper::header::HeaderValue::from_static("REPLACE") => {
                extract_metadata_headers(req.headers())?
                get_headers(req.headers())?
            }
            _ => source_object_meta_inner.into_owned().headers,
        },

@@ -344,7 +335,7 @@ pub async fn handle_upload_part_copy(
    part_number: u64,
    upload_id: &str,
) -> Result<Response<ResBody>, Error> {
    let copy_precondition = PreconditionHeaders::parse_copy_source(req)?;
    let copy_precondition = CopyPreconditionHeaders::parse(req)?;

    let dest_upload_id = multipart::decode_upload_id(upload_id)?;

@@ -360,7 +351,7 @@ pub async fn handle_upload_part_copy(
        extract_source_info(&source_object)?;

    // Check precondition on source, e.g. x-amz-copy-source-if-match
    copy_precondition.check_copy_source(source_object_version, &source_version_meta.etag)?;
    copy_precondition.check(source_object_version, &source_version_meta.etag)?;

    // Determine encryption parameters
    let (source_encryption, _) = EncryptionParams::check_decrypt_for_copy_source(

@@ -712,6 +703,97 @@ fn extract_source_info(
    Ok((source_version, source_version_data, source_version_meta))
}

struct CopyPreconditionHeaders {
    copy_source_if_match: Option<Vec<String>>,
    copy_source_if_modified_since: Option<SystemTime>,
    copy_source_if_none_match: Option<Vec<String>>,
    copy_source_if_unmodified_since: Option<SystemTime>,
}

impl CopyPreconditionHeaders {
    fn parse(req: &Request<ReqBody>) -> Result<Self, Error> {
        Ok(Self {
            copy_source_if_match: req
                .headers()
                .get("x-amz-copy-source-if-match")
                .map(|x| x.to_str())
                .transpose()?
                .map(|x| {
                    x.split(',')
                        .map(|m| m.trim().trim_matches('"').to_string())
                        .collect::<Vec<_>>()
                }),
            copy_source_if_modified_since: req
                .headers()
                .get("x-amz-copy-source-if-modified-since")
                .map(|x| x.to_str())
                .transpose()?
                .map(httpdate::parse_http_date)
                .transpose()
                .ok_or_bad_request("Invalid date in x-amz-copy-source-if-modified-since")?,
            copy_source_if_none_match: req
                .headers()
                .get("x-amz-copy-source-if-none-match")
                .map(|x| x.to_str())
                .transpose()?
                .map(|x| {
                    x.split(',')
                        .map(|m| m.trim().trim_matches('"').to_string())
                        .collect::<Vec<_>>()
                }),
            copy_source_if_unmodified_since: req
                .headers()
                .get("x-amz-copy-source-if-unmodified-since")
                .map(|x| x.to_str())
                .transpose()?
                .map(httpdate::parse_http_date)
                .transpose()
                .ok_or_bad_request("Invalid date in x-amz-copy-source-if-unmodified-since")?,
        })
    }

    fn check(&self, v: &ObjectVersion, etag: &str) -> Result<(), Error> {
        let v_date = UNIX_EPOCH + Duration::from_millis(v.timestamp);

        let ok = match (
            &self.copy_source_if_match,
            &self.copy_source_if_unmodified_since,
            &self.copy_source_if_none_match,
            &self.copy_source_if_modified_since,
        ) {
            // TODO I'm not sure all of the conditions are evaluated correctly here

            // If we have both if-match and if-unmodified-since,
            // basically we don't care about if-unmodified-since,
            // because in the spec it says that if if-match evaluates to
            // true but if-unmodified-since evaluates to false,
            // the copy is still done.
            (Some(im), _, None, None) => im.iter().any(|x| x == etag || x == "*"),
            (None, Some(ius), None, None) => v_date <= *ius,

            // If we have both if-none-match and if-modified-since,
            // then both of the two conditions must evaluate to true
            (None, None, Some(inm), Some(ims)) => {
                !inm.iter().any(|x| x == etag || x == "*") && v_date > *ims
            }
            (None, None, Some(inm), None) => !inm.iter().any(|x| x == etag || x == "*"),
            (None, None, None, Some(ims)) => v_date > *ims,
            (None, None, None, None) => true,
            _ => {
                return Err(Error::bad_request(
                    "Invalid combination of x-amz-copy-source-if-xxxxx headers",
                ))
            }
        };

        if ok {
            Ok(())
        } else {
            Err(Error::PreconditionFailed)
        }
    }
}

type BlockStreamItemOk = (Bytes, Option<Hash>);
type BlockStreamItem = Result<BlockStreamItemOk, garage_util::error::Error>;
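Note: both header-parsing closures in CopyPreconditionHeaders::parse split a comma-separated ETag list and strip quotes and whitespace. Extracted as a standalone helper, the behavior is:

    fn parse_etag_list(raw: &str) -> Vec<String> {
        raw.split(',')
            .map(|m| m.trim().trim_matches('"').to_string())
            .collect()
    }

    fn main() {
        let tags = parse_etag_list(r#""abc123", "def456", *"#);
        assert_eq!(tags, ["abc123", "def456", "*"]);
    }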
@@ -2,11 +1,15 @@ use quick_xml::de::from_reader;

use hyper::{header::HeaderName, Method, Request, Response, StatusCode};

use http_body_util::BodyExt;

use serde::{Deserialize, Serialize};

use garage_model::bucket_table::{Bucket, CorsRule as GarageCorsRule};
use garage_util::data::*;

use garage_api_common::helpers::*;
use garage_api_common::signature::verify_signed_content;

use crate::api_server::{ReqBody, ResBody};
use crate::error::*;

@@ -55,6 +59,7 @@ pub async fn handle_delete_cors(ctx: ReqCtx) -> Result<Response<ResBody>, Error>
pub async fn handle_put_cors(
    ctx: ReqCtx,
    req: Request<ReqBody>,
    content_sha256: Option<Hash>,
) -> Result<Response<ResBody>, Error> {
    let ReqCtx {
        garage,

@@ -63,7 +68,11 @@ pub async fn handle_put_cors(
        ..
    } = ctx;

    let body = req.into_body().collect().await?;
    let body = BodyExt::collect(req.into_body()).await?.to_bytes();

    if let Some(content_sha256) = content_sha256 {
        verify_signed_content(content_sha256, &body[..])?;
    }

    let conf: CorsConfiguration = from_reader(&body as &[u8])?;
    conf.validate()?;
@@ -1,3 +1,4 @@
use http_body_util::BodyExt;
use hyper::{Request, Response, StatusCode};

use garage_util::data::*;

@@ -5,6 +6,7 @@ use garage_util::data::*;
use garage_model::s3::object_table::*;

use garage_api_common::helpers::*;
use garage_api_common::signature::verify_signed_content;

use crate::api_server::{ReqBody, ResBody};
use crate::error::*;

@@ -66,8 +68,13 @@ pub async fn handle_delete(ctx: ReqCtx, key: &str) -> Result<Response<ResBody>, Error>
pub async fn handle_delete_objects(
    ctx: ReqCtx,
    req: Request<ReqBody>,
    content_sha256: Option<Hash>,
) -> Result<Response<ResBody>, Error> {
    let body = req.into_body().collect().await?;
    let body = BodyExt::collect(req.into_body()).await?.to_bytes();

    if let Some(content_sha256) = content_sha256 {
        verify_signed_content(content_sha256, &body[..])?;
    }

    let cmd_xml = roxmltree::Document::parse(std::str::from_utf8(&body)?)?;
    let cmd = parse_delete_objects_xml(&cmd_xml).ok_or_bad_request("Invalid delete XML query")?;
@@ -29,8 +29,8 @@ use garage_model::garage::Garage;
use garage_model::s3::object_table::{ObjectVersionEncryption, ObjectVersionMetaInner};

use garage_api_common::common_error::*;
use garage_api_common::signature::checksum::Md5Checksum;

use crate::checksum::Md5Checksum;
use crate::error::Error;

const X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM: HeaderName =
@@ -80,7 +80,7 @@ pub enum Error {
    #[error(display = "Invalid encryption algorithm: {:?}, should be AES256", _0)]
    InvalidEncryptionAlgorithm(String),

    /// The provided digest (checksum) value was invalid
    /// The client sent invalid XML data
    #[error(display = "Invalid digest: {}", _0)]
    InvalidDigest(String),

@@ -119,7 +119,6 @@ impl From<SignatureError> for Error {
                Self::AuthorizationHeaderMalformed(c)
            }
            SignatureError::InvalidUtf8Str(i) => Self::InvalidUtf8Str(i),
            SignatureError::InvalidDigest(d) => Self::InvalidDigest(d),
        }
    }
}
@@ -2,17 +2,17 @@
use std::collections::BTreeMap;
use std::convert::TryInto;
use std::sync::Arc;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use std::time::{Duration, UNIX_EPOCH};

use bytes::Bytes;
use futures::future;
use futures::stream::{self, Stream, StreamExt};
use http::header::{
    HeaderMap, HeaderName, ACCEPT_RANGES, CACHE_CONTROL, CONTENT_DISPOSITION, CONTENT_ENCODING,
    CONTENT_LANGUAGE, CONTENT_LENGTH, CONTENT_RANGE, CONTENT_TYPE, ETAG, EXPIRES, IF_MATCH,
    IF_MODIFIED_SINCE, IF_NONE_MATCH, IF_UNMODIFIED_SINCE, LAST_MODIFIED, RANGE,
    ACCEPT_RANGES, CACHE_CONTROL, CONTENT_DISPOSITION, CONTENT_ENCODING, CONTENT_LANGUAGE,
    CONTENT_LENGTH, CONTENT_RANGE, CONTENT_TYPE, ETAG, EXPIRES, IF_MODIFIED_SINCE, IF_NONE_MATCH,
    LAST_MODIFIED, RANGE,
};
use hyper::{Request, Response, StatusCode};
use hyper::{body::Body, Request, Response, StatusCode};
use tokio::sync::mpsc;

use garage_net::stream::ByteStream;

@@ -26,14 +26,13 @@ use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*;

use garage_api_common::helpers::*;
use garage_api_common::signature::checksum::{add_checksum_response_headers, X_AMZ_CHECKSUM_MODE};

use crate::api_server::ResBody;
use crate::copy::*;
use crate::checksum::{add_checksum_response_headers, X_AMZ_CHECKSUM_MODE};
use crate::encryption::EncryptionParams;
use crate::error::*;

const X_AMZ_MP_PARTS_COUNT: HeaderName = HeaderName::from_static("x-amz-mp-parts-count");
const X_AMZ_MP_PARTS_COUNT: &str = "x-amz-mp-parts-count";

#[derive(Default)]
pub struct GetObjectOverrides {

@@ -116,29 +115,49 @@ fn getobject_override_headers(
    Ok(())
}

fn handle_http_precondition(
fn try_answer_cached(
    version: &ObjectVersion,
    version_meta: &ObjectVersionMeta,
    req: &Request<()>,
) -> Result<Option<Response<ResBody>>, Error> {
    let precondition_headers = PreconditionHeaders::parse(req)?;
    req: &Request<impl Body>,
) -> Option<Response<ResBody>> {
    // <trinity> It is possible, and is even usually the case, [that both If-None-Match and
    // If-Modified-Since] are present in a request. In this situation If-None-Match takes
    // precedence and If-Modified-Since is ignored (as per 6.Precedence from rfc7232). The rationale
    // being that etag based matching is more accurate, it has no issue with sub-second precision
    // for instance (in case of very fast updates)
    let cached = if let Some(none_match) = req.headers().get(IF_NONE_MATCH) {
        let none_match = none_match.to_str().ok()?;
        let expected = format!("\"{}\"", version_meta.etag);
        let found = none_match
            .split(',')
            .map(str::trim)
            .any(|etag| etag == expected || etag == "\"*\"");
        found
    } else if let Some(modified_since) = req.headers().get(IF_MODIFIED_SINCE) {
        let modified_since = modified_since.to_str().ok()?;
        let client_date = httpdate::parse_http_date(modified_since).ok()?;
        let server_date = UNIX_EPOCH + Duration::from_millis(version.timestamp);
        client_date >= server_date
    } else {
        false
    };

    if let Some(status_code) = precondition_headers.check(&version, &version_meta.etag)? {
        Ok(Some(
    if cached {
        Some(
            Response::builder()
                .status(status_code)
                .status(StatusCode::NOT_MODIFIED)
                .body(empty_body())
                .unwrap(),
        ))
        )
    } else {
        Ok(None)
        None
    }
}

/// Handle HEAD request
pub async fn handle_head(
    ctx: ReqCtx,
    req: &Request<()>,
    req: &Request<impl Body>,
    key: &str,
    part_number: Option<u64>,
) -> Result<Response<ResBody>, Error> {

@@ -148,7 +167,7 @@ pub async fn handle_head(
/// Handle HEAD request for website
pub async fn handle_head_without_ctx(
    garage: Arc<Garage>,
    req: &Request<()>,
    req: &Request<impl Body>,
    bucket_id: Uuid,
    key: &str,
    part_number: Option<u64>,

@@ -177,8 +196,8 @@ pub async fn handle_head_without_ctx(
        _ => unreachable!(),
    };

    if let Some(res) = handle_http_precondition(object_version, version_meta, req)? {
        return Ok(res);
    if let Some(cached) = try_answer_cached(object_version, version_meta, req) {
        return Ok(cached);
    }

    let (encryption, headers) =

@@ -259,7 +278,7 @@ pub async fn handle_head_without_ctx(
/// Handle GET request
pub async fn handle_get(
    ctx: ReqCtx,
    req: &Request<()>,
    req: &Request<impl Body>,
    key: &str,
    part_number: Option<u64>,
    overrides: GetObjectOverrides,

@@ -270,7 +289,7 @@ pub async fn handle_get(
/// Handle GET request
pub async fn handle_get_without_ctx(
    garage: Arc<Garage>,
    req: &Request<()>,
    req: &Request<impl Body>,
    bucket_id: Uuid,
    key: &str,
    part_number: Option<u64>,

@@ -299,8 +318,8 @@ pub async fn handle_get_without_ctx(
        ObjectVersionData::FirstBlock(meta, _) => meta,
    };

    if let Some(res) = handle_http_precondition(last_v, last_v_meta, req)? {
        return Ok(res);
    if let Some(cached) = try_answer_cached(last_v, last_v_meta, req) {
        return Ok(cached);
    }

    let (enc, headers) =

@@ -321,12 +340,7 @@ pub async fn handle_get_without_ctx(
                enc,
                &headers,
                pn,
                ChecksumMode {
                    // TODO: for multipart uploads, checksums of each part should be stored
                    // so that we can return the corresponding checksum here
                    // https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
                    enabled: false,
                },
                checksum_mode,
            )
            .await
        }

@@ -340,12 +354,7 @@ pub async fn handle_get_without_ctx(
                &headers,
                range.start,
                range.start + range.length,
                ChecksumMode {
                    // TODO: for range queries that align with part boundaries,
                    // we should return the saved checksum of the part
                    // https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
                    enabled: false,
                },
                checksum_mode,
            )
            .await
        }

@@ -568,7 +577,7 @@ async fn handle_get_part(
}

fn parse_range_header(
    req: &Request<()>,
    req: &Request<impl Body>,
    total_size: u64,
) -> Result<Option<http_range::HttpRange>, Error> {
    let range = match req.headers().get(RANGE) {

@@ -609,7 +618,7 @@ struct ChecksumMode {
    enabled: bool,
}

fn checksum_mode(req: &Request<()>) -> ChecksumMode {
fn checksum_mode(req: &Request<impl Body>) -> ChecksumMode {
    ChecksumMode {
        enabled: req
            .headers()

@@ -742,116 +751,3 @@ fn std_error_from_read_error<E: std::fmt::Display>(e: E) -> std::io::Error {
        format!("Error while reading object data: {}", e),
    )
}

// ----

pub struct PreconditionHeaders {
    if_match: Option<Vec<String>>,
    if_modified_since: Option<SystemTime>,
    if_none_match: Option<Vec<String>>,
    if_unmodified_since: Option<SystemTime>,
}

impl PreconditionHeaders {
    fn parse<B>(req: &Request<B>) -> Result<Self, Error> {
        Self::parse_with(
            req.headers(),
            &IF_MATCH,
            &IF_NONE_MATCH,
            &IF_MODIFIED_SINCE,
            &IF_UNMODIFIED_SINCE,
        )
    }

    pub(crate) fn parse_copy_source<B>(req: &Request<B>) -> Result<Self, Error> {
        Self::parse_with(
            req.headers(),
            &X_AMZ_COPY_SOURCE_IF_MATCH,
            &X_AMZ_COPY_SOURCE_IF_NONE_MATCH,
            &X_AMZ_COPY_SOURCE_IF_MODIFIED_SINCE,
            &X_AMZ_COPY_SOURCE_IF_UNMODIFIED_SINCE,
        )
    }

    fn parse_with(
        headers: &HeaderMap,
        hdr_if_match: &HeaderName,
        hdr_if_none_match: &HeaderName,
        hdr_if_modified_since: &HeaderName,
        hdr_if_unmodified_since: &HeaderName,
    ) -> Result<Self, Error> {
        Ok(Self {
            if_match: headers
                .get(hdr_if_match)
                .map(|x| x.to_str())
                .transpose()?
                .map(|x| {
                    x.split(',')
                        .map(|m| m.trim().trim_matches('"').to_string())
                        .collect::<Vec<_>>()
                }),
            if_none_match: headers
                .get(hdr_if_none_match)
                .map(|x| x.to_str())
                .transpose()?
                .map(|x| {
                    x.split(',')
                        .map(|m| m.trim().trim_matches('"').to_string())
                        .collect::<Vec<_>>()
                }),
            if_modified_since: headers
                .get(hdr_if_modified_since)
                .map(|x| x.to_str())
                .transpose()?
                .map(httpdate::parse_http_date)
                .transpose()
                .ok_or_bad_request("Invalid date in if-modified-since")?,
            if_unmodified_since: headers
                .get(hdr_if_unmodified_since)
                .map(|x| x.to_str())
                .transpose()?
                .map(httpdate::parse_http_date)
                .transpose()
                .ok_or_bad_request("Invalid date in if-unmodified-since")?,
        })
    }

    fn check(&self, v: &ObjectVersion, etag: &str) -> Result<Option<StatusCode>, Error> {
        let v_date = UNIX_EPOCH + Duration::from_millis(v.timestamp);

        // Implemented from https://datatracker.ietf.org/doc/html/rfc7232#section-6

        if let Some(im) = &self.if_match {
            // Step 1: if-match is present
            if !im.iter().any(|x| x == etag || x == "*") {
                return Ok(Some(StatusCode::PRECONDITION_FAILED));
            }
        } else if let Some(ius) = &self.if_unmodified_since {
            // Step 2: if-unmodified-since is present, and if-match is absent
            if v_date > *ius {
                return Ok(Some(StatusCode::PRECONDITION_FAILED));
            }
        }

        if let Some(inm) = &self.if_none_match {
            // Step 3: if-none-match is present
            if inm.iter().any(|x| x == etag || x == "*") {
                return Ok(Some(StatusCode::NOT_MODIFIED));
            }
        } else if let Some(ims) = &self.if_modified_since {
            // Step 4: if-modified-since is present, and if-none-match is absent
            if v_date <= *ims {
                return Ok(Some(StatusCode::NOT_MODIFIED));
            }
        }

        Ok(None)
    }

    pub(crate) fn check_copy_source(&self, v: &ObjectVersion, etag: &str) -> Result<(), Error> {
        match self.check(v, etag)? {
            Some(_) => Err(Error::PreconditionFailed),
            None => Ok(()),
        }
    }
}
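Note: the precedence rule cited in try_answer_cached (RFC 7232 §6) means that when If-None-Match and If-Modified-Since are both present, only the ETag comparison decides. A toy, self-contained illustration of that rule — not Garage code:

    use std::time::{Duration, SystemTime};

    fn not_modified(
        if_none_match: Option<&str>,
        if_modified_since: Option<SystemTime>,
        etag: &str,
        last_modified: SystemTime,
    ) -> bool {
        if let Some(inm) = if_none_match {
            // The ETag check takes precedence; the date header is ignored.
            inm.split(',').map(str::trim).any(|t| t == etag || t == "*")
        } else if let Some(ims) = if_modified_since {
            last_modified <= ims
        } else {
            false
        }
    }

    fn main() {
        let t0 = SystemTime::UNIX_EPOCH;
        let t1 = t0 + Duration::from_secs(60);
        // An ETag mismatch wins over a satisfied If-Modified-Since.
        assert!(!not_modified(Some("\"abc\""), Some(t1), "\"def\"", t0));
        // Without If-None-Match, the date comparison alone decides.
        assert!(not_modified(None, Some(t1), "\"def\"", t0));
    }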
@@ -14,8 +14,9 @@ mod list;
mod multipart;
mod post_object;
mod put;
pub mod website;
mod website;

mod checksum;
mod encryption;
mod router;
pub mod xml;
@@ -1,10 +1,12 @@
use quick_xml::de::from_reader;

use http_body_util::BodyExt;
use hyper::{Request, Response, StatusCode};

use serde::{Deserialize, Serialize};

use garage_api_common::helpers::*;
use garage_api_common::signature::verify_signed_content;

use crate::api_server::{ReqBody, ResBody};
use crate::error::*;

@@ -14,6 +16,7 @@ use garage_model::bucket_table::{
    parse_lifecycle_date, Bucket, LifecycleExpiration as GarageLifecycleExpiration,
    LifecycleFilter as GarageLifecycleFilter, LifecycleRule as GarageLifecycleRule,
};
use garage_util::data::*;

pub async fn handle_get_lifecycle(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
    let ReqCtx { bucket_params, .. } = ctx;

@@ -53,6 +56,7 @@ pub async fn handle_delete_lifecycle(ctx: ReqCtx) -> Result<Response<ResBody>, Error>
pub async fn handle_put_lifecycle(
    ctx: ReqCtx,
    req: Request<ReqBody>,
    content_sha256: Option<Hash>,
) -> Result<Response<ResBody>, Error> {
    let ReqCtx {
        garage,

@@ -61,7 +65,11 @@ pub async fn handle_put_lifecycle(
        ..
    } = ctx;

    let body = req.into_body().collect().await?;
    let body = BodyExt::collect(req.into_body()).await?.to_bytes();

    if let Some(content_sha256) = content_sha256 {
        verify_signed_content(content_sha256, &body[..])?;
    }

    let conf: LifecycleConfiguration = from_reader(&body as &[u8])?;
    let config = conf
@@ -54,6 +54,7 @@ pub struct ListMultipartUploadsQuery {
#[derive(Debug)]
pub struct ListPartsQuery {
    pub bucket_name: String,
    pub bucket_id: Uuid,
    pub key: String,
    pub upload_id: String,
    pub part_number_marker: Option<u64>,

@@ -1244,8 +1245,10 @@ mod tests {

    #[test]
    fn test_fetch_part_info() -> Result<(), Error> {
        let uuid = Uuid::from([0x08; 32]);
        let mut query = ListPartsQuery {
            bucket_name: "a".to_string(),
            bucket_id: uuid,
            key: "a".to_string(),
            upload_id: "xx".to_string(),
            part_number_marker: None,
@ -1,20 +1,13 @@
|
|||
use std::collections::HashMap;
|
||||
use std::convert::{TryFrom, TryInto};
|
||||
use std::hash::Hasher;
|
||||
use std::convert::TryInto;
|
||||
use std::sync::Arc;
|
||||
|
||||
use base64::prelude::*;
|
||||
use crc32c::Crc32cHasher as Crc32c;
|
||||
use crc32fast::Hasher as Crc32;
|
||||
use futures::prelude::*;
|
||||
use hyper::{Request, Response};
|
||||
use md5::{Digest, Md5};
|
||||
use sha1::Sha1;
|
||||
use sha2::Sha256;
|
||||
|
||||
use garage_table::*;
|
||||
use garage_util::data::*;
|
||||
use garage_util::error::OkOrMessage;
|
||||
|
||||
use garage_model::garage::Garage;
|
||||
use garage_model::s3::block_ref_table::*;
|
||||
|
@ -23,9 +16,10 @@ use garage_model::s3::object_table::*;
|
|||
use garage_model::s3::version_table::*;
|
||||
|
||||
use garage_api_common::helpers::*;
|
||||
use garage_api_common::signature::checksum::*;
|
||||
use garage_api_common::signature::verify_signed_content;
|
||||
|
||||
use crate::api_server::{ReqBody, ResBody};
|
||||
use crate::checksum::*;
|
||||
use crate::encryption::EncryptionParams;
|
||||
use crate::error::*;
|
||||
use crate::put::*;
|
||||
|
@ -49,7 +43,7 @@ pub async fn handle_create_multipart_upload(
|
|||
let upload_id = gen_uuid();
|
||||
let timestamp = next_timestamp(existing_object.as_ref());
|
||||
|
||||
let headers = extract_metadata_headers(req.headers())?;
|
||||
let headers = get_headers(req.headers())?;
|
||||
let meta = ObjectVersionMetaInner {
|
||||
headers,
|
||||
checksum: None,
|
||||
|
@ -100,6 +94,7 @@ pub async fn handle_put_part(
|
|||
key: &str,
|
||||
part_number: u64,
|
||||
upload_id: &str,
|
||||
content_sha256: Option<Hash>,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
let ReqCtx { garage, .. } = &ctx;
|
||||
|
||||
|
@ -110,30 +105,17 @@ pub async fn handle_put_part(
|
|||
Some(x) => Some(x.to_str()?.to_string()),
|
||||
None => None,
|
||||
},
|
||||
sha256: None,
|
||||
sha256: content_sha256,
|
||||
extra: request_checksum_value(req.headers())?,
|
||||
};
|
||||
|
||||
// Read first chuck, and at the same time try to get object to see if it exists
|
||||
let key = key.to_string();
|
||||
|
||||
let (req_head, mut req_body) = req.into_parts();
|
||||
|
||||
// Before we stream the body, configure the needed checksums.
|
||||
req_body.add_expected_checksums(expected_checksums.clone());
|
||||
// TODO: avoid parsing encryption headers twice...
|
||||
if !EncryptionParams::new_from_headers(&garage, &req_head.headers)?.is_encrypted() {
|
||||
// For non-encrypted objects, we need to compute the md5sum in all cases
|
||||
// (even if content-md5 is not set), because it is used as an etag of the
|
||||
// part, which is in turn used in the etag computation of the whole object
|
||||
req_body.add_md5();
|
||||
}
|
||||
|
||||
let (stream, stream_checksums) = req_body.streaming_with_checksums();
|
||||
let stream = stream.map_err(Error::from);
|
||||
|
||||
let (req_head, req_body) = req.into_parts();
|
||||
let stream = body_stream(req_body);
|
||||
let mut chunker = StreamChunker::new(stream, garage.config.block_size);
|
||||
|
||||
// Read first chuck, and at the same time try to get object to see if it exists
|
||||
let ((_, object_version, mut mpu), first_block) =
|
||||
futures::try_join!(get_upload(&ctx, &key, &upload_id), chunker.next(),)?;
|
||||
|
||||
|
@@ -190,21 +172,21 @@ pub async fn handle_put_part(
garage.version_table.insert(&version).await?;

// Copy data to version
let (total_size, _, _) = read_and_put_blocks(
let checksummer =
Checksummer::init(&expected_checksums, !encryption.is_encrypted()).add(checksum_algorithm);
let (total_size, checksums, _) = read_and_put_blocks(
&ctx,
&version,
encryption,
part_number,
first_block,
chunker,
Checksummer::new(),
&mut chunker,
checksummer,
)
.await?;

// Verify that checksums match
let checksums = stream_checksums
.await
.ok_or_internal_error("checksum calculation")??;
// Verify that checksums match
checksums.verify(&expected_checksums)?;

// Store part etag in version
let etag = encryption.etag_from_md5(&checksums.md5);
@@ -266,6 +248,7 @@ pub async fn handle_complete_multipart_upload(
req: Request<ReqBody>,
key: &str,
upload_id: &str,
content_sha256: Option<Hash>,
) -> Result<Response<ResBody>, Error> {
let ReqCtx {
garage,
@@ -277,7 +260,11 @@ pub async fn handle_complete_multipart_upload(

let expected_checksum = request_checksum_value(&req_head.headers)?;

let body = req_body.collect().await?;
let body = http_body_util::BodyExt::collect(req_body).await?.to_bytes();

if let Some(content_sha256) = content_sha256 {
verify_signed_content(content_sha256, &body[..])?;
}

let body_xml = roxmltree::Document::parse(std::str::from_utf8(&body)?)?;
let body_list_of_parts = parse_complete_multipart_upload_body(&body_xml)
@@ -443,16 +430,7 @@ pub async fn handle_complete_multipart_upload(
// Send response saying ok we're done
let result = s3_xml::CompleteMultipartUploadResult {
xmlns: (),
// FIXME: the location returned is not always correct:
// - we always return https, but maybe some people do http
// - if root_domain is not specified, a full URL is not returned
location: garage
.config
.s3_api
.root_domain
.as_ref()
.map(|rd| s3_xml::Value(format!("https://{}.{}/{}", bucket_name, rd, key)))
.or(Some(s3_xml::Value(format!("/{}/{}", bucket_name, key)))),
location: None,
bucket: s3_xml::Value(bucket_name.to_string()),
key: s3_xml::Value(key),
etag: s3_xml::Value(format!("\"{}\"", etag)),
@@ -615,99 +593,3 @@ fn parse_complete_multipart_upload_body(

Some(parts)
}

// ====== checksummer ====

#[derive(Default)]
pub(crate) struct MultipartChecksummer {
pub md5: Md5,
pub extra: Option<MultipartExtraChecksummer>,
}

pub(crate) enum MultipartExtraChecksummer {
Crc32(Crc32),
Crc32c(Crc32c),
Sha1(Sha1),
Sha256(Sha256),
}

impl MultipartChecksummer {
pub(crate) fn init(algo: Option<ChecksumAlgorithm>) -> Self {
Self {
md5: Md5::new(),
extra: match algo {
None => None,
Some(ChecksumAlgorithm::Crc32) => {
Some(MultipartExtraChecksummer::Crc32(Crc32::new()))
}
Some(ChecksumAlgorithm::Crc32c) => {
Some(MultipartExtraChecksummer::Crc32c(Crc32c::default()))
}
Some(ChecksumAlgorithm::Sha1) => Some(MultipartExtraChecksummer::Sha1(Sha1::new())),
Some(ChecksumAlgorithm::Sha256) => {
Some(MultipartExtraChecksummer::Sha256(Sha256::new()))
}
},
}
}

pub(crate) fn update(
&mut self,
etag: &str,
checksum: Option<ChecksumValue>,
) -> Result<(), Error> {
self.md5
.update(&hex::decode(&etag).ok_or_message("invalid etag hex")?);
match (&mut self.extra, checksum) {
(None, _) => (),
(
Some(MultipartExtraChecksummer::Crc32(ref mut crc32)),
Some(ChecksumValue::Crc32(x)),
) => {
crc32.update(&x);
}
(
Some(MultipartExtraChecksummer::Crc32c(ref mut crc32c)),
Some(ChecksumValue::Crc32c(x)),
) => {
crc32c.write(&x);
}
(Some(MultipartExtraChecksummer::Sha1(ref mut sha1)), Some(ChecksumValue::Sha1(x))) => {
sha1.update(&x);
}
(
Some(MultipartExtraChecksummer::Sha256(ref mut sha256)),
Some(ChecksumValue::Sha256(x)),
) => {
sha256.update(&x);
}
(Some(_), b) => {
return Err(Error::internal_error(format!(
"part checksum was not computed correctly, got: {:?}",
b
)))
}
}
Ok(())
}

pub(crate) fn finalize(self) -> (Md5Checksum, Option<ChecksumValue>) {
let md5 = self.md5.finalize()[..].try_into().unwrap();
let extra = match self.extra {
None => None,
Some(MultipartExtraChecksummer::Crc32(crc32)) => {
Some(ChecksumValue::Crc32(u32::to_be_bytes(crc32.finalize())))
}
Some(MultipartExtraChecksummer::Crc32c(crc32c)) => Some(ChecksumValue::Crc32c(
u32::to_be_bytes(u32::try_from(crc32c.finish()).unwrap()),
)),
Some(MultipartExtraChecksummer::Sha1(sha1)) => {
Some(ChecksumValue::Sha1(sha1.finalize()[..].try_into().unwrap()))
}
Some(MultipartExtraChecksummer::Sha256(sha256)) => Some(ChecksumValue::Sha256(
sha256.finalize()[..].try_into().unwrap(),
)),
};
(md5, extra)
}
}

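Aside (not part of the diff): `MultipartChecksummer::update` feeds the *binary* decoding of each part's hex ETag into the outer MD5, which is how the S3-style composite ETag of a multipart upload is built. A minimal sketch under that assumption, using the same `md5` and `hex` crates the code above relies on:

```rust
use md5::{Digest, Md5};

// Hypothetical helper: composite ETag = md5(concat(binary part md5s)) + "-" + part count.
fn composite_etag(part_etags: &[&str]) -> Result<String, hex::FromHexError> {
    let mut md5 = Md5::new();
    for etag in part_etags {
        // Decode the hex ETag back to its 16 raw bytes before hashing.
        md5.update(hex::decode(etag)?);
    }
    Ok(format!("{}-{}", hex::encode(md5.finalize()), part_etags.len()))
}
```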
@@ -18,13 +18,13 @@ use garage_model::s3::object_table::*;

use garage_api_common::cors::*;
use garage_api_common::helpers::*;
use garage_api_common::signature::checksum::*;
use garage_api_common::signature::payload::{verify_v4, Authorization};

use crate::api_server::ResBody;
use crate::checksum::*;
use crate::encryption::EncryptionParams;
use crate::error::*;
use crate::put::{extract_metadata_headers, save_stream, ChecksumMode};
use crate::put::{get_headers, save_stream, ChecksumMode};
use crate::xml as s3_xml;

pub async fn handle_post_object(
@@ -216,9 +216,8 @@ pub async fn handle_post_object(

// if we ever start supporting ACLs, we likely want to map "acl" to "x-amz-acl" somewhere
// around here to make sure the rest of the machinery takes our acl into account.
let headers = extract_metadata_headers(&params)?;
let headers = get_headers(&params)?;

let checksum_algorithm = request_checksum_algorithm(&params)?;
let expected_checksums = ExpectedChecksums {
md5: params
.get("content-md5")
@@ -226,9 +225,7 @@ pub async fn handle_post_object(
.transpose()?
.map(str::to_string),
sha256: None,
extra: checksum_algorithm
.map(|algo| extract_checksum_value(&params, algo))
.transpose()?,
extra: request_checksum_algorithm_value(&params)?,
};

let meta = ObjectVersionMetaInner {

@@ -31,13 +31,11 @@ use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*;

use garage_api_common::helpers::*;
use garage_api_common::signature::body::StreamingChecksumReceiver;
use garage_api_common::signature::checksum::*;

use crate::api_server::{ReqBody, ResBody};
use crate::checksum::*;
use crate::encryption::EncryptionParams;
use crate::error::*;
use crate::website::X_AMZ_WEBSITE_REDIRECT_LOCATION;

const PUT_BLOCKS_MAX_PARALLEL: usize = 3;

@@ -50,10 +48,6 @@ pub(crate) struct SaveStreamResult {

pub(crate) enum ChecksumMode<'a> {
Verify(&'a ExpectedChecksums),
VerifyFrom {
checksummer: StreamingChecksumReceiver,
trailer_algo: Option<ChecksumAlgorithm>,
},
Calculate(Option<ChecksumAlgorithm>),
}

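For orientation, a hedged sketch (not from the diff) of how a handler might pick between the three modes above, assuming the `ChecksumMode`, `ExpectedChecksums`, `StreamingChecksumReceiver` and `ChecksumAlgorithm` types from this hunk are in scope:

```rust
// Hypothetical helper illustrating the three checksum modes.
fn pick_mode<'a>(
    trailer: Option<(StreamingChecksumReceiver, Option<ChecksumAlgorithm>)>,
    expected: Option<&'a ExpectedChecksums>,
    algo: Option<ChecksumAlgorithm>,
) -> ChecksumMode<'a> {
    if let Some((checksummer, trailer_algo)) = trailer {
        // New code path: checksums are computed upstream by the signature
        // layer while the body streams through it.
        ChecksumMode::VerifyFrom { checksummer, trailer_algo }
    } else if let Some(expected) = expected {
        // Header-declared checksums must match what save_stream computes.
        ChecksumMode::Verify(expected)
    } else {
        // Nothing declared: just calculate the requested algorithm, if any.
        ChecksumMode::Calculate(algo)
    }
}
```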
@@ -61,9 +55,10 @@ pub async fn handle_put(
ctx: ReqCtx,
req: Request<ReqBody>,
key: &String,
content_sha256: Option<Hash>,
) -> Result<Response<ResBody>, Error> {
// Retrieve interesting headers from request
let headers = extract_metadata_headers(req.headers())?;
let headers = get_headers(req.headers())?;
debug!("Object headers: {:?}", headers);

let expected_checksums = ExpectedChecksums {
@@ -71,10 +66,9 @@ pub async fn handle_put(
Some(x) => Some(x.to_str()?.to_string()),
None => None,
},
sha256: None,
sha256: content_sha256,
extra: request_checksum_value(req.headers())?,
};
let trailer_checksum_algorithm = request_trailer_checksum_algorithm(req.headers())?;

let meta = ObjectVersionMetaInner {
headers,
@@ -84,19 +78,7 @@ pub async fn handle_put(
// Determine whether object should be encrypted, and if so the key
let encryption = EncryptionParams::new_from_headers(&ctx.garage, req.headers())?;

// The request body is a special ReqBody object (see garage_api_common::signature::body)
// which supports calculating checksums while streaming the data.
// Before we start streaming, we configure it to calculate all the checksums we need.
let mut req_body = req.into_body();
req_body.add_expected_checksums(expected_checksums.clone());
if !encryption.is_encrypted() {
// For non-encrypted objects, we need to compute the md5sum in all cases
// (even if content-md5 is not set), because it is used as the object etag
req_body.add_md5();
}

let (stream, checksummer) = req_body.streaming_with_checksums();
let stream = stream.map_err(Error::from);
let stream = body_stream(req.into_body());

let res = save_stream(
&ctx,
@@ -104,10 +86,7 @@ pub async fn handle_put(
encryption,
stream,
key,
ChecksumMode::VerifyFrom {
checksummer,
trailer_algo: trailer_checksum_algorithm,
},
ChecksumMode::Verify(&expected_checksums),
)
.await?;

@@ -143,15 +122,10 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
let version_uuid = gen_uuid();
let version_timestamp = next_timestamp(existing_object.as_ref());

let mut checksummer = match &checksum_mode {
let mut checksummer = match checksum_mode {
ChecksumMode::Verify(expected) => Checksummer::init(expected, !encryption.is_encrypted()),
ChecksumMode::Calculate(algo) => {
Checksummer::init(&Default::default(), !encryption.is_encrypted()).add(*algo)
}
ChecksumMode::VerifyFrom { .. } => {
// Checksums are calculated by the garage_api_common::signature module
// so here we can just have an empty checksummer that does nothing
Checksummer::new()
Checksummer::init(&Default::default(), !encryption.is_encrypted()).add(algo)
}
};

@@ -159,7 +133,7 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
// as "inline data". We can then return immediately.
if first_block.len() < INLINE_THRESHOLD {
checksummer.update(&first_block);
let mut checksums = checksummer.finalize();
let checksums = checksummer.finalize();

match checksum_mode {
ChecksumMode::Verify(expected) => {
@@ -168,18 +142,6 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
ChecksumMode::Calculate(algo) => {
meta.checksum = checksums.extract(algo);
}
ChecksumMode::VerifyFrom {
checksummer,
trailer_algo,
} => {
drop(chunker);
checksums = checksummer
.await
.ok_or_internal_error("checksum calculation")??;
if let Some(algo) = trailer_algo {
meta.checksum = checksums.extract(Some(algo));
}
}
};

let size = first_block.len() as u64;
@@ -251,13 +213,13 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
garage.version_table.insert(&version).await?;

// Transfer data
let (total_size, mut checksums, first_block_hash) = read_and_put_blocks(
let (total_size, checksums, first_block_hash) = read_and_put_blocks(
ctx,
&version,
encryption,
1,
first_block,
chunker,
&mut chunker,
checksummer,
)
.await?;
@@ -270,17 +232,6 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
ChecksumMode::Calculate(algo) => {
meta.checksum = checksums.extract(algo);
}
ChecksumMode::VerifyFrom {
checksummer,
trailer_algo,
} => {
checksums = checksummer
.await
.ok_or_internal_error("checksum calculation")??;
if let Some(algo) = trailer_algo {
meta.checksum = checksums.extract(Some(algo));
}
}
};

// Verify quotas are respected
@@ -381,7 +332,7 @@ pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> +
encryption: EncryptionParams,
part_number: u64,
first_block: Bytes,
mut chunker: StreamChunker<S>,
chunker: &mut StreamChunker<S>,
checksummer: Checksummer,
) -> Result<(u64, Checksums, Hash), Error> {
let tracer = opentelemetry::global::tracer("garage");
@@ -650,9 +601,7 @@ impl Drop for InterruptedCleanup {

// ============ helpers ============

pub(crate) fn extract_metadata_headers(
headers: &HeaderMap<HeaderValue>,
) -> Result<HeaderList, Error> {
pub(crate) fn get_headers(headers: &HeaderMap<HeaderValue>) -> Result<HeaderList, Error> {
let mut ret = Vec::new();

// Preserve standard headers
@@ -678,18 +627,6 @@ pub(crate) fn extract_metadata_headers(
std::str::from_utf8(value.as_bytes())?.to_string(),
));
}
if name == X_AMZ_WEBSITE_REDIRECT_LOCATION {
let value = std::str::from_utf8(value.as_bytes())?.to_string();
if !(value.starts_with("/")
|| value.starts_with("http://")
|| value.starts_with("https://"))
{
return Err(Error::bad_request(format!(
"Invalid {X_AMZ_WEBSITE_REDIRECT_LOCATION} header",
)));
}
ret.push((X_AMZ_WEBSITE_REDIRECT_LOCATION.to_string(), value));
}
}

Ok(ret)

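The removed hunk above enforces that an `x-amz-website-redirect-location` value is either a bucket-relative path or an absolute http(s) URL. A self-contained sketch of just that rule (hypothetical helper name, not from the diff):

```rust
fn is_valid_redirect_location(value: &str) -> bool {
    // Same predicate as the removed check above.
    value.starts_with('/') || value.starts_with("http://") || value.starts_with("https://")
}

fn main() {
    assert!(is_valid_redirect_location("/other/key"));
    assert!(is_valid_redirect_location("https://example.com/page"));
    assert!(!is_valid_redirect_location("ftp://example.com/file"));
}
```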
@@ -352,18 +352,6 @@ impl Endpoint {
_ => return Err(Error::bad_request("Unknown method")),
};

if let Some(x_id) = query.x_id.take() {
if x_id != res.name() {
// I think AWS ignores the x-id parameter.
// Let's make this at least be a warning to help debugging.
warn!(
"x-id ({}) does not match parsed endpoint ({})",
x_id,
res.name()
);
}
}

if let Some(message) = query.nonempty_message() {
debug!("Unused query parameter: {}", message)
}
@@ -708,8 +696,7 @@ generateQueryParameters! {
"uploadId" => upload_id,
"upload-id-marker" => upload_id_marker,
"versionId" => version_id,
"version-id-marker" => version_id_marker,
"x-id" => x_id
"version-id-marker" => version_id_marker
]
}

@@ -1,19 +1,19 @@
use quick_xml::de::from_reader;

use hyper::{header::HeaderName, Request, Response, StatusCode};
use http_body_util::BodyExt;
use hyper::{Request, Response, StatusCode};
use serde::{Deserialize, Serialize};

use garage_model::bucket_table::*;
use garage_model::bucket_table::{self, *};
use garage_util::data::*;

use garage_api_common::helpers::*;
use garage_api_common::signature::verify_signed_content;

use crate::api_server::{ReqBody, ResBody};
use crate::error::*;
use crate::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};

pub const X_AMZ_WEBSITE_REDIRECT_LOCATION: HeaderName =
HeaderName::from_static("x-amz-website-redirect-location");

pub async fn handle_get_website(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
let ReqCtx { bucket_params, .. } = ctx;
if let Some(website) = bucket_params.website_config.get() {
@@ -26,7 +26,28 @@ pub async fn handle_get_website(ctx: ReqCtx) -> Result<Response<ResBody>, Error>
suffix: Value(website.index_document.to_string()),
}),
redirect_all_requests_to: None,
routing_rules: None,
routing_rules: RoutingRules {
rules: website
.routing_rules
.clone()
.into_iter()
.map(|rule| RoutingRule {
condition: rule.condition.map(|cond| Condition {
http_error_code: cond.http_error_code.map(|c| IntValue(c as i64)),
prefix: cond.prefix.map(Value),
}),
redirect: Redirect {
hostname: rule.redirect.hostname.map(Value),
http_redirect_code: Some(IntValue(
rule.redirect.http_redirect_code as i64,
)),
protocol: rule.redirect.protocol.map(Value),
replace_full: rule.redirect.replace_key.map(Value),
replace_prefix: rule.redirect.replace_key_prefix.map(Value),
},
})
.collect(),
},
};
let xml = to_xml_with_header(&wc)?;
Ok(Response::builder()
@@ -61,6 +82,7 @@ pub async fn handle_delete_website(ctx: ReqCtx) -> Result<Response<ResBody>, Err
pub async fn handle_put_website(
ctx: ReqCtx,
req: Request<ReqBody>,
content_sha256: Option<Hash>,
) -> Result<Response<ResBody>, Error> {
let ReqCtx {
garage,
@@ -69,7 +91,11 @@ pub async fn handle_put_website(
..
} = ctx;

let body = req.into_body().collect().await?;
let body = BodyExt::collect(req.into_body()).await?.to_bytes();

if let Some(content_sha256) = content_sha256 {
verify_signed_content(content_sha256, &body[..])?;
}

let conf: WebsiteConfiguration = from_reader(&body as &[u8])?;
conf.validate()?;
@@ -97,18 +123,28 @@ pub struct WebsiteConfiguration {
pub index_document: Option<Suffix>,
#[serde(rename = "RedirectAllRequestsTo")]
pub redirect_all_requests_to: Option<Target>,
#[serde(rename = "RoutingRules")]
pub routing_rules: Option<Vec<RoutingRule>>,
#[serde(
rename = "RoutingRules",
default,
skip_serializing_if = "RoutingRules::is_empty"
)]
pub routing_rules: RoutingRules,
}

#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Default)]
pub struct RoutingRules {
#[serde(rename = "RoutingRule")]
pub rules: Vec<RoutingRule>,
}

impl RoutingRules {
fn is_empty(&self) -> bool {
self.rules.is_empty()
}
}

#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
pub struct RoutingRule {
#[serde(rename = "RoutingRule")]
pub inner: RoutingRuleInner,
}

#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
pub struct RoutingRuleInner {
#[serde(rename = "Condition")]
pub condition: Option<Condition>,
#[serde(rename = "Redirect")]
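To see why the `RoutingRules` wrapper struct is introduced: repeated `<RoutingRule>` elements live inside a single `<RoutingRules>` element, which maps naturally to a struct containing a `Vec`. A simplified, hedged sketch with stand-in types (element names taken from the test further down; this is not the document's exact code):

```rust
use serde::Deserialize;

// Simplified stand-ins for the structs above, showing the wrapper shape.
#[derive(Debug, Default, Deserialize)]
struct RoutingRules {
    #[serde(rename = "RoutingRule", default)]
    rules: Vec<RoutingRule>,
}

#[derive(Debug, Deserialize)]
struct RoutingRule {
    #[serde(rename = "Redirect")]
    redirect: Redirect,
}

#[derive(Debug, Deserialize)]
struct Redirect {
    #[serde(rename = "HttpRedirectCode")]
    http_redirect_code: Option<i64>,
}

fn main() {
    let xml = r#"<RoutingRules>
        <RoutingRule><Redirect><HttpRedirectCode>302</HttpRedirectCode></Redirect></RoutingRule>
    </RoutingRules>"#;
    let rules: RoutingRules = quick_xml::de::from_str(xml).unwrap();
    assert_eq!(rules.rules.len(), 1);
    assert_eq!(rules.rules[0].redirect.http_redirect_code, Some(302));
}
```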
@@ -162,7 +198,7 @@ impl WebsiteConfiguration {
if self.redirect_all_requests_to.is_some()
&& (self.error_document.is_some()
|| self.index_document.is_some()
|| self.routing_rules.is_some())
|| !self.routing_rules.is_empty())
{
return Err(Error::bad_request(
"Bad XML: can't have RedirectAllRequestsTo and other fields",
@@ -177,10 +213,15 @@ impl WebsiteConfiguration {
if let Some(ref rart) = self.redirect_all_requests_to {
rart.validate()?;
}
if let Some(ref rrs) = self.routing_rules {
for rr in rrs {
rr.inner.validate()?;
}
for rr in &self.routing_rules.rules {
rr.validate()?;
}
if self.routing_rules.rules.len() > 1000 {
// we will do linear scans, best to avoid overly long configuration. The
// limit was chosen arbitrarily
return Err(Error::bad_request(
"Bad XML: RoutingRules can't have more than 1000 child elements",
));
}

Ok(())
@@ -189,11 +230,7 @@ impl WebsiteConfiguration {
pub fn into_garage_website_config(self) -> Result<WebsiteConfig, Error> {
if self.redirect_all_requests_to.is_some() {
Err(Error::NotImplemented(
"S3 website redirects are not currently implemented in Garage.".into(),
))
} else if self.routing_rules.map(|x| !x.is_empty()).unwrap_or(false) {
Err(Error::NotImplemented(
"S3 routing rules are not currently implemented in Garage.".into(),
"RedirectAllRequestsTo is not currently implemented in Garage, however its effect can be emulated using a single unconditional RoutingRule.".into(),
))
} else {
Ok(WebsiteConfig {
@@ -202,6 +239,36 @@ impl WebsiteConfiguration {
.map(|x| x.suffix.0)
.unwrap_or_else(|| "index.html".to_string()),
error_document: self.error_document.map(|x| x.key.0),
redirect_all: None,
routing_rules: self
.routing_rules
.rules
.into_iter()
.map(|rule| {
bucket_table::RoutingRule {
condition: rule.condition.map(|condition| {
bucket_table::RedirectCondition {
http_error_code: condition.http_error_code.map(|c| c.0 as u16),
prefix: condition.prefix.map(|p| p.0),
}
}),
redirect: bucket_table::Redirect {
hostname: rule.redirect.hostname.map(|h| h.0),
protocol: rule.redirect.protocol.map(|p| p.0),
// AWS defaults to 301, which I find punitive in case of
// misconfiguration (can be permanently cached on the
// user agent)
http_redirect_code: rule
.redirect
.http_redirect_code
.map(|c| c.0 as u16)
.unwrap_or(302),
replace_key_prefix: rule.redirect.replace_prefix.map(|k| k.0),
replace_key: rule.redirect.replace_full.map(|k| k.0),
},
}
})
.collect(),
})
}
}
@@ -242,37 +309,69 @@ impl Target {
}
}

impl RoutingRuleInner {
impl RoutingRule {
pub fn validate(&self) -> Result<(), Error> {
let has_prefix = self
.condition
.as_ref()
.and_then(|c| c.prefix.as_ref())
.is_some();
self.redirect.validate(has_prefix)
if let Some(condition) = &self.condition {
condition.validate()?;
}
self.redirect.validate()
}
}

impl Condition {
pub fn validate(&self) -> Result<bool, Error> {
if let Some(ref error_code) = self.http_error_code {
// TODO do other error codes make sense? Aws only allows 4xx and 5xx
if error_code.0 != 404 {
return Err(Error::bad_request(
"Bad XML: HttpErrorCodeReturnedEquals must be 404 or absent",
));
}
}
Ok(self.prefix.is_some())
}
}

impl Redirect {
pub fn validate(&self, has_prefix: bool) -> Result<(), Error> {
if self.replace_prefix.is_some() {
if self.replace_full.is_some() {
return Err(Error::bad_request(
"Bad XML: both ReplaceKeyPrefixWith and ReplaceKeyWith are set",
));
}
if !has_prefix {
return Err(Error::bad_request(
"Bad XML: ReplaceKeyPrefixWith is set, but KeyPrefixEquals isn't",
));
}
pub fn validate(&self) -> Result<(), Error> {
if self.replace_prefix.is_some() && self.replace_full.is_some() {
return Err(Error::bad_request(
"Bad XML: both ReplaceKeyPrefixWith and ReplaceKeyWith are set",
));
}
if let Some(ref protocol) = self.protocol {
if protocol.0 != "http" && protocol.0 != "https" {
return Err(Error::bad_request("Bad XML: invalid protocol"));
}
}
// TODO there are probably more invalid cases, but which ones?
if let Some(ref http_redirect_code) = self.http_redirect_code {
match http_redirect_code.0 {
// aws allows all 3xx except 300, but some are non-sensical (not modified,
// use proxy...)
301 | 302 | 303 | 307 | 308 => {
if self.hostname.is_none() && self.protocol.is_some() {
return Err(Error::bad_request(
"Bad XML: HostName must be set if Protocol is set",
));
}
}
// aws doesn't allow these codes, but netlify does, and it seems like a
// cool feature (change the page seen without changing the url shown by the
// user agent)
200 | 404 => {
if self.hostname.is_some() || self.protocol.is_some() {
// hostname would mean different bucket, protocol doesn't make
// sense
return Err(Error::bad_request(
"Bad XML: an HttpRedirectCode of 200 is not acceptable alongside HostName or Protocol",
));
}
}
_ => {
return Err(Error::bad_request("Bad XML: invalid HttpRedirectCode"));
}
}
}
Ok(())
}
}
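In short, the new validation accepts HttpRedirectCode values 301, 302, 303, 307 and 308 (true redirects, which may carry HostName/Protocol) and 200 or 404 (internal rewrites, which must not). A hedged standalone sketch of that decision table, not taken from the diff:

```rust
// Hypothetical mirror of the HttpRedirectCode rules in Redirect::validate above.
fn redirect_code_ok(code: i64, has_hostname: bool, has_protocol: bool) -> bool {
    match code {
        // Real redirects: Protocol only makes sense together with HostName.
        301 | 302 | 303 | 307 | 308 => !(has_protocol && !has_hostname),
        // Netlify-style rewrites: must not point at another host or protocol.
        200 | 404 => !has_hostname && !has_protocol,
        _ => false,
    }
}

fn main() {
    assert!(redirect_code_ok(302, true, true));
    assert!(!redirect_code_ok(200, true, false));
    assert!(!redirect_code_ok(300, false, false));
}
```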
@@ -311,6 +410,15 @@ mod tests {
<ReplaceKeyWith>fullkey</ReplaceKeyWith>
</Redirect>
</RoutingRule>
<RoutingRule>
<Condition>
<KeyPrefixEquals></KeyPrefixEquals>
</Condition>
<Redirect>
<HttpRedirectCode>404</HttpRedirectCode>
<ReplaceKeyWith>missing</ReplaceKeyWith>
</Redirect>
</RoutingRule>
</RoutingRules>
</WebsiteConfiguration>"#;
let conf: WebsiteConfiguration = from_str(message).unwrap();
@@ -326,21 +434,36 @@ mod tests {
hostname: Value("garage.tld".to_owned()),
protocol: Some(Value("https".to_owned())),
}),
routing_rules: Some(vec![RoutingRule {
inner: RoutingRuleInner {
condition: Some(Condition {
http_error_code: Some(IntValue(404)),
prefix: Some(Value("prefix1".to_owned())),
}),
redirect: Redirect {
hostname: Some(Value("gara.ge".to_owned())),
protocol: Some(Value("http".to_owned())),
http_redirect_code: Some(IntValue(303)),
replace_prefix: Some(Value("prefix2".to_owned())),
replace_full: Some(Value("fullkey".to_owned())),
routing_rules: RoutingRules {
rules: vec![
RoutingRule {
condition: Some(Condition {
http_error_code: Some(IntValue(404)),
prefix: Some(Value("prefix1".to_owned())),
}),
redirect: Redirect {
hostname: Some(Value("gara.ge".to_owned())),
protocol: Some(Value("http".to_owned())),
http_redirect_code: Some(IntValue(303)),
replace_prefix: Some(Value("prefix2".to_owned())),
replace_full: Some(Value("fullkey".to_owned())),
},
},
},
}]),
RoutingRule {
condition: Some(Condition {
http_error_code: None,
prefix: Some(Value("".to_owned())),
}),
redirect: Redirect {
hostname: None,
protocol: None,
http_redirect_code: Some(IntValue(404)),
replace_prefix: None,
replace_full: Some(Value("missing".to_owned())),
},
},
],
},
};
assert_eq! {
ref_value,

@@ -1,6 +1,6 @@
[package]
name = "garage_block"
version = "1.1.0"
version = "1.0.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"

@@ -370,7 +370,7 @@ impl BlockManager {
prevent_compression: bool,
order_tag: Option<OrderTag>,
) -> Result<(), Error> {
let who = self.system.cluster_layout().current_storage_nodes_of(&hash);
let who = self.replication.write_sets(&hash);

let compression_level = self.compression_level.filter(|_| !prevent_compression);
let (header, bytes) = DataBlock::from_buffer(data, compression_level)
@@ -396,7 +396,7 @@ impl BlockManager {
.rpc_helper()
.try_write_many_sets(
&self.endpoint,
&[who],
who.as_ref(),
put_block_rpc,
RequestStrategy::with_priority(PRIO_NORMAL | PRIO_SECONDARY)
.with_drop_on_completion(permit)
@@ -668,12 +668,10 @@ impl BlockManager {
hash: &Hash,
wrong_path: DataBlockPath,
) -> Result<usize, Error> {
let data = self.read_block_from(hash, &wrong_path).await?;
self.lock_mutate(hash)
.await
.write_block_inner(hash, &data, self, Some(wrong_path))
.await?;
Ok(data.as_parts_ref().1.len())
.fix_block_location(hash, wrong_path, self)
.await
}

async fn lock_mutate(&self, hash: &Hash) -> MutexGuard<'_, BlockManagerLocked> {
@@ -829,6 +827,18 @@ impl BlockManagerLocked {
}
Ok(())
}

async fn fix_block_location(
&self,
hash: &Hash,
wrong_path: DataBlockPath,
mgr: &BlockManager,
) -> Result<usize, Error> {
let data = mgr.read_block_from(hash, &wrong_path).await?;
self.write_block_inner(hash, &data, mgr, Some(wrong_path))
.await?;
Ok(data.as_parts_ref().1.len())
}
}

struct DeleteOnDrop(Option<PathBuf>);

@@ -377,10 +377,7 @@ impl BlockResyncManager {
info!("Resync block {:?}: offloading and deleting", hash);
let existing_path = existing_path.unwrap();

let mut who = manager
.system
.cluster_layout()
.current_storage_nodes_of(hash);
let mut who = manager.replication.storage_nodes(hash);
if who.len() < manager.replication.write_quorum() {
return Err(Error::Message("Not trying to offload block because we don't have a quorum of nodes to write to".to_string()));
}
@@ -458,25 +455,6 @@ impl BlockResyncManager {
}

if rc.is_nonzero() && !exists {
// The refcount is > 0, and the block is not present locally.
// We might need to fetch it from another node.

// First, check whether we are still supposed to store that
// block in the latest cluster layout version.
let storage_nodes = manager
.system
.cluster_layout()
.current_storage_nodes_of(&hash);

if !storage_nodes.contains(&manager.system.id) {
info!(
"Resync block {:?}: block is absent with refcount > 0, but it will drop to zero after all metadata is synced. Not fetching the block.",
hash
);
return Ok(());
}

// We know we need the block. Fetch it.
info!(
"Resync block {:?}: fetching absent but needed block (refcount > 0)",
hash

@@ -1,6 +1,6 @@
[package]
name = "garage_db"
version = "1.1.0"
version = "1.0.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"

@@ -1,6 +1,6 @@
[package]
name = "garage"
version = "1.1.0"
version = "1.0.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
@@ -26,6 +26,7 @@ garage_db.workspace = true
garage_api_admin.workspace = true
garage_api_s3.workspace = true
garage_api_k2v = { workspace = true, optional = true }
garage_api_common.workspace = true
garage_block.workspace = true
garage_model.workspace = true
garage_net.workspace = true
@@ -48,8 +49,6 @@ sodiumoxide.workspace = true
structopt.workspace = true
git-version.workspace = true

serde.workspace = true

futures.workspace = true
tokio.workspace = true

@@ -62,7 +61,6 @@ syslog-tracing = { workspace = true, optional = true }
garage_api_common.workspace = true

aws-sdk-s3.workspace = true
aws-smithy-runtime.workspace = true
chrono.workspace = true
http.workspace = true
hmac.workspace = true
@@ -72,12 +70,10 @@ hyper-util.workspace = true
mktemp.workspace = true
sha2.workspace = true

static_init.workspace = true
assert-json-diff.workspace = true
serde_json.workspace = true
base64.workspace = true
crc32fast.workspace = true

k2v-client.workspace = true

@@ -1,235 +0,0 @@
use garage_util::data::*;

use garage_table::*;

use garage_model::helper::error::{Error, OkOrBadRequest};
use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*;

use crate::cli::*;

use super::*;

impl AdminRpcHandler {
pub(super) async fn handle_block_cmd(&self, cmd: &BlockOperation) -> Result<AdminRpc, Error> {
match cmd {
BlockOperation::ListErrors => Ok(AdminRpc::BlockErrorList(
self.garage.block_manager.list_resync_errors()?,
)),
BlockOperation::Info { hash } => self.handle_block_info(hash).await,
BlockOperation::RetryNow { all, blocks } => {
self.handle_block_retry_now(*all, blocks).await
}
BlockOperation::Purge { yes, blocks } => self.handle_block_purge(*yes, blocks).await,
}
}

async fn handle_block_info(&self, hash: &String) -> Result<AdminRpc, Error> {
let hash = self.find_block_hash_by_prefix(hash)?;
let refcount = self.garage.block_manager.get_block_rc(&hash)?;
let block_refs = self
.garage
.block_ref_table
.get_range(&hash, None, None, 10000, Default::default())
.await?;
let mut versions = vec![];
let mut uploads = vec![];
for br in block_refs {
if let Some(v) = self
.garage
.version_table
.get(&br.version, &EmptyKey)
.await?
{
if let VersionBacklink::MultipartUpload { upload_id } = &v.backlink {
if let Some(u) = self.garage.mpu_table.get(upload_id, &EmptyKey).await? {
uploads.push(u);
}
}
versions.push(Ok(v));
} else {
versions.push(Err(br.version));
}
}
Ok(AdminRpc::BlockInfo {
hash,
refcount,
versions,
uploads,
})
}

async fn handle_block_retry_now(
&self,
all: bool,
blocks: &[String],
) -> Result<AdminRpc, Error> {
if all {
if !blocks.is_empty() {
return Err(Error::BadRequest(
"--all was specified, cannot also specify blocks".into(),
));
}
let blocks = self.garage.block_manager.list_resync_errors()?;
for b in blocks.iter() {
self.garage.block_manager.resync.clear_backoff(&b.hash)?;
}
Ok(AdminRpc::Ok(format!(
"{} blocks returned in queue for a retry now (check logs to see results)",
blocks.len()
)))
} else {
for hash in blocks {
let hash = hex::decode(hash).ok_or_bad_request("invalid hash")?;
let hash = Hash::try_from(&hash).ok_or_bad_request("invalid hash")?;
self.garage.block_manager.resync.clear_backoff(&hash)?;
}
Ok(AdminRpc::Ok(format!(
"{} blocks returned in queue for a retry now (check logs to see results)",
blocks.len()
)))
}
}

async fn handle_block_purge(&self, yes: bool, blocks: &[String]) -> Result<AdminRpc, Error> {
if !yes {
return Err(Error::BadRequest(
"Pass the --yes flag to confirm block purge operation.".into(),
));
}

let mut obj_dels = 0;
let mut mpu_dels = 0;
let mut ver_dels = 0;

for hash in blocks {
let hash = hex::decode(hash).ok_or_bad_request("invalid hash")?;
let hash = Hash::try_from(&hash).ok_or_bad_request("invalid hash")?;
let block_refs = self
.garage
.block_ref_table
.get_range(&hash, None, None, 10000, Default::default())
.await?;

for br in block_refs {
if let Some(version) = self
.garage
.version_table
.get(&br.version, &EmptyKey)
.await?
{
self.handle_block_purge_version_backlink(
&version,
&mut obj_dels,
&mut mpu_dels,
)
.await?;

if !version.deleted.get() {
let deleted_version = Version::new(version.uuid, version.backlink, true);
self.garage.version_table.insert(&deleted_version).await?;
ver_dels += 1;
}
}
}
}

Ok(AdminRpc::Ok(format!(
"Purged {} blocks, {} versions, {} objects, {} multipart uploads",
blocks.len(),
ver_dels,
obj_dels,
mpu_dels,
)))
}

async fn handle_block_purge_version_backlink(
&self,
version: &Version,
obj_dels: &mut usize,
mpu_dels: &mut usize,
) -> Result<(), Error> {
let (bucket_id, key, ov_id) = match &version.backlink {
VersionBacklink::Object { bucket_id, key } => (*bucket_id, key.clone(), version.uuid),
VersionBacklink::MultipartUpload { upload_id } => {
if let Some(mut mpu) = self.garage.mpu_table.get(upload_id, &EmptyKey).await? {
if !mpu.deleted.get() {
mpu.parts.clear();
mpu.deleted.set();
self.garage.mpu_table.insert(&mpu).await?;
*mpu_dels += 1;
}
(mpu.bucket_id, mpu.key.clone(), *upload_id)
} else {
return Ok(());
}
}
};

if let Some(object) = self.garage.object_table.get(&bucket_id, &key).await? {
let ov = object.versions().iter().rev().find(|v| v.is_complete());
if let Some(ov) = ov {
if ov.uuid == ov_id {
let del_uuid = gen_uuid();
let deleted_object = Object::new(
bucket_id,
key,
vec![ObjectVersion {
uuid: del_uuid,
timestamp: ov.timestamp + 1,
state: ObjectVersionState::Complete(ObjectVersionData::DeleteMarker),
}],
);
self.garage.object_table.insert(&deleted_object).await?;
*obj_dels += 1;
}
}
}

Ok(())
}

// ---- helper function ----
fn find_block_hash_by_prefix(&self, prefix: &str) -> Result<Hash, Error> {
if prefix.len() < 4 {
return Err(Error::BadRequest(
"Please specify at least 4 characters of the block hash".into(),
));
}

let prefix_bin =
hex::decode(&prefix[..prefix.len() & !1]).ok_or_bad_request("invalid hash")?;

let iter = self
.garage
.block_ref_table
.data
.store
.range(&prefix_bin[..]..)
.map_err(GarageError::from)?;
let mut found = None;
for item in iter {
let (k, _v) = item.map_err(GarageError::from)?;
let hash = Hash::try_from(&k[..32]).unwrap();
if &hash.as_slice()[..prefix_bin.len()] != prefix_bin {
break;
}
if hex::encode(hash.as_slice()).starts_with(prefix) {
match &found {
Some(x) if *x == hash => (),
Some(_) => {
return Err(Error::BadRequest(format!(
"Several blocks match prefix `{}`",
prefix
)));
}
None => {
found = Some(hash);
}
}
}
}

found.ok_or_else(|| Error::BadRequest("No matching block found".into()))
}
}

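One subtlety worth calling out in the deleted helper: `hex::decode` only accepts an even number of hex digits, so `&prefix[..prefix.len() & !1]` truncates the user-supplied prefix to an even length before decoding, and the final odd digit is then matched via the full `hex::encode(...).starts_with(prefix)` check. A hedged standalone sketch of that trick:

```rust
// Illustration of the even-length truncation used by the deleted helper.
fn even_prefix(prefix: &str) -> &str {
    // `len & !1` clears the lowest bit, rounding the length down to a multiple of 2.
    &prefix[..prefix.len() & !1]
}

fn main() {
    assert_eq!(even_prefix("abcde"), "abcd"); // 5 hex digits -> 4
    assert_eq!(even_prefix("abcd"), "abcd"); // already even
    assert!(hex::decode(even_prefix("abcde")).is_ok());
}
```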
@@ -1,500 +0,0 @@
use std::collections::HashMap;
use std::fmt::Write;

use garage_util::crdt::*;
use garage_util::time::*;

use garage_table::*;

use garage_model::bucket_alias_table::*;
use garage_model::bucket_table::*;
use garage_model::helper::error::{Error, OkOrBadRequest};
use garage_model::permission::*;

use crate::cli::*;

use super::*;

impl AdminRpcHandler {
pub(super) async fn handle_bucket_cmd(&self, cmd: &BucketOperation) -> Result<AdminRpc, Error> {
match cmd {
BucketOperation::List => self.handle_list_buckets().await,
BucketOperation::Info(query) => self.handle_bucket_info(query).await,
BucketOperation::Create(query) => self.handle_create_bucket(&query.name).await,
BucketOperation::Delete(query) => self.handle_delete_bucket(query).await,
BucketOperation::Alias(query) => self.handle_alias_bucket(query).await,
BucketOperation::Unalias(query) => self.handle_unalias_bucket(query).await,
BucketOperation::Allow(query) => self.handle_bucket_allow(query).await,
BucketOperation::Deny(query) => self.handle_bucket_deny(query).await,
BucketOperation::Website(query) => self.handle_bucket_website(query).await,
BucketOperation::SetQuotas(query) => self.handle_bucket_set_quotas(query).await,
BucketOperation::CleanupIncompleteUploads(query) => {
self.handle_bucket_cleanup_incomplete_uploads(query).await
}
}
}

async fn handle_list_buckets(&self) -> Result<AdminRpc, Error> {
let buckets = self
.garage
.bucket_table
.get_range(
&EmptyKey,
None,
Some(DeletedFilter::NotDeleted),
10000,
EnumerationOrder::Forward,
)
.await?;

Ok(AdminRpc::BucketList(buckets))
}

async fn handle_bucket_info(&self, query: &BucketOpt) -> Result<AdminRpc, Error> {
let bucket_id = self
.garage
.bucket_helper()
.admin_get_existing_matching_bucket(&query.name)
.await?;

let bucket = self
.garage
.bucket_helper()
.get_existing_bucket(bucket_id)
.await?;

let counters = self
.garage
.object_counter_table
.table
.get(&bucket_id, &EmptyKey)
.await?
.map(|x| x.filtered_values(&self.garage.system.cluster_layout()))
.unwrap_or_default();

let mpu_counters = self
.garage
.mpu_counter_table
.table
.get(&bucket_id, &EmptyKey)
.await?
.map(|x| x.filtered_values(&self.garage.system.cluster_layout()))
.unwrap_or_default();

let mut relevant_keys = HashMap::new();
for (k, _) in bucket
.state
.as_option()
.unwrap()
.authorized_keys
.items()
.iter()
{
if let Some(key) = self
.garage
.key_table
.get(&EmptyKey, k)
.await?
.filter(|k| !k.is_deleted())
{
relevant_keys.insert(k.clone(), key);
}
}
for ((k, _), _, _) in bucket
.state
.as_option()
.unwrap()
.local_aliases
.items()
.iter()
{
if relevant_keys.contains_key(k) {
continue;
}
if let Some(key) = self.garage.key_table.get(&EmptyKey, k).await? {
relevant_keys.insert(k.clone(), key);
}
}

Ok(AdminRpc::BucketInfo {
bucket,
relevant_keys,
counters,
mpu_counters,
})
}

#[allow(clippy::ptr_arg)]
async fn handle_create_bucket(&self, name: &String) -> Result<AdminRpc, Error> {
if !is_valid_bucket_name(name) {
return Err(Error::BadRequest(format!(
"{}: {}",
name, INVALID_BUCKET_NAME_MESSAGE
)));
}

let helper = self.garage.locked_helper().await;

if let Some(alias) = self.garage.bucket_alias_table.get(&EmptyKey, name).await? {
if alias.state.get().is_some() {
return Err(Error::BadRequest(format!("Bucket {} already exists", name)));
}
}

// ---- done checking, now commit ----

let bucket = Bucket::new();
self.garage.bucket_table.insert(&bucket).await?;

helper.set_global_bucket_alias(bucket.id, name).await?;

Ok(AdminRpc::Ok(format!("Bucket {} was created.", name)))
}

async fn handle_delete_bucket(&self, query: &DeleteBucketOpt) -> Result<AdminRpc, Error> {
let helper = self.garage.locked_helper().await;

let bucket_id = helper
.bucket()
.admin_get_existing_matching_bucket(&query.name)
.await?;

// Get the alias, but keep in mind here the bucket name
// given in parameter can also be directly the bucket's ID.
// In that case bucket_alias will be None, and
// we can still delete the bucket if it has zero aliases
// (a condition which we try to prevent but that could still happen somehow).
// We just won't try to delete an alias entry because there isn't one.
let bucket_alias = self
.garage
.bucket_alias_table
.get(&EmptyKey, &query.name)
.await?;

// Check bucket doesn't have other aliases
let mut bucket = helper.bucket().get_existing_bucket(bucket_id).await?;
let bucket_state = bucket.state.as_option().unwrap();
if bucket_state
.aliases
.items()
.iter()
.filter(|(_, _, active)| *active)
.any(|(name, _, _)| name != &query.name)
{
return Err(Error::BadRequest(format!("Bucket {} still has other global aliases. Use `bucket unalias` to delete them one by one.", query.name)));
}
if bucket_state
.local_aliases
.items()
.iter()
.any(|(_, _, active)| *active)
{
return Err(Error::BadRequest(format!("Bucket {} still has other local aliases. Use `bucket unalias` to delete them one by one.", query.name)));
}

// Check bucket is empty
if !helper.bucket().is_bucket_empty(bucket_id).await? {
return Err(Error::BadRequest(format!(
"Bucket {} is not empty",
query.name
)));
}

if !query.yes {
return Err(Error::BadRequest(
"Add --yes flag to really perform this operation".to_string(),
));
}

// --- done checking, now commit ---
// 1. delete authorization from keys that had access
for (key_id, _) in bucket.authorized_keys() {
helper
.set_bucket_key_permissions(bucket.id, key_id, BucketKeyPerm::NO_PERMISSIONS)
.await?;
}

// 2. delete bucket alias
if bucket_alias.is_some() {
helper
.purge_global_bucket_alias(bucket_id, &query.name)
.await?;
}

// 3. delete bucket
bucket.state = Deletable::delete();
self.garage.bucket_table.insert(&bucket).await?;

Ok(AdminRpc::Ok(format!("Bucket {} was deleted.", query.name)))
}

async fn handle_alias_bucket(&self, query: &AliasBucketOpt) -> Result<AdminRpc, Error> {
let helper = self.garage.locked_helper().await;

let bucket_id = helper
.bucket()
.admin_get_existing_matching_bucket(&query.existing_bucket)
.await?;

if let Some(key_pattern) = &query.local {
let key = helper.key().get_existing_matching_key(key_pattern).await?;

helper
.set_local_bucket_alias(bucket_id, &key.key_id, &query.new_name)
.await?;
Ok(AdminRpc::Ok(format!(
"Alias {} now points to bucket {:?} in namespace of key {}",
query.new_name, bucket_id, key.key_id
)))
} else {
helper
.set_global_bucket_alias(bucket_id, &query.new_name)
.await?;
Ok(AdminRpc::Ok(format!(
"Alias {} now points to bucket {:?}",
query.new_name, bucket_id
)))
}
}

async fn handle_unalias_bucket(&self, query: &UnaliasBucketOpt) -> Result<AdminRpc, Error> {
let helper = self.garage.locked_helper().await;

if let Some(key_pattern) = &query.local {
let key = helper.key().get_existing_matching_key(key_pattern).await?;

let bucket_id = key
.state
.as_option()
.unwrap()
.local_aliases
.get(&query.name)
.cloned()
.flatten()
.ok_or_bad_request("Bucket not found")?;

helper
.unset_local_bucket_alias(bucket_id, &key.key_id, &query.name)
.await?;

Ok(AdminRpc::Ok(format!(
"Alias {} no longer points to bucket {:?} in namespace of key {}",
&query.name, bucket_id, key.key_id
)))
} else {
let bucket_id = helper
.bucket()
.resolve_global_bucket_name(&query.name)
.await?
.ok_or_bad_request("Bucket not found")?;

helper
.unset_global_bucket_alias(bucket_id, &query.name)
.await?;

Ok(AdminRpc::Ok(format!(
"Alias {} no longer points to bucket {:?}",
&query.name, bucket_id
)))
}
}

async fn handle_bucket_allow(&self, query: &PermBucketOpt) -> Result<AdminRpc, Error> {
let helper = self.garage.locked_helper().await;

let bucket_id = helper
.bucket()
.admin_get_existing_matching_bucket(&query.bucket)
.await?;
let key = helper
.key()
.get_existing_matching_key(&query.key_pattern)
.await?;

let allow_read = query.read || key.allow_read(&bucket_id);
let allow_write = query.write || key.allow_write(&bucket_id);
let allow_owner = query.owner || key.allow_owner(&bucket_id);

helper
.set_bucket_key_permissions(
bucket_id,
&key.key_id,
BucketKeyPerm {
timestamp: now_msec(),
allow_read,
allow_write,
allow_owner,
},
)
.await?;

Ok(AdminRpc::Ok(format!(
"New permissions for {} on {}: read {}, write {}, owner {}.",
&key.key_id, &query.bucket, allow_read, allow_write, allow_owner
)))
}

async fn handle_bucket_deny(&self, query: &PermBucketOpt) -> Result<AdminRpc, Error> {
let helper = self.garage.locked_helper().await;

let bucket_id = helper
.bucket()
.admin_get_existing_matching_bucket(&query.bucket)
.await?;
let key = helper
.key()
.get_existing_matching_key(&query.key_pattern)
.await?;

let allow_read = !query.read && key.allow_read(&bucket_id);
let allow_write = !query.write && key.allow_write(&bucket_id);
let allow_owner = !query.owner && key.allow_owner(&bucket_id);

helper
.set_bucket_key_permissions(
bucket_id,
&key.key_id,
BucketKeyPerm {
timestamp: now_msec(),
allow_read,
allow_write,
allow_owner,
},
)
.await?;

Ok(AdminRpc::Ok(format!(
"New permissions for {} on {}: read {}, write {}, owner {}.",
&key.key_id, &query.bucket, allow_read, allow_write, allow_owner
)))
}

async fn handle_bucket_website(&self, query: &WebsiteOpt) -> Result<AdminRpc, Error> {
let bucket_id = self
.garage
.bucket_helper()
.admin_get_existing_matching_bucket(&query.bucket)
.await?;

let mut bucket = self
.garage
.bucket_helper()
.get_existing_bucket(bucket_id)
.await?;
let bucket_state = bucket.state.as_option_mut().unwrap();

if !(query.allow ^ query.deny) {
return Err(Error::BadRequest(
"You must specify exactly one flag, either --allow or --deny".to_string(),
));
}

let website = if query.allow {
Some(WebsiteConfig {
index_document: query.index_document.clone(),
error_document: query.error_document.clone(),
})
} else {
None
};

bucket_state.website_config.update(website);
self.garage.bucket_table.insert(&bucket).await?;

let msg = if query.allow {
format!("Website access allowed for {}", &query.bucket)
} else {
format!("Website access denied for {}", &query.bucket)
};

Ok(AdminRpc::Ok(msg))
}

async fn handle_bucket_set_quotas(&self, query: &SetQuotasOpt) -> Result<AdminRpc, Error> {
let bucket_id = self
.garage
.bucket_helper()
.admin_get_existing_matching_bucket(&query.bucket)
.await?;

let mut bucket = self
.garage
.bucket_helper()
.get_existing_bucket(bucket_id)
.await?;
let bucket_state = bucket.state.as_option_mut().unwrap();

if query.max_size.is_none() && query.max_objects.is_none() {
return Err(Error::BadRequest(
"You must specify either --max-size or --max-objects (or both) for this command to do something.".to_string(),
));
}

let mut quotas = bucket_state.quotas.get().clone();

match query.max_size.as_ref().map(String::as_ref) {
Some("none") => quotas.max_size = None,
Some(v) => {
let bs = v
.parse::<bytesize::ByteSize>()
.ok_or_bad_request(format!("Invalid size specified: {}", v))?;
quotas.max_size = Some(bs.as_u64());
}
_ => (),
}

match query.max_objects.as_ref().map(String::as_ref) {
Some("none") => quotas.max_objects = None,
Some(v) => {
let mo = v
.parse::<u64>()
.ok_or_bad_request(format!("Invalid number specified: {}", v))?;
quotas.max_objects = Some(mo);
}
_ => (),
}

bucket_state.quotas.update(quotas);
self.garage.bucket_table.insert(&bucket).await?;

Ok(AdminRpc::Ok(format!(
"Quotas updated for {}",
&query.bucket
)))
}

async fn handle_bucket_cleanup_incomplete_uploads(
&self,
query: &CleanupIncompleteUploadsOpt,
) -> Result<AdminRpc, Error> {
let mut bucket_ids = vec![];
for b in query.buckets.iter() {
bucket_ids.push(
self.garage
.bucket_helper()
.admin_get_existing_matching_bucket(b)
.await?,
);
}

let duration = parse_duration::parse::parse(&query.older_than)
.ok_or_bad_request("Invalid duration passed for --older-than parameter")?;

let mut ret = String::new();
for bucket in bucket_ids {
let count = self
.garage
.bucket_helper()
.cleanup_incomplete_uploads(&bucket, duration)
.await?;
writeln!(
&mut ret,
"Bucket {:?}: {} incomplete uploads aborted",
bucket, count
)
.unwrap();
}

Ok(AdminRpc::Ok(ret))
}
}

@ -1,161 +0,0 @@
|
|||
use std::collections::HashMap;

use garage_table::*;

use garage_model::helper::error::*;
use garage_model::key_table::*;

use crate::cli::*;

use super::*;

impl AdminRpcHandler {
	pub(super) async fn handle_key_cmd(&self, cmd: &KeyOperation) -> Result<AdminRpc, Error> {
		match cmd {
			KeyOperation::List => self.handle_list_keys().await,
			KeyOperation::Info(query) => self.handle_key_info(query).await,
			KeyOperation::Create(query) => self.handle_create_key(query).await,
			KeyOperation::Rename(query) => self.handle_rename_key(query).await,
			KeyOperation::Delete(query) => self.handle_delete_key(query).await,
			KeyOperation::Allow(query) => self.handle_allow_key(query).await,
			KeyOperation::Deny(query) => self.handle_deny_key(query).await,
			KeyOperation::Import(query) => self.handle_import_key(query).await,
		}
	}

	async fn handle_list_keys(&self) -> Result<AdminRpc, Error> {
		let key_ids = self
			.garage
			.key_table
			.get_range(
				&EmptyKey,
				None,
				Some(KeyFilter::Deleted(DeletedFilter::NotDeleted)),
				10000,
				EnumerationOrder::Forward,
			)
			.await?
			.iter()
			.map(|k| (k.key_id.to_string(), k.params().unwrap().name.get().clone()))
			.collect::<Vec<_>>();
		Ok(AdminRpc::KeyList(key_ids))
	}

	async fn handle_key_info(&self, query: &KeyInfoOpt) -> Result<AdminRpc, Error> {
		let mut key = self
			.garage
			.key_helper()
			.get_existing_matching_key(&query.key_pattern)
			.await?;

		if !query.show_secret {
			key.state.as_option_mut().unwrap().secret_key = "(redacted)".into();
		}

		self.key_info_result(key).await
	}

	async fn handle_create_key(&self, query: &KeyNewOpt) -> Result<AdminRpc, Error> {
		let key = Key::new(&query.name);
		self.garage.key_table.insert(&key).await?;
		self.key_info_result(key).await
	}

	async fn handle_rename_key(&self, query: &KeyRenameOpt) -> Result<AdminRpc, Error> {
		let mut key = self
			.garage
			.key_helper()
			.get_existing_matching_key(&query.key_pattern)
			.await?;
		key.params_mut()
			.unwrap()
			.name
			.update(query.new_name.clone());
		self.garage.key_table.insert(&key).await?;
		self.key_info_result(key).await
	}

	async fn handle_delete_key(&self, query: &KeyDeleteOpt) -> Result<AdminRpc, Error> {
		let helper = self.garage.locked_helper().await;

		let mut key = helper
			.key()
			.get_existing_matching_key(&query.key_pattern)
			.await?;

		if !query.yes {
			return Err(Error::BadRequest(
				"Add --yes flag to really perform this operation".to_string(),
			));
		}

		helper.delete_key(&mut key).await?;

		Ok(AdminRpc::Ok(format!(
			"Key {} was deleted successfully.",
			key.key_id
		)))
	}

	async fn handle_allow_key(&self, query: &KeyPermOpt) -> Result<AdminRpc, Error> {
		let mut key = self
			.garage
			.key_helper()
			.get_existing_matching_key(&query.key_pattern)
			.await?;
		if query.create_bucket {
			key.params_mut().unwrap().allow_create_bucket.update(true);
		}
		self.garage.key_table.insert(&key).await?;
		self.key_info_result(key).await
	}

	async fn handle_deny_key(&self, query: &KeyPermOpt) -> Result<AdminRpc, Error> {
		let mut key = self
			.garage
			.key_helper()
			.get_existing_matching_key(&query.key_pattern)
			.await?;
		if query.create_bucket {
			key.params_mut().unwrap().allow_create_bucket.update(false);
		}
		self.garage.key_table.insert(&key).await?;
		self.key_info_result(key).await
	}

	async fn handle_import_key(&self, query: &KeyImportOpt) -> Result<AdminRpc, Error> {
		if !query.yes {
			return Err(Error::BadRequest("This command is intended to re-import keys that were previously generated by Garage. If you want to create a new key, use `garage key new` instead. Add the --yes flag if you really want to re-import a key.".to_string()));
		}

		let prev_key = self.garage.key_table.get(&EmptyKey, &query.key_id).await?;
		if prev_key.is_some() {
			return Err(Error::BadRequest(format!("Key {} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. Sorry.", query.key_id)));
		}

		let imported_key = Key::import(&query.key_id, &query.secret_key, &query.name)
			.ok_or_bad_request("Invalid key format")?;
		self.garage.key_table.insert(&imported_key).await?;

		self.key_info_result(imported_key).await
	}

	async fn key_info_result(&self, key: Key) -> Result<AdminRpc, Error> {
		let mut relevant_buckets = HashMap::new();

		for (id, _) in key
			.state
			.as_option()
			.unwrap()
			.authorized_buckets
			.items()
			.iter()
		{
			if let Some(b) = self.garage.bucket_table.get(&EmptyKey, id).await? {
				relevant_buckets.insert(*id, b);
			}
		}

		Ok(AdminRpc::KeyInfo(key, relevant_buckets))
	}
}
@@ -1,540 +0,0 @@
mod block;
mod bucket;
mod key;

use std::collections::HashMap;
use std::fmt::Write;
use std::future::Future;
use std::sync::Arc;

use futures::future::FutureExt;

use serde::{Deserialize, Serialize};

use format_table::format_table_to_string;

use garage_util::background::BackgroundRunner;
use garage_util::data::*;
use garage_util::error::Error as GarageError;

use garage_table::replication::*;
use garage_table::*;

use garage_rpc::layout::PARTITION_BITS;
use garage_rpc::*;

use garage_block::manager::BlockResyncErrorInfo;

use garage_model::bucket_table::*;
use garage_model::garage::Garage;
use garage_model::helper::error::{Error, OkOrBadRequest};
use garage_model::key_table::*;
use garage_model::s3::mpu_table::MultipartUpload;
use garage_model::s3::version_table::Version;

use crate::cli::*;
use crate::repair::online::launch_online_repair;

pub const ADMIN_RPC_PATH: &str = "garage/admin_rpc.rs/Rpc";

#[derive(Debug, Serialize, Deserialize)]
#[allow(clippy::large_enum_variant)]
pub enum AdminRpc {
	BucketOperation(BucketOperation),
	KeyOperation(KeyOperation),
	LaunchRepair(RepairOpt),
	Stats(StatsOpt),
	Worker(WorkerOperation),
	BlockOperation(BlockOperation),
	MetaOperation(MetaOperation),

	// Replies
	Ok(String),
	BucketList(Vec<Bucket>),
	BucketInfo {
		bucket: Bucket,
		relevant_keys: HashMap<String, Key>,
		counters: HashMap<String, i64>,
		mpu_counters: HashMap<String, i64>,
	},
	KeyList(Vec<(String, String)>),
	KeyInfo(Key, HashMap<Uuid, Bucket>),
	WorkerList(
		HashMap<usize, garage_util::background::WorkerInfo>,
		WorkerListOpt,
	),
	WorkerVars(Vec<(Uuid, String, String)>),
	WorkerInfo(usize, garage_util::background::WorkerInfo),
	BlockErrorList(Vec<BlockResyncErrorInfo>),
	BlockInfo {
		hash: Hash,
		refcount: u64,
		versions: Vec<Result<Version, Uuid>>,
		uploads: Vec<MultipartUpload>,
	},
}

impl Rpc for AdminRpc {
	type Response = Result<AdminRpc, Error>;
}

pub struct AdminRpcHandler {
	garage: Arc<Garage>,
	background: Arc<BackgroundRunner>,
	endpoint: Arc<Endpoint<AdminRpc, Self>>,
}

impl AdminRpcHandler {
	pub fn new(garage: Arc<Garage>, background: Arc<BackgroundRunner>) -> Arc<Self> {
		let endpoint = garage.system.netapp.endpoint(ADMIN_RPC_PATH.into());
		let admin = Arc::new(Self {
			garage,
			background,
			endpoint,
		});
		admin.endpoint.set_handler(admin.clone());
		admin
	}

	// ================ REPAIR COMMANDS ====================

	async fn handle_launch_repair(self: &Arc<Self>, opt: RepairOpt) -> Result<AdminRpc, Error> {
		if !opt.yes {
			return Err(Error::BadRequest(
				"Please provide the --yes flag to initiate repair operations.".to_string(),
			));
		}
		if opt.all_nodes {
			let mut opt_to_send = opt.clone();
			opt_to_send.all_nodes = false;

			let mut failures = vec![];
			let all_nodes = self.garage.system.cluster_layout().all_nodes().to_vec();
			for node in all_nodes.iter() {
				let node = (*node).into();
				let resp = self
					.endpoint
					.call(
						&node,
						AdminRpc::LaunchRepair(opt_to_send.clone()),
						PRIO_NORMAL,
					)
					.await;
				if !matches!(resp, Ok(Ok(_))) {
					failures.push(node);
				}
			}
			if failures.is_empty() {
				Ok(AdminRpc::Ok("Repair launched on all nodes".to_string()))
			} else {
				Err(Error::BadRequest(format!(
					"Could not launch repair on nodes: {:?} (launched successfully on other nodes)",
					failures
				)))
			}
		} else {
			launch_online_repair(&self.garage, &self.background, opt).await?;
			Ok(AdminRpc::Ok(format!(
				"Repair launched on {:?}",
				self.garage.system.id
			)))
		}
	}

	// ================ STATS COMMANDS ====================

	async fn handle_stats(&self, opt: StatsOpt) -> Result<AdminRpc, Error> {
		if opt.all_nodes {
			let mut ret = String::new();
			let mut all_nodes = self.garage.system.cluster_layout().all_nodes().to_vec();
			for node in self.garage.system.get_known_nodes().iter() {
				if node.is_up && !all_nodes.contains(&node.id) {
					all_nodes.push(node.id);
				}
			}

			for node in all_nodes.iter() {
				let mut opt = opt.clone();
				opt.all_nodes = false;
				opt.skip_global = true;

				writeln!(&mut ret, "\n======================").unwrap();
				writeln!(&mut ret, "Stats for node {:?}:", node).unwrap();

				let node_id = (*node).into();
				match self
					.endpoint
					.call(&node_id, AdminRpc::Stats(opt), PRIO_NORMAL)
					.await
				{
					Ok(Ok(AdminRpc::Ok(s))) => writeln!(&mut ret, "{}", s).unwrap(),
					Ok(Ok(x)) => writeln!(&mut ret, "Bad answer: {:?}", x).unwrap(),
					Ok(Err(e)) => writeln!(&mut ret, "Remote error: {}", e).unwrap(),
					Err(e) => writeln!(&mut ret, "Network error: {}", e).unwrap(),
				}
			}

			writeln!(&mut ret, "\n======================").unwrap();
			write!(
				&mut ret,
				"Cluster statistics:\n\n{}",
				self.gather_cluster_stats()
			)
			.unwrap();

			Ok(AdminRpc::Ok(ret))
		} else {
			Ok(AdminRpc::Ok(self.gather_stats_local(opt)?))
		}
	}

	fn gather_stats_local(&self, opt: StatsOpt) -> Result<String, Error> {
		let mut ret = String::new();
		writeln!(
			&mut ret,
			"\nGarage version: {} [features: {}]\nRust compiler version: {}",
			garage_util::version::garage_version(),
			garage_util::version::garage_features()
				.map(|list| list.join(", "))
				.unwrap_or_else(|| "(unknown)".into()),
			garage_util::version::rust_version(),
		)
		.unwrap();

		writeln!(&mut ret, "\nDatabase engine: {}", self.garage.db.engine()).unwrap();

		// Gather table statistics
		let mut table = vec![" Table\tItems\tMklItems\tMklTodo\tGcTodo".into()];
		table.push(self.gather_table_stats(&self.garage.bucket_table)?);
		table.push(self.gather_table_stats(&self.garage.key_table)?);
		table.push(self.gather_table_stats(&self.garage.object_table)?);
		table.push(self.gather_table_stats(&self.garage.version_table)?);
		table.push(self.gather_table_stats(&self.garage.block_ref_table)?);
		write!(
			&mut ret,
			"\nTable stats:\n{}",
			format_table_to_string(table)
		)
		.unwrap();

		// Gather block manager statistics
		writeln!(&mut ret, "\nBlock manager stats:").unwrap();
		let rc_len = self.garage.block_manager.rc_len()?.to_string();

		writeln!(
			&mut ret,
			" number of RC entries (~= number of blocks): {}",
			rc_len
		)
		.unwrap();
		writeln!(
			&mut ret,
			" resync queue length: {}",
			self.garage.block_manager.resync.queue_len()?
		)
		.unwrap();
		writeln!(
			&mut ret,
			" blocks with resync errors: {}",
			self.garage.block_manager.resync.errors_len()?
		)
		.unwrap();

		if !opt.skip_global {
			write!(&mut ret, "\n{}", self.gather_cluster_stats()).unwrap();
		}

		Ok(ret)
	}

	fn gather_cluster_stats(&self) -> String {
		let mut ret = String::new();

		// Gather storage node and free space statistics for current nodes
		let layout = &self.garage.system.cluster_layout();
		let mut node_partition_count = HashMap::<Uuid, u64>::new();
		for short_id in layout.current().ring_assignment_data.iter() {
			let id = layout.current().node_id_vec[*short_id as usize];
			*node_partition_count.entry(id).or_default() += 1;
		}
		let node_info = self
			.garage
			.system
			.get_known_nodes()
			.into_iter()
			.map(|n| (n.id, n))
			.collect::<HashMap<_, _>>();

		let mut table = vec![" ID\tHostname\tZone\tCapacity\tPart.\tDataAvail\tMetaAvail".into()];
		for (id, parts) in node_partition_count.iter() {
			let info = node_info.get(id);
			let status = info.map(|x| &x.status);
			let role = layout.current().roles.get(id).and_then(|x| x.0.as_ref());
			let hostname = status.and_then(|x| x.hostname.as_deref()).unwrap_or("?");
			let zone = role.map(|x| x.zone.as_str()).unwrap_or("?");
			let capacity = role
				.map(|x| x.capacity_string())
				.unwrap_or_else(|| "?".into());
			let avail_str = |x| match x {
				Some((avail, total)) => {
					let pct = (avail as f64) / (total as f64) * 100.;
					let avail = bytesize::ByteSize::b(avail);
					let total = bytesize::ByteSize::b(total);
					format!("{}/{} ({:.1}%)", avail, total, pct)
				}
				None => "?".into(),
			};
			let data_avail = avail_str(status.and_then(|x| x.data_disk_avail));
			let meta_avail = avail_str(status.and_then(|x| x.meta_disk_avail));
			table.push(format!(
				" {:?}\t{}\t{}\t{}\t{}\t{}\t{}",
				id, hostname, zone, capacity, parts, data_avail, meta_avail
			));
		}
		write!(
			&mut ret,
			"Storage nodes:\n{}",
			format_table_to_string(table)
		)
		.unwrap();

		let meta_part_avail = node_partition_count
			.iter()
			.filter_map(|(id, parts)| {
				node_info
					.get(id)
					.and_then(|x| x.status.meta_disk_avail)
					.map(|c| c.0 / *parts)
			})
			.collect::<Vec<_>>();
		let data_part_avail = node_partition_count
			.iter()
			.filter_map(|(id, parts)| {
				node_info
					.get(id)
					.and_then(|x| x.status.data_disk_avail)
					.map(|c| c.0 / *parts)
			})
			.collect::<Vec<_>>();
		if !meta_part_avail.is_empty() && !data_part_avail.is_empty() {
			let meta_avail =
				bytesize::ByteSize(meta_part_avail.iter().min().unwrap() * (1 << PARTITION_BITS));
			let data_avail =
				bytesize::ByteSize(data_part_avail.iter().min().unwrap() * (1 << PARTITION_BITS));
			writeln!(
				&mut ret,
				"\nEstimated available storage space cluster-wide (might be lower in practice):"
			)
			.unwrap();
			if meta_part_avail.len() < node_partition_count.len()
				|| data_part_avail.len() < node_partition_count.len()
			{
				writeln!(&mut ret, " data: < {}", data_avail).unwrap();
				writeln!(&mut ret, " metadata: < {}", meta_avail).unwrap();
				writeln!(&mut ret, "A precise estimate could not be given as information is missing for some storage nodes.").unwrap();
			} else {
				writeln!(&mut ret, " data: {}", data_avail).unwrap();
				writeln!(&mut ret, " metadata: {}", meta_avail).unwrap();
			}
		}

		ret
	}

	fn gather_table_stats<F, R>(&self, t: &Arc<Table<F, R>>) -> Result<String, Error>
	where
		F: TableSchema + 'static,
		R: TableReplication + 'static,
	{
		let data_len = t.data.store.len().map_err(GarageError::from)?.to_string();
		let mkl_len = t.merkle_updater.merkle_tree_len()?.to_string();

		Ok(format!(
			" {}\t{}\t{}\t{}\t{}",
			F::TABLE_NAME,
			data_len,
			mkl_len,
			t.merkle_updater.todo_len()?,
			t.data.gc_todo_len()?
		))
	}

	// ================ WORKER COMMANDS ====================

	async fn handle_worker_cmd(&self, cmd: &WorkerOperation) -> Result<AdminRpc, Error> {
		match cmd {
			WorkerOperation::List { opt } => {
				let workers = self.background.get_worker_info();
				Ok(AdminRpc::WorkerList(workers, *opt))
			}
			WorkerOperation::Info { tid } => {
				let info = self
					.background
					.get_worker_info()
					.get(tid)
					.ok_or_bad_request(format!("No worker with TID {}", tid))?
					.clone();
				Ok(AdminRpc::WorkerInfo(*tid, info))
			}
			WorkerOperation::Get {
				all_nodes,
				variable,
			} => self.handle_get_var(*all_nodes, variable).await,
			WorkerOperation::Set {
				all_nodes,
				variable,
				value,
			} => self.handle_set_var(*all_nodes, variable, value).await,
		}
	}

	async fn handle_get_var(
		&self,
		all_nodes: bool,
		variable: &Option<String>,
	) -> Result<AdminRpc, Error> {
		if all_nodes {
			let mut ret = vec![];
			let all_nodes = self.garage.system.cluster_layout().all_nodes().to_vec();
			for node in all_nodes.iter() {
				let node = (*node).into();
				match self
					.endpoint
					.call(
						&node,
						AdminRpc::Worker(WorkerOperation::Get {
							all_nodes: false,
							variable: variable.clone(),
						}),
						PRIO_NORMAL,
					)
					.await??
				{
					AdminRpc::WorkerVars(v) => ret.extend(v),
					m => return Err(GarageError::unexpected_rpc_message(m).into()),
				}
			}
			Ok(AdminRpc::WorkerVars(ret))
		} else {
			#[allow(clippy::collapsible_else_if)]
			if let Some(v) = variable {
				Ok(AdminRpc::WorkerVars(vec![(
					self.garage.system.id,
					v.clone(),
					self.garage.bg_vars.get(v)?,
				)]))
			} else {
				let mut vars = self.garage.bg_vars.get_all();
				vars.sort();
				Ok(AdminRpc::WorkerVars(
					vars.into_iter()
						.map(|(k, v)| (self.garage.system.id, k.to_string(), v))
						.collect(),
				))
			}
		}
	}

	async fn handle_set_var(
		&self,
		all_nodes: bool,
		variable: &str,
		value: &str,
	) -> Result<AdminRpc, Error> {
		if all_nodes {
			let mut ret = vec![];
			let all_nodes = self.garage.system.cluster_layout().all_nodes().to_vec();
			for node in all_nodes.iter() {
				let node = (*node).into();
				match self
					.endpoint
					.call(
						&node,
						AdminRpc::Worker(WorkerOperation::Set {
							all_nodes: false,
							variable: variable.to_string(),
							value: value.to_string(),
						}),
						PRIO_NORMAL,
					)
					.await??
				{
					AdminRpc::WorkerVars(v) => ret.extend(v),
					m => return Err(GarageError::unexpected_rpc_message(m).into()),
				}
			}
			Ok(AdminRpc::WorkerVars(ret))
		} else {
			self.garage.bg_vars.set(variable, value)?;
			Ok(AdminRpc::WorkerVars(vec![(
				self.garage.system.id,
				variable.to_string(),
				value.to_string(),
			)]))
		}
	}

	// ================ META DB COMMANDS ====================

	async fn handle_meta_cmd(self: &Arc<Self>, mo: &MetaOperation) -> Result<AdminRpc, Error> {
		match mo {
			MetaOperation::Snapshot { all: true } => {
				let to = self.garage.system.cluster_layout().all_nodes().to_vec();

				let resps = futures::future::join_all(to.iter().map(|to| async move {
					let to = (*to).into();
					self.endpoint
						.call(
							&to,
							AdminRpc::MetaOperation(MetaOperation::Snapshot { all: false }),
							PRIO_NORMAL,
						)
						.await?
				}))
				.await;

				let mut ret = vec![];
				for (to, resp) in to.iter().zip(resps.iter()) {
					let res_str = match resp {
						Ok(_) => "ok".to_string(),
						Err(e) => format!("error: {}", e),
					};
					ret.push(format!("{:?}\t{}", to, res_str));
				}

				if resps.iter().any(Result::is_err) {
					Err(GarageError::Message(format_table_to_string(ret)).into())
				} else {
					Ok(AdminRpc::Ok(format_table_to_string(ret)))
				}
			}
			MetaOperation::Snapshot { all: false } => {
				garage_model::snapshot::async_snapshot_metadata(&self.garage).await?;
				Ok(AdminRpc::Ok("Snapshot has been saved.".into()))
			}
		}
	}
}

impl EndpointHandler<AdminRpc> for AdminRpcHandler {
	fn handle(
		self: &Arc<Self>,
		message: &AdminRpc,
		_from: NodeID,
	) -> impl Future<Output = Result<AdminRpc, Error>> + Send {
		let self2 = self.clone();
		async move {
			match message {
				AdminRpc::BucketOperation(bo) => self2.handle_bucket_cmd(bo).await,
				AdminRpc::KeyOperation(ko) => self2.handle_key_cmd(ko).await,
				AdminRpc::LaunchRepair(opt) => self2.handle_launch_repair(opt.clone()).await,
				AdminRpc::Stats(opt) => self2.handle_stats(opt.clone()).await,
				AdminRpc::Worker(wo) => self2.handle_worker_cmd(wo).await,
				AdminRpc::BlockOperation(bo) => self2.handle_block_cmd(bo).await,
				AdminRpc::MetaOperation(mo) => self2.handle_meta_cmd(mo).await,
				m => Err(GarageError::unexpected_rpc_message(m).into()),
			}
		}
		.boxed()
	}
}
@@ -1,280 +0,0 @@
use std::collections::{HashMap, HashSet};
use std::time::Duration;

use format_table::format_table;
use garage_util::error::*;

use garage_rpc::layout::*;
use garage_rpc::system::*;
use garage_rpc::*;

use garage_model::helper::error::Error as HelperError;

use crate::admin::*;
use crate::cli::*;

pub async fn cli_command_dispatch(
	cmd: Command,
	system_rpc_endpoint: &Endpoint<SystemRpc, ()>,
	admin_rpc_endpoint: &Endpoint<AdminRpc, ()>,
	rpc_host: NodeID,
) -> Result<(), HelperError> {
	match cmd {
		Command::Status => Ok(cmd_status(system_rpc_endpoint, rpc_host).await?),
		Command::Node(NodeOperation::Connect(connect_opt)) => {
			Ok(cmd_connect(system_rpc_endpoint, rpc_host, connect_opt).await?)
		}
		Command::Layout(layout_opt) => {
			Ok(cli_layout_command_dispatch(layout_opt, system_rpc_endpoint, rpc_host).await?)
		}
		Command::Bucket(bo) => {
			cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::BucketOperation(bo)).await
		}
		Command::Key(ko) => {
			cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::KeyOperation(ko)).await
		}
		Command::Repair(ro) => {
			cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::LaunchRepair(ro)).await
		}
		Command::Stats(so) => cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::Stats(so)).await,
		Command::Worker(wo) => cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::Worker(wo)).await,
		Command::Block(bo) => {
			cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::BlockOperation(bo)).await
		}
		Command::Meta(mo) => {
			cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::MetaOperation(mo)).await
		}
		_ => unreachable!(),
	}
}

pub async fn cmd_status(rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID) -> Result<(), Error> {
	let status = fetch_status(rpc_cli, rpc_host).await?;
	let layout = fetch_layout(rpc_cli, rpc_host).await?;

	println!("==== HEALTHY NODES ====");
	let mut healthy_nodes =
		vec!["ID\tHostname\tAddress\tTags\tZone\tCapacity\tDataAvail".to_string()];
	for adv in status.iter().filter(|adv| adv.is_up) {
		let host = adv.status.hostname.as_deref().unwrap_or("?");
		let addr = match adv.addr {
			Some(addr) => addr.to_string(),
			None => "N/A".to_string(),
		};
		if let Some(NodeRoleV(Some(cfg))) = layout.current().roles.get(&adv.id) {
			let data_avail = match &adv.status.data_disk_avail {
				_ if cfg.capacity.is_none() => "N/A".into(),
				Some((avail, total)) => {
					let pct = (*avail as f64) / (*total as f64) * 100.;
					let avail = bytesize::ByteSize::b(*avail);
					format!("{} ({:.1}%)", avail, pct)
				}
				None => "?".into(),
			};
			healthy_nodes.push(format!(
				"{id:?}\t{host}\t{addr}\t[{tags}]\t{zone}\t{capacity}\t{data_avail}",
				id = adv.id,
				host = host,
				addr = addr,
				tags = cfg.tags.join(","),
				zone = cfg.zone,
				capacity = cfg.capacity_string(),
				data_avail = data_avail,
			));
		} else {
			let prev_role = layout
				.versions
				.iter()
				.rev()
				.find_map(|x| match x.roles.get(&adv.id) {
					Some(NodeRoleV(Some(cfg))) => Some(cfg),
					_ => None,
				});
			if let Some(cfg) = prev_role {
				healthy_nodes.push(format!(
					"{id:?}\t{host}\t{addr}\t[{tags}]\t{zone}\tdraining metadata...",
					id = adv.id,
					host = host,
					addr = addr,
					tags = cfg.tags.join(","),
					zone = cfg.zone,
				));
			} else {
				let new_role = match layout.staging.get().roles.get(&adv.id) {
					Some(NodeRoleV(Some(_))) => "pending...",
					_ => "NO ROLE ASSIGNED",
				};
				healthy_nodes.push(format!(
					"{id:?}\t{h}\t{addr}\t\t\t{new_role}",
					id = adv.id,
					h = host,
					addr = addr,
					new_role = new_role,
				));
			}
		}
	}
	format_table(healthy_nodes);

	// Determine which nodes are unhealthy and print that to stdout
	let status_map = status
		.iter()
		.map(|adv| (adv.id, adv))
		.collect::<HashMap<_, _>>();

	let tf = timeago::Formatter::new();
	let mut drain_msg = false;
	let mut failed_nodes = vec!["ID\tHostname\tTags\tZone\tCapacity\tLast seen".to_string()];
	let mut listed = HashSet::new();
	for ver in layout.versions.iter().rev() {
		for (node, _, role) in ver.roles.items().iter() {
			let cfg = match role {
				NodeRoleV(Some(role)) if role.capacity.is_some() => role,
				_ => continue,
			};

			if listed.contains(node) {
				continue;
			}
			listed.insert(*node);

			let adv = status_map.get(node);
			if adv.map(|x| x.is_up).unwrap_or(false) {
				continue;
			}

			// Node is in a layout version, is not a gateway node, and is not up:
			// it is in a failed state, add proper line to the output
			let (host, last_seen) = match adv {
				Some(adv) => (
					adv.status.hostname.as_deref().unwrap_or("?"),
					adv.last_seen_secs_ago
						.map(|s| tf.convert(Duration::from_secs(s)))
						.unwrap_or_else(|| "never seen".into()),
				),
				None => ("??", "never seen".into()),
			};
			let capacity = if ver.version == layout.current().version {
				cfg.capacity_string()
			} else {
				drain_msg = true;
				"draining metadata...".to_string()
			};
			failed_nodes.push(format!(
				"{id:?}\t{host}\t[{tags}]\t{zone}\t{capacity}\t{last_seen}",
				id = node,
				host = host,
				tags = cfg.tags.join(","),
				zone = cfg.zone,
				capacity = capacity,
				last_seen = last_seen,
			));
		}
	}

	if failed_nodes.len() > 1 {
		println!("\n==== FAILED NODES ====");
		format_table(failed_nodes);
		if drain_msg {
			println!();
			println!("Your cluster is expecting to drain data from nodes that are currently unavailable.");
			println!("If these nodes are definitely dead, please review the layout history with");
			println!(
				"`garage layout history` and use `garage layout skip-dead-nodes` to force progress."
			);
		}
	}

	if print_staging_role_changes(&layout) {
		println!();
		println!("Please use `garage layout show` to check the proposed new layout and apply it.");
		println!();
	}

	Ok(())
}

pub async fn cmd_connect(
	rpc_cli: &Endpoint<SystemRpc, ()>,
	rpc_host: NodeID,
	args: ConnectNodeOpt,
) -> Result<(), Error> {
	match rpc_cli
		.call(&rpc_host, SystemRpc::Connect(args.node), PRIO_NORMAL)
		.await??
	{
		SystemRpc::Ok => {
			println!("Success.");
			Ok(())
		}
		m => Err(Error::unexpected_rpc_message(m)),
	}
}

pub async fn cmd_admin(
	rpc_cli: &Endpoint<AdminRpc, ()>,
	rpc_host: NodeID,
	args: AdminRpc,
) -> Result<(), HelperError> {
	match rpc_cli.call(&rpc_host, args, PRIO_NORMAL).await?? {
		AdminRpc::Ok(msg) => {
			println!("{}", msg);
		}
		AdminRpc::BucketList(bl) => {
			print_bucket_list(bl);
		}
		AdminRpc::BucketInfo {
			bucket,
			relevant_keys,
			counters,
			mpu_counters,
		} => {
			print_bucket_info(&bucket, &relevant_keys, &counters, &mpu_counters);
		}
		AdminRpc::KeyList(kl) => {
			print_key_list(kl);
		}
		AdminRpc::KeyInfo(key, rb) => {
			print_key_info(&key, &rb);
		}
		AdminRpc::WorkerList(wi, wlo) => {
			print_worker_list(wi, wlo);
		}
		AdminRpc::WorkerVars(wv) => {
			print_worker_vars(wv);
		}
		AdminRpc::WorkerInfo(tid, wi) => {
			print_worker_info(tid, wi);
		}
		AdminRpc::BlockErrorList(el) => {
			print_block_error_list(el);
		}
		AdminRpc::BlockInfo {
			hash,
			refcount,
			versions,
			uploads,
		} => {
			print_block_info(hash, refcount, versions, uploads);
		}
		r => {
			error!("Unexpected response: {:?}", r);
		}
	}
	Ok(())
}

// ---- utility ----

pub async fn fetch_status(
	rpc_cli: &Endpoint<SystemRpc, ()>,
	rpc_host: NodeID,
) -> Result<Vec<KnownNodeInfo>, Error> {
	match rpc_cli
		.call(&rpc_host, SystemRpc::GetKnownNodes, PRIO_NORMAL)
		.await??
	{
		SystemRpc::ReturnKnownNodes(nodes) => Ok(nodes),
		resp => Err(Error::unexpected_rpc_message(resp)),
	}
}
@@ -1,182 +1,13 @@
use bytesize::ByteSize;

use format_table::format_table;
use garage_util::crdt::Crdt;
use garage_util::error::*;

use garage_rpc::layout::*;
use garage_rpc::system::*;
use garage_rpc::*;

use crate::cli::*;

pub async fn cli_layout_command_dispatch(
	cmd: LayoutOperation,
	system_rpc_endpoint: &Endpoint<SystemRpc, ()>,
	rpc_host: NodeID,
) -> Result<(), Error> {
	match cmd {
		LayoutOperation::Assign(assign_opt) => {
			cmd_assign_role(system_rpc_endpoint, rpc_host, assign_opt).await
		}
		LayoutOperation::Remove(remove_opt) => {
			cmd_remove_role(system_rpc_endpoint, rpc_host, remove_opt).await
		}
		LayoutOperation::Show => cmd_show_layout(system_rpc_endpoint, rpc_host).await,
		LayoutOperation::Apply(apply_opt) => {
			cmd_apply_layout(system_rpc_endpoint, rpc_host, apply_opt).await
		}
		LayoutOperation::Revert(revert_opt) => {
			cmd_revert_layout(system_rpc_endpoint, rpc_host, revert_opt).await
		}
		LayoutOperation::Config(config_opt) => {
			cmd_config_layout(system_rpc_endpoint, rpc_host, config_opt).await
		}
		LayoutOperation::History => cmd_layout_history(system_rpc_endpoint, rpc_host).await,
		LayoutOperation::SkipDeadNodes(assume_sync_opt) => {
			cmd_layout_skip_dead_nodes(system_rpc_endpoint, rpc_host, assume_sync_opt).await
		}
	}
}

pub async fn cmd_assign_role(
	rpc_cli: &Endpoint<SystemRpc, ()>,
	rpc_host: NodeID,
	args: AssignRoleOpt,
) -> Result<(), Error> {
	let status = match rpc_cli
		.call(&rpc_host, SystemRpc::GetKnownNodes, PRIO_NORMAL)
		.await??
	{
		SystemRpc::ReturnKnownNodes(nodes) => nodes,
		resp => return Err(Error::Message(format!("Invalid RPC response: {:?}", resp))),
	};

	let mut layout = fetch_layout(rpc_cli, rpc_host).await?;
	let all_nodes = layout.get_all_nodes();

	let added_nodes = args
		.node_ids
		.iter()
		.map(|node_id| {
			find_matching_node(
				status
					.iter()
					.map(|adv| adv.id)
					.chain(all_nodes.iter().cloned()),
				node_id,
			)
		})
		.collect::<Result<Vec<_>, _>>()?;

	let mut roles = layout.current().roles.clone();
	roles.merge(&layout.staging.get().roles);

	for replaced in args.replace.iter() {
		let replaced_node = find_matching_node(all_nodes.iter().cloned(), replaced)?;
		match roles.get(&replaced_node) {
			Some(NodeRoleV(Some(_))) => {
				layout
					.staging
					.get_mut()
					.roles
					.merge(&roles.update_mutator(replaced_node, NodeRoleV(None)));
			}
			_ => {
				return Err(Error::Message(format!(
					"Cannot replace node {:?} as it is not currently in planned layout",
					replaced_node
				)));
			}
		}
	}

	if args.capacity.is_some() && args.gateway {
		return Err(Error::Message(
			"-c and -g are mutually exclusive, please configure node either with c>0 to act as a storage node or with -g to act as a gateway node".into()));
	}
	if args.capacity == Some(ByteSize::b(0)) {
		return Err(Error::Message("Invalid capacity value: 0".into()));
	}

	for added_node in added_nodes {
		let new_entry = match roles.get(&added_node) {
			Some(NodeRoleV(Some(old))) => {
				let capacity = match args.capacity {
					Some(c) => Some(c.as_u64()),
					None if args.gateway => None,
					None => old.capacity,
				};
				let tags = if args.tags.is_empty() {
					old.tags.clone()
				} else {
					args.tags.clone()
				};
				NodeRole {
					zone: args.zone.clone().unwrap_or_else(|| old.zone.to_string()),
					capacity,
					tags,
				}
			}
			_ => {
				let capacity = match args.capacity {
					Some(c) => Some(c.as_u64()),
					None if args.gateway => None,
					None => return Err(Error::Message(
						"Please specify a capacity with the -c flag, or set node explicitly as gateway with -g".into())),
				};
				NodeRole {
					zone: args
						.zone
						.clone()
						.ok_or("Please specify a zone with the -z flag")?,
					capacity,
					tags: args.tags.clone(),
				}
			}
		};

		layout
			.staging
			.get_mut()
			.roles
			.merge(&roles.update_mutator(added_node, NodeRoleV(Some(new_entry))));
	}

	send_layout(rpc_cli, rpc_host, layout).await?;

	println!("Role changes are staged but not yet committed.");
	println!("Use `garage layout show` to view staged role changes,");
	println!("and `garage layout apply` to enact staged changes.");
	Ok(())
}

pub async fn cmd_remove_role(
	rpc_cli: &Endpoint<SystemRpc, ()>,
	rpc_host: NodeID,
	args: RemoveRoleOpt,
) -> Result<(), Error> {
	let mut layout = fetch_layout(rpc_cli, rpc_host).await?;

	let mut roles = layout.current().roles.clone();
	roles.merge(&layout.staging.get().roles);

	let deleted_node =
		find_matching_node(roles.items().iter().map(|(id, _, _)| *id), &args.node_id)?;

	layout
		.staging
		.get_mut()
		.roles
		.merge(&roles.update_mutator(deleted_node, NodeRoleV(None)));

	send_layout(rpc_cli, rpc_host, layout).await?;

	println!("Role removal is staged but not yet committed.");
	println!("Use `garage layout show` to view staged role changes,");
	println!("and `garage layout apply` to enact staged changes.");
	Ok(())
}
use crate::cli::structs::*;

pub async fn cmd_show_layout(
	rpc_cli: &Endpoint<SystemRpc, ()>,
@@ -226,47 +57,6 @@ pub async fn cmd_show_layout(
	Ok(())
}

pub async fn cmd_apply_layout(
	rpc_cli: &Endpoint<SystemRpc, ()>,
	rpc_host: NodeID,
	apply_opt: ApplyLayoutOpt,
) -> Result<(), Error> {
	let layout = fetch_layout(rpc_cli, rpc_host).await?;

	let (layout, msg) = layout.apply_staged_changes(apply_opt.version)?;
	for line in msg.iter() {
		println!("{}", line);
	}

	send_layout(rpc_cli, rpc_host, layout).await?;

	println!("New cluster layout with updated role assignment has been applied in cluster.");
	println!("Data will now be moved around between nodes accordingly.");

	Ok(())
}

pub async fn cmd_revert_layout(
	rpc_cli: &Endpoint<SystemRpc, ()>,
	rpc_host: NodeID,
	revert_opt: RevertLayoutOpt,
) -> Result<(), Error> {
	if !revert_opt.yes {
		return Err(Error::Message(
			"Please add the --yes flag to run the layout revert operation".into(),
		));
	}

	let layout = fetch_layout(rpc_cli, rpc_host).await?;

	let layout = layout.revert_staged_changes()?;

	send_layout(rpc_cli, rpc_host, layout).await?;

	println!("All proposed role changes in cluster layout have been canceled.");
	Ok(())
}

pub async fn cmd_config_layout(
	rpc_cli: &Endpoint<SystemRpc, ()>,
	rpc_host: NodeID,
@@ -470,6 +260,19 @@ pub async fn cmd_layout_skip_dead_nodes(

// --- utility ---

pub async fn fetch_status(
	rpc_cli: &Endpoint<SystemRpc, ()>,
	rpc_host: NodeID,
) -> Result<Vec<KnownNodeInfo>, Error> {
	match rpc_cli
		.call(&rpc_host, SystemRpc::GetKnownNodes, PRIO_NORMAL)
		.await??
	{
		SystemRpc::ReturnKnownNodes(nodes) => Ok(nodes),
		resp => Err(Error::unexpected_rpc_message(resp)),
	}
}

pub async fn fetch_layout(
	rpc_cli: &Endpoint<SystemRpc, ()>,
	rpc_host: NodeID,
@@ -1,13 +1,7 @@
pub(crate) mod cmd;
pub(crate) mod init;
pub(crate) mod layout;
pub(crate) mod structs;
pub(crate) mod util;

pub(crate) mod convert_db;
pub(crate) mod init;
pub(crate) mod repair;

pub(crate) use cmd::*;
pub(crate) use init::*;
pub(crate) use layout::*;
pub(crate) use structs::*;
pub(crate) use util::*;
pub(crate) mod layout;
@@ -1,4 +1,3 @@
use serde::{Deserialize, Serialize};
use structopt::StructOpt;

use garage_util::version::garage_version;
@@ -190,7 +189,7 @@ pub struct SkipDeadNodesOpt {
	pub(crate) allow_missing_data: bool,
}

#[derive(Serialize, Deserialize, StructOpt, Debug)]
#[derive(StructOpt, Debug)]
pub enum BucketOperation {
	/// List buckets
	#[structopt(name = "list", version = garage_version())]
@@ -237,7 +236,7 @@ pub enum BucketOperation {
	CleanupIncompleteUploads(CleanupIncompleteUploadsOpt),
}

#[derive(Serialize, Deserialize, StructOpt, Debug)]
#[derive(StructOpt, Debug)]
pub struct WebsiteOpt {
	/// Create
	#[structopt(long = "allow")]
@@ -259,13 +258,13 @@ pub struct WebsiteOpt {
	pub error_document: Option<String>,
}

#[derive(Serialize, Deserialize, StructOpt, Debug)]
#[derive(StructOpt, Debug)]
pub struct BucketOpt {
	/// Bucket name
	pub name: String,
}

#[derive(Serialize, Deserialize, StructOpt, Debug)]
#[derive(StructOpt, Debug)]
pub struct DeleteBucketOpt {
	/// Bucket name
	pub name: String,
@@ -275,7 +274,7 @@ pub struct DeleteBucketOpt {
	pub yes: bool,
}

#[derive(Serialize, Deserialize, StructOpt, Debug)]
#[derive(StructOpt, Debug)]
pub struct AliasBucketOpt {
	/// Existing bucket name (its alias in global namespace or its full hex uuid)
	pub existing_bucket: String,
@@ -288,7 +287,7 @@ pub struct AliasBucketOpt {
	pub local: Option<String>,
}

#[derive(Serialize, Deserialize, StructOpt, Debug)]
#[derive(StructOpt, Debug)]
pub struct UnaliasBucketOpt {
	/// Bucket name
	pub name: String,
@@ -298,7 +297,7 @@ pub struct UnaliasBucketOpt {
	pub local: Option<String>,
}

#[derive(Serialize, Deserialize, StructOpt, Debug)]
#[derive(StructOpt, Debug)]
pub struct PermBucketOpt {
	/// Access key name or ID
	#[structopt(long = "key")]
@@ -321,7 +320,7 @@ pub struct PermBucketOpt {
	pub bucket: String,
}

#[derive(Serialize, Deserialize, StructOpt, Debug)]
#[derive(StructOpt, Debug)]
pub struct SetQuotasOpt {
	/// Bucket name
	pub bucket: String,
@@ -336,7 +335,7 @@ pub struct SetQuotasOpt {
	pub max_objects: Option<String>,
}

#[derive(Serialize, Deserialize, StructOpt, Debug)]
#[derive(StructOpt, Debug)]
pub struct CleanupIncompleteUploadsOpt {
	/// Abort multipart uploads older than this value
	#[structopt(long = "older-than", default_value = "1d")]
@@ -347,7 +346,7 @@ pub struct CleanupIncompleteUploadsOpt {
	pub buckets: Vec<String>,
}

#[derive(Serialize, Deserialize, StructOpt, Debug)]
#[derive(StructOpt, Debug)]
pub enum KeyOperation {
	/// List keys
	#[structopt(name = "list", version = garage_version())]
@@ -382,7 +381,7 @@ pub enum KeyOperation {
	Import(KeyImportOpt),
}

#[derive(Serialize, Deserialize, StructOpt, Debug)]
#[derive(StructOpt, Debug)]
pub struct KeyInfoOpt {
	/// ID or name of the key
	pub key_pattern: String,
@@ -391,14 +390,14 @@ pub struct KeyInfoOpt {
	pub show_secret: bool,
}

#[derive(Serialize, Deserialize, StructOpt, Debug)]
#[derive(StructOpt, Debug)]
pub struct KeyNewOpt {
	/// Name of the key
	#[structopt(default_value = "Unnamed key")]
	pub name: String,
}

#[derive(Serialize, Deserialize, StructOpt, Debug)]
#[derive(StructOpt, Debug)]
pub struct KeyRenameOpt {
	/// ID or name of the key
	pub key_pattern: String,
@@ -407,7 +406,7 @@ pub struct KeyRenameOpt {
	pub new_name: String,
}

#[derive(Serialize, Deserialize, StructOpt, Debug)]
#[derive(StructOpt, Debug)]
pub struct KeyDeleteOpt {
	/// ID or name of the key
	pub key_pattern: String,
@@ -417,7 +416,7 @@ pub struct KeyDeleteOpt {
	pub yes: bool,
}

#[derive(Serialize, Deserialize, StructOpt, Debug)]
#[derive(StructOpt, Debug)]
pub struct KeyPermOpt {
	/// ID or name of the key
	pub key_pattern: String,
@@ -427,7 +426,7 @@ pub struct KeyPermOpt {
	pub create_bucket: bool,
}

#[derive(Serialize, Deserialize, StructOpt, Debug)]
#[derive(StructOpt, Debug)]
pub struct KeyImportOpt {
	/// Access key ID
	pub key_id: String,
@@ -444,7 +443,7 @@ pub struct KeyImportOpt {
	pub yes: bool,
}

#[derive(Serialize, Deserialize, StructOpt, Debug, Clone)]
#[derive(StructOpt, Debug, Clone)]
pub struct RepairOpt {
	/// Launch repair operation on all nodes
	#[structopt(short = "a", long = "all-nodes")]
@@ -458,7 +457,7 @@ pub struct RepairOpt {
	pub what: RepairWhat,
}

#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)]
#[derive(StructOpt, Debug, Eq, PartialEq, Clone)]
pub enum RepairWhat {
	/// Do a full sync of metadata tables
	#[structopt(name = "tables", version = garage_version())]
@@ -489,7 +488,7 @@ pub enum RepairWhat {
	Rebalance,
}

#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)]
#[derive(StructOpt, Debug, Eq, PartialEq, Clone)]
pub enum ScrubCmd {
	/// Start scrub
	#[structopt(name = "start", version = garage_version())]
@@ -503,15 +502,9 @@ pub enum ScrubCmd {
	/// Cancel scrub in progress
	#[structopt(name = "cancel", version = garage_version())]
	Cancel,
	/// Set tranquility level for in-progress and future scrubs
	#[structopt(name = "set-tranquility", version = garage_version())]
	SetTranquility {
		#[structopt()]
		tranquility: u32,
	},
}

#[derive(Serialize, Deserialize, StructOpt, Debug, Clone)]
#[derive(StructOpt, Debug, Clone)]
pub struct OfflineRepairOpt {
	/// Confirm the launch of the repair operation
	#[structopt(long = "yes")]
@@ -521,7 +514,7 @@ pub struct OfflineRepairOpt {
	pub what: OfflineRepairWhat,
}

#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)]
#[derive(StructOpt, Debug, Eq, PartialEq, Clone)]
pub enum OfflineRepairWhat {
	/// Repair K2V item counters
	#[cfg(feature = "k2v")]
@@ -532,19 +525,14 @@ pub enum OfflineRepairWhat {
	ObjectCounters,
}

#[derive(Serialize, Deserialize, StructOpt, Debug, Clone)]
#[derive(StructOpt, Debug, Clone)]
pub struct StatsOpt {
	/// Gather statistics from all nodes
	#[structopt(short = "a", long = "all-nodes")]
	pub all_nodes: bool,

	/// Don't show global cluster stats (internal use in RPC)
	#[structopt(skip)]
	#[serde(default)]
	pub skip_global: bool,
}

#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)]
#[derive(StructOpt, Debug, Eq, PartialEq, Clone)]
pub enum WorkerOperation {
	/// List all workers on Garage node
	#[structopt(name = "list", version = garage_version())]
@@ -577,7 +565,7 @@ pub enum WorkerOperation {
	},
}

#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone, Copy)]
#[derive(StructOpt, Debug, Eq, PartialEq, Clone, Copy)]
pub struct WorkerListOpt {
	/// Show only busy workers
	#[structopt(short = "b", long = "busy")]
@@ -587,7 +575,7 @@ pub struct WorkerListOpt {
	pub errors: bool,
}

#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)]
#[derive(StructOpt, Debug, Eq, PartialEq, Clone)]
pub enum BlockOperation {
	/// List all blocks that currently have a resync error
	#[structopt(name = "list-errors", version = garage_version())]
@@ -619,7 +607,7 @@ pub enum BlockOperation {
	},
}

#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone, Copy)]
#[derive(StructOpt, Debug, Eq, PartialEq, Clone, Copy)]
pub enum MetaOperation {
	/// Save a snapshot of the metadata db file
	#[structopt(name = "snapshot", version = garage_version())]
@@ -1,457 +0,0 @@
use std::collections::HashMap;
use std::time::Duration;

use format_table::format_table;
use garage_util::background::*;
use garage_util::crdt::*;
use garage_util::data::*;
use garage_util::error::*;
use garage_util::time::*;

use garage_block::manager::BlockResyncErrorInfo;

use garage_model::bucket_table::*;
use garage_model::key_table::*;
use garage_model::s3::mpu_table::{self, MultipartUpload};
use garage_model::s3::object_table;
use garage_model::s3::version_table::*;

use crate::cli::structs::WorkerListOpt;

pub fn print_bucket_list(bl: Vec<Bucket>) {
	println!("List of buckets:");

	let mut table = vec![];
	for bucket in bl {
		let aliases = bucket
			.aliases()
			.iter()
			.filter(|(_, _, active)| *active)
			.map(|(name, _, _)| name.to_string())
			.collect::<Vec<_>>();
		let local_aliases_n = match &bucket
			.local_aliases()
			.iter()
			.filter(|(_, _, active)| *active)
			.collect::<Vec<_>>()[..]
		{
			[] => "".into(),
			[((k, n), _, _)] => format!("{}:{}", k, n),
			s => format!("[{} local aliases]", s.len()),
		};

		table.push(format!(
			"\t{}\t{}\t{}",
			aliases.join(","),
			local_aliases_n,
			hex::encode(bucket.id),
		));
	}
	format_table(table);
}

pub fn print_key_list(kl: Vec<(String, String)>) {
	println!("List of keys:");
	let mut table = vec![];
	for key in kl {
		table.push(format!("\t{}\t{}", key.0, key.1));
	}
	format_table(table);
}

pub fn print_key_info(key: &Key, relevant_buckets: &HashMap<Uuid, Bucket>) {
	let bucket_global_aliases = |b: &Uuid| {
		if let Some(bucket) = relevant_buckets.get(b) {
			if let Some(p) = bucket.state.as_option() {
				return p
					.aliases
					.items()
					.iter()
					.filter(|(_, _, active)| *active)
					.map(|(a, _, _)| a.clone())
					.collect::<Vec<_>>()
					.join(", ");
			}
		}

		"".to_string()
	};

	match &key.state {
		Deletable::Present(p) => {
			println!("Key name: {}", p.name.get());
			println!("Key ID: {}", key.key_id);
			println!("Secret key: {}", p.secret_key);
			println!("Can create buckets: {}", p.allow_create_bucket.get());
			println!("\nKey-specific bucket aliases:");
			let mut table = vec![];
			for (alias_name, _, alias) in p.local_aliases.items().iter() {
				if let Some(bucket_id) = alias {
					table.push(format!(
						"\t{}\t{}\t{}",
						alias_name,
						bucket_global_aliases(bucket_id),
						hex::encode(bucket_id)
					));
				}
			}
			format_table(table);

			println!("\nAuthorized buckets:");
			let mut table = vec![];
			for (bucket_id, perm) in p.authorized_buckets.items().iter() {
				if !perm.is_any() {
					continue;
				}
				let rflag = if perm.allow_read { "R" } else { " " };
				let wflag = if perm.allow_write { "W" } else { " " };
				let oflag = if perm.allow_owner { "O" } else { " " };
				let local_aliases = p
					.local_aliases
					.items()
					.iter()
					.filter(|(_, _, a)| *a == Some(*bucket_id))
					.map(|(a, _, _)| a.clone())
					.collect::<Vec<_>>()
					.join(", ");
				table.push(format!(
					"\t{}{}{}\t{}\t{}\t{:?}",
					rflag,
					wflag,
					oflag,
					bucket_global_aliases(bucket_id),
					local_aliases,
					bucket_id
				));
			}
			format_table(table);
		}
		Deletable::Deleted => {
			println!("Key {} is deleted.", key.key_id);
		}
	}
}

pub fn print_bucket_info(
	bucket: &Bucket,
	relevant_keys: &HashMap<String, Key>,
	counters: &HashMap<String, i64>,
	mpu_counters: &HashMap<String, i64>,
) {
	let key_name = |k| {
		relevant_keys
			.get(k)
			.map(|k| k.params().unwrap().name.get().as_str())
			.unwrap_or("<deleted>")
	};

	println!("Bucket: {}", hex::encode(bucket.id));
	match &bucket.state {
		Deletable::Deleted => println!("Bucket is deleted."),
		Deletable::Present(p) => {
			let size =
				bytesize::ByteSize::b(*counters.get(object_table::BYTES).unwrap_or(&0) as u64);
			println!(
				"\nSize: {} ({})",
				size.to_string_as(true),
				size.to_string_as(false)
			);
			println!(
				"Objects: {}",
				*counters.get(object_table::OBJECTS).unwrap_or(&0)
			);
			println!(
				"Unfinished uploads (multipart and non-multipart): {}",
				*counters.get(object_table::UNFINISHED_UPLOADS).unwrap_or(&0)
			);
			println!(
				"Unfinished multipart uploads: {}",
				*mpu_counters.get(mpu_table::UPLOADS).unwrap_or(&0)
			);
			let mpu_size =
				bytesize::ByteSize::b(*mpu_counters.get(mpu_table::BYTES).unwrap_or(&0) as u64);
			println!(
				"Size of unfinished multipart uploads: {} ({})",
				mpu_size.to_string_as(true),
				mpu_size.to_string_as(false),
			);

			println!("\nWebsite access: {}", p.website_config.get().is_some());

			let quotas = p.quotas.get();
			if quotas.max_size.is_some() || quotas.max_objects.is_some() {
				println!("\nQuotas:");
				if let Some(ms) = quotas.max_size {
					let ms = bytesize::ByteSize::b(ms);
					println!(
						" maximum size: {} ({})",
						ms.to_string_as(true),
						ms.to_string_as(false)
					);
				}
				if let Some(mo) = quotas.max_objects {
					println!(" maximum number of objects: {}", mo);
				}
			}

			println!("\nGlobal aliases:");
			for (alias, _, active) in p.aliases.items().iter() {
				if *active {
					println!(" {}", alias);
				}
			}

			println!("\nKey-specific aliases:");
			let mut table = vec![];
			for ((key_id, alias), _, active) in p.local_aliases.items().iter() {
				if *active {
					table.push(format!("\t{} ({})\t{}", key_id, key_name(key_id), alias));
				}
			}
			format_table(table);

			println!("\nAuthorized keys:");
			let mut table = vec![];
			for (k, perm) in p.authorized_keys.items().iter() {
				if !perm.is_any() {
					continue;
				}
				let rflag = if perm.allow_read { "R" } else { " " };
				let wflag = if perm.allow_write { "W" } else { " " };
				let oflag = if perm.allow_owner { "O" } else { " " };
				table.push(format!(
					"\t{}{}{}\t{}\t{}",
					rflag,
					wflag,
					oflag,
					k,
					key_name(k)
				));
			}
			format_table(table);
		}
	};
}

pub fn find_matching_node(
	cand: impl std::iter::Iterator<Item = Uuid>,
	pattern: &str,
) -> Result<Uuid, Error> {
	let mut candidates = vec![];
	for c in cand {
		if hex::encode(c).starts_with(pattern) && !candidates.contains(&c) {
			candidates.push(c);
		}
	}
	if candidates.len() != 1 {
		Err(Error::Message(format!(
			"{} nodes match '{}'",
			candidates.len(),
			pattern,
		)))
	} else {
		Ok(candidates[0])
	}
}

pub fn print_worker_list(wi: HashMap<usize, WorkerInfo>, wlo: WorkerListOpt) {
	let mut wi = wi.into_iter().collect::<Vec<_>>();
	wi.sort_by_key(|(tid, info)| {
		(
			match info.state {
				WorkerState::Busy | WorkerState::Throttled(_) => 0,
				WorkerState::Idle => 1,
				WorkerState::Done => 2,
			},
			*tid,
		)
	});

	let mut table = vec!["TID\tState\tName\tTranq\tDone\tQueue\tErrors\tConsec\tLast".to_string()];
	for (tid, info) in wi.iter() {
		if wlo.busy && !matches!(info.state, WorkerState::Busy | WorkerState::Throttled(_)) {
			continue;
		}
		if wlo.errors && info.errors == 0 {
			continue;
		}

		let tf = timeago::Formatter::new();
		let err_ago = info
			.last_error
			.as_ref()
			.map(|(_, t)| tf.convert(Duration::from_millis(now_msec() - t)))
			.unwrap_or_default();
		let (total_err, consec_err) = if info.errors > 0 {
			(info.errors.to_string(), info.consecutive_errors.to_string())
		} else {
			("-".into(), "-".into())
		};

		table.push(format!(
			"{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}",
			tid,
			info.state,
			info.name,
			info.status
				.tranquility
				.as_ref()
				.map(ToString::to_string)
				.unwrap_or_else(|| "-".into()),
			info.status.progress.as_deref().unwrap_or("-"),
			info.status
				.queue_length
				.as_ref()
				.map(ToString::to_string)
				.unwrap_or_else(|| "-".into()),
			total_err,
			consec_err,
			err_ago,
		));
	}
	format_table(table);
}

pub fn print_worker_info(tid: usize, info: WorkerInfo) {
	let mut table = vec![];
	table.push(format!("Task id:\t{}", tid));
	table.push(format!("Worker name:\t{}", info.name));
	match info.state {
		WorkerState::Throttled(t) => {
			table.push(format!(
				"Worker state:\tBusy (throttled, paused for {:.3}s)",
				t
			));
		}
		s => {
			table.push(format!("Worker state:\t{}", s));
		}
	};
	if let Some(tql) = info.status.tranquility {
		table.push(format!("Tranquility:\t{}", tql));
	}

	table.push("".into());
	table.push(format!("Total errors:\t{}", info.errors));
	table.push(format!("Consecutive errs:\t{}", info.consecutive_errors));
	if let Some((s, t)) = info.last_error {
		table.push(format!("Last error:\t{}", s));
		let tf = timeago::Formatter::new();
		table.push(format!(
			"Last error time:\t{}",
			tf.convert(Duration::from_millis(now_msec() - t))
		));
	}

	table.push("".into());
	if let Some(p) = info.status.progress {
		table.push(format!("Progress:\t{}", p));
	}
	if let Some(ql) = info.status.queue_length {
		table.push(format!("Queue length:\t{}", ql));
	}
	if let Some(pe) = info.status.persistent_errors {
		table.push(format!("Persistent errors:\t{}", pe));
	}

	for (i, s) in info.status.freeform.iter().enumerate() {
		if i == 0 {
			if table.last() != Some(&"".into()) {
				table.push("".into());
			}
			table.push(format!("Message:\t{}", s));
		} else {
			table.push(format!("\t{}", s));
		}
	}
	format_table(table);
}

pub fn print_worker_vars(wv: Vec<(Uuid, String, String)>) {
	let table = wv
		.into_iter()
		.map(|(n, k, v)| format!("{:?}\t{}\t{}", n, k, v))
		.collect::<Vec<_>>();
	format_table(table);
}

pub fn print_block_error_list(el: Vec<BlockResyncErrorInfo>) {
	let now = now_msec();
	let tf = timeago::Formatter::new();
	let mut tf2 = timeago::Formatter::new();
	tf2.ago("");

	let mut table = vec!["Hash\tRC\tErrors\tLast error\tNext try".into()];
	for e in el {
		let next_try = if e.next_try > now {
			tf2.convert(Duration::from_millis(e.next_try - now))
		} else {
			"asap".to_string()
		};
		table.push(format!(
			"{}\t{}\t{}\t{}\tin {}",
			hex::encode(e.hash.as_slice()),
			e.refcount,
			e.error_count,
			tf.convert(Duration::from_millis(now - e.last_try)),
			next_try
		));
	}
	format_table(table);
}

pub fn print_block_info(
	hash: Hash,
	refcount: u64,
	versions: Vec<Result<Version, Uuid>>,
	uploads: Vec<MultipartUpload>,
) {
	println!("Block hash: {}", hex::encode(hash.as_slice()));
	println!("Refcount: {}", refcount);
	println!();

	let mut table = vec!["Version\tBucket\tKey\tMPU\tDeleted".into()];
	let mut nondeleted_count = 0;
	for v in versions.iter() {
		match v {
			Ok(ver) => {
				match &ver.backlink {
					VersionBacklink::Object { bucket_id, key } => {
						table.push(format!(
							"{:?}\t{:?}\t{}\t\t{:?}",
							ver.uuid,
							bucket_id,
							key,
							ver.deleted.get()
						));
					}
					VersionBacklink::MultipartUpload { upload_id } => {
						let upload = uploads.iter().find(|x| x.upload_id == *upload_id);
						table.push(format!(
							"{:?}\t{:?}\t{}\t{:?}\t{:?}",
							ver.uuid,
							upload.map(|u| u.bucket_id).unwrap_or_default(),
							upload.map(|u| u.key.as_str()).unwrap_or_default(),
							upload_id,
							ver.deleted.get()
						));
					}
				}
				if !ver.deleted.get() {
					nondeleted_count += 1;
				}
			}
			Err(vh) => {
				table.push(format!("{:?}\t\t\t\tyes", vh));
			}
		}
	}
	format_table(table);

	if refcount != nondeleted_count {
		println!();
		println!(
			"Warning: refcount does not match number of non-deleted versions, you should try `garage repair block-rc`."
		);
	}
}
145  src/garage/cli_v2/block.rs  Normal file

@@ -0,0 +1,145 @@
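// `garage block` subcommands of the v2 CLI. These operate on a single node,
// so every call goes through Cli::local_api_request (see cli_v2/mod.rs).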
//use bytesize::ByteSize;
use format_table::format_table;

use garage_util::error::*;

use garage_api_admin::api::*;

use crate::cli::structs::*;
use crate::cli_v2::*;

impl Cli {
	pub async fn cmd_block(&self, cmd: BlockOperation) -> Result<(), Error> {
		match cmd {
			BlockOperation::ListErrors => self.cmd_list_block_errors().await,
			BlockOperation::Info { hash } => self.cmd_get_block_info(hash).await,
			BlockOperation::RetryNow { all, blocks } => self.cmd_block_retry_now(all, blocks).await,
			BlockOperation::Purge { yes, blocks } => self.cmd_block_purge(yes, blocks).await,
		}
	}

	pub async fn cmd_list_block_errors(&self) -> Result<(), Error> {
		let errors = self.local_api_request(LocalListBlockErrorsRequest).await?.0;

		let tf = timeago::Formatter::new();
		let mut tf2 = timeago::Formatter::new();
		tf2.ago("");

		let mut table = vec!["Hash\tRC\tErrors\tLast error\tNext try".into()];
		for e in errors {
			let next_try = if e.next_try_in_secs > 0 {
				tf2.convert(Duration::from_secs(e.next_try_in_secs))
			} else {
				"asap".to_string()
			};
			table.push(format!(
				"{}\t{}\t{}\t{}\tin {}",
				e.block_hash,
				e.refcount,
				e.error_count,
				tf.convert(Duration::from_secs(e.last_try_secs_ago)),
				next_try
			));
		}
		format_table(table);

		Ok(())
	}

	pub async fn cmd_get_block_info(&self, hash: String) -> Result<(), Error> {
		let info = self
			.local_api_request(LocalGetBlockInfoRequest { block_hash: hash })
			.await?;

		println!("Block hash: {}", info.block_hash);
		println!("Refcount: {}", info.refcount);
		println!();

		let mut table = vec!["Version\tBucket\tKey\tMPU\tDeleted".into()];
		let mut nondeleted_count = 0;
		for ver in info.versions.iter() {
			match &ver.backlink {
				Some(BlockVersionBacklink::Object { bucket_id, key }) => {
					table.push(format!(
						"{:.16}\t{:.16}\t{}\t\t{:?}",
						ver.version_id, bucket_id, key, ver.deleted
					));
				}
				Some(BlockVersionBacklink::Upload {
					upload_id,
					upload_deleted: _,
					upload_garbage_collected: _,
					bucket_id,
					key,
				}) => {
					table.push(format!(
						"{:.16}\t{:.16}\t{}\t{:.16}\t{:.16}",
						ver.version_id,
						bucket_id.as_deref().unwrap_or(""),
						key.as_deref().unwrap_or(""),
						upload_id,
						ver.deleted
					));
				}
				None => {
					table.push(format!("{:.16}\t\t\t\tyes", ver.version_id));
				}
			}
			if !ver.deleted {
				nondeleted_count += 1;
			}
		}
		format_table(table);

		if info.refcount != nondeleted_count {
			println!();
			println!(
				"Warning: refcount does not match number of non-deleted versions, you should try `garage repair block-rc`."
			);
		}

		Ok(())
	}

	pub async fn cmd_block_retry_now(&self, all: bool, blocks: Vec<String>) -> Result<(), Error> {
		let req = match (all, blocks.len()) {
			(true, 0) => LocalRetryBlockResyncRequest::All { all: true },
			(false, n) if n > 0 => LocalRetryBlockResyncRequest::Blocks {
				block_hashes: blocks,
			},
			_ => {
				return Err(Error::Message(
					"Please specify block hashes or --all (not both)".into(),
				))
			}
		};

		let res = self.local_api_request(req).await?;

		println!(
			"{} blocks returned in queue for a retry now (check logs to see results)",
			res.count
		);

		Ok(())
	}

	pub async fn cmd_block_purge(&self, yes: bool, blocks: Vec<String>) -> Result<(), Error> {
		if !yes {
			return Err(Error::Message(
				"Pass the --yes flag to confirm block purge operation.".into(),
			));
		}

		let res = self
			.local_api_request(LocalPurgeBlocksRequest(blocks))
			.await?;

		println!(
			"Purged {} blocks: deleted {} versions, {} objects, {} multipart uploads",
			res.blocks_purged, res.versions_deleted, res.objects_deleted, res.uploads_deleted,
		);

		Ok(())
	}
}
549  src/garage/cli_v2/bucket.rs  Normal file

@@ -0,0 +1,549 @@
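// `garage bucket` subcommands of the v2 CLI, implemented as cluster-wide
// admin API calls (Cli::api_request). Buckets are resolved by name pattern
// via GetBucketInfoRequest { search: ... } before each operation.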
//use bytesize::ByteSize;
use format_table::format_table;

use garage_util::error::*;

use garage_api_admin::api::*;

use crate::cli::structs::*;
use crate::cli_v2::*;

impl Cli {
	pub async fn cmd_bucket(&self, cmd: BucketOperation) -> Result<(), Error> {
		match cmd {
			BucketOperation::List => self.cmd_list_buckets().await,
			BucketOperation::Info(query) => self.cmd_bucket_info(query).await,
			BucketOperation::Create(query) => self.cmd_create_bucket(query).await,
			BucketOperation::Delete(query) => self.cmd_delete_bucket(query).await,
			BucketOperation::Alias(query) => self.cmd_alias_bucket(query).await,
			BucketOperation::Unalias(query) => self.cmd_unalias_bucket(query).await,
			BucketOperation::Allow(query) => self.cmd_bucket_allow(query).await,
			BucketOperation::Deny(query) => self.cmd_bucket_deny(query).await,
			BucketOperation::Website(query) => self.cmd_bucket_website(query).await,
			BucketOperation::SetQuotas(query) => self.cmd_bucket_set_quotas(query).await,
			BucketOperation::CleanupIncompleteUploads(query) => {
				self.cmd_cleanup_incomplete_uploads(query).await
			}
		}
	}

	pub async fn cmd_list_buckets(&self) -> Result<(), Error> {
		let buckets = self.api_request(ListBucketsRequest).await?;

		println!("List of buckets:");

		let mut table = vec![];
		for bucket in buckets.0.iter() {
			let local_aliases_n = match &bucket.local_aliases[..] {
				[] => "".into(),
				[alias] => format!("{}:{}", alias.access_key_id, alias.alias),
				s => format!("[{} local aliases]", s.len()),
			};

			table.push(format!(
				"\t{}\t{}\t{}",
				bucket.global_aliases.join(","),
				local_aliases_n,
				bucket.id,
			));
		}
		format_table(table);

		Ok(())
	}

	pub async fn cmd_bucket_info(&self, opt: BucketOpt) -> Result<(), Error> {
		let bucket = self
			.api_request(GetBucketInfoRequest {
				id: None,
				global_alias: None,
				search: Some(opt.name),
			})
			.await?;

		println!("Bucket: {}", bucket.id);

		let size = bytesize::ByteSize::b(bucket.bytes as u64);
		println!(
			"\nSize: {} ({})",
			size.to_string_as(true),
			size.to_string_as(false)
		);
		println!("Objects: {}", bucket.objects);
		println!(
			"Unfinished uploads (multipart and non-multipart): {}",
			bucket.unfinished_uploads,
		);
		println!(
			"Unfinished multipart uploads: {}",
			bucket.unfinished_multipart_uploads
		);
		let mpu_size = bytesize::ByteSize::b(bucket.unfinished_multipart_uploads as u64);
		println!(
			"Size of unfinished multipart uploads: {} ({})",
			mpu_size.to_string_as(true),
			mpu_size.to_string_as(false),
		);

		println!("\nWebsite access: {}", bucket.website_access);

		if bucket.quotas.max_size.is_some() || bucket.quotas.max_objects.is_some() {
			println!("\nQuotas:");
			if let Some(ms) = bucket.quotas.max_size {
				let ms = bytesize::ByteSize::b(ms);
				println!(
					" maximum size: {} ({})",
					ms.to_string_as(true),
					ms.to_string_as(false)
				);
			}
			if let Some(mo) = bucket.quotas.max_objects {
				println!(" maximum number of objects: {}", mo);
			}
		}

		println!("\nGlobal aliases:");
		for alias in bucket.global_aliases {
			println!(" {}", alias);
		}

		println!("\nKey-specific aliases:");
		let mut table = vec![];
		for key in bucket.keys.iter() {
			for alias in key.bucket_local_aliases.iter() {
				table.push(format!("\t{} ({})\t{}", key.access_key_id, key.name, alias));
			}
		}
		format_table(table);

		println!("\nAuthorized keys:");
		let mut table = vec![];
		for key in bucket.keys.iter() {
			if !(key.permissions.read || key.permissions.write || key.permissions.owner) {
				continue;
			}
			let rflag = if key.permissions.read { "R" } else { " " };
			let wflag = if key.permissions.write { "W" } else { " " };
			let oflag = if key.permissions.owner { "O" } else { " " };
			table.push(format!(
				"\t{}{}{}\t{}\t{}",
				rflag, wflag, oflag, key.access_key_id, key.name
			));
		}
		format_table(table);

		Ok(())
	}

	pub async fn cmd_create_bucket(&self, opt: BucketOpt) -> Result<(), Error> {
		self.api_request(CreateBucketRequest {
			global_alias: Some(opt.name.clone()),
			local_alias: None,
		})
		.await?;

		println!("Bucket {} was created.", opt.name);

		Ok(())
	}

	pub async fn cmd_delete_bucket(&self, opt: DeleteBucketOpt) -> Result<(), Error> {
		let bucket = self
			.api_request(GetBucketInfoRequest {
				id: None,
				global_alias: None,
				search: Some(opt.name.clone()),
			})
			.await?;

		// CLI-only checks: the bucket must not have other aliases
		if bucket
			.global_aliases
			.iter()
			.find(|a| **a != opt.name)
			.is_some()
		{
			return Err(Error::Message(format!("Bucket {} still has other global aliases. Use `bucket unalias` to delete them one by one.", opt.name)));
		}

		if bucket
			.keys
			.iter()
			.any(|k| !k.bucket_local_aliases.is_empty())
		{
			return Err(Error::Message(format!("Bucket {} still has other local aliases. Use `bucket unalias` to delete them one by one.", opt.name)));
		}

		if !opt.yes {
			println!("About to delete bucket {}.", bucket.id);
			return Err(Error::Message(
				"Add --yes flag to really perform this operation".to_string(),
			));
		}

		self.api_request(DeleteBucketRequest {
			id: bucket.id.clone(),
		})
		.await?;

		println!("Bucket {} has been deleted.", bucket.id);

		Ok(())
	}

	pub async fn cmd_alias_bucket(&self, opt: AliasBucketOpt) -> Result<(), Error> {
		let bucket = self
			.api_request(GetBucketInfoRequest {
				id: None,
				global_alias: None,
				search: Some(opt.existing_bucket.clone()),
			})
			.await?;

		if let Some(key_pat) = &opt.local {
			let key = self
				.api_request(GetKeyInfoRequest {
					search: Some(key_pat.clone()),
					id: None,
					show_secret_key: false,
				})
				.await?;

			self.api_request(AddBucketAliasRequest {
				bucket_id: bucket.id.clone(),
				alias: BucketAliasEnum::Local {
					local_alias: opt.new_name.clone(),
					access_key_id: key.access_key_id.clone(),
				},
			})
			.await?;

			println!(
				"Alias {} now points to bucket {:.16} in namespace of key {}",
				opt.new_name, bucket.id, key.access_key_id
			)
		} else {
			self.api_request(AddBucketAliasRequest {
				bucket_id: bucket.id.clone(),
				alias: BucketAliasEnum::Global {
					global_alias: opt.new_name.clone(),
				},
			})
			.await?;

			println!(
				"Alias {} now points to bucket {:.16}",
				opt.new_name, bucket.id
			)
		}

		Ok(())
	}

	pub async fn cmd_unalias_bucket(&self, opt: UnaliasBucketOpt) -> Result<(), Error> {
		if let Some(key_pat) = &opt.local {
			let key = self
				.api_request(GetKeyInfoRequest {
					search: Some(key_pat.clone()),
					id: None,
					show_secret_key: false,
				})
				.await?;

			let bucket = key
				.buckets
				.iter()
				.find(|x| x.local_aliases.contains(&opt.name))
				.ok_or_message(format!(
					"No bucket called {} in namespace of key {}",
					opt.name, key.access_key_id
				))?;

			self.api_request(RemoveBucketAliasRequest {
				bucket_id: bucket.id.clone(),
				alias: BucketAliasEnum::Local {
					access_key_id: key.access_key_id.clone(),
					local_alias: opt.name.clone(),
				},
			})
			.await?;

			println!(
				"Alias {} no longer points to bucket {:.16} in namespace of key {}",
				&opt.name, bucket.id, key.access_key_id
			)
		} else {
			let bucket = self
				.api_request(GetBucketInfoRequest {
					id: None,
					global_alias: Some(opt.name.clone()),
					search: None,
				})
				.await?;

			self.api_request(RemoveBucketAliasRequest {
				bucket_id: bucket.id.clone(),
				alias: BucketAliasEnum::Global {
					global_alias: opt.name.clone(),
				},
			})
			.await?;

			println!(
				"Alias {} no longer points to bucket {:.16}",
				opt.name, bucket.id
			)
		}

		Ok(())
	}

	pub async fn cmd_bucket_allow(&self, opt: PermBucketOpt) -> Result<(), Error> {
		let bucket = self
			.api_request(GetBucketInfoRequest {
				id: None,
				global_alias: None,
				search: Some(opt.bucket.clone()),
			})
			.await?;

		let key = self
			.api_request(GetKeyInfoRequest {
				id: None,
				search: Some(opt.key_pattern.clone()),
				show_secret_key: false,
			})
			.await?;

		self.api_request(AllowBucketKeyRequest(BucketKeyPermChangeRequest {
			bucket_id: bucket.id.clone(),
			access_key_id: key.access_key_id.clone(),
			permissions: ApiBucketKeyPerm {
				read: opt.read,
				write: opt.write,
				owner: opt.owner,
			},
		}))
		.await?;

		let new_bucket = self
			.api_request(GetBucketInfoRequest {
				id: Some(bucket.id),
				global_alias: None,
				search: None,
			})
			.await?;

		if let Some(new_key) = new_bucket
			.keys
			.iter()
			.find(|k| k.access_key_id == key.access_key_id)
		{
			println!(
				"New permissions for key {} on bucket {:.16}:\n  read {}\n  write {}\n  owner {}",
				key.access_key_id,
				new_bucket.id,
				new_key.permissions.read,
				new_key.permissions.write,
				new_key.permissions.owner
			);
		} else {
			println!(
				"Access key {} has no permissions on bucket {:.16}",
				key.access_key_id, new_bucket.id
			);
		}

		Ok(())
	}

	pub async fn cmd_bucket_deny(&self, opt: PermBucketOpt) -> Result<(), Error> {
		let bucket = self
			.api_request(GetBucketInfoRequest {
				id: None,
				global_alias: None,
				search: Some(opt.bucket.clone()),
			})
			.await?;

		let key = self
			.api_request(GetKeyInfoRequest {
				id: None,
				search: Some(opt.key_pattern.clone()),
				show_secret_key: false,
			})
			.await?;

		self.api_request(DenyBucketKeyRequest(BucketKeyPermChangeRequest {
			bucket_id: bucket.id.clone(),
			access_key_id: key.access_key_id.clone(),
			permissions: ApiBucketKeyPerm {
				read: opt.read,
				write: opt.write,
				owner: opt.owner,
			},
		}))
		.await?;

		let new_bucket = self
			.api_request(GetBucketInfoRequest {
				id: Some(bucket.id),
				global_alias: None,
				search: None,
			})
			.await?;

		if let Some(new_key) = new_bucket
			.keys
			.iter()
			.find(|k| k.access_key_id == key.access_key_id)
		{
			println!(
				"New permissions for key {} on bucket {:.16}:\n  read {}\n  write {}\n  owner {}",
				key.access_key_id,
				new_bucket.id,
				new_key.permissions.read,
				new_key.permissions.write,
				new_key.permissions.owner
			);
		} else {
			println!(
				"Access key {} no longer has permissions on bucket {:.16}",
				key.access_key_id, new_bucket.id
			);
		}

		Ok(())
	}

	pub async fn cmd_bucket_website(&self, opt: WebsiteOpt) -> Result<(), Error> {
		let bucket = self
			.api_request(GetBucketInfoRequest {
				id: None,
				global_alias: None,
				search: Some(opt.bucket.clone()),
			})
			.await?;

		if !(opt.allow ^ opt.deny) {
			return Err(Error::Message(
				"You must specify exactly one flag, either --allow or --deny".to_string(),
			));
		}

		let wa = if opt.allow {
			UpdateBucketWebsiteAccess {
				enabled: true,
				index_document: Some(opt.index_document.clone()),
				error_document: opt
					.error_document
					.or(bucket.website_config.and_then(|x| x.error_document.clone())),
			}
		} else {
			UpdateBucketWebsiteAccess {
				enabled: false,
				index_document: None,
				error_document: None,
			}
		};

		self.api_request(UpdateBucketRequest {
			id: bucket.id,
			body: UpdateBucketRequestBody {
				website_access: Some(wa),
				quotas: None,
			},
		})
		.await?;

		if opt.allow {
			println!("Website access allowed for {}", &opt.bucket);
		} else {
			println!("Website access denied for {}", &opt.bucket);
		}

		Ok(())
	}

	pub async fn cmd_bucket_set_quotas(&self, opt: SetQuotasOpt) -> Result<(), Error> {
		let bucket = self
			.api_request(GetBucketInfoRequest {
				id: None,
				global_alias: None,
				search: Some(opt.bucket.clone()),
			})
			.await?;

		if opt.max_size.is_none() && opt.max_objects.is_none() {
			return Err(Error::Message(
				"You must specify either --max-size or --max-objects (or both) for this command to do something.".to_string(),
			));
		}

		let new_quotas = ApiBucketQuotas {
			max_size: match opt.max_size.as_deref() {
				Some("none") => None,
				Some(v) => Some(
					v.parse::<bytesize::ByteSize>()
						.ok_or_message(format!("Invalid size specified: {}", v))?
						.as_u64(),
				),
				None => bucket.quotas.max_size,
			},
			max_objects: match opt.max_objects.as_deref() {
				Some("none") => None,
				Some(v) => Some(
					v.parse::<u64>()
						.ok_or_message(format!("Invalid number: {}", v))?,
				),
				None => bucket.quotas.max_objects,
			},
		};

		self.api_request(UpdateBucketRequest {
			id: bucket.id.clone(),
			body: UpdateBucketRequestBody {
				website_access: None,
				quotas: Some(new_quotas),
			},
		})
		.await?;

		println!("Quotas updated for bucket {:.16}", bucket.id);

		Ok(())
	}

	pub async fn cmd_cleanup_incomplete_uploads(
		&self,
		opt: CleanupIncompleteUploadsOpt,
	) -> Result<(), Error> {
		let older_than = parse_duration::parse::parse(&opt.older_than)
			.ok_or_message("Invalid duration passed for --older-than parameter")?;

		for b in opt.buckets.iter() {
			let bucket = self
				.api_request(GetBucketInfoRequest {
					id: None,
					global_alias: None,
					search: Some(b.clone()),
				})
				.await?;

			let res = self
				.api_request(CleanupIncompleteUploadsRequest {
					bucket_id: bucket.id.clone(),
					older_than_secs: older_than.as_secs(),
				})
				.await?;

			if res.uploads_deleted > 0 {
				println!("{:.16}: {} uploads deleted", bucket.id, res.uploads_deleted);
			} else {
				println!("{:.16}: no uploads deleted", bucket.id);
			}
		}

		Ok(())
	}
}
158  src/garage/cli_v2/cluster.rs  Normal file

@@ -0,0 +1,158 @@
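// `garage status` and `garage node connect`: cluster status and layout are
// fetched through the admin API and rendered as tables of healthy and failed
// nodes.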
use format_table::format_table;

use garage_util::error::*;

use garage_api_admin::api::*;

use crate::cli::structs::*;
use crate::cli_v2::layout::*;
use crate::cli_v2::*;

impl Cli {
	pub async fn cmd_status(&self) -> Result<(), Error> {
		let status = self.api_request(GetClusterStatusRequest).await?;
		let layout = self.api_request(GetClusterLayoutRequest).await?;

		println!("==== HEALTHY NODES ====");

		let mut healthy_nodes =
			vec!["ID\tHostname\tAddress\tTags\tZone\tCapacity\tDataAvail".to_string()];

		for adv in status.nodes.iter().filter(|adv| adv.is_up) {
			let host = adv.hostname.as_deref().unwrap_or("?");
			let addr = match adv.addr {
				Some(addr) => addr.to_string(),
				None => "N/A".to_string(),
			};
			if let Some(cfg) = &adv.role {
				let data_avail = match &adv.data_partition {
					_ if cfg.capacity.is_none() => "N/A".into(),
					Some(FreeSpaceResp { available, total }) => {
						let pct = (*available as f64) / (*total as f64) * 100.;
						let avail_str = bytesize::ByteSize::b(*available);
						format!("{} ({:.1}%)", avail_str, pct)
					}
					None => "?".into(),
				};
				healthy_nodes.push(format!(
					"{id:.16}\t{host}\t{addr}\t[{tags}]\t{zone}\t{capacity}\t{data_avail}",
					id = adv.id,
					host = host,
					addr = addr,
					tags = cfg.tags.join(","),
					zone = cfg.zone,
					capacity = capacity_string(cfg.capacity),
					data_avail = data_avail,
				));
			} else {
				let status = match layout.staged_role_changes.iter().find(|x| x.id == adv.id) {
					Some(NodeRoleChange {
						action: NodeRoleChangeEnum::Update { .. },
						..
					}) => "pending...",
					_ if adv.draining => "draining metadata..",
					_ => "NO ROLE ASSIGNED",
				};
				healthy_nodes.push(format!(
					"{id:.16}\t{h}\t{addr}\t\t\t{status}",
					id = adv.id,
					h = host,
					addr = addr,
					status = status,
				));
			}
		}
		format_table(healthy_nodes);

		let tf = timeago::Formatter::new();
		let mut drain_msg = false;
		let mut failed_nodes = vec!["ID\tHostname\tTags\tZone\tCapacity\tLast seen".to_string()];
		for adv in status.nodes.iter().filter(|x| !x.is_up) {
			let node = &adv.id;

			let host = adv.hostname.as_deref().unwrap_or("?");
			let last_seen = adv
				.last_seen_secs_ago
				.map(|s| tf.convert(Duration::from_secs(s)))
				.unwrap_or_else(|| "never seen".into());

			if let Some(cfg) = &adv.role {
				let capacity = capacity_string(cfg.capacity);

				failed_nodes.push(format!(
					"{id:.16}\t{host}\t[{tags}]\t{zone}\t{capacity}\t{last_seen}",
					id = node,
					host = host,
					tags = cfg.tags.join(","),
					zone = cfg.zone,
					capacity = capacity,
					last_seen = last_seen,
				));
			} else {
				let status = match layout.staged_role_changes.iter().find(|x| x.id == adv.id) {
					Some(NodeRoleChange {
						action: NodeRoleChangeEnum::Update { .. },
						..
					}) => "pending...",
					_ if adv.draining => {
						drain_msg = true;
						"draining metadata.."
					}
					_ => unreachable!(),
				};

				failed_nodes.push(format!(
					"{id:.16}\t{host}\t\t\t{status}\t{last_seen}",
					id = node,
					host = host,
					status = status,
					last_seen = last_seen,
				));
			}
		}

		if failed_nodes.len() > 1 {
			println!("\n==== FAILED NODES ====");
			format_table(failed_nodes);
			if drain_msg {
				println!();
				println!("Your cluster is expecting to drain data from nodes that are currently unavailable.");
				println!(
					"If these nodes are definitely dead, please review the layout history with"
				);
				println!(
					"`garage layout history` and use `garage layout skip-dead-nodes` to force progress."
				);
			}
		}

		if print_staging_role_changes(&layout) {
			println!();
			println!(
				"Please use `garage layout show` to check the proposed new layout and apply it."
			);
			println!();
		}

		Ok(())
	}

	pub async fn cmd_connect(&self, opt: ConnectNodeOpt) -> Result<(), Error> {
		let res = self
			.api_request(ConnectClusterNodesRequest(vec![opt.node]))
			.await?;
		if res.0.len() != 1 {
			return Err(Error::Message(format!("unexpected response: {:?}", res)));
		}
		let res = res.0.into_iter().next().unwrap();
		if res.success {
			println!("Success.");
			Ok(())
		} else {
			Err(Error::Message(format!(
				"Failure: {}",
				res.error.unwrap_or_default()
			)))
		}
	}
}
227  src/garage/cli_v2/key.rs  Normal file

@@ -0,0 +1,227 @@
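// `garage key` subcommands of the v2 CLI. Each command first resolves the
// key from a search pattern, then acts on its access key ID.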
use format_table::format_table;

use garage_util::error::*;

use garage_api_admin::api::*;

use crate::cli::structs::*;
use crate::cli_v2::*;

impl Cli {
	pub async fn cmd_key(&self, cmd: KeyOperation) -> Result<(), Error> {
		match cmd {
			KeyOperation::List => self.cmd_list_keys().await,
			KeyOperation::Info(query) => self.cmd_key_info(query).await,
			KeyOperation::Create(query) => self.cmd_create_key(query).await,
			KeyOperation::Rename(query) => self.cmd_rename_key(query).await,
			KeyOperation::Delete(query) => self.cmd_delete_key(query).await,
			KeyOperation::Allow(query) => self.cmd_allow_key(query).await,
			KeyOperation::Deny(query) => self.cmd_deny_key(query).await,
			KeyOperation::Import(query) => self.cmd_import_key(query).await,
		}
	}

	pub async fn cmd_list_keys(&self) -> Result<(), Error> {
		let keys = self.api_request(ListKeysRequest).await?;

		println!("List of keys:");
		let mut table = vec![];
		for key in keys.0.iter() {
			table.push(format!("\t{}\t{}", key.id, key.name));
		}
		format_table(table);

		Ok(())
	}

	pub async fn cmd_key_info(&self, opt: KeyInfoOpt) -> Result<(), Error> {
		let key = self
			.api_request(GetKeyInfoRequest {
				id: None,
				search: Some(opt.key_pattern),
				show_secret_key: opt.show_secret,
			})
			.await?;

		print_key_info(&key);

		Ok(())
	}

	pub async fn cmd_create_key(&self, opt: KeyNewOpt) -> Result<(), Error> {
		let key = self
			.api_request(CreateKeyRequest {
				name: Some(opt.name),
			})
			.await?;

		print_key_info(&key.0);

		Ok(())
	}

	pub async fn cmd_rename_key(&self, opt: KeyRenameOpt) -> Result<(), Error> {
		let key = self
			.api_request(GetKeyInfoRequest {
				id: None,
				search: Some(opt.key_pattern),
				show_secret_key: false,
			})
			.await?;

		let new_key = self
			.api_request(UpdateKeyRequest {
				id: key.access_key_id,
				body: UpdateKeyRequestBody {
					name: Some(opt.new_name),
					allow: None,
					deny: None,
				},
			})
			.await?;

		print_key_info(&new_key.0);

		Ok(())
	}

	pub async fn cmd_delete_key(&self, opt: KeyDeleteOpt) -> Result<(), Error> {
		let key = self
			.api_request(GetKeyInfoRequest {
				id: None,
				search: Some(opt.key_pattern),
				show_secret_key: false,
			})
			.await?;

		if !opt.yes {
			println!("About to delete key {}...", key.access_key_id);
			return Err(Error::Message(
				"Add --yes flag to really perform this operation".to_string(),
			));
		}

		self.api_request(DeleteKeyRequest {
			id: key.access_key_id.clone(),
		})
		.await?;

		println!("Access key {} has been deleted.", key.access_key_id);

		Ok(())
	}

	pub async fn cmd_allow_key(&self, opt: KeyPermOpt) -> Result<(), Error> {
		let key = self
			.api_request(GetKeyInfoRequest {
				id: None,
				search: Some(opt.key_pattern),
				show_secret_key: false,
			})
			.await?;

		let new_key = self
			.api_request(UpdateKeyRequest {
				id: key.access_key_id,
				body: UpdateKeyRequestBody {
					name: None,
					allow: Some(KeyPerm {
						create_bucket: opt.create_bucket,
					}),
					deny: None,
				},
			})
			.await?;

		print_key_info(&new_key.0);

		Ok(())
	}

	pub async fn cmd_deny_key(&self, opt: KeyPermOpt) -> Result<(), Error> {
		let key = self
			.api_request(GetKeyInfoRequest {
				id: None,
				search: Some(opt.key_pattern),
				show_secret_key: false,
			})
			.await?;

		let new_key = self
			.api_request(UpdateKeyRequest {
				id: key.access_key_id,
				body: UpdateKeyRequestBody {
					name: None,
					allow: None,
					deny: Some(KeyPerm {
						create_bucket: opt.create_bucket,
					}),
				},
			})
			.await?;

		print_key_info(&new_key.0);

		Ok(())
	}

	pub async fn cmd_import_key(&self, opt: KeyImportOpt) -> Result<(), Error> {
		if !opt.yes {
			return Err(Error::Message("This command is intended to re-import keys that were previously generated by Garage. If you want to create a new key, use `garage key new` instead. Add the --yes flag if you really want to re-import a key.".to_string()));
		}

		let new_key = self
			.api_request(ImportKeyRequest {
				name: Some(opt.name),
				access_key_id: opt.key_id,
				secret_access_key: opt.secret_key,
			})
			.await?;

		print_key_info(&new_key.0);

		Ok(())
	}
}

fn print_key_info(key: &GetKeyInfoResponse) {
	println!("Key name: {}", key.name);
	println!("Key ID: {}", key.access_key_id);
	println!(
		"Secret key: {}",
		key.secret_access_key.as_deref().unwrap_or("(redacted)")
	);
	println!("Can create buckets: {}", key.permissions.create_bucket);

	println!("\nKey-specific bucket aliases:");
	let mut table = vec![];
	for bucket in key.buckets.iter() {
		for la in bucket.local_aliases.iter() {
			table.push(format!(
				"\t{}\t{}\t{}",
				la,
				bucket.global_aliases.join(","),
				bucket.id
			));
		}
	}
	format_table(table);

	println!("\nAuthorized buckets:");
	let mut table = vec![];
	for bucket in key.buckets.iter() {
		let rflag = if bucket.permissions.read { "R" } else { " " };
		let wflag = if bucket.permissions.write { "W" } else { " " };
		let oflag = if bucket.permissions.owner { "O" } else { " " };
		table.push(format!(
			"\t{}{}{}\t{}\t{}\t{:.16}",
			rflag,
			wflag,
			oflag,
			bucket.global_aliases.join(","),
			bucket.local_aliases.join(","),
			bucket.id
		));
	}
	format_table(table);
}
284  src/garage/cli_v2/layout.rs  Normal file

@@ -0,0 +1,284 @@
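// `garage layout` subcommands. Assign/remove/apply/revert go through the
// admin API; show, config, history and skip-dead-nodes still dispatch to the
// v1 CLI implementation (see the TODO below).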
use bytesize::ByteSize;
use format_table::format_table;

use garage_util::error::*;

use garage_api_admin::api::*;

use crate::cli::layout as cli_v1;
use crate::cli::structs::*;
use crate::cli_v2::*;

impl Cli {
	pub async fn layout_command_dispatch(&self, cmd: LayoutOperation) -> Result<(), Error> {
		match cmd {
			LayoutOperation::Assign(assign_opt) => self.cmd_assign_role(assign_opt).await,
			LayoutOperation::Remove(remove_opt) => self.cmd_remove_role(remove_opt).await,
			LayoutOperation::Apply(apply_opt) => self.cmd_apply_layout(apply_opt).await,
			LayoutOperation::Revert(revert_opt) => self.cmd_revert_layout(revert_opt).await,

			// TODO
			LayoutOperation::Show => {
				cli_v1::cmd_show_layout(&self.system_rpc_endpoint, self.rpc_host).await
			}
			LayoutOperation::Config(config_opt) => {
				cli_v1::cmd_config_layout(&self.system_rpc_endpoint, self.rpc_host, config_opt)
					.await
			}
			LayoutOperation::History => {
				cli_v1::cmd_layout_history(&self.system_rpc_endpoint, self.rpc_host).await
			}
			LayoutOperation::SkipDeadNodes(assume_sync_opt) => {
				cli_v1::cmd_layout_skip_dead_nodes(
					&self.system_rpc_endpoint,
					self.rpc_host,
					assume_sync_opt,
				)
				.await
			}
		}
	}

	pub async fn cmd_assign_role(&self, opt: AssignRoleOpt) -> Result<(), Error> {
		let status = self.api_request(GetClusterStatusRequest).await?;
		let layout = self.api_request(GetClusterLayoutRequest).await?;

		let all_node_ids_iter = status
			.nodes
			.iter()
			.map(|x| x.id.as_str())
			.chain(layout.roles.iter().map(|x| x.id.as_str()));

		let mut actions = vec![];

		for node in opt.replace.iter() {
			let id = find_matching_node(all_node_ids_iter.clone(), &node)?;

			actions.push(NodeRoleChange {
				id,
				action: NodeRoleChangeEnum::Remove { remove: true },
			});
		}

		for node in opt.node_ids.iter() {
			let id = find_matching_node(all_node_ids_iter.clone(), &node)?;

			let current = get_staged_or_current_role(&id, &layout);

			let zone = opt
				.zone
				.clone()
				.or_else(|| current.as_ref().map(|c| c.zone.clone()))
				.ok_or_message("Please specify a zone with the -z flag")?;

			let capacity = if opt.gateway {
				if opt.capacity.is_some() {
					return Err(Error::Message("Please specify only -c or -g".into()));
				}
				None
			} else if let Some(cap) = opt.capacity {
				Some(cap.as_u64())
			} else {
				current.as_ref().ok_or_message("Please specify a capacity with the -c flag, or set node explicitly as gateway with -g")?.capacity
			};

			let tags = if !opt.tags.is_empty() {
				opt.tags.clone()
			} else if let Some(cur) = current.as_ref() {
				cur.tags.clone()
			} else {
				vec![]
			};

			actions.push(NodeRoleChange {
				id,
				action: NodeRoleChangeEnum::Update {
					zone,
					capacity,
					tags,
				},
			});
		}

		self.api_request(UpdateClusterLayoutRequest(actions))
			.await?;

		println!("Role changes are staged but not yet committed.");
		println!("Use `garage layout show` to view staged role changes,");
		println!("and `garage layout apply` to enact staged changes.");
		Ok(())
	}

	pub async fn cmd_remove_role(&self, opt: RemoveRoleOpt) -> Result<(), Error> {
		let status = self.api_request(GetClusterStatusRequest).await?;
		let layout = self.api_request(GetClusterLayoutRequest).await?;

		let all_node_ids_iter = status
			.nodes
			.iter()
			.map(|x| x.id.as_str())
			.chain(layout.roles.iter().map(|x| x.id.as_str()));

		let id = find_matching_node(all_node_ids_iter.clone(), &opt.node_id)?;

		let actions = vec![NodeRoleChange {
			id,
			action: NodeRoleChangeEnum::Remove { remove: true },
		}];

		self.api_request(UpdateClusterLayoutRequest(actions))
			.await?;

		println!("Role removal is staged but not yet committed.");
		println!("Use `garage layout show` to view staged role changes,");
		println!("and `garage layout apply` to enact staged changes.");
		Ok(())
	}

	pub async fn cmd_apply_layout(&self, apply_opt: ApplyLayoutOpt) -> Result<(), Error> {
		let missing_version_error = r#"
Please pass the new layout version number to ensure that you are writing the correct version of the cluster layout.
To know the correct value of the new layout version, invoke `garage layout show` and review the proposed changes.
		"#;

		let req = ApplyClusterLayoutRequest {
			version: apply_opt.version.ok_or_message(missing_version_error)?,
		};
		let res = self.api_request(req).await?;

		for line in res.message.iter() {
			println!("{}", line);
		}

		println!("New cluster layout with updated role assignment has been applied in cluster.");
		println!("Data will now be moved around between nodes accordingly.");

		Ok(())
	}

	pub async fn cmd_revert_layout(&self, revert_opt: RevertLayoutOpt) -> Result<(), Error> {
		if !revert_opt.yes {
			return Err(Error::Message(
				"Please add the --yes flag to run the layout revert operation".into(),
			));
		}

		self.api_request(RevertClusterLayoutRequest).await?;

		println!("All proposed role changes in cluster layout have been canceled.");
		Ok(())
	}
}

// --------------------------
// ---- helper functions ----
// --------------------------

pub fn capacity_string(v: Option<u64>) -> String {
	match v {
		Some(c) => ByteSize::b(c).to_string_as(false),
		None => "gateway".to_string(),
	}
}

pub fn get_staged_or_current_role(
	id: &str,
	layout: &GetClusterLayoutResponse,
) -> Option<NodeRoleResp> {
	for node in layout.staged_role_changes.iter() {
		if node.id == id {
			return match &node.action {
				NodeRoleChangeEnum::Remove { .. } => None,
				NodeRoleChangeEnum::Update {
					zone,
					capacity,
					tags,
				} => Some(NodeRoleResp {
					id: id.to_string(),
					zone: zone.to_string(),
					capacity: *capacity,
					tags: tags.clone(),
				}),
			};
		}
	}

	for node in layout.roles.iter() {
		if node.id == id {
			return Some(node.clone());
		}
	}

	None
}

pub fn find_matching_node<'a>(
	cand: impl std::iter::Iterator<Item = &'a str>,
	pattern: &'a str,
) -> Result<String, Error> {
	let mut candidates = vec![];
	for c in cand {
		if c.starts_with(pattern) && !candidates.contains(&c) {
			candidates.push(c);
		}
	}
	if candidates.len() != 1 {
		Err(Error::Message(format!(
			"{} nodes match '{}'",
			candidates.len(),
			pattern,
		)))
	} else {
		Ok(candidates[0].to_string())
	}
}

pub fn print_staging_role_changes(layout: &GetClusterLayoutResponse) -> bool {
	let has_role_changes = !layout.staged_role_changes.is_empty();

	// TODO!! Layout parameters
	let has_layout_changes = false;

	if has_role_changes || has_layout_changes {
		println!();
		println!("==== STAGED ROLE CHANGES ====");
		if has_role_changes {
			let mut table = vec!["ID\tTags\tZone\tCapacity".to_string()];
			for change in layout.staged_role_changes.iter() {
				match &change.action {
					NodeRoleChangeEnum::Update {
						tags,
						zone,
						capacity,
					} => {
						let tags = tags.join(",");
						table.push(format!(
							"{:.16}\t{}\t{}\t{}",
							change.id,
							tags,
							zone,
							capacity_string(*capacity),
						));
					}
					NodeRoleChangeEnum::Remove { .. } => {
						table.push(format!("{:.16}\tREMOVED", change.id));
					}
				}
			}
			format_table(table);
			println!();
		}
		//TODO
		/*
		if has_layout_changes {
			println!(
				"Zone redundancy: {}",
				staging.parameters.get().zone_redundancy
			);
		}
		*/
		true
	} else {
		false
	}
}
108  src/garage/cli_v2/mod.rs  Normal file

@@ -0,0 +1,108 @@
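// Core of the v2 CLI: every command is converted into an AdminApiRequest and
// proxied over RPC to the connected node, which executes it through the
// admin API server.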
pub mod bucket;
pub mod cluster;
pub mod key;
pub mod layout;

pub mod block;
pub mod node;
pub mod worker;

use std::convert::TryFrom;
use std::sync::Arc;
use std::time::Duration;

use garage_util::error::*;

use garage_rpc::system::*;
use garage_rpc::*;

use garage_api_admin::api::*;
use garage_api_admin::api_server::{AdminRpc as ProxyRpc, AdminRpcResponse as ProxyRpcResponse};
use garage_api_admin::RequestHandler;

use crate::cli::structs::*;

pub struct Cli {
	pub system_rpc_endpoint: Arc<Endpoint<SystemRpc, ()>>,
	pub proxy_rpc_endpoint: Arc<Endpoint<ProxyRpc, ()>>,
	pub rpc_host: NodeID,
}

impl Cli {
	pub async fn handle(&self, cmd: Command) -> Result<(), Error> {
		match cmd {
			Command::Status => self.cmd_status().await,
			Command::Node(NodeOperation::Connect(connect_opt)) => {
				self.cmd_connect(connect_opt).await
			}
			Command::Layout(layout_opt) => self.layout_command_dispatch(layout_opt).await,
			Command::Bucket(bo) => self.cmd_bucket(bo).await,
			Command::Key(ko) => self.cmd_key(ko).await,
			Command::Worker(wo) => self.cmd_worker(wo).await,
			Command::Block(bo) => self.cmd_block(bo).await,
			Command::Meta(mo) => self.cmd_meta(mo).await,
			Command::Stats(so) => self.cmd_stats(so).await,
			Command::Repair(ro) => self.cmd_repair(ro).await,

			_ => unreachable!(),
		}
	}

	pub async fn api_request<T>(&self, req: T) -> Result<<T as RequestHandler>::Response, Error>
	where
		T: RequestHandler,
		AdminApiRequest: From<T>,
		<T as RequestHandler>::Response: TryFrom<TaggedAdminApiResponse>,
	{
		let req = AdminApiRequest::from(req);
		let req_name = req.name();
		match self
			.proxy_rpc_endpoint
			.call(&self.rpc_host, ProxyRpc::Proxy(req), PRIO_NORMAL)
			.await??
		{
			ProxyRpcResponse::ProxyApiOkResponse(resp) => {
				<T as RequestHandler>::Response::try_from(resp).map_err(|_| {
					Error::Message(format!("{} returned unexpected response", req_name))
				})
			}
			ProxyRpcResponse::ApiErrorResponse {
				http_code,
				error_code,
				message,
			} => Err(Error::Message(format!(
				"{} returned {} ({}): {}",
				req_name, error_code, http_code, message
			))),
			m => Err(Error::unexpected_rpc_message(m)),
		}
	}
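
	// Variant of api_request for per-node ("local") admin endpoints: the
	// request is wrapped in a MultiRequest addressed to the connected node
	// only, and exactly one success response is expected back.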
	pub async fn local_api_request<T>(
		&self,
		req: T,
	) -> Result<<T as RequestHandler>::Response, Error>
	where
		T: RequestHandler,
		MultiRequest<T>: RequestHandler<Response = MultiResponse<<T as RequestHandler>::Response>>,
		AdminApiRequest: From<MultiRequest<T>>,
		<MultiRequest<T> as RequestHandler>::Response: TryFrom<TaggedAdminApiResponse>,
	{
		let req = MultiRequest {
			node: hex::encode(self.rpc_host),
			body: req,
		};
		let resp = self.api_request(req).await?;

		if let Some((_, e)) = resp.error.into_iter().next() {
			return Err(Error::Message(e));
		}
		if resp.success.len() != 1 {
			return Err(Error::Message(format!(
				"{} responses returned, expected 1",
				resp.success.len()
			)));
		}
		Ok(resp.success.into_iter().next().unwrap().1)
	}
}
113  src/garage/cli_v2/node.rs  Normal file

@@ -0,0 +1,113 @@
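// `garage meta snapshot`, `garage stats` and `garage repair`: each request is
// fanned out either to the connected node or to all nodes ("*").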
use format_table::format_table;

use garage_util::error::*;

use garage_api_admin::api::*;

use crate::cli::structs::*;
use crate::cli_v2::*;

impl Cli {
	pub async fn cmd_meta(&self, cmd: MetaOperation) -> Result<(), Error> {
		let MetaOperation::Snapshot { all } = cmd;

		let res = self
			.api_request(CreateMetadataSnapshotRequest {
				node: if all {
					"*".to_string()
				} else {
					hex::encode(self.rpc_host)
				},
				body: LocalCreateMetadataSnapshotRequest,
			})
			.await?;

		let mut table = vec![];
		for (node, err) in res.error.iter() {
			table.push(format!("{:.16}\tError: {}", node, err));
		}
		for (node, _) in res.success.iter() {
			table.push(format!("{:.16}\tSnapshot created", node));
		}
		format_table(table);

		Ok(())
	}

	pub async fn cmd_stats(&self, cmd: StatsOpt) -> Result<(), Error> {
		let res = self
			.api_request(GetNodeStatisticsRequest {
				node: if cmd.all_nodes {
					"*".to_string()
				} else {
					hex::encode(self.rpc_host)
				},
				body: LocalGetNodeStatisticsRequest,
			})
			.await?;

		for (node, res) in res.success.iter() {
			println!("======================");
			println!("Stats for node {:.16}:\n", node);
			println!("{}\n", res.freeform);
		}

		for (node, err) in res.error.iter() {
			println!("======================");
			println!("Node {:.16}: error: {}\n", node, err);
		}

		let res = self.api_request(GetClusterStatisticsRequest).await?;
		println!("======================");
		println!("Cluster statistics:\n");
		println!("{}\n", res.freeform);

		Ok(())
	}

	pub async fn cmd_repair(&self, cmd: RepairOpt) -> Result<(), Error> {
		if !cmd.yes {
			return Err(Error::Message(
				"Please add --yes to start the repair operation".into(),
			));
		}

		let repair_type = match cmd.what {
			RepairWhat::Tables => RepairType::Tables,
			RepairWhat::Blocks => RepairType::Blocks,
			RepairWhat::Versions => RepairType::Versions,
			RepairWhat::MultipartUploads => RepairType::MultipartUploads,
			RepairWhat::BlockRefs => RepairType::BlockRefs,
			RepairWhat::BlockRc => RepairType::BlockRc,
			RepairWhat::Rebalance => RepairType::Rebalance,
			RepairWhat::Scrub { cmd } => RepairType::Scrub(match cmd {
				ScrubCmd::Start => ScrubCommand::Start,
				ScrubCmd::Cancel => ScrubCommand::Cancel,
				ScrubCmd::Pause => ScrubCommand::Pause,
				ScrubCmd::Resume => ScrubCommand::Resume,
			}),
		};

		let res = self
			.api_request(LaunchRepairOperationRequest {
				node: if cmd.all_nodes {
					"*".to_string()
				} else {
					hex::encode(self.rpc_host)
				},
				body: LocalLaunchRepairOperationRequest { repair_type },
			})
			.await?;

		let mut table = vec![];
		for (node, err) in res.error.iter() {
			table.push(format!("{:.16}\tError: {}", node, err));
		}
		for (node, _) in res.success.iter() {
			table.push(format!("{:.16}\tRepair launched", node));
		}
		format_table(table);

		Ok(())
	}
}
213  src/garage/cli_v2/worker.rs  Normal file

@@ -0,0 +1,213 @@
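// `garage worker` subcommands: list and inspect background workers on the
// connected node, and get/set worker variables on one node or all nodes.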
use format_table::format_table;
|
||||
|
||||
use garage_util::error::*;
|
||||
|
||||
use garage_api_admin::api::*;
|
||||
|
||||
use crate::cli::structs::*;
|
||||
use crate::cli_v2::*;
|
||||
|
||||
impl Cli {
|
||||
pub async fn cmd_worker(&self, cmd: WorkerOperation) -> Result<(), Error> {
|
||||
match cmd {
|
||||
WorkerOperation::List { opt } => self.cmd_list_workers(opt).await,
|
||||
WorkerOperation::Info { tid } => self.cmd_worker_info(tid).await,
|
||||
WorkerOperation::Get {
|
||||
all_nodes,
|
||||
variable,
|
||||
} => self.cmd_get_var(all_nodes, variable).await,
|
||||
WorkerOperation::Set {
|
||||
all_nodes,
|
||||
variable,
|
||||
value,
|
||||
} => self.cmd_set_var(all_nodes, variable, value).await,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn cmd_list_workers(&self, opt: WorkerListOpt) -> Result<(), Error> {
|
||||
let mut list = self
|
||||
.local_api_request(LocalListWorkersRequest {
|
||||
busy_only: opt.busy,
|
||||
error_only: opt.errors,
|
||||
})
|
||||
.await?
|
||||
.0;
|
||||
|
||||
list.sort_by_key(|info| {
|
||||
(
|
||||
match info.state {
|
||||
WorkerStateResp::Busy | WorkerStateResp::Throttled { .. } => 0,
|
||||
WorkerStateResp::Idle => 1,
|
||||
WorkerStateResp::Done => 2,
|
||||
},
|
||||
info.id,
|
||||
)
|
||||
});
|
||||
|
||||
let mut table =
|
||||
vec!["TID\tState\tName\tTranq\tDone\tQueue\tErrors\tConsec\tLast".to_string()];
|
||||
let tf = timeago::Formatter::new();
|
||||
for info in list.iter() {
|
||||
let err_ago = info
|
||||
.last_error
|
||||
.as_ref()
|
||||
.map(|x| tf.convert(Duration::from_secs(x.secs_ago)))
|
||||
.unwrap_or_default();
|
||||
let (total_err, consec_err) = if info.errors > 0 {
|
||||
(info.errors.to_string(), info.consecutive_errors.to_string())
|
||||
} else {
|
||||
("-".into(), "-".into())
|
||||
};
|
||||
|
||||
table.push(format!(
|
||||
"{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}",
|
||||
info.id,
|
||||
format_worker_state(&info.state),
|
||||
info.name,
|
||||
info.tranquility
|
                    .as_ref()
                    .map(ToString::to_string)
                    .unwrap_or_else(|| "-".into()),
                info.progress.as_deref().unwrap_or("-"),
                info.queue_length
                    .as_ref()
                    .map(ToString::to_string)
                    .unwrap_or_else(|| "-".into()),
                total_err,
                consec_err,
                err_ago,
            ));
        }
        format_table(table);

        Ok(())
    }

    pub async fn cmd_worker_info(&self, tid: usize) -> Result<(), Error> {
        let info = self
            .local_api_request(LocalGetWorkerInfoRequest { id: tid as u64 })
            .await?
            .0;

        let mut table = vec![];
        table.push(format!("Task id:\t{}", info.id));
        table.push(format!("Worker name:\t{}", info.name));
        match &info.state {
            WorkerStateResp::Throttled { duration_secs } => {
                table.push(format!(
                    "Worker state:\tBusy (throttled, paused for {:.3}s)",
                    duration_secs
                ));
            }
            s => {
                table.push(format!("Worker state:\t{}", format_worker_state(s)));
            }
        };
        if let Some(tql) = info.tranquility {
            table.push(format!("Tranquility:\t{}", tql));
        }

        table.push("".into());
        table.push(format!("Total errors:\t{}", info.errors));
        table.push(format!("Consecutive errs:\t{}", info.consecutive_errors));
        if let Some(err) = info.last_error {
            table.push(format!("Last error:\t{}", err.message));
            let tf = timeago::Formatter::new();
            table.push(format!(
                "Last error time:\t{}",
                tf.convert(Duration::from_secs(err.secs_ago))
            ));
        }

        table.push("".into());
        if let Some(p) = info.progress {
            table.push(format!("Progress:\t{}", p));
        }
        if let Some(ql) = info.queue_length {
            table.push(format!("Queue length:\t{}", ql));
        }
        if let Some(pe) = info.persistent_errors {
            table.push(format!("Persistent errors:\t{}", pe));
        }

        for (i, s) in info.freeform.iter().enumerate() {
            if i == 0 {
                if table.last() != Some(&"".into()) {
                    table.push("".into());
                }
                table.push(format!("Message:\t{}", s));
            } else {
                table.push(format!("\t{}", s));
            }
        }
        format_table(table);

        Ok(())
    }

    pub async fn cmd_get_var(&self, all: bool, var: Option<String>) -> Result<(), Error> {
        let res = self
            .api_request(GetWorkerVariableRequest {
                node: if all {
                    "*".to_string()
                } else {
                    hex::encode(self.rpc_host)
                },
                body: LocalGetWorkerVariableRequest { variable: var },
            })
            .await?;

        let mut table = vec![];
        for (node, vars) in res.success.iter() {
            for (key, val) in vars.0.iter() {
                table.push(format!("{:.16}\t{}\t{}", node, key, val));
            }
        }
        format_table(table);

        for (node, err) in res.error.iter() {
            eprintln!("{:.16}: error: {}", node, err);
        }

        Ok(())
    }

    pub async fn cmd_set_var(
        &self,
        all: bool,
        variable: String,
        value: String,
    ) -> Result<(), Error> {
        let res = self
            .api_request(SetWorkerVariableRequest {
                node: if all {
                    "*".to_string()
                } else {
                    hex::encode(self.rpc_host)
                },
                body: LocalSetWorkerVariableRequest { variable, value },
            })
            .await?;

        let mut table = vec![];
        for (node, kv) in res.success.iter() {
            table.push(format!("{:.16}\t{}\t{}", node, kv.variable, kv.value));
        }
        format_table(table);

        for (node, err) in res.error.iter() {
            eprintln!("{:.16}: error: {}", node, err);
        }

        Ok(())
    }
}

fn format_worker_state(s: &WorkerStateResp) -> &'static str {
    match s {
        WorkerStateResp::Busy => "Busy",
        WorkerStateResp::Throttled { .. } => "Busy*",
        WorkerStateResp::Idle => "Idle",
        WorkerStateResp::Done => "Done",
    }
}
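All of the commands above build rows as tab-separated strings and hand them to format_table. A minimal sketch of that contract, assuming format_table simply aligns '\t'-separated columns (an illustration only, not Garage's actual implementation):

// Toy stand-in for format_table: pad each column to the widest cell.
fn print_table(rows: Vec<String>) {
    let cells: Vec<Vec<&str>> = rows.iter().map(|r| r.split('\t').collect()).collect();
    let ncols = cells.iter().map(|r| r.len()).max().unwrap_or(0);
    let widths: Vec<usize> = (0..ncols)
        .map(|i| cells.iter().map(|r| r.get(i).map_or(0, |c| c.len())).max().unwrap_or(0))
        .collect();
    for row in &cells {
        let line: Vec<String> = row
            .iter()
            .enumerate()
            .map(|(i, c)| format!("{:width$}", c, width = widths[i]))
            .collect();
        println!("{}", line.join("  ").trim_end());
    }
}

fn main() {
    // Rows shaped like the ones cmd_worker_info pushes (values hypothetical).
    print_table(vec![
        "Task id:\t12".into(),
        "Worker name:\tblock resync worker".into(),
        "Worker state:\tBusy".into(),
    ]);
}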
@@ -4,9 +4,8 @@
#[macro_use]
extern crate tracing;

mod admin;
mod cli;
mod repair;
mod cli_v2;
mod secrets;
mod server;
#[cfg(feature = "telemetry-otlp")]

@@ -34,10 +33,9 @@ use garage_util::error::*;
use garage_rpc::system::*;
use garage_rpc::*;

use garage_model::helper::error::Error as HelperError;
use garage_api_admin::api_server::{AdminRpc as ProxyRpc, ADMIN_RPC_PATH as PROXY_RPC_PATH};

use admin::*;
use cli::*;
use cli::structs::*;
use secrets::Secrets;

#[derive(StructOpt, Debug)]

@@ -145,13 +143,13 @@ async fn main() {
    let res = match opt.cmd {
        Command::Server => server::run_server(opt.config_file, opt.secrets).await,
        Command::OfflineRepair(repair_opt) => {
            repair::offline::offline_repair(opt.config_file, opt.secrets, repair_opt).await
            cli::repair::offline_repair(opt.config_file, opt.secrets, repair_opt).await
        }
        Command::ConvertDb(conv_opt) => {
            cli::convert_db::do_conversion(conv_opt).map_err(From::from)
        }
        Command::Node(NodeOperation::NodeId(node_id_opt)) => {
            node_id_command(opt.config_file, node_id_opt.quiet)
            cli::init::node_id_command(opt.config_file, node_id_opt.quiet)
        }
        _ => cli_command(opt).await,
    };

@@ -252,7 +250,7 @@ async fn cli_command(opt: Opt) -> Result<(), Error> {
        (id, addrs[0], false)
    } else {
        let node_id = garage_rpc::system::read_node_id(&config.as_ref().unwrap().metadata_dir)
            .err_context(READ_KEY_ERROR)?;
            .err_context(cli::init::READ_KEY_ERROR)?;
        if let Some(a) = config.as_ref().and_then(|c| c.rpc_public_addr.as_ref()) {
            use std::net::ToSocketAddrs;
            let a = a

@@ -282,12 +280,13 @@ async fn cli_command(opt: Opt) -> Result<(), Error> {
    }

    let system_rpc_endpoint = netapp.endpoint::<SystemRpc, ()>(SYSTEM_RPC_PATH.into());
    let admin_rpc_endpoint = netapp.endpoint::<AdminRpc, ()>(ADMIN_RPC_PATH.into());
    let proxy_rpc_endpoint = netapp.endpoint::<ProxyRpc, ()>(PROXY_RPC_PATH.into());

    match cli_command_dispatch(opt.cmd, &system_rpc_endpoint, &admin_rpc_endpoint, id).await {
        Err(HelperError::Internal(i)) => Err(Error::Message(format!("Internal error: {}", i))),
        Err(HelperError::BadRequest(b)) => Err(Error::Message(b)),
        Err(e) => Err(Error::Message(format!("{}", e))),
        Ok(x) => Ok(x),
    }
    let cli = cli_v2::Cli {
        system_rpc_endpoint,
        proxy_rpc_endpoint,
        rpc_host: id,
    };

    cli.handle(opt.cmd).await
}

@@ -1,2 +0,0 @@
pub mod offline;
pub mod online;
@@ -14,7 +14,6 @@ use garage_web::WebServer;
#[cfg(feature = "k2v")]
use garage_api_k2v::api_server::K2VApiServer;

use crate::admin::*;
use crate::secrets::{fill_secrets, Secrets};
#[cfg(feature = "telemetry-otlp")]
use crate::tracing_setup::*;

@@ -66,6 +65,7 @@ pub async fn run_server(config_file: PathBuf, secrets: Secrets) -> Result<(), Er
    info!("Initialize Admin API server and metrics collector...");
    let admin_server = AdminApiServer::new(
        garage.clone(),
        background.clone(),
        #[cfg(feature = "metrics")]
        metrics_exporter,
    );

@@ -73,9 +73,6 @@ pub async fn run_server(config_file: PathBuf, secrets: Secrets) -> Result<(), Er
    info!("Launching internal Garage cluster communications...");
    let run_system = tokio::spawn(garage.system.clone().run(watch_cancel.clone()));

    info!("Create admin RPC handler...");
    AdminRpcHandler::new(garage.clone(), background.clone());

    // ---- Launch public-facing API servers ----

    let mut servers = vec![];

@@ -113,7 +110,7 @@ pub async fn run_server(config_file: PathBuf, secrets: Secrets) -> Result<(), Er

    if let Some(web_config) = &config.s3_web {
        info!("Initializing web server...");
        let web_server = WebServer::new(garage.clone(), &web_config);
        let web_server = WebServer::new(garage.clone(), web_config.root_domain.clone());
        servers.push((
            "Web",
            tokio::spawn(web_server.run(web_config.bind_addr.clone(), watch_cancel.clone())),

@@ -12,7 +12,7 @@ pub fn build_client(key: &Key) -> Client {
        .endpoint_url(format!("http://127.0.0.1:{}", DEFAULT_PORT))
        .region(super::REGION)
        .credentials_provider(credentials)
        .behavior_version(BehaviorVersion::v2024_03_28())
        .behavior_version(BehaviorVersion::v2023_11_09())
        .build();

    Client::from_conf(config)

@@ -192,13 +192,16 @@ impl<'a> RequestBuilder<'a> {
            .collect::<HeaderMap>();

        let date = now.format(signature::LONG_DATETIME).to_string();
        all_headers.insert(signature::X_AMZ_DATE, HeaderValue::from_str(&date).unwrap());
        all_headers.insert(
            signature::payload::X_AMZ_DATE,
            HeaderValue::from_str(&date).unwrap(),
        );
        all_headers.insert(HOST, HeaderValue::from_str(&host).unwrap());

        let body_sha = match &self.body_signature {
        let body_sha = match self.body_signature {
            BodySignature::Unsigned => "UNSIGNED-PAYLOAD".to_owned(),
            BodySignature::Classic => hex::encode(garage_util::data::sha256sum(&self.body)),
            BodySignature::Streaming { chunk_size } => {
            BodySignature::Streaming(size) => {
                all_headers.insert(
                    CONTENT_ENCODING,
                    HeaderValue::from_str("aws-chunked").unwrap(),

@@ -213,59 +216,18 @@ impl<'a> RequestBuilder<'a> {
                // code.
                all_headers.insert(
                    CONTENT_LENGTH,
                    to_streaming_body(
                        &self.body,
                        *chunk_size,
                        String::new(),
                        signer.clone(),
                        now,
                        "",
                    )
                    .len()
                    .to_string()
                    .try_into()
                    .unwrap(),
                    to_streaming_body(&self.body, size, String::new(), signer.clone(), now, "")
                        .len()
                        .to_string()
                        .try_into()
                        .unwrap(),
                );

                "STREAMING-AWS4-HMAC-SHA256-PAYLOAD".to_owned()
            }
            BodySignature::StreamingUnsignedTrailer {
                chunk_size,
                trailer_algorithm,
                trailer_value,
            } => {
                all_headers.insert(
                    CONTENT_ENCODING,
                    HeaderValue::from_str("aws-chunked").unwrap(),
                );
                all_headers.insert(
                    HeaderName::from_static("x-amz-decoded-content-length"),
                    HeaderValue::from_str(&self.body.len().to_string()).unwrap(),
                );
                all_headers.insert(
                    HeaderName::from_static("x-amz-trailer"),
                    HeaderValue::from_str(&trailer_algorithm).unwrap(),
                );

                all_headers.insert(
                    CONTENT_LENGTH,
                    to_streaming_unsigned_trailer_body(
                        &self.body,
                        *chunk_size,
                        &trailer_algorithm,
                        &trailer_value,
                    )
                    .len()
                    .to_string()
                    .try_into()
                    .unwrap(),
                );

                "STREAMING-UNSIGNED-PAYLOAD-TRAILER".to_owned()
            }
        };
        all_headers.insert(
            signature::X_AMZ_CONTENT_SHA256,
            signature::payload::X_AMZ_CONTENT_SH256,
            HeaderValue::from_str(&body_sha).unwrap(),
        );

@@ -314,26 +276,10 @@ impl<'a> RequestBuilder<'a> {
        let mut request = Request::builder();
        *request.headers_mut().unwrap() = all_headers;

        let body = match &self.body_signature {
            BodySignature::Streaming { chunk_size } => to_streaming_body(
                &self.body,
                *chunk_size,
                signature,
                streaming_signer,
                now,
                &scope,
            ),
            BodySignature::StreamingUnsignedTrailer {
                chunk_size,
                trailer_algorithm,
                trailer_value,
            } => to_streaming_unsigned_trailer_body(
                &self.body,
                *chunk_size,
                &trailer_algorithm,
                &trailer_value,
            ),
            _ => self.body.clone(),
        let body = if let BodySignature::Streaming(size) = self.body_signature {
            to_streaming_body(&self.body, size, signature, streaming_signer, now, &scope)
        } else {
            self.body.clone()
        };
        let request = request
            .uri(uri)

@@ -362,14 +308,7 @@ impl<'a> RequestBuilder<'a> {
pub enum BodySignature {
    Unsigned,
    Classic,
    Streaming {
        chunk_size: usize,
    },
    StreamingUnsignedTrailer {
        chunk_size: usize,
        trailer_algorithm: String,
        trailer_value: String,
    },
    Streaming(usize),
}

fn query_param_to_string(params: &HashMap<String, Option<String>>) -> String {

@@ -424,26 +363,3 @@ fn to_streaming_body(

    res
}

fn to_streaming_unsigned_trailer_body(
    body: &[u8],
    chunk_size: usize,
    trailer_algorithm: &str,
    trailer_value: &str,
) -> Vec<u8> {
    let mut res = Vec::with_capacity(body.len());
    for chunk in body.chunks(chunk_size) {
        let header = format!("{:x}\r\n", chunk.len());
        res.extend_from_slice(header.as_bytes());
        res.extend_from_slice(chunk);
        res.extend_from_slice(b"\r\n");
    }

    res.extend_from_slice(b"0\r\n");
    res.extend_from_slice(trailer_algorithm.as_bytes());
    res.extend_from_slice(b":");
    res.extend_from_slice(trailer_value.as_bytes());
    res.extend_from_slice(b"\n\r\n\r\n");

    res
}

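The to_streaming_unsigned_trailer_body helper shown above frames a body in aws-chunked encoding with an unsigned trailer. A worked example of the bytes it produces, assuming chunk_size = 8; the trailer value here is the base64 big-endian CRC32 of the body, included purely for illustration:

fn main() {
    let body = b"hello world";
    // Each chunk is "{len:x}\r\n" + data + "\r\n"; a zero-length chunk followed
    // by the "name:value" trailer terminates the stream.
    let mut res = Vec::new();
    for chunk in body.chunks(8) {
        res.extend_from_slice(format!("{:x}\r\n", chunk.len()).as_bytes());
        res.extend_from_slice(chunk);
        res.extend_from_slice(b"\r\n");
    }
    res.extend_from_slice(b"0\r\nx-amz-checksum-crc32:DUoRhQ==\n\r\n\r\n");

    let expected = b"8\r\nhello wo\r\n3\r\nrld\r\n0\r\nx-amz-checksum-crc32:DUoRhQ==\n\r\n\r\n";
    assert_eq!(&res[..], &expected[..]);
}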
@@ -13,6 +13,7 @@ static GARAGE_TEST_SECRET: &str =

#[derive(Debug, Default, Clone)]
pub struct Key {
    pub name: Option<String>,
    pub id: String,
    pub secret: String,
}

@@ -99,10 +100,7 @@ api_bind_addr = "127.0.0.1:{admin_port}"
        .arg("server")
        .stdout(stdout)
        .stderr(stderr)
        .env(
            "RUST_LOG",
            "garage=debug,garage_api_common=trace,garage_api_s3=trace",
        )
        .env("RUST_LOG", "garage=debug,garage_api=trace")
        .spawn()
        .expect("Could not start garage");

@@ -215,7 +213,10 @@ api_bind_addr = "127.0.0.1:{admin_port}"
        assert!(!key.id.is_empty(), "Invalid key: Key ID is empty");
        assert!(!key.secret.is_empty(), "Invalid key: Key secret is empty");

        key
        Key {
            name: maybe_name.map(String::from),
            ..key
        }
    }
}

@@ -1,6 +1,5 @@
use crate::common;
use aws_sdk_s3::error::SdkError;
use aws_sdk_s3::primitives::{ByteStream, DateTime};
use aws_sdk_s3::primitives::ByteStream;
use aws_sdk_s3::types::{Delete, ObjectIdentifier};

const STD_KEY: &str = "hello world";

@@ -126,129 +125,6 @@ async fn test_putobject() {
    }
}

#[tokio::test]
async fn test_precondition() {
    let ctx = common::context();
    let bucket = ctx.create_bucket("precondition");

    let etag = "\"46cf18a9b447991b450cad3facf5937e\"";
    let etag2 = "\"ae4984b984cd984fe98d4efa954dce98\"";
    let data = ByteStream::from_static(BODY);

    let r = ctx
        .client
        .put_object()
        .bucket(&bucket)
        .key(STD_KEY)
        .body(data)
        .send()
        .await
        .unwrap();

    assert_eq!(r.e_tag.unwrap().as_str(), etag);

    let last_modified;
    {
        let o = ctx
            .client
            .get_object()
            .bucket(&bucket)
            .key(STD_KEY)
            .if_match(etag)
            .send()
            .await
            .unwrap();
        assert_eq!(o.e_tag.as_ref().unwrap().as_str(), etag);
        last_modified = o.last_modified.unwrap();

        let err = ctx
            .client
            .get_object()
            .bucket(&bucket)
            .key(STD_KEY)
            .if_match(etag2)
            .send()
            .await;
        assert!(
            matches!(err, Err(SdkError::ServiceError(se)) if se.raw().status().as_u16() == 412)
        );
    }
    {
        let o = ctx
            .client
            .get_object()
            .bucket(&bucket)
            .key(STD_KEY)
            .if_none_match(etag2)
            .send()
            .await
            .unwrap();
        assert_eq!(o.e_tag.as_ref().unwrap().as_str(), etag);

        let err = ctx
            .client
            .get_object()
            .bucket(&bucket)
            .key(STD_KEY)
            .if_none_match(etag)
            .send()
            .await;
        assert!(
            matches!(err, Err(SdkError::ServiceError(se)) if se.raw().status().as_u16() == 304)
        );
    }
    let older_date = DateTime::from_secs_f64(last_modified.as_secs_f64() - 10.0);
    let newer_date = DateTime::from_secs_f64(last_modified.as_secs_f64() + 10.0);
    {
        let err = ctx
            .client
            .get_object()
            .bucket(&bucket)
            .key(STD_KEY)
            .if_modified_since(newer_date)
            .send()
            .await;
        assert!(
            matches!(err, Err(SdkError::ServiceError(se)) if se.raw().status().as_u16() == 304)
        );

        let o = ctx
            .client
            .get_object()
            .bucket(&bucket)
            .key(STD_KEY)
            .if_modified_since(older_date)
            .send()
            .await
            .unwrap();
        assert_eq!(o.e_tag.as_ref().unwrap().as_str(), etag);
    }
    {
        let err = ctx
            .client
            .get_object()
            .bucket(&bucket)
            .key(STD_KEY)
            .if_unmodified_since(older_date)
            .send()
            .await;
        assert!(
            matches!(err, Err(SdkError::ServiceError(se)) if se.raw().status().as_u16() == 412)
        );

        let o = ctx
            .client
            .get_object()
            .bucket(&bucket)
            .key(STD_KEY)
            .if_unmodified_since(newer_date)
            .send()
            .await
            .unwrap();
        assert_eq!(o.e_tag.as_ref().unwrap().as_str(), etag);
    }
}

#[tokio::test]
async fn test_getobject() {
    let ctx = common::context();

@@ -313,14 +189,12 @@ async fn test_getobject() {

#[tokio::test]
async fn test_metadata() {
    use aws_sdk_s3::primitives::{DateTime, DateTimeFormat};

    let ctx = common::context();
    let bucket = ctx.create_bucket("testmetadata");

    let etag = "\"46cf18a9b447991b450cad3facf5937e\"";
    let exp = DateTime::from_secs(10000000000);
    let exp2 = DateTime::from_secs(10000500000);
    let exp = aws_sdk_s3::primitives::DateTime::from_secs(10000000000);
    let exp2 = aws_sdk_s3::primitives::DateTime::from_secs(10000500000);

    {
        // Note. The AWS client SDK adds a Content-Type header

@@ -353,7 +227,7 @@ async fn test_metadata() {
        assert_eq!(o.content_disposition, None);
        assert_eq!(o.content_encoding, None);
        assert_eq!(o.content_language, None);
        assert_eq!(o.expires_string, None);
        assert_eq!(o.expires, None);
        assert_eq!(o.metadata.unwrap_or_default().len(), 0);

        let o = ctx

@@ -376,10 +250,7 @@ async fn test_metadata() {
        assert_eq!(o.content_disposition.unwrap().as_str(), "cddummy");
        assert_eq!(o.content_encoding.unwrap().as_str(), "cedummy");
        assert_eq!(o.content_language.unwrap().as_str(), "cldummy");
        assert_eq!(
            o.expires_string.unwrap(),
            exp.fmt(DateTimeFormat::HttpDate).unwrap()
        );
        assert_eq!(o.expires.unwrap(), exp);
    }

    {

@@ -417,10 +288,7 @@ async fn test_metadata() {
        assert_eq!(o.content_disposition.unwrap().as_str(), "cdtest");
        assert_eq!(o.content_encoding.unwrap().as_str(), "cetest");
        assert_eq!(o.content_language.unwrap().as_str(), "cltest");
        assert_eq!(
            o.expires_string.unwrap(),
            exp2.fmt(DateTimeFormat::HttpDate).unwrap()
        );
        assert_eq!(o.expires.unwrap(), exp2);
        let mut meta = o.metadata.unwrap();
        assert_eq!(meta.remove("testmeta").unwrap(), "hello people");
        assert_eq!(meta.remove("nice-unicode-meta").unwrap(), "宅配便");

@@ -446,10 +314,7 @@ async fn test_metadata() {
        assert_eq!(o.content_disposition.unwrap().as_str(), "cddummy");
        assert_eq!(o.content_encoding.unwrap().as_str(), "cedummy");
        assert_eq!(o.content_language.unwrap().as_str(), "cldummy");
        assert_eq!(
            o.expires_string.unwrap(),
            exp.fmt(DateTimeFormat::HttpDate).unwrap()
        );
        assert_eq!(o.expires.unwrap(), exp);
    }
}

@@ -1,8 +1,5 @@
use std::collections::HashMap;

use base64::prelude::*;
use crc32fast::Hasher as Crc32;

use crate::common;
use crate::common::ext::CommandExt;
use common::custom_requester::BodySignature;

@@ -24,7 +21,7 @@ async fn test_putobject_streaming() {
        let content_type = "text/csv";
        let mut headers = HashMap::new();
        headers.insert("content-type".to_owned(), content_type.to_owned());
        let res = ctx
        let _ = ctx
            .custom_request
            .builder(bucket.clone())
            .method(Method::PUT)

@@ -32,11 +29,10 @@ async fn test_putobject_streaming() {
            .signed_headers(headers)
            .vhost_style(true)
            .body(vec![])
            .body_signature(BodySignature::Streaming { chunk_size: 10 })
            .body_signature(BodySignature::Streaming(10))
            .send()
            .await
            .unwrap();
        assert!(res.status().is_success(), "got response: {:?}", res);

        // assert_eq!(r.e_tag.unwrap().as_str(), etag);
        // We return a version ID here

@@ -69,14 +65,7 @@ async fn test_putobject_streaming() {
    {
        let etag = "\"46cf18a9b447991b450cad3facf5937e\"";

        let mut crc32 = Crc32::new();
        crc32.update(&BODY[..]);
        let crc32 = BASE64_STANDARD.encode(&u32::to_be_bytes(crc32.finalize())[..]);

        let mut headers = HashMap::new();
        headers.insert("x-amz-checksum-crc32".to_owned(), crc32.clone());

        let res = ctx
        let _ = ctx
            .custom_request
            .builder(bucket.clone())
            .method(Method::PUT)

@@ -84,13 +73,11 @@ async fn test_putobject_streaming() {
            //fail
            .path("abc".to_owned())
            .vhost_style(true)
            .signed_headers(headers)
            .body(BODY.to_vec())
            .body_signature(BodySignature::Streaming { chunk_size: 16 })
            .body_signature(BodySignature::Streaming(16))
            .send()
            .await
            .unwrap();
        assert!(res.status().is_success(), "got response: {:?}", res);

        // assert_eq!(r.e_tag.unwrap().as_str(), etag);
        // assert!(r.version_id.is_some());

@@ -101,7 +88,6 @@ async fn test_putobject_streaming() {
            .bucket(&bucket)
            //.key(CTRL_KEY)
            .key("abc")
            .checksum_mode(aws_sdk_s3::types::ChecksumMode::Enabled)
            .send()
            .await
            .unwrap();

@@ -112,142 +98,6 @@ async fn test_putobject_streaming() {
        assert_eq!(o.content_length.unwrap(), 62);
        assert_eq!(o.parts_count, None);
        assert_eq!(o.tag_count, None);
        assert_eq!(o.checksum_crc32.unwrap(), crc32);
    }
}

#[tokio::test]
async fn test_putobject_streaming_unsigned_trailer() {
    let ctx = common::context();
    let bucket = ctx.create_bucket("putobject-streaming-unsigned-trailer");

    {
        // Send an empty object (can serve as a directory marker)
        // with a content type
        let etag = "\"d41d8cd98f00b204e9800998ecf8427e\"";
        let content_type = "text/csv";
        let mut headers = HashMap::new();
        headers.insert("content-type".to_owned(), content_type.to_owned());

        let empty_crc32 = BASE64_STANDARD.encode(&u32::to_be_bytes(Crc32::new().finalize())[..]);

        let res = ctx
            .custom_request
            .builder(bucket.clone())
            .method(Method::PUT)
            .path(STD_KEY.to_owned())
            .signed_headers(headers)
            .vhost_style(true)
            .body(vec![])
            .body_signature(BodySignature::StreamingUnsignedTrailer {
                chunk_size: 10,
                trailer_algorithm: "x-amz-checksum-crc32".into(),
                trailer_value: empty_crc32,
            })
            .send()
            .await
            .unwrap();
        assert!(res.status().is_success(), "got response: {:?}", res);

        // assert_eq!(r.e_tag.unwrap().as_str(), etag);
        // We return a version ID here
        // We should check if Amazon is returning one when versioning is not enabled
        // assert!(r.version_id.is_some());

        //let _version = r.version_id.unwrap();

        let o = ctx
            .client
            .get_object()
            .bucket(&bucket)
            .key(STD_KEY)
            .send()
            .await
            .unwrap();

        assert_bytes_eq!(o.body, b"");
        assert_eq!(o.e_tag.unwrap(), etag);
        // We do not return version ID
        // We should check if Amazon is returning one when versioning is not enabled
        // assert_eq!(o.version_id.unwrap(), _version);
        assert_eq!(o.content_type.unwrap(), content_type);
        assert!(o.last_modified.is_some());
        assert_eq!(o.content_length.unwrap(), 0);
        assert_eq!(o.parts_count, None);
        assert_eq!(o.tag_count, None);
    }

    {
        let etag = "\"46cf18a9b447991b450cad3facf5937e\"";

        let mut crc32 = Crc32::new();
        crc32.update(&BODY[..]);
        let crc32 = BASE64_STANDARD.encode(&u32::to_be_bytes(crc32.finalize())[..]);

        // try sending with wrong crc32, check that it fails
        let err_res = ctx
            .custom_request
            .builder(bucket.clone())
            .method(Method::PUT)
            //.path(CTRL_KEY.to_owned()) at the moment custom_request does not encode url so this
            //fail
            .path("abc".to_owned())
            .vhost_style(true)
            .body(BODY.to_vec())
            .body_signature(BodySignature::StreamingUnsignedTrailer {
                chunk_size: 16,
                trailer_algorithm: "x-amz-checksum-crc32".into(),
                trailer_value: "2Yp9Yw==".into(),
            })
            .send()
            .await
            .unwrap();
        assert!(
            err_res.status().is_client_error(),
            "got response: {:?}",
            err_res
        );

        let res = ctx
            .custom_request
            .builder(bucket.clone())
            .method(Method::PUT)
            //.path(CTRL_KEY.to_owned()) at the moment custom_request does not encode url so this
            //fail
            .path("abc".to_owned())
            .vhost_style(true)
            .body(BODY.to_vec())
            .body_signature(BodySignature::StreamingUnsignedTrailer {
                chunk_size: 16,
                trailer_algorithm: "x-amz-checksum-crc32".into(),
                trailer_value: crc32.clone(),
            })
            .send()
            .await
            .unwrap();
        assert!(res.status().is_success(), "got response: {:?}", res);

        // assert_eq!(r.e_tag.unwrap().as_str(), etag);
        // assert!(r.version_id.is_some());

        let o = ctx
            .client
            .get_object()
            .bucket(&bucket)
            //.key(CTRL_KEY)
            .key("abc")
            .checksum_mode(aws_sdk_s3::types::ChecksumMode::Enabled)
            .send()
            .await
            .unwrap();

        assert_bytes_eq!(o.body, BODY);
        assert_eq!(o.e_tag.unwrap(), etag);
        assert!(o.last_modified.is_some());
        assert_eq!(o.content_length.unwrap(), 62);
        assert_eq!(o.parts_count, None);
        assert_eq!(o.tag_count, None);
        assert_eq!(o.checksum_crc32.unwrap(), crc32);
    }
}

@@ -269,7 +119,7 @@ async fn test_create_bucket_streaming() {
        .custom_request
        .builder(bucket.to_owned())
        .method(Method::PUT)
        .body_signature(BodySignature::Streaming { chunk_size: 10 })
        .body_signature(BodySignature::Streaming(10))
        .send()
        .await
        .unwrap();

@@ -324,7 +174,7 @@ async fn test_put_website_streaming() {
        .method(Method::PUT)
        .query_params(query)
        .body(website_config.as_bytes().to_vec())
        .body_signature(BodySignature::Streaming { chunk_size: 10 })
        .body_signature(BodySignature::Streaming(10))
        .send()
        .await
        .unwrap();

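For reference, the streaming tests above derive the x-amz-checksum-crc32 value as the big-endian CRC32 of the body, base64-encoded. A self-contained sketch using the same crc32fast and base64 crates as the test imports:

use base64::prelude::*;
use crc32fast::Hasher as Crc32;

// Compute the value AWS expects in x-amz-checksum-crc32 headers/trailers:
// base64 of the CRC32 digest in big-endian byte order.
fn checksum_crc32_header_value(body: &[u8]) -> String {
    let mut crc32 = Crc32::new();
    crc32.update(body);
    BASE64_STANDARD.encode(u32::to_be_bytes(crc32.finalize()))
}

fn main() {
    // An empty body yields "AAAAAA==" (CRC32 of nothing is 0), which is the
    // kind of value the empty-object trailer test above sends.
    assert_eq!(checksum_crc32_header_value(b""), "AAAAAA==");
    println!("{}", checksum_crc32_header_value(b"hello world"));
}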
@@ -5,13 +5,15 @@ use crate::json_body;
use assert_json_diff::assert_json_eq;
use aws_sdk_s3::{
    primitives::ByteStream,
    types::{CorsConfiguration, CorsRule, ErrorDocument, IndexDocument, WebsiteConfiguration},
    types::{
        Condition, CorsConfiguration, CorsRule, ErrorDocument, IndexDocument, Protocol, Redirect,
        RoutingRule, WebsiteConfiguration,
    },
};
use http::{Request, StatusCode};
use http_body_util::BodyExt;
use http_body_util::Full as FullBody;
use hyper::body::Bytes;
use hyper::header::LOCATION;
use hyper_util::client::legacy::Client;
use hyper_util::rt::TokioExecutor;
use serde_json::json;

@@ -296,33 +298,6 @@ async fn test_website_s3_api() {
        );
    }

    // Test x-amz-website-redirect-location
    {
        ctx.client
            .put_object()
            .bucket(&bucket)
            .key("test-redirect.html")
            .website_redirect_location("https://perdu.com")
            .send()
            .await
            .unwrap();

        let req = Request::builder()
            .method("GET")
            .uri(format!(
                "http://127.0.0.1:{}/test-redirect.html",
                ctx.garage.web_port
            ))
            .header("Host", format!("{}.web.garage", BCKT_NAME))
            .body(Body::new(Bytes::new()))
            .unwrap();

        let resp = client.request(req).await.unwrap();

        assert_eq!(resp.status(), StatusCode::MOVED_PERMANENTLY);
        assert_eq!(resp.headers().get(LOCATION).unwrap(), "https://perdu.com");
    }

    // Test CORS with an allowed preflight request
    {
        let req = Request::builder()

@@ -455,12 +430,18 @@ async fn test_website_check_domain() {
        res_body,
        json!({
            "code": "InvalidRequest",
            "message": "Bad request: No domain query string found",
            "message": "Bad request: Missing argument `domain` for endpoint",
            "region": "garage-integ-test",
            "path": "/check",
        })
    );

    // FIXME: Edge case with empty domain
    // Currently, empty domain is interpreted as an absent parameter
    // due to logic in router_macros.rs, so this test fails.
    // Maybe we want empty parameters to be acceptable? But that might
    // break a lot of S3 stuff.
    /*
    let admin_req = || {
        Request::builder()
            .method("GET")

@@ -484,6 +465,7 @@ async fn test_website_check_domain() {
            "path": "/check",
        })
    );
    */

    let admin_req = || {
        Request::builder()

@@ -533,3 +515,444 @@ async fn test_website_check_domain() {
        })
    );
}

#[tokio::test]
async fn test_website_redirect_full_bucket() {
    const BCKT_NAME: &str = "my-redirect-full";
    let ctx = common::context();
    let bucket = ctx.create_bucket(BCKT_NAME);

    let conf = WebsiteConfiguration::builder()
        .routing_rules(
            RoutingRule::builder()
                .condition(Condition::builder().key_prefix_equals("").build())
                .redirect(
                    Redirect::builder()
                        .protocol(Protocol::Https)
                        .host_name("other.tld")
                        .replace_key_prefix_with("")
                        .build(),
                )
                .build(),
        )
        .build();

    ctx.client
        .put_bucket_website()
        .bucket(&bucket)
        .website_configuration(conf)
        .send()
        .await
        .unwrap();

    let req = Request::builder()
        .method("GET")
        .uri(format!("http://127.0.0.1:{}/my-path", ctx.garage.web_port))
        .header("Host", format!("{}.web.garage", BCKT_NAME))
        .body(Body::new(Bytes::new()))
        .unwrap();

    let client = Client::builder(TokioExecutor::new()).build_http();
    let resp = client.request(req).await.unwrap();
    assert_eq!(resp.status(), StatusCode::FOUND);
    assert_eq!(
        resp.headers()
            .get(hyper::header::LOCATION)
            .unwrap()
            .to_str()
            .unwrap(),
        "https://other.tld/my-path"
    );
}

#[tokio::test]
async fn test_website_redirect() {
    const BCKT_NAME: &str = "my-redirect";
    let ctx = common::context();
    let bucket = ctx.create_bucket(BCKT_NAME);

    ctx.client
        .put_object()
        .bucket(&bucket)
        .key("index.html")
        .body(ByteStream::from_static(b"index"))
        .send()
        .await
        .unwrap();
    ctx.client
        .put_object()
        .bucket(&bucket)
        .key("404.html")
        .body(ByteStream::from_static(b"main 404"))
        .send()
        .await
        .unwrap();
    ctx.client
        .put_object()
        .bucket(&bucket)
        .key("static-file")
        .body(ByteStream::from_static(b"static file"))
        .send()
        .await
        .unwrap();

    let mut conf = WebsiteConfiguration::builder()
        .index_document(
            IndexDocument::builder()
                .suffix("home.html")
                .build()
                .unwrap(),
        )
        .error_document(ErrorDocument::builder().key("404.html").build().unwrap());

    for (prefix, condition) in [("unconditional", false), ("conditional", true)] {
        let code = condition.then(|| "404".to_string());
        conf = conf
            // simple redirect
            .routing_rules(
                RoutingRule::builder()
                    .condition(
                        Condition::builder()
                            .set_http_error_code_returned_equals(code.clone())
                            .key_prefix_equals(format!("{prefix}/redirect-prefix/"))
                            .build(),
                    )
                    .redirect(
                        Redirect::builder()
                            .http_redirect_code("302")
                            .replace_key_prefix_with("other-prefix/")
                            .build(),
                    )
                    .build(),
            )
            .routing_rules(
                RoutingRule::builder()
                    .condition(
                        Condition::builder()
                            .set_http_error_code_returned_equals(code.clone())
                            .key_prefix_equals(format!("{prefix}/redirect-prefix-307/"))
                            .build(),
                    )
                    .redirect(
                        Redirect::builder()
                            .http_redirect_code("307")
                            .replace_key_prefix_with("other-prefix/")
                            .build(),
                    )
                    .build(),
            )
            // simple redirect
            .routing_rules(
                RoutingRule::builder()
                    .condition(
                        Condition::builder()
                            .set_http_error_code_returned_equals(code.clone())
                            .key_prefix_equals(format!("{prefix}/redirect-fixed/"))
                            .build(),
                    )
                    .redirect(
                        Redirect::builder()
                            .http_redirect_code("302")
                            .replace_key_with("fixed_key")
                            .build(),
                    )
                    .build(),
            )
            // stream other file
            .routing_rules(
                RoutingRule::builder()
                    .condition(
                        Condition::builder()
                            .set_http_error_code_returned_equals(code.clone())
                            .key_prefix_equals(format!("{prefix}/stream-fixed/"))
                            .build(),
                    )
                    .redirect(
                        Redirect::builder()
                            .http_redirect_code("200")
                            .replace_key_with("static-file")
                            .build(),
                    )
                    .build(),
            )
            // stream other file as error
            .routing_rules(
                RoutingRule::builder()
                    .condition(
                        Condition::builder()
                            .set_http_error_code_returned_equals(code.clone())
                            .key_prefix_equals(format!("{prefix}/stream-404/"))
                            .build(),
                    )
                    .redirect(
                        Redirect::builder()
                            .http_redirect_code("404")
                            .replace_key_with("static-file")
                            .build(),
                    )
                    .build(),
            )
            // fail to stream other file
            .routing_rules(
                RoutingRule::builder()
                    .condition(
                        Condition::builder()
                            .set_http_error_code_returned_equals(code.clone())
                            .key_prefix_equals(format!("{prefix}/stream-missing/"))
                            .build(),
                    )
                    .redirect(
                        Redirect::builder()
                            .http_redirect_code("200")
                            .replace_key_with("missing-file")
                            .build(),
                    )
                    .build(),
            );
    }
    let conf = conf.build();

    ctx.client
        .put_bucket_website()
        .bucket(&bucket)
        .website_configuration(conf.clone())
        .send()
        .await
        .unwrap();

    let stored_cfg = ctx
        .client
        .get_bucket_website()
        .bucket(&bucket)
        .send()
        .await
        .unwrap();
    assert_eq!(stored_cfg.index_document, conf.index_document);
    assert_eq!(stored_cfg.error_document, conf.error_document);
    assert_eq!(stored_cfg.routing_rules, conf.routing_rules);

    let req = |path| {
        Request::builder()
            .method("GET")
            .uri(format!(
                "http://127.0.0.1:{}/{}/path",
                ctx.garage.web_port, path
            ))
            .header("Host", format!("{}.web.garage", BCKT_NAME))
            .body(Body::new(Bytes::new()))
            .unwrap()
    };

    test_redirect_helper("unconditional", true, &req).await;
    test_redirect_helper("conditional", true, &req).await;
    for prefix in ["unconditional", "conditional"] {
        for rule_path in [
            "redirect-prefix",
            "redirect-prefix-307",
            "redirect-fixed",
            "stream-fixed",
            "stream-404",
            "stream-missing",
        ] {
            ctx.client
                .put_object()
                .bucket(&bucket)
                .key(format!("{prefix}/{rule_path}/path"))
                .body(ByteStream::from_static(b"i exist"))
                .send()
                .await
                .unwrap();
        }
    }
    test_redirect_helper("unconditional", true, &req).await;
    test_redirect_helper("conditional", false, &req).await;
}

async fn test_redirect_helper(
    prefix: &str,
    should_see_redirect: bool,
    req: impl Fn(String) -> Request<http_body_util::Full<Bytes>>,
) {
    use http::header;
    let client = Client::builder(TokioExecutor::new()).build_http();
    let expected_body = b"i exist".as_ref();

    let resp = client
        .request(req(format!("{prefix}/redirect-prefix")))
        .await
        .unwrap();
    if should_see_redirect {
        assert_eq!(resp.status(), StatusCode::FOUND);
        assert_eq!(
            resp.headers()
                .get(header::LOCATION)
                .unwrap()
                .to_str()
                .unwrap(),
            "/other-prefix/path"
        );
        assert!(resp
            .into_body()
            .collect()
            .await
            .unwrap()
            .to_bytes()
            .is_empty());
    } else {
        assert_eq!(resp.status(), StatusCode::OK);
        assert!(resp.headers().get(header::LOCATION).is_none());
        assert_eq!(
            resp.into_body().collect().await.unwrap().to_bytes(),
            expected_body,
        );
    }

    let resp = client
        .request(req(format!("{prefix}/redirect-prefix-307")))
        .await
        .unwrap();
    if should_see_redirect {
        assert_eq!(resp.status(), StatusCode::TEMPORARY_REDIRECT);
        assert_eq!(
            resp.headers()
                .get(header::LOCATION)
                .unwrap()
                .to_str()
                .unwrap(),
            "/other-prefix/path"
        );
        assert!(resp
            .into_body()
            .collect()
            .await
            .unwrap()
            .to_bytes()
            .is_empty());
    } else {
        assert_eq!(resp.status(), StatusCode::OK);
        assert!(resp.headers().get(header::LOCATION).is_none());
        assert_eq!(
            resp.into_body().collect().await.unwrap().to_bytes(),
            expected_body,
        );
    }

    let resp = client
        .request(req(format!("{prefix}/redirect-fixed")))
        .await
        .unwrap();
    if should_see_redirect {
        assert_eq!(resp.status(), StatusCode::FOUND);
        assert_eq!(
            resp.headers()
                .get(header::LOCATION)
                .unwrap()
                .to_str()
                .unwrap(),
            "/fixed_key"
        );
        assert!(resp
            .into_body()
            .collect()
            .await
            .unwrap()
            .to_bytes()
            .is_empty());
    } else {
        assert_eq!(resp.status(), StatusCode::OK);
        assert!(resp.headers().get(header::LOCATION).is_none());
        assert_eq!(
            resp.into_body().collect().await.unwrap().to_bytes(),
            expected_body,
        );
    }
    let resp = client
        .request(req(format!("{prefix}/stream-fixed")))
        .await
        .unwrap();
    if should_see_redirect {
        assert_eq!(resp.status(), StatusCode::OK);
        assert!(resp.headers().get(header::LOCATION).is_none());
        assert_eq!(
            resp.into_body().collect().await.unwrap().to_bytes(),
            b"static file".as_ref(),
        );
    } else {
        assert_eq!(resp.status(), StatusCode::OK);
        assert!(resp.headers().get(header::LOCATION).is_none());
        assert_eq!(
            resp.into_body().collect().await.unwrap().to_bytes(),
            expected_body,
        );
    }
    let resp = client
        .request(req(format!("{prefix}/stream-404")))
        .await
        .unwrap();
    if should_see_redirect {
        assert_eq!(resp.status(), StatusCode::NOT_FOUND);
        assert!(resp.headers().get(header::LOCATION).is_none());
        assert_eq!(
            resp.into_body().collect().await.unwrap().to_bytes(),
            b"static file".as_ref(),
        );
    } else {
        assert_eq!(resp.status(), StatusCode::OK);
        assert!(resp.headers().get(header::LOCATION).is_none());
        assert_eq!(
            resp.into_body().collect().await.unwrap().to_bytes(),
            expected_body,
        );
    }
    let resp = client
        .request(req(format!("{prefix}/stream-404")))
        .await
        .unwrap();
    if should_see_redirect {
        assert_eq!(resp.status(), StatusCode::NOT_FOUND);
        assert!(resp.headers().get(header::LOCATION).is_none());
        assert_eq!(
            resp.into_body().collect().await.unwrap().to_bytes(),
            b"static file".as_ref(),
        );
    } else {
        assert_eq!(resp.status(), StatusCode::OK);
        assert!(resp.headers().get(header::LOCATION).is_none());
        assert_eq!(
            resp.into_body().collect().await.unwrap().to_bytes(),
            expected_body,
        );
    }
}

#[tokio::test]
async fn test_website_invalid_redirect() {
    const BCKT_NAME: &str = "my-invalid-redirect";
    let ctx = common::context();
    let bucket = ctx.create_bucket(BCKT_NAME);

    let conf = WebsiteConfiguration::builder()
        .routing_rules(
            RoutingRule::builder()
                .condition(Condition::builder().key_prefix_equals("").build())
                .redirect(
                    Redirect::builder()
                        .protocol(Protocol::Https)
                        .host_name("other.tld")
                        .replace_key_prefix_with("")
                        // we don't allow 200 with hostname
                        .http_redirect_code("200")
                        .build(),
                )
                .build(),
        )
        .build();

    ctx.client
        .put_bucket_website()
        .bucket(&bucket)
        .website_configuration(conf)
        .send()
        .await
        .unwrap_err();
}

@@ -1,6 +1,6 @@
[package]
name = "garage_model"
version = "1.1.0"
version = "1.0.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"

@@ -119,7 +119,122 @@ mod v08 {
    impl garage_util::migrate::InitialFormat for Bucket {}
}

pub use v08::*;
mod v2 {
    use crate::permission::BucketKeyPerm;
    use garage_util::crdt;
    use garage_util::data::Uuid;
    use serde::{Deserialize, Serialize};

    use super::v08;

    pub use v08::{BucketQuotas, CorsRule, LifecycleExpiration, LifecycleFilter, LifecycleRule};

    #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
    pub struct Bucket {
        /// ID of the bucket
        pub id: Uuid,
        /// State, and configuration if not deleted, of the bucket
        pub state: crdt::Deletable<BucketParams>,
    }

    /// Configuration for a bucket
    #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
    pub struct BucketParams {
        /// Bucket's creation date
        pub creation_date: u64,
        /// Map of key with access to the bucket, and what kind of access they give
        pub authorized_keys: crdt::Map<String, BucketKeyPerm>,

        /// Map of aliases that are or have been given to this bucket
        /// in the global namespace
        /// (not authoritative: this is just used as an indication to
        /// map back to aliases when doing ListBuckets)
        pub aliases: crdt::LwwMap<String, bool>,
        /// Map of aliases that are or have been given to this bucket
        /// in namespaces local to keys
        /// key = (access key id, alias name)
        pub local_aliases: crdt::LwwMap<(String, String), bool>,

        /// Whether this bucket is allowed for website access
        /// (under all of its global alias names),
        /// and if so, the website configuration XML document
        pub website_config: crdt::Lww<Option<WebsiteConfig>>,
        /// CORS rules
        pub cors_config: crdt::Lww<Option<Vec<CorsRule>>>,
        /// Lifecycle configuration
        pub lifecycle_config: crdt::Lww<Option<Vec<LifecycleRule>>>,
        /// Bucket quotas
        pub quotas: crdt::Lww<BucketQuotas>,
    }

    #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
    pub struct WebsiteConfig {
        pub index_document: String,
        pub error_document: Option<String>,
        // this field is currently unused, but present so adding it in the future doesn't
        // need a new migration
        pub redirect_all: Option<RedirectAll>,
        pub routing_rules: Vec<RoutingRule>,
    }

    #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
    pub struct RedirectAll {
        pub hostname: String,
        pub protocol: String,
    }

    #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
    pub struct RoutingRule {
        pub condition: Option<RedirectCondition>,
        pub redirect: Redirect,
    }

    #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
    pub struct RedirectCondition {
        pub http_error_code: Option<u16>,
        pub prefix: Option<String>,
    }

    #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
    pub struct Redirect {
        pub hostname: Option<String>,
        pub http_redirect_code: u16,
        pub protocol: Option<String>,
        pub replace_key_prefix: Option<String>,
        pub replace_key: Option<String>,
    }

    impl garage_util::migrate::Migrate for Bucket {
        const VERSION_MARKER: &'static [u8] = b"G2bkt";

        type Previous = v08::Bucket;

        fn migrate(old: v08::Bucket) -> Bucket {
            Bucket {
                id: old.id,
                state: old.state.map(|x| BucketParams {
                    creation_date: x.creation_date,
                    authorized_keys: x.authorized_keys,
                    aliases: x.aliases,
                    local_aliases: x.local_aliases,
                    website_config: x.website_config.map(|wc_opt| {
                        wc_opt.map(|wc| WebsiteConfig {
                            index_document: wc.index_document,
                            error_document: wc.error_document,
                            redirect_all: None,
                            routing_rules: vec![],
                        })
                    }),
                    cors_config: x.cors_config,
                    lifecycle_config: x.lifecycle_config,
                    quotas: x.quotas,
                }),
            }
        }
    }
}

pub use v2::*;

impl AutoCrdt for BucketQuotas {
    const WARN_IF_DIFFERENT: bool = true;

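The v2 module above follows Garage's versioned-migration pattern: each schema version names its predecessor and a pure conversion function, so stored v08 buckets can be upgraded on read. A toy sketch of the idea (hypothetical types, not the actual garage_util::migrate API):

// Each version declares its predecessor and how to convert from it.
trait Migrate: Sized {
    type Previous;
    fn migrate(old: Self::Previous) -> Self;
}

struct BucketV08 {
    id: u64,
}

struct BucketV2 {
    id: u64,
    routing_rules: Vec<String>,
}

impl Migrate for BucketV2 {
    type Previous = BucketV08;
    fn migrate(old: BucketV08) -> BucketV2 {
        // New fields get neutral defaults, mirroring redirect_all: None
        // and routing_rules: vec![] in the real migration above.
        BucketV2 { id: old.id, routing_rules: vec![] }
    }
}

fn main() {
    let old = BucketV08 { id: 42 };
    let new = BucketV2::migrate(old);
    assert!(new.routing_rules.is_empty());
    println!("migrated bucket {}", new.id);
}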
@@ -329,7 +329,7 @@ impl Garage {

    pub async fn locked_helper(&self) -> helper::locked::LockedHelper {
        let lock = self.bucket_lock.lock().await;
        helper::locked::LockedHelper(self, Some(lock))
        helper::locked::LockedHelper(self, lock)
    }
}

Some files were not shown because too many files have changed in this diff.