replace RPC stack with netapp #123

Merged
lx merged 5 commits from netapp into main 2021-10-25 13:49:35 +00:00
53 changed files with 4011 additions and 4621 deletions

View File

@ -276,115 +276,115 @@ trigger:
node:
nix: 1
---
kind: pipeline
type: docker
name: release-linux-i686
volumes:
- name: nix_store
host:
path: /var/lib/drone/nix
- name: nix_config
temp: {}
environment:
TARGET: i686-unknown-linux-musl
steps:
- name: setup nix
image: nixpkgs/nix:nixos-21.05
volumes:
- name: nix_store
path: /nix
- name: nix_config
path: /etc/nix
commands:
- cp nix/nix.conf /etc/nix/nix.conf
- nix-build --no-build-output --no-out-link shell.nix -A inputDerivation
- name: build
image: nixpkgs/nix:nixos-21.05
volumes:
- name: nix_store
path: /nix
- name: nix_config
path: /etc/nix
commands:
- nix-build --no-build-output --argstr target $TARGET --arg release true --argstr git_version $DRONE_COMMIT
- name: integration
image: nixpkgs/nix:nixos-21.05
volumes:
- name: nix_store
path: /nix
- name: nix_config
path: /etc/nix
commands:
- nix-shell --run ./script/test-smoke.sh || (cat /tmp/garage.log; false)
- name: update cache
image: nixpkgs/nix:nixos-21.05
environment:
AWS_ACCESS_KEY_ID:
from_secret: cache_aws_access_key_id
AWS_SECRET_ACCESS_KEY:
from_secret: cache_aws_secret_access_key
NIX_PRIV_KEY:
from_secret: nix_priv_key
volumes:
- name: nix_store
path: /nix
- name: nix_config
path: /etc/nix
commands:
- (umask 377 && echo $NIX_PRIV_KEY > /etc/nix/signing-key.sec)
- |
nix copy --to 's3://nix?endpoint=garage.deuxfleurs.fr&region=garage&secret-key=/etc/nix/signing-key.sec' \
$(nix-store -qR --include-outputs \
$(nix-instantiate --argstr target $TARGET --arg release true))
- name: push static binary
image: nixpkgs/nix:nixos-21.05
volumes:
- name: nix_store
path: /nix
- name: nix_config
path: /etc/nix
environment:
AWS_ACCESS_KEY_ID:
from_secret: garagehq_aws_access_key_id
AWS_SECRET_ACCESS_KEY:
from_secret: garagehq_aws_secret_access_key
commands:
- nix-shell --arg rust false --arg integration false --run "to_s3"
- name: docker build and publish
image: nixpkgs/nix:nixos-21.05
volumes:
- name: nix_store
path: /nix
- name: nix_config
path: /etc/nix
environment:
DOCKER_AUTH:
from_secret: docker_auth
DOCKER_PLATFORM: "linux/386"
CONTAINER_NAME: "dxflrs/386_garage"
HOME: "/kaniko"
commands:
- mkdir -p /kaniko/.docker
- echo $DOCKER_AUTH > /kaniko/.docker/config.json
- export CONTAINER_TAG=${DRONE_TAG:-$DRONE_COMMIT}
- nix-shell --arg rust false --arg integration false --run "to_docker"
trigger:
event:
- promote
- cron
node:
nix: 1
# ---
# kind: pipeline
# type: docker
# name: release-linux-i686
#
# volumes:
# - name: nix_store
# host:
# path: /var/lib/drone/nix
# - name: nix_config
# temp: {}
#
# environment:
# TARGET: i686-unknown-linux-musl
#
# steps:
# - name: setup nix
# image: nixpkgs/nix:nixos-21.05
# volumes:
# - name: nix_store
# path: /nix
# - name: nix_config
# path: /etc/nix
# commands:
# - cp nix/nix.conf /etc/nix/nix.conf
# - nix-build --no-build-output --no-out-link shell.nix -A inputDerivation
#
# - name: build
# image: nixpkgs/nix:nixos-21.05
# volumes:
# - name: nix_store
# path: /nix
# - name: nix_config
# path: /etc/nix
# commands:
# - nix-build --no-build-output --argstr target $TARGET --arg release true --argstr git_version $DRONE_COMMIT
#
# - name: integration
# image: nixpkgs/nix:nixos-21.05
# volumes:
# - name: nix_store
# path: /nix
# - name: nix_config
# path: /etc/nix
# commands:
# - nix-shell --run ./script/test-smoke.sh || (cat /tmp/garage.log; false)
#
# - name: update cache
# image: nixpkgs/nix:nixos-21.05
# environment:
# AWS_ACCESS_KEY_ID:
# from_secret: cache_aws_access_key_id
# AWS_SECRET_ACCESS_KEY:
# from_secret: cache_aws_secret_access_key
# NIX_PRIV_KEY:
# from_secret: nix_priv_key
# volumes:
# - name: nix_store
# path: /nix
# - name: nix_config
# path: /etc/nix
# commands:
# - (umask 377 && echo $NIX_PRIV_KEY > /etc/nix/signing-key.sec)
# - |
# nix copy --to 's3://nix?endpoint=garage.deuxfleurs.fr&region=garage&secret-key=/etc/nix/signing-key.sec' \
# $(nix-store -qR --include-outputs \
# $(nix-instantiate --argstr target $TARGET --arg release true))
#
# - name: push static binary
# image: nixpkgs/nix:nixos-21.05
# volumes:
# - name: nix_store
# path: /nix
# - name: nix_config
# path: /etc/nix
# environment:
# AWS_ACCESS_KEY_ID:
# from_secret: garagehq_aws_access_key_id
# AWS_SECRET_ACCESS_KEY:
# from_secret: garagehq_aws_secret_access_key
# commands:
# - nix-shell --arg rust false --arg integration false --run "to_s3"
#
# - name: docker build and publish
# image: nixpkgs/nix:nixos-21.05
# volumes:
# - name: nix_store
# path: /nix
# - name: nix_config
# path: /etc/nix
# environment:
# DOCKER_AUTH:
# from_secret: docker_auth
# DOCKER_PLATFORM: "linux/386"
# CONTAINER_NAME: "dxflrs/386_garage"
# HOME: "/kaniko"
# commands:
# - mkdir -p /kaniko/.docker
# - echo $DOCKER_AUTH > /kaniko/.docker/config.json
# - export CONTAINER_TAG=${DRONE_TAG:-$DRONE_COMMIT}
# - nix-shell --arg rust false --arg integration false --run "to_docker"
#
# trigger:
# event:
# - promote
# - cron
#
# node:
# nix: 1
---
kind: pipeline
@ -486,105 +486,105 @@ trigger:
node:
nix: 1
---
kind: pipeline
type: docker
name: release-linux-armv6l
volumes:
- name: nix_store
host:
path: /var/lib/drone/nix
- name: nix_config
temp: {}
environment:
TARGET: armv6l-unknown-linux-musleabihf
steps:
- name: setup nix
image: nixpkgs/nix:nixos-21.05
volumes:
- name: nix_store
path: /nix
- name: nix_config
path: /etc/nix
commands:
- cp nix/nix.conf /etc/nix/nix.conf
- nix-build --no-build-output --no-out-link --arg rust false --arg integration false -A inputDerivation
- name: build
image: nixpkgs/nix:nixos-21.05
volumes:
- name: nix_store
path: /nix
- name: nix_config
path: /etc/nix
commands:
- nix-build --no-build-output --argstr target $TARGET --arg release true --argstr git_version $DRONE_COMMIT
- name: update cache
image: nixpkgs/nix:nixos-21.05
environment:
AWS_ACCESS_KEY_ID:
from_secret: cache_aws_access_key_id
AWS_SECRET_ACCESS_KEY:
from_secret: cache_aws_secret_access_key
NIX_PRIV_KEY:
from_secret: nix_priv_key
volumes:
- name: nix_store
path: /nix
- name: nix_config
path: /etc/nix
commands:
- (umask 377 && echo $NIX_PRIV_KEY > /etc/nix/signing-key.sec)
- |
nix copy --to 's3://nix?endpoint=garage.deuxfleurs.fr&region=garage&secret-key=/etc/nix/signing-key.sec' \
$(nix-store -qR --include-outputs \
$(nix-instantiate --argstr target $TARGET --arg release true))
- name: push static binary
image: nixpkgs/nix:nixos-21.05
volumes:
- name: nix_store
path: /nix
- name: nix_config
path: /etc/nix
environment:
AWS_ACCESS_KEY_ID:
from_secret: garagehq_aws_access_key_id
AWS_SECRET_ACCESS_KEY:
from_secret: garagehq_aws_secret_access_key
commands:
- nix-shell --arg integration false --arg rust false --run "to_s3"
- name: docker build and publish
image: nixpkgs/nix:nixos-21.05
volumes:
- name: nix_store
path: /nix
- name: nix_config
path: /etc/nix
environment:
DOCKER_AUTH:
from_secret: docker_auth
DOCKER_PLATFORM: "linux/arm"
CONTAINER_NAME: "dxflrs/arm_garage"
HOME: "/kaniko"
commands:
- mkdir -p /kaniko/.docker
- echo $DOCKER_AUTH > /kaniko/.docker/config.json
- export CONTAINER_TAG=${DRONE_TAG:-$DRONE_COMMIT}
- nix-shell --arg rust false --arg integration false --run "to_docker"
trigger:
event:
- promote
- cron
node:
nix: 1
# ---
# kind: pipeline
# type: docker
# name: release-linux-armv6l
#
# volumes:
# - name: nix_store
# host:
# path: /var/lib/drone/nix
# - name: nix_config
# temp: {}
#
# environment:
# TARGET: armv6l-unknown-linux-musleabihf
#
# steps:
# - name: setup nix
# image: nixpkgs/nix:nixos-21.05
# volumes:
# - name: nix_store
# path: /nix
# - name: nix_config
# path: /etc/nix
# commands:
# - cp nix/nix.conf /etc/nix/nix.conf
# - nix-build --no-build-output --no-out-link --arg rust false --arg integration false -A inputDerivation
#
# - name: build
# image: nixpkgs/nix:nixos-21.05
# volumes:
# - name: nix_store
# path: /nix
# - name: nix_config
# path: /etc/nix
# commands:
# - nix-build --no-build-output --argstr target $TARGET --arg release true --argstr git_version $DRONE_COMMIT
#
# - name: update cache
# image: nixpkgs/nix:nixos-21.05
# environment:
# AWS_ACCESS_KEY_ID:
# from_secret: cache_aws_access_key_id
# AWS_SECRET_ACCESS_KEY:
# from_secret: cache_aws_secret_access_key
# NIX_PRIV_KEY:
# from_secret: nix_priv_key
# volumes:
# - name: nix_store
# path: /nix
# - name: nix_config
# path: /etc/nix
# commands:
# - (umask 377 && echo $NIX_PRIV_KEY > /etc/nix/signing-key.sec)
# - |
# nix copy --to 's3://nix?endpoint=garage.deuxfleurs.fr&region=garage&secret-key=/etc/nix/signing-key.sec' \
# $(nix-store -qR --include-outputs \
# $(nix-instantiate --argstr target $TARGET --arg release true))
#
# - name: push static binary
# image: nixpkgs/nix:nixos-21.05
# volumes:
# - name: nix_store
# path: /nix
# - name: nix_config
# path: /etc/nix
# environment:
# AWS_ACCESS_KEY_ID:
# from_secret: garagehq_aws_access_key_id
# AWS_SECRET_ACCESS_KEY:
# from_secret: garagehq_aws_secret_access_key
# commands:
# - nix-shell --arg integration false --arg rust false --run "to_s3"
#
# - name: docker build and publish
# image: nixpkgs/nix:nixos-21.05
# volumes:
# - name: nix_store
# path: /nix
# - name: nix_config
# path: /etc/nix
# environment:
# DOCKER_AUTH:
# from_secret: docker_auth
# DOCKER_PLATFORM: "linux/arm"
# CONTAINER_NAME: "dxflrs/arm_garage"
# HOME: "/kaniko"
# commands:
# - mkdir -p /kaniko/.docker
# - echo $DOCKER_AUTH > /kaniko/.docker/config.json
# - export CONTAINER_TAG=${DRONE_TAG:-$DRONE_COMMIT}
# - nix-shell --arg rust false --arg integration false --run "to_docker"
#
# trigger:
# event:
# - promote
# - cron
#
# node:
# nix: 1
---
kind: pipeline
@ -613,9 +613,9 @@ steps:
depends_on:
- release-linux-x86_64
- release-linux-i686
#- release-linux-i686
- release-linux-aarch64
- release-linux-armv6l
#- release-linux-armv6l
trigger:
event:

Cargo.lock (generated, 862 changes): file diff suppressed because it is too large.

Cargo.nix (1635 changes): file diff suppressed because it is too large.

View File

@ -65,7 +65,7 @@ in let
*/
''^(src|tests)'' # fixed default
''.*\.(rs|toml)$'' # fixed default
''^(crdt|replication)'' # our crate submodules
''^(crdt|replication|cli)'' # our crate submodules
];
};

View File

@ -30,3 +30,4 @@
- [Working Documents](./working_documents/index.md)
- [Load Balancing Data](./working_documents/load_balancing.md)
- [Migrating from 0.3 to 0.4](./working_documents/migration_04.md)

View File

@ -11,15 +11,12 @@ to get familiar with Garage's command line and usage patterns.
## Prerequisites
To run a real-world deployment, make sure you the following conditions are met:
To run a real-world deployment, make sure the following conditions are met:
- You have at least three machines with sufficient storage space available.
- Each machine has a public IP address which is reachable by other machines.
Running behind a NAT is possible, but having several Garage nodes behind a single NAT
is slightly more involved as each will have to have a different RPC port number
(the local port number of a node must be the same as the port number exposed publicly
by the NAT).
Running behind a NAT is likely to be possible but hasn't been tested for the latest version (TODO).
- Ideally, each machine should have an SSD available in addition to the HDD you are dedicating
to Garage. This will allow for faster access to metadata and has the potential
@ -45,44 +42,22 @@ For our example, we will suppose the following infrastructure with IPv6 connecti
## Get a Docker image
Our docker image is currently named `lxpz/garage_amd64` and is stored on the [Docker Hub](https://hub.docker.com/r/lxpz/garage_amd64/tags?page=1&ordering=last_updated).
We encourage you to use a fixed tag (eg. `v0.3.0`) and not the `latest` tag.
For this example, we will use the latest published version at the time of the writing which is `v0.3.0` but it's up to you
We encourage you to use a fixed tag (e.g. `v0.4.0`) and not the `latest` tag.
For this example, we will use the latest published version at the time of writing, which is `v0.4.0`, but it's up to you
to check [the most recent versions on the Docker Hub](https://hub.docker.com/r/lxpz/garage_amd64/tags?page=1&ordering=last_updated).
For example:
```
sudo docker pull lxpz/garage_amd64:v0.3.0
sudo docker pull lxpz/garage_amd64:v0.4.0
```
## Generating TLS certificates
You first need to generate TLS certificates to encrypt traffic between Garage nodes
(referred to as RPC traffic).
To generate your TLS certificates, run on your machine:
```
wget https://git.deuxfleurs.fr/Deuxfleurs/garage/raw/branch/main/genkeys.sh
chmod +x genkeys.sh
./genkeys.sh
```
It will create a folder named `pki/` containing the keys that you will use for the cluster.
These files will have to be copied to all of your cluster nodes, as explained below.
## Deploying and configuring Garage
On each machine, the setup will be similar;
in particular, you must consider the following folders/files:
- `/etc/garage/garage.toml`: Garage daemon's configuration (see below)
- `/etc/garage/pki/`: Folder containing Garage certificates,
must be generated on your computer and copied on the servers.
Only the files `garage-ca.crt`, `garage.crt` and `garage.key` are necessary.
- `/etc/garage.toml`: Garage daemon's configuration (see below)
- `/var/lib/garage/meta/`: Folder containing Garage's metadata,
put this folder on a SSD if possible
@ -91,7 +66,7 @@ especially you must consider the following folders/files:
this folder will be your main data storage and must be on a large storage (e.g. large HDD)
A valid `/etc/garage/garage.toml` for our cluster would be:
A valid `/etc/garage/garage.toml` for our cluster would look as follows:
```toml
metadata_dir = "/var/lib/garage/meta"
@ -100,18 +75,8 @@ data_dir = "/var/lib/garage/data"
replication_mode = "3"
rpc_bind_addr = "[::]:3901"
bootstrap_peers = [
"[fc00:1::1]:3901",
"[fc00:1::2]:3901",
"[fc00:B::1]:3901",
"[fc00:F::1]:3901",
]
[rpc_tls]
ca_cert = "/etc/garage/pki/garage-ca.crt"
node_cert = "/etc/garage/pki/garage.crt"
node_key = "/etc/garage/pki/garage.key"
rpc_public_addr = "<this node's public IP>:3901"
rpc_secret = "<RPC secret>"
[s3_api]
s3_region = "garage"
@ -123,11 +88,14 @@ root_domain = ".web.garage"
index = "index.html"
```
Please make sure to change `bootstrap_peers` to **your** IP addresses!
Check the following for your configuration files:
Check the [configuration file reference documentation](../reference_manual/configuration.md)
to learn more about all available configuration options.
- Make sure `rpc_public_addr` contains the public IP address of the node you are configuring.
This parameter is optional but recommended: if your nodes have trouble communicating with
one another, consider adding it.
- Make sure `rpc_secret` is the same value on all nodes. It should be a 32-byte hex-encoded secret key.
You can generate such a key with `openssl rand -hex 32`.
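For instance, here is a minimal sketch of generating the secret once and distributing it to every node (the hostnames are placeholders, and root SSH access is assumed):
```bash
SECRET=$(openssl rand -hex 32)
for host in mercury venus earth mars; do
  # write the same rpc_secret value into each node's configuration file
  ssh "$host" "sed -i 's/^rpc_secret = .*/rpc_secret = \"$SECRET\"/' /etc/garage.toml"
done
```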
## Starting Garage using Docker
@ -139,11 +107,10 @@ docker run \
--name garaged \
--restart always \
--network host \
-v /etc/garage/pki:/etc/garage/pki \
-v /etc/garage/garage.toml:/garage/garage.toml \
-v /etc/garage.toml:/etc/garage.toml \
-v /var/lib/garage/meta:/var/lib/garage/meta \
-v /var/lib/garage/data:/var/lib/garage/data \
lxpz/garage_amd64:v0.3.0
lxpz/garage_amd64:v0.4.0
```
It should be restarted automatically at each reboot.
but please check the release notes before doing so!
To upgrade, simply stop and remove this container and
start again the command with a new version of Garage.
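As a sketch, an upgrade could look like the following (the new tag is a placeholder; pick a real one from the Docker Hub):
```bash
sudo docker stop garaged && sudo docker rm garaged   # stop and remove the old container
sudo docker pull lxpz/garage_amd64:vX.Y.Z            # placeholder for the newer tag
# then re-run the `docker run` command above with the new tag
```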
## Controlling the daemon
The `garage` binary has two purposes:
- it acts as a daemon when launched with `garage server ...`
- it acts as a daemon when launched with `garage server`
- it acts as a control tool for the daemon when launched with any other command
In this section, we will see how to use the `garage` binary as a control tool for the daemon we just started.
You first need to get a shell having access to this binary. For instance, enter the Docker container with:
Ensure an appropriate `garage` binary (the same version as your Docker image) is available in your path.
If your configuration file is at `/etc/garage.toml`, the `garage` binary should work with no further change.
You can test your `garage` CLI utility by running a simple command such as:
```bash
sudo docker exec -ti garaged bash
garage status
```
You will now have a shell where the Garage binary is available as `/garage/garage`
*You can also install the binary on your machine to remotely control the cluster.*
## Talk to the daemon and create an alias
`garage` requires 4 options to talk with the daemon:
At this point, nodes are not yet talking to one another.
Your output should therefore look as follows:
```
--ca-cert <ca-cert>
--client-cert <client-cert>
--client-key <client-key>
-h, --rpc-host <rpc-host>
Mercury$ garage status
==== HEALTHY NODES ====
ID Hostname Address Tag Zone Capacity
563e1ac825ee3323… Mercury [fc00:1::1]:3901 NO ROLE ASSIGNED
```
The 3 first ones are certificates and keys needed by TLS, the last one is simply the address of Garage's RPC endpoint.
If you are invoking `garage` from a server node directly, you do not need to set `--rpc-host`
as the default value `127.0.0.1:3901` will allow it to contact Garage correctly.
## Connecting nodes together
To avoid typing the 3 first options each time we want to run a command,
you can use the following alias:
When your Garage nodes first start, they will generate a local node identifier
(based on a public/private key pair).
To obtain the node identifier of a node, once it is generated,
run `garage node-id`.
This will print keys as follows:
```bash
alias garagectl='/garage/garage \
--ca-cert /etc/garage/pki/garage-ca.crt \
--client-cert /etc/garage/pki/garage.crt \
--client-key /etc/garage/pki/garage.key'
Mercury$ garage node-id
563e1ac825ee3323aa441e72c26d1030d6d4414aeb3dd25287c531e7fc2bc95d@[fc00:1::1]:3901
Venus$ garage node-id
86f0f26ae4afbd59aaf9cfb059eefac844951efd5b8caeec0d53f4ed6c85f332@[fc00:1::2]:3901
etc.
```
You can now use all of the commands presented in the [quick start guide](../quick_start/index.md),
simply replacing occurrences of `garage` with `garagectl`.
You can then instruct nodes to connect to one another as follows:
#### Test the alias
You can test your alias by running a simple command such as:
```
garagectl status
```bash
# Instruct Venus to connect to Mercury (this will establish communication both ways)
Venus$ garage node connect 563e1ac825ee3323aa441e72c26d1030d6d4414aeb3dd25287c531e7fc2bc95d@[fc00:1::1]:3901
```
You should get something like that as result:
You don't need to instruct all nodes to connect to all other nodes:
nodes will discover one another transitively.
Now if you run `garage status` on any node, you should have an output that looks as follows:
```
Healthy nodes:
8781c50c410a41b3… Mercury [fc00:1::1]:3901 UNCONFIGURED/REMOVED
2a638ed6c775b69a… Venus [fc00:1::2]:3901 UNCONFIGURED/REMOVED
68143d720f20c89d… Earth [fc00:B::1]:3901 UNCONFIGURED/REMOVED
212f7572f0c89da9… Mars [fc00:F::1]:3901 UNCONFIGURED/REMOVED
==== HEALTHY NODES ====
ID Hostname Address Tag Zone Capacity
563e1ac825ee3323… Mercury [fc00:1::1]:3901 NO ROLE ASSIGNED
86f0f26ae4afbd59… Venus [fc00:1::2]:3901 NO ROLE ASSIGNED
68143d720f20c89d… Earth [fc00:B::1]:3901 NO ROLE ASSIGNED
212f7572f0c89da9… Mars [fc00:F::1]:3901 NO ROLE ASSIGNED
```
## Configuring a cluster
## Giving roles to nodes
We will now inform Garage of the disk space available on each node of the cluster
as well as the zone (e.g. datacenter) in which each machine is located.
For our example, we will suppose we have the following infrastructure (Capacity, Identifier and Datacenter are Garage-specific values, described below):
For our example, we will suppose we have the following infrastructure
(Capacity, Identifier and Zone are Garage-specific values, described below):
| Location | Name | Disk Space | `Capacity` | `Identifier` | `Zone` |
|----------|---------|------------|------------|--------------|--------------|
| Paris    | Mercury | 1 TB       | `2`        | `8781c5`     | `par1`       |
| Paris    | Venus   | 2 TB       | `4`        | `2a638e`     | `par1`       |
| London   | Earth   | 2 TB       | `4`        | `68143d`     | `lon1`       |
| Brussels | Mars    | 1.5 TB     | `3`        | `212f75`     | `bru1`       |
| Paris    | Mercury | 1 TB       | `2`        | `563e`       | `par1`       |
| Paris    | Venus   | 2 TB       | `4`        | `86f0`       | `par1`       |
| London   | Earth   | 2 TB       | `4`        | `6814`       | `lon1`       |
| Brussels | Mars    | 1.5 TB     | `3`        | `212f`       | `bru1`       |
#### Node identifiers
After its first launch, Garage generates a random and unique identifier for each node, such as:
```
8781c50c410a41b363167e9d49cc468b6b9e4449b6577b64f15a249a149bdcbc
563e1ac825ee3323aa441e72c26d1030d6d4414aeb3dd25287c531e7fc2bc95d
```
Often a shorter form can be used, containing only the beginning of the identifier, like `8781c5`,
Often a shorter form can be used, containing only the beginning of the identifier, like `563e`,
which identifies the server "Mercury" located in "Paris" according to our previous table.
The simplest way to match an identifier to a node is to run:
```
garagectl status
garage status
```
It will display the IP address associated with each node;
@ -287,16 +255,16 @@ have 66% chance of being stored by Venus and 33% chance of being stored by Mercu
Given the information above, we will configure our cluster as follows:
```
garagectl node configure -z par1 -c 2 -t mercury 8781c5
garagectl node configure -z par1 -c 4 -t venus 2a638e
garagectl node configure -z lon1 -c 4 -t earth 68143d
garagectl node configure -z bru1 -c 3 -t mars 212f75
garage node configure -z par1 -c 2 -t mercury 563e
garage node configure -z par1 -c 4 -t venus 86f0
garage node configure -z lon1 -c 4 -t earth 6814
garage node configure -z bru1 -c 3 -t mars 212f
```
## Using your Garage cluster
Creating buckets and managing keys is done using the `garagectl` CLI,
Creating buckets and managing keys is done using the `garage` CLI,
and is covered in the [quick start guide](../quick_start/index.md).
Remember also that the CLI is self-documented thanks to the `--help` flag and
the `help` subcommand (e.g. `garage help`, `garage key --help`).

View File

@ -10,8 +10,6 @@ Following this guide is recommended before moving on to
Note that this kind of deployment should not be used in production, as it provides
no redundancy for your data!
We will also skip intra-cluster TLS configuration, meaning that if you add nodes
to your cluster, communication between them will not be secure.
## Get a binary
@ -30,7 +28,10 @@ you can [build Garage from source](../cookbook/from_source.md).
## Writing a first configuration file
This first configuration file should allow you to get started easily with the simplest
possible Garage deployment:
possible Garage deployment.
**Save it as `/etc/garage.toml`.**
You can also store it somewhere else, but you will have to specify `-c path/to/garage.toml`
at each invocation of the `garage` binary (for example: `garage -c ./garage.toml server`, `garage -c ./garage.toml status`).
```toml
metadata_dir = "/tmp/meta"
@ -39,10 +40,10 @@ data_dir = "/tmp/data"
replication_mode = "none"
rpc_bind_addr = "[::]:3901"
rpc_public_addr = "127.0.0.1:3901"
rpc_secret = "1799bccfd7411eddcf9ebd316bc1f5287ad12a68094e1c6ac6abde7e6feae1ec"
bootstrap_peers = [
"127.0.0.1:3901",
]
bootstrap_peers = []
[s3_api]
s3_region = "garage"
@ -54,7 +55,10 @@ root_domain = ".web.garage"
index = "index.html"
```
Save your configuration file as `garage.toml`.
The `rpc_secret` value provided above is just an example. It will work, but in
order to secure your cluster you will need to use another one. You can generate
such a value with `openssl rand -hex 32`.
As you can see in the `metadata_dir` and `data_dir` parameters, we are saving Garage's data
in `/tmp` which gets erased when your system reboots. This means that data stored on this
@ -67,15 +71,15 @@ your data to be persisted properly.
Use the following command to launch the Garage server with our configuration file:
```
RUST_LOG=garage=info garage server -c garage.toml
RUST_LOG=garage=info garage server
```
You can tune Garage's verbosity as follows (from less verbose to more verbose):
```
RUST_LOG=garage=info garage server -c garage.toml
RUST_LOG=garage=debug garage server -c garage.toml
RUST_LOG=garage=trace garage server -c garage.toml
RUST_LOG=garage=info garage server
RUST_LOG=garage=debug garage server
RUST_LOG=garage=trace garage server
```
Log level `info` is recommended for most use cases.
@ -85,11 +89,12 @@ Log level `debug` can help you check why your S3 API calls are not working.
## Checking that Garage runs correctly
The `garage` utility is also used as a CLI tool to configure your Garage deployment.
It tries to connect to a Garage server through the RPC protocol, by default looking
for a Garage server at `localhost:3901`.
It uses values from the TOML configuration file to find the Garage daemon running on the
local node; therefore, if your configuration file is not at `/etc/garage.toml`, you will
again have to specify `-c path/to/garage.toml`.
Since our deployment already binds to port 3901, the following command should be sufficient
to show Garage's status:
If the `garage` CLI is able to correctly detect the parameters of your local Garage node,
the following command should be enough to show the status of your cluster:
```
garage status
@ -98,8 +103,9 @@ garage status
This should show something like this:
```
Healthy nodes:
2a638ed6c775b69a… linuxbox 127.0.0.1:3901 UNCONFIGURED/REMOVED
==== HEALTHY NODES ====
ID Hostname Address Tag Zone Capacity
563e1ac825ee3323… linuxbox 127.0.0.1:3901 NO ROLE ASSIGNED
```
## Configuring your Garage node
@ -117,7 +123,7 @@ garage node configure -z dc1 -c 1 <node_id>
where `<node_id>` corresponds to the identifier of the node shown by `garage status` (first column).
You can enter simply a prefix of that identifier.
For instance here you could write just `garage node configure -z dc1 -c 1 2a63`.
For instance here you could write just `garage node configure -z dc1 -c 1 563e`.

View File

@ -10,31 +10,26 @@ block_size = 1048576
replication_mode = "3"
rpc_secret = "4425f5c26c5e11581d3223904324dcb5b5d5dfb14e5e7f35e38c595424f5f1e6"
rpc_bind_addr = "[::]:3901"
rpc_public_addr = "[fc00:1::1]:3901"
bootstrap_peers = [
"[fc00:1::1]:3901",
"[fc00:1::2]:3901",
"[fc00:B::1]:3901",
"[fc00:F::1]:3901",
"563e1ac825ee3323aa441e72c26d1030d6d4414aeb3dd25287c531e7fc2bc95d@[fc00:1::1]:3901",
"86f0f26ae4afbd59aaf9cfb059eefac844951efd5b8caeec0d53f4ed6c85f332[fc00:1::2]:3901",
"681456ab91350f92242e80a531a3ec9392cb7c974f72640112f90a600d7921a4@[fc00:B::1]:3901",
"212fd62eeaca72c122b45a7f4fa0f55e012aa5e24ac384a72a3016413fa724ff@[fc00:F::1]:3901",
]
consul_host = "consul.service"
consul_service_name = "garage-daemon"
max_concurrent_rpc_requests = 12
sled_cache_capacity = 134217728
sled_flush_every_ms = 2000
[rpc_tls]
ca_cert = "/etc/garage/pki/garage-ca.crt"
node_cert = "/etc/garage/pki/garage.crt"
node_key = "/etc/garage/pki/garage.key"
[s3_api]
api_bind_addr = "[::]:3900"
s3_region = "garage"
[s3_web]
bind_addr = "[::]:3902"
@ -63,10 +58,15 @@ when [configuring it](../getting_started/05_cluster.md).
#### `block_size`
Garage splits stored objects in consecutive chunks of size `block_size` (except the last
one which might be standard). The default size is 1MB and should work in most cases.
If you are interested in tuning this, feel free to do so (and remember to report your
findings to us!)
Garage splits stored objects in consecutive chunks of size `block_size`
(except the last one which might be smaller). The default size is 1MB and
should work in most cases. If you are interested in tuning this, feel free
to do so (and remember to report your findings to us!). If this value is
changed for a running Garage installation, only files newly uploaded will be
affected. Previously uploaded files will remain available. This however
means that chunks from existing files will not be deduplicated with chunks
from newly uploaded files, meaning you might use more storage space than is
optimally possible.
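As a quick illustration of the chunking arithmetic (sizes chosen arbitrarily for the example):

```bash
# Number of chunks for an object of a given size with the default block_size
SIZE=$((5*1024*1024 + 512*1024))         # a 5.5 MiB example object
BLOCK=1048576                            # block_size = 1 MiB
echo $(( (SIZE + BLOCK - 1) / BLOCK ))   # -> 6: five 1 MiB chunks plus one 0.5 MiB chunk
```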
#### `replication_mode`
@ -97,6 +97,14 @@ Never run a Garage cluster where that is not the case.**
Changing the `replication_mode` of a cluster might work (make sure to shut down all nodes
and change it everywhere at the same time), but is not officially supported.
#### `rpc_secret`
Garage uses a secret key that is shared between all nodes of the cluster
in order to identify these nodes and allow them to communicate with one another.
This key should be specified here in the form of a 32-byte hex-encoded
random string. Such a string can be generated with a command
such as `openssl rand -hex 32`.
#### `rpc_bind_addr`
The address and port on which to bind for inter-cluster communications
@ -106,10 +114,28 @@ the node, even in the case of a NAT: the NAT should be configured to forward the
port number to the same internal port number. This means that if you have several nodes running
behind a NAT, they should each use a different RPC port number.
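As a hedged sketch, the corresponding forwarding rules on the NAT router could look like this (the LAN addresses are hypothetical; note that each external port matches the node's `rpc_bind_addr` port):

```bash
iptables -t nat -A PREROUTING -p tcp --dport 3901 -j DNAT --to-destination 192.168.1.10:3901
iptables -t nat -A PREROUTING -p tcp --dport 3902 -j DNAT --to-destination 192.168.1.11:3902
```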
#### `rpc_public_addr`
The address and port that other nodes need to use to contact this node for
RPC calls. **This parameter is optional but recommended.** In case you have
a NAT that binds the RPC port to a port that is different on your public IP,
this field might help make it work.
#### `bootstrap_peers`
A list of IPs and ports on which to contact other Garage peers of this cluster.
This should correspond to the RPC ports set up with `rpc_bind_addr`.
A list of peer identifiers on which to contact other Garage peers of this cluster.
These peer identifiers have the following syntax:
```
<node public key>@<node public IP or hostname>:<port>
```
In the case where `rpc_public_addr` is correctly specified in the
configuration file, the full identifier of a node including IP and port can
be obtained by running `garage node-id` and then included directly in the
`bootstrap_peers` list of other nodes. Otherwise, only the node's public
key will be returned by `garage node-id` and you will have to add the IP
yourself.
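For example, a sketch of assembling such an entry (the key and address are the sample values used elsewhere in this documentation):

```bash
# On the existing node: print its full peer identifier
# (the IP and port are only included if rpc_public_addr is set)
garage node-id
# -> 563e1ac825ee3323aa441e72c26d1030d6d4414aeb3dd25287c531e7fc2bc95d@[fc00:1::1]:3901
# Paste this string into the bootstrap_peers list of the new node's configuration file
```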
#### `consul_host` and `consul_service_name`
@ -121,12 +147,6 @@ The `consul_host` parameter should be set to the hostname of the Consul server,
and `consul_service_name` should be set to the service name under which Garage's
RPC ports are announced.
#### `max_concurrent_rpc_requests`
Garage implements rate limiting for RPC requests: no more than
`max_concurrent_rpc_requests` concurrent outbound RPC requests will be made
by a Garage node (additional requests will be put in a waiting queue).
#### `sled_cache_capacity`
This parameter can be used to tune the capacity of the cache used by
@ -143,21 +163,6 @@ of a power outage (though this should not matter much as data is replicated on o
nodes). The default value, 2000ms, should be appropriate for most use cases.
## The `[rpc_tls]` section
This section should be used to configure the TLS certificates used to encrypt
intra-cluster traffic (RPC traffic). The following parameters should be set:
- `ca_cert`: the certificate of the CA that is allowed to sign individual node certificates
- `node_cert`: the node certificate for the current node
- `node_key`: the key associated with the node certificate
Note that several nodes may use the same node certificate, as long as it is signed
by the CA.
If this section is absent, TLS is not used to encrypt intra-cluster traffic.
## The `[s3_api]` section
#### `api_bind_addr`

View File

@ -23,76 +23,35 @@ Not implemented:
All APIs that are not mentioned are not implemented and will return a 400 bad request.
#### AbortMultipartUpload
| Endpoint | Status |
|------------------------------|----------------------------------|
| AbortMultipartUpload | Implemented |
| CompleteMultipartUpload | Implemented |
| CopyObject | Implemented |
| CreateBucket | Unsupported, stub (see below) |
| CreateMultipartUpload | Implemented |
| DeleteBucket | Unsupported (see below) |
| DeleteObject | Implemented |
| DeleteObjects | Implemented |
| GetBucketLocation | Implemented |
| GetBucketVersioning | Stub (see below) |
| GetObject | Implemented |
| HeadBucket | Implemented |
| HeadObject | Implemented |
| ListBuckets | Implemented |
| ListObjects | Implemented, bugs? (see below) |
| ListObjectsV2 | Implemented |
| PutObject | Implemented |
| UploadPart | Implemented |
Implemented.
#### CompleteMultipartUpload
Implemented badly. Garage will not check that all the parts stored correspond to the list given by the client in the request body. This means that the multipart upload might be completed with an invalid size. This is a bug and will be fixed.
- **CreateBucket:** Garage does not yet accept creating buckets or giving access using API calls, it has to be done using the CLI tools. CreateBucket will return a 200 if the bucket exists and user has write access, and a 403 Forbidden in all other cases.
#### CopyObject
- **DeleteBucket:** Garage does not yet accept deleting buckets using API calls, it has to be done using the CLI tools. This request will return a 403 Forbidden.
Implemented.
#### CreateBucket
Garage does not accept creating buckets or giving access using API calls, it has to be done using the CLI tools. CreateBucket will return a 200 if the bucket exists and user has write access, and a 403 Forbidden in all other cases.
#### CreateMultipartUpload
Implemented.
#### DeleteBucket
Garage does not accept deleting buckets using API calls, it has to be done using the CLI tools. This request will return a 403 Forbidden.
#### DeleteObject
Implemented.
#### DeleteObjects
Implemented.
#### GetBucketLocation
Implemented.
#### GetBucketVersioning
Stub implementation (Garage does not yet support versioning so this always returns
- **GetBucketVersioning:** Stub implementation (Garage does not yet support versioning so this always returns
"versioning not enabled").
#### GetObject
Implemented.
#### HeadBucket
Implemented.
#### HeadObject
Implemented.
#### ListBuckets
Implemented.
#### ListObjects
Implemented, but there isn't a very good specification of what `encoding-type=url` covers so there might be some encoding bugs. In our implementation the url-encoded fields are the same in ListObjects as they are in ListObjectsV2.
#### ListObjectsV2
Implemented.
#### PutObject
Implemented.
#### UploadPart
Implemented.
- **ListObjects:** Implemented, but there isn't a very good specification of what `encoding-type=url` covers so there might be some encoding bugs. In our implementation the url-encoded fields are the same in ListObjects as they are in ListObjectsV2.

View File

@ -0,0 +1,61 @@
# Migrating from 0.3 to 0.4
**Migrating from 0.3 to 0.4 is unsupported. This document is only intended to document the process internally for the Deuxfleurs cluster where we have to do it. Do not try it yourself, you will lose your data and we will not help you.**
**Migrating from 0.2 to 0.4 will break everything for sure. Never try it.**
The internal data format of Garage hasn't changed much between 0.3 and 0.4.
The Sled database is still the same, and the data directory as well.
The following has changed, all in the meta directory:
- `node_id` in 0.3 contains the identifier of the current node. In 0.4, this file does nothing and should be deleted. It is replaced by `node_key` (the secret key) and `node_key.pub` (the associated public key). A node's identifier on the ring is its public key.
- `peer_info` in 0.3 contains the list of peers saved automatically by Garage. The format has changed and it is now stored in `peer_list` (`peer_info` should be deleted).
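A minimal sketch of the corresponding per-node cleanup, assuming the default metadata directory used elsewhere in this documentation:

```bash
# 0.4 generates node_key / node_key.pub and peer_list on startup;
# the obsolete 0.3 files can simply be removed
rm /var/lib/garage/meta/node_id
rm /var/lib/garage/meta/peer_info
```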
When migrating, all node identifiers will change. This also means that the assignment of data partitions on the ring will change, and lots of data will have to be rebalanced.
- If your cluster has only 3 nodes, all nodes store everything, therefore nothing has to be rebalanced.
- If your cluster has only 4 nodes, for any partition there will always be at least 2 nodes that stored the data before and that still store it after. Therefore the migration should in theory be transparent and Garage should continue to work during the rebalance.
- If your cluster has 5 or more nodes, data will disappear during the migration. Do not migrate (fortunately we don't have this scenario at Deuxfleurs), or if you do, make Garage unavailable until things stabilize (disable web and api access).
The migration steps are as follows:
1. Prepare a new configuration file for 0.4. For each node, point to the same meta and data directories as Garage 0.3. Basically, the things that change are the following:
- No more `rpc_tls` section
- You have to generate a shared `rpc_secret` and put it in all config files
- `bootstrap_peers` has a different syntax as it has to contain node keys. Leave it empty and use `garage node-id` and `garage node connect` instead (new features of 0.4)
- put the publicly accessible RPC address of your node in `rpc_public_addr` if possible (it's optional but recommended)
- If you are using Consul, change the `consul_service_name` to NOT be the name advertised by Nomad. Now Garage is responsible for advertising its own service itself.
2. Disable API and web access for some time, run `garage repair --all --yes tables` and `garage repair --all --yes blocks`, check the logs, and check that all data seems to be synced correctly between nodes.
3. Save somewhere the output of `garage status`. We will need this to remember how to reconfigure nodes in 0.4.
4. Turn off Garage 0.3
5. Back up metadata folders if you can (i.e. if you have space to do it somewhere). Backing up data folders could also be useful, but that's much harder to do. If your filesystem supports snapshots, this could be a good time to use them.
6. Turn on Garage 0.4
7. At this point, running `garage status` should indicate that all nodes of the previous cluster are "unavailable". The nodes have new identifiers that should appear in the list of healthy nodes once they can talk to one another (use `garage node connect` if necessary). They should have NO ROLE ASSIGNED at the moment.
8. Prepare a script with several `garage node configure` commands that replace each of the v0.3 node IDs with the corresponding v0.4 node ID, with the same zone/tag/capacity. For example, if your node `drosera` had identifier `c24e` before and now has identifier `789a`, and it was configured with capacity `2` in zone `dc1`, put the following command in your script:
```bash
garage node configure 789a -z dc1 -c 2 -t drosera --replace c24e
```
9. Run your reconfiguration script. Check that the new output of `garage status` contains the correct node IDs with the correct values for capacity and zone. Old nodes should no longer be mentioned.
10. If your cluster has 4 nodes or fewer, and you are feeling adventurous, you can re-enable Web and API access now. Things will probably work.
11. Garage might already be resyncing stuff. Issue a `garage repair --all --yes tables` and `garage repair --all --yes blocks` to force it to do so.
12. Wait for resyncing activity to stop in the logs. Repeat steps 11 and 12 two or three times, until issuing the repair commands no longer triggers any resync (a sketch of this loop is given after the list).
13. Your upgraded cluster should be in a working state. Re-enable API and Web access and check that everything went well.
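A hedged sketch of the repair/observe loop from steps 11 and 12 (the pass count and pause are arbitrary):

```bash
for pass in 1 2 3; do
  garage repair --all --yes tables
  garage repair --all --yes blocks
  sleep 300   # watch the logs between passes for remaining resync activity
done
```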

View File

@ -9,11 +9,11 @@ GARAGE_RELEASE="${REPO_FOLDER}/target/release/"
NIX_RELEASE="${REPO_FOLDER}/result/bin/"
PATH="${GARAGE_DEBUG}:${GARAGE_RELEASE}:${NIX_RELEASE}:$PATH"
garage bucket create eprouvette
KEY_INFO=`garage key new --name opérateur`
garage -c /tmp/config.1.toml bucket create eprouvette
KEY_INFO=$(garage -c /tmp/config.1.toml key new --name opérateur)
ACCESS_KEY=`echo $KEY_INFO|grep -Po 'GK[a-f0-9]+'`
SECRET_KEY=`echo $KEY_INFO|grep -Po 'Secret key: [a-f0-9]+'|grep -Po '[a-f0-9]+$'`
garage bucket allow eprouvette --read --write --key $ACCESS_KEY
garage -c /tmp/config.1.toml bucket allow eprouvette --read --write --key $ACCESS_KEY
echo "$ACCESS_KEY $SECRET_KEY" > /tmp/garage.s3
echo "Bucket s3://eprouvette created. Credentials stored in /tmp/garage.s3."

View File

@ -17,6 +17,10 @@ MAIN_LABEL="\e[${FANCYCOLORS[0]}[main]\e[49m"
WHICH_GARAGE=$(which garage || exit 1)
echo -en "${MAIN_LABEL} Found garage at: ${WHICH_GARAGE}\n"
NETWORK_SECRET="$(openssl rand -hex 32)"
# <<<<<<<<< BEGIN FOR LOOP ON NODES
for count in $(seq 1 3); do
CONF_PATH="/tmp/config.$count.toml"
LABEL="\e[${FANCYCOLORS[$count]}[$count]\e[49m"
@ -26,13 +30,10 @@ block_size = 1048576 # objects are split in blocks of maximum this number of b
metadata_dir = "/tmp/garage-meta-$count"
data_dir = "/tmp/garage-data-$count"
rpc_bind_addr = "0.0.0.0:$((3900+$count))" # the port other Garage nodes will use to talk to this node
bootstrap_peers = [
"127.0.0.1:3901",
"127.0.0.1:3902",
"127.0.0.1:3903"
]
max_concurrent_rpc_requests = 12
rpc_public_addr = "127.0.0.1:$((3900+$count))"
bootstrap_peers = []
replication_mode = "3"
rpc_secret = "$NETWORK_SECRET"
[s3_api]
api_bind_addr = "0.0.0.0:$((3910+$count))" # the S3 API port, HTTP without TLS. Add a reverse proxy for the TLS part.
@ -61,11 +62,21 @@ if [ -z "$SKIP_HTTPS" ]; then
socat openssl-listen:4443,reuseaddr,fork,cert=/tmp/garagessl/test.pem,verify=0 tcp4-connect:localhost:3911 &
fi
(garage server -c /tmp/config.$count.toml 2>&1|while read r; do echo -en "$LABEL $r\n"; done) &
(garage -c /tmp/config.$count.toml server 2>&1|while read r; do echo -en "$LABEL $r\n"; done) &
done
# >>>>>>>>>>>>>>>> END FOR LOOP ON NODES
sleep 3
# Establish connections between nodes
for count in $(seq 1 3); do
NODE=$(garage -c /tmp/config.$count.toml node-id -q)
for count2 in $(seq 1 3); do
garage -c /tmp/config.$count2.toml node connect $NODE
done
done
RETRY=120
until garage status 2>&1|grep -q Healthy ; do
until garage -c /tmp/config.1.toml status 2>&1|grep -q HEALTHY ; do
(( RETRY-- ))
if (( RETRY <= 0 )); then
echo -en "${MAIN_LABEL} Garage did not start"
@ -74,6 +85,7 @@ until garage status 2>&1|grep -q Healthy ; do
echo -en "${MAIN_LABEL} cluster starting...\n"
sleep 1
done
echo -en "${MAIN_LABEL} cluster started\n"
wait

View File

@ -11,7 +11,7 @@ PATH="${GARAGE_DEBUG}:${GARAGE_RELEASE}:${NIX_RELEASE}:$PATH"
sleep 5
RETRY=120
until garage status 2>&1|grep -q Healthy ; do
until garage -c /tmp/config.1.toml status 2>&1|grep -q HEALTHY ; do
(( RETRY-- ))
if (( RETRY <= 0 )); then
echo "garage did not start in time, failing."
@ -21,10 +21,10 @@ until garage status 2>&1|grep -q Healthy ; do
sleep 1
done
garage status \
| grep UNCONFIGURED \
garage -c /tmp/config.1.toml status \
| grep 'NO ROLE' \
| grep -Po '^[0-9a-f]+' \
| while read id; do
garage node configure -z dc1 -c 1 $id
garage -c /tmp/config.1.toml node configure -z dc1 -c 1 $id
done

View File

@ -17,13 +17,14 @@ SKIP_DUCK=1
echo "⏳ Setup"
${SCRIPT_FOLDER}/dev-clean.sh
${SCRIPT_FOLDER}/dev-cluster.sh > /tmp/garage.log 2>&1 &
sleep 6
${SCRIPT_FOLDER}/dev-configure.sh
${SCRIPT_FOLDER}/dev-bucket.sh
which garage
garage status
garage key list
garage bucket list
garage -c /tmp/config.1.toml status
garage -c /tmp/config.1.toml key list
garage -c /tmp/config.1.toml bucket list
dd if=/dev/urandom of=/tmp/garage.1.rnd bs=1k count=2 # No multipart, inline storage (< INLINE_THRESHOLD = 3072 bytes)
dd if=/dev/urandom of=/tmp/garage.2.rnd bs=1M count=5 # No multipart but file will be chunked
@ -116,9 +117,9 @@ if [ -z "$SKIP_AWS" ]; then
echo "<h1>hello world</h1>" > /tmp/garage-index.html
aws s3 cp /tmp/garage-index.html s3://eprouvette/index.html
[ `curl -s -o /dev/null -w "%{http_code}" --header "Host: eprouvette.garage.tld" http://127.0.0.1:3923/ ` == 404 ]
garage bucket website --allow eprouvette
garage -c /tmp/config.1.toml bucket website --allow eprouvette
[ `curl -s -o /dev/null -w "%{http_code}" --header "Host: eprouvette.garage.tld" http://127.0.0.1:3923/ ` == 200 ]
garage bucket website --deny eprouvette
garage -c /tmp/config.1.toml bucket website --deny eprouvette
[ `curl -s -o /dev/null -w "%{http_code}" --header "Host: eprouvette.garage.tld" http://127.0.0.1:3923/ ` == 404 ]
aws s3 rm s3://eprouvette/index.html
rm /tmp/garage-index.html
@ -127,8 +128,8 @@ fi
echo "🏁 Teardown"
AWS_ACCESS_KEY_ID=`cat /tmp/garage.s3 |cut -d' ' -f1`
AWS_SECRET_ACCESS_KEY=`cat /tmp/garage.s3 |cut -d' ' -f2`
garage bucket deny --read --write eprouvette --key $AWS_ACCESS_KEY_ID
garage bucket delete --yes eprouvette
garage key delete --yes $AWS_ACCESS_KEY_ID
garage -c /tmp/config.1.toml bucket deny --read --write eprouvette --key $AWS_ACCESS_KEY_ID
garage -c /tmp/config.1.toml bucket delete --yes eprouvette
garage -c /tmp/config.1.toml key delete --yes $AWS_ACCESS_KEY_ID
echo "✅ Success"

View File

@ -1,6 +1,6 @@
[package]
name = "garage_api"
version = "0.3.0"
version = "0.4.0"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
@ -13,9 +13,9 @@ path = "lib.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
garage_model = { version = "0.3.0", path = "../model" }
garage_table = { version = "0.3.0", path = "../table" }
garage_util = { version = "0.3.0", path = "../util" }
garage_model = { version = "0.4.0", path = "../model" }
garage_table = { version = "0.4.0", path = "../table" }
garage_util = { version = "0.4.0", path = "../util" }
base64 = "0.13"
bytes = "1.0"
@ -35,7 +35,7 @@ tokio = { version = "1.0", default-features = false, features = ["rt", "rt-multi
http = "0.2"
httpdate = "0.3"
http-range = "0.1"
hyper = "0.14"
hyper = { version = "0.14", features = ["server", "http1", "runtime", "tcp", "stream"] }
percent-encoding = "2.1.0"
roxmltree = "0.14"
serde = { version = "1.0", features = ["derive"] }

View File

@ -82,7 +82,11 @@ impl Error {
match self {
Error::NotFound => StatusCode::NOT_FOUND,
Error::Forbidden(_) => StatusCode::FORBIDDEN,
Error::InternalError(GarageError::Rpc(_)) => StatusCode::SERVICE_UNAVAILABLE,
Error::InternalError(
GarageError::Timeout
| GarageError::RemoteError(_)
| GarageError::Quorum(_, _, _, _),
) => StatusCode::SERVICE_UNAVAILABLE,
Error::InternalError(_) | Error::Hyper(_) | Error::Http(_) => {
StatusCode::INTERNAL_SERVER_ERROR
}
@ -95,7 +99,11 @@ impl Error {
Error::NotFound => "NoSuchKey",
Error::Forbidden(_) => "AccessDenied",
Error::AuthorizationHeaderMalformed(_) => "AuthorizationHeaderMalformed",
Error::InternalError(GarageError::Rpc(_)) => "ServiceUnavailable",
Error::InternalError(
GarageError::Timeout
| GarageError::RemoteError(_)
| GarageError::Quorum(_, _, _, _),
) => "ServiceUnavailable",
Error::InternalError(_) | Error::Hyper(_) | Error::Http(_) => "InternalError",
_ => "InvalidRequest",
}

View File

@ -1,6 +1,6 @@
[package]
name = "garage"
version = "0.3.0"
version = "0.4.0"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
@ -14,12 +14,12 @@ path = "main.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
garage_api = { version = "0.3.0", path = "../api" }
garage_model = { version = "0.3.0", path = "../model" }
garage_rpc = { version = "0.3.0", path = "../rpc" }
garage_table = { version = "0.3.0", path = "../table" }
garage_util = { version = "0.3.0", path = "../util" }
garage_web = { version = "0.3.0", path = "../web" }
garage_api = { version = "0.4.0", path = "../api" }
garage_model = { version = "0.4.0", path = "../model" }
garage_rpc = { version = "0.4.0", path = "../rpc" }
garage_table = { version = "0.4.0", path = "../table" }
garage_util = { version = "0.4.0", path = "../util" }
garage_web = { version = "0.4.0", path = "../web" }
bytes = "1.0"
git-version = "0.3.4"
@ -27,6 +27,8 @@ hex = "0.4"
log = "0.4"
pretty_env_logger = "0.4"
rand = "0.8"
async-trait = "0.1.7"
sodiumoxide = { version = "0.2.5-0", package = "kuska-sodiumoxide" }
sled = "0.34"
@ -38,3 +40,5 @@ toml = "0.5"
futures = "0.3"
futures-util = "0.3"
tokio = { version = "1.0", default-features = false, features = ["rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] }
netapp = { version = "0.3.0", git = "https://git.deuxfleurs.fr/lx/netapp" }

View File

@ -2,6 +2,7 @@ use std::collections::HashMap;
use std::fmt::Write;
use std::sync::Arc;
use async_trait::async_trait;
use serde::{Deserialize, Serialize};
use garage_util::error::Error;
@ -10,8 +11,7 @@ use garage_table::crdt::Crdt;
use garage_table::replication::*;
use garage_table::*;
use garage_rpc::rpc_client::*;
use garage_rpc::rpc_server::*;
use garage_rpc::*;
use garage_model::bucket_table::*;
use garage_model::garage::Garage;
@ -19,10 +19,8 @@ use garage_model::key_table::*;
use crate::cli::*;
use crate::repair::Repair;
use crate::*;
pub const ADMIN_RPC_TIMEOUT: Duration = Duration::from_secs(30);
pub const ADMIN_RPC_PATH: &str = "_admin";
pub const ADMIN_RPC_PATH: &str = "garage/admin_rpc.rs/Rpc";
#[derive(Debug, Serialize, Deserialize)]
pub enum AdminRpc {
@ -39,35 +37,24 @@ pub enum AdminRpc {
KeyInfo(Key),
}
impl RpcMessage for AdminRpc {}
impl Rpc for AdminRpc {
type Response = Result<AdminRpc, Error>;
}
pub struct AdminRpcHandler {
garage: Arc<Garage>,
rpc_client: Arc<RpcClient<AdminRpc>>,
endpoint: Arc<Endpoint<AdminRpc, Self>>,
}
impl AdminRpcHandler {
pub fn new(garage: Arc<Garage>) -> Arc<Self> {
let rpc_client = garage.system.clone().rpc_client::<AdminRpc>(ADMIN_RPC_PATH);
Arc::new(Self { garage, rpc_client })
let endpoint = garage.system.netapp.endpoint(ADMIN_RPC_PATH.into());
let admin = Arc::new(Self { garage, endpoint });
admin.endpoint.set_handler(admin.clone());
admin
}
pub fn register_handler(self: Arc<Self>, rpc_server: &mut RpcServer) {
rpc_server.add_handler::<AdminRpc, _, _>(ADMIN_RPC_PATH.to_string(), move |msg, _addr| {
let self2 = self.clone();
async move {
match msg {
AdminRpc::BucketOperation(bo) => self2.handle_bucket_cmd(bo).await,
AdminRpc::KeyOperation(ko) => self2.handle_key_cmd(ko).await,
AdminRpc::LaunchRepair(opt) => self2.handle_launch_repair(opt).await,
AdminRpc::Stats(opt) => self2.handle_stats(opt).await,
_ => Err(Error::BadRpc("Invalid RPC".to_string())),
}
}
});
}
async fn handle_bucket_cmd(&self, cmd: BucketOperation) -> Result<AdminRpc, Error> {
async fn handle_bucket_cmd(&self, cmd: &BucketOperation) -> Result<AdminRpc, Error> {
match cmd {
BucketOperation::List => {
let bucket_names = self
@ -187,7 +174,7 @@ impl AdminRpcHandler {
}
}
async fn handle_key_cmd(&self, cmd: KeyOperation) -> Result<AdminRpc, Error> {
async fn handle_key_cmd(&self, cmd: &KeyOperation) -> Result<AdminRpc, Error> {
match cmd {
KeyOperation::List => {
let key_ids = self
@ -210,13 +197,13 @@ impl AdminRpcHandler {
Ok(AdminRpc::KeyInfo(key))
}
KeyOperation::New(query) => {
let key = Key::new(query.name);
let key = Key::new(query.name.clone());
self.garage.key_table.insert(&key).await?;
Ok(AdminRpc::KeyInfo(key))
}
KeyOperation::Rename(query) => {
let mut key = self.get_existing_key(&query.key_pattern).await?;
key.name.update(query.new_name);
key.name.update(query.new_name.clone());
self.garage.key_table.insert(&key).await?;
Ok(AdminRpc::KeyInfo(key))
}
@ -353,17 +340,17 @@ impl AdminRpcHandler {
let mut failures = vec![];
let ring = self.garage.system.ring.borrow().clone();
for node in ring.config.members.keys() {
if self
.rpc_client
let node = (*node).into();
let resp = self
.endpoint
.call(
*node,
AdminRpc::LaunchRepair(opt_to_send.clone()),
ADMIN_RPC_TIMEOUT,
&node,
&AdminRpc::LaunchRepair(opt_to_send.clone()),
PRIO_NORMAL,
)
.await
.is_err()
{
failures.push(*node);
.await;
if !matches!(resp, Ok(Ok(_))) {
failures.push(node);
}
}
if failures.is_empty() {
@ -402,10 +389,12 @@ impl AdminRpcHandler {
writeln!(&mut ret, "\n======================").unwrap();
writeln!(&mut ret, "Stats for node {:?}:", node).unwrap();
let node_id = (*node).into();
match self
.rpc_client
.call(*node, AdminRpc::Stats(opt), ADMIN_RPC_TIMEOUT)
.await
.endpoint
.call(&node_id, &AdminRpc::Stats(opt), PRIO_NORMAL)
.await?
{
Ok(AdminRpc::Ok(s)) => writeln!(&mut ret, "{}", s).unwrap(),
Ok(x) => writeln!(&mut ret, "Bad answer: {:?}", x).unwrap(),
@ -496,3 +485,20 @@ impl AdminRpcHandler {
writeln!(to, " GC todo queue length: {}", t.data.gc_todo_len()).unwrap();
}
}
#[async_trait]
impl EndpointHandler<AdminRpc> for AdminRpcHandler {
async fn handle(
self: &Arc<Self>,
message: &AdminRpc,
_from: NodeID,
) -> Result<AdminRpc, Error> {
match message {
AdminRpc::BucketOperation(bo) => self.handle_bucket_cmd(bo).await,
AdminRpc::KeyOperation(ko) => self.handle_key_cmd(ko).await,
AdminRpc::LaunchRepair(opt) => self.handle_launch_repair(opt.clone()).await,
AdminRpc::Stats(opt) => self.handle_stats(opt.clone()).await,
_ => Err(Error::BadRpc("Invalid RPC".to_string())),
}
}
}

View File

@ -1,634 +0,0 @@
use std::cmp::max;
use std::collections::HashSet;
use std::net::SocketAddr;
use std::path::PathBuf;
use serde::{Deserialize, Serialize};
use structopt::StructOpt;
use garage_util::data::Uuid;
use garage_util::error::Error;
use garage_util::time::*;
use garage_rpc::membership::*;
use garage_rpc::ring::*;
use garage_rpc::rpc_client::*;
use garage_model::bucket_table::*;
use garage_model::key_table::*;
use crate::admin_rpc::*;
#[derive(StructOpt, Debug)]
pub enum Command {
/// Run Garage server
#[structopt(name = "server")]
Server(ServerOpt),
/// Get network status
#[structopt(name = "status")]
Status,
/// Garage node operations
#[structopt(name = "node")]
Node(NodeOperation),
/// Bucket operations
#[structopt(name = "bucket")]
Bucket(BucketOperation),
/// Key operations
#[structopt(name = "key")]
Key(KeyOperation),
/// Start repair of node data
#[structopt(name = "repair")]
Repair(RepairOpt),
/// Gather node statistics
#[structopt(name = "stats")]
Stats(StatsOpt),
}
#[derive(StructOpt, Debug)]
pub struct ServerOpt {
/// Configuration file
#[structopt(short = "c", long = "config", default_value = "./config.toml")]
pub config_file: PathBuf,
}
#[derive(StructOpt, Debug)]
pub enum NodeOperation {
/// Configure Garage node
#[structopt(name = "configure")]
Configure(ConfigureNodeOpt),
/// Remove Garage node from cluster
#[structopt(name = "remove")]
Remove(RemoveNodeOpt),
}
#[derive(StructOpt, Debug)]
pub struct ConfigureNodeOpt {
/// Node to configure (prefix of hexadecimal node id)
node_id: String,
/// Location (zone or datacenter) of the node
#[structopt(short = "z", long = "zone")]
zone: Option<String>,
/// Capacity (in relative terms, use 1 to represent your smallest server)
#[structopt(short = "c", long = "capacity")]
capacity: Option<u32>,
/// Gateway-only node
#[structopt(short = "g", long = "gateway")]
gateway: bool,
/// Optional node tag
#[structopt(short = "t", long = "tag")]
tag: Option<String>,
/// Replaced node(s): list of node IDs that will be removed from the current cluster
#[structopt(long = "replace")]
replace: Vec<String>,
}
#[derive(StructOpt, Debug)]
pub struct RemoveNodeOpt {
/// Node to configure (prefix of hexadecimal node id)
node_id: String,
/// If this flag is not given, the node won't be removed
#[structopt(long = "yes")]
yes: bool,
}
#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub enum BucketOperation {
/// List buckets
#[structopt(name = "list")]
List,
/// Get bucket info
#[structopt(name = "info")]
Info(BucketOpt),
/// Create bucket
#[structopt(name = "create")]
Create(BucketOpt),
/// Delete bucket
#[structopt(name = "delete")]
Delete(DeleteBucketOpt),
/// Allow key to read or write to bucket
#[structopt(name = "allow")]
Allow(PermBucketOpt),
/// Deny key from reading or writing to bucket
#[structopt(name = "deny")]
Deny(PermBucketOpt),
/// Expose as website or not
#[structopt(name = "website")]
Website(WebsiteOpt),
}
#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub struct WebsiteOpt {
/// Create
#[structopt(long = "allow")]
pub allow: bool,
/// Delete
#[structopt(long = "deny")]
pub deny: bool,
/// Bucket name
pub bucket: String,
}
#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub struct BucketOpt {
/// Bucket name
pub name: String,
}
#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub struct DeleteBucketOpt {
/// Bucket name
pub name: String,
/// If this flag is not given, the bucket won't be deleted
#[structopt(long = "yes")]
pub yes: bool,
}
#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub struct PermBucketOpt {
/// Access key name or ID
#[structopt(long = "key")]
pub key_pattern: String,
/// Allow/deny read operations
#[structopt(long = "read")]
pub read: bool,
/// Allow/deny write operations
#[structopt(long = "write")]
pub write: bool,
/// Bucket name
pub bucket: String,
}
#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub enum KeyOperation {
/// List keys
#[structopt(name = "list")]
List,
/// Get key info
#[structopt(name = "info")]
Info(KeyOpt),
/// Create new key
#[structopt(name = "new")]
New(KeyNewOpt),
/// Rename key
#[structopt(name = "rename")]
Rename(KeyRenameOpt),
/// Delete key
#[structopt(name = "delete")]
Delete(KeyDeleteOpt),
/// Import key
#[structopt(name = "import")]
Import(KeyImportOpt),
}
#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub struct KeyOpt {
/// ID or name of the key
pub key_pattern: String,
}
#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub struct KeyNewOpt {
/// Name of the key
#[structopt(long = "name", default_value = "Unnamed key")]
pub name: String,
}
#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub struct KeyRenameOpt {
/// ID or name of the key
pub key_pattern: String,
/// New name of the key
pub new_name: String,
}
#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub struct KeyDeleteOpt {
/// ID or name of the key
pub key_pattern: String,
/// Confirm deletion
#[structopt(long = "yes")]
pub yes: bool,
}
#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub struct KeyImportOpt {
/// Access key ID
pub key_id: String,
/// Secret access key
pub secret_key: String,
/// Key name
#[structopt(short = "n", default_value = "Imported key")]
pub name: String,
}
#[derive(Serialize, Deserialize, StructOpt, Debug, Clone)]
pub struct RepairOpt {
/// Launch repair operation on all nodes
#[structopt(short = "a", long = "all-nodes")]
pub all_nodes: bool,
/// Confirm the launch of the repair operation
#[structopt(long = "yes")]
pub yes: bool,
#[structopt(subcommand)]
pub what: Option<RepairWhat>,
}
#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)]
pub enum RepairWhat {
/// Only do a full sync of metadata tables
#[structopt(name = "tables")]
Tables,
/// Only repair (resync/rebalance) the set of stored blocks
#[structopt(name = "blocks")]
Blocks,
/// Only redo the propagation of object deletions to the version table (slow)
#[structopt(name = "versions")]
Versions,
/// Only redo the propagation of version deletions to the block ref table (extremely slow)
#[structopt(name = "block_refs")]
BlockRefs,
}
#[derive(Serialize, Deserialize, StructOpt, Debug, Clone)]
pub struct StatsOpt {
/// Gather statistics from all nodes
#[structopt(short = "a", long = "all-nodes")]
pub all_nodes: bool,
/// Gather detailed statistics (this can be long)
#[structopt(short = "d", long = "detailed")]
pub detailed: bool,
}
pub async fn cli_cmd(
cmd: Command,
membership_rpc_cli: RpcAddrClient<Message>,
admin_rpc_cli: RpcAddrClient<AdminRpc>,
rpc_host: SocketAddr,
) -> Result<(), Error> {
match cmd {
Command::Status => cmd_status(membership_rpc_cli, rpc_host).await,
Command::Node(NodeOperation::Configure(configure_opt)) => {
cmd_configure(membership_rpc_cli, rpc_host, configure_opt).await
}
Command::Node(NodeOperation::Remove(remove_opt)) => {
cmd_remove(membership_rpc_cli, rpc_host, remove_opt).await
}
Command::Bucket(bo) => {
cmd_admin(admin_rpc_cli, rpc_host, AdminRpc::BucketOperation(bo)).await
}
Command::Key(ko) => cmd_admin(admin_rpc_cli, rpc_host, AdminRpc::KeyOperation(ko)).await,
Command::Repair(ro) => cmd_admin(admin_rpc_cli, rpc_host, AdminRpc::LaunchRepair(ro)).await,
Command::Stats(so) => cmd_admin(admin_rpc_cli, rpc_host, AdminRpc::Stats(so)).await,
_ => unreachable!(),
}
}
pub async fn cmd_status(
rpc_cli: RpcAddrClient<Message>,
rpc_host: SocketAddr,
) -> Result<(), Error> {
let status = match rpc_cli
.call(&rpc_host, &Message::PullStatus, ADMIN_RPC_TIMEOUT)
.await??
{
Message::AdvertiseNodesUp(nodes) => nodes,
resp => return Err(Error::Message(format!("Invalid RPC response: {:?}", resp))),
};
let config = match rpc_cli
.call(&rpc_host, &Message::PullConfig, ADMIN_RPC_TIMEOUT)
.await??
{
Message::AdvertiseConfig(cfg) => cfg,
resp => return Err(Error::Message(format!("Invalid RPC response: {:?}", resp))),
};
let (hostname_len, addr_len, tag_len, zone_len) = status
.iter()
.map(|adv| (adv, config.members.get(&adv.id)))
.map(|(adv, cfg)| {
(
adv.state_info.hostname.len(),
adv.addr.to_string().len(),
cfg.map(|c| c.tag.len()).unwrap_or(0),
cfg.map(|c| c.zone.len()).unwrap_or(0),
)
})
.fold((0, 0, 0, 0), |(h, a, t, z), (mh, ma, mt, mz)| {
(max(h, mh), max(a, ma), max(t, mt), max(z, mz))
});
println!("Healthy nodes:");
for adv in status.iter().filter(|x| x.is_up) {
if let Some(cfg) = config.members.get(&adv.id) {
println!(
"{id:?}\t{host}{h_pad}\t{addr}{a_pad}\t[{tag}]{t_pad}\t{zone}{z_pad}\t{capacity}",
id = adv.id,
host = adv.state_info.hostname,
addr = adv.addr,
tag = cfg.tag,
zone = cfg.zone,
capacity = cfg.capacity_string(),
h_pad = " ".repeat(hostname_len - adv.state_info.hostname.len()),
a_pad = " ".repeat(addr_len - adv.addr.to_string().len()),
t_pad = " ".repeat(tag_len - cfg.tag.len()),
z_pad = " ".repeat(zone_len - cfg.zone.len()),
);
} else {
println!(
"{id:?}\t{h}{h_pad}\t{addr}{a_pad}\tUNCONFIGURED/REMOVED",
id = adv.id,
h = adv.state_info.hostname,
addr = adv.addr,
h_pad = " ".repeat(hostname_len - adv.state_info.hostname.len()),
a_pad = " ".repeat(addr_len - adv.addr.to_string().len()),
);
}
}
let status_keys = status.iter().map(|x| x.id).collect::<HashSet<_>>();
let failure_case_1 = status.iter().any(|x| !x.is_up);
let failure_case_2 = config
.members
.iter()
.any(|(id, _)| !status_keys.contains(id));
if failure_case_1 || failure_case_2 {
println!("\nFailed nodes:");
for adv in status.iter().filter(|x| !x.is_up) {
if let Some(cfg) = config.members.get(&adv.id) {
println!(
"{id:?}\t{host}{h_pad}\t{addr}{a_pad}\t[{tag}]{t_pad}\t{zone}{z_pad}\t{capacity}\tlast seen: {last_seen}s ago",
id=adv.id,
host=adv.state_info.hostname,
addr=adv.addr,
tag=cfg.tag,
zone=cfg.zone,
capacity=cfg.capacity_string(),
last_seen=(now_msec() - adv.last_seen) / 1000,
h_pad=" ".repeat(hostname_len - adv.state_info.hostname.len()),
a_pad=" ".repeat(addr_len - adv.addr.to_string().len()),
t_pad=" ".repeat(tag_len - cfg.tag.len()),
z_pad=" ".repeat(zone_len - cfg.zone.len()),
);
}
}
let (tag_len, zone_len) = config
.members
.iter()
.filter(|(&id, _)| !status.iter().any(|x| x.id == id))
.map(|(_, cfg)| (cfg.tag.len(), cfg.zone.len()))
.fold((0, 0), |(t, z), (mt, mz)| (max(t, mt), max(z, mz)));
for (id, cfg) in config.members.iter() {
if !status.iter().any(|x| x.id == *id) {
println!(
"{id:?}\t{tag}{t_pad}\t{zone}{z_pad}\t{capacity}\tnever seen",
id = id,
tag = cfg.tag,
zone = cfg.zone,
capacity = cfg.capacity_string(),
t_pad = " ".repeat(tag_len - cfg.tag.len()),
z_pad = " ".repeat(zone_len - cfg.zone.len()),
);
}
}
}
Ok(())
}
pub fn find_matching_node(
cand: impl std::iter::Iterator<Item = Uuid>,
pattern: &str,
) -> Result<Uuid, Error> {
let mut candidates = vec![];
for c in cand {
if hex::encode(&c).starts_with(&pattern) {
candidates.push(c);
}
}
if candidates.len() != 1 {
Err(Error::Message(format!(
"{} nodes match '{}'",
candidates.len(),
pattern,
)))
} else {
Ok(candidates[0])
}
}
pub async fn cmd_configure(
rpc_cli: RpcAddrClient<Message>,
rpc_host: SocketAddr,
args: ConfigureNodeOpt,
) -> Result<(), Error> {
let status = match rpc_cli
.call(&rpc_host, &Message::PullStatus, ADMIN_RPC_TIMEOUT)
.await??
{
Message::AdvertiseNodesUp(nodes) => nodes,
resp => return Err(Error::Message(format!("Invalid RPC response: {:?}", resp))),
};
let added_node = find_matching_node(status.iter().map(|x| x.id), &args.node_id)?;
let mut config = match rpc_cli
.call(&rpc_host, &Message::PullConfig, ADMIN_RPC_TIMEOUT)
.await??
{
Message::AdvertiseConfig(cfg) => cfg,
resp => return Err(Error::Message(format!("Invalid RPC response: {:?}", resp))),
};
for replaced in args.replace.iter() {
let replaced_node = find_matching_node(config.members.keys().cloned(), replaced)?;
if config.members.remove(&replaced_node).is_none() {
return Err(Error::Message(format!(
"Cannot replace node {:?} as it is not in current configuration",
replaced_node
)));
}
}
if args.capacity.is_some() && args.gateway {
return Err(Error::Message(
"-c and -g are mutually exclusive, please configure node either with c>0 to act as a storage node or with -g to act as a gateway node".into()));
}
if args.capacity == Some(0) {
return Err(Error::Message("Invalid capacity value: 0".into()));
}
let new_entry = match config.members.get(&added_node) {
None => {
let capacity = match args.capacity {
Some(c) => Some(c),
None if args.gateway => None,
_ => return Err(Error::Message(
"Please specify a capacity with the -c flag, or set node explicitly as gateway with -g".into())),
};
NetworkConfigEntry {
zone: args.zone.expect("Please specify a zone with the -z flag"),
capacity,
tag: args.tag.unwrap_or_default(),
}
}
Some(old) => {
let capacity = match args.capacity {
Some(c) => Some(c),
None if args.gateway => None,
_ => old.capacity,
};
NetworkConfigEntry {
zone: args.zone.unwrap_or_else(|| old.zone.to_string()),
capacity,
tag: args.tag.unwrap_or_else(|| old.tag.to_string()),
}
}
};
config.members.insert(added_node, new_entry);
config.version += 1;
rpc_cli
.call(
&rpc_host,
&Message::AdvertiseConfig(config),
ADMIN_RPC_TIMEOUT,
)
.await??;
Ok(())
}
pub async fn cmd_remove(
rpc_cli: RpcAddrClient<Message>,
rpc_host: SocketAddr,
args: RemoveNodeOpt,
) -> Result<(), Error> {
let mut config = match rpc_cli
.call(&rpc_host, &Message::PullConfig, ADMIN_RPC_TIMEOUT)
.await??
{
Message::AdvertiseConfig(cfg) => cfg,
resp => return Err(Error::Message(format!("Invalid RPC response: {:?}", resp))),
};
let deleted_node = find_matching_node(config.members.keys().cloned(), &args.node_id)?;
if !args.yes {
return Err(Error::Message(format!(
"Add the flag --yes to really remove {:?} from the cluster",
deleted_node
)));
}
config.members.remove(&deleted_node);
config.version += 1;
rpc_cli
.call(
&rpc_host,
&Message::AdvertiseConfig(config),
ADMIN_RPC_TIMEOUT,
)
.await??;
Ok(())
}
pub async fn cmd_admin(
rpc_cli: RpcAddrClient<AdminRpc>,
rpc_host: SocketAddr,
args: AdminRpc,
) -> Result<(), Error> {
match rpc_cli.call(&rpc_host, args, ADMIN_RPC_TIMEOUT).await?? {
AdminRpc::Ok(msg) => {
println!("{}", msg);
}
AdminRpc::BucketList(bl) => {
println!("List of buckets:");
for bucket in bl {
println!("{}", bucket);
}
}
AdminRpc::BucketInfo(bucket) => {
print_bucket_info(&bucket);
}
AdminRpc::KeyList(kl) => {
println!("List of keys:");
for key in kl {
println!("{}\t{}", key.0, key.1);
}
}
AdminRpc::KeyInfo(key) => {
print_key_info(&key);
}
r => {
error!("Unexpected response: {:?}", r);
}
}
Ok(())
}
fn print_key_info(key: &Key) {
println!("Key name: {}", key.name.get());
println!("Key ID: {}", key.key_id);
println!("Secret key: {}", key.secret_key);
if key.deleted.get() {
println!("Key is deleted.");
} else {
println!("Authorized buckets:");
for (b, _, perm) in key.authorized_buckets.items().iter() {
println!("- {} R:{} W:{}", b, perm.allow_read, perm.allow_write);
}
}
}
fn print_bucket_info(bucket: &Bucket) {
println!("Bucket name: {}", bucket.name);
match bucket.state.get() {
BucketState::Deleted => println!("Bucket is deleted."),
BucketState::Present(p) => {
println!("Authorized keys:");
for (k, _, perm) in p.authorized_keys.items().iter() {
println!("- {} R:{} W:{}", k, perm.allow_read, perm.allow_write);
}
println!("Website access: {}", p.website.get());
}
};
}
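
As a rough illustration of how these StructOpt definitions are used
(hypothetical invocation, not part of this diff), the command line
`garage bucket allow --read --key mykey mybucket` parses into:

// Command::Bucket(BucketOperation::Allow(PermBucketOpt {
//     key_pattern: "mykey".to_string(),
//     read: true,
//     write: false,
//     bucket: "mybucket".to_string(),
// }))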

287
src/garage/cli/cmd.rs Normal file
View File

@ -0,0 +1,287 @@
use std::collections::HashSet;
use garage_util::error::*;
use garage_rpc::ring::*;
use garage_rpc::system::*;
use garage_rpc::*;
use crate::admin::*;
use crate::cli::*;
pub async fn cli_command_dispatch(
cmd: Command,
system_rpc_endpoint: &Endpoint<SystemRpc, ()>,
admin_rpc_endpoint: &Endpoint<AdminRpc, ()>,
rpc_host: NodeID,
) -> Result<(), Error> {
match cmd {
Command::Status => cmd_status(system_rpc_endpoint, rpc_host).await,
Command::Node(NodeOperation::Connect(connect_opt)) => {
cmd_connect(system_rpc_endpoint, rpc_host, connect_opt).await
}
Command::Node(NodeOperation::Configure(configure_opt)) => {
cmd_configure(system_rpc_endpoint, rpc_host, configure_opt).await
}
Command::Node(NodeOperation::Remove(remove_opt)) => {
cmd_remove(system_rpc_endpoint, rpc_host, remove_opt).await
}
Command::Bucket(bo) => {
cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::BucketOperation(bo)).await
}
Command::Key(ko) => {
cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::KeyOperation(ko)).await
}
Command::Repair(ro) => {
cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::LaunchRepair(ro)).await
}
Command::Stats(so) => cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::Stats(so)).await,
_ => unreachable!(),
}
}
pub async fn cmd_status(rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID) -> Result<(), Error> {
let status = match rpc_cli
.call(&rpc_host, &SystemRpc::GetKnownNodes, PRIO_NORMAL)
.await??
{
SystemRpc::ReturnKnownNodes(nodes) => nodes,
resp => return Err(Error::Message(format!("Invalid RPC response: {:?}", resp))),
};
let config = match rpc_cli
.call(&rpc_host, &SystemRpc::PullConfig, PRIO_NORMAL)
.await??
{
SystemRpc::AdvertiseConfig(cfg) => cfg,
resp => return Err(Error::Message(format!("Invalid RPC response: {:?}", resp))),
};
println!("==== HEALTHY NODES ====");
let mut healthy_nodes = vec!["ID\tHostname\tAddress\tTag\tZone\tCapacity".to_string()];
for adv in status.iter().filter(|adv| adv.is_up) {
if let Some(cfg) = config.members.get(&adv.id) {
healthy_nodes.push(format!(
"{id:?}\t{host}\t{addr}\t[{tag}]\t{zone}\t{capacity}",
id = adv.id,
host = adv.status.hostname,
addr = adv.addr,
tag = cfg.tag,
zone = cfg.zone,
capacity = cfg.capacity_string(),
));
} else {
healthy_nodes.push(format!(
"{id:?}\t{h}\t{addr}\tNO ROLE ASSIGNED",
id = adv.id,
h = adv.status.hostname,
addr = adv.addr,
));
}
}
format_table(healthy_nodes);
let status_keys = status.iter().map(|adv| adv.id).collect::<HashSet<_>>();
let failure_case_1 = status.iter().any(|adv| !adv.is_up);
let failure_case_2 = config
.members
.iter()
.any(|(id, _)| !status_keys.contains(id));
if failure_case_1 || failure_case_2 {
println!("\n==== FAILED NODES ====");
let mut failed_nodes =
vec!["ID\tHostname\tAddress\tTag\tZone\tCapacity\tLast seen".to_string()];
for adv in status.iter().filter(|adv| !adv.is_up) {
if let Some(cfg) = config.members.get(&adv.id) {
failed_nodes.push(format!(
"{id:?}\t{host}\t{addr}\t[{tag}]\t{zone}\t{capacity}\t{last_seen}",
id = adv.id,
host = adv.status.hostname,
addr = adv.addr,
tag = cfg.tag,
zone = cfg.zone,
capacity = cfg.capacity_string(),
last_seen = adv
.last_seen_secs_ago
.map(|s| format!("{}s ago", s))
.unwrap_or_else(|| "never seen".into()),
));
}
}
for (id, cfg) in config.members.iter() {
if !status_keys.contains(id) {
failed_nodes.push(format!(
"{id:?}\t??\t??\t[{tag}]\t{zone}\t{capacity}\tnever seen",
id = id,
tag = cfg.tag,
zone = cfg.zone,
capacity = cfg.capacity_string(),
));
}
}
format_table(failed_nodes);
}
Ok(())
}
pub async fn cmd_connect(
rpc_cli: &Endpoint<SystemRpc, ()>,
rpc_host: NodeID,
args: ConnectNodeOpt,
) -> Result<(), Error> {
match rpc_cli
.call(&rpc_host, &SystemRpc::Connect(args.node), PRIO_NORMAL)
.await??
{
SystemRpc::Ok => {
println!("Success.");
Ok(())
}
r => Err(Error::BadRpc(format!("Unexpected response: {:?}", r))),
}
}
pub async fn cmd_configure(
rpc_cli: &Endpoint<SystemRpc, ()>,
rpc_host: NodeID,
args: ConfigureNodeOpt,
) -> Result<(), Error> {
let status = match rpc_cli
.call(&rpc_host, &SystemRpc::GetKnownNodes, PRIO_NORMAL)
.await??
{
SystemRpc::ReturnKnownNodes(nodes) => nodes,
resp => return Err(Error::Message(format!("Invalid RPC response: {:?}", resp))),
};
let added_node = find_matching_node(status.iter().map(|adv| adv.id), &args.node_id)?;
let mut config = match rpc_cli
.call(&rpc_host, &SystemRpc::PullConfig, PRIO_NORMAL)
.await??
{
SystemRpc::AdvertiseConfig(cfg) => cfg,
resp => return Err(Error::Message(format!("Invalid RPC response: {:?}", resp))),
};
for replaced in args.replace.iter() {
let replaced_node = find_matching_node(config.members.keys().cloned(), replaced)?;
if config.members.remove(&replaced_node).is_none() {
return Err(Error::Message(format!(
"Cannot replace node {:?} as it is not in current configuration",
replaced_node
)));
}
}
if args.capacity.is_some() && args.gateway {
return Err(Error::Message(
"-c and -g are mutually exclusive, please configure node either with c>0 to act as a storage node or with -g to act as a gateway node".into()));
}
if args.capacity == Some(0) {
return Err(Error::Message("Invalid capacity value: 0".into()));
}
let new_entry = match config.members.get(&added_node) {
None => {
let capacity = match args.capacity {
Some(c) => Some(c),
None if args.gateway => None,
_ => return Err(Error::Message(
"Please specify a capacity with the -c flag, or set node explicitly as gateway with -g".into())),
};
NetworkConfigEntry {
zone: args.zone.ok_or("Please specify a zone with the -z flag")?,
capacity,
tag: args.tag.unwrap_or_default(),
}
}
Some(old) => {
let capacity = match args.capacity {
Some(c) => Some(c),
None if args.gateway => None,
_ => old.capacity,
};
NetworkConfigEntry {
zone: args.zone.unwrap_or_else(|| old.zone.to_string()),
capacity,
tag: args.tag.unwrap_or_else(|| old.tag.to_string()),
}
}
};
config.members.insert(added_node, new_entry);
config.version += 1;
rpc_cli
.call(&rpc_host, &SystemRpc::AdvertiseConfig(config), PRIO_NORMAL)
.await??;
Ok(())
}
pub async fn cmd_remove(
rpc_cli: &Endpoint<SystemRpc, ()>,
rpc_host: NodeID,
args: RemoveNodeOpt,
) -> Result<(), Error> {
let mut config = match rpc_cli
.call(&rpc_host, &SystemRpc::PullConfig, PRIO_NORMAL)
.await??
{
SystemRpc::AdvertiseConfig(cfg) => cfg,
resp => return Err(Error::Message(format!("Invalid RPC response: {:?}", resp))),
};
let deleted_node = find_matching_node(config.members.keys().cloned(), &args.node_id)?;
if !args.yes {
return Err(Error::Message(format!(
"Add the flag --yes to really remove {:?} from the cluster",
deleted_node
)));
}
config.members.remove(&deleted_node);
config.version += 1;
rpc_cli
.call(&rpc_host, &SystemRpc::AdvertiseConfig(config), PRIO_NORMAL)
.await??;
Ok(())
}
pub async fn cmd_admin(
rpc_cli: &Endpoint<AdminRpc, ()>,
rpc_host: NodeID,
args: AdminRpc,
) -> Result<(), Error> {
match rpc_cli.call(&rpc_host, &args, PRIO_NORMAL).await?? {
AdminRpc::Ok(msg) => {
println!("{}", msg);
}
AdminRpc::BucketList(bl) => {
println!("List of buckets:");
for bucket in bl {
println!("{}", bucket);
}
}
AdminRpc::BucketInfo(bucket) => {
print_bucket_info(&bucket);
}
AdminRpc::KeyList(kl) => {
println!("List of keys:");
for key in kl {
println!("{}\t{}", key.0, key.1);
}
}
AdminRpc::KeyInfo(key) => {
print_key_info(&key);
}
r => {
error!("Unexpected response: {:?}", r);
}
}
Ok(())
}
// ---- Utility functions ----

65
src/garage/cli/init.rs Normal file
View File

@ -0,0 +1,65 @@
use std::path::PathBuf;
use garage_util::error::*;
pub const READ_KEY_ERROR: &str = "Unable to read node key. It will be generated by your garage node the first time it is launched. Ensure that your garage node is currently running. (The node key is supposed to be stored in your metadata directory.)";
pub fn node_id_command(config_file: PathBuf, quiet: bool) -> Result<(), Error> {
let config = garage_util::config::read_config(config_file.clone()).err_context(format!(
"Unable to read configuration file {}",
config_file.to_string_lossy(),
))?;
let node_id =
garage_rpc::system::read_node_id(&config.metadata_dir).err_context(READ_KEY_ERROR)?;
let idstr = if let Some(addr) = config.rpc_public_addr {
let idstr = format!("{}@{}", hex::encode(&node_id), addr);
println!("{}", idstr);
idstr
} else {
let idstr = hex::encode(&node_id);
println!("{}", idstr);
if !quiet {
eprintln!("WARNING: I don't know the public address to reach this node.");
eprintln!("In all of the instructions below, replace 127.0.0.1:3901 by the appropriate address and port.");
}
format!("{}@127.0.0.1:3901", idstr)
};
if !quiet {
eprintln!();
eprintln!(
"To instruct a node to connect to this node, run the following command on that node:"
);
eprintln!(" garage [-c <config file path>] node connect {}", idstr);
eprintln!();
eprintln!("Or instruct them to connect from here by running:");
eprintln!(
" garage -c {} -h <remote node> node connect {}",
config_file.to_string_lossy(),
idstr
);
eprintln!(
"where <remote_node> is their own node identifier in the format: <pubkey>@<ip>:<port>"
);
eprintln!();
eprintln!("This node identifier can also be added as a bootstrap node in other node's garage.toml files:");
eprintln!(" bootstrap_peers = [");
eprintln!(" \"{}\",", idstr);
eprintln!(" ...");
eprintln!(" ]");
eprintln!();
eprintln!(
r#"Security notice: Garage's intra-cluster communications are secured primarily by the shared
secret value rpc_secret. However, an attacker that knows rpc_secret (for example if it
leaks) cannot connect if they do not know any of the identifiers of the nodes in the
cluster. It is thus a good security measure to try to keep them secret if possible.
"#
);
}
Ok(())
}
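
For reference, the identifier format printed above is the same one that
netapp's parse_and_resolve_peer_addr (used in main.rs below) parses back;
a rough sketch with a made-up identifier value:

// use netapp::util::parse_and_resolve_peer_addr;
//
// // "<pubkey hex>@<ip or hostname>:<port>" -- hypothetical values:
// if let Some((id, addrs)) = parse_and_resolve_peer_addr("3b5f...@10.0.0.1:3901") {
//     println!("node {} resolves to {:?}", hex::encode(&id), addrs);
// }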

9
src/garage/cli/mod.rs Normal file
View File

@ -0,0 +1,9 @@
pub(crate) mod cmd;
pub(crate) mod init;
pub(crate) mod structs;
pub(crate) mod util;
pub(crate) use cmd::*;
pub(crate) use init::*;
pub(crate) use structs::*;
pub(crate) use util::*;

296
src/garage/cli/structs.rs Normal file
View File

@ -0,0 +1,296 @@
use serde::{Deserialize, Serialize};
use structopt::StructOpt;
#[derive(StructOpt, Debug)]
pub enum Command {
/// Run Garage server
#[structopt(name = "server")]
Server,
/// Print identifier (public key) of this garage node.
/// Generates a new keypair if necessary.
#[structopt(name = "node-id")]
NodeId(NodeIdOpt),
/// Get network status
#[structopt(name = "status")]
Status,
/// Garage node operations
#[structopt(name = "node")]
Node(NodeOperation),
/// Bucket operations
#[structopt(name = "bucket")]
Bucket(BucketOperation),
/// Key operations
#[structopt(name = "key")]
Key(KeyOperation),
/// Start repair of node data
#[structopt(name = "repair")]
Repair(RepairOpt),
/// Gather node statistics
#[structopt(name = "stats")]
Stats(StatsOpt),
}
#[derive(StructOpt, Debug)]
pub enum NodeOperation {
/// Connect to a Garage node that is currently isolated from the system
#[structopt(name = "connect")]
Connect(ConnectNodeOpt),
/// Configure Garage node
#[structopt(name = "configure")]
Configure(ConfigureNodeOpt),
/// Remove Garage node from cluster
#[structopt(name = "remove")]
Remove(RemoveNodeOpt),
}
#[derive(StructOpt, Debug)]
pub struct NodeIdOpt {
/// Do not print usage instructions to stderr
#[structopt(short = "q", long = "quiet")]
pub(crate) quiet: bool,
}
#[derive(StructOpt, Debug)]
pub struct ConnectNodeOpt {
/// Node public key and address, in the format:
/// `<public key hexadecimal>@<ip or hostname>:<port>`
pub(crate) node: String,
}
#[derive(StructOpt, Debug)]
pub struct ConfigureNodeOpt {
/// Node to configure (prefix of hexadecimal node id)
pub(crate) node_id: String,
/// Location (zone or datacenter) of the node
#[structopt(short = "z", long = "zone")]
pub(crate) zone: Option<String>,
/// Capacity (in relative terms, use 1 to represent your smallest server)
#[structopt(short = "c", long = "capacity")]
pub(crate) capacity: Option<u32>,
/// Gateway-only node
#[structopt(short = "g", long = "gateway")]
pub(crate) gateway: bool,
/// Optional node tag
#[structopt(short = "t", long = "tag")]
pub(crate) tag: Option<String>,
/// Replaced node(s): list of node IDs that will be removed from the current cluster
#[structopt(long = "replace")]
pub(crate) replace: Vec<String>,
}
#[derive(StructOpt, Debug)]
pub struct RemoveNodeOpt {
/// Node to remove (prefix of hexadecimal node id)
pub(crate) node_id: String,
/// If this flag is not given, the node won't be removed
#[structopt(long = "yes")]
pub(crate) yes: bool,
}
#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub enum BucketOperation {
/// List buckets
#[structopt(name = "list")]
List,
/// Get bucket info
#[structopt(name = "info")]
Info(BucketOpt),
/// Create bucket
#[structopt(name = "create")]
Create(BucketOpt),
/// Delete bucket
#[structopt(name = "delete")]
Delete(DeleteBucketOpt),
/// Allow key to read or write to bucket
#[structopt(name = "allow")]
Allow(PermBucketOpt),
/// Deny key from reading or writing to bucket
#[structopt(name = "deny")]
Deny(PermBucketOpt),
/// Expose as website or not
#[structopt(name = "website")]
Website(WebsiteOpt),
}
#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub struct WebsiteOpt {
/// Allow website access
#[structopt(long = "allow")]
pub allow: bool,
/// Deny website access
#[structopt(long = "deny")]
pub deny: bool,
/// Bucket name
pub bucket: String,
}
#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub struct BucketOpt {
/// Bucket name
pub name: String,
}
#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub struct DeleteBucketOpt {
/// Bucket name
pub name: String,
/// If this flag is not given, the bucket won't be deleted
#[structopt(long = "yes")]
pub yes: bool,
}
#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub struct PermBucketOpt {
/// Access key name or ID
#[structopt(long = "key")]
pub key_pattern: String,
/// Allow/deny read operations
#[structopt(long = "read")]
pub read: bool,
/// Allow/deny write operations
#[structopt(long = "write")]
pub write: bool,
/// Bucket name
pub bucket: String,
}
#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub enum KeyOperation {
/// List keys
#[structopt(name = "list")]
List,
/// Get key info
#[structopt(name = "info")]
Info(KeyOpt),
/// Create new key
#[structopt(name = "new")]
New(KeyNewOpt),
/// Rename key
#[structopt(name = "rename")]
Rename(KeyRenameOpt),
/// Delete key
#[structopt(name = "delete")]
Delete(KeyDeleteOpt),
/// Import key
#[structopt(name = "import")]
Import(KeyImportOpt),
}
#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub struct KeyOpt {
/// ID or name of the key
pub key_pattern: String,
}
#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub struct KeyNewOpt {
/// Name of the key
#[structopt(long = "name", default_value = "Unnamed key")]
pub name: String,
}
#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub struct KeyRenameOpt {
/// ID or name of the key
pub key_pattern: String,
/// New name of the key
pub new_name: String,
}
#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub struct KeyDeleteOpt {
/// ID or name of the key
pub key_pattern: String,
/// Confirm deletion
#[structopt(long = "yes")]
pub yes: bool,
}
#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub struct KeyImportOpt {
/// Access key ID
pub key_id: String,
/// Secret access key
pub secret_key: String,
/// Key name
#[structopt(short = "n", default_value = "Imported key")]
pub name: String,
}
#[derive(Serialize, Deserialize, StructOpt, Debug, Clone)]
pub struct RepairOpt {
/// Launch repair operation on all nodes
#[structopt(short = "a", long = "all-nodes")]
pub all_nodes: bool,
/// Confirm the launch of the repair operation
#[structopt(long = "yes")]
pub yes: bool,
#[structopt(subcommand)]
pub what: Option<RepairWhat>,
}
#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)]
pub enum RepairWhat {
/// Only do a full sync of metadata tables
#[structopt(name = "tables")]
Tables,
/// Only repair (resync/rebalance) the set of stored blocks
#[structopt(name = "blocks")]
Blocks,
/// Only redo the propagation of object deletions to the version table (slow)
#[structopt(name = "versions")]
Versions,
/// Only redo the propagation of version deletions to the block ref table (extremely slow)
#[structopt(name = "block_refs")]
BlockRefs,
}
#[derive(Serialize, Deserialize, StructOpt, Debug, Clone)]
pub struct StatsOpt {
/// Gather statistics from all nodes
#[structopt(short = "a", long = "all-nodes")]
pub all_nodes: bool,
/// Gather detailed statistics (this can be long)
#[structopt(short = "d", long = "detailed")]
pub detailed: bool,
}

83
src/garage/cli/util.rs Normal file
View File

@ -0,0 +1,83 @@
use garage_util::data::Uuid;
use garage_util::error::*;
use garage_model::bucket_table::*;
use garage_model::key_table::*;
pub fn print_key_info(key: &Key) {
println!("Key name: {}", key.name.get());
println!("Key ID: {}", key.key_id);
println!("Secret key: {}", key.secret_key);
if key.deleted.get() {
println!("Key is deleted.");
} else {
println!("Authorized buckets:");
for (b, _, perm) in key.authorized_buckets.items().iter() {
println!("- {} R:{} W:{}", b, perm.allow_read, perm.allow_write);
}
}
}
pub fn print_bucket_info(bucket: &Bucket) {
println!("Bucket name: {}", bucket.name);
match bucket.state.get() {
BucketState::Deleted => println!("Bucket is deleted."),
BucketState::Present(p) => {
println!("Authorized keys:");
for (k, _, perm) in p.authorized_keys.items().iter() {
println!("- {} R:{} W:{}", k, perm.allow_read, perm.allow_write);
}
println!("Website access: {}", p.website.get());
}
};
}
pub fn format_table(data: Vec<String>) {
let data = data
.iter()
.map(|s| s.split('\t').collect::<Vec<_>>())
.collect::<Vec<_>>();
let columns = data.iter().map(|row| row.len()).fold(0, std::cmp::max);
let mut column_size = vec![0; columns];
let mut out = String::new();
for row in data.iter() {
for (i, col) in row.iter().enumerate() {
column_size[i] = std::cmp::max(column_size[i], col.chars().count());
}
}
for row in data.iter() {
for (col, col_len) in row[..row.len() - 1].iter().zip(column_size.iter()) {
out.push_str(col);
(0..col_len - col.chars().count() + 2).for_each(|_| out.push(' '));
}
out.push_str(&row[row.len() - 1]);
out.push('\n');
}
print!("{}", out);
}
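// Editor's sketch (not part of this diff): example usage of format_table.
// Rows are tab-separated strings; each column is padded to its widest cell
// plus two spaces.
fn _format_table_demo() {
    format_table(vec![
        "ID\tHostname\tZone".to_string(),
        "ab12ef\tmercury\tparis".to_string(),
        "cd34\tvenus\tberlin".to_string(),
    ]);
    // prints:
    // ID      Hostname  Zone
    // ab12ef  mercury   paris
    // cd34    venus     berlin
}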
pub fn find_matching_node(
cand: impl std::iter::Iterator<Item = Uuid>,
pattern: &str,
) -> Result<Uuid, Error> {
let mut candidates = vec![];
for c in cand {
if hex::encode(&c).starts_with(&pattern) {
candidates.push(c);
}
}
if candidates.len() != 1 {
Err(Error::Message(format!(
"{} nodes match '{}'",
candidates.len(),
pattern,
)))
} else {
Ok(candidates[0])
}
}
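// Behavior sketch for find_matching_node (hypothetical IDs): with two known
// nodes ab12... and cd34...,
//
//   find_matching_node(ids.iter().cloned(), "ab")  // => Ok(ab12...)
//   find_matching_node(ids.iter().cloned(), "")    // => Err("2 nodes match ''")
//
// i.e. a prefix must select exactly one node: an empty or ambiguous prefix
// matches several candidates, and an unknown one matches zero.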

View File

@ -4,39 +4,41 @@
#[macro_use]
extern crate log;
mod admin_rpc;
mod admin;
mod cli;
mod repair;
mod server;
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
use std::path::PathBuf;
use structopt::StructOpt;
use garage_util::config::TlsConfig;
use garage_util::error::Error;
use netapp::util::parse_and_resolve_peer_addr;
use netapp::NetworkKey;
use garage_rpc::membership::*;
use garage_rpc::rpc_client::*;
use garage_util::error::*;
use admin_rpc::*;
use garage_rpc::system::*;
use garage_rpc::*;
use admin::*;
use cli::*;
#[derive(StructOpt, Debug)]
#[structopt(name = "garage")]
struct Opt {
/// Connect to this RPC host to execute client operations
#[structopt(short = "h", long = "rpc-host", default_value = "127.0.0.1:3901", parse(try_from_str = parse_address))]
pub rpc_host: SocketAddr,
/// Host to connect to for admin operations, in the format:
/// <public-key>@<ip>:<port>
#[structopt(short = "h", long = "rpc-host")]
pub rpc_host: Option<String>,
#[structopt(long = "ca-cert")]
pub ca_cert: Option<String>,
#[structopt(long = "client-cert")]
pub client_cert: Option<String>,
#[structopt(long = "client-key")]
pub client_key: Option<String>,
/// RPC secret network key for admin operations
#[structopt(short = "s", long = "rpc-secret")]
pub rpc_secret: Option<String>,
/// Configuration file (garage.toml)
#[structopt(short = "c", long = "config", default_value = "/etc/garage.toml")]
pub config_file: PathBuf,
#[structopt(subcommand)]
cmd: Command,
@ -45,54 +47,72 @@ struct Opt {
#[tokio::main]
async fn main() {
pretty_env_logger::init();
sodiumoxide::init().expect("Unable to init sodiumoxide");
let opt = Opt::from_args();
let res = if let Command::Server(server_opt) = opt.cmd {
// Abort on panic (same behavior as in Go)
std::panic::set_hook(Box::new(|panic_info| {
error!("{}", panic_info.to_string());
std::process::abort();
}));
let res = match opt.cmd {
Command::Server => {
// Abort on panic (same behavior as in Go)
std::panic::set_hook(Box::new(|panic_info| {
error!("{}", panic_info.to_string());
std::process::abort();
}));
server::run_server(server_opt.config_file).await
} else {
cli_command(opt).await
server::run_server(opt.config_file).await
}
Command::NodeId(node_id_opt) => node_id_command(opt.config_file, node_id_opt.quiet),
_ => cli_command(opt).await,
};
if let Err(e) = res {
error!("{}", e);
eprintln!("Error: {}", e);
std::process::exit(1);
}
}
async fn cli_command(opt: Opt) -> Result<(), Error> {
let tls_config = match (opt.ca_cert, opt.client_cert, opt.client_key) {
(Some(ca_cert), Some(client_cert), Some(client_key)) => Some(TlsConfig {
ca_cert,
node_cert: client_cert,
node_key: client_key,
}),
(None, None, None) => None,
_ => {
warn!("Missing one of: --ca-cert, --node-cert, --node-key. Not using TLS.");
None
}
let config = if opt.rpc_secret.is_none() || opt.rpc_host.is_none() {
Some(garage_util::config::read_config(opt.config_file.clone())
.err_context(format!("Unable to read configuration file {}. Configuration file is needed because -h or -s is not provided on the command line.", opt.config_file.to_string_lossy()))?)
} else {
None
};
let rpc_http_cli =
Arc::new(RpcHttpClient::new(8, &tls_config).expect("Could not create RPC client"));
let membership_rpc_cli =
RpcAddrClient::new(rpc_http_cli.clone(), MEMBERSHIP_RPC_PATH.to_string());
let admin_rpc_cli = RpcAddrClient::new(rpc_http_cli.clone(), ADMIN_RPC_PATH.to_string());
// Find and parse network RPC secret
let net_key_hex_str = opt
.rpc_secret
.as_ref()
.or_else(|| config.as_ref().map(|c| &c.rpc_secret))
.ok_or("No RPC secret provided")?;
let network_key = NetworkKey::from_slice(
&hex::decode(net_key_hex_str).err_context("Invalid RPC secret key (bad hex)")?[..],
)
.ok_or("Invalid RPC secret provided (wrong length)")?;
cli_cmd(opt.cmd, membership_rpc_cli, admin_rpc_cli, opt.rpc_host).await
}
// Generate a temporary keypair for our RPC client
let (_pk, sk) = sodiumoxide::crypto::sign::ed25519::gen_keypair();
fn parse_address(address: &str) -> Result<SocketAddr, String> {
use std::net::ToSocketAddrs;
address
.to_socket_addrs()
.map_err(|_| format!("Could not resolve {}", address))?
.next()
.ok_or_else(|| format!("Could not resolve {}", address))
let netapp = NetApp::new(network_key, sk);
// Find and parse the address of the target host
let (id, addr) = if let Some(h) = opt.rpc_host {
let (id, addrs) = parse_and_resolve_peer_addr(&h).ok_or_else(|| format!("Invalid RPC remote node identifier: {}. Expected format is <pubkey>@<IP or hostname>:<port>.", h))?;
(id, addrs[0])
} else if let Some(a) = config.as_ref().map(|c| c.rpc_public_addr).flatten() {
let node_id = garage_rpc::system::read_node_id(&config.unwrap().metadata_dir)
.err_context(READ_KEY_ERROR)?;
(node_id, a)
} else {
return Err(Error::Message("No RPC host provided".into()));
};
// Connect to target host
netapp.clone().try_connect(addr, id).await
.err_context("Unable to connect to destination RPC host. Check that you are using the same value of rpc_secret as them, and that you have their correct public key.")?;
let system_rpc_endpoint = netapp.endpoint::<SystemRpc, ()>(SYSTEM_RPC_PATH.into());
let admin_rpc_endpoint = netapp.endpoint::<AdminRpc, ()>(ADMIN_RPC_PATH.into());
cli_command_dispatch(opt.cmd, &system_rpc_endpoint, &admin_rpc_endpoint, id).await
}

View File

@ -1,7 +1,5 @@
use std::path::PathBuf;
use std::sync::Arc;
use futures_util::future::*;
use tokio::sync::watch;
use garage_util::background::*;
@ -10,20 +8,9 @@ use garage_util::error::Error;
use garage_api::run_api_server;
use garage_model::garage::Garage;
use garage_rpc::rpc_server::RpcServer;
use garage_web::run_web_server;
use crate::admin_rpc::*;
async fn shutdown_signal(send_cancel: watch::Sender<bool>) -> Result<(), Error> {
// Wait for the CTRL+C signal
tokio::signal::ctrl_c()
.await
.expect("failed to install CTRL+C signal handler");
info!("Received CTRL+C, shutting down.");
send_cancel.send(true)?;
Ok(())
}
use crate::admin::*;
async fn wait_from(mut chan: watch::Receiver<bool>) {
while !*chan.borrow() {
@ -47,52 +34,52 @@ pub async fn run_server(config_file: PathBuf) -> Result<(), Error> {
.open()
.expect("Unable to open sled DB");
info!("Initialize RPC server...");
let mut rpc_server = RpcServer::new(config.rpc_bind_addr, config.rpc_tls.clone());
info!("Initializing background runner...");
let (send_cancel, watch_cancel) = watch::channel(false);
let watch_cancel = netapp::util::watch_ctrl_c();
let (background, await_background_done) = BackgroundRunner::new(16, watch_cancel.clone());
info!("Initializing Garage main data store...");
let garage = Garage::new(config.clone(), db, background, &mut rpc_server);
let bootstrap = garage.system.clone().bootstrap(
config.bootstrap_peers,
config.consul_host,
config.consul_service_name,
);
let garage = Garage::new(config.clone(), db, background);
let run_system = tokio::spawn(garage.system.clone().run(watch_cancel.clone()));
info!("Crate admin RPC handler...");
AdminRpcHandler::new(garage.clone()).register_handler(&mut rpc_server);
AdminRpcHandler::new(garage.clone());
info!("Initializing RPC and API servers...");
let run_rpc_server = Arc::new(rpc_server).run(wait_from(watch_cancel.clone()));
let api_server = run_api_server(garage.clone(), wait_from(watch_cancel.clone()));
let web_server = run_web_server(garage, wait_from(watch_cancel.clone()));
info!("Initializing API server...");
let api_server = tokio::spawn(run_api_server(
garage.clone(),
wait_from(watch_cancel.clone()),
));
futures::try_join!(
bootstrap.map(|()| {
info!("Bootstrap done");
Ok(())
}),
run_rpc_server.map(|rv| {
info!("RPC server exited");
rv
}),
api_server.map(|rv| {
info!("API server exited");
rv
}),
web_server.map(|rv| {
info!("Web server exited");
rv
}),
await_background_done.map(|rv| {
info!("Background runner exited: {:?}", rv);
Ok(())
}),
shutdown_signal(send_cancel),
)?;
info!("Initializing web server...");
let web_server = tokio::spawn(run_web_server(
garage.clone(),
wait_from(watch_cancel.clone()),
));
// The servers now run; when a cancel signal is sent, they stop
if let Err(e) = api_server.await? {
warn!("API server exited with error: {}", e);
}
if let Err(e) = web_server.await? {
warn!("Web server exited with error: {}", e);
}
// Remove RPC handlers for system to break reference cycles
garage.system.netapp.drop_all_handlers();
// Wait for the netapp RPC system to end
run_system.await?;
// Break last reference cycles so that stuff can terminate properly
garage.break_reference_cycles();
drop(garage);
// Wait for all background tasks to end
await_background_done.await?;
info!("Cleaning up...");

View File

@ -1,6 +1,6 @@
[package]
name = "garage_model"
version = "0.3.0"
version = "0.4.0"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
@ -13,10 +13,11 @@ path = "lib.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
garage_rpc = { version = "0.3.0", path = "../rpc" }
garage_table = { version = "0.3.0", path = "../table" }
garage_util = { version = "0.3.0", path = "../util" }
garage_rpc = { version = "0.4.0", path = "../rpc" }
garage_table = { version = "0.4.0", path = "../table" }
garage_util = { version = "0.4.0", path = "../util" }
async-trait = "0.1.7"
arc-swap = "1.0"
hex = "0.4"
log = "0.4"
@ -31,3 +32,5 @@ serde_bytes = "0.11"
futures = "0.3"
futures-util = "0.3"
tokio = { version = "1.0", default-features = false, features = ["rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] }
netapp = { version = "0.3.0", git = "https://git.deuxfleurs.fr/lx/netapp" }

View File

@ -3,6 +3,7 @@ use std::sync::Arc;
use std::time::Duration;
use arc_swap::ArcSwapOption;
use async_trait::async_trait;
use futures::future::*;
use futures::select;
use serde::{Deserialize, Serialize};
@ -14,9 +15,8 @@ use garage_util::data::*;
use garage_util::error::Error;
use garage_util::time::*;
use garage_rpc::membership::System;
use garage_rpc::rpc_client::*;
use garage_rpc::rpc_server::*;
use garage_rpc::system::System;
use garage_rpc::*;
use garage_table::replication::{TableReplication, TableShardedReplication};
@ -27,7 +27,7 @@ use crate::garage::Garage;
/// Size under which data will be stored inlined in database instead of as files
pub const INLINE_THRESHOLD: usize = 3072;
pub const BACKGROUND_WORKERS: u64 = 1;
pub const BACKGROUND_WORKERS: u64 = 2;
const BLOCK_RW_TIMEOUT: Duration = Duration::from_secs(42);
const BLOCK_GC_TIMEOUT: Duration = Duration::from_secs(60);
@ -36,7 +36,7 @@ const RESYNC_RETRY_TIMEOUT: Duration = Duration::from_secs(10);
/// RPC messages used to share blocks of data between nodes
#[derive(Debug, Serialize, Deserialize)]
pub enum Message {
pub enum BlockRpc {
Ok,
/// Message to ask for a block of data, by hash
GetBlock(Hash),
@ -60,7 +60,9 @@ pub struct PutBlockMessage {
pub data: Vec<u8>,
}
impl RpcMessage for Message {}
impl Rpc for BlockRpc {
type Response = Result<BlockRpc, Error>;
}
/// The block manager, handling block exchange between nodes, and block storage on local node
pub struct BlockManager {
@ -68,8 +70,8 @@ pub struct BlockManager {
pub replication: TableShardedReplication,
/// Directory in which block are stored
pub data_dir: PathBuf,
/// Lock to prevent concurrent edition of the directory
pub data_dir_lock: Mutex<()>,
mutation_lock: Mutex<BlockManagerLocked>,
rc: sled::Tree,
@ -77,17 +79,21 @@ pub struct BlockManager {
resync_notify: Notify,
system: Arc<System>,
rpc_client: Arc<RpcClient<Message>>,
endpoint: Arc<Endpoint<BlockRpc, Self>>,
pub(crate) garage: ArcSwapOption<Garage>,
}
// This custom struct contains functions that must only be run
// when the lock is held. We ensure that this is the case by storing
// it INSIDE a Mutex.
struct BlockManagerLocked();
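// Editor's sketch of this pattern with hypothetical names (not part of the
// diff): operations that require the lock become methods on a token type
// that is reachable only by locking the Mutex, so "lock is held" is
// enforced by the type system instead of by convention.
//
// struct DiskOpsToken();
// impl DiskOpsToken {
//     fn write_file(&self, path: &std::path::Path, data: &[u8]) -> std::io::Result<()> {
//         // Only reachable through mgr.ops.lock().await, i.e. with the lock held.
//         std::fs::write(path, data)
//     }
// }
// struct Mgr {
//     ops: tokio::sync::Mutex<DiskOpsToken>,
// }
// // usage: mgr.ops.lock().await.write_file(&path, &data)?;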
impl BlockManager {
pub fn new(
db: &sled::Db,
data_dir: PathBuf,
replication: TableShardedReplication,
system: Arc<System>,
rpc_server: &mut RpcServer,
) -> Arc<Self> {
let rc = db
.open_tree("block_local_rc")
@ -97,144 +103,118 @@ impl BlockManager {
.open_tree("block_local_resync_queue")
.expect("Unable to open block_local_resync_queue tree");
let rpc_path = "block_manager";
let rpc_client = system.rpc_client::<Message>(rpc_path);
let endpoint = system
.netapp
.endpoint("garage_model/block.rs/Rpc".to_string());
let manager_locked = BlockManagerLocked();
let block_manager = Arc::new(Self {
replication,
data_dir,
data_dir_lock: Mutex::new(()),
mutation_lock: Mutex::new(manager_locked),
rc,
resync_queue,
resync_notify: Notify::new(),
system,
rpc_client,
endpoint,
garage: ArcSwapOption::from(None),
});
block_manager
.clone()
.register_handler(rpc_server, rpc_path.into());
block_manager.endpoint.set_handler(block_manager.clone());
block_manager
}
fn register_handler(self: Arc<Self>, rpc_server: &mut RpcServer, path: String) {
let self2 = self.clone();
rpc_server.add_handler::<Message, _, _>(path, move |msg, _addr| {
let self2 = self2.clone();
async move { self2.handle(&msg).await }
});
// ---- Public interface ----
let self2 = self.clone();
self.rpc_client
.set_local_handler(self.system.id, move |msg| {
let self2 = self2.clone();
async move { self2.handle(&msg).await }
});
}
/// Ask nodes that might have a block for it
pub async fn rpc_get_block(&self, hash: &Hash) -> Result<Vec<u8>, Error> {
let who = self.replication.read_nodes(&hash);
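// Query every node that may hold the block, but with quorum 1 and
// interrupt_after_quorum: the first successful reply suffices and the
// remaining in-flight requests are dropped.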
let resps = self
.system
.rpc
.try_call_many(
&self.endpoint,
&who[..],
BlockRpc::GetBlock(*hash),
RequestStrategy::with_priority(PRIO_NORMAL)
.with_quorum(1)
.with_timeout(BLOCK_RW_TIMEOUT)
.interrupt_after_quorum(true),
)
.await?;
async fn handle(self: Arc<Self>, msg: &Message) -> Result<Message, Error> {
match msg {
Message::PutBlock(m) => self.write_block(&m.hash, &m.data).await,
Message::GetBlock(h) => self.read_block(h).await,
Message::NeedBlockQuery(h) => self.need_block(h).await.map(Message::NeedBlockReply),
_ => Err(Error::BadRpc("Unexpected RPC message".to_string())),
}
}
pub fn spawn_background_worker(self: Arc<Self>) {
// Launch 2 simultaneous workers for background resync loop preprocessing <= TODO actually this
// launches only one worker with current value of BACKGROUND_WORKERS
for i in 0..BACKGROUND_WORKERS {
let bm2 = self.clone();
let background = self.system.background.clone();
tokio::spawn(async move {
tokio::time::sleep(Duration::from_secs(10 * (i + 1))).await;
background.spawn_worker(format!("block resync worker {}", i), move |must_exit| {
bm2.resync_loop(must_exit)
});
});
}
}
/// Write a block to disk
async fn write_block(&self, hash: &Hash, data: &[u8]) -> Result<Message, Error> {
let _lock = self.data_dir_lock.lock().await;
let mut path = self.block_dir(hash);
fs::create_dir_all(&path).await?;
path.push(hex::encode(hash));
if fs::metadata(&path).await.is_ok() {
return Ok(Message::Ok);
}
let mut f = fs::File::create(path).await?;
f.write_all(data).await?;
drop(f);
Ok(Message::Ok)
}
/// Read block from disk, verifying its integrity
async fn read_block(&self, hash: &Hash) -> Result<Message, Error> {
let path = self.block_path(hash);
let mut f = match fs::File::open(&path).await {
Ok(f) => f,
Err(e) => {
// Not found but maybe we should have had it ??
self.put_to_resync(hash, Duration::from_millis(0))?;
return Err(Into::into(e));
for resp in resps {
if let BlockRpc::PutBlock(msg) = resp {
return Ok(msg.data);
}
};
let mut data = vec![];
f.read_to_end(&mut data).await?;
drop(f);
}
Err(Error::Message(format!(
"Unable to read block {:?}: no valid blocks returned",
hash
)))
}
if blake2sum(&data[..]) != *hash {
let _lock = self.data_dir_lock.lock().await;
warn!(
"Block {:?} is corrupted. Renaming to .corrupted and resyncing.",
hash
);
let mut path2 = path.clone();
path2.set_extension(".corrupted");
fs::rename(path, path2).await?;
self.put_to_resync(&hash, Duration::from_millis(0))?;
return Err(Error::CorruptData(*hash));
/// Send block to nodes that should have it
pub async fn rpc_put_block(&self, hash: Hash, data: Vec<u8>) -> Result<(), Error> {
let who = self.replication.write_nodes(&hash);
self.system
.rpc
.try_call_many(
&self.endpoint,
&who[..],
BlockRpc::PutBlock(PutBlockMessage { hash, data }),
RequestStrategy::with_priority(PRIO_NORMAL)
.with_quorum(self.replication.write_quorum())
.with_timeout(BLOCK_RW_TIMEOUT),
)
.await?;
Ok(())
}
/// Launch the repair procedure on the data store
///
/// This will list all blocks locally present, as well as those
/// that are required because of refcount > 0, and will try
/// to fix any mismatch between the two.
pub async fn repair_data_store(&self, must_exit: &watch::Receiver<bool>) -> Result<(), Error> {
// 1. Repair blocks from RC table
let garage = self.garage.load_full().unwrap();
let mut last_hash = None;
for (i, entry) in garage.block_ref_table.data.store.iter().enumerate() {
let (_k, v_bytes) = entry?;
let block_ref = rmp_serde::decode::from_read_ref::<_, BlockRef>(v_bytes.as_ref())?;
if Some(&block_ref.block) == last_hash.as_ref() {
continue;
}
if !block_ref.deleted.get() {
last_hash = Some(block_ref.block);
self.put_to_resync(&block_ref.block, Duration::from_secs(0))?;
}
if i & 0xFF == 0 && *must_exit.borrow() {
return Ok(());
}
}
Ok(Message::PutBlock(PutBlockMessage { hash: *hash, data }))
// 2. Repair blocks actually on disk
self.repair_aux_read_dir_rec(&self.data_dir, must_exit)
.await?;
Ok(())
}
/// Check if this node should have a block but does not actually have it
async fn need_block(&self, hash: &Hash) -> Result<bool, Error> {
let needed = self
.rc
.get(hash.as_ref())?
.map(|x| u64_from_be_bytes(x) > 0)
.unwrap_or(false);
if needed {
let path = self.block_path(hash);
let exists = fs::metadata(&path).await.is_ok();
Ok(!exists)
} else {
Ok(false)
}
/// Get length of resync queue
pub fn resync_queue_len(&self) -> usize {
self.resync_queue.len()
}
fn block_dir(&self, hash: &Hash) -> PathBuf {
let mut path = self.data_dir.clone();
path.push(hex::encode(&hash.as_slice()[0..1]));
path.push(hex::encode(&hash.as_slice()[1..2]));
path
}
fn block_path(&self, hash: &Hash) -> PathBuf {
let mut path = self.block_dir(hash);
path.push(hex::encode(hash.as_ref()));
path
/// Get number of items in the refcount table
pub fn rc_len(&self) -> usize {
self.rc.len()
}
// ---- Managing the reference counter ----
/// Increment the number of times a block is used, putting it in the resync queue if it is
/// required but not currently stored
pub fn block_incref(&self, hash: &Hash) -> Result<(), Error> {
@ -265,6 +245,96 @@ impl BlockManager {
Ok(())
}
/// Read a block's reference count
pub fn get_block_rc(&self, hash: &Hash) -> Result<u64, Error> {
Ok(self
.rc
.get(hash.as_ref())?
.map(u64_from_be_bytes)
.unwrap_or(0))
}
// ---- Reading and writing blocks locally ----
/// Write a block to disk
async fn write_block(&self, hash: &Hash, data: &[u8]) -> Result<BlockRpc, Error> {
self.mutation_lock
.lock()
.await
.write_block(hash, data, self)
.await
}
/// Read block from disk, verifying its integrity
async fn read_block(&self, hash: &Hash) -> Result<BlockRpc, Error> {
let path = self.block_path(hash);
let mut f = match fs::File::open(&path).await {
Ok(f) => f,
Err(e) => {
// Not found but maybe we should have had it ??
self.put_to_resync(hash, Duration::from_millis(0))?;
return Err(Into::into(e));
}
};
let mut data = vec![];
f.read_to_end(&mut data).await?;
drop(f);
if blake2sum(&data[..]) != *hash {
self.mutation_lock
.lock()
.await
.move_block_to_corrupted(hash, self)
.await?;
return Err(Error::CorruptData(*hash));
}
Ok(BlockRpc::PutBlock(PutBlockMessage { hash: *hash, data }))
}
/// Check if this node should have a block but does not actually have it
async fn need_block(&self, hash: &Hash) -> Result<bool, Error> {
let BlockStatus { exists, needed } = self
.mutation_lock
.lock()
.await
.check_block_status(hash, self)
.await?;
Ok(needed && !exists)
}
/// Utility: gives the path of the directory in which a block should be found
fn block_dir(&self, hash: &Hash) -> PathBuf {
let mut path = self.data_dir.clone();
path.push(hex::encode(&hash.as_slice()[0..1]));
path.push(hex::encode(&hash.as_slice()[1..2]));
path
}
/// Utility: gives the full path where a block should be found
fn block_path(&self, hash: &Hash) -> PathBuf {
let mut path = self.block_dir(hash);
path.push(hex::encode(hash.as_ref()));
path
}
// ---- Resync loop ----
pub fn spawn_background_worker(self: Arc<Self>) {
// Launch 2 simultaneous workers for background resync loop preprocessing
for i in 0..BACKGROUND_WORKERS {
let bm2 = self.clone();
let background = self.system.background.clone();
tokio::spawn(async move {
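// Stagger worker start-up (10s, 20s, ...) so the resync workers do not
// all start hitting the queue at the same instant right after boot.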
tokio::time::sleep(Duration::from_secs(10 * (i + 1))).await;
background.spawn_worker(format!("block resync worker {}", i), move |must_exit| {
bm2.resync_loop(must_exit)
});
});
}
}
fn put_to_resync(&self, hash: &Hash, delay: Duration) -> Result<(), Error> {
let when = now_msec() + delay.as_millis() as u64;
trace!("Put resync_queue: {} {:?}", when, hash);
@ -319,16 +389,12 @@ impl BlockManager {
}
async fn resync_block(&self, hash: &Hash) -> Result<(), Error> {
let lock = self.data_dir_lock.lock().await;
let path = self.block_path(hash);
let exists = fs::metadata(&path).await.is_ok();
let needed = self
.rc
.get(hash.as_ref())?
.map(|x| u64_from_be_bytes(x) > 0)
.unwrap_or(false);
let BlockStatus { exists, needed } = self
.mutation_lock
.lock()
.await
.check_block_status(hash, self)
.await?;
if exists != needed {
info!(
@ -346,17 +412,22 @@ impl BlockManager {
}
who.retain(|id| *id != self.system.id);
let msg = Arc::new(Message::NeedBlockQuery(*hash));
let msg = Arc::new(BlockRpc::NeedBlockQuery(*hash));
let who_needs_fut = who.iter().map(|to| {
self.rpc_client
.call_arc(*to, msg.clone(), NEED_BLOCK_QUERY_TIMEOUT)
self.system.rpc.call_arc(
&self.endpoint,
*to,
msg.clone(),
RequestStrategy::with_priority(PRIO_NORMAL)
.with_timeout(NEED_BLOCK_QUERY_TIMEOUT),
)
});
let who_needs_resps = join_all(who_needs_fut).await;
let mut need_nodes = vec![];
for (node, needed) in who.iter().zip(who_needs_resps.into_iter()) {
match needed? {
Message::NeedBlockReply(needed) => {
BlockRpc::NeedBlockReply(needed) => {
if needed {
need_nodes.push(*node);
}
@ -377,11 +448,14 @@ impl BlockManager {
);
let put_block_message = self.read_block(hash).await?;
self.rpc_client
self.system
.rpc
.try_call_many(
&self.endpoint,
&need_nodes[..],
put_block_message,
RequestStrategy::with_quorum(need_nodes.len())
RequestStrategy::with_priority(PRIO_NORMAL)
.with_quorum(need_nodes.len())
.with_timeout(BLOCK_RW_TIMEOUT),
)
.await?;
@ -393,12 +467,14 @@ impl BlockManager {
who.len()
);
fs::remove_file(path).await?;
self.mutation_lock
.lock()
.await
.delete_if_unneeded(hash, self)
.await?;
}
if needed && !exists {
drop(lock);
// TODO find a way to not do this if they are sending it to us
// Let's suppose this isn't an issue for now with the BLOCK_RW_TIMEOUT delay
// between the RC being incremented and this part being called.
@ -409,73 +485,6 @@ impl BlockManager {
Ok(())
}
/// Ask nodes that might have a block for it
pub async fn rpc_get_block(&self, hash: &Hash) -> Result<Vec<u8>, Error> {
let who = self.replication.read_nodes(&hash);
let resps = self
.rpc_client
.try_call_many(
&who[..],
Message::GetBlock(*hash),
RequestStrategy::with_quorum(1)
.with_timeout(BLOCK_RW_TIMEOUT)
.interrupt_after_quorum(true),
)
.await?;
for resp in resps {
if let Message::PutBlock(msg) = resp {
return Ok(msg.data);
}
}
Err(Error::Message(format!(
"Unable to read block {:?}: no valid blocks returned",
hash
)))
}
/// Send block to nodes that should have it
pub async fn rpc_put_block(&self, hash: Hash, data: Vec<u8>) -> Result<(), Error> {
let who = self.replication.write_nodes(&hash);
self.rpc_client
.try_call_many(
&who[..],
Message::PutBlock(PutBlockMessage { hash, data }),
RequestStrategy::with_quorum(self.replication.write_quorum())
.with_timeout(BLOCK_RW_TIMEOUT),
)
.await?;
Ok(())
}
pub async fn repair_data_store(&self, must_exit: &watch::Receiver<bool>) -> Result<(), Error> {
// 1. Repair blocks from RC table
let garage = self.garage.load_full().unwrap();
let mut last_hash = None;
let mut i = 0usize;
for entry in garage.block_ref_table.data.store.iter() {
let (_k, v_bytes) = entry?;
let block_ref = rmp_serde::decode::from_read_ref::<_, BlockRef>(v_bytes.as_ref())?;
if Some(&block_ref.block) == last_hash.as_ref() {
continue;
}
if !block_ref.deleted.get() {
last_hash = Some(block_ref.block);
self.put_to_resync(&block_ref.block, Duration::from_secs(0))?;
}
i += 1;
if i & 0xFF == 0 && *must_exit.borrow() {
return Ok(());
}
}
// 2. Repair blocks actually on disk
self.repair_aux_read_dir_rec(&self.data_dir, must_exit)
.await?;
Ok(())
}
fn repair_aux_read_dir_rec<'a>(
&'a self,
path: &'a Path,
@ -520,14 +529,89 @@ impl BlockManager {
}
.boxed()
}
}
/// Get length of resync queue
pub fn resync_queue_len(&self) -> usize {
self.resync_queue.len()
#[async_trait]
impl EndpointHandler<BlockRpc> for BlockManager {
async fn handle(
self: &Arc<Self>,
message: &BlockRpc,
_from: NodeID,
) -> Result<BlockRpc, Error> {
match message {
BlockRpc::PutBlock(m) => self.write_block(&m.hash, &m.data).await,
BlockRpc::GetBlock(h) => self.read_block(h).await,
BlockRpc::NeedBlockQuery(h) => self.need_block(h).await.map(BlockRpc::NeedBlockReply),
_ => Err(Error::BadRpc("Unexpected RPC message".to_string())),
}
}
}
struct BlockStatus {
exists: bool,
needed: bool,
}
impl BlockManagerLocked {
async fn check_block_status(
&self,
hash: &Hash,
mgr: &BlockManager,
) -> Result<BlockStatus, Error> {
let path = mgr.block_path(hash);
let exists = fs::metadata(&path).await.is_ok();
let needed = mgr.get_block_rc(hash)? > 0;
Ok(BlockStatus { exists, needed })
}
pub fn rc_len(&self) -> usize {
self.rc.len()
async fn write_block(
&self,
hash: &Hash,
data: &[u8],
mgr: &BlockManager,
) -> Result<BlockRpc, Error> {
let mut path = mgr.block_dir(hash);
fs::create_dir_all(&path).await?;
path.push(hex::encode(hash));
if fs::metadata(&path).await.is_ok() {
return Ok(BlockRpc::Ok);
}
let mut path2 = path.clone();
path2.set_extension("tmp");
let mut f = fs::File::create(&path2).await?;
f.write_all(data).await?;
drop(f);
fs::rename(path2, path).await?;
Ok(BlockRpc::Ok)
}
async fn move_block_to_corrupted(&self, hash: &Hash, mgr: &BlockManager) -> Result<(), Error> {
warn!(
"Block {:?} is corrupted. Renaming to .corrupted and resyncing.",
hash
);
let path = mgr.block_path(hash);
let mut path2 = path.clone();
path2.set_extension(".corrupted");
fs::rename(path, path2).await?;
mgr.put_to_resync(&hash, Duration::from_millis(0))?;
Ok(())
}
async fn delete_if_unneeded(&self, hash: &Hash, mgr: &BlockManager) -> Result<(), Error> {
let BlockStatus { exists, needed } = self.check_block_status(hash, mgr).await?;
if exists && !needed {
let path = mgr.block_path(hash);
fs::remove_file(path).await?;
}
Ok(())
}
}

View File

@ -1,11 +1,11 @@
use std::sync::Arc;
use netapp::NetworkKey;
use garage_util::background::*;
use garage_util::config::*;
use garage_rpc::membership::System;
use garage_rpc::rpc_client::RpcHttpClient;
use garage_rpc::rpc_server::RpcServer;
use garage_rpc::system::System;
use garage_table::replication::ReplicationMode;
use garage_table::replication::TableFullReplication;
@ -45,26 +45,21 @@ pub struct Garage {
impl Garage {
/// Create and run garage
pub fn new(
config: Config,
db: sled::Db,
background: Arc<BackgroundRunner>,
rpc_server: &mut RpcServer,
) -> Arc<Self> {
pub fn new(config: Config, db: sled::Db, background: Arc<BackgroundRunner>) -> Arc<Self> {
let network_key = NetworkKey::from_slice(
&hex::decode(&config.rpc_secret).expect("Invalid RPC secret key")[..],
)
.expect("Invalid RPC secret key");
let replication_mode = ReplicationMode::parse(&config.replication_mode)
.expect("Invalid replication_mode in config file.");
info!("Initialize membership management system...");
let rpc_http_client = Arc::new(
RpcHttpClient::new(config.max_concurrent_rpc_requests, &config.rpc_tls)
.expect("Could not create RPC client"),
);
let system = System::new(
config.metadata_dir.clone(),
rpc_http_client,
network_key,
background.clone(),
rpc_server,
replication_mode.replication_factor(),
&config,
);
let data_rep_param = TableShardedReplication {
@ -87,13 +82,8 @@ impl Garage {
};
info!("Initialize block manager...");
let block_manager = BlockManager::new(
&db,
config.data_dir.clone(),
data_rep_param,
system.clone(),
rpc_server,
);
let block_manager =
BlockManager::new(&db, config.data_dir.clone(), data_rep_param, system.clone());
info!("Initialize block_ref_table...");
let block_ref_table = Table::new(
@ -104,7 +94,6 @@ impl Garage {
system.clone(),
&db,
"block_ref".to_string(),
rpc_server,
);
info!("Initialize version_table...");
@ -117,7 +106,6 @@ impl Garage {
system.clone(),
&db,
"version".to_string(),
rpc_server,
);
info!("Initialize object_table...");
@ -130,7 +118,6 @@ impl Garage {
system.clone(),
&db,
"object".to_string(),
rpc_server,
);
info!("Initialize bucket_table...");
@ -140,7 +127,6 @@ impl Garage {
system.clone(),
&db,
"bucket".to_string(),
rpc_server,
);
info!("Initialize key_table_table...");
@ -150,7 +136,6 @@ impl Garage {
system.clone(),
&db,
"key".to_string(),
rpc_server,
);
info!("Initialize Garage...");
@ -173,4 +158,9 @@ impl Garage {
garage
}
/// Use this for shutdown: it breaks the Garage/BlockManager reference cycle so that everything can be dropped
pub fn break_reference_cycles(&self) {
self.block_manager.garage.swap(None);
}
}
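break_reference_cycles exists because Garage owns an Arc<BlockManager> while the block manager keeps a back-pointer to Garage in an arc-swap cell (hence the swap(None) above); neither reference count can reach zero until one edge of the cycle is cut. The shape of the pattern, reduced to toy types:

use std::sync::Arc;
use arc_swap::ArcSwapOption;

struct App {
    worker: Arc<Worker>,
}

struct Worker {
    app: ArcSwapOption<App>, // back-pointer, clearable at shutdown
}

fn shutdown(app: &App) {
    // Cut the cycle: once the last external Arc<App> is dropped,
    // both App and Worker are freed.
    app.worker.app.swap(None);
}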

View File

@ -1,6 +1,6 @@
[package]
name = "garage_rpc"
version = "0.3.0"
version = "0.4.0"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
@ -13,16 +13,17 @@ path = "lib.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
garage_util = { version = "0.3.0", path = "../util" }
garage_rpc_021 = { package = "garage_rpc", version = "0.2.1" }
garage_util = { version = "0.4.0", path = "../util" }
arc-swap = "1.0"
bytes = "1.0"
gethostname = "0.2"
hex = "0.4"
log = "0.4"
rand = "0.8"
sodiumoxide = { version = "0.2.5-0", package = "kuska-sodiumoxide" }
async-trait = "0.1.7"
rmp-serde = "0.15"
serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
serde_json = "1.0"
@ -32,11 +33,6 @@ futures-util = "0.3"
tokio = { version = "1.0", default-features = false, features = ["rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] }
tokio-stream = { version = "0.1", features = ["net"] }
http = "0.2"
hyper = { version = "0.14", features = ["full"] }
hyper-rustls = { version = "0.22", default-features = false }
rustls = "0.19"
tokio-rustls = "0.22"
webpki = "0.21"
netapp = { version = "0.3.0", git = "https://git.deuxfleurs.fr/lx/netapp" }
hyper = { version = "0.14", features = ["client", "http1", "runtime", "tcp"] }

View File

@ -1,24 +1,31 @@
use std::collections::HashMap;
use std::net::{IpAddr, SocketAddr};
use hyper::client::Client;
use hyper::StatusCode;
use hyper::{Body, Method, Request};
use serde::Deserialize;
use serde::{Deserialize, Serialize};
use netapp::NodeID;
use garage_util::error::Error;
#[derive(Deserialize, Clone)]
struct ConsulEntry {
#[serde(alias = "Address")]
// ---- READING FROM CONSUL CATALOG ----
#[derive(Deserialize, Clone, Debug)]
struct ConsulQueryEntry {
#[serde(rename = "Address")]
address: String,
#[serde(alias = "ServicePort")]
#[serde(rename = "ServicePort")]
service_port: u16,
#[serde(rename = "NodeMeta")]
node_meta: HashMap<String, String>,
}
pub async fn get_consul_nodes(
consul_host: &str,
consul_service_name: &str,
) -> Result<Vec<SocketAddr>, Error> {
) -> Result<Vec<(NodeID, SocketAddr)>, Error> {
let url = format!(
"http://{}/v1/catalog/service/{}",
consul_host, consul_service_name
@ -36,17 +43,111 @@ pub async fn get_consul_nodes(
}
let body = hyper::body::to_bytes(resp.into_body()).await?;
let entries = serde_json::from_slice::<Vec<ConsulEntry>>(body.as_ref())?;
let entries = serde_json::from_slice::<Vec<ConsulQueryEntry>>(body.as_ref())?;
let mut ret = vec![];
for ent in entries {
let ip = ent
.address
.parse::<IpAddr>()
.map_err(|e| Error::Message(format!("Could not parse IP address: {}", e)))?;
ret.push(SocketAddr::new(ip, ent.service_port));
let ip = ent.address.parse::<IpAddr>().ok();
let pubkey = ent
.node_meta
.get("pubkey")
.map(|k| hex::decode(&k).ok())
.flatten()
.map(|k| NodeID::from_slice(&k[..]))
.flatten();
if let (Some(ip), Some(pubkey)) = (ip, pubkey) {
ret.push((pubkey, SocketAddr::new(ip, ent.service_port)));
} else {
warn!(
"Could not process node spec from Consul: {:?} (invalid IP or public key)",
ent
);
}
}
debug!("Got nodes from Consul: {:?}", ret);
Ok(ret)
}
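Discovery now needs more than an address: netapp identifies peers by their ed25519 public key, so each Consul catalog entry must expose a pubkey key in its NodeMeta (hex-encoded, 32 bytes once decoded), and entries with a missing or malformed key are skipped instead of failing the whole query. The decoding step above, as a standalone sketch:

use netapp::NodeID;

/// Decode a hex "pubkey" NodeMeta value into a NodeID; returns None
/// (entry skipped) when the hex is invalid or not exactly 32 bytes.
fn parse_pubkey_meta(meta: &str) -> Option<NodeID> {
    hex::decode(meta).ok().and_then(|k| NodeID::from_slice(&k[..]))
}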
// ---- PUBLISHING TO CONSUL CATALOG ----
#[derive(Serialize, Clone, Debug)]
struct ConsulPublishEntry {
#[serde(rename = "Node")]
node: String,
#[serde(rename = "Address")]
address: IpAddr,
#[serde(rename = "NodeMeta")]
node_meta: HashMap<String, String>,
#[serde(rename = "Service")]
service: ConsulPublishService,
}
#[derive(Serialize, Clone, Debug)]
struct ConsulPublishService {
#[serde(rename = "ID")]
service_id: String,
#[serde(rename = "Service")]
service_name: String,
#[serde(rename = "Tags")]
tags: Vec<String>,
#[serde(rename = "Address")]
address: IpAddr,
#[serde(rename = "Port")]
port: u16,
}
pub async fn publish_consul_service(
consul_host: &str,
consul_service_name: &str,
node_id: NodeID,
hostname: &str,
rpc_public_addr: SocketAddr,
) -> Result<(), Error> {
let node = format!("garage:{}", hex::encode(&node_id[..8]));
let advertisement = ConsulPublishEntry {
node: node.clone(),
address: rpc_public_addr.ip(),
node_meta: [
("pubkey".to_string(), hex::encode(node_id)),
("hostname".to_string(), hostname.to_string()),
]
.iter()
.cloned()
.collect(),
service: ConsulPublishService {
service_id: node.clone(),
service_name: consul_service_name.to_string(),
tags: vec!["advertised-by-garage".into(), hostname.into()],
address: rpc_public_addr.ip(),
port: rpc_public_addr.port(),
},
};
let url = format!("http://{}/v1/catalog/register", consul_host);
let req_body = serde_json::to_string(&advertisement)?;
debug!("Request body for consul adv: {}", req_body);
let req = Request::builder()
.uri(url)
.method(Method::PUT)
.body(Body::from(req_body))?;
let client = Client::new();
let resp = client.request(req).await?;
debug!("Response of advertising to Consul: {:?}", resp);
let resp_code = resp.status();
debug!(
"{}",
std::str::from_utf8(&hyper::body::to_bytes(resp.into_body()).await?)
.unwrap_or("<invalid utf8>")
);
if resp_code != StatusCode::OK {
return Err(Error::Message(format!("HTTP error {}", resp_code)));
}
Ok(())
}
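Publication is the mirror image of discovery: a node registers itself in the Consul catalog with its own public key in NodeMeta, producing exactly the pubkey field that get_consul_nodes reads back. A hypothetical call site with illustrative values (the real caller is advertise_to_consul in src/rpc/system.rs, further down in this diff):

use garage_util::error::Error;
use netapp::NodeID;

async fn advertise(node_id: NodeID, hostname: &str) -> Result<(), Error> {
    let rpc_public_addr = "10.0.0.1:3901".parse().unwrap();
    publish_consul_service(
        "localhost:8500", // consul_host
        "garage-rpc",     // consul_service_name
        node_id,
        hostname,
        rpc_public_addr,
    )
    .await
}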

View File

@ -4,10 +4,10 @@
extern crate log;
mod consul;
pub(crate) mod tls_util;
pub mod membership;
pub mod ring;
pub mod system;
pub mod rpc_client;
pub mod rpc_server;
pub mod rpc_helper;
pub use rpc_helper::*;

View File

@ -1,722 +0,0 @@
//! Module containing structs related to membership management
use std::collections::HashMap;
use std::fmt::Write as FmtWrite;
use std::io::{Read, Write};
use std::net::{IpAddr, SocketAddr};
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::Duration;
use futures::future::join_all;
use futures::select;
use futures_util::future::*;
use serde::{Deserialize, Serialize};
use tokio::sync::watch;
use tokio::sync::Mutex;
use garage_util::background::BackgroundRunner;
use garage_util::data::*;
use garage_util::error::Error;
use garage_util::persister::Persister;
use garage_util::time::*;
use crate::consul::get_consul_nodes;
use crate::ring::*;
use crate::rpc_client::*;
use crate::rpc_server::*;
const PING_INTERVAL: Duration = Duration::from_secs(10);
const DISCOVERY_INTERVAL: Duration = Duration::from_secs(60);
const PING_TIMEOUT: Duration = Duration::from_secs(2);
const MAX_FAILURES_BEFORE_CONSIDERED_DOWN: usize = 5;
/// RPC endpoint used for calls related to membership
pub const MEMBERSHIP_RPC_PATH: &str = "_membership";
/// RPC messages related to membership
#[derive(Debug, Serialize, Deserialize)]
pub enum Message {
/// Response to successful advertisements
Ok,
/// Message sent to detect other nodes status
Ping(PingMessage),
/// Ask other node for the nodes it knows. Answered with AdvertiseNodesUp
PullStatus,
/// Ask other node its config. Answered with AdvertiseConfig
PullConfig,
/// Advertisement of nodes the host knows up. Sent spontaneously or in response to PullStatus
AdvertiseNodesUp(Vec<AdvertisedNode>),
/// Advertisement of nodes config. Sent spontaneously or in response to PullConfig
AdvertiseConfig(NetworkConfig),
}
impl RpcMessage for Message {}
/// A ping, containing information about status and config
#[derive(Debug, Serialize, Deserialize)]
pub struct PingMessage {
id: Uuid,
rpc_port: u16,
status_hash: Hash,
config_version: u64,
state_info: StateInfo,
}
/// A node advertisement
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct AdvertisedNode {
/// Id of the node this advertisement relates to
pub id: Uuid,
/// IP and port of the node
pub addr: SocketAddr,
/// Is the node considered up
pub is_up: bool,
/// When was the node last seen up, in milliseconds since UNIX epoch
pub last_seen: u64,
pub state_info: StateInfo,
}
/// This node's membership manager
pub struct System {
/// The id of this node
pub id: Uuid,
persist_config: Persister<NetworkConfig>,
persist_status: Persister<Vec<AdvertisedNode>>,
rpc_local_port: u16,
state_info: StateInfo,
rpc_http_client: Arc<RpcHttpClient>,
rpc_client: Arc<RpcClient<Message>>,
replication_factor: usize,
pub(crate) status: watch::Receiver<Arc<Status>>,
/// The ring
pub ring: watch::Receiver<Arc<Ring>>,
update_lock: Mutex<Updaters>,
/// The job runner of this node
pub background: Arc<BackgroundRunner>,
}
struct Updaters {
update_status: watch::Sender<Arc<Status>>,
update_ring: watch::Sender<Arc<Ring>>,
}
/// The status of each node, as viewed by this node
#[derive(Debug, Clone)]
pub struct Status {
/// Mapping of each node id to its known status
pub nodes: HashMap<Uuid, Arc<StatusEntry>>,
/// Hash of `nodes`, used to detect when nodes have different views of the cluster
pub hash: Hash,
}
/// The status of a single node
#[derive(Debug)]
pub struct StatusEntry {
/// The IP and port used to connect to this node
pub addr: SocketAddr,
/// Last time this node was seen
pub last_seen: u64,
/// Number of consecutive pings sent without reply to this node
pub num_failures: AtomicUsize,
pub state_info: StateInfo,
}
impl StatusEntry {
/// Is the node associated with this entry considered up?
pub fn is_up(&self) -> bool {
self.num_failures.load(Ordering::SeqCst) < MAX_FAILURES_BEFORE_CONSIDERED_DOWN
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StateInfo {
/// Hostname of the node
pub hostname: String,
/// Replication factor configured on the node
pub replication_factor: Option<usize>, // TODO Option is just for retrocompatibility. It should become a simple usize at some point
}
impl Status {
fn handle_ping(&mut self, ip: IpAddr, info: &PingMessage) -> bool {
let addr = SocketAddr::new(ip, info.rpc_port);
let old_status = self.nodes.insert(
info.id,
Arc::new(StatusEntry {
addr,
last_seen: now_msec(),
num_failures: AtomicUsize::from(0),
state_info: info.state_info.clone(),
}),
);
match old_status {
None => {
info!("Newly pingable node: {}", hex::encode(&info.id));
true
}
Some(x) => x.addr != addr,
}
}
fn recalculate_hash(&mut self) {
let mut nodes = self.nodes.iter().collect::<Vec<_>>();
nodes.sort_unstable_by_key(|(id, _status)| *id);
let mut nodes_txt = String::new();
debug!("Current set of pingable nodes: --");
for (id, status) in nodes {
debug!("{} {}", hex::encode(&id), status.addr);
writeln!(&mut nodes_txt, "{} {}", hex::encode(&id), status.addr).unwrap();
}
debug!("END --");
self.hash = blake2sum(nodes_txt.as_bytes());
}
fn to_serializable_membership(&self, system: &System) -> Vec<AdvertisedNode> {
let mut mem = vec![];
for (node, status) in self.nodes.iter() {
let state_info = if *node == system.id {
system.state_info.clone()
} else {
status.state_info.clone()
};
mem.push(AdvertisedNode {
id: *node,
addr: status.addr,
is_up: status.is_up(),
last_seen: status.last_seen,
state_info,
});
}
mem
}
}
fn gen_node_id(metadata_dir: &Path) -> Result<Uuid, Error> {
let mut id_file = metadata_dir.to_path_buf();
id_file.push("node_id");
if id_file.as_path().exists() {
let mut f = std::fs::File::open(id_file.as_path())?;
let mut d = vec![];
f.read_to_end(&mut d)?;
if d.len() != 32 {
return Err(Error::Message("Corrupt node_id file".to_string()));
}
let mut id = [0u8; 32];
id.copy_from_slice(&d[..]);
Ok(id.into())
} else {
let id = gen_uuid();
let mut f = std::fs::File::create(id_file.as_path())?;
f.write_all(id.as_slice())?;
Ok(id)
}
}
impl System {
/// Create this node's membership manager
pub fn new(
metadata_dir: PathBuf,
rpc_http_client: Arc<RpcHttpClient>,
background: Arc<BackgroundRunner>,
rpc_server: &mut RpcServer,
replication_factor: usize,
) -> Arc<Self> {
let id = gen_node_id(&metadata_dir).expect("Unable to read or generate node ID");
info!("Node ID: {}", hex::encode(&id));
let persist_config = Persister::new(&metadata_dir, "network_config");
let persist_status = Persister::new(&metadata_dir, "peer_info");
let net_config = match persist_config.load() {
Ok(x) => x,
Err(e) => {
match Persister::<garage_rpc_021::ring::NetworkConfig>::new(
&metadata_dir,
"network_config",
)
.load()
{
Ok(old_config) => NetworkConfig::migrate_from_021(old_config),
Err(e2) => {
info!(
"No valid previous network configuration stored ({}, {}), starting fresh.",
e, e2
);
NetworkConfig::new()
}
}
}
};
let mut status = Status {
nodes: HashMap::new(),
hash: Hash::default(),
};
status.recalculate_hash();
let (update_status, status) = watch::channel(Arc::new(status));
let state_info = StateInfo {
hostname: gethostname::gethostname()
.into_string()
.unwrap_or_else(|_| "<invalid utf-8>".to_string()),
replication_factor: Some(replication_factor),
};
let ring = Ring::new(net_config, replication_factor);
let (update_ring, ring) = watch::channel(Arc::new(ring));
let rpc_path = MEMBERSHIP_RPC_PATH.to_string();
let rpc_client = RpcClient::new(
RpcAddrClient::<Message>::new(rpc_http_client.clone(), rpc_path.clone()),
background.clone(),
status.clone(),
);
let sys = Arc::new(System {
id,
persist_config,
persist_status,
rpc_local_port: rpc_server.bind_addr.port(),
state_info,
rpc_http_client,
rpc_client,
replication_factor,
status,
ring,
update_lock: Mutex::new(Updaters {
update_status,
update_ring,
}),
background,
});
sys.clone().register_handler(rpc_server, rpc_path);
sys
}
fn register_handler(self: Arc<Self>, rpc_server: &mut RpcServer, path: String) {
rpc_server.add_handler::<Message, _, _>(path, move |msg, addr| {
let self2 = self.clone();
async move {
match msg {
Message::Ping(ping) => self2.handle_ping(&addr, &ping).await,
Message::PullStatus => Ok(self2.handle_pull_status()),
Message::PullConfig => Ok(self2.handle_pull_config()),
Message::AdvertiseNodesUp(adv) => self2.handle_advertise_nodes_up(&adv).await,
Message::AdvertiseConfig(adv) => self2.handle_advertise_config(&adv).await,
_ => Err(Error::BadRpc("Unexpected RPC message".to_string())),
}
}
});
}
/// Get an RPC client
pub fn rpc_client<M: RpcMessage + 'static>(self: &Arc<Self>, path: &str) -> Arc<RpcClient<M>> {
RpcClient::new(
RpcAddrClient::new(self.rpc_http_client.clone(), path.to_string()),
self.background.clone(),
self.status.clone(),
)
}
/// Save network configuration to disc
async fn save_network_config(self: Arc<Self>) -> Result<(), Error> {
let ring = self.ring.borrow().clone();
self.persist_config
.save_async(&ring.config)
.await
.expect("Cannot save current cluster configuration");
Ok(())
}
fn make_ping(&self) -> Message {
let status = self.status.borrow().clone();
let ring = self.ring.borrow().clone();
Message::Ping(PingMessage {
id: self.id,
rpc_port: self.rpc_local_port,
status_hash: status.hash,
config_version: ring.config.version,
state_info: self.state_info.clone(),
})
}
async fn broadcast(self: Arc<Self>, msg: Message, timeout: Duration) {
let status = self.status.borrow().clone();
let to = status
.nodes
.keys()
.filter(|x| **x != self.id)
.cloned()
.collect::<Vec<_>>();
self.rpc_client.call_many(&to[..], msg, timeout).await;
}
/// Perform bootstrapping, starting the ping loop
pub async fn bootstrap(
self: Arc<Self>,
peers: Vec<SocketAddr>,
consul_host: Option<String>,
consul_service_name: Option<String>,
) {
let self2 = self.clone();
self.background
.spawn_worker("discovery loop".to_string(), |stop_signal| {
self2.discovery_loop(peers, consul_host, consul_service_name, stop_signal)
});
let self2 = self.clone();
self.background
.spawn_worker("ping loop".to_string(), |stop_signal| {
self2.ping_loop(stop_signal)
});
}
async fn ping_nodes(self: Arc<Self>, peers: Vec<(SocketAddr, Option<Uuid>)>) {
let ping_msg = self.make_ping();
let ping_resps = join_all(peers.iter().map(|(addr, id_option)| {
let sys = self.clone();
let ping_msg_ref = &ping_msg;
async move {
(
id_option,
addr,
sys.rpc_client
.by_addr()
.call(&addr, ping_msg_ref, PING_TIMEOUT)
.await,
)
}
}))
.await;
let update_locked = self.update_lock.lock().await;
let mut status: Status = self.status.borrow().as_ref().clone();
let ring = self.ring.borrow().clone();
let mut has_changes = false;
let mut to_advertise = vec![];
for (id_option, addr, ping_resp) in ping_resps {
if let Ok(Ok(Message::Ping(info))) = ping_resp {
let is_new = status.handle_ping(addr.ip(), &info);
if is_new {
has_changes = true;
to_advertise.push(AdvertisedNode {
id: info.id,
addr: *addr,
is_up: true,
last_seen: now_msec(),
state_info: info.state_info.clone(),
});
}
if is_new || status.hash != info.status_hash {
self.background
.spawn_cancellable(self.clone().pull_status(info.id).map(Ok));
}
if is_new || ring.config.version < info.config_version {
self.background
.spawn_cancellable(self.clone().pull_config(info.id).map(Ok));
}
} else if let Some(id) = id_option {
if let Some(st) = status.nodes.get_mut(id) {
// we need to increment failure counter as call was done using by_addr so the
// counter was not auto-incremented
st.num_failures.fetch_add(1, Ordering::SeqCst);
if !st.is_up() {
warn!("Node {:?} seems to be down.", id);
if !ring.config.members.contains_key(id) {
info!("Removing node {:?} from status (not in config and not responding to pings anymore)", id);
status.nodes.remove(&id);
has_changes = true;
}
}
}
}
}
if has_changes {
status.recalculate_hash();
}
self.update_status(&update_locked, status).await;
drop(update_locked);
if !to_advertise.is_empty() {
self.broadcast(Message::AdvertiseNodesUp(to_advertise), PING_TIMEOUT)
.await;
}
}
async fn handle_ping(
self: Arc<Self>,
from: &SocketAddr,
ping: &PingMessage,
) -> Result<Message, Error> {
let update_locked = self.update_lock.lock().await;
let mut status: Status = self.status.borrow().as_ref().clone();
let is_new = status.handle_ping(from.ip(), ping);
if is_new {
status.recalculate_hash();
}
let status_hash = status.hash;
let config_version = self.ring.borrow().config.version;
self.update_status(&update_locked, status).await;
drop(update_locked);
if is_new || status_hash != ping.status_hash {
self.background
.spawn_cancellable(self.clone().pull_status(ping.id).map(Ok));
}
if is_new || config_version < ping.config_version {
self.background
.spawn_cancellable(self.clone().pull_config(ping.id).map(Ok));
}
Ok(self.make_ping())
}
fn handle_pull_status(&self) -> Message {
Message::AdvertiseNodesUp(self.status.borrow().to_serializable_membership(self))
}
fn handle_pull_config(&self) -> Message {
let ring = self.ring.borrow().clone();
Message::AdvertiseConfig(ring.config.clone())
}
async fn handle_advertise_nodes_up(
self: Arc<Self>,
adv: &[AdvertisedNode],
) -> Result<Message, Error> {
let mut to_ping = vec![];
let update_lock = self.update_lock.lock().await;
let mut status: Status = self.status.borrow().as_ref().clone();
let mut has_changed = false;
let mut max_replication_factor = 0;
for node in adv.iter() {
if node.id == self.id {
// learn our own ip address
let self_addr = SocketAddr::new(node.addr.ip(), self.rpc_local_port);
let old_self = status.nodes.insert(
node.id,
Arc::new(StatusEntry {
addr: self_addr,
last_seen: now_msec(),
num_failures: AtomicUsize::from(0),
state_info: self.state_info.clone(),
}),
);
has_changed = match old_self {
None => true,
Some(x) => x.addr != self_addr,
};
} else {
let ping_them = match status.nodes.get(&node.id) {
// Case 1: new node
None => true,
// Case 2: the node might have changed address
Some(our_node) => node.is_up && !our_node.is_up() && our_node.addr != node.addr,
};
max_replication_factor = std::cmp::max(
max_replication_factor,
node.state_info.replication_factor.unwrap_or_default(),
);
if ping_them {
to_ping.push((node.addr, Some(node.id)));
}
}
}
if self.replication_factor < max_replication_factor {
error!("Some node have a higher replication factor ({}) than this one ({}). This is not supported and might lead to bugs",
max_replication_factor,
self.replication_factor);
std::process::exit(1);
}
if has_changed {
status.recalculate_hash();
}
self.update_status(&update_lock, status).await;
drop(update_lock);
if !to_ping.is_empty() {
self.background
.spawn_cancellable(self.clone().ping_nodes(to_ping).map(Ok));
}
Ok(Message::Ok)
}
async fn handle_advertise_config(
self: Arc<Self>,
adv: &NetworkConfig,
) -> Result<Message, Error> {
let update_lock = self.update_lock.lock().await;
let ring: Arc<Ring> = self.ring.borrow().clone();
if adv.version > ring.config.version {
let ring = Ring::new(adv.clone(), self.replication_factor);
update_lock.update_ring.send(Arc::new(ring))?;
drop(update_lock);
self.background.spawn_cancellable(
self.clone()
.broadcast(Message::AdvertiseConfig(adv.clone()), PING_TIMEOUT)
.map(Ok),
);
self.background.spawn(self.clone().save_network_config());
}
Ok(Message::Ok)
}
async fn ping_loop(self: Arc<Self>, mut stop_signal: watch::Receiver<bool>) {
while !*stop_signal.borrow() {
let restart_at = tokio::time::sleep(PING_INTERVAL);
let status = self.status.borrow().clone();
let ping_addrs = status
.nodes
.iter()
.filter(|(id, _)| **id != self.id)
.map(|(id, status)| (status.addr, Some(*id)))
.collect::<Vec<_>>();
self.clone().ping_nodes(ping_addrs).await;
select! {
_ = restart_at.fuse() => {},
_ = stop_signal.changed().fuse() => {},
}
}
}
async fn discovery_loop(
self: Arc<Self>,
bootstrap_peers: Vec<SocketAddr>,
consul_host: Option<String>,
consul_service_name: Option<String>,
mut stop_signal: watch::Receiver<bool>,
) {
let consul_config = match (consul_host, consul_service_name) {
(Some(ch), Some(csn)) => Some((ch, csn)),
_ => None,
};
while !*stop_signal.borrow() {
let not_configured = self.ring.borrow().config.members.is_empty();
let no_peers = self.status.borrow().nodes.len() < 3;
let bad_peers = self
.status
.borrow()
.nodes
.iter()
.filter(|(_, v)| v.is_up())
.count() != self.ring.borrow().config.members.len();
if not_configured || no_peers || bad_peers {
info!("Doing a bootstrap/discovery step (not_configured: {}, no_peers: {}, bad_peers: {})", not_configured, no_peers, bad_peers);
let mut ping_list = bootstrap_peers
.iter()
.map(|ip| (*ip, None))
.collect::<Vec<_>>();
if let Ok(peers) = self.persist_status.load_async().await {
ping_list.extend(peers.iter().map(|x| (x.addr, Some(x.id))));
}
if let Some((consul_host, consul_service_name)) = &consul_config {
match get_consul_nodes(consul_host, consul_service_name).await {
Ok(node_list) => {
ping_list.extend(node_list.iter().map(|a| (*a, None)));
}
Err(e) => {
warn!("Could not retrieve node list from Consul: {}", e);
}
}
}
self.clone().ping_nodes(ping_list).await;
}
let restart_at = tokio::time::sleep(DISCOVERY_INTERVAL);
select! {
_ = restart_at.fuse() => {},
_ = stop_signal.changed().fuse() => {},
}
}
}
// for some reason fixing this is causing compilation error, see https://github.com/rust-lang/rust-clippy/issues/7052
#[allow(clippy::manual_async_fn)]
fn pull_status(
self: Arc<Self>,
peer: Uuid,
) -> impl futures::future::Future<Output = ()> + Send + 'static {
async move {
let resp = self
.rpc_client
.call(peer, Message::PullStatus, PING_TIMEOUT)
.await;
if let Ok(Message::AdvertiseNodesUp(nodes)) = resp {
let _: Result<_, _> = self.handle_advertise_nodes_up(&nodes).await;
}
}
}
async fn pull_config(self: Arc<Self>, peer: Uuid) {
let resp = self
.rpc_client
.call(peer, Message::PullConfig, PING_TIMEOUT)
.await;
if let Ok(Message::AdvertiseConfig(config)) = resp {
let _: Result<_, _> = self.handle_advertise_config(&config).await;
}
}
async fn update_status(self: &Arc<Self>, updaters: &Updaters, status: Status) {
if status.hash != self.status.borrow().hash {
let mut list = status.to_serializable_membership(&self);
// Combine with old peer list to make sure no peer is lost
if let Ok(old_list) = self.persist_status.load_async().await {
for pp in old_list {
if !list.iter().any(|np| pp.id == np.id) {
list.push(pp);
}
}
}
if !list.is_empty() {
info!("Persisting new peer list ({} peers)", list.len());
self.persist_status
.save_async(&list)
.await
.expect("Unable to persist peer list");
}
}
updaters
.update_status
.send(Arc::new(status))
.expect("Could not update internal membership status");
}
}

View File

@ -38,31 +38,6 @@ impl NetworkConfig {
version: 0,
}
}
pub(crate) fn migrate_from_021(old: garage_rpc_021::ring::NetworkConfig) -> Self {
let members = old
.members
.into_iter()
.map(|(id, conf)| {
(
Hash::try_from(id.as_slice()).unwrap(),
NetworkConfigEntry {
zone: conf.datacenter,
capacity: if conf.capacity == 0 {
None
} else {
Some(conf.capacity)
},
tag: conf.tag,
},
)
})
.collect();
Self {
members,
version: old.version,
}
}
}
/// The overall configuration of one (possibly remote) node

View File

@ -1,369 +0,0 @@
//! Contain structs related to making RPCs
use std::borrow::Borrow;
use std::marker::PhantomData;
use std::net::SocketAddr;
use std::pin::Pin;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::time::Duration;
use arc_swap::ArcSwapOption;
use futures::future::Future;
use futures::stream::futures_unordered::FuturesUnordered;
use futures::stream::StreamExt;
use futures_util::future::FutureExt;
use hyper::client::{Client, HttpConnector};
use hyper::{Body, Method, Request};
use tokio::sync::{watch, Semaphore};
use garage_util::background::BackgroundRunner;
use garage_util::config::TlsConfig;
use garage_util::data::*;
use garage_util::error::{Error, RpcError};
use crate::membership::Status;
use crate::rpc_server::RpcMessage;
use crate::tls_util;
const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10);
/// Strategy to apply when making RPC
#[derive(Copy, Clone)]
pub struct RequestStrategy {
/// Max time to wait for a response
pub rs_timeout: Duration,
/// Minimum number of responses to consider the request successful
pub rs_quorum: usize,
/// Should requests be dropped after enough responses are received
pub rs_interrupt_after_quorum: bool,
}
impl RequestStrategy {
/// Create a RequestStrategy with default timeout, not interrupting when the quorum is reached
pub fn with_quorum(quorum: usize) -> Self {
RequestStrategy {
rs_timeout: DEFAULT_TIMEOUT,
rs_quorum: quorum,
rs_interrupt_after_quorum: false,
}
}
/// Set timeout of the strategy
pub fn with_timeout(mut self, timeout: Duration) -> Self {
self.rs_timeout = timeout;
self
}
/// Set whether requests can be dropped after the quorum has been reached
/// In general, true for read requests and false for writes
pub fn interrupt_after_quorum(mut self, interrupt: bool) -> Self {
self.rs_interrupt_after_quorum = interrupt;
self
}
}
/// Shortcut for a boxed async function taking a message, and resolving to another message or an
/// error
pub type LocalHandlerFn<M> =
Box<dyn Fn(Arc<M>) -> Pin<Box<dyn Future<Output = Result<M, Error>> + Send>> + Send + Sync>;
/// Client used to send RPC
pub struct RpcClient<M: RpcMessage> {
status: watch::Receiver<Arc<Status>>,
background: Arc<BackgroundRunner>,
local_handler: ArcSwapOption<(Uuid, LocalHandlerFn<M>)>,
rpc_addr_client: RpcAddrClient<M>,
}
impl<M: RpcMessage + 'static> RpcClient<M> {
/// Create a new RpcClient from an address, a job runner, and the status of all RPC servers
pub fn new(
rac: RpcAddrClient<M>,
background: Arc<BackgroundRunner>,
status: watch::Receiver<Arc<Status>>,
) -> Arc<Self> {
Arc::new(Self {
rpc_addr_client: rac,
background,
status,
local_handler: ArcSwapOption::new(None),
})
}
/// Set the local handler, to process RPC to this node without network usage
pub fn set_local_handler<F, Fut>(&self, my_id: Uuid, handler: F)
where
F: Fn(Arc<M>) -> Fut + Send + Sync + 'static,
Fut: Future<Output = Result<M, Error>> + Send + 'static,
{
let handler_arc = Arc::new(handler);
let handler: LocalHandlerFn<M> = Box::new(move |msg| {
let handler_arc2 = handler_arc.clone();
Box::pin(async move { handler_arc2(msg).await })
});
self.local_handler.swap(Some(Arc::new((my_id, handler))));
}
/// Get a RPC client to make calls using node's SocketAddr instead of its ID
pub fn by_addr(&self) -> &RpcAddrClient<M> {
&self.rpc_addr_client
}
/// Make a RPC call
pub async fn call(&self, to: Uuid, msg: M, timeout: Duration) -> Result<M, Error> {
self.call_arc(to, Arc::new(msg), timeout).await
}
/// Make a RPC call from a message stored in an Arc
pub async fn call_arc(&self, to: Uuid, msg: Arc<M>, timeout: Duration) -> Result<M, Error> {
if let Some(lh) = self.local_handler.load_full() {
let (my_id, local_handler) = lh.as_ref();
if to.borrow() == my_id {
return local_handler(msg).await;
}
}
let status = self.status.borrow().clone();
let node_status = match status.nodes.get(&to) {
Some(node_status) => {
if node_status.is_up() {
node_status
} else {
return Err(Error::from(RpcError::NodeDown(to)));
}
}
None => {
return Err(Error::Message(format!(
"Peer ID not found: {:?}",
to.borrow()
)))
}
};
match self
.rpc_addr_client
.call(&node_status.addr, msg, timeout)
.await
{
Err(rpc_error) => {
node_status.num_failures.fetch_add(1, Ordering::SeqCst);
Err(Error::from(rpc_error))
}
Ok(x) => x,
}
}
/// Make a RPC call to multiple servers, returning a Vec containing each result
pub async fn call_many(&self, to: &[Uuid], msg: M, timeout: Duration) -> Vec<Result<M, Error>> {
let msg = Arc::new(msg);
let mut resp_stream = to
.iter()
.map(|to| self.call_arc(*to, msg.clone(), timeout))
.collect::<FuturesUnordered<_>>();
let mut results = vec![];
while let Some(resp) = resp_stream.next().await {
results.push(resp);
}
results
}
/// Make an RPC call to multiple servers, returning either a Vec of responses, or an error if the
/// strategy could not be respected due to too many errors
pub async fn try_call_many(
self: &Arc<Self>,
to: &[Uuid],
msg: M,
strategy: RequestStrategy,
) -> Result<Vec<M>, Error> {
let timeout = strategy.rs_timeout;
let msg = Arc::new(msg);
let mut resp_stream = to
.to_vec()
.into_iter()
.map(|to| {
let self2 = self.clone();
let msg = msg.clone();
async move { self2.call_arc(to, msg, timeout).await }
})
.collect::<FuturesUnordered<_>>();
let mut results = vec![];
let mut errors = vec![];
while let Some(resp) = resp_stream.next().await {
match resp {
Ok(msg) => {
results.push(msg);
if results.len() >= strategy.rs_quorum {
break;
}
}
Err(e) => {
errors.push(e);
}
}
}
if results.len() >= strategy.rs_quorum {
// Continue requests in background.
// Continue the remaining requests immediately using tokio::spawn
// but enqueue a task in the background runner
// to ensure that the process won't exit until the requests are done
// (if we had just enqueued the resp_stream.collect directly in the background runner,
// the requests might have been put on hold in the background runner's queue,
// in which case they might timeout or otherwise fail)
if !strategy.rs_interrupt_after_quorum {
let wait_finished_fut = tokio::spawn(async move {
resp_stream.collect::<Vec<_>>().await;
});
self.background.spawn(wait_finished_fut.map(|_| Ok(())));
}
Ok(results)
} else {
let errors = errors.iter().map(|e| format!("{}", e)).collect::<Vec<_>>();
Err(Error::from(RpcError::TooManyErrors(errors)))
}
}
}
/// Thin wrapper around an `RpcHttpClient` specifying the path of the request
pub struct RpcAddrClient<M: RpcMessage> {
phantom: PhantomData<M>,
http_client: Arc<RpcHttpClient>,
path: String,
}
impl<M: RpcMessage> RpcAddrClient<M> {
/// Create an RpcAddrClient from an HTTP client and the endpoint to reach for RPCs
pub fn new(http_client: Arc<RpcHttpClient>, path: String) -> Self {
Self {
phantom: PhantomData::default(),
http_client,
path,
}
}
/// Make a RPC
pub async fn call<MB>(
&self,
to_addr: &SocketAddr,
msg: MB,
timeout: Duration,
) -> Result<Result<M, Error>, RpcError>
where
MB: Borrow<M>,
{
self.http_client
.call(&self.path, to_addr, msg, timeout)
.await
}
}
/// HTTP client used to make RPCs
pub struct RpcHttpClient {
request_limiter: Semaphore,
method: ClientMethod,
}
enum ClientMethod {
Http(Client<HttpConnector, hyper::Body>),
Https(Client<tls_util::HttpsConnectorFixedDnsname<HttpConnector>, hyper::Body>),
}
impl RpcHttpClient {
/// Create a new RpcHttpClient
pub fn new(
max_concurrent_requests: usize,
tls_config: &Option<TlsConfig>,
) -> Result<Self, Error> {
let method = if let Some(cf) = tls_config {
let ca_certs = tls_util::load_certs(&cf.ca_cert).map_err(|e| {
Error::Message(format!("Failed to open CA certificate file: {:?}", e))
})?;
let node_certs = tls_util::load_certs(&cf.node_cert)
.map_err(|e| Error::Message(format!("Failed to open certificate file: {:?}", e)))?;
let node_key = tls_util::load_private_key(&cf.node_key)
.map_err(|e| Error::Message(format!("Failed to open private key file: {:?}", e)))?;
let mut config = rustls::ClientConfig::new();
for crt in ca_certs.iter() {
config.root_store.add(crt)?;
}
config.set_single_client_cert([&node_certs[..], &ca_certs[..]].concat(), node_key)?;
let connector =
tls_util::HttpsConnectorFixedDnsname::<HttpConnector>::new(config, "garage");
ClientMethod::Https(Client::builder().build(connector))
} else {
ClientMethod::Http(Client::new())
};
Ok(RpcHttpClient {
method,
request_limiter: Semaphore::new(max_concurrent_requests),
})
}
/// Make a RPC
async fn call<M, MB>(
&self,
path: &str,
to_addr: &SocketAddr,
msg: MB,
timeout: Duration,
) -> Result<Result<M, Error>, RpcError>
where
MB: Borrow<M>,
M: RpcMessage,
{
let uri = match self.method {
ClientMethod::Http(_) => format!("http://{}/{}", to_addr, path),
ClientMethod::Https(_) => format!("https://{}/{}", to_addr, path),
};
let req = Request::builder()
.method(Method::POST)
.uri(uri)
.body(Body::from(rmp_to_vec_all_named(msg.borrow())?))?;
let resp_fut = match &self.method {
ClientMethod::Http(client) => client.request(req).fuse(),
ClientMethod::Https(client) => client.request(req).fuse(),
};
trace!("({}) Acquiring request_limiter slot...", path);
let slot = self.request_limiter.acquire().await;
trace!("({}) Got slot, doing request to {}...", path, to_addr);
let resp = tokio::time::timeout(timeout, resp_fut)
.await
.map_err(|e| {
debug!(
"RPC timeout to {}: {}",
to_addr,
debug_serialize(msg.borrow())
);
e
})?
.map_err(|e| {
warn!(
"RPC HTTP client error when connecting to {}: {}",
to_addr, e
);
e
})?;
let status = resp.status();
trace!("({}) Request returned, got status {}", path, status);
let body = hyper::body::to_bytes(resp.into_body()).await?;
drop(slot);
match rmp_serde::decode::from_read::<_, Result<M, String>>(&body[..])? {
Err(e) => Ok(Err(Error::RemoteError(e, status))),
Ok(x) => Ok(Ok(x)),
}
}
}

src/rpc/rpc_helper.rs Normal file
View File

@ -0,0 +1,209 @@
//! Contain structs related to making RPCs
use std::sync::Arc;
use std::time::Duration;
use futures::future::join_all;
use futures::stream::futures_unordered::FuturesUnordered;
use futures::stream::StreamExt;
use futures_util::future::FutureExt;
use tokio::select;
pub use netapp::endpoint::{Endpoint, EndpointHandler, Message as Rpc};
use netapp::peering::fullmesh::FullMeshPeeringStrategy;
pub use netapp::proto::*;
pub use netapp::{NetApp, NodeID};
use garage_util::background::BackgroundRunner;
use garage_util::data::Uuid;
use garage_util::error::Error;
const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10);
/// Strategy to apply when making RPC
#[derive(Copy, Clone)]
pub struct RequestStrategy {
/// Max time to wait for a response
pub rs_timeout: Duration,
/// Minimum number of responses to consider the request successful
pub rs_quorum: Option<usize>,
/// Should requests be dropped after enough responses are received
pub rs_interrupt_after_quorum: bool,
/// Request priority
pub rs_priority: RequestPriority,
}
impl RequestStrategy {
/// Create a RequestStrategy with default timeout, not interrupting when the quorum is reached
pub fn with_priority(prio: RequestPriority) -> Self {
RequestStrategy {
rs_timeout: DEFAULT_TIMEOUT,
rs_quorum: None,
rs_interrupt_after_quorum: false,
rs_priority: prio,
}
}
/// Set quorum to be reached for request
pub fn with_quorum(mut self, quorum: usize) -> Self {
self.rs_quorum = Some(quorum);
self
}
/// Set timeout of the strategy
pub fn with_timeout(mut self, timeout: Duration) -> Self {
self.rs_timeout = timeout;
self
}
/// Set whether requests can be dropped after the quorum has been reached
/// In general, true for read requests and false for writes
pub fn interrupt_after_quorum(mut self, interrupt: bool) -> Self {
self.rs_interrupt_after_quorum = interrupt;
self
}
}
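Compared to the old RequestStrategy, the quorum is now optional (try_call_many below defaults it to the full node set) and every request carries a netapp priority instead of going through a shared HTTP semaphore. An example composing the builder methods above; PRIO_HIGH is one of the priority constants re-exported from netapp::proto:

use std::time::Duration;

/// A read-style strategy: succeed after 2 responses, stop waiting for
/// (and drop) the remaining requests once the quorum is reached.
fn read_strategy() -> RequestStrategy {
    RequestStrategy::with_priority(PRIO_HIGH)
        .with_quorum(2)
        .with_timeout(Duration::from_secs(5))
        .interrupt_after_quorum(true)
}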
#[derive(Clone)]
pub struct RpcHelper {
pub(crate) fullmesh: Arc<FullMeshPeeringStrategy>,
pub(crate) background: Arc<BackgroundRunner>,
}
impl RpcHelper {
pub async fn call<M, H, S>(
&self,
endpoint: &Endpoint<M, H>,
to: Uuid,
msg: M,
strat: RequestStrategy,
) -> Result<S, Error>
where
M: Rpc<Response = Result<S, Error>>,
H: EndpointHandler<M>,
{
self.call_arc(endpoint, to, Arc::new(msg), strat).await
}
pub async fn call_arc<M, H, S>(
&self,
endpoint: &Endpoint<M, H>,
to: Uuid,
msg: Arc<M>,
strat: RequestStrategy,
) -> Result<S, Error>
where
M: Rpc<Response = Result<S, Error>>,
H: EndpointHandler<M>,
{
let node_id = to.into();
select! {
res = endpoint.call(&node_id, &msg, strat.rs_priority) => Ok(res??),
_ = tokio::time::sleep(strat.rs_timeout) => Err(Error::Timeout),
}
}
pub async fn call_many<M, H, S>(
&self,
endpoint: &Endpoint<M, H>,
to: &[Uuid],
msg: M,
strat: RequestStrategy,
) -> Vec<(Uuid, Result<S, Error>)>
where
M: Rpc<Response = Result<S, Error>>,
H: EndpointHandler<M>,
{
let msg = Arc::new(msg);
let resps = join_all(
to.iter()
.map(|to| self.call_arc(endpoint, *to, msg.clone(), strat)),
)
.await;
to.iter()
.cloned()
.zip(resps.into_iter())
.collect::<Vec<_>>()
}
pub async fn broadcast<M, H, S>(
&self,
endpoint: &Endpoint<M, H>,
msg: M,
strat: RequestStrategy,
) -> Vec<(Uuid, Result<S, Error>)>
where
M: Rpc<Response = Result<S, Error>>,
H: EndpointHandler<M>,
{
let to = self
.fullmesh
.get_peer_list()
.iter()
.map(|p| p.id.into())
.collect::<Vec<_>>();
self.call_many(endpoint, &to[..], msg, strat).await
}
/// Make an RPC call to multiple servers, returning either a Vec of responses, or an error if the
/// strategy could not be respected due to too many errors
pub async fn try_call_many<M, H, S>(
&self,
endpoint: &Arc<Endpoint<M, H>>,
to: &[Uuid],
msg: M,
strategy: RequestStrategy,
) -> Result<Vec<S>, Error>
where
M: Rpc<Response = Result<S, Error>> + 'static,
H: EndpointHandler<M> + 'static,
S: Send,
{
let msg = Arc::new(msg);
let mut resp_stream = to
.to_vec()
.into_iter()
.map(|to| {
let self2 = self.clone();
let msg = msg.clone();
let endpoint2 = endpoint.clone();
async move { self2.call_arc(&endpoint2, to, msg, strategy).await }
})
.collect::<FuturesUnordered<_>>();
let mut results = vec![];
let mut errors = vec![];
let quorum = strategy.rs_quorum.unwrap_or(to.len());
while let Some(resp) = resp_stream.next().await {
match resp {
Ok(msg) => {
results.push(msg);
if results.len() >= quorum {
break;
}
}
Err(e) => {
errors.push(e);
}
}
}
if results.len() >= quorum {
// Continue requests in background.
// Continue the remaining requests immediately using tokio::spawn
// but enqueue a task in the background runner
// to ensure that the process won't exit until the requests are done
// (if we had just enqueued the resp_stream.collect directly in the background runner,
// the requests might have been put on hold in the background runner's queue,
// in which case they might timeout or otherwise fail)
if !strategy.rs_interrupt_after_quorum {
let wait_finished_fut = tokio::spawn(async move {
resp_stream.collect::<Vec<_>>().await;
});
self.background.spawn(wait_finished_fut.map(|_| Ok(())));
}
Ok(results)
} else {
let errors = errors.iter().map(|e| format!("{}", e)).collect::<Vec<_>>();
Err(Error::Quorum(quorum, results.len(), to.len(), errors))
}
}
}
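A sketch of a typical try_call_many caller, reusing the BlockRpc/BlockManager endpoint types from earlier in this diff (this assumes, as the handler impl above suggests, that BlockRpc declares Result<BlockRpc, Error> as its Rpc response type); the node list and quorum value are placeholders:

use std::sync::Arc;
use garage_util::data::Uuid;
use garage_util::error::Error;

async fn write_with_quorum(
    rpc: &RpcHelper,
    endpoint: &Arc<Endpoint<BlockRpc, BlockManager>>,
    who: &[Uuid],
    msg: BlockRpc,
) -> Result<(), Error> {
    // Succeed once 2 nodes have answered; the remaining requests keep
    // running in the background (rs_interrupt_after_quorum is false).
    rpc.try_call_many(
        endpoint,
        who,
        msg,
        RequestStrategy::with_priority(PRIO_HIGH).with_quorum(2),
    )
    .await?;
    Ok(())
}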

View File

@ -1,247 +0,0 @@
//! Contains structs related to receiving RPCs
use std::collections::HashMap;
use std::net::SocketAddr;
use std::pin::Pin;
use std::sync::Arc;
use std::time::Instant;
use futures::future::Future;
use futures_util::future::*;
use futures_util::stream::*;
use hyper::server::conn::AddrStream;
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Method, Request, Response, Server, StatusCode};
use serde::{Deserialize, Serialize};
use tokio::net::{TcpListener, TcpStream};
use tokio_rustls::server::TlsStream;
use tokio_rustls::TlsAcceptor;
use tokio_stream::wrappers::TcpListenerStream;
use garage_util::config::TlsConfig;
use garage_util::data::*;
use garage_util::error::Error;
use crate::tls_util;
/// Trait for messages that can be sent as RPC
pub trait RpcMessage: Serialize + for<'de> Deserialize<'de> + Send + Sync {}
type ResponseFuture = Pin<Box<dyn Future<Output = Result<Response<Body>, Error>> + Send>>;
type Handler = Box<dyn Fn(Request<Body>, SocketAddr) -> ResponseFuture + Send + Sync>;
/// Structure handling RPCs
pub struct RpcServer {
/// The address the RpcServer will bind
pub bind_addr: SocketAddr,
/// The tls configuration used for RPC
pub tls_config: Option<TlsConfig>,
handlers: HashMap<String, Handler>,
}
async fn handle_func<M, F, Fut>(
handler: Arc<F>,
req: Request<Body>,
sockaddr: SocketAddr,
name: Arc<String>,
) -> Result<Response<Body>, Error>
where
M: RpcMessage + 'static,
F: Fn(M, SocketAddr) -> Fut + Send + Sync + 'static,
Fut: Future<Output = Result<M, Error>> + Send + 'static,
{
let begin_time = Instant::now();
let whole_body = hyper::body::to_bytes(req.into_body()).await?;
let msg = rmp_serde::decode::from_read::<_, M>(&whole_body[..])?;
trace!(
"Request message: {}",
serde_json::to_string(&msg)
.unwrap_or_else(|_| "<json error>".into())
.chars()
.take(100)
.collect::<String>()
);
match handler(msg, sockaddr).await {
Ok(resp) => {
let resp_bytes = rmp_to_vec_all_named::<Result<M, String>>(&Ok(resp))?;
let rpc_duration = (Instant::now() - begin_time).as_millis();
if rpc_duration > 100 {
debug!("RPC {} ok, took long: {} ms", name, rpc_duration,);
}
Ok(Response::new(Body::from(resp_bytes)))
}
Err(e) => {
let err_str = format!("{}", e);
let rep_bytes = rmp_to_vec_all_named::<Result<M, String>>(&Err(err_str))?;
let mut err_response = Response::new(Body::from(rep_bytes));
*err_response.status_mut() = match e {
Error::BadRpc(_) => StatusCode::BAD_REQUEST,
_ => StatusCode::INTERNAL_SERVER_ERROR,
};
warn!(
"RPC error ({}): {} ({} ms)",
name,
e,
(Instant::now() - begin_time).as_millis(),
);
Ok(err_response)
}
}
}
impl RpcServer {
/// Create a new RpcServer
pub fn new(bind_addr: SocketAddr, tls_config: Option<TlsConfig>) -> Self {
Self {
bind_addr,
tls_config,
handlers: HashMap::new(),
}
}
/// Add handler handling request made to `name`
pub fn add_handler<M, F, Fut>(&mut self, name: String, handler: F)
where
M: RpcMessage + 'static,
F: Fn(M, SocketAddr) -> Fut + Send + Sync + 'static,
Fut: Future<Output = Result<M, Error>> + Send + 'static,
{
let name2 = Arc::new(name.clone());
let handler_arc = Arc::new(handler);
let handler = Box::new(move |req: Request<Body>, sockaddr: SocketAddr| {
let handler2 = handler_arc.clone();
let b: ResponseFuture = Box::pin(handle_func(handler2, req, sockaddr, name2.clone()));
b
});
self.handlers.insert(name, handler);
}
async fn handler(
self: Arc<Self>,
req: Request<Body>,
addr: SocketAddr,
) -> Result<Response<Body>, Error> {
if req.method() != Method::POST {
let mut bad_request = Response::default();
*bad_request.status_mut() = StatusCode::BAD_REQUEST;
return Ok(bad_request);
}
let path = &req.uri().path()[1..].to_string();
let handler = match self.handlers.get(path) {
Some(h) => h,
None => {
let mut not_found = Response::default();
*not_found.status_mut() = StatusCode::NOT_FOUND;
return Ok(not_found);
}
};
trace!("({}) Handling request", path);
let resp_waiter = tokio::spawn(handler(req, addr));
match resp_waiter.await {
Err(err) => {
warn!("Handler await error: {}", err);
let mut ise = Response::default();
*ise.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
Ok(ise)
}
Ok(Err(err)) => {
trace!("({}) Request handler failed: {}", path, err);
let mut bad_request = Response::new(Body::from(format!("{}", err)));
*bad_request.status_mut() = StatusCode::BAD_REQUEST;
Ok(bad_request)
}
Ok(Ok(resp)) => {
trace!("({}) Request handler succeeded", path);
Ok(resp)
}
}
}
/// Run the RpcServer
pub async fn run(
self: Arc<Self>,
shutdown_signal: impl Future<Output = ()>,
) -> Result<(), Error> {
if let Some(tls_config) = self.tls_config.as_ref() {
let ca_certs = tls_util::load_certs(&tls_config.ca_cert)?;
let node_certs = tls_util::load_certs(&tls_config.node_cert)?;
let node_key = tls_util::load_private_key(&tls_config.node_key)?;
let mut ca_store = rustls::RootCertStore::empty();
for crt in ca_certs.iter() {
ca_store.add(crt)?;
}
let mut config =
rustls::ServerConfig::new(rustls::AllowAnyAuthenticatedClient::new(ca_store));
config.set_single_cert([&node_certs[..], &ca_certs[..]].concat(), node_key)?;
let tls_acceptor = Arc::new(TlsAcceptor::from(Arc::new(config)));
let listener = TcpListener::bind(&self.bind_addr).await?;
let incoming = TcpListenerStream::new(listener).filter_map(|socket| async {
match socket {
Ok(stream) => match tls_acceptor.clone().accept(stream).await {
Ok(x) => Some(Ok::<_, hyper::Error>(x)),
Err(_e) => None,
},
Err(_) => None,
}
});
let incoming = hyper::server::accept::from_stream(incoming);
let self_arc = self.clone();
let service = make_service_fn(|conn: &TlsStream<TcpStream>| {
let client_addr = conn
.get_ref()
.0
.peer_addr()
.unwrap_or_else(|_| ([0, 0, 0, 0], 0).into());
let self_arc = self_arc.clone();
async move {
Ok::<_, Error>(service_fn(move |req: Request<Body>| {
self_arc.clone().handler(req, client_addr).map_err(|e| {
warn!("RPC handler error: {}", e);
e
})
}))
}
});
let server = Server::builder(incoming).serve(service);
let graceful = server.with_graceful_shutdown(shutdown_signal);
info!("RPC server listening on http://{}", self.bind_addr);
graceful.await?;
} else {
let self_arc = self.clone();
let service = make_service_fn(move |conn: &AddrStream| {
let client_addr = conn.remote_addr();
let self_arc = self_arc.clone();
async move {
Ok::<_, Error>(service_fn(move |req: Request<Body>| {
self_arc.clone().handler(req, client_addr).map_err(|e| {
warn!("RPC handler error: {}", e);
e
})
}))
}
});
let server = Server::bind(&self.bind_addr).serve(service);
let graceful = server.with_graceful_shutdown(shutdown_signal);
info!("RPC server listening on http://{}", self.bind_addr);
graceful.await?;
}
Ok(())
}
}

src/rpc/system.rs Normal file
View File

@ -0,0 +1,551 @@
//! Module containing structs related to membership management
use std::collections::HashMap;
use std::io::{Read, Write};
use std::net::SocketAddr;
use std::path::Path;
use std::sync::{Arc, RwLock};
use std::time::{Duration, Instant};
use arc_swap::ArcSwap;
use async_trait::async_trait;
use futures::{join, select};
use futures_util::future::*;
use serde::{Deserialize, Serialize};
use sodiumoxide::crypto::sign::ed25519;
use tokio::sync::watch;
use tokio::sync::Mutex;
use netapp::endpoint::{Endpoint, EndpointHandler};
use netapp::peering::fullmesh::FullMeshPeeringStrategy;
use netapp::proto::*;
use netapp::util::parse_and_resolve_peer_addr;
use netapp::{NetApp, NetworkKey, NodeID, NodeKey};
use garage_util::background::BackgroundRunner;
use garage_util::config::Config;
use garage_util::data::Uuid;
use garage_util::error::*;
use garage_util::persister::Persister;
use garage_util::time::*;
use crate::consul::*;
use crate::ring::*;
use crate::rpc_helper::*;
const DISCOVERY_INTERVAL: Duration = Duration::from_secs(60);
const STATUS_EXCHANGE_INTERVAL: Duration = Duration::from_secs(10);
const PING_TIMEOUT: Duration = Duration::from_secs(2);
/// RPC endpoint used for calls related to membership
pub const SYSTEM_RPC_PATH: &str = "garage_rpc/membership.rs/SystemRpc";
pub const CONNECT_ERROR_MESSAGE: &str = "Error establishing RPC connection to remote node. This can happen if the remote node is not reachable on the network, but also if the two nodes are not configured with the same rpc_secret";
/// RPC messages related to membership
#[derive(Debug, Serialize, Deserialize, Clone)]
pub enum SystemRpc {
/// Response to successful advertisements
Ok,
/// Request to connect to a specific node (in <pubkey>@<host>:<port> format)
Connect(String),
/// Ask other node its config. Answered with AdvertiseConfig
PullConfig,
/// Advertise Garage status. Answered with another AdvertiseStatus.
/// Exchanged with every node on a regular basis.
AdvertiseStatus(NodeStatus),
/// Advertisement of nodes config. Sent spontaneously or in response to PullConfig
AdvertiseConfig(NetworkConfig),
/// Get the states of known nodes
GetKnownNodes,
/// Return known nodes
ReturnKnownNodes(Vec<KnownNodeInfo>),
}
impl Rpc for SystemRpc {
type Response = Result<SystemRpc, Error>;
}
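SystemRpc::Connect carries the target node as a string in the <public key in hex>@<host>:<port> format; parsing and DNS resolution are delegated to netapp's parse_and_resolve_peer_addr, the same helper used by handle_connect below. As a standalone sketch:

use std::net::SocketAddr;
use netapp::util::parse_and_resolve_peer_addr;
use netapp::NodeID;

/// Parse "<64 hex chars>@<host>:<port>"; None if the key or address is
/// invalid or the hostname cannot be resolved.
fn parse_node_spec(spec: &str) -> Option<(NodeID, Vec<SocketAddr>)> {
    parse_and_resolve_peer_addr(spec)
}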
/// This node's membership manager
pub struct System {
/// The id of this node
pub id: Uuid,
persist_config: Persister<NetworkConfig>,
persist_peer_list: Persister<Vec<(Uuid, SocketAddr)>>,
local_status: ArcSwap<NodeStatus>,
node_status: RwLock<HashMap<Uuid, (u64, NodeStatus)>>,
pub netapp: Arc<NetApp>,
fullmesh: Arc<FullMeshPeeringStrategy>,
pub rpc: RpcHelper,
system_endpoint: Arc<Endpoint<SystemRpc, System>>,
rpc_listen_addr: SocketAddr,
rpc_public_addr: Option<SocketAddr>,
bootstrap_peers: Vec<(NodeID, SocketAddr)>,
consul_host: Option<String>,
consul_service_name: Option<String>,
replication_factor: usize,
/// The ring
pub ring: watch::Receiver<Arc<Ring>>,
update_ring: Mutex<watch::Sender<Arc<Ring>>>,
/// The job runner of this node
pub background: Arc<BackgroundRunner>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NodeStatus {
/// Hostname of the node
pub hostname: String,
/// Replication factor configured on the node
pub replication_factor: usize,
/// Configuration version
pub config_version: u64,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct KnownNodeInfo {
pub id: Uuid,
pub addr: SocketAddr,
pub is_up: bool,
pub last_seen_secs_ago: Option<u64>,
pub status: NodeStatus,
}
pub fn read_node_id(metadata_dir: &Path) -> Result<NodeID, Error> {
let mut pubkey_file = metadata_dir.to_path_buf();
pubkey_file.push("node_key.pub");
let mut f = std::fs::File::open(pubkey_file.as_path())?;
let mut d = vec![];
f.read_to_end(&mut d)?;
if d.len() != 32 {
return Err(Error::Message("Corrupt node_key.pub file".to_string()));
}
let mut key = [0u8; 32];
key.copy_from_slice(&d[..]);
Ok(NodeID::from_slice(&key[..]).unwrap())
}
pub fn gen_node_key(metadata_dir: &Path) -> Result<NodeKey, Error> {
let mut key_file = metadata_dir.to_path_buf();
key_file.push("node_key");
if key_file.as_path().exists() {
let mut f = std::fs::File::open(key_file.as_path())?;
let mut d = vec![];
f.read_to_end(&mut d)?;
if d.len() != 64 {
return Err(Error::Message("Corrupt node_key file".to_string()));
}
let mut key = [0u8; 64];
key.copy_from_slice(&d[..]);
Ok(NodeKey::from_slice(&key[..]).unwrap())
} else {
if !metadata_dir.exists() {
info!("Metadata directory does not exist, creating it.");
std::fs::create_dir(&metadata_dir)?;
}
info!("Generating new node key pair.");
let (pubkey, key) = ed25519::gen_keypair();
{
use std::os::unix::fs::PermissionsExt;
let mut f = std::fs::File::create(key_file.as_path())?;
let mut perm = f.metadata()?.permissions();
perm.set_mode(0o600);
std::fs::set_permissions(key_file.as_path(), perm)?;
f.write_all(&key[..])?;
}
{
let mut pubkey_file = metadata_dir.to_path_buf();
pubkey_file.push("node_key.pub");
let mut f2 = std::fs::File::create(pubkey_file.as_path())?;
f2.write_all(&pubkey[..])?;
}
Ok(key)
}
}
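Together, read_node_id and gen_node_key define the node's on-disk identity: metadata_dir/node_key holds the 64-byte ed25519 secret key (written with mode 0600) and metadata_dir/node_key.pub its 32-byte public key, which doubles as the node's NodeID on the wire. A small consistency check built only from the two functions above (gen_node_key loads the existing key when the file is already present):

use std::path::Path;
use garage_util::error::Error;

fn identity_is_consistent(metadata_dir: &Path) -> Result<bool, Error> {
    let secret = gen_node_key(metadata_dir)?;
    let public = read_node_id(metadata_dir)?;
    Ok(public == secret.public_key())
}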
impl System {
/// Create this node's membership manager
pub fn new(
network_key: NetworkKey,
background: Arc<BackgroundRunner>,
replication_factor: usize,
config: &Config,
) -> Arc<Self> {
let node_key =
gen_node_key(&config.metadata_dir).expect("Unable to read or generate node ID");
info!("Node public key: {}", hex::encode(&node_key.public_key()));
let persist_config = Persister::new(&config.metadata_dir, "network_config");
let persist_peer_list = Persister::new(&config.metadata_dir, "peer_list");
let net_config = match persist_config.load() {
Ok(x) => x,
Err(e) => {
info!(
"No valid previous network configuration stored ({}), starting fresh.",
e
);
NetworkConfig::new()
}
};
let local_status = NodeStatus {
hostname: gethostname::gethostname()
.into_string()
.unwrap_or_else(|_| "<invalid utf-8>".to_string()),
replication_factor,
config_version: net_config.version,
};
let ring = Ring::new(net_config, replication_factor);
let (update_ring, ring) = watch::channel(Arc::new(ring));
if let Some(addr) = config.rpc_public_addr {
println!("{}@{}", hex::encode(&node_key.public_key()), addr);
} else {
println!("{}", hex::encode(&node_key.public_key()));
}
let netapp = NetApp::new(network_key, node_key);
let fullmesh = FullMeshPeeringStrategy::new(
netapp.clone(),
config.bootstrap_peers.clone(),
config.rpc_public_addr,
);
let system_endpoint = netapp.endpoint(SYSTEM_RPC_PATH.into());
let sys = Arc::new(System {
id: netapp.id.into(),
persist_config,
persist_peer_list,
local_status: ArcSwap::new(Arc::new(local_status)),
node_status: RwLock::new(HashMap::new()),
netapp: netapp.clone(),
fullmesh: fullmesh.clone(),
rpc: RpcHelper {
fullmesh,
background: background.clone(),
},
system_endpoint,
replication_factor,
rpc_listen_addr: config.rpc_bind_addr,
rpc_public_addr: config.rpc_public_addr,
bootstrap_peers: config.bootstrap_peers.clone(),
consul_host: config.consul_host.clone(),
consul_service_name: config.consul_service_name.clone(),
ring,
update_ring: Mutex::new(update_ring),
background,
});
sys.system_endpoint.set_handler(sys.clone());
sys
}
/// Perform bootstrapping and start the main loops (RPC listener, peering, discovery, status exchange)
pub async fn run(self: Arc<Self>, must_exit: watch::Receiver<bool>) {
join!(
self.netapp
.clone()
.listen(self.rpc_listen_addr, None, must_exit.clone()),
self.fullmesh.clone().run(must_exit.clone()),
self.discovery_loop(must_exit.clone()),
self.status_exchange_loop(must_exit.clone()),
);
}
// ---- INTERNALS ----
async fn advertise_to_consul(self: Arc<Self>) -> Result<(), Error> {
let (consul_host, consul_service_name) =
match (&self.consul_host, &self.consul_service_name) {
(Some(ch), Some(csn)) => (ch, csn),
_ => return Ok(()),
};
let rpc_public_addr = match self.rpc_public_addr {
Some(addr) => addr,
None => {
warn!("Not advertising to Consul because rpc_public_addr is not defined in config file.");
return Ok(());
}
};
publish_consul_service(
consul_host,
consul_service_name,
self.netapp.id,
&self.local_status.load_full().hostname,
rpc_public_addr,
)
.await
.err_context("Error while publishing Consul service")
}
/// Save network configuration to disc
async fn save_network_config(self: Arc<Self>) -> Result<(), Error> {
let ring: Arc<Ring> = self.ring.borrow().clone();
self.persist_config
.save_async(&ring.config)
.await
.expect("Cannot save current cluster configuration");
Ok(())
}
fn update_local_status(&self) {
let mut new_si: NodeStatus = self.local_status.load().as_ref().clone();
let ring = self.ring.borrow();
new_si.config_version = ring.config.version;
self.local_status.swap(Arc::new(new_si));
}
async fn handle_connect(&self, node: &str) -> Result<SystemRpc, Error> {
let (pubkey, addrs) = parse_and_resolve_peer_addr(node).ok_or_else(|| {
Error::Message(format!(
"Unable to parse or resolve node specification: {}",
node
))
})?;
let mut errors = vec![];
for ip in addrs.iter() {
match self
.netapp
.clone()
.try_connect(*ip, pubkey)
.await
.err_context(CONNECT_ERROR_MESSAGE)
{
Ok(()) => return Ok(SystemRpc::Ok),
Err(e) => {
errors.push((*ip, e));
}
}
}
return Err(Error::Message(format!(
"Could not connect to specified peers. Errors: {:?}",
errors
)));
}
fn handle_pull_config(&self) -> SystemRpc {
let ring = self.ring.borrow().clone();
SystemRpc::AdvertiseConfig(ring.config.clone())
}
fn handle_get_known_nodes(&self) -> SystemRpc {
let node_status = self.node_status.read().unwrap();
let known_nodes = self
.fullmesh
.get_peer_list()
.iter()
.map(|n| KnownNodeInfo {
id: n.id.into(),
addr: n.addr,
is_up: n.is_up(),
last_seen_secs_ago: n.last_seen.map(|t| (Instant::now() - t).as_secs()),
status: node_status
.get(&n.id.into())
.cloned()
.map(|(_, st)| st)
.unwrap_or(NodeStatus {
hostname: "?".to_string(),
replication_factor: 0,
config_version: 0,
}),
})
.collect::<Vec<_>>();
SystemRpc::ReturnKnownNodes(known_nodes)
}
async fn handle_advertise_status(
self: &Arc<Self>,
from: Uuid,
info: &NodeStatus,
) -> Result<SystemRpc, Error> {
let local_info = self.local_status.load();
if local_info.replication_factor < info.replication_factor {
error!("Some node have a higher replication factor ({}) than this one ({}). This is not supported and might lead to bugs",
info.replication_factor,
local_info.replication_factor);
std::process::exit(1);
}
if info.config_version > local_info.config_version {
let self2 = self.clone();
self.background.spawn_cancellable(async move {
self2.pull_config(from).await;
Ok(())
});
}
self.node_status
.write()
.unwrap()
.insert(from, (now_msec(), info.clone()));
Ok(SystemRpc::Ok)
}
async fn handle_advertise_config(
self: Arc<Self>,
adv: &NetworkConfig,
) -> Result<SystemRpc, Error> {
let update_ring = self.update_ring.lock().await;
let ring: Arc<Ring> = self.ring.borrow().clone();
if adv.version > ring.config.version {
let ring = Ring::new(adv.clone(), self.replication_factor);
update_ring.send(Arc::new(ring))?;
drop(update_ring);
let self2 = self.clone();
let adv = adv.clone();
self.background.spawn_cancellable(async move {
self2
.rpc
.broadcast(
&self2.system_endpoint,
SystemRpc::AdvertiseConfig(adv),
RequestStrategy::with_priority(PRIO_HIGH),
)
.await;
Ok(())
});
self.background.spawn(self.clone().save_network_config());
}
Ok(SystemRpc::Ok)
}
async fn status_exchange_loop(&self, mut stop_signal: watch::Receiver<bool>) {
while !*stop_signal.borrow() {
let restart_at = tokio::time::sleep(STATUS_EXCHANGE_INTERVAL);
self.update_local_status();
let local_status: NodeStatus = self.local_status.load().as_ref().clone();
self.rpc
.broadcast(
&self.system_endpoint,
SystemRpc::AdvertiseStatus(local_status),
RequestStrategy::with_priority(PRIO_HIGH).with_timeout(PING_TIMEOUT),
)
.await;
select! {
_ = restart_at.fuse() => {},
_ = stop_signal.changed().fuse() => {},
}
}
}
async fn discovery_loop(self: &Arc<Self>, mut stop_signal: watch::Receiver<bool>) {
let consul_config = match (&self.consul_host, &self.consul_service_name) {
(Some(ch), Some(csn)) => Some((ch.clone(), csn.clone())),
_ => None,
};
while !*stop_signal.borrow() {
let not_configured = self.ring.borrow().config.members.is_empty();
let no_peers = self.fullmesh.get_peer_list().len() < self.replication_factor;
let bad_peers = self
.fullmesh
.get_peer_list()
.iter()
.filter(|p| p.is_up())
.count() != self.ring.borrow().config.members.len();
if not_configured || no_peers || bad_peers {
info!("Doing a bootstrap/discovery step (not_configured: {}, no_peers: {}, bad_peers: {})", not_configured, no_peers, bad_peers);
let mut ping_list = self.bootstrap_peers.clone();
// Add peers from the list stored on disk
if let Ok(peers) = self.persist_peer_list.load_async().await {
ping_list.extend(peers.iter().map(|(id, addr)| ((*id).into(), *addr)))
}
// Fetch peer list from Consul
if let Some((consul_host, consul_service_name)) = &consul_config {
match get_consul_nodes(consul_host, consul_service_name).await {
Ok(node_list) => {
ping_list.extend(node_list);
}
Err(e) => {
warn!("Could not retrieve node list from Consul: {}", e);
}
}
}
for (node_id, node_addr) in ping_list {
tokio::spawn(
self.netapp
.clone()
.try_connect(node_addr, node_id)
.map(|r| r.err_context(CONNECT_ERROR_MESSAGE)),
);
}
}
let peer_list = self
.fullmesh
.get_peer_list()
.iter()
.map(|n| (n.id.into(), n.addr))
.collect::<Vec<_>>();
if let Err(e) = self.persist_peer_list.save_async(&peer_list).await {
warn!("Could not save peer list to file: {}", e);
}
self.background.spawn(self.clone().advertise_to_consul());
let restart_at = tokio::time::sleep(DISCOVERY_INTERVAL);
select! {
_ = restart_at.fuse() => {},
_ = stop_signal.changed().fuse() => {},
}
}
}
async fn pull_config(self: Arc<Self>, peer: Uuid) {
let resp = self
.rpc
.call(
&self.system_endpoint,
peer,
SystemRpc::PullConfig,
RequestStrategy::with_priority(PRIO_HIGH).with_timeout(PING_TIMEOUT),
)
.await;
if let Ok(SystemRpc::AdvertiseConfig(config)) = resp {
let _: Result<_, _> = self.handle_advertise_config(&config).await;
}
}
}
#[async_trait]
impl EndpointHandler<SystemRpc> for System {
async fn handle(self: &Arc<Self>, msg: &SystemRpc, from: NodeID) -> Result<SystemRpc, Error> {
match msg {
SystemRpc::Connect(node) => self.handle_connect(node).await,
SystemRpc::PullConfig => Ok(self.handle_pull_config()),
SystemRpc::AdvertiseStatus(adv) => self.handle_advertise_status(from.into(), adv).await,
SystemRpc::AdvertiseConfig(adv) => self.clone().handle_advertise_config(&adv).await,
SystemRpc::GetKnownNodes => Ok(self.handle_get_known_nodes()),
_ => Err(Error::BadRpc("Unexpected RPC message".to_string())),
}
}
}
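The System handler above follows the endpoint pattern this PR uses everywhere: declare a message type, implement Rpc for it, obtain an Endpoint from netapp, and register the handler. A minimal sketch of that pattern, with MyRpc, MyService, and the endpoint path as illustrative names (imports assume the garage_rpc re-exports used throughout this diff):

use std::sync::Arc;

use async_trait::async_trait;
use serde::{Deserialize, Serialize};

use garage_rpc::system::System;
use garage_rpc::*;
use garage_util::error::Error;

#[derive(Serialize, Deserialize)]
enum MyRpc {
    Ping,
    Ok,
}

impl Rpc for MyRpc {
    type Response = Result<MyRpc, Error>;
}

struct MyService {
    endpoint: Arc<Endpoint<MyRpc, Self>>,
}

impl MyService {
    fn launch(system: Arc<System>) -> Arc<Self> {
        // Same registration dance as TableGc / TableSyncer / Table below.
        let endpoint = system.netapp.endpoint("garage_demo/my_service.rs/Rpc".to_string());
        let svc = Arc::new(Self { endpoint });
        svc.endpoint.set_handler(svc.clone());
        svc
    }
}

#[async_trait]
impl EndpointHandler<MyRpc> for MyService {
    async fn handle(self: &Arc<Self>, msg: &MyRpc, _from: NodeID) -> Result<MyRpc, Error> {
        match msg {
            MyRpc::Ping => Ok(MyRpc::Ok),
            MyRpc::Ok => Err(Error::BadRpc("Unexpected RPC message".to_string())),
        }
    }
}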

View File

@ -1,140 +0,0 @@
use core::future::Future;
use core::task::{Context, Poll};
use std::pin::Pin;
use std::sync::Arc;
use std::{fs, io};
use futures_util::future::*;
use hyper::client::connect::Connection;
use hyper::client::HttpConnector;
use hyper::service::Service;
use hyper::Uri;
use hyper_rustls::MaybeHttpsStream;
use rustls::internal::pemfile;
use tokio::io::{AsyncRead, AsyncWrite};
use tokio_rustls::TlsConnector;
use webpki::DNSNameRef;
use garage_util::error::Error;
pub fn load_certs(filename: &str) -> Result<Vec<rustls::Certificate>, Error> {
let certfile = fs::File::open(&filename)?;
let mut reader = io::BufReader::new(certfile);
let certs = pemfile::certs(&mut reader).map_err(|_| {
Error::Message(format!(
"Could not deecode certificates from file: {}",
filename
))
})?;
if certs.is_empty() {
return Err(Error::Message(format!(
"Invalid certificate file: {}",
filename
)));
}
Ok(certs)
}
pub fn load_private_key(filename: &str) -> Result<rustls::PrivateKey, Error> {
let keydata = fs::read_to_string(filename)?;
let mut buf1 = keydata.as_bytes();
let rsa_keys = pemfile::rsa_private_keys(&mut buf1).unwrap_or_default();
let mut buf2 = keydata.as_bytes();
let pkcs8_keys = pemfile::pkcs8_private_keys(&mut buf2).unwrap_or_default();
let mut keys = rsa_keys;
keys.extend(pkcs8_keys.into_iter());
if keys.len() != 1 {
return Err(Error::Message(format!(
"Invalid private key file: {} ({} private keys)",
filename,
keys.len()
)));
}
Ok(keys[0].clone())
}
// ---- AWFUL COPYPASTA FROM HYPER-RUSTLS connector.rs
// ---- ALWAYS USE `garage` AS HOSTNAME FOR TLS VERIFICATION
#[derive(Clone)]
pub struct HttpsConnectorFixedDnsname<T> {
http: T,
tls_config: Arc<rustls::ClientConfig>,
fixed_dnsname: &'static str,
}
type BoxError = Box<dyn std::error::Error + Send + Sync>;
impl HttpsConnectorFixedDnsname<HttpConnector> {
pub fn new(mut tls_config: rustls::ClientConfig, fixed_dnsname: &'static str) -> Self {
let mut http = HttpConnector::new();
http.enforce_http(false);
tls_config.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec()];
Self {
http,
tls_config: Arc::new(tls_config),
fixed_dnsname,
}
}
}
impl<T> Service<Uri> for HttpsConnectorFixedDnsname<T>
where
T: Service<Uri>,
T::Response: Connection + AsyncRead + AsyncWrite + Send + Unpin + 'static,
T::Future: Send + 'static,
T::Error: Into<BoxError>,
{
type Response = MaybeHttpsStream<T::Response>;
type Error = BoxError;
#[allow(clippy::type_complexity)]
type Future =
Pin<Box<dyn Future<Output = Result<MaybeHttpsStream<T::Response>, BoxError>> + Send>>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
match self.http.poll_ready(cx) {
Poll::Ready(Ok(())) => Poll::Ready(Ok(())),
Poll::Ready(Err(e)) => Poll::Ready(Err(e.into())),
Poll::Pending => Poll::Pending,
}
}
fn call(&mut self, dst: Uri) -> Self::Future {
let is_https = dst.scheme_str() == Some("https");
if !is_https {
let connecting_future = self.http.call(dst);
let f = async move {
let tcp = connecting_future.await.map_err(Into::into)?;
Ok(MaybeHttpsStream::Http(tcp))
};
f.boxed()
} else {
let cfg = self.tls_config.clone();
let connecting_future = self.http.call(dst);
let dnsname =
DNSNameRef::try_from_ascii_str(self.fixed_dnsname).expect("Invalid fixed dnsname");
let f = async move {
let tcp = connecting_future.await.map_err(Into::into)?;
let connector = TlsConnector::from(cfg);
let tls = connector
.connect(dnsname, tcp)
.await
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
Ok(MaybeHttpsStream::Https(tls))
};
f.boxed()
}
}
}

View File

@ -1,6 +1,6 @@
[package]
name = "garage_table"
version = "0.3.0"
version = "0.4.0"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
@ -13,9 +13,10 @@ path = "lib.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
garage_rpc = { version = "0.3.0", path = "../rpc" }
garage_util = { version = "0.3.0", path = "../util" }
garage_rpc = { version = "0.4.0", path = "../rpc" }
garage_util = { version = "0.4.0", path = "../util" }
async-trait = "0.1.7"
bytes = "1.0"
hexdump = "0.1"
log = "0.4"
@ -30,4 +31,3 @@ serde_bytes = "0.11"
futures = "0.3"
futures-util = "0.3"
tokio = { version = "1.0", default-features = false, features = ["rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] }

View File

@ -9,7 +9,7 @@ use tokio::sync::Notify;
use garage_util::data::*;
use garage_util::error::*;
use garage_rpc::membership::System;
use garage_rpc::system::System;
use crate::crdt::Crdt;
use crate::replication::*;

View File

@ -2,6 +2,7 @@ use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;
use async_trait::async_trait;
use serde::{Deserialize, Serialize};
use serde_bytes::ByteBuf;
@ -13,9 +14,8 @@ use tokio::sync::watch;
use garage_util::data::*;
use garage_util::error::Error;
use garage_rpc::membership::System;
use garage_rpc::rpc_client::*;
use garage_rpc::rpc_server::*;
use garage_rpc::system::System;
use garage_rpc::*;
use crate::data::*;
use crate::replication::*;
@ -24,11 +24,11 @@ use crate::schema::*;
const TABLE_GC_BATCH_SIZE: usize = 1024;
const TABLE_GC_RPC_TIMEOUT: Duration = Duration::from_secs(30);
pub struct TableGc<F: TableSchema, R: TableReplication> {
pub struct TableGc<F: TableSchema + 'static, R: TableReplication + 'static> {
system: Arc<System>,
data: Arc<TableData<F, R>>,
rpc_client: Arc<RpcClient<GcRpc>>,
endpoint: Arc<Endpoint<GcRpc, Self>>,
}
#[derive(Serialize, Deserialize)]
@ -38,28 +38,27 @@ enum GcRpc {
Ok,
}
impl RpcMessage for GcRpc {}
impl Rpc for GcRpc {
type Response = Result<GcRpc, Error>;
}
impl<F, R> TableGc<F, R>
where
F: TableSchema + 'static,
R: TableReplication + 'static,
{
pub(crate) fn launch(
system: Arc<System>,
data: Arc<TableData<F, R>>,
rpc_server: &mut RpcServer,
) -> Arc<Self> {
let rpc_path = format!("table_{}/gc", data.name);
let rpc_client = system.rpc_client::<GcRpc>(&rpc_path);
pub(crate) fn launch(system: Arc<System>, data: Arc<TableData<F, R>>) -> Arc<Self> {
let endpoint = system
.netapp
.endpoint(format!("garage_table/gc.rs/Rpc:{}", data.name));
let gc = Arc::new(Self {
system: system.clone(),
data: data.clone(),
rpc_client,
endpoint,
});
gc.register_handler(rpc_server, rpc_path);
gc.endpoint.set_handler(gc.clone());
let gc1 = gc.clone();
system.background.spawn_worker(
@ -180,11 +179,15 @@ where
deletes.push((k, vhash));
}
self.rpc_client
self.system
.rpc
.try_call_many(
&self.endpoint,
&nodes[..],
GcRpc::Update(updates),
RequestStrategy::with_quorum(nodes.len()).with_timeout(TABLE_GC_RPC_TIMEOUT),
RequestStrategy::with_priority(PRIO_BACKGROUND)
.with_quorum(nodes.len())
.with_timeout(TABLE_GC_RPC_TIMEOUT),
)
.await?;
@ -193,11 +196,15 @@ where
self.data.name, n_items
);
self.rpc_client
self.system
.rpc
.try_call_many(
&self.endpoint,
&nodes[..],
GcRpc::DeleteIfEqualHash(deletes.clone()),
RequestStrategy::with_quorum(nodes.len()).with_timeout(TABLE_GC_RPC_TIMEOUT),
RequestStrategy::with_priority(PRIO_BACKGROUND)
.with_quorum(nodes.len())
.with_timeout(TABLE_GC_RPC_TIMEOUT),
)
.await?;
@ -216,25 +223,15 @@ where
.compare_and_swap::<_, _, Vec<u8>>(key, Some(vhash), None)?;
Ok(())
}
}
// ---- RPC HANDLER ----
fn register_handler(self: &Arc<Self>, rpc_server: &mut RpcServer, path: String) {
let self2 = self.clone();
rpc_server.add_handler::<GcRpc, _, _>(path, move |msg, _addr| {
let self2 = self2.clone();
async move { self2.handle_rpc(&msg).await }
});
let self2 = self.clone();
self.rpc_client
.set_local_handler(self.system.id, move |msg| {
let self2 = self2.clone();
async move { self2.handle_rpc(&msg).await }
});
}
async fn handle_rpc(self: &Arc<Self>, message: &GcRpc) -> Result<GcRpc, Error> {
#[async_trait]
impl<F, R> EndpointHandler<GcRpc> for TableGc<F, R>
where
F: TableSchema + 'static,
R: TableReplication + 'static,
{
async fn handle(self: &Arc<Self>, message: &GcRpc, _from: NodeID) -> Result<GcRpc, Error> {
match message {
GcRpc::Update(items) => {
self.data.update_many(items)?;

View File

@ -1,7 +1,7 @@
use std::sync::Arc;
use garage_rpc::membership::System;
use garage_rpc::ring::*;
use garage_rpc::system::System;
use garage_util::data::*;
use crate::replication::*;

View File

@ -1,5 +1,4 @@
use garage_rpc::ring::*;
use garage_util::data::*;
/// Trait to describe how a table shall be replicated

View File

@ -1,7 +1,7 @@
use std::sync::Arc;
use garage_rpc::membership::System;
use garage_rpc::ring::*;
use garage_rpc::system::System;
use garage_util::data::*;
use crate::replication::*;

View File

@ -2,6 +2,7 @@ use std::collections::VecDeque;
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};
use async_trait::async_trait;
use futures::select;
use futures_util::future::*;
use futures_util::stream::*;
@ -13,10 +14,9 @@ use tokio::sync::{mpsc, watch};
use garage_util::data::*;
use garage_util::error::Error;
use garage_rpc::membership::System;
use garage_rpc::ring::*;
use garage_rpc::rpc_client::*;
use garage_rpc::rpc_server::*;
use garage_rpc::system::System;
use garage_rpc::*;
use crate::data::*;
use crate::merkle::*;
@ -28,13 +28,13 @@ const TABLE_SYNC_RPC_TIMEOUT: Duration = Duration::from_secs(30);
// Do anti-entropy every 10 minutes
const ANTI_ENTROPY_INTERVAL: Duration = Duration::from_secs(10 * 60);
pub struct TableSyncer<F: TableSchema, R: TableReplication> {
pub struct TableSyncer<F: TableSchema + 'static, R: TableReplication + 'static> {
system: Arc<System>,
data: Arc<TableData<F, R>>,
merkle: Arc<MerkleUpdater<F, R>>,
todo: Mutex<SyncTodo>,
rpc_client: Arc<RpcClient<SyncRpc>>,
endpoint: Arc<Endpoint<SyncRpc, Self>>,
}
#[derive(Serialize, Deserialize)]
@ -47,7 +47,9 @@ pub(crate) enum SyncRpc {
Ok,
}
impl RpcMessage for SyncRpc {}
impl Rpc for SyncRpc {
type Response = Result<SyncRpc, Error>;
}
struct SyncTodo {
todo: Vec<TodoPartition>,
@ -72,10 +74,10 @@ where
system: Arc<System>,
data: Arc<TableData<F, R>>,
merkle: Arc<MerkleUpdater<F, R>>,
rpc_server: &mut RpcServer,
) -> Arc<Self> {
let rpc_path = format!("table_{}/sync", data.name);
let rpc_client = system.rpc_client::<SyncRpc>(&rpc_path);
let endpoint = system
.netapp
.endpoint(format!("garage_table/sync.rs/Rpc:{}", data.name));
let todo = SyncTodo { todo: vec![] };
@ -84,10 +86,10 @@ where
data: data.clone(),
merkle,
todo: Mutex::new(todo),
rpc_client,
endpoint,
});
syncer.register_handler(rpc_server, rpc_path);
syncer.endpoint.set_handler(syncer.clone());
let (busy_tx, busy_rx) = mpsc::unbounded_channel();
@ -112,21 +114,6 @@ where
syncer
}
fn register_handler(self: &Arc<Self>, rpc_server: &mut RpcServer, path: String) {
let self2 = self.clone();
rpc_server.add_handler::<SyncRpc, _, _>(path, move |msg, _addr| {
let self2 = self2.clone();
async move { self2.handle_rpc(&msg).await }
});
let self2 = self.clone();
self.rpc_client
.set_local_handler(self.system.id, move |msg| {
let self2 = self2.clone();
async move { self2.handle_rpc(&msg).await }
});
}
async fn watcher_task(
self: Arc<Self>,
mut must_exit: watch::Receiver<bool>,
@ -321,11 +308,15 @@ where
) -> Result<(), Error> {
let values = items.iter().map(|(_k, v)| v.clone()).collect::<Vec<_>>();
self.rpc_client
self.system
.rpc
.try_call_many(
&self.endpoint,
nodes,
SyncRpc::Items(values),
RequestStrategy::with_quorum(nodes.len()).with_timeout(TABLE_SYNC_RPC_TIMEOUT),
RequestStrategy::with_priority(PRIO_BACKGROUND)
.with_quorum(nodes.len())
.with_timeout(TABLE_SYNC_RPC_TIMEOUT),
)
.await?;
@ -378,11 +369,14 @@ where
// Check if they have the same root checksum
// If so, do nothing.
let root_resp = self
.rpc_client
.system
.rpc
.call(
&self.endpoint,
who,
SyncRpc::RootCkHash(partition.partition, root_ck_hash),
TABLE_SYNC_RPC_TIMEOUT,
RequestStrategy::with_priority(PRIO_BACKGROUND)
.with_timeout(TABLE_SYNC_RPC_TIMEOUT),
)
.await?;
@ -430,8 +424,15 @@ where
// Get Merkle node for this tree position at remote node
// and compare it with local node
let remote_node = match self
.rpc_client
.call(who, SyncRpc::GetNode(key.clone()), TABLE_SYNC_RPC_TIMEOUT)
.system
.rpc
.call(
&self.endpoint,
who,
SyncRpc::GetNode(key.clone()),
RequestStrategy::with_priority(PRIO_BACKGROUND)
.with_timeout(TABLE_SYNC_RPC_TIMEOUT),
)
.await?
{
SyncRpc::Node(_, node) => node,
@ -492,8 +493,15 @@ where
.collect::<Vec<_>>();
let rpc_resp = self
.rpc_client
.call(who, SyncRpc::Items(values), TABLE_SYNC_RPC_TIMEOUT)
.system
.rpc
.call(
&self.endpoint,
who,
SyncRpc::Items(values),
RequestStrategy::with_priority(PRIO_BACKGROUND)
.with_timeout(TABLE_SYNC_RPC_TIMEOUT),
)
.await?;
if let SyncRpc::Ok = rpc_resp {
Ok(())
@ -504,10 +512,17 @@ where
)))
}
}
}
// ======= SYNCHRONIZATION PROCEDURE -- RECEIVER SIDE ======
async fn handle_rpc(self: &Arc<Self>, message: &SyncRpc) -> Result<SyncRpc, Error> {
#[async_trait]
impl<F, R> EndpointHandler<SyncRpc> for TableSyncer<F, R>
where
F: TableSchema + 'static,
R: TableReplication + 'static,
{
async fn handle(self: &Arc<Self>, message: &SyncRpc, _from: NodeID) -> Result<SyncRpc, Error> {
match message {
SyncRpc::RootCkHash(range, h) => {
let (_root_ck_key, root_ck) = self.get_root_ck(*range)?;

View File

@ -2,6 +2,7 @@ use std::collections::{BTreeMap, HashMap};
use std::sync::Arc;
use std::time::Duration;
use async_trait::async_trait;
use futures::stream::*;
use serde::{Deserialize, Serialize};
use serde_bytes::ByteBuf;
@ -9,9 +10,8 @@ use serde_bytes::ByteBuf;
use garage_util::data::*;
use garage_util::error::Error;
use garage_rpc::membership::System;
use garage_rpc::rpc_client::*;
use garage_rpc::rpc_server::*;
use garage_rpc::system::System;
use garage_rpc::*;
use crate::crdt::Crdt;
use crate::data::*;
@ -23,12 +23,12 @@ use crate::sync::*;
const TABLE_RPC_TIMEOUT: Duration = Duration::from_secs(10);
pub struct Table<F: TableSchema, R: TableReplication> {
pub struct Table<F: TableSchema + 'static, R: TableReplication + 'static> {
pub system: Arc<System>,
pub data: Arc<TableData<F, R>>,
pub merkle_updater: Arc<MerkleUpdater<F, R>>,
pub syncer: Arc<TableSyncer<F, R>>,
rpc_client: Arc<RpcClient<TableRpc<F>>>,
endpoint: Arc<Endpoint<TableRpc<F>, Self>>,
}
#[derive(Serialize, Deserialize)]
@ -44,7 +44,9 @@ pub(crate) enum TableRpc<F: TableSchema> {
Update(Vec<Arc<ByteBuf>>),
}
impl<F: TableSchema> RpcMessage for TableRpc<F> {}
impl<F: TableSchema> Rpc for TableRpc<F> {
type Response = Result<TableRpc<F>, Error>;
}
impl<F, R> Table<F, R>
where
@ -59,32 +61,27 @@ where
system: Arc<System>,
db: &sled::Db,
name: String,
rpc_server: &mut RpcServer,
) -> Arc<Self> {
let rpc_path = format!("table_{}", name);
let rpc_client = system.rpc_client::<TableRpc<F>>(&rpc_path);
let endpoint = system
.netapp
.endpoint(format!("garage_table/table.rs/Rpc:{}", name));
let data = TableData::new(system.clone(), name, instance, replication, db);
let merkle_updater = MerkleUpdater::launch(&system.background, data.clone());
let syncer = TableSyncer::launch(
system.clone(),
data.clone(),
merkle_updater.clone(),
rpc_server,
);
TableGc::launch(system.clone(), data.clone(), rpc_server);
let syncer = TableSyncer::launch(system.clone(), data.clone(), merkle_updater.clone());
TableGc::launch(system.clone(), data.clone());
let table = Arc::new(Self {
system,
data,
merkle_updater,
syncer,
rpc_client,
endpoint,
});
table.clone().register_handler(rpc_server, rpc_path);
table.endpoint.set_handler(table.clone());
table
}
@ -97,11 +94,14 @@ where
let e_enc = Arc::new(ByteBuf::from(rmp_to_vec_all_named(e)?));
let rpc = TableRpc::<F>::Update(vec![e_enc]);
self.rpc_client
self.system
.rpc
.try_call_many(
&self.endpoint,
&who[..],
rpc,
RequestStrategy::with_quorum(self.data.replication.write_quorum())
RequestStrategy::with_priority(PRIO_NORMAL)
.with_quorum(self.data.replication.write_quorum())
.with_timeout(TABLE_RPC_TIMEOUT),
)
.await?;
@ -123,7 +123,16 @@ where
let call_futures = call_list.drain().map(|(node, entries)| async move {
let rpc = TableRpc::<F>::Update(entries);
let resp = self.rpc_client.call(node, rpc, TABLE_RPC_TIMEOUT).await?;
let resp = self
.system
.rpc
.call(
&self.endpoint,
node,
rpc,
RequestStrategy::with_priority(PRIO_NORMAL).with_timeout(TABLE_RPC_TIMEOUT),
)
.await?;
Ok::<_, Error>((node, resp))
});
let mut resps = call_futures.collect::<FuturesUnordered<_>>();
@ -152,11 +161,14 @@ where
let rpc = TableRpc::<F>::ReadEntry(partition_key.clone(), sort_key.clone());
let resps = self
.rpc_client
.system
.rpc
.try_call_many(
&self.endpoint,
&who[..],
rpc,
RequestStrategy::with_quorum(self.data.replication.read_quorum())
RequestStrategy::with_priority(PRIO_NORMAL)
.with_quorum(self.data.replication.read_quorum())
.with_timeout(TABLE_RPC_TIMEOUT)
.interrupt_after_quorum(true),
)
@ -208,11 +220,14 @@ where
let rpc = TableRpc::<F>::ReadRange(partition_key.clone(), begin_sort_key, filter, limit);
let resps = self
.rpc_client
.system
.rpc
.try_call_many(
&self.endpoint,
&who[..],
rpc,
RequestStrategy::with_quorum(self.data.replication.read_quorum())
RequestStrategy::with_priority(PRIO_NORMAL)
.with_quorum(self.data.replication.read_quorum())
.with_timeout(TABLE_RPC_TIMEOUT)
.interrupt_after_quorum(true),
)
@ -263,34 +278,32 @@ where
async fn repair_on_read(&self, who: &[Uuid], what: F::E) -> Result<(), Error> {
let what_enc = Arc::new(ByteBuf::from(rmp_to_vec_all_named(&what)?));
self.rpc_client
self.system
.rpc
.try_call_many(
&self.endpoint,
who,
TableRpc::<F>::Update(vec![what_enc]),
RequestStrategy::with_quorum(who.len()).with_timeout(TABLE_RPC_TIMEOUT),
RequestStrategy::with_priority(PRIO_NORMAL)
.with_quorum(who.len())
.with_timeout(TABLE_RPC_TIMEOUT),
)
.await?;
Ok(())
}
}
// =============== HANDLERS FOR RPC OPERATIONS (SERVER SIDE) ==============
fn register_handler(self: Arc<Self>, rpc_server: &mut RpcServer, path: String) {
let self2 = self.clone();
rpc_server.add_handler::<TableRpc<F>, _, _>(path, move |msg, _addr| {
let self2 = self2.clone();
async move { self2.handle(&msg).await }
});
let self2 = self.clone();
self.rpc_client
.set_local_handler(self.system.id, move |msg| {
let self2 = self2.clone();
async move { self2.handle(&msg).await }
});
}
async fn handle(self: &Arc<Self>, msg: &TableRpc<F>) -> Result<TableRpc<F>, Error> {
#[async_trait]
impl<F, R> EndpointHandler<TableRpc<F>> for Table<F, R>
where
F: TableSchema + 'static,
R: TableReplication + 'static,
{
async fn handle(
self: &Arc<Self>,
msg: &TableRpc<F>,
_from: NodeID,
) -> Result<TableRpc<F>, Error> {
match msg {
TableRpc::ReadEntry(key, sort_key) => {
let value = self.data.read_entry(key, sort_key)?;

View File

@ -1,6 +1,6 @@
[package]
name = "garage_util"
version = "0.3.0"
version = "0.4.0"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
@ -32,7 +32,6 @@ toml = "0.5"
futures = "0.3"
tokio = { version = "1.0", default-features = false, features = ["rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] }
netapp = { version = "0.3.0", git = "https://git.deuxfleurs.fr/lx/netapp" }
http = "0.2"
hyper = "0.14"
rustls = "0.19"
webpki = "0.21"

View File

@ -3,8 +3,12 @@ use std::io::Read;
use std::net::SocketAddr;
use std::path::PathBuf;
use serde::de::Error as SerdeError;
use serde::{de, Deserialize};
use netapp::util::parse_and_resolve_peer_addr;
use netapp::NodeID;
use crate::error::Error;
/// Represent the whole configuration
@ -26,24 +30,22 @@ pub struct Config {
// (we can add more aliases for this later)
pub replication_mode: String,
/// RPC secret key: 32 bytes hex encoded
pub rpc_secret: String,
/// Address to bind for RPC
pub rpc_bind_addr: SocketAddr,
/// Public IP address of this node
pub rpc_public_addr: Option<SocketAddr>,
/// Bootstrap peers RPC address
#[serde(deserialize_with = "deserialize_vec_addr")]
pub bootstrap_peers: Vec<SocketAddr>,
/// Consule host to connect to to discover more peers
pub bootstrap_peers: Vec<(NodeID, SocketAddr)>,
/// Consul host to connect to in order to discover more peers
pub consul_host: Option<String>,
/// Consul service name to use
pub consul_service_name: Option<String>,
/// Configuration for RPC TLS
pub rpc_tls: Option<TlsConfig>,
/// Max number of concurrent RPC request
#[serde(default = "default_max_concurrent_rpc_requests")]
pub max_concurrent_rpc_requests: usize,
/// Sled cache size, in bytes
#[serde(default = "default_sled_cache_capacity")]
pub sled_cache_capacity: u64,
@ -59,17 +61,6 @@ pub struct Config {
pub s3_web: WebConfig,
}
/// Configuration for RPC TLS
#[derive(Deserialize, Debug, Clone)]
pub struct TlsConfig {
/// Path to certificate authority used for all nodes
pub ca_cert: String,
/// Path to public certificate for this node
pub node_cert: String,
/// Path to private key for this node
pub node_key: String,
}
/// Configuration for S3 api
#[derive(Deserialize, Debug, Clone)]
pub struct ApiConfig {
@ -96,9 +87,6 @@ fn default_sled_cache_capacity() -> u64 {
fn default_sled_flush_every_ms() -> u64 {
2000
}
fn default_max_concurrent_rpc_requests() -> usize {
12
}
fn default_block_size() -> usize {
1048576
}
@ -115,27 +103,20 @@ pub fn read_config(config_file: PathBuf) -> Result<Config, Error> {
Ok(toml::from_str(&config)?)
}
fn deserialize_vec_addr<'de, D>(deserializer: D) -> Result<Vec<SocketAddr>, D::Error>
fn deserialize_vec_addr<'de, D>(deserializer: D) -> Result<Vec<(NodeID, SocketAddr)>, D::Error>
where
D: de::Deserializer<'de>,
{
use std::net::ToSocketAddrs;
let mut ret = vec![];
Ok(<Vec<&str>>::deserialize(deserializer)?
.iter()
.filter_map(|&name| {
name.to_socket_addrs()
.map(|iter| (name, iter))
.map_err(|_| warn!("Error resolving \"{}\"", name))
.ok()
})
.map(|(name, iter)| {
let v = iter.collect::<Vec<_>>();
if v.is_empty() {
warn!("Error resolving \"{}\"", name)
}
v
})
.flatten()
.collect())
for peer in <Vec<&str>>::deserialize(deserializer)? {
let (pubkey, addrs) = parse_and_resolve_peer_addr(peer).ok_or_else(|| {
D::Error::custom(format!("Unable to parse or resolve peer: {}", peer))
})?;
for ip in addrs {
ret.push((pubkey, ip));
}
}
Ok(ret)
}
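In practice this means each bootstrap_peers entry in the TOML config now carries the peer's public key, in the same "<hex key>@<host>:<port>" form the node prints at startup. A small sketch of the parsing step, assuming the netapp and hex crates used elsewhere in this PR (the key and address are placeholders):

use netapp::util::parse_and_resolve_peer_addr;

fn main() {
    // Placeholder values; a real entry uses the peer's actual public key.
    let peer = "563e1ac825ee3eed79c75eef85fdebe2b637a4d4ce7f06d9fecc08057e1fc8c4@10.0.0.1:3901";
    if let Some((pubkey, addrs)) = parse_and_resolve_peer_addr(peer) {
        // One (NodeID, SocketAddr) pair is produced per resolved IP address.
        for addr in addrs {
            println!("{} -> {}", hex::encode(pubkey.as_ref()), addr);
        }
    }
}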

View File

@ -87,6 +87,18 @@ impl FixedBytes32 {
}
}
impl From<netapp::NodeID> for FixedBytes32 {
fn from(node_id: netapp::NodeID) -> FixedBytes32 {
FixedBytes32::try_from(node_id.as_ref()).unwrap()
}
}
impl From<FixedBytes32> for netapp::NodeID {
fn from(bytes: FixedBytes32) -> netapp::NodeID {
netapp::NodeID::from_slice(bytes.as_slice()).unwrap()
}
}
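These conversions let the rest of Garage keep using Uuid while netapp identifies peers by their ed25519 public key; both are 32 bytes, so they convert losslessly. A minimal sketch:

// Round-trip between netapp's NodeID and Garage's Uuid (= FixedBytes32).
fn node_id_roundtrip(id: netapp::NodeID) -> netapp::NodeID {
    let as_uuid: Uuid = id.into(); // NodeID -> FixedBytes32
    as_uuid.into() // FixedBytes32 -> NodeID
}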
/// A 32 bytes UUID
pub type Uuid = FixedBytes32;
/// A 256 bit cryptographic hash, can be sha256 or blake2 depending on provenance

View File

@ -1,35 +1,13 @@
//! Module containing error types used in Garage
use err_derive::Error;
use hyper::StatusCode;
use std::fmt;
use std::io;
use err_derive::Error;
use serde::{de::Visitor, Deserialize, Deserializer, Serialize, Serializer};
use crate::data::*;
/// RPC related errors
#[derive(Debug, Error)]
pub enum RpcError {
#[error(display = "Node is down: {:?}.", _0)]
NodeDown(Uuid),
#[error(display = "Timeout: {}", _0)]
Timeout(#[error(source)] tokio::time::error::Elapsed),
#[error(display = "HTTP error: {}", _0)]
Http(#[error(source)] http::Error),
#[error(display = "Hyper error: {}", _0)]
Hyper(#[error(source)] hyper::Error),
#[error(display = "Messagepack encode error: {}", _0)]
RmpEncode(#[error(source)] rmp_serde::encode::Error),
#[error(display = "Messagepack decode error: {}", _0)]
RmpDecode(#[error(source)] rmp_serde::decode::Error),
#[error(display = "Too many errors: {:?}", _0)]
TooManyErrors(Vec<String>),
}
/// Regroup all Garage errors
#[derive(Debug, Error)]
pub enum Error {
@ -45,11 +23,8 @@ pub enum Error {
#[error(display = "Invalid HTTP header value: {}", _0)]
HttpHeader(#[error(source)] http::header::ToStrError),
#[error(display = "TLS error: {}", _0)]
Tls(#[error(source)] rustls::TLSError),
#[error(display = "PKI error: {}", _0)]
Pki(#[error(source)] webpki::Error),
#[error(display = "Netapp error: {}", _0)]
Netapp(#[error(source)] netapp::error::Error),
#[error(display = "Sled error: {}", _0)]
Sled(#[error(source)] sled::Error),
@ -66,11 +41,20 @@ pub enum Error {
#[error(display = "Tokio join error: {}", _0)]
TokioJoin(#[error(source)] tokio::task::JoinError),
#[error(display = "RPC call error: {}", _0)]
Rpc(#[error(source)] RpcError),
#[error(display = "Remote error: {}", _0)]
RemoteError(String),
#[error(display = "Remote error: {} (status code {})", _0, _1)]
RemoteError(String, StatusCode),
#[error(display = "Timeout")]
Timeout,
#[error(
display = "Could not reach quorum of {}. {} of {} request succeeded, others returned errors: {:?}",
_0,
_1,
_2,
_3
)]
Quorum(usize, usize, usize, Vec<String>),
#[error(display = "Bad RPC: {}", _0)]
BadRpc(String),
@ -102,3 +86,73 @@ impl<T> From<tokio::sync::mpsc::error::SendError<T>> for Error {
Error::Message("MPSC send error".to_string())
}
}
impl<'a> From<&'a str> for Error {
fn from(v: &'a str) -> Error {
Error::Message(v.to_string())
}
}
impl From<String> for Error {
fn from(v: String) -> Error {
Error::Message(v)
}
}
pub trait ErrorContext<T, E> {
fn err_context<C: std::borrow::Borrow<str>>(self, ctx: C) -> Result<T, Error>;
}
impl<T, E> ErrorContext<T, E> for Result<T, E>
where
E: std::fmt::Display,
{
#[inline]
fn err_context<C: std::borrow::Borrow<str>>(self, ctx: C) -> Result<T, Error> {
match self {
Ok(x) => Ok(x),
Err(e) => Err(Error::Message(format!("{}\n{}", ctx.borrow(), e))),
}
}
}
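Usage sketch of err_context, mirroring the .err_context(CONNECT_ERROR_MESSAGE) calls earlier in this diff; the helper below and its path argument are illustrative:

fn read_secret_file(path: &str) -> Result<String, Error> {
    // Any error implementing Display gets wrapped into Error::Message,
    // with the context line prepended to the underlying error.
    std::fs::read_to_string(path).err_context("Could not read secret file")
}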
// Custom serialization for our error type, for use in RPC.
// Errors are serialized as a string of their Display representation.
// Upon deserialization, they all become a RemoteError with the
// given representation.
impl Serialize for Error {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(&format!("{}", self))
}
}
impl<'de> Deserialize<'de> for Error {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_string(ErrorVisitor)
}
}
struct ErrorVisitor;
impl<'de> Visitor<'de> for ErrorVisitor {
type Value = Error;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "a string that represents an error value")
}
fn visit_str<E>(self, error_msg: &str) -> Result<Self::Value, E> {
Ok(Error::RemoteError(error_msg.to_string()))
}
fn visit_string<E>(self, error_msg: String) -> Result<Self::Value, E> {
Ok(Error::RemoteError(error_msg))
}
}
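Taken together, this gives RPC errors a simple wire contract: an Error serializes to its Display string and deserializes back as Error::RemoteError. A sketch of the round trip, using rmp_serde directly purely for illustration:

fn error_roundtrip_sketch() -> Result<(), rmp_serde::decode::Error> {
    let e = Error::Message("something failed".to_string());
    // Serialized as the error's Display string...
    let bytes = rmp_serde::to_vec(&e).expect("string serialization cannot fail");
    // ...and deserialized as Error::RemoteError on the receiving side.
    let remote: Error = rmp_serde::from_read_ref(&bytes)?;
    assert!(matches!(remote, Error::RemoteError(_)));
    Ok(())
}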

View File

@ -1,6 +1,6 @@
[package]
name = "garage_web"
version = "0.3.0"
version = "0.4.0"
authors = ["Alex Auvolat <alex@adnab.me>", "Quentin Dufour <quentin@dufour.io>"]
edition = "2018"
license = "AGPL-3.0"
@ -13,10 +13,10 @@ path = "lib.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
garage_api = { version = "0.3.0", path = "../api" }
garage_model = { version = "0.3.0", path = "../model" }
garage_util = { version = "0.3.0", path = "../util" }
garage_table = { version = "0.3.0", path = "../table" }
garage_api = { version = "0.4.0", path = "../api" }
garage_model = { version = "0.4.0", path = "../model" }
garage_util = { version = "0.4.0", path = "../util" }
garage_table = { version = "0.4.0", path = "../table" }
err-derive = "0.3"
idna = "0.2"
@ -26,4 +26,4 @@ percent-encoding = "2.1.0"
futures = "0.3"
http = "0.2"
hyper = "0.14"
hyper = { version = "0.14", features = ["server", "http1", "runtime", "tcp", "stream"] }

View File

@ -38,7 +38,11 @@ impl Error {
match self {
Error::NotFound => StatusCode::NOT_FOUND,
Error::ApiError(e) => e.http_status_code(),
Error::InternalError(GarageError::Rpc(_)) => StatusCode::SERVICE_UNAVAILABLE,
Error::InternalError(
GarageError::Timeout
| GarageError::RemoteError(_)
| GarageError::Quorum(_, _, _, _),
) => StatusCode::SERVICE_UNAVAILABLE,
Error::InternalError(_) => StatusCode::INTERNAL_SERVER_ERROR,
_ => StatusCode::BAD_REQUEST,
}