Compare commits

1 commit

Commit 6558c15863: decrease write quorum (2024-05-15 08:05:18 +02:00)
All checks were successful: ci/woodpecker/push/debug pipeline
35 changed files with 224 additions and 336 deletions

Cargo.lock generated
View file

@ -1527,7 +1527,6 @@ dependencies = [
"garage_util",
"gethostname",
"hex",
"ipnet",
"itertools 0.12.1",
"k8s-openapi",
"kube",
@ -4082,9 +4081,9 @@ dependencies = [
[[package]]
name = "time"
version = "0.3.36"
version = "0.3.34"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885"
checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749"
dependencies = [
"deranged",
"num-conv",
@ -4102,9 +4101,9 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3"
[[package]]
name = "time-macros"
version = "0.2.18"
version = "0.2.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf"
checksum = "7ba3a3ef41e6672a2f0f001392bb5dcd3ff0a9992d618ca761a11c3121547774"
dependencies = [
"num-conv",
"time-core",

View file

@ -34,7 +34,7 @@ args@{
ignoreLockHash,
}:
let
nixifiedLockHash = "c0aa85d369b22875a652356862a5810c22838970be9fbec558dd108d5232881d";
nixifiedLockHash = "1ccd5eb25a83962821e0e9da4ce6df31717b2b97a5b3a0c80c9e0e0759710143";
workspaceSrc = if args.workspaceSrc == null then ./. else args.workspaceSrc;
currentLockHash = builtins.hashFile "sha256" (workspaceSrc + /Cargo.lock);
lockHashIgnored = if ignoreLockHash
@ -424,7 +424,7 @@ in
http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.11" { inherit profileName; }).out;
hyper = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."0.14.28" { inherit profileName; }).out;
ring = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".ring."0.17.7" { inherit profileName; }).out;
time = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".time."0.3.36" { inherit profileName; }).out;
time = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".time."0.3.34" { inherit profileName; }).out;
tokio = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.36.0" { inherit profileName; }).out;
tracing = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tracing."0.1.40" { inherit profileName; }).out;
zeroize = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".zeroize."1.7.0" { inherit profileName; }).out;
@ -643,7 +643,7 @@ in
ring = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".ring."0.17.7" { inherit profileName; }).out;
sha2 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".sha2."0.10.8" { inherit profileName; }).out;
subtle = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".subtle."2.5.0" { inherit profileName; }).out;
time = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".time."0.3.36" { inherit profileName; }).out;
time = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".time."0.3.34" { inherit profileName; }).out;
tracing = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tracing."0.1.40" { inherit profileName; }).out;
zeroize = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".zeroize."1.7.0" { inherit profileName; }).out;
};
@ -823,7 +823,7 @@ in
pin_utils = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".pin-utils."0.1.0" { inherit profileName; }).out;
ryu = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".ryu."1.0.16" { inherit profileName; }).out;
${ if false then "serde" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.196" { inherit profileName; }).out;
time = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".time."0.3.36" { inherit profileName; }).out;
time = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".time."0.3.34" { inherit profileName; }).out;
tokio = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.36.0" { inherit profileName; }).out;
tokio_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio-util."0.7.10" { inherit profileName; }).out;
};
@ -2219,7 +2219,6 @@ in
garage_util = (rustPackages."unknown".garage_util."1.0.0" { inherit profileName; }).out;
gethostname = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".gethostname."0.4.3" { inherit profileName; }).out;
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
ipnet = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".ipnet."2.9.0" { inherit profileName; }).out;
itertools = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".itertools."0.12.1" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/k8s-openapi" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "k8s_openapi" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".k8s-openapi."0.21.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "kube" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".kube."0.88.1" { inherit profileName; }).out;
@ -3017,8 +3016,8 @@ in
registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3"; };
features = builtins.concatLists [
[ "default" ]
[ "std" ]
(lib.optional (rootFeatures' ? "garage/consul-discovery" || rootFeatures' ? "garage_rpc/consul-discovery" || rootFeatures' ? "garage_rpc/reqwest") "default")
(lib.optional (rootFeatures' ? "garage/consul-discovery" || rootFeatures' ? "garage_rpc/consul-discovery" || rootFeatures' ? "garage_rpc/reqwest") "std")
];
});
@ -5781,11 +5780,11 @@ in
};
});
"registry+https://github.com/rust-lang/crates.io-index".time."0.3.36" = overridableMkRustCrate (profileName: rec {
"registry+https://github.com/rust-lang/crates.io-index".time."0.3.34" = overridableMkRustCrate (profileName: rec {
name = "time";
version = "0.3.36";
version = "0.3.34";
registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885"; };
src = fetchCratesIo { inherit name version; sha256 = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749"; };
features = builtins.concatLists [
[ "alloc" ]
[ "default" ]
@ -5798,7 +5797,7 @@ in
powerfmt = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".powerfmt."0.2.0" { inherit profileName; }).out;
serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.196" { inherit profileName; }).out;
time_core = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".time-core."0.1.2" { inherit profileName; }).out;
time_macros = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".time-macros."0.2.18" { profileName = "__noProfile"; }).out;
time_macros = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".time-macros."0.2.17" { profileName = "__noProfile"; }).out;
};
});
@ -5809,11 +5808,11 @@ in
src = fetchCratesIo { inherit name version; sha256 = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3"; };
});
"registry+https://github.com/rust-lang/crates.io-index".time-macros."0.2.18" = overridableMkRustCrate (profileName: rec {
"registry+https://github.com/rust-lang/crates.io-index".time-macros."0.2.17" = overridableMkRustCrate (profileName: rec {
name = "time-macros";
version = "0.2.18";
version = "0.2.17";
registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf"; };
src = fetchCratesIo { inherit name version; sha256 = "7ba3a3ef41e6672a2f0f001392bb5dcd3ff0a9992d618ca761a11c3121547774"; };
features = builtins.concatLists [
[ "parsing" ]
];

View file

@ -55,7 +55,6 @@ hexdump = "0.1"
hmac = "0.12"
idna = "0.5"
itertools = "0.12"
ipnet = "2.9.0"
lazy_static = "1.4"
md-5 = "0.10"
mktemp = "0.5"

View file

@ -152,8 +152,6 @@ Check the following for your configuration files:
- Make sure `rpc_public_addr` contains the public IP address of the node you are configuring.
This parameter is optional but recommended: if your nodes have trouble communicating with
one another, consider adding it.
Alternatively, you can also set `rpc_public_addr_subnet`, which can filter
the addresses announced to other peers to a specific subnet.
- Make sure `rpc_secret` is the same value on all nodes. It should be a 32-bytes hex-encoded secret key.
You can generate such a key with `openssl rand -hex 32`.
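
As a rough illustration of the 32-byte requirement above, here is a small sketch (not code from this repository; it assumes the `hex` crate, which already appears in the dependency lists in this diff):

```rust
// Sketch: check that an `rpc_secret` string is valid hex and decodes to
// exactly 32 bytes, as required above.
fn check_rpc_secret(secret_hex: &str) -> Result<[u8; 32], String> {
    let bytes = hex::decode(secret_hex.trim())
        .map_err(|e| format!("rpc_secret is not valid hex: {}", e))?;
    <[u8; 32]>::try_from(bytes.as_slice())
        .map_err(|_| format!("rpc_secret must decode to 32 bytes, got {}", bytes.len()))
}

fn main() {
    // A key generated with `openssl rand -hex 32` is 64 hex characters long.
    let key = "4425f5c26c5e11581d3223904324dcb5b5d5dfb14e5e7f35e38c595424f5f1e6";
    assert!(check_rpc_secret(key).is_ok());
}
```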

View file

@ -67,7 +67,7 @@ Pithos has been abandonned and should probably not used yet, in the following we
Pithos was relying as a S3 proxy in front of Cassandra (and was working with Scylla DB too).
From its designers' mouth, storing data in Cassandra has shown its limitations justifying the project abandonment.
They built a closed-source version 2 that does not store blobs in the database (only metadata) but did not communicate further on it.
We considered their v2's design but concluded that it does not fit both our *Self-contained & lightweight* and *Simple* properties. It makes the development, the deployment and the operations more complicated while reducing the flexibility.
We considered there v2's design but concluded that it does not fit both our *Self-contained & lightweight* and *Simple* properties. It makes the development, the deployment and the operations more complicated while reducing the flexibility.
**[Riak CS](https://docs.riak.com/riak/cs/2.1.1/index.html):**
*Not written yet*

View file

@ -5,7 +5,7 @@ weight = 40
Garage is meant to work on old, second-hand hardware.
In particular, this makes it likely that some of your drives will fail, and some manual intervention will be needed.
Fear not! Garage is fully equipped to handle drive failures, in most common cases.
Fear not! For Garage is fully equipped to handle drive failures, in most common cases.
## A note on availability of Garage
@ -61,7 +61,7 @@ garage repair -a --yes blocks
This will re-synchronize blocks of data that are missing to the new HDD, reading them from copies located on other nodes.
You can check on the advancement of this process by doing the following command:
You can check on the advancement of this process by doing the following command:
```bash
garage stats -a

View file

@ -42,13 +42,6 @@ If a binary of the last version is not available for your architecture,
or if you want a build customized for your system,
you can [build Garage from source](@/documentation/cookbook/from-source.md).
If none of these option work for you, you can also run Garage in a Docker
container. When using Docker, the commands used in this guide will not work
anymore. We recommend reading the tutorial on [configuring a
multi-node cluster](@/documentation/cookbook/real-world.md) to learn about
using Garage as a Docker container. For simplicity, a minimal command to launch
Garage using Docker is provided in this quick start guide as well.
## Configuring and starting Garage
@ -92,9 +85,6 @@ metrics_token = "$(openssl rand -base64 32)"
EOF
```
See the [Configuration file format](https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/)
for complete options and values.
Now that your configuration file has been created, you may save it to the directory of your choice.
By default, Garage looks for **`/etc/garage.toml`.**
You can also store it somewhere else, but you will have to specify `-c path/to/garage.toml`
@ -121,26 +111,6 @@ garage -c path/to/garage.toml server
If you have placed the `garage.toml` file in `/etc` (its default location), you can simply run `garage server`.
Alternatively, if you cannot or do not wish to run the Garage binary directly,
you may use Docker to run Garage in a container using the following command:
```bash
docker run \
-d \
--name garaged \
-p 3900:3900 -p 3901:3901 -p 3902:3902 -p 3903:3903 \
-v /etc/garage.toml:/path/to/garage.toml \
-v /var/lib/garage/meta:/path/to/garage/meta \
-v /var/lib/garage/data:/path/to/garage/data \
dxflrs/garage:v0.9.4
```
Under Linux, you can substitute `--network host` for `-p 3900:3900 -p 3901:3901 -p 3902:3902 -p 3903:3903`
#### Troubleshooting
Ensure your configuration file, `metadata_dir` and `data_dir` are readable by the user running the `garage` server or Docker.
You can tune Garage's verbosity by setting the `RUST_LOG=` environment variable. \
Available log levels are (from less verbose to more verbose): `error`, `warn`, `info` *(default)*, `debug` and `trace`.
@ -161,9 +131,6 @@ It uses values from the TOML configuration file to find the Garage daemon runnin
local node, therefore if your configuration file is not at `/etc/garage.toml` you will
again have to specify `-c path/to/garage.toml` at each invocation.
If you are running Garage in a Docker container, you can set `alias garage="docker exec -ti <container name> /garage"`
to use the Garage binary inside your container.
If the `garage` CLI is able to correctly detect the parameters of your local Garage node,
the following command should be enough to show the status of your cluster:

View file

@ -31,9 +31,6 @@ rpc_secret = "4425f5c26c5e11581d3223904324dcb5b5d5dfb14e5e7f35e38c595424f5f1e6"
rpc_bind_addr = "[::]:3901"
rpc_bind_outgoing = false
rpc_public_addr = "[fc00:1::1]:3901"
# or set rpc_public_adr_subnet to filter down autodiscovery to a subnet:
# rpc_public_addr_subnet = "2001:0db8:f00:b00:/64"
allow_world_readable_secrets = false
@ -108,7 +105,6 @@ Top-level configuration options:
[`rpc_bind_addr`](#rpc_bind_addr),
[`rpc_bind_outgoing`](#rpc_bind_outgoing),
[`rpc_public_addr`](#rpc_public_addr),
[`rpc_public_addr_subnet`](#rpc_public_addr_subnet)
[`rpc_secret`/`rpc_secret_file`](#rpc_secret).
The `[consul_discovery]` section:
@ -299,12 +295,12 @@ Since `v0.8.0`, Garage can use alternative storage backends as follows:
| DB engine | `db_engine` value | Database path |
| --------- | ----------------- | ------------- |
| [LMDB](https://www.symas.com/lmdb) (since `v0.8.0`, default since `v0.9.0`) | `"lmdb"` | `<metadata_dir>/db.lmdb/` |
| [LMDB](https://www.lmdb.tech) (since `v0.8.0`, default since `v0.9.0`) | `"lmdb"` | `<metadata_dir>/db.lmdb/` |
| [Sqlite](https://sqlite.org) (since `v0.8.0`) | `"sqlite"` | `<metadata_dir>/db.sqlite` |
| [Sled](https://sled.rs) (old default, removed since `v1.0`) | `"sled"` | `<metadata_dir>/db/` |
Sled was supported until Garage v0.9.x, and was removed in Garage v1.0.
You can still use an older binary of Garage (e.g. v0.9.4) to migrate
You can still use an older binary of Garage (e.g. v0.9.3) to migrate
old Sled metadata databases to another engine.
Performance characteristics of the different DB engines are as follows:
@ -394,12 +390,10 @@ if geographical replication is used.
If this value is set, Garage will automatically take a snapshot of the metadata
DB file at a regular interval and save it in the metadata directory.
This parameter can take any duration string that can be parsed by
the [`parse_duration`](https://docs.rs/parse_duration/latest/parse_duration/#syntax) crate.
This can allow to recover from situations where the metadata DB file is corrupted,
for instance after an unclean shutdown.
See [this page](@/documentation/operations/recovering.md#corrupted_meta) for details.
Snapshots can allow to recover from situations where the metadata DB file is
corrupted, for instance after an unclean shutdown. See [this
page](@/documentation/operations/recovering.md#corrupted_meta) for details.
Garage keeps only the two most recent snapshots of the metadata DB and deletes
older ones automatically.
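
For reference, a short sketch of how such a duration string can be parsed (an assumption based on the `parse_duration` crate linked above, not code from this diff):

```rust
// Sketch: parse a snapshot-interval string such as this option accepts.
fn main() {
    let interval = parse_duration::parse("6 hours").expect("invalid duration string");
    assert_eq!(interval.as_secs(), 6 * 3600);
    println!("metadata snapshot every {:?}", interval);
}
```
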
@ -418,7 +412,7 @@ month, with a random delay to avoid all nodes running at the same time. When
it scrubs the data directory, Garage will read all of the data files stored on
disk to check their integrity, and will rebuild any data files that it finds
corrupted, using the remaining valid copies stored on other nodes.
See [this page](@/documentation/operations/durability-repairs.md#scrub) for details.
See [this page](@/documentation/operations/durability-repair.md#scrub) for details.
Set the `disable_scrub` configuration value to `true` if you don't need Garage
to scrub the data directory, for instance if you are already scrubbing at the
@ -547,14 +541,6 @@ RPC calls. **This parameter is optional but recommended.** In case you have
a NAT that binds the RPC port to a port that is different on your public IP,
this field might help making it work.
#### `rpc_public_addr_subnet` {#rpc_public_addr_subnet}
In case `rpc_public_addr` is not set, but autodiscovery is used, this allows
filtering the list of automatically discovered IPs to a specific subnet.
For example, if nodes should pick *their* IP inside a specific subnet, but you
don't want to explicitly write the IP down (as it's dynamic, or you want to
share configs across nodes), you can use this option.
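
A minimal sketch of the filtering this option describes (it assumes the `ipnet` crate, which appears elsewhere in this diff; the addresses below are only examples):

```rust
use std::net::IpAddr;

// Sketch: keep only the discovered addresses that fall inside the
// configured `rpc_public_addr_subnet`.
fn filter_by_subnet(candidates: Vec<IpAddr>, subnet: &str) -> Vec<IpAddr> {
    let net: ipnet::IpNet = subnet.parse().expect("invalid rpc_public_addr_subnet");
    candidates.into_iter().filter(|ip| net.contains(ip)).collect()
}

fn main() {
    let ips: Vec<IpAddr> = vec![
        "192.168.1.10".parse().unwrap(),
        "10.0.0.5".parse().unwrap(),
    ];
    // With a /24 subnet, only the address inside it is announced.
    assert_eq!(filter_by_subnet(ips, "192.168.1.0/24").len(), 1);
}
```
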
#### `bootstrap_peers` {#bootstrap_peers}
A list of peer identifiers on which to contact other Garage peers of this cluster.

View file

@ -1,77 +0,0 @@
+++
title = "Migrating from 0.9 to 1.0"
weight = 11
+++
**This guide explains how to migrate to 1.0 if you have an existing 0.9 cluster.
We don't recommend trying to migrate to 1.0 directly from 0.8 or older.**
This migration procedure has been tested on several clusters without issues.
However, it is still a *critical procedure* that might cause issues.
**Make sure to back up all your data before attempting it!**
You might also want to read our [general documentation on upgrading Garage](@/documentation/operations/upgrading.md).
## Changes introduced in v1.0
The following are **breaking changes** in Garage v1.0 that require your attention when migrating:
- The Sled metadata db engine has been **removed**. If your cluster was still
using Sled, you will need to **use a Garage v0.9.x binary** to convert the
database using the `garage convert-db` subcommand. See
[here](@/documentation/reference-manual/configuration.md#db_engine) for the
details of the procedure.
The following syntax changes have been made to the configuration file:
- The `replication_mode` parameter has been split into two parameters:
[`replication_factor`](@/documentation/reference-manual/configuration.md#replication_factor)
and
[`consistency_mode`](@/documentation/reference-manual/configuration.md#consistency_mode).
The old syntax using `replication_mode` is still supported for legacy
reasons and can still be used.
- The parameters `sled_cache_capacity` and `sled_flush_every_ms` have been removed.
## Migration procedure
The migration to Garage v1.0 can be done with almost no downtime,
by restarting all nodes at once in the new version.
The migration steps are as follows:
1. Do a `garage repair --all-nodes --yes tables`, check the logs and check that
all data seems to be synced correctly between nodes. If you have time, do
additional `garage repair` procedures (`blocks`, `versions`, `block_refs`,
etc.)
2. Ensure you have a snapshot of your Garage installation that you can restore
to in case the upgrade goes wrong:
- If you are running Garage v0.9.4 or later, use the `garage meta snapshot
--all` to make a backup snapshot of the metadata directories of your nodes
for backup purposes, and save a copy of the following files in the
metadata directories of your nodes: `cluster_layout`, `data_layout`,
`node_key`, `node_key.pub`.
- If you are running a filesystem such as ZFS or BTRFS that support
snapshotting, you can create a filesystem-level snapshot to be used as a
restoration point if needed.
- In other cases, make a backup using the old procedure: turn off each node
individually; back up its metadata folder (for instance, use the following
command if your metadata directory is `/var/lib/garage/meta`: `cd
/var/lib/garage ; tar -acf meta-v0.9.tar.zst meta/`); turn it back on
again. This will allow you to take a backup of all nodes without
impacting global cluster availability. You can do all nodes of a single
zone at once as this does not impact the availability of Garage.
3. Prepare your updated binaries and configuration files for Garage v1.0
4. Shut down all v0.9 nodes simultaneously, and restart them all simultaneously
in v1.0. Use your favorite deployment tool (Ansible, Kubernetes, Nomad) to
achieve this as fast as possible. Garage v1.0 should be in a working state
as soon as enough nodes have started.
5. Monitor your cluster in the following hours to see if it works well under
your production load.

View file

@ -28,11 +28,11 @@
},
"flake-compat": {
"locked": {
"lastModified": 1717312683,
"narHash": "sha256-FrlieJH50AuvagamEvWMIE6D2OAnERuDboFDYAED/dE=",
"lastModified": 1688025799,
"narHash": "sha256-ktpB4dRtnksm9F5WawoIkEneh1nrEvuxb5lJFt1iOyw=",
"owner": "nix-community",
"repo": "flake-compat",
"rev": "38fd3954cf65ce6faf3d0d45cd26059e059f07ea",
"rev": "8bf105319d44f6b9f0d764efa4fdef9f1cc9ba1c",
"type": "github"
},
"original": {
@ -42,12 +42,33 @@
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1659877975,
"narHash": "sha256-zllb8aq3YO3h8B/U0/J1WBgAL8EX5yWf5pMj3G0NAmc=",
"lastModified": 1681202837,
"narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "c0e246b9b83f637f4681389ecabcb2681b4f3af0",
"rev": "cfacdce06f30d2b68473a46042957675eebb3401",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_2": {
"inputs": {
"systems": "systems_2"
},
"locked": {
"lastModified": 1681202837,
"narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "cfacdce06f30d2b68473a46042957675eebb3401",
"type": "github"
},
"original": {
@ -58,11 +79,11 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1724395761,
"narHash": "sha256-zRkDV/nbrnp3Y8oCADf5ETl1sDrdmAW6/bBVJ8EbIdQ=",
"lastModified": 1682109806,
"narHash": "sha256-d9g7RKNShMLboTWwukM+RObDWWpHKaqTYXB48clBWXI=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "ae815cee91b417be55d43781eb4b73ae1ecc396c",
"rev": "2362848adf8def2866fabbffc50462e929d7fffb",
"type": "github"
},
"original": {
@ -74,17 +95,17 @@
},
"nixpkgs_2": {
"locked": {
"lastModified": 1724681257,
"narHash": "sha256-EJRuc5Qp7yfXko5ZNeEMYAs4DzAvkCyALuJ/tGllhN4=",
"lastModified": 1707091808,
"narHash": "sha256-LahKBAfGbY836gtpVNnWwBTIzN7yf/uYM/S0g393r0Y=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "0239aeb2f82ea27ccd6b61582b8f7fb8750eeada",
"rev": "9f2ee8c91ac42da3ae6c6a1d21555f283458247e",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "0239aeb2f82ea27ccd6b61582b8f7fb8750eeada",
"rev": "9f2ee8c91ac42da3ae6c6a1d21555f283458247e",
"type": "github"
}
},
@ -101,14 +122,15 @@
},
"rust-overlay": {
"inputs": {
"flake-utils": "flake-utils_2",
"nixpkgs": "nixpkgs"
},
"locked": {
"lastModified": 1724638882,
"narHash": "sha256-ap2jIQi/FuUHR6HCht6ASWhoz8EiB99XmI8Esot38VE=",
"lastModified": 1707271822,
"narHash": "sha256-/DZsoPH5GBzOpVEGz5PgJ7vh8Q6TcrJq5u8FcBjqAfI=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "19b70f147b9c67a759e35824b241f1ed92e46694",
"rev": "7a94fe7690d2bdfe1aab475382a505e14dc114a6",
"type": "github"
},
"original": {
@ -116,6 +138,36 @@
"repo": "rust-overlay",
"type": "github"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_2": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",

View file

@ -2,9 +2,9 @@
description =
"Garage, an S3-compatible distributed object store for self-hosted deployments";
# Nixpkgs 24.05 as of 2024-08-26 has rustc v1.77
# Nixpkgs 23.11 as of 2024-02-07, has rustc v1.73
inputs.nixpkgs.url =
"github:NixOS/nixpkgs/0239aeb2f82ea27ccd6b61582b8f7fb8750eeada";
"github:NixOS/nixpkgs/9f2ee8c91ac42da3ae6c6a1d21555f283458247e";
inputs.flake-compat.url = "github:nix-community/flake-compat";
@ -17,9 +17,9 @@
# - rustc v1.66
# url = "github:cargo2nix/cargo2nix/8fb57a670f7993bfc24099c33eb9c5abb51f29a2";
# Rust overlay as of 2024-08-26
# Rust overlay as of 2024-02-07
inputs.rust-overlay.url =
"github:oxalica/rust-overlay/19b70f147b9c67a759e35824b241f1ed92e46694";
"github:oxalica/rust-overlay/7a94fe7690d2bdfe1aab475382a505e14dc114a6";
inputs.nixpkgs.follows = "nixpkgs";
inputs.flake-compat.follows = "flake-compat";
@ -76,7 +76,6 @@
# import the full shell using `nix develop .#full`
full = shellWithPackages (with pkgs; [
rustfmt
rust-analyzer
clang
mold
# ---- extra packages for dev tasks ----

View file

@ -20,7 +20,7 @@ let
};
toolchainOptions = {
rustVersion = "1.77.0";
rustVersion = "1.73.0";
extraRustComponents = [ "clippy" ];
};

View file

@ -15,7 +15,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.5.0
version: 0.4.1
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

View file

@ -11,7 +11,6 @@ spec:
{{- if eq .Values.deployment.kind "StatefulSet" }}
replicas: {{ .Values.deployment.replicaCount }}
serviceName: {{ include "garage.fullname" . }}
podManagementPolicy: {{ .Values.deployment.podManagementPolicy }}
{{- end }}
template:
metadata:
@ -64,10 +63,6 @@ spec:
name: web-api
- containerPort: 3903
name: admin
{{- with .Values.environment }}
env:
{{- toYaml . | nindent 12 }}
{{- end }}
volumeMounts:
- name: meta
mountPath: /mnt/meta

View file

@ -96,8 +96,6 @@ deployment:
kind: StatefulSet
# Number of StatefulSet replicas/garage nodes to start
replicaCount: 3
# If using statefulset, allow Parallel or OrderedReady (default)
podManagementPolicy: OrderedReady
image:
repository: dxflrs/amd64_garage
@ -216,8 +214,6 @@ tolerations: []
affinity: {}
environment: {}
monitoring:
metrics:
# If true, a service for monitoring is created with a prometheus.io/scrape annotation

View file

@ -30,11 +30,11 @@ Vagrant.configure("2") do |config|
config.vm.define "n6" do |config| vm(config, "n6", "192.168.56.26") end
config.vm.define "n7" do |config| vm(config, "n7", "192.168.56.27") end
#config.vm.define "n8" do |config| vm(config, "n8", "192.168.56.28") end
#config.vm.define "n9" do |config| vm(config, "n9", "192.168.56.29") end
#config.vm.define "n10" do |config| vm(config, "n10", "192.168.56.30") end
#config.vm.define "n11" do |config| vm(config, "n11", "192.168.56.31") end
#config.vm.define "n12" do |config| vm(config, "n12", "192.168.56.32") end
#config.vm.define "n13" do |config| vm(config, "n13", "192.168.56.33") end
#config.vm.define "n14" do |config| vm(config, "n14", "192.168.56.34") end
config.vm.define "n8" do |config| vm(config, "n8", "192.168.56.28") end
config.vm.define "n9" do |config| vm(config, "n9", "192.168.56.29") end
config.vm.define "n10" do |config| vm(config, "n10", "192.168.56.30") end
config.vm.define "n11" do |config| vm(config, "n11", "192.168.56.31") end
config.vm.define "n12" do |config| vm(config, "n12", "192.168.56.32") end
config.vm.define "n13" do |config| vm(config, "n13", "192.168.56.33") end
config.vm.define "n14" do |config| vm(config, "n14", "192.168.56.34") end
end

View file

@ -3,10 +3,11 @@
set -x
#for ppatch in task3c task3a tsfix2; do
for ppatch in v093 v1rc1; do
for ppatch in tsfix2; do
#for psc in c cp cdp r pr cpr dpr; do
for ptsk in reg2 set2; do
for psc in c cp cdp r pr cpr dpr; do
for psc in cdp r pr cpr dpr; do
#for ptsk in reg2 set1 set2; do
for ptsk in set1; do
for irun in $(seq 10); do
lein run test --nodes-file nodes.vagrant \
--time-limit 60 --rate 100 --concurrency 100 --ops-per-key 100 \

View file

@ -38,9 +38,7 @@
"tsfix2" "c82d91c6bccf307186332b6c5c6fc0b128b1b2b1"
"task3a" "707442f5de416fdbed4681a33b739f0a787b7834"
"task3b" "431b28e0cfdc9cac6c649193cf602108a8b02997"
"task3c" "0041b013a473e3ae72f50209d8f79db75a72848b"
"v093" "v0.9.3"
"v1rc1" "v1.0.0-rc1"})
"task3c" "0041b013a473e3ae72f50209d8f79db75a72848b"})
(def cli-opts
"Additional command line options."

View file

@ -43,7 +43,7 @@
"rpc_bind_addr = \"0.0.0.0:3901\"\n"
"rpc_public_addr = \"" node ":3901\"\n"
"db_engine = \"lmdb\"\n"
"replication_mode = \"3\"\n"
"replication_mode = \"2\"\n"
"data_dir = \"" data-dir "\"\n"
"metadata_dir = \"" meta-dir "\"\n"
"[s3_api]\n"

View file

@ -11,7 +11,6 @@ in
{
# --- Dev shell inherited from flake.nix ---
devShell = devShells.default;
devShellFull = devShells.full;
# --- Continuous integration shell ---
# The shell used for all CI jobs (along with devShell)

View file

@ -2,7 +2,6 @@ use std::convert::Infallible;
use std::fs::{self, Permissions};
use std::os::unix::fs::PermissionsExt;
use std::sync::Arc;
use std::time::Duration;
use async_trait::async_trait;
@ -20,7 +19,6 @@ use hyper_util::rt::TokioIo;
use tokio::io::{AsyncRead, AsyncWrite};
use tokio::net::{TcpListener, TcpStream, UnixListener, UnixStream};
use tokio::sync::watch;
use tokio::time::{sleep_until, Instant};
use opentelemetry::{
global,
@ -293,7 +291,7 @@ where
let connection_collector = tokio::spawn({
let server_name = server_name.clone();
async move {
let mut connections = FuturesUnordered::<tokio::task::JoinHandle<()>>::new();
let mut connections = FuturesUnordered::new();
loop {
let collect_next = async {
if connections.is_empty() {
@ -314,34 +312,23 @@ where
}
}
}
let deadline = Instant::now() + Duration::from_secs(10);
while !connections.is_empty() {
if !connections.is_empty() {
info!(
"{} server: {} connections still open, deadline in {:.2}s",
"{} server: {} connections still open",
server_name,
connections.len(),
(deadline - Instant::now()).as_secs_f32(),
connections.len()
);
tokio::select! {
conn_res = connections.next() => {
trace!(
"{} server: HTTP connection finished: {:?}",
server_name,
conn_res.unwrap(),
);
}
_ = sleep_until(deadline) => {
warn!("{} server: exit deadline reached with {} connections still open, killing them now",
server_name,
connections.len());
for conn in connections.iter() {
conn.abort();
}
for conn in connections {
assert!(conn.await.unwrap_err().is_cancelled());
}
break;
}
while let Some(conn_res) = connections.next().await {
trace!(
"{} server: HTTP connection finished: {:?}",
server_name,
conn_res
);
info!(
"{} server: {} connections still open",
server_name,
connections.len()
);
}
}
}

View file

@ -71,11 +71,21 @@ pub async fn handle_post_object(
}
if let Ok(content) = HeaderValue::from_str(&field.text().await?) {
if params.insert(&name, content).is_some() {
return Err(Error::bad_request(format!(
"Field '{}' provided more than once",
name
)));
match name.as_str() {
"tag" => (/* tag need to be reencoded, but we don't support them yet anyway */),
"acl" => {
if params.insert("x-amz-acl", content).is_some() {
return Err(Error::bad_request("Field 'acl' provided more than once"));
}
}
_ => {
if params.insert(&name, content).is_some() {
return Err(Error::bad_request(format!(
"Field '{}' provided more than once",
name
)));
}
}
}
}
};
@ -212,8 +222,6 @@ pub async fn handle_post_object(
)));
}
// if we ever start supporting ACLs, we likely want to map "acl" to x-amz-acl" somewhere
// arround here to make sure the rest of the machinery takes our acl into account.
let headers = get_headers(&params)?;
let expected_checksums = ExpectedChecksums {

View file

@ -211,12 +211,16 @@ impl Tree {
/// Returns the old value if there was one
#[inline]
pub fn insert<T: AsRef<[u8]>, U: AsRef<[u8]>>(&self, key: T, value: U) -> Result<()> {
pub fn insert<T: AsRef<[u8]>, U: AsRef<[u8]>>(
&self,
key: T,
value: U,
) -> Result<Option<Value>> {
self.0.insert(self.1, key.as_ref(), value.as_ref())
}
/// Returns the old value if there was one
#[inline]
pub fn remove<T: AsRef<[u8]>>(&self, key: T) -> Result<()> {
pub fn remove<T: AsRef<[u8]>>(&self, key: T) -> Result<Option<Value>> {
self.0.remove(self.1, key.as_ref())
}
/// Clears all values from the tree
@ -274,12 +278,12 @@ impl<'a> Transaction<'a> {
tree: &Tree,
key: T,
value: U,
) -> TxOpResult<()> {
) -> TxOpResult<Option<Value>> {
self.tx.insert(tree.1, key.as_ref(), value.as_ref())
}
/// Returns the old value if there was one
#[inline]
pub fn remove<T: AsRef<[u8]>>(&mut self, tree: &Tree, key: T) -> TxOpResult<()> {
pub fn remove<T: AsRef<[u8]>>(&mut self, tree: &Tree, key: T) -> TxOpResult<Option<Value>> {
self.tx.remove(tree.1, key.as_ref())
}
/// Clears all values in a tree
@ -335,8 +339,8 @@ pub(crate) trait IDb: Send + Sync {
fn get(&self, tree: usize, key: &[u8]) -> Result<Option<Value>>;
fn len(&self, tree: usize) -> Result<usize>;
fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<()>;
fn remove(&self, tree: usize, key: &[u8]) -> Result<()>;
fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<Option<Value>>;
fn remove(&self, tree: usize, key: &[u8]) -> Result<Option<Value>>;
fn clear(&self, tree: usize) -> Result<()>;
fn iter(&self, tree: usize) -> Result<ValueIter<'_>>;
@ -362,8 +366,8 @@ pub(crate) trait ITx {
fn get(&self, tree: usize, key: &[u8]) -> TxOpResult<Option<Value>>;
fn len(&self, tree: usize) -> TxOpResult<usize>;
fn insert(&mut self, tree: usize, key: &[u8], value: &[u8]) -> TxOpResult<()>;
fn remove(&mut self, tree: usize, key: &[u8]) -> TxOpResult<()>;
fn insert(&mut self, tree: usize, key: &[u8], value: &[u8]) -> TxOpResult<Option<Value>>;
fn remove(&mut self, tree: usize, key: &[u8]) -> TxOpResult<Option<Value>>;
fn clear(&mut self, tree: usize) -> TxOpResult<()>;
fn iter(&self, tree: usize) -> TxOpResult<TxValueIter<'_>>;

View file

@ -132,20 +132,22 @@ impl IDb for LmdbDb {
Ok(tree.len(&tx)?.try_into().unwrap())
}
fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<()> {
fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<Option<Value>> {
let tree = self.get_tree(tree)?;
let mut tx = self.db.write_txn()?;
let old_val = tree.get(&tx, key)?.map(Vec::from);
tree.put(&mut tx, key, value)?;
tx.commit()?;
Ok(())
Ok(old_val)
}
fn remove(&self, tree: usize, key: &[u8]) -> Result<()> {
fn remove(&self, tree: usize, key: &[u8]) -> Result<Option<Value>> {
let tree = self.get_tree(tree)?;
let mut tx = self.db.write_txn()?;
let old_val = tree.get(&tx, key)?.map(Vec::from);
tree.delete(&mut tx, key)?;
tx.commit()?;
Ok(())
Ok(old_val)
}
fn clear(&self, tree: usize) -> Result<()> {
@ -252,15 +254,17 @@ impl<'a> ITx for LmdbTx<'a> {
Ok(tree.len(&self.tx)? as usize)
}
fn insert(&mut self, tree: usize, key: &[u8], value: &[u8]) -> TxOpResult<()> {
fn insert(&mut self, tree: usize, key: &[u8], value: &[u8]) -> TxOpResult<Option<Value>> {
let tree = *self.get_tree(tree)?;
let old_val = tree.get(&self.tx, key)?.map(Vec::from);
tree.put(&mut self.tx, key, value)?;
Ok(())
Ok(old_val)
}
fn remove(&mut self, tree: usize, key: &[u8]) -> TxOpResult<()> {
fn remove(&mut self, tree: usize, key: &[u8]) -> TxOpResult<Option<Value>> {
let tree = *self.get_tree(tree)?;
let old_val = tree.get(&self.tx, key)?.map(Vec::from);
tree.delete(&mut self.tx, key)?;
Ok(())
Ok(old_val)
}
fn clear(&mut self, tree: usize) -> TxOpResult<()> {
let tree = *self.get_tree(tree)?;

View file

@ -36,7 +36,7 @@ impl std::str::FromStr for Engine {
match text {
"lmdb" | "heed" => Ok(Self::Lmdb),
"sqlite" | "sqlite3" | "rusqlite" => Ok(Self::Sqlite),
"sled" => Err(Error("Sled is no longer supported as a database engine. Converting your old metadata db can be done using an older Garage binary (e.g. v0.9.4).".into())),
"sled" => Err(Error("Sled is no longer supported as a database engine. Converting your old metadata db can be done using an older Garage binary (e.g. v0.9.3).".into())),
kind => Err(Error(
format!(
"Invalid DB engine: {} (options are: lmdb, sqlite)",
@ -92,7 +92,6 @@ pub fn open_db(path: &PathBuf, engine: Engine, opt: &OpenOpt) -> Result<Db> {
env_builder.map_size(map_size);
env_builder.max_readers(2048);
unsafe {
env_builder.flag(crate::lmdb_adapter::heed::flags::Flags::MdbNoRdAhead);
env_builder.flag(crate::lmdb_adapter::heed::flags::Flags::MdbNoMetaSync);
if !opt.fsync {
env_builder.flag(heed::flags::Flags::MdbNoSync);

View file

@ -169,7 +169,7 @@ impl IDb for SqliteDb {
}
}
fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<()> {
fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<Option<Value>> {
let tree = self.get_tree(tree)?;
let db = self.db.get()?;
let lock = self.write_lock.lock();
@ -184,18 +184,23 @@ impl IDb for SqliteDb {
assert_eq!(n, 1);
drop(lock);
Ok(())
Ok(old_val)
}
fn remove(&self, tree: usize, key: &[u8]) -> Result<()> {
fn remove(&self, tree: usize, key: &[u8]) -> Result<Option<Value>> {
let tree = self.get_tree(tree)?;
let db = self.db.get()?;
let lock = self.write_lock.lock();
db.execute(&format!("DELETE FROM {} WHERE k = ?1", tree), params![key])?;
let old_val = self.internal_get(&db, &tree, key)?;
if old_val.is_some() {
let n = db.execute(&format!("DELETE FROM {} WHERE k = ?1", tree), params![key])?;
assert_eq!(n, 1);
}
drop(lock);
Ok(())
Ok(old_val)
}
fn clear(&self, tree: usize) -> Result<()> {
@ -336,17 +341,31 @@ impl<'a> ITx for SqliteTx<'a> {
}
}
fn insert(&mut self, tree: usize, key: &[u8], value: &[u8]) -> TxOpResult<()> {
fn insert(&mut self, tree: usize, key: &[u8], value: &[u8]) -> TxOpResult<Option<Value>> {
let tree = self.get_tree(tree)?;
let sql = format!("INSERT OR REPLACE INTO {} (k, v) VALUES (?1, ?2)", tree);
self.tx.execute(&sql, params![key, value])?;
Ok(())
let old_val = self.internal_get(tree, key)?;
let sql = match &old_val {
Some(_) => format!("UPDATE {} SET v = ?2 WHERE k = ?1", tree),
None => format!("INSERT INTO {} (k, v) VALUES (?1, ?2)", tree),
};
let n = self.tx.execute(&sql, params![key, value])?;
assert_eq!(n, 1);
Ok(old_val)
}
fn remove(&mut self, tree: usize, key: &[u8]) -> TxOpResult<()> {
fn remove(&mut self, tree: usize, key: &[u8]) -> TxOpResult<Option<Value>> {
let tree = self.get_tree(tree)?;
self.tx
.execute(&format!("DELETE FROM {} WHERE k = ?1", tree), params![key])?;
Ok(())
let old_val = self.internal_get(tree, key)?;
if old_val.is_some() {
let n = self
.tx
.execute(&format!("DELETE FROM {} WHERE k = ?1", tree), params![key])?;
assert_eq!(n, 1);
}
Ok(old_val)
}
fn clear(&mut self, tree: usize) -> TxOpResult<()> {
let tree = self.get_tree(tree)?;

View file

@ -12,7 +12,7 @@ fn test_suite(db: Db) {
// ---- test simple insert/delete ----
assert!(tree.insert(ka, va).is_ok());
assert!(tree.insert(ka, va).unwrap().is_none());
assert_eq!(tree.get(ka).unwrap().unwrap(), va);
assert_eq!(tree.len().unwrap(), 1);
@ -21,7 +21,7 @@ fn test_suite(db: Db) {
let res = db.transaction::<_, (), _>(|tx| {
assert_eq!(tx.get(&tree, ka).unwrap().unwrap(), va);
assert_eq!(tx.insert(&tree, ka, vb).unwrap(), ());
assert_eq!(tx.insert(&tree, ka, vb).unwrap().unwrap(), va);
assert_eq!(tx.get(&tree, ka).unwrap().unwrap(), vb);
@ -33,7 +33,7 @@ fn test_suite(db: Db) {
let res = db.transaction::<(), _, _>(|tx| {
assert_eq!(tx.get(&tree, ka).unwrap().unwrap(), vb);
assert_eq!(tx.insert(&tree, ka, vc).unwrap(), ());
assert_eq!(tx.insert(&tree, ka, vc).unwrap().unwrap(), vb);
assert_eq!(tx.get(&tree, ka).unwrap().unwrap(), vc);
@ -50,7 +50,7 @@ fn test_suite(db: Db) {
assert!(iter.next().is_none());
drop(iter);
assert!(tree.insert(kb, vc).is_ok());
assert!(tree.insert(kb, vc).unwrap().is_none());
assert_eq!(tree.get(kb).unwrap().unwrap(), vc);
let mut iter = tree.iter().unwrap();

View file

@ -24,7 +24,6 @@ pub struct ConvertDbOpt {
output_engine: Engine,
#[structopt(flatten)]
#[allow(dead_code)]
db_open: OpenDbOpt,
}
@ -53,7 +52,6 @@ pub(crate) fn do_conversion(args: ConvertDbOpt) -> Result<()> {
}
let opt = OpenOpt {
#[cfg(feature = "lmdb")]
lmdb_map_size: args.db_open.lmdb.map_size.map(|x| x.as_u64() as usize),
..Default::default()
};

View file

@ -48,7 +48,7 @@ pub enum Command {
#[structopt(name = "worker", version = garage_version())]
Worker(WorkerOperation),
/// Low-level node-local debug operations on data blocks
/// Low-level debug operations on data blocks
#[structopt(name = "block", version = garage_version())]
Block(BlockOperation),

View file

@ -141,7 +141,7 @@ impl Garage {
)?)
.ok()
.and_then(|x| NetworkKey::from_slice(&x))
.ok_or_message("Invalid RPC secret key: expected 32 bits of entropy, please check the documentation for requirements")?;
.ok_or_message("Invalid RPC secret key")?;
let (replication_factor, consistency_mode) = parse_replication_mode(&config)?;

View file

@ -24,7 +24,6 @@ bytes.workspace = true
bytesize.workspace = true
gethostname.workspace = true
hex.workspace = true
ipnet.workspace = true
tracing.workspace = true
rand.workspace = true
itertools.workspace = true

View file

@ -227,29 +227,24 @@ impl LayoutHistory {
// ================== updates to layout, public interface ===================
pub fn merge(&mut self, other: &LayoutHistory) -> bool {
// If our current layout version is completely out-of-date,
// forget everything we know and replace it by incoming layout data.
if self.current().version < other.min_stored() {
*self = other.clone();
return true;
}
let mut changed = false;
// Add any new versions to history
for v2 in other.versions.iter() {
if v2.version == self.current().version + 1 {
// This is the next version, add it to our version list
self.versions.push(v2.clone());
changed = true;
} else if let Some(v1) = self.versions.iter().find(|v| v.version == v2.version) {
if let Some(v1) = self.versions.iter().find(|v| v.version == v2.version) {
// Version is already present, check consistency
if v1 != v2 {
error!("Inconsistent layout histories: different layout compositions for version {}. Your cluster will be broken as long as this layout version is not replaced.", v2.version);
}
} else if self.versions.iter().all(|v| v.version != v2.version - 1) {
error!(
"Cannot receive new layout version {}, version {} is missing",
v2.version,
v2.version - 1
);
} else {
// This is an older version
assert!(v2.version < self.min_stored());
self.versions.push(v2.clone());
changed = true;
}
}

View file

@ -844,20 +844,12 @@ impl NodeStatus {
}
}
/// Obtain the list of currently available IP addresses on all non-loopback
/// interfaces, optionally filtering them to be inside a given IpNet.
fn get_default_ip(filter_ipnet: Option<ipnet::IpNet>) -> Option<IpAddr> {
fn get_default_ip() -> Option<IpAddr> {
pnet_datalink::interfaces()
.into_iter()
// filter down and loopback interfaces
.filter(|i| i.is_up() && !i.is_loopback())
// get all IPs
.flat_map(|e| e.ips)
// optionally, filter to be inside filter_ipnet
.find(|ipn| {
filter_ipnet.is_some_and(|ipnet| ipnet.contains(&ipn.ip())) || filter_ipnet.is_none()
})
.map(|ipn| ipn.ip())
.iter()
.find(|e| e.is_up() && !e.is_loopback() && !e.ips.is_empty())
.and_then(|e| e.ips.first())
.map(|a| a.ip())
}
fn get_rpc_public_addr(config: &Config) -> Option<SocketAddr> {
@ -885,28 +877,7 @@ fn get_rpc_public_addr(config: &Config) -> Option<SocketAddr> {
}
}
None => {
// `No rpc_public_addr` specified, try to discover one, optionally filtering by `rpc_public_addr_subnet`.
let filter_subnet: Option<ipnet::IpNet> = config
.rpc_public_addr_subnet
.as_ref()
.and_then(|filter_subnet_str| match filter_subnet_str.parse::<ipnet::IpNet>() {
Ok(filter_subnet) => {
let filter_subnet_trunc = filter_subnet.trunc();
if filter_subnet_trunc != filter_subnet {
warn!("`rpc_public_addr_subnet` changed after applying netmask, continuing with {}", filter_subnet.trunc());
}
Some(filter_subnet_trunc)
}
Err(e) => {
panic!(
"Cannot parse rpc_public_addr_subnet {} from config file: {}. Bailing out.",
filter_subnet_str, e
);
}
});
let addr = get_default_ip(filter_subnet)
.map(|ip| SocketAddr::new(ip, config.rpc_bind_addr.port()));
let addr = get_default_ip().map(|ip| SocketAddr::new(ip, config.rpc_bind_addr.port()));
if let Some(a) = addr {
warn!("Using autodetected rpc_public_addr: {}. Consider specifying it explicitly in configuration file if possible.", a);
}

View file

@ -43,13 +43,10 @@ impl TableReplication for TableFullReplication {
}
fn write_quorum(&self) -> usize {
let nmembers = self.system.cluster_layout().current().all_nodes().len();
let max_faults = if nmembers > 1 { 1 } else { 0 };
if nmembers > max_faults {
nmembers - max_faults
} else {
if nmembers < 3 {
1
} else {
nmembers.div_euclid(2) + 1
}
}
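
To make the effect of the change concrete, here is a standalone sketch comparing the two quorum formulas shown above (illustrative only; the function names are descriptive labels, not identifiers from the source):

```rust
// Quorum required by the "all nodes but one" formula.
fn quorum_all_but_one(nmembers: usize) -> usize {
    let max_faults = if nmembers > 1 { 1 } else { 0 };
    if nmembers > max_faults {
        nmembers - max_faults
    } else {
        1
    }
}

// Quorum required by the simple-majority formula.
fn quorum_majority(nmembers: usize) -> usize {
    if nmembers < 3 {
        1
    } else {
        nmembers.div_euclid(2) + 1
    }
}

fn main() {
    // From five nodes up, the majority formula needs strictly fewer
    // acknowledgements, e.g. 3 instead of 4 when nmembers = 5.
    for n in 1..=7 {
        println!(
            "n = {}: all-but-one = {}, majority = {}",
            n,
            quorum_all_but_one(n),
            quorum_majority(n)
        );
    }
}
```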

View file

@ -85,10 +85,6 @@ pub struct Config {
/// Public IP address of this node
pub rpc_public_addr: Option<String>,
/// In case `rpc_public_addr` was not set, this can filter
/// the addresses announced to other peers to a specific subnet.
pub rpc_public_addr_subnet: Option<String>,
/// Timeout for Netapp's ping messagess
pub rpc_ping_timeout_msec: Option<u64>,
/// Timeout for Netapp RPC calls