Compare commits

...

58 commits

Author SHA1 Message Date
6da1353541 Merge pull request 'Don't fetch old values in cross-partition transactional inserts' (#877) from withings/garage:perf/kv/insert-no-return-cross-partition into main
Reviewed-on: #877
2024-09-14 15:57:27 +00:00
bd71728874
Tests: don't expect old value after transactional insert
2024-09-12 10:50:53 +02:00
51ced60366
Don't fetch old values in cross-partition transactional inserts
2024-09-12 10:26:28 +02:00
586957b4b7 Merge pull request 'KV: don't retrieve values for write ops' (#873) from marvinj97/garage:perf/kv/insert-no-return into main
Reviewed-on: #873
Reviewed-by: Alex <alex@adnab.me>
2024-09-10 09:06:29 +00:00
8d2bb4afeb Merge pull request 'Typo' (#875) from faust/garage:doc2 into main
Reviewed-on: #875
2024-09-10 09:03:31 +00:00
c26f32b769
Typo
And remove trailing white space.
2024-09-10 09:34:59 +02:00
8062ec7b4b test: fix db tests
2024-09-04 19:24:36 +02:00
eb416a02fb dont assert deletion count in sqlite KV adapter
2024-09-04 18:51:51 +02:00
74363c9060 perf(kv): dont retrieve values for write ops
see #851
2024-09-04 18:45:17 +02:00
615698df7d Merge pull request 'update compiler to Rust 1.77' (#866) from rust-1.77 into main
Reviewed-on: #866
2024-08-26 19:08:00 +00:00
7061fa5a56
use rust 1.77 in nix/compile.nix
2024-08-26 19:19:16 +02:00
8881930cdd
update nixpkgs and rust-overlay sources in flake.nix 2024-08-26 19:19:16 +02:00
52f6c0760b Merge pull request 'update crate time (fix #849)' (#865) from update-time into main
Reviewed-on: #865
2024-08-26 16:20:04 +00:00
5b0602c7e9
update crate time (fix #849)
2024-08-26 18:11:21 +02:00
182b2af7e5 Merge pull request 'api servers: kill opened connections after SIGINT after 10s deadline (fix #806)' (#864) from exit-deadline into main
Reviewed-on: #864
2024-08-25 18:34:55 +00:00
baf32c9575
api servers: kill opened connections after SIGINT after 10s deadline (fix #806)
2024-08-25 20:04:56 +02:00
3dda1ee4f6 Merge pull request 'fix build when lmdb feature is disabled (fix #800)' (#863) from fix-800 into main
Reviewed-on: #863
2024-08-25 10:00:40 +00:00
aa7ce9e97c
fix build when lmdb feature is disabled (fix #800)
2024-08-25 11:42:37 +02:00
8d62616ec0 Merge pull request 'layout: discard old info when it is completely out-of-date (fix #841)' (#861) from fix-841 into main
Reviewed-on: #861
2024-08-24 11:12:39 +00:00
bd6fe72c06 Merge pull request 'Quick start: mention Docker (replace #803)' (#862) from dougreeder into main
Reviewed-on: #862
2024-08-24 11:07:46 +00:00
4c9e8ef625
doc: clarify quick start on using docker
2024-08-24 13:07:02 +02:00
3e711bc110 Merge pull request 'don't modify postobject request before validating policy' (#850) from trinity-1686a/garage:fix-acl-postobject into main
Reviewed-on: #850
2024-08-24 10:49:14 +00:00
7fb66b4944
layout: discard old info when it is completely out-of-date (fix #841)
2024-08-24 12:38:56 +02:00
679ae8bcbb Merge pull request 'Set "no read ahead" on LMDB to improve performances when "LMDB size" > "RAM size"' (#855) from fix-lmdb-no-read-ahead into main
Reviewed-on: #855
Reviewed-by: Alex <alex@adnab.me>
2024-08-18 12:25:35 +00:00
2a93ad0c84
force flag "no read ahead" on LMDB
2024-08-17 21:17:15 +02:00
f190032589 don't modify postobject request before validating policy
2024-08-10 20:10:47 +02:00
3a87bd1370 Merge pull request 'Improve error message for malformed RPC secret key' (#846) from improve-secret-error-message into main
Reviewed-on: #846
Reviewed-by: Quentin <quentin@dufour.io>
2024-08-09 06:47:11 +00:00
9302cd42f0 Improve error message for malformed RPC secret key
2024-08-08 23:05:24 +00:00
060ad0da32 docs: Update LMDB website
2024-08-06 21:47:14 +00:00
a5ed1161c6 Merge pull request 'Add environment variable dict to helm chart.' (#843) from Benjamin/garage:main into main
Reviewed-on: #843
Reviewed-by: maximilien <me@mricher.fr>
2024-08-06 21:45:35 +00:00
Benjamin von Mossner
222674432b This commit adds an environment dict to garage helm chart. Using it, env variables can be set into the garage container environment, useful to set eg. GARAGE_ADMIN_TOKEN or GARAGE_METRICS_TOKEN
2024-07-25 11:42:13 +02:00
070a8ad110 Merge pull request 'doc: fix typo' (#831) from Armael/garage:typo into main
Reviewed-on: #831
2024-06-18 12:40:32 +00:00
770384cae1 Merge pull request 'add rpc_public_addr_subnet config option' (#817) from flokli/garage:rpc_public_addr_subnet into main
Reviewed-on: #817
Reviewed-by: Alex <alex@adnab.me>
2024-06-18 12:40:07 +00:00
a0f6bc5b7f add rpc_public_addr_subnet config option
In case `rpc_public_addr` is not set, but autodiscovery is used, this
allows filtering the list of automatically discovered IPs to a specific
subnet.

For example, if nodes should pick *their* IP inside a specific subnet,
but you don't want to explicitly write the IP down (as it's dynamic, or
you want to share configs across nodes), you can use this option.
2024-06-05 08:41:36 +02:00
Armaël Guéneau
88c734bbd9 typo
2024-06-04 15:34:02 +02:00
d38509ef4b Merge pull request 'adding the ability to change the default podManagementPolicy for StatefulSets' (#823) from bodaciousbiscuits/garage:main into main
Reviewed-on: #823
Reviewed-by: maximilien <me@mricher.fr>
2024-05-25 18:35:53 +00:00
bodaciousbiscuits
39b37833c5 adding the ability to change the default podManagementPolicy for StatefulSets
2024-05-19 21:31:19 -05:00
a2c1de646b Merge pull request 'cli: clarify garage block is node-local' (#813) from flokli/garage:block-node-local into main
Reviewed-on: #813
2024-05-12 08:53:26 +00:00
15847a636a cli: clarify garage block is node-local
Prevents some of the confusion from
#810.
2024-05-07 07:42:33 +00:00
123d3e1f04 Merge pull request 'flake.nix: add rust-analyzer to devShells.full, expose devShells.full in shell.nix' (#816) from flokli/garage:shell-fixes into main
Reviewed-on: #816
2024-04-23 18:54:26 +00:00
a6e4b96ca9 shell.nix: expose devShellFull
This allows accessing devShells.full from shell.nix.
2024-04-23 11:59:37 +03:00
b442b0e35e devShells.full: add rust-analyzer 2024-04-23 11:59:37 +03:00
0c3b198b22 Improves Quick Start for users not using Linux
2024-04-10 16:42:10 -04:00
33c2086d9e Merge pull request '[fix-doc] fix broken references in documentation' (#802) from fix-doc into main
Reviewed-on: #802
2024-04-10 15:49:03 +00:00
5ad1e55ccf [fix-doc] fix broken references in documentation
2024-04-10 17:47:34 +02:00
1779fd40c0 Merge pull request 'Garage v1.0' (#683) from next-0.10 into main
Reviewed-on: #683
2024-04-10 15:23:12 +00:00
ff093ddbb8
Merge branch 'main' into next-0.10
2024-04-10 14:38:14 +02:00
90e3c2af91
[next-0.10] small updates to mention Garage v0.9.4
2024-04-10 14:35:30 +02:00
b47706809c Merge pull request 'fix typo in doc' (#799) from fix-typo into main
Reviewed-on: #799
2024-04-08 15:09:27 +00:00
126e0f47a3 fix typo in doc
2024-04-08 17:08:44 +02:00
738bb2f09c Merge pull request 'Garage v0.9.4' (#798) from rel-0.9.4 into main
Reviewed-on: #798
2024-04-08 09:57:03 +00:00
7dd7cb5759
[rel-0.9.4] upgrade version to v0.9.4
2024-04-08 11:18:19 +02:00
8b663d8c5b Merge pull request 'jepsen testing of Garage v1.0.0-rc1' (#796) from jepsen-1.0rc1 into main
Reviewed-on: #796
2024-04-05 21:05:58 +00:00
c051db8204 jepsen testing of Garage v1.0.0-rc1
2024-04-05 22:57:00 +02:00
50669b3e76
[next-0.10] bump helm chart version
2024-04-03 14:19:59 +02:00
e5838b4837 Merge pull request '[doc-units] document how interval value is parsed' (#795) from doc-units into main
Reviewed-on: #795
2024-04-03 12:15:18 +00:00
87dfaf2eb9
[doc-units] document how interval value is parsed
2024-04-03 14:14:13 +02:00
554437254e
[next-0.10] Add migration guide for v1.0
2024-03-28 18:45:06 +01:00
34 changed files with 330 additions and 221 deletions

Cargo.lock generated
View file

@ -1527,6 +1527,7 @@ dependencies = [
"garage_util",
"gethostname",
"hex",
"ipnet",
"itertools 0.12.1",
"k8s-openapi",
"kube",
@ -4081,9 +4082,9 @@ dependencies = [
[[package]]
name = "time"
version = "0.3.34"
version = "0.3.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749"
checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885"
dependencies = [
"deranged",
"num-conv",
@ -4101,9 +4102,9 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3"
[[package]]
name = "time-macros"
version = "0.2.17"
version = "0.2.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ba3a3ef41e6672a2f0f001392bb5dcd3ff0a9992d618ca761a11c3121547774"
checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf"
dependencies = [
"num-conv",
"time-core",

View file

@ -34,7 +34,7 @@ args@{
ignoreLockHash,
}:
let
nixifiedLockHash = "1ccd5eb25a83962821e0e9da4ce6df31717b2b97a5b3a0c80c9e0e0759710143";
nixifiedLockHash = "c0aa85d369b22875a652356862a5810c22838970be9fbec558dd108d5232881d";
workspaceSrc = if args.workspaceSrc == null then ./. else args.workspaceSrc;
currentLockHash = builtins.hashFile "sha256" (workspaceSrc + /Cargo.lock);
lockHashIgnored = if ignoreLockHash
@ -424,7 +424,7 @@ in
http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.11" { inherit profileName; }).out;
hyper = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."0.14.28" { inherit profileName; }).out;
ring = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".ring."0.17.7" { inherit profileName; }).out;
time = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".time."0.3.34" { inherit profileName; }).out;
time = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".time."0.3.36" { inherit profileName; }).out;
tokio = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.36.0" { inherit profileName; }).out;
tracing = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tracing."0.1.40" { inherit profileName; }).out;
zeroize = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".zeroize."1.7.0" { inherit profileName; }).out;
@ -643,7 +643,7 @@ in
ring = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".ring."0.17.7" { inherit profileName; }).out;
sha2 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".sha2."0.10.8" { inherit profileName; }).out;
subtle = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".subtle."2.5.0" { inherit profileName; }).out;
time = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".time."0.3.34" { inherit profileName; }).out;
time = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".time."0.3.36" { inherit profileName; }).out;
tracing = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tracing."0.1.40" { inherit profileName; }).out;
zeroize = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".zeroize."1.7.0" { inherit profileName; }).out;
};
@ -823,7 +823,7 @@ in
pin_utils = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".pin-utils."0.1.0" { inherit profileName; }).out;
ryu = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".ryu."1.0.16" { inherit profileName; }).out;
${ if false then "serde" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.196" { inherit profileName; }).out;
time = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".time."0.3.34" { inherit profileName; }).out;
time = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".time."0.3.36" { inherit profileName; }).out;
tokio = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.36.0" { inherit profileName; }).out;
tokio_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio-util."0.7.10" { inherit profileName; }).out;
};
@ -2219,6 +2219,7 @@ in
garage_util = (rustPackages."unknown".garage_util."1.0.0" { inherit profileName; }).out;
gethostname = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".gethostname."0.4.3" { inherit profileName; }).out;
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
ipnet = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".ipnet."2.9.0" { inherit profileName; }).out;
itertools = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".itertools."0.12.1" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/k8s-openapi" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "k8s_openapi" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".k8s-openapi."0.21.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "kube" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".kube."0.88.1" { inherit profileName; }).out;
@ -3016,8 +3017,8 @@ in
registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3"; };
features = builtins.concatLists [
(lib.optional (rootFeatures' ? "garage/consul-discovery" || rootFeatures' ? "garage_rpc/consul-discovery" || rootFeatures' ? "garage_rpc/reqwest") "default")
(lib.optional (rootFeatures' ? "garage/consul-discovery" || rootFeatures' ? "garage_rpc/consul-discovery" || rootFeatures' ? "garage_rpc/reqwest") "std")
[ "default" ]
[ "std" ]
];
});
@ -5780,11 +5781,11 @@ in
};
});
"registry+https://github.com/rust-lang/crates.io-index".time."0.3.34" = overridableMkRustCrate (profileName: rec {
"registry+https://github.com/rust-lang/crates.io-index".time."0.3.36" = overridableMkRustCrate (profileName: rec {
name = "time";
version = "0.3.34";
version = "0.3.36";
registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749"; };
src = fetchCratesIo { inherit name version; sha256 = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885"; };
features = builtins.concatLists [
[ "alloc" ]
[ "default" ]
@ -5797,7 +5798,7 @@ in
powerfmt = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".powerfmt."0.2.0" { inherit profileName; }).out;
serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.196" { inherit profileName; }).out;
time_core = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".time-core."0.1.2" { inherit profileName; }).out;
time_macros = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".time-macros."0.2.17" { profileName = "__noProfile"; }).out;
time_macros = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".time-macros."0.2.18" { profileName = "__noProfile"; }).out;
};
});
@ -5808,11 +5809,11 @@ in
src = fetchCratesIo { inherit name version; sha256 = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3"; };
});
"registry+https://github.com/rust-lang/crates.io-index".time-macros."0.2.17" = overridableMkRustCrate (profileName: rec {
"registry+https://github.com/rust-lang/crates.io-index".time-macros."0.2.18" = overridableMkRustCrate (profileName: rec {
name = "time-macros";
version = "0.2.17";
version = "0.2.18";
registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "7ba3a3ef41e6672a2f0f001392bb5dcd3ff0a9992d618ca761a11c3121547774"; };
src = fetchCratesIo { inherit name version; sha256 = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf"; };
features = builtins.concatLists [
[ "parsing" ]
];

View file

@ -55,6 +55,7 @@ hexdump = "0.1"
hmac = "0.12"
idna = "0.5"
itertools = "0.12"
ipnet = "2.9.0"
lazy_static = "1.4"
md-5 = "0.10"
mktemp = "0.5"

View file

@ -152,6 +152,8 @@ Check the following for your configuration files:
- Make sure `rpc_public_addr` contains the public IP address of the node you are configuring.
This parameter is optional but recommended: if your nodes have trouble communicating with
one another, consider adding it.
Alternatively, you can also set `rpc_public_addr_subnet`, which can filter
the addresses announced to other peers to a specific subnet.
- Make sure `rpc_secret` is the same value on all nodes. It should be a 32-bytes hex-encoded secret key.
You can generate such a key with `openssl rand -hex 32`.
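As a minimal, hedged sketch of what this checklist might look like in a node's `garage.toml` — the addresses, subnet and key below are placeholders, not values taken from this changeset:

```toml
# RPC section of garage.toml (illustrative values only)
rpc_bind_addr = "[::]:3901"
# Public address of *this* node, reachable by the other nodes.
rpc_public_addr = "203.0.113.10:3901"
# Alternative: leave rpc_public_addr unset and restrict autodiscovered
# addresses to a subnet instead.
# rpc_public_addr_subnet = "203.0.113.0/24"
# Same 32-byte hex secret on every node, e.g. generated with `openssl rand -hex 32`.
rpc_secret = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
```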

View file

@ -67,7 +67,7 @@ Pithos has been abandonned and should probably not used yet, in the following we
Pithos was relying as a S3 proxy in front of Cassandra (and was working with Scylla DB too).
From its designers' mouth, storing data in Cassandra has shown its limitations justifying the project abandonment.
They built a closed-source version 2 that does not store blobs in the database (only metadata) but did not communicate further on it.
We considered there v2's design but concluded that it does not fit both our *Self-contained & lightweight* and *Simple* properties. It makes the development, the deployment and the operations more complicated while reducing the flexibility.
We considered their v2's design but concluded that it does not fit both our *Self-contained & lightweight* and *Simple* properties. It makes the development, the deployment and the operations more complicated while reducing the flexibility.
**[Riak CS](https://docs.riak.com/riak/cs/2.1.1/index.html):**
*Not written yet*

View file

@ -5,7 +5,7 @@ weight = 40
Garage is meant to work on old, second-hand hardware.
In particular, this makes it likely that some of your drives will fail, and some manual intervention will be needed.
Fear not! For Garage is fully equipped to handle drive failures, in most common cases.
Fear not! Garage is fully equipped to handle drive failures, in most common cases.
## A note on availability of Garage
@ -61,7 +61,7 @@ garage repair -a --yes blocks
This will re-synchronize blocks of data that are missing to the new HDD, reading them from copies located on other nodes.
You can check on the advancement of this process by doing the following command:
You can check on the advancement of this process by doing the following command:
```bash
garage stats -a

View file

@ -42,6 +42,13 @@ If a binary of the last version is not available for your architecture,
or if you want a build customized for your system,
you can [build Garage from source](@/documentation/cookbook/from-source.md).
If none of these option work for you, you can also run Garage in a Docker
container. When using Docker, the commands used in this guide will not work
anymore. We recommend reading the tutorial on [configuring a
multi-node cluster](@/documentation/cookbook/real-world.md) to learn about
using Garage as a Docker container. For simplicity, a minimal command to launch
Garage using Docker is provided in this quick start guide as well.
## Configuring and starting Garage
@ -85,6 +92,9 @@ metrics_token = "$(openssl rand -base64 32)"
EOF
```
See the [Configuration file format](https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/)
for complete options and values.
Now that your configuration file has been created, you may save it to the directory of your choice.
By default, Garage looks for **`/etc/garage.toml`.**
You can also store it somewhere else, but you will have to specify `-c path/to/garage.toml`
@ -111,6 +121,26 @@ garage -c path/to/garage.toml server
If you have placed the `garage.toml` file in `/etc` (its default location), you can simply run `garage server`.
Alternatively, if you cannot or do not wish to run the Garage binary directly,
you may use Docker to run Garage in a container using the following command:
```bash
docker run \
-d \
--name garaged \
-p 3900:3900 -p 3901:3901 -p 3902:3902 -p 3903:3903 \
-v /etc/garage.toml:/path/to/garage.toml \
-v /var/lib/garage/meta:/path/to/garage/meta \
-v /var/lib/garage/data:/path/to/garage/data \
dxflrs/garage:v0.9.4
```
Under Linux, you can substitute `--network host` for `-p 3900:3900 -p 3901:3901 -p 3902:3902 -p 3903:3903`
#### Troubleshooting
Ensure your configuration file, `metadata_dir` and `data_dir` are readable by the user running the `garage` server or Docker.
You can tune Garage's verbosity by setting the `RUST_LOG=` environment variable. \
Available log levels are (from less verbose to more verbose): `error`, `warn`, `info` *(default)*, `debug` and `trace`.
@ -131,6 +161,9 @@ It uses values from the TOML configuration file to find the Garage daemon runnin
local node, therefore if your configuration file is not at `/etc/garage.toml` you will
again have to specify `-c path/to/garage.toml` at each invocation.
If you are running Garage in a Docker container, you can set `alias garage="docker exec -ti <container name> /garage"`
to use the Garage binary inside your container.
If the `garage` CLI is able to correctly detect the parameters of your local Garage node,
the following command should be enough to show the status of your cluster:

View file

@ -31,6 +31,9 @@ rpc_secret = "4425f5c26c5e11581d3223904324dcb5b5d5dfb14e5e7f35e38c595424f5f1e6"
rpc_bind_addr = "[::]:3901"
rpc_bind_outgoing = false
rpc_public_addr = "[fc00:1::1]:3901"
# or set rpc_public_adr_subnet to filter down autodiscovery to a subnet:
# rpc_public_addr_subnet = "2001:0db8:f00:b00:/64"
allow_world_readable_secrets = false
@ -105,6 +108,7 @@ Top-level configuration options:
[`rpc_bind_addr`](#rpc_bind_addr),
[`rpc_bind_outgoing`](#rpc_bind_outgoing),
[`rpc_public_addr`](#rpc_public_addr),
[`rpc_public_addr_subnet`](#rpc_public_addr_subnet)
[`rpc_secret`/`rpc_secret_file`](#rpc_secret).
The `[consul_discovery]` section:
@ -295,12 +299,12 @@ Since `v0.8.0`, Garage can use alternative storage backends as follows:
| DB engine | `db_engine` value | Database path |
| --------- | ----------------- | ------------- |
| [LMDB](https://www.lmdb.tech) (since `v0.8.0`, default since `v0.9.0`) | `"lmdb"` | `<metadata_dir>/db.lmdb/` |
| [LMDB](https://www.symas.com/lmdb) (since `v0.8.0`, default since `v0.9.0`) | `"lmdb"` | `<metadata_dir>/db.lmdb/` |
| [Sqlite](https://sqlite.org) (since `v0.8.0`) | `"sqlite"` | `<metadata_dir>/db.sqlite` |
| [Sled](https://sled.rs) (old default, removed since `v1.0`) | `"sled"` | `<metadata_dir>/db/` |
Sled was supported until Garage v0.9.x, and was removed in Garage v1.0.
You can still use an older binary of Garage (e.g. v0.9.3) to migrate
You can still use an older binary of Garage (e.g. v0.9.4) to migrate
old Sled metadata databases to another engine.
Performance characteristics of the different DB engines are as follows:
@ -390,10 +394,12 @@ if geographical replication is used.
If this value is set, Garage will automatically take a snapshot of the metadata
DB file at a regular interval and save it in the metadata directory.
This can allow to recover from situations where the metadata DB file is corrupted,
for instance after an unclean shutdown.
See [this page](@/documentation/operations/recovering.md#corrupted_meta) for details.
This parameter can take any duration string that can be parsed by
the [`parse_duration`](https://docs.rs/parse_duration/latest/parse_duration/#syntax) crate.
Snapshots can allow to recover from situations where the metadata DB file is
corrupted, for instance after an unclean shutdown. See [this
page](@/documentation/operations/recovering.md#corrupted_meta) for details.
Garage keeps only the two most recent snapshots of the metadata DB and deletes
older ones automatically.
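As a hedged illustration of the duration syntax described above — assuming the parameter documented in this section is `metadata_auto_snapshot_interval`, its name in the Garage configuration reference (the heading itself is not visible in this hunk), with an illustrative value:

```toml
# Take a metadata DB snapshot every six hours; any string understood by the
# parse_duration crate should work, e.g. "6h", "30 minutes" or "1 day".
# (Parameter name assumed from the Garage configuration reference.)
metadata_auto_snapshot_interval = "6h"
```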
@ -412,7 +418,7 @@ month, with a random delay to avoid all nodes running at the same time. When
it scrubs the data directory, Garage will read all of the data files stored on
disk to check their integrity, and will rebuild any data files that it finds
corrupted, using the remaining valid copies stored on other nodes.
See [this page](@/documentation/operations/durability-repair.md#scrub) for details.
See [this page](@/documentation/operations/durability-repairs.md#scrub) for details.
Set the `disable_scrub` configuration value to `true` if you don't need Garage
to scrub the data directory, for instance if you are already scrubbing at the
@ -541,6 +547,14 @@ RPC calls. **This parameter is optional but recommended.** In case you have
a NAT that binds the RPC port to a port that is different on your public IP,
this field might help making it work.
#### `rpc_public_addr_subnet` {#rpc_public_addr_subnet}
In case `rpc_public_addr` is not set, but autodiscovery is used, this allows
filtering the list of automatically discovered IPs to a specific subnet.
For example, if nodes should pick *their* IP inside a specific subnet, but you
don't want to explicitly write the IP down (as it's dynamic, or you want to
share configs across nodes), you can use this option.
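A short, hedged sketch of the new option in context; the subnet is an example value, not one taken from this changeset:

```toml
# rpc_public_addr is left unset on purpose: each node announces an
# automatically discovered address, filtered down to this subnet.
rpc_public_addr_subnet = "192.168.56.0/24"
```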
#### `bootstrap_peers` {#bootstrap_peers}
A list of peer identifiers on which to contact other Garage peers of this cluster.

View file

@ -0,0 +1,77 @@
+++
title = "Migrating from 0.9 to 1.0"
weight = 11
+++
**This guide explains how to migrate to 1.0 if you have an existing 0.9 cluster.
We don't recommend trying to migrate to 1.0 directly from 0.8 or older.**
This migration procedure has been tested on several clusters without issues.
However, it is still a *critical procedure* that might cause issues.
**Make sure to back up all your data before attempting it!**
You might also want to read our [general documentation on upgrading Garage](@/documentation/operations/upgrading.md).
## Changes introduced in v1.0
The following are **breaking changes** in Garage v1.0 that require your attention when migrating:
- The Sled metadata db engine has been **removed**. If your cluster was still
using Sled, you will need to **use a Garage v0.9.x binary** to convert the
database using the `garage convert-db` subcommand. See
[here](@/documentation/reference-manual/configuration.md#db_engine) for the
details of the procedure.
The following syntax changes have been made to the configuration file:
- The `replication_mode` parameter has been split into two parameters:
[`replication_factor`](@/documentation/reference-manual/configuration.md#replication_factor)
and
[`consistency_mode`](@/documentation/reference-manual/configuration.md#consistency_mode).
The old syntax using `replication_mode` is still supported for legacy
reasons and can still be used.
- The parameters `sled_cache_capacity` and `sled_flush_every_ms` have been removed.
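As a hedged sketch of the `replication_mode` split described in the list above, showing old and new syntax side by side (values are examples for a three-node cluster, not prescriptions):

```toml
# Old syntax, still accepted for backwards compatibility:
# replication_mode = "3"

# New syntax since v1.0:
replication_factor = 3
consistency_mode = "consistent"
```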
## Migration procedure
The migration to Garage v1.0 can be done with almost no downtime,
by restarting all nodes at once in the new version.
The migration steps are as follows:
1. Do a `garage repair --all-nodes --yes tables`, check the logs and check that
all data seems to be synced correctly between nodes. If you have time, do
additional `garage repair` procedures (`blocks`, `versions`, `block_refs`,
etc.)
2. Ensure you have a snapshot of your Garage installation that you can restore
to in case the upgrade goes wrong:
- If you are running Garage v0.9.4 or later, use the `garage meta snapshot
--all` to make a backup snapshot of the metadata directories of your nodes
for backup purposes, and save a copy of the following files in the
metadata directories of your nodes: `cluster_layout`, `data_layout`,
`node_key`, `node_key.pub`.
- If you are running a filesystem such as ZFS or BTRFS that support
snapshotting, you can create a filesystem-level snapshot to be used as a
restoration point if needed.
- In other cases, make a backup using the old procedure: turn off each node
individually; back up its metadata folder (for instance, use the following
command if your metadata directory is `/var/lib/garage/meta`: `cd
/var/lib/garage ; tar -acf meta-v0.9.tar.zst meta/`); turn it back on
again. This will allow you to take a backup of all nodes without
impacting global cluster availability. You can do all nodes of a single
zone at once as this does not impact the availability of Garage.
3. Prepare your updated binaries and configuration files for Garage v1.0
4. Shut down all v0.9 nodes simultaneously, and restart them all simultaneously
in v1.0. Use your favorite deployment tool (Ansible, Kubernetes, Nomad) to
achieve this as fast as possible. Garage v1.0 should be in a working state
as soon as enough nodes have started.
5. Monitor your cluster in the following hours to see if it works well under
your production load.

View file

@ -28,11 +28,11 @@
},
"flake-compat": {
"locked": {
"lastModified": 1688025799,
"narHash": "sha256-ktpB4dRtnksm9F5WawoIkEneh1nrEvuxb5lJFt1iOyw=",
"lastModified": 1717312683,
"narHash": "sha256-FrlieJH50AuvagamEvWMIE6D2OAnERuDboFDYAED/dE=",
"owner": "nix-community",
"repo": "flake-compat",
"rev": "8bf105319d44f6b9f0d764efa4fdef9f1cc9ba1c",
"rev": "38fd3954cf65ce6faf3d0d45cd26059e059f07ea",
"type": "github"
},
"original": {
@ -42,33 +42,12 @@
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1681202837,
"narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
"lastModified": 1659877975,
"narHash": "sha256-zllb8aq3YO3h8B/U0/J1WBgAL8EX5yWf5pMj3G0NAmc=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "cfacdce06f30d2b68473a46042957675eebb3401",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_2": {
"inputs": {
"systems": "systems_2"
},
"locked": {
"lastModified": 1681202837,
"narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "cfacdce06f30d2b68473a46042957675eebb3401",
"rev": "c0e246b9b83f637f4681389ecabcb2681b4f3af0",
"type": "github"
},
"original": {
@ -79,11 +58,11 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1682109806,
"narHash": "sha256-d9g7RKNShMLboTWwukM+RObDWWpHKaqTYXB48clBWXI=",
"lastModified": 1724395761,
"narHash": "sha256-zRkDV/nbrnp3Y8oCADf5ETl1sDrdmAW6/bBVJ8EbIdQ=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "2362848adf8def2866fabbffc50462e929d7fffb",
"rev": "ae815cee91b417be55d43781eb4b73ae1ecc396c",
"type": "github"
},
"original": {
@ -95,17 +74,17 @@
},
"nixpkgs_2": {
"locked": {
"lastModified": 1707091808,
"narHash": "sha256-LahKBAfGbY836gtpVNnWwBTIzN7yf/uYM/S0g393r0Y=",
"lastModified": 1724681257,
"narHash": "sha256-EJRuc5Qp7yfXko5ZNeEMYAs4DzAvkCyALuJ/tGllhN4=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "9f2ee8c91ac42da3ae6c6a1d21555f283458247e",
"rev": "0239aeb2f82ea27ccd6b61582b8f7fb8750eeada",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "9f2ee8c91ac42da3ae6c6a1d21555f283458247e",
"rev": "0239aeb2f82ea27ccd6b61582b8f7fb8750eeada",
"type": "github"
}
},
@ -122,15 +101,14 @@
},
"rust-overlay": {
"inputs": {
"flake-utils": "flake-utils_2",
"nixpkgs": "nixpkgs"
},
"locked": {
"lastModified": 1707271822,
"narHash": "sha256-/DZsoPH5GBzOpVEGz5PgJ7vh8Q6TcrJq5u8FcBjqAfI=",
"lastModified": 1724638882,
"narHash": "sha256-ap2jIQi/FuUHR6HCht6ASWhoz8EiB99XmI8Esot38VE=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "7a94fe7690d2bdfe1aab475382a505e14dc114a6",
"rev": "19b70f147b9c67a759e35824b241f1ed92e46694",
"type": "github"
},
"original": {
@ -138,36 +116,6 @@
"repo": "rust-overlay",
"type": "github"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_2": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",

View file

@ -2,9 +2,9 @@
description =
"Garage, an S3-compatible distributed object store for self-hosted deployments";
# Nixpkgs 23.11 as of 2024-02-07, has rustc v1.73
# Nixpkgs 24.05 as of 2024-08-26 has rustc v1.77
inputs.nixpkgs.url =
"github:NixOS/nixpkgs/9f2ee8c91ac42da3ae6c6a1d21555f283458247e";
"github:NixOS/nixpkgs/0239aeb2f82ea27ccd6b61582b8f7fb8750eeada";
inputs.flake-compat.url = "github:nix-community/flake-compat";
@ -17,9 +17,9 @@
# - rustc v1.66
# url = "github:cargo2nix/cargo2nix/8fb57a670f7993bfc24099c33eb9c5abb51f29a2";
# Rust overlay as of 2024-02-07
# Rust overlay as of 2024-08-26
inputs.rust-overlay.url =
"github:oxalica/rust-overlay/7a94fe7690d2bdfe1aab475382a505e14dc114a6";
"github:oxalica/rust-overlay/19b70f147b9c67a759e35824b241f1ed92e46694";
inputs.nixpkgs.follows = "nixpkgs";
inputs.flake-compat.follows = "flake-compat";
@ -76,6 +76,7 @@
# import the full shell using `nix develop .#full`
full = shellWithPackages (with pkgs; [
rustfmt
rust-analyzer
clang
mold
# ---- extra packages for dev tasks ----

View file

@ -20,7 +20,7 @@ let
};
toolchainOptions = {
rustVersion = "1.73.0";
rustVersion = "1.77.0";
extraRustComponents = [ "clippy" ];
};

View file

@ -15,7 +15,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.4.1
version: 0.5.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

View file

@ -11,6 +11,7 @@ spec:
{{- if eq .Values.deployment.kind "StatefulSet" }}
replicas: {{ .Values.deployment.replicaCount }}
serviceName: {{ include "garage.fullname" . }}
podManagementPolicy: {{ .Values.deployment.podManagementPolicy }}
{{- end }}
template:
metadata:
@ -63,6 +64,10 @@ spec:
name: web-api
- containerPort: 3903
name: admin
{{- with .Values.environment }}
env:
{{- toYaml . | nindent 12 }}
{{- end }}
volumeMounts:
- name: meta
mountPath: /mnt/meta

View file

@ -96,6 +96,8 @@ deployment:
kind: StatefulSet
# Number of StatefulSet replicas/garage nodes to start
replicaCount: 3
# If using statefulset, allow Parallel or OrderedReady (default)
podManagementPolicy: OrderedReady
image:
repository: dxflrs/amd64_garage
@ -214,6 +216,8 @@ tolerations: []
affinity: {}
environment: {}
monitoring:
metrics:
# If true, a service for monitoring is created with a prometheus.io/scrape annotation

View file

@ -30,11 +30,11 @@ Vagrant.configure("2") do |config|
config.vm.define "n6" do |config| vm(config, "n6", "192.168.56.26") end
config.vm.define "n7" do |config| vm(config, "n7", "192.168.56.27") end
config.vm.define "n8" do |config| vm(config, "n8", "192.168.56.28") end
config.vm.define "n9" do |config| vm(config, "n9", "192.168.56.29") end
config.vm.define "n10" do |config| vm(config, "n10", "192.168.56.30") end
config.vm.define "n11" do |config| vm(config, "n11", "192.168.56.31") end
config.vm.define "n12" do |config| vm(config, "n12", "192.168.56.32") end
config.vm.define "n13" do |config| vm(config, "n13", "192.168.56.33") end
config.vm.define "n14" do |config| vm(config, "n14", "192.168.56.34") end
#config.vm.define "n8" do |config| vm(config, "n8", "192.168.56.28") end
#config.vm.define "n9" do |config| vm(config, "n9", "192.168.56.29") end
#config.vm.define "n10" do |config| vm(config, "n10", "192.168.56.30") end
#config.vm.define "n11" do |config| vm(config, "n11", "192.168.56.31") end
#config.vm.define "n12" do |config| vm(config, "n12", "192.168.56.32") end
#config.vm.define "n13" do |config| vm(config, "n13", "192.168.56.33") end
#config.vm.define "n14" do |config| vm(config, "n14", "192.168.56.34") end
end

View file

@ -3,11 +3,10 @@
set -x
#for ppatch in task3c task3a tsfix2; do
for ppatch in tsfix2; do
for ppatch in v093 v1rc1; do
#for psc in c cp cdp r pr cpr dpr; do
for psc in cdp r pr cpr dpr; do
#for ptsk in reg2 set1 set2; do
for ptsk in set1; do
for ptsk in reg2 set2; do
for psc in c cp cdp r pr cpr dpr; do
for irun in $(seq 10); do
lein run test --nodes-file nodes.vagrant \
--time-limit 60 --rate 100 --concurrency 100 --ops-per-key 100 \

View file

@ -38,7 +38,9 @@
"tsfix2" "c82d91c6bccf307186332b6c5c6fc0b128b1b2b1"
"task3a" "707442f5de416fdbed4681a33b739f0a787b7834"
"task3b" "431b28e0cfdc9cac6c649193cf602108a8b02997"
"task3c" "0041b013a473e3ae72f50209d8f79db75a72848b"})
"task3c" "0041b013a473e3ae72f50209d8f79db75a72848b"
"v093" "v0.9.3"
"v1rc1" "v1.0.0-rc1"})
(def cli-opts
"Additional command line options."

View file

@ -43,7 +43,7 @@
"rpc_bind_addr = \"0.0.0.0:3901\"\n"
"rpc_public_addr = \"" node ":3901\"\n"
"db_engine = \"lmdb\"\n"
"replication_mode = \"2\"\n"
"replication_mode = \"3\"\n"
"data_dir = \"" data-dir "\"\n"
"metadata_dir = \"" meta-dir "\"\n"
"[s3_api]\n"

View file

@ -11,6 +11,7 @@ in
{
# --- Dev shell inherited from flake.nix ---
devShell = devShells.default;
devShellFull = devShells.full;
# --- Continuous integration shell ---
# The shell used for all CI jobs (along with devShell)

View file

@ -2,6 +2,7 @@ use std::convert::Infallible;
use std::fs::{self, Permissions};
use std::os::unix::fs::PermissionsExt;
use std::sync::Arc;
use std::time::Duration;
use async_trait::async_trait;
@ -19,6 +20,7 @@ use hyper_util::rt::TokioIo;
use tokio::io::{AsyncRead, AsyncWrite};
use tokio::net::{TcpListener, TcpStream, UnixListener, UnixStream};
use tokio::sync::watch;
use tokio::time::{sleep_until, Instant};
use opentelemetry::{
global,
@ -291,7 +293,7 @@ where
let connection_collector = tokio::spawn({
let server_name = server_name.clone();
async move {
let mut connections = FuturesUnordered::new();
let mut connections = FuturesUnordered::<tokio::task::JoinHandle<()>>::new();
loop {
let collect_next = async {
if connections.is_empty() {
@ -312,23 +314,34 @@ where
}
}
}
if !connections.is_empty() {
let deadline = Instant::now() + Duration::from_secs(10);
while !connections.is_empty() {
info!(
"{} server: {} connections still open",
"{} server: {} connections still open, deadline in {:.2}s",
server_name,
connections.len()
connections.len(),
(deadline - Instant::now()).as_secs_f32(),
);
while let Some(conn_res) = connections.next().await {
trace!(
"{} server: HTTP connection finished: {:?}",
server_name,
conn_res
);
info!(
"{} server: {} connections still open",
server_name,
connections.len()
);
tokio::select! {
conn_res = connections.next() => {
trace!(
"{} server: HTTP connection finished: {:?}",
server_name,
conn_res.unwrap(),
);
}
_ = sleep_until(deadline) => {
warn!("{} server: exit deadline reached with {} connections still open, killing them now",
server_name,
connections.len());
for conn in connections.iter() {
conn.abort();
}
for conn in connections {
assert!(conn.await.unwrap_err().is_cancelled());
}
break;
}
}
}
}

View file

@ -71,21 +71,11 @@ pub async fn handle_post_object(
}
if let Ok(content) = HeaderValue::from_str(&field.text().await?) {
match name.as_str() {
"tag" => (/* tag need to be reencoded, but we don't support them yet anyway */),
"acl" => {
if params.insert("x-amz-acl", content).is_some() {
return Err(Error::bad_request("Field 'acl' provided more than once"));
}
}
_ => {
if params.insert(&name, content).is_some() {
return Err(Error::bad_request(format!(
"Field '{}' provided more than once",
name
)));
}
}
if params.insert(&name, content).is_some() {
return Err(Error::bad_request(format!(
"Field '{}' provided more than once",
name
)));
}
}
};
@ -222,6 +212,8 @@ pub async fn handle_post_object(
)));
}
// if we ever start supporting ACLs, we likely want to map "acl" to x-amz-acl" somewhere
// arround here to make sure the rest of the machinery takes our acl into account.
let headers = get_headers(&params)?;
let expected_checksums = ExpectedChecksums {

View file

@ -211,16 +211,12 @@ impl Tree {
/// Returns the old value if there was one
#[inline]
pub fn insert<T: AsRef<[u8]>, U: AsRef<[u8]>>(
&self,
key: T,
value: U,
) -> Result<Option<Value>> {
pub fn insert<T: AsRef<[u8]>, U: AsRef<[u8]>>(&self, key: T, value: U) -> Result<()> {
self.0.insert(self.1, key.as_ref(), value.as_ref())
}
/// Returns the old value if there was one
#[inline]
pub fn remove<T: AsRef<[u8]>>(&self, key: T) -> Result<Option<Value>> {
pub fn remove<T: AsRef<[u8]>>(&self, key: T) -> Result<()> {
self.0.remove(self.1, key.as_ref())
}
/// Clears all values from the tree
@ -278,12 +274,12 @@ impl<'a> Transaction<'a> {
tree: &Tree,
key: T,
value: U,
) -> TxOpResult<Option<Value>> {
) -> TxOpResult<()> {
self.tx.insert(tree.1, key.as_ref(), value.as_ref())
}
/// Returns the old value if there was one
#[inline]
pub fn remove<T: AsRef<[u8]>>(&mut self, tree: &Tree, key: T) -> TxOpResult<Option<Value>> {
pub fn remove<T: AsRef<[u8]>>(&mut self, tree: &Tree, key: T) -> TxOpResult<()> {
self.tx.remove(tree.1, key.as_ref())
}
/// Clears all values in a tree
@ -339,8 +335,8 @@ pub(crate) trait IDb: Send + Sync {
fn get(&self, tree: usize, key: &[u8]) -> Result<Option<Value>>;
fn len(&self, tree: usize) -> Result<usize>;
fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<Option<Value>>;
fn remove(&self, tree: usize, key: &[u8]) -> Result<Option<Value>>;
fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<()>;
fn remove(&self, tree: usize, key: &[u8]) -> Result<()>;
fn clear(&self, tree: usize) -> Result<()>;
fn iter(&self, tree: usize) -> Result<ValueIter<'_>>;
@ -366,8 +362,8 @@ pub(crate) trait ITx {
fn get(&self, tree: usize, key: &[u8]) -> TxOpResult<Option<Value>>;
fn len(&self, tree: usize) -> TxOpResult<usize>;
fn insert(&mut self, tree: usize, key: &[u8], value: &[u8]) -> TxOpResult<Option<Value>>;
fn remove(&mut self, tree: usize, key: &[u8]) -> TxOpResult<Option<Value>>;
fn insert(&mut self, tree: usize, key: &[u8], value: &[u8]) -> TxOpResult<()>;
fn remove(&mut self, tree: usize, key: &[u8]) -> TxOpResult<()>;
fn clear(&mut self, tree: usize) -> TxOpResult<()>;
fn iter(&self, tree: usize) -> TxOpResult<TxValueIter<'_>>;

View file

@ -132,22 +132,20 @@ impl IDb for LmdbDb {
Ok(tree.len(&tx)?.try_into().unwrap())
}
fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<Option<Value>> {
fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<()> {
let tree = self.get_tree(tree)?;
let mut tx = self.db.write_txn()?;
let old_val = tree.get(&tx, key)?.map(Vec::from);
tree.put(&mut tx, key, value)?;
tx.commit()?;
Ok(old_val)
Ok(())
}
fn remove(&self, tree: usize, key: &[u8]) -> Result<Option<Value>> {
fn remove(&self, tree: usize, key: &[u8]) -> Result<()> {
let tree = self.get_tree(tree)?;
let mut tx = self.db.write_txn()?;
let old_val = tree.get(&tx, key)?.map(Vec::from);
tree.delete(&mut tx, key)?;
tx.commit()?;
Ok(old_val)
Ok(())
}
fn clear(&self, tree: usize) -> Result<()> {
@ -254,17 +252,15 @@ impl<'a> ITx for LmdbTx<'a> {
Ok(tree.len(&self.tx)? as usize)
}
fn insert(&mut self, tree: usize, key: &[u8], value: &[u8]) -> TxOpResult<Option<Value>> {
fn insert(&mut self, tree: usize, key: &[u8], value: &[u8]) -> TxOpResult<()> {
let tree = *self.get_tree(tree)?;
let old_val = tree.get(&self.tx, key)?.map(Vec::from);
tree.put(&mut self.tx, key, value)?;
Ok(old_val)
Ok(())
}
fn remove(&mut self, tree: usize, key: &[u8]) -> TxOpResult<Option<Value>> {
fn remove(&mut self, tree: usize, key: &[u8]) -> TxOpResult<()> {
let tree = *self.get_tree(tree)?;
let old_val = tree.get(&self.tx, key)?.map(Vec::from);
tree.delete(&mut self.tx, key)?;
Ok(old_val)
Ok(())
}
fn clear(&mut self, tree: usize) -> TxOpResult<()> {
let tree = *self.get_tree(tree)?;

View file

@ -36,7 +36,7 @@ impl std::str::FromStr for Engine {
match text {
"lmdb" | "heed" => Ok(Self::Lmdb),
"sqlite" | "sqlite3" | "rusqlite" => Ok(Self::Sqlite),
"sled" => Err(Error("Sled is no longer supported as a database engine. Converting your old metadata db can be done using an older Garage binary (e.g. v0.9.3).".into())),
"sled" => Err(Error("Sled is no longer supported as a database engine. Converting your old metadata db can be done using an older Garage binary (e.g. v0.9.4).".into())),
kind => Err(Error(
format!(
"Invalid DB engine: {} (options are: lmdb, sqlite)",
@ -92,6 +92,7 @@ pub fn open_db(path: &PathBuf, engine: Engine, opt: &OpenOpt) -> Result<Db> {
env_builder.map_size(map_size);
env_builder.max_readers(2048);
unsafe {
env_builder.flag(crate::lmdb_adapter::heed::flags::Flags::MdbNoRdAhead);
env_builder.flag(crate::lmdb_adapter::heed::flags::Flags::MdbNoMetaSync);
if !opt.fsync {
env_builder.flag(heed::flags::Flags::MdbNoSync);

View file

@ -169,7 +169,7 @@ impl IDb for SqliteDb {
}
}
fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<Option<Value>> {
fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<()> {
let tree = self.get_tree(tree)?;
let db = self.db.get()?;
let lock = self.write_lock.lock();
@ -184,23 +184,18 @@ impl IDb for SqliteDb {
assert_eq!(n, 1);
drop(lock);
Ok(old_val)
Ok(())
}
fn remove(&self, tree: usize, key: &[u8]) -> Result<Option<Value>> {
fn remove(&self, tree: usize, key: &[u8]) -> Result<()> {
let tree = self.get_tree(tree)?;
let db = self.db.get()?;
let lock = self.write_lock.lock();
let old_val = self.internal_get(&db, &tree, key)?;
if old_val.is_some() {
let n = db.execute(&format!("DELETE FROM {} WHERE k = ?1", tree), params![key])?;
assert_eq!(n, 1);
}
db.execute(&format!("DELETE FROM {} WHERE k = ?1", tree), params![key])?;
drop(lock);
Ok(old_val)
Ok(())
}
fn clear(&self, tree: usize) -> Result<()> {
@@ -341,31 +336,17 @@ impl<'a> ITx for SqliteTx<'a> {
}
}
fn insert(&mut self, tree: usize, key: &[u8], value: &[u8]) -> TxOpResult<Option<Value>> {
fn insert(&mut self, tree: usize, key: &[u8], value: &[u8]) -> TxOpResult<()> {
let tree = self.get_tree(tree)?;
let old_val = self.internal_get(tree, key)?;
let sql = match &old_val {
Some(_) => format!("UPDATE {} SET v = ?2 WHERE k = ?1", tree),
None => format!("INSERT INTO {} (k, v) VALUES (?1, ?2)", tree),
};
let n = self.tx.execute(&sql, params![key, value])?;
assert_eq!(n, 1);
Ok(old_val)
let sql = format!("INSERT OR REPLACE INTO {} (k, v) VALUES (?1, ?2)", tree);
self.tx.execute(&sql, params![key, value])?;
Ok(())
}
fn remove(&mut self, tree: usize, key: &[u8]) -> TxOpResult<Option<Value>> {
fn remove(&mut self, tree: usize, key: &[u8]) -> TxOpResult<()> {
let tree = self.get_tree(tree)?;
let old_val = self.internal_get(tree, key)?;
if old_val.is_some() {
let n = self
.tx
.execute(&format!("DELETE FROM {} WHERE k = ?1", tree), params![key])?;
assert_eq!(n, 1);
}
Ok(old_val)
self.tx
.execute(&format!("DELETE FROM {} WHERE k = ?1", tree), params![key])?;
Ok(())
}
fn clear(&mut self, tree: usize) -> TxOpResult<()> {
let tree = self.get_tree(tree)?;
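The SqliteTx path above collapses the old read-then-UPDATE/INSERT dance into a single INSERT OR REPLACE. A standalone rusqlite sketch (not Garage code) of the upsert semantics this relies on, assuming the key column has a uniqueness constraint as in the adapter's tables:

    use rusqlite::{params, Connection};

    fn main() -> rusqlite::Result<()> {
        let conn = Connection::open_in_memory()?;
        // The PRIMARY KEY is what lets INSERT OR REPLACE act as an upsert.
        conn.execute("CREATE TABLE tree (k BLOB PRIMARY KEY, v BLOB)", [])?;
        // The first write inserts the row...
        conn.execute(
            "INSERT OR REPLACE INTO tree (k, v) VALUES (?1, ?2)",
            params![b"key".to_vec(), b"v1".to_vec()],
        )?;
        // ...the second replaces it in place, with no prior SELECT needed.
        conn.execute(
            "INSERT OR REPLACE INTO tree (k, v) VALUES (?1, ?2)",
            params![b"key".to_vec(), b"v2".to_vec()],
        )?;
        let v: Vec<u8> = conn.query_row(
            "SELECT v FROM tree WHERE k = ?1",
            params![b"key".to_vec()],
            |row| row.get(0),
        )?;
        assert_eq!(v, b"v2".to_vec());
        Ok(())
    }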

@@ -12,7 +12,7 @@ fn test_suite(db: Db) {
// ---- test simple insert/delete ----
assert!(tree.insert(ka, va).unwrap().is_none());
assert!(tree.insert(ka, va).is_ok());
assert_eq!(tree.get(ka).unwrap().unwrap(), va);
assert_eq!(tree.len().unwrap(), 1);
@@ -21,7 +21,7 @@ fn test_suite(db: Db) {
let res = db.transaction::<_, (), _>(|tx| {
assert_eq!(tx.get(&tree, ka).unwrap().unwrap(), va);
assert_eq!(tx.insert(&tree, ka, vb).unwrap().unwrap(), va);
assert_eq!(tx.insert(&tree, ka, vb).unwrap(), ());
assert_eq!(tx.get(&tree, ka).unwrap().unwrap(), vb);
@@ -33,7 +33,7 @@ fn test_suite(db: Db) {
let res = db.transaction::<(), _, _>(|tx| {
assert_eq!(tx.get(&tree, ka).unwrap().unwrap(), vb);
assert_eq!(tx.insert(&tree, ka, vc).unwrap().unwrap(), vb);
assert_eq!(tx.insert(&tree, ka, vc).unwrap(), ());
assert_eq!(tx.get(&tree, ka).unwrap().unwrap(), vc);
@@ -50,7 +50,7 @@ fn test_suite(db: Db) {
assert!(iter.next().is_none());
drop(iter);
assert!(tree.insert(kb, vc).unwrap().is_none());
assert!(tree.insert(kb, vc).is_ok());
assert_eq!(tree.get(kb).unwrap().unwrap(), vc);
let mut iter = tree.iter().unwrap();

@@ -24,6 +24,7 @@ pub struct ConvertDbOpt {
output_engine: Engine,
#[structopt(flatten)]
#[allow(dead_code)]
db_open: OpenDbOpt,
}
@@ -52,6 +53,7 @@ pub(crate) fn do_conversion(args: ConvertDbOpt) -> Result<()> {
}
let opt = OpenOpt {
#[cfg(feature = "lmdb")]
lmdb_map_size: args.db_open.lmdb.map_size.map(|x| x.as_u64() as usize),
..Default::default()
};

@@ -48,7 +48,7 @@ pub enum Command {
#[structopt(name = "worker", version = garage_version())]
Worker(WorkerOperation),
/// Low-level debug operations on data blocks
/// Low-level node-local debug operations on data blocks
#[structopt(name = "block", version = garage_version())]
Block(BlockOperation),

@@ -141,7 +141,7 @@ impl Garage {
)?)
.ok()
.and_then(|x| NetworkKey::from_slice(&x))
.ok_or_message("Invalid RPC secret key")?;
.ok_or_message("Invalid RPC secret key: expected 32 bits of entropy, please check the documentation for requirements")?;
let (replication_factor, consistency_mode) = parse_replication_mode(&config)?;

@@ -24,6 +24,7 @@ bytes.workspace = true
bytesize.workspace = true
gethostname.workspace = true
hex.workspace = true
ipnet.workspace = true
tracing.workspace = true
rand.workspace = true
itertools.workspace = true

@@ -227,24 +227,29 @@ impl LayoutHistory {
// ================== updates to layout, public interface ===================
pub fn merge(&mut self, other: &LayoutHistory) -> bool {
// If our current layout version is completely out-of-date,
// forget everything we know and replace it by incoming layout data.
if self.current().version < other.min_stored() {
*self = other.clone();
return true;
}
let mut changed = false;
// Add any new versions to history
for v2 in other.versions.iter() {
if let Some(v1) = self.versions.iter().find(|v| v.version == v2.version) {
if v2.version == self.current().version + 1 {
// This is the next version, add it to our version list
self.versions.push(v2.clone());
changed = true;
} else if let Some(v1) = self.versions.iter().find(|v| v.version == v2.version) {
// Version is already present, check consistency
if v1 != v2 {
error!("Inconsistent layout histories: different layout compositions for version {}. Your cluster will be broken as long as this layout version is not replaced.", v2.version);
}
} else if self.versions.iter().all(|v| v.version != v2.version - 1) {
error!(
"Cannot receive new layout version {}, version {} is missing",
v2.version,
v2.version - 1
);
} else {
self.versions.push(v2.clone());
changed = true;
// This is an older version
assert!(v2.version < self.min_stored());
}
}
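The merge rule above only ever appends the direct successor of the newest locally known version; already-known versions are cross-checked for consistency, and anything older than the local history is dropped. A self-contained sketch of that acceptance logic on simplified, hypothetical types (not Garage's actual LayoutHistory):

    // Simplified stand-in for a layout version, for illustration only.
    #[derive(Clone, Debug, PartialEq)]
    struct Version {
        version: u64,
        data: String,
    }

    fn merge(ours: &mut Vec<Version>, theirs: &[Version]) -> bool {
        let mut changed = false;
        for v2 in theirs {
            let newest = ours.last().map(|v| v.version).unwrap_or(0);
            if v2.version == newest + 1 {
                // Only the direct successor of our newest version is appended,
                // so consecutive incoming versions can still chain one by one.
                ours.push(v2.clone());
                changed = true;
            } else if let Some(v1) = ours.iter().find(|v| v.version == v2.version) {
                // Already known: divergent contents indicate an inconsistent history.
                if v1 != v2 {
                    eprintln!("inconsistent layout data for version {}", v2.version);
                }
            }
            // Everything else lies outside the window we keep and is left alone.
        }
        changed
    }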

@@ -844,12 +844,20 @@ impl NodeStatus {
}
}
fn get_default_ip() -> Option<IpAddr> {
/// Find the first currently available IP address on an up, non-loopback
/// interface, optionally filtered to be inside a given IpNet.
fn get_default_ip(filter_ipnet: Option<ipnet::IpNet>) -> Option<IpAddr> {
pnet_datalink::interfaces()
.iter()
.find(|e| e.is_up() && !e.is_loopback() && !e.ips.is_empty())
.and_then(|e| e.ips.first())
.map(|a| a.ip())
.into_iter()
// filter out down and loopback interfaces
.filter(|i| i.is_up() && !i.is_loopback())
// get all IPs
.flat_map(|e| e.ips)
// optionally, filter to be inside filter_ipnet
.find(|ipn| {
filter_ipnet.is_some_and(|ipnet| ipnet.contains(&ipn.ip())) || filter_ipnet.is_none()
})
.map(|ipn| ipn.ip())
}
fn get_rpc_public_addr(config: &Config) -> Option<SocketAddr> {
@@ -877,7 +885,28 @@ fn get_rpc_public_addr(config: &Config) -> Option<SocketAddr> {
}
}
None => {
let addr = get_default_ip().map(|ip| SocketAddr::new(ip, config.rpc_bind_addr.port()));
// No `rpc_public_addr` specified, try to discover one, optionally filtering by `rpc_public_addr_subnet`.
let filter_subnet: Option<ipnet::IpNet> = config
.rpc_public_addr_subnet
.as_ref()
.and_then(|filter_subnet_str| match filter_subnet_str.parse::<ipnet::IpNet>() {
Ok(filter_subnet) => {
let filter_subnet_trunc = filter_subnet.trunc();
if filter_subnet_trunc != filter_subnet {
warn!("`rpc_public_addr_subnet` changed after applying netmask, continuing with {}", filter_subnet.trunc());
}
Some(filter_subnet_trunc)
}
Err(e) => {
panic!(
"Cannot parse rpc_public_addr_subnet {} from config file: {}. Bailing out.",
filter_subnet_str, e
);
}
});
let addr = get_default_ip(filter_subnet)
.map(|ip| SocketAddr::new(ip, config.rpc_bind_addr.port()));
if let Some(a) = addr {
warn!("Using autodetected rpc_public_addr: {}. Consider specifying it explicitly in configuration file if possible.", a);
}
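For reference, a standalone sketch of the subnet handling introduced above, assuming only the ipnet crate (no Garage config types): trunc() canonicalizes a subnet written with host bits set, which is what triggers the warning, and contains() is the membership test used to narrow the autodetected addresses.

    use std::net::IpAddr;

    fn main() {
        // A subnet written with host bits set, as a user might put in the config.
        let subnet: ipnet::IpNet = "192.168.1.10/24".parse().unwrap();
        // trunc() zeroes the host bits; this is the canonical form continued with.
        assert_eq!(subnet.trunc().to_string(), "192.168.1.0/24");

        let inside: IpAddr = "192.168.1.42".parse().unwrap();
        let outside: IpAddr = "10.0.0.1".parse().unwrap();
        // contains() filters candidate addresses down to the configured subnet.
        assert!(subnet.contains(&inside));
        assert!(!subnet.contains(&outside));
    }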

@@ -85,6 +85,10 @@ pub struct Config {
/// Public IP address of this node
pub rpc_public_addr: Option<String>,
/// In case `rpc_public_addr` was not set, this can filter
/// the addresses announced to other peers to a specific subnet.
pub rpc_public_addr_subnet: Option<String>,
/// Timeout for Netapp's ping messages
pub rpc_ping_timeout_msec: Option<u64>,
/// Timeout for Netapp RPC calls