Compare commits


No commits in common. "c7f5dcd953ff1fdfa002a8bccfb43eafcc6fddd4" and "1243db87f2090a3302c7c8beb386e68ddf9b66b5" have entirely different histories.

122 changed files with 2487 additions and 27961 deletions


@@ -20,7 +20,6 @@ steps:
 image: nixpkgs/nix:nixos-22.05
 environment:
 GARAGE_TEST_INTEGRATION_EXE: result-bin/bin/garage
-GARAGE_TEST_INTEGRATION_PATH: tmp-garage-integration
 commands:
 - nix-build --no-build-output --attr clippy.amd64 --argstr git_version ${DRONE_TAG:-$DRONE_COMMIT}
 - nix-build --no-build-output --attr test.amd64
@@ -32,9 +31,8 @@ steps:
 - ./result/bin/garage_util-*
 - ./result/bin/garage_web-*
 - ./result/bin/garage-*
-- ./result/bin/integration-* || (cat tmp-garage-integration/stderr.log; false)
+- ./result/bin/integration-*
 - rm result
-- rm -rv tmp-garage-integration
 - name: integration tests
 image: nixpkgs/nix:nixos-22.05
@@ -65,16 +63,11 @@ steps:
 - nix-build --no-build-output --attr pkgs.amd64.release --argstr git_version ${DRONE_TAG:-$DRONE_COMMIT}
 - nix-shell --attr rust --run "./script/not-dynamic.sh result-bin/bin/garage"
-- name: integration tests
+- name: integration
 image: nixpkgs/nix:nixos-22.05
 commands:
 - nix-shell --attr integration --run ./script/test-smoke.sh || (cat /tmp/garage.log; false)
-- name: upgrade tests
-image: nixpkgs/nix:nixos-22.05
-commands:
-- nix-shell --attr integration --run "./script/test-upgrade.sh v0.8.4 x86_64-unknown-linux-musl" || (cat /tmp/garage.log; false)
 - name: push static binary
 image: nixpkgs/nix:nixos-22.05
 environment:
@@ -121,16 +114,11 @@ steps:
 - nix-build --no-build-output --attr pkgs.i386.release --argstr git_version ${DRONE_TAG:-$DRONE_COMMIT}
 - nix-shell --attr rust --run "./script/not-dynamic.sh result-bin/bin/garage"
-- name: integration tests
+- name: integration
 image: nixpkgs/nix:nixos-22.05
 commands:
 - nix-shell --attr integration --run ./script/test-smoke.sh || (cat /tmp/garage.log; false)
-- name: upgrade tests
-image: nixpkgs/nix:nixos-22.05
-commands:
-- nix-shell --attr integration --run "./script/test-upgrade.sh v0.8.4 i686-unknown-linux-musl" || (cat /tmp/garage.log; false)
 - name: push static binary
 image: nixpkgs/nix:nixos-22.05
 environment:
@@ -295,6 +283,6 @@ trigger:
 ---
 kind: signature
-hmac: 0c4b57eb4b27b7c6a6ff21ab87f0767fe3eb90f5d95d5cbcdccf794e9d2a5d86
+hmac: ac09a5a8c82502f67271f93afa1e1e21ce66383b8e24a6deb26b285cc1c378ba
 ...

Cargo.lock generated

@@ -1198,7 +1198,7 @@ dependencies = [
 [[package]]
 name = "garage"
-version = "0.9.0"
+version = "0.8.4"
 dependencies = [
 "assert-json-diff",
 "async-trait",
@@ -1249,7 +1249,7 @@ dependencies = [
 [[package]]
 name = "garage_api"
-version = "0.9.0"
+version = "0.8.4"
 dependencies = [
 "async-trait",
 "base64 0.21.3",
@@ -1295,13 +1295,12 @@ dependencies = [
 [[package]]
 name = "garage_block"
-version = "0.9.0"
+version = "0.8.4"
 dependencies = [
 "arc-swap",
 "async-compression",
 "async-trait",
 "bytes",
-"bytesize",
 "futures",
 "futures-util",
 "garage_db",
@@ -1321,7 +1320,7 @@ dependencies = [
 [[package]]
 name = "garage_db"
-version = "0.9.0"
+version = "0.8.4"
 dependencies = [
 "clap 4.4.0",
 "err-derive",
@@ -1336,13 +1335,12 @@ dependencies = [
 [[package]]
 name = "garage_model"
-version = "0.9.0"
+version = "0.8.4"
 dependencies = [
 "arc-swap",
 "async-trait",
 "base64 0.21.3",
 "blake2",
-"chrono",
 "err-derive",
 "futures",
 "futures-util",
@@ -1364,21 +1362,18 @@ dependencies = [
 [[package]]
 name = "garage_rpc"
-version = "0.9.0"
+version = "0.8.4"
 dependencies = [
 "arc-swap",
 "async-trait",
 "bytes",
-"bytesize",
 "err-derive",
-"format_table",
 "futures",
 "futures-util",
 "garage_db",
 "garage_util",
 "gethostname",
 "hex",
-"itertools 0.10.5",
 "k8s-openapi",
 "kube",
 "kuska-sodiumoxide",
@@ -1399,7 +1394,7 @@ dependencies = [
 [[package]]
 name = "garage_table"
-version = "0.9.0"
+version = "0.8.4"
 dependencies = [
 "arc-swap",
 "async-trait",
@@ -1421,7 +1416,7 @@ dependencies = [
 [[package]]
 name = "garage_util"
-version = "0.9.0"
+version = "0.8.4"
 dependencies = [
 "arc-swap",
 "async-trait",
@@ -1455,7 +1450,7 @@ dependencies = [
 [[package]]
 name = "garage_web"
-version = "0.9.0"
+version = "0.8.4"
 dependencies = [
 "err-derive",
 "futures",
@@ -2288,9 +2283,9 @@ checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a"
 [[package]]
 name = "netapp"
-version = "0.10.0"
+version = "0.5.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0a00b76cec93e3ae68c9ed5f08e27a1507424987ee23d5ec961ebd4da820a265"
+checksum = "4ffe47ac46d3b2ce2f736a70865492df082e042eb2bfdddfca3b8dd66bd9469d"
 dependencies = [
 "arc-swap",
 "async-trait",
@@ -3037,9 +3032,9 @@ dependencies = [
 [[package]]
 name = "rmp-serde"
-version = "1.1.2"
+version = "0.15.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bffea85eea980d8a74453e5d02a8d93028f3c34725de143085a844ebe953258a"
+checksum = "723ecff9ad04f4ad92fe1c8ca6c20d2196d9286e9c60727c4cb5511629260e9d"
 dependencies = [
 "byteorder",
 "rmp",

Cargo.nix

@@ -7,13 +7,13 @@ args@{
 "garage_db/default"
 "garage_util/default"
 "garage_rpc/default"
-"format_table/default"
 "garage_table/default"
 "garage_block/default"
 "garage_model/default"
 "garage_api/default"
 "garage_web/default"
 "garage/default"
+"format_table/default"
 "k2v-client/default"
 ],
 rustPackages,
@@ -33,7 +33,7 @@ args@{
 ignoreLockHash,
 }:
 let
-nixifiedLockHash = "1a87886681a3ef0b83c95addc26674a538b8a93d35bc80db8998e1fcd0821f6c";
+nixifiedLockHash = "b73d35e98dc62acc3b01aba2cb825ba6e99217e46781b8c59f8e0ceef34e79d6";
 workspaceSrc = if args.workspaceSrc == null then ./. else args.workspaceSrc;
 currentLockHash = builtins.hashFile "sha256" (workspaceSrc + /Cargo.lock);
 lockHashIgnored = if ignoreLockHash
@@ -57,16 +57,16 @@ in
 {
 cargo2nixVersion = "0.11.0";
 workspace = {
-garage_db = rustPackages.unknown.garage_db."0.9.0";
-garage_util = rustPackages.unknown.garage_util."0.9.0";
-garage_rpc = rustPackages.unknown.garage_rpc."0.9.0";
+garage_db = rustPackages.unknown.garage_db."0.8.4";
+garage_util = rustPackages.unknown.garage_util."0.8.4";
+garage_rpc = rustPackages.unknown.garage_rpc."0.8.4";
+garage_table = rustPackages.unknown.garage_table."0.8.4";
+garage_block = rustPackages.unknown.garage_block."0.8.4";
+garage_model = rustPackages.unknown.garage_model."0.8.4";
+garage_api = rustPackages.unknown.garage_api."0.8.4";
+garage_web = rustPackages.unknown.garage_web."0.8.4";
+garage = rustPackages.unknown.garage."0.8.4";
 format_table = rustPackages.unknown.format_table."0.1.1";
-garage_table = rustPackages.unknown.garage_table."0.9.0";
-garage_block = rustPackages.unknown.garage_block."0.9.0";
-garage_model = rustPackages.unknown.garage_model."0.9.0";
-garage_api = rustPackages.unknown.garage_api."0.9.0";
-garage_web = rustPackages.unknown.garage_web."0.9.0";
-garage = rustPackages.unknown.garage."0.9.0";
 k2v-client = rustPackages.unknown.k2v-client."0.0.4";
 };
 "registry+https://github.com/rust-lang/crates.io-index".addr2line."0.21.0" = overridableMkRustCrate (profileName: rec {
@@ -98,12 +98,12 @@ in
 (lib.optional (rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "std")
 ];
 dependencies = {
-${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "cfg_if" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".cfg-if."1.0.0" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "cfg_if" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".cfg-if."1.0.0" { inherit profileName; }).out;
 ${ if rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "getrandom" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".getrandom."0.2.10" { inherit profileName; }).out;
-${ if (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") && !((hostPlatform.parsed.cpu.name == "armv6l" || hostPlatform.parsed.cpu.name == "armv7l") && hostPlatform.parsed.kernel.name == "none") then "once_cell" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".once_cell."1.18.0" { inherit profileName; }).out;
+${ if (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") && !((hostPlatform.parsed.cpu.name == "armv6l" || hostPlatform.parsed.cpu.name == "armv7l") && hostPlatform.parsed.kernel.name == "none") then "once_cell" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".once_cell."1.18.0" { inherit profileName; }).out;
 };
 buildDependencies = {
-${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "version_check" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".version_check."0.9.4" { profileName = "__noProfile"; }).out;
+${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "version_check" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".version_check."0.9.4" { profileName = "__noProfile"; }).out;
 };
 });
@@ -128,7 +128,7 @@ in
 registry = "registry+https://github.com/rust-lang/crates.io-index";
 src = fetchCratesIo { inherit name version; sha256 = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5"; };
 features = builtins.concatLists [
-(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "alloc")
+(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite") "alloc")
 ];
 });
@@ -824,7 +824,7 @@ in
 registry = "registry+https://github.com/rust-lang/crates.io-index";
 src = fetchCratesIo { inherit name version; sha256 = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad"; };
 dependencies = {
-${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "serde" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.188" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "serde" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.188" { inherit profileName; }).out;
 };
 });
@@ -888,8 +888,8 @@ in
 registry = "registry+https://github.com/rust-lang/crates.io-index";
 src = fetchCratesIo { inherit name version; sha256 = "17febce684fd15d89027105661fec94afb475cb995fbc59d2865198446ba2eea"; };
 features = builtins.concatLists [
-(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "extern_crate_alloc")
+(lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "extern_crate_alloc")
-(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "extern_crate_std")
+(lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "extern_crate_std")
 ];
 });
@@ -1182,13 +1182,13 @@ in
 registry = "registry+https://github.com/rust-lang/crates.io-index";
 src = fetchCratesIo { inherit name version; sha256 = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add"; };
 features = builtins.concatLists [
-(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "alloc")
+(lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "alloc")
-(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "default")
+(lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "default")
-(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "std")
+(lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "std")
 ];
 dependencies = {
-${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "cfg_if" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".cfg-if."1.0.0" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "cfg_if" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".cfg-if."1.0.0" { inherit profileName; }).out;
-${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "crossbeam_utils" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crossbeam-utils."0.8.16" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "crossbeam_utils" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crossbeam-utils."0.8.16" { inherit profileName; }).out;
 };
 });
@@ -1459,8 +1459,8 @@ in
 registry = "registry+https://github.com/rust-lang/crates.io-index";
 src = fetchCratesIo { inherit name version; sha256 = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7"; };
 features = builtins.concatLists [
-(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "default")
+(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite") "default")
-(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "std")
+(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite") "std")
 ];
 });
@@ -1705,9 +1705,9 @@ in
 };
 });
-"unknown".garage."0.9.0" = overridableMkRustCrate (profileName: rec {
+"unknown".garage."0.8.4" = overridableMkRustCrate (profileName: rec {
 name = "garage";
-version = "0.9.0";
+version = "0.8.4";
 registry = "unknown";
 src = fetchCrateLocal (workspaceSrc + "/src/garage");
 features = builtins.concatLists [
@@ -1716,13 +1716,13 @@ in
 (lib.optional (rootFeatures' ? "garage/default") "default")
 (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/k2v") "k2v")
 (lib.optional (rootFeatures' ? "garage/kubernetes-discovery") "kubernetes-discovery")
-(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb") "lmdb")
+(lib.optional (rootFeatures' ? "garage/lmdb") "lmdb")
 (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics") "metrics")
 (lib.optional (rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/telemetry-otlp") "opentelemetry-otlp")
 (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-prometheus") "opentelemetry-prometheus")
 (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/prometheus") "prometheus")
 (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled") "sled")
-(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite") "sqlite")
+(lib.optional (rootFeatures' ? "garage/sqlite") "sqlite")
 (lib.optional (rootFeatures' ? "garage/system-libs") "system-libs")
 (lib.optional (rootFeatures' ? "garage/telemetry-otlp") "telemetry-otlp")
 ];
@@ -1734,18 +1734,18 @@ in
 format_table = (rustPackages."unknown".format_table."0.1.1" { inherit profileName; }).out;
 futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.28" { inherit profileName; }).out;
 futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.28" { inherit profileName; }).out;
-garage_api = (rustPackages."unknown".garage_api."0.9.0" { inherit profileName; }).out;
+garage_api = (rustPackages."unknown".garage_api."0.8.4" { inherit profileName; }).out;
-garage_block = (rustPackages."unknown".garage_block."0.9.0" { inherit profileName; }).out;
+garage_block = (rustPackages."unknown".garage_block."0.8.4" { inherit profileName; }).out;
-garage_db = (rustPackages."unknown".garage_db."0.9.0" { inherit profileName; }).out;
+garage_db = (rustPackages."unknown".garage_db."0.8.4" { inherit profileName; }).out;
-garage_model = (rustPackages."unknown".garage_model."0.9.0" { inherit profileName; }).out;
+garage_model = (rustPackages."unknown".garage_model."0.8.4" { inherit profileName; }).out;
-garage_rpc = (rustPackages."unknown".garage_rpc."0.9.0" { inherit profileName; }).out;
+garage_rpc = (rustPackages."unknown".garage_rpc."0.8.4" { inherit profileName; }).out;
-garage_table = (rustPackages."unknown".garage_table."0.9.0" { inherit profileName; }).out;
+garage_table = (rustPackages."unknown".garage_table."0.8.4" { inherit profileName; }).out;
-garage_util = (rustPackages."unknown".garage_util."0.9.0" { inherit profileName; }).out;
+garage_util = (rustPackages."unknown".garage_util."0.8.4" { inherit profileName; }).out;
-garage_web = (rustPackages."unknown".garage_web."0.9.0" { inherit profileName; }).out;
+garage_web = (rustPackages."unknown".garage_web."0.8.4" { inherit profileName; }).out;
 git_version = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".git-version."0.3.5" { inherit profileName; }).out;
 hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
 sodiumoxide = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".kuska-sodiumoxide."0.2.5-0" { inherit profileName; }).out;
-netapp = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.10.0" { inherit profileName; }).out;
+netapp = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.5.2" { inherit profileName; }).out;
 opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
 ${ if rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/telemetry-otlp" then "opentelemetry_otlp" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry-otlp."0.10.0" { inherit profileName; }).out;
 ${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-prometheus" then "opentelemetry_prometheus" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry-prometheus."0.10.0" { inherit profileName; }).out;
@@ -1777,9 +1777,9 @@ in
 };
 });
-"unknown".garage_api."0.9.0" = overridableMkRustCrate (profileName: rec {
+"unknown".garage_api."0.8.4" = overridableMkRustCrate (profileName: rec {
 name = "garage_api";
-version = "0.9.0";
+version = "0.8.4";
 registry = "unknown";
 src = fetchCrateLocal (workspaceSrc + "/src/api");
 features = builtins.concatLists [
@@ -1798,11 +1798,11 @@ in
 form_urlencoded = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".form_urlencoded."1.2.0" { inherit profileName; }).out;
 futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.28" { inherit profileName; }).out;
 futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.28" { inherit profileName; }).out;
-garage_block = (rustPackages."unknown".garage_block."0.9.0" { inherit profileName; }).out;
+garage_block = (rustPackages."unknown".garage_block."0.8.4" { inherit profileName; }).out;
-garage_model = (rustPackages."unknown".garage_model."0.9.0" { inherit profileName; }).out;
+garage_model = (rustPackages."unknown".garage_model."0.8.4" { inherit profileName; }).out;
-garage_rpc = (rustPackages."unknown".garage_rpc."0.9.0" { inherit profileName; }).out;
+garage_rpc = (rustPackages."unknown".garage_rpc."0.8.4" { inherit profileName; }).out;
-garage_table = (rustPackages."unknown".garage_table."0.9.0" { inherit profileName; }).out;
+garage_table = (rustPackages."unknown".garage_table."0.8.4" { inherit profileName; }).out;
-garage_util = (rustPackages."unknown".garage_util."0.9.0" { inherit profileName; }).out;
+garage_util = (rustPackages."unknown".garage_util."0.8.4" { inherit profileName; }).out;
 hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
 hmac = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hmac."0.12.1" { inherit profileName; }).out;
 http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.9" { inherit profileName; }).out;
@@ -1832,9 +1832,9 @@ in
 };
 });
-"unknown".garage_block."0.9.0" = overridableMkRustCrate (profileName: rec {
+"unknown".garage_block."0.8.4" = overridableMkRustCrate (profileName: rec {
 name = "garage_block";
-version = "0.9.0";
+version = "0.8.4";
 registry = "unknown";
 src = fetchCrateLocal (workspaceSrc + "/src/block");
 features = builtins.concatLists [
@@ -1845,13 +1845,12 @@ in
 async_compression = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".async-compression."0.4.1" { inherit profileName; }).out;
 async_trait = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".async-trait."0.1.73" { profileName = "__noProfile"; }).out;
 bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.4.0" { inherit profileName; }).out;
-bytesize = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytesize."1.3.0" { inherit profileName; }).out;
 futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.28" { inherit profileName; }).out;
 futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.28" { inherit profileName; }).out;
-garage_db = (rustPackages."unknown".garage_db."0.9.0" { inherit profileName; }).out;
+garage_db = (rustPackages."unknown".garage_db."0.8.4" { inherit profileName; }).out;
-garage_rpc = (rustPackages."unknown".garage_rpc."0.9.0" { inherit profileName; }).out;
+garage_rpc = (rustPackages."unknown".garage_rpc."0.8.4" { inherit profileName; }).out;
-garage_table = (rustPackages."unknown".garage_table."0.9.0" { inherit profileName; }).out;
+garage_table = (rustPackages."unknown".garage_table."0.8.4" { inherit profileName; }).out;
-garage_util = (rustPackages."unknown".garage_util."0.9.0" { inherit profileName; }).out;
+garage_util = (rustPackages."unknown".garage_util."0.8.4" { inherit profileName; }).out;
 hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
 opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
 rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
@@ -1864,9 +1863,9 @@ in
 };
 });
-"unknown".garage_db."0.9.0" = overridableMkRustCrate (profileName: rec {
+"unknown".garage_db."0.8.4" = overridableMkRustCrate (profileName: rec {
 name = "garage_db";
-version = "0.9.0";
+version = "0.8.4";
 registry = "unknown";
 src = fetchCrateLocal (workspaceSrc + "/src/db");
 features = builtins.concatLists [
@@ -1874,20 +1873,20 @@ in
 (lib.optional (rootFeatures' ? "garage_db/clap" || rootFeatures' ? "garage_db/cli") "clap")
 (lib.optional (rootFeatures' ? "garage_db/cli") "cli")
 (lib.optional (rootFeatures' ? "garage_db/default") "default")
-(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "heed")
+(lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "heed")
-(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "lmdb")
+(lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "lmdb")
 (lib.optional (rootFeatures' ? "garage_db/cli" || rootFeatures' ? "garage_db/pretty_env_logger") "pretty_env_logger")
-(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "rusqlite")
+(lib.optional (rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite") "rusqlite")
 (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sled") "sled")
-(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "sqlite")
+(lib.optional (rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite") "sqlite")
 ];
 dependencies = {
 ${ if rootFeatures' ? "garage_db/clap" || rootFeatures' ? "garage_db/cli" then "clap" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".clap."4.4.0" { inherit profileName; }).out;
 err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
-${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "heed" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".heed."0.11.0" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "heed" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".heed."0.11.0" { inherit profileName; }).out;
 hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out;
 ${ if rootFeatures' ? "garage_db/cli" || rootFeatures' ? "garage_db/pretty_env_logger" then "pretty_env_logger" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".pretty_env_logger."0.5.0" { inherit profileName; }).out;
-${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "rusqlite" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rusqlite."0.29.0" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite" then "rusqlite" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rusqlite."0.29.0" { inherit profileName; }).out;
 ${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sled" then "sled" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".sled."0.34.7" { inherit profileName; }).out;
 tracing = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tracing."0.1.37" { inherit profileName; }).out;
 };
@@ -1896,34 +1895,33 @@ in
 };
 });
-"unknown".garage_model."0.9.0" = overridableMkRustCrate (profileName: rec {
+"unknown".garage_model."0.8.4" = overridableMkRustCrate (profileName: rec {
 name = "garage_model";
-version = "0.9.0";
+version = "0.8.4";
 registry = "unknown";
 src = fetchCrateLocal (workspaceSrc + "/src/model");
 features = builtins.concatLists [
 (lib.optional (rootFeatures' ? "garage_model/default") "default")
 (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/k2v" || rootFeatures' ? "garage_api/k2v" || rootFeatures' ? "garage_model/k2v") "k2v")
-(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "lmdb")
+(lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_model/lmdb") "lmdb")
 (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sled") "sled")
-(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "sqlite")
+(lib.optional (rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_model/sqlite") "sqlite")
 ];
 dependencies = {
 arc_swap = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".arc-swap."1.6.0" { inherit profileName; }).out;
 async_trait = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".async-trait."0.1.73" { profileName = "__noProfile"; }).out;
 base64 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".base64."0.21.3" { inherit profileName; }).out;
 blake2 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".blake2."0.10.6" { inherit profileName; }).out;
-chrono = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".chrono."0.4.26" { inherit profileName; }).out;
 err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
 futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.28" { inherit profileName; }).out;
 futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.28" { inherit profileName; }).out;
-garage_block = (rustPackages."unknown".garage_block."0.9.0" { inherit profileName; }).out;
+garage_block = (rustPackages."unknown".garage_block."0.8.4" { inherit profileName; }).out;
-garage_db = (rustPackages."unknown".garage_db."0.9.0" { inherit profileName; }).out;
+garage_db = (rustPackages."unknown".garage_db."0.8.4" { inherit profileName; }).out;
-garage_rpc = (rustPackages."unknown".garage_rpc."0.9.0" { inherit profileName; }).out;
+garage_rpc = (rustPackages."unknown".garage_rpc."0.8.4" { inherit profileName; }).out;
-garage_table = (rustPackages."unknown".garage_table."0.9.0" { inherit profileName; }).out;
+garage_table = (rustPackages."unknown".garage_table."0.8.4" { inherit profileName; }).out;
-garage_util = (rustPackages."unknown".garage_util."0.9.0" { inherit profileName; }).out;
+garage_util = (rustPackages."unknown".garage_util."0.8.4" { inherit profileName; }).out;
 hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
-netapp = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.10.0" { inherit profileName; }).out;
+netapp = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.5.2" { inherit profileName; }).out;
 opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
 rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
 serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.188" { inherit profileName; }).out;
@@ -1934,9 +1932,9 @@ in
 };
 });
-"unknown".garage_rpc."0.9.0" = overridableMkRustCrate (profileName: rec {
+"unknown".garage_rpc."0.8.4" = overridableMkRustCrate (profileName: rec {
 name = "garage_rpc";
-version = "0.9.0";
+version = "0.8.4";
 registry = "unknown";
 src = fetchCrateLocal (workspaceSrc + "/src/rpc");
 features = builtins.concatLists [
@@ -1953,20 +1951,17 @@ in
 arc_swap = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".arc-swap."1.6.0" { inherit profileName; }).out;
 async_trait = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".async-trait."0.1.73" { profileName = "__noProfile"; }).out;
 bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.4.0" { inherit profileName; }).out;
-bytesize = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytesize."1.3.0" { inherit profileName; }).out;
 ${ if rootFeatures' ? "garage/consul-discovery" || rootFeatures' ? "garage_rpc/consul-discovery" || rootFeatures' ? "garage_rpc/err-derive" then "err_derive" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
-format_table = (rustPackages."unknown".format_table."0.1.1" { inherit profileName; }).out;
 futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.28" { inherit profileName; }).out;
 futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.28" { inherit profileName; }).out;
-garage_db = (rustPackages."unknown".garage_db."0.9.0" { inherit profileName; }).out;
+garage_db = (rustPackages."unknown".garage_db."0.8.4" { inherit profileName; }).out;
-garage_util = (rustPackages."unknown".garage_util."0.9.0" { inherit profileName; }).out;
+garage_util = (rustPackages."unknown".garage_util."0.8.4" { inherit profileName; }).out;
 gethostname = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".gethostname."0.4.3" { inherit profileName; }).out;
 hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
-itertools = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".itertools."0.10.5" { inherit profileName; }).out;
 ${ if rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/k8s-openapi" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "k8s_openapi" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".k8s-openapi."0.16.0" { inherit profileName; }).out;
 ${ if rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "kube" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".kube."0.75.0" { inherit profileName; }).out;
 sodiumoxide = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".kuska-sodiumoxide."0.2.5-0" { inherit profileName; }).out;
-netapp = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.10.0" { inherit profileName; }).out;
+netapp = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.5.2" { inherit profileName; }).out;
 nix = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".nix."0.27.1" { inherit profileName; }).out;
 opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
 pnet_datalink = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".pnet_datalink."0.33.0" { inherit profileName; }).out;
@@ -1982,9 +1977,9 @@ in
}; };
}); });
"unknown".garage_table."0.9.0" = overridableMkRustCrate (profileName: rec { "unknown".garage_table."0.8.4" = overridableMkRustCrate (profileName: rec {
name = "garage_table"; name = "garage_table";
version = "0.9.0"; version = "0.8.4";
registry = "unknown"; registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/table"); src = fetchCrateLocal (workspaceSrc + "/src/table");
dependencies = { dependencies = {
@@ -1993,9 +1988,9 @@ in
bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.4.0" { inherit profileName; }).out; bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.4.0" { inherit profileName; }).out;
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.28" { inherit profileName; }).out; futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.28" { inherit profileName; }).out;
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.28" { inherit profileName; }).out; futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.28" { inherit profileName; }).out;
garage_db = (rustPackages."unknown".garage_db."0.9.0" { inherit profileName; }).out; garage_db = (rustPackages."unknown".garage_db."0.8.4" { inherit profileName; }).out;
garage_rpc = (rustPackages."unknown".garage_rpc."0.9.0" { inherit profileName; }).out; garage_rpc = (rustPackages."unknown".garage_rpc."0.8.4" { inherit profileName; }).out;
garage_util = (rustPackages."unknown".garage_util."0.9.0" { inherit profileName; }).out; garage_util = (rustPackages."unknown".garage_util."0.8.4" { inherit profileName; }).out;
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out; hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out; hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out;
opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out; opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
@@ -2007,9 +2002,9 @@ in
}; };
}); });
"unknown".garage_util."0.9.0" = overridableMkRustCrate (profileName: rec { "unknown".garage_util."0.8.4" = overridableMkRustCrate (profileName: rec {
name = "garage_util"; name = "garage_util";
version = "0.9.0"; version = "0.8.4";
registry = "unknown"; registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/util"); src = fetchCrateLocal (workspaceSrc + "/src/util");
features = builtins.concatLists [ features = builtins.concatLists [
@@ -2025,16 +2020,16 @@ in
digest = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".digest."0.10.7" { inherit profileName; }).out; digest = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".digest."0.10.7" { inherit profileName; }).out;
err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out; err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.28" { inherit profileName; }).out; futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.28" { inherit profileName; }).out;
garage_db = (rustPackages."unknown".garage_db."0.9.0" { inherit profileName; }).out; garage_db = (rustPackages."unknown".garage_db."0.8.4" { inherit profileName; }).out;
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out; hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out; hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out;
http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.9" { inherit profileName; }).out; http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.9" { inherit profileName; }).out;
hyper = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."0.14.27" { inherit profileName; }).out; hyper = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."0.14.27" { inherit profileName; }).out;
lazy_static = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".lazy_static."1.4.0" { inherit profileName; }).out; lazy_static = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".lazy_static."1.4.0" { inherit profileName; }).out;
netapp = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.10.0" { inherit profileName; }).out; netapp = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.5.2" { inherit profileName; }).out;
opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out; opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out; rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
rmp_serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."1.1.2" { inherit profileName; }).out; rmp_serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" { inherit profileName; }).out;
serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.188" { inherit profileName; }).out; serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.188" { inherit profileName; }).out;
serde_json = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_json."1.0.105" { inherit profileName; }).out; serde_json = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_json."1.0.105" { inherit profileName; }).out;
sha2 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".sha2."0.10.7" { inherit profileName; }).out; sha2 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".sha2."0.10.7" { inherit profileName; }).out;
@@ -2051,18 +2046,18 @@ in
}; };
}); });
"unknown".garage_web."0.9.0" = overridableMkRustCrate (profileName: rec { "unknown".garage_web."0.8.4" = overridableMkRustCrate (profileName: rec {
name = "garage_web"; name = "garage_web";
version = "0.9.0"; version = "0.8.4";
registry = "unknown"; registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/web"); src = fetchCrateLocal (workspaceSrc + "/src/web");
dependencies = { dependencies = {
err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out; err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.28" { inherit profileName; }).out; futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.28" { inherit profileName; }).out;
garage_api = (rustPackages."unknown".garage_api."0.9.0" { inherit profileName; }).out; garage_api = (rustPackages."unknown".garage_api."0.8.4" { inherit profileName; }).out;
garage_model = (rustPackages."unknown".garage_model."0.9.0" { inherit profileName; }).out; garage_model = (rustPackages."unknown".garage_model."0.8.4" { inherit profileName; }).out;
garage_table = (rustPackages."unknown".garage_table."0.9.0" { inherit profileName; }).out; garage_table = (rustPackages."unknown".garage_table."0.8.4" { inherit profileName; }).out;
garage_util = (rustPackages."unknown".garage_util."0.9.0" { inherit profileName; }).out; garage_util = (rustPackages."unknown".garage_util."0.8.4" { inherit profileName; }).out;
http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.9" { inherit profileName; }).out; http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.9" { inherit profileName; }).out;
hyper = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."0.14.27" { inherit profileName; }).out; hyper = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."0.14.27" { inherit profileName; }).out;
hyperlocal = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyperlocal."0.8.0" { inherit profileName; }).out; hyperlocal = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyperlocal."0.8.0" { inherit profileName; }).out;
@@ -2186,15 +2181,15 @@ in
registry = "registry+https://github.com/rust-lang/crates.io-index"; registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a"; }; src = fetchCratesIo { inherit name version; sha256 = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a"; };
features = builtins.concatLists [ features = builtins.concatLists [
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "ahash") (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite") "ahash")
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "allocator-api2") (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite") "allocator-api2")
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "default") (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite") "default")
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "inline-more") (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite") "inline-more")
(lib.optional (rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "raw") (lib.optional (rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "raw")
]; ];
dependencies = { dependencies = {
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "ahash" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".ahash."0.8.3" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite" then "ahash" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".ahash."0.8.3" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "allocator_api2" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".allocator-api2."0.2.16" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite" then "allocator_api2" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".allocator-api2."0.2.16" { inherit profileName; }).out;
}; };
}); });
@@ -2204,7 +2199,7 @@ in
registry = "registry+https://github.com/rust-lang/crates.io-index"; registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "312f66718a2d7789ffef4f4b7b213138ed9f1eb3aa1d0d82fc99f88fb3ffd26f"; }; src = fetchCratesIo { inherit name version; sha256 = "312f66718a2d7789ffef4f4b7b213138ed9f1eb3aa1d0d82fc99f88fb3ffd26f"; };
dependencies = { dependencies = {
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "hashbrown" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hashbrown."0.14.0" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite" then "hashbrown" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hashbrown."0.14.0" { inherit profileName; }).out;
}; };
}); });
@@ -2234,20 +2229,20 @@ in
registry = "registry+https://github.com/rust-lang/crates.io-index"; registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "269c7486ed6def5d7b59a427cec3e87b4d4dd4381d01e21c8c9f2d3985688392"; }; src = fetchCratesIo { inherit name version; sha256 = "269c7486ed6def5d7b59a427cec3e87b4d4dd4381d01e21c8c9f2d3985688392"; };
features = builtins.concatLists [ features = builtins.concatLists [
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "lmdb") (lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "lmdb")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "lmdb-rkv-sys") (lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "lmdb-rkv-sys")
]; ];
dependencies = { dependencies = {
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "bytemuck" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytemuck."1.13.1" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "bytemuck" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytemuck."1.13.1" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "byteorder" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".byteorder."1.4.3" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "byteorder" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".byteorder."1.4.3" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "heed_traits" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".heed-traits."0.8.0" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "heed_traits" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".heed-traits."0.8.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "heed_types" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".heed-types."0.8.0" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "heed_types" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".heed-types."0.8.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "libc" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.147" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "libc" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.147" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "lmdb_sys" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".lmdb-rkv-sys."0.11.2" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "lmdb_sys" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".lmdb-rkv-sys."0.11.2" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "once_cell" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".once_cell."1.18.0" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "once_cell" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".once_cell."1.18.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "page_size" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".page_size."0.4.2" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "page_size" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".page_size."0.4.2" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "synchronoise" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".synchronoise."1.0.1" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "synchronoise" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".synchronoise."1.0.1" { inherit profileName; }).out;
${ if (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") && hostPlatform.isWindows then "url" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".url."2.4.0" { inherit profileName; }).out; ${ if (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") && hostPlatform.isWindows then "url" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".url."2.4.0" { inherit profileName; }).out;
}; };
}); });
@@ -2264,20 +2259,20 @@ in
registry = "registry+https://github.com/rust-lang/crates.io-index"; registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "9a6cf0a6952fcedc992602d5cddd1e3fff091fbe87d38636e3ec23a31f32acbd"; }; src = fetchCratesIo { inherit name version; sha256 = "9a6cf0a6952fcedc992602d5cddd1e3fff091fbe87d38636e3ec23a31f32acbd"; };
features = builtins.concatLists [ features = builtins.concatLists [
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "bincode") (lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "bincode")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "default") (lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "default")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "serde") (lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "serde")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "serde-bincode") (lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "serde-bincode")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "serde-json") (lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "serde-json")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "serde_json") (lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "serde_json")
]; ];
dependencies = { dependencies = {
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "bincode" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bincode."1.3.3" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "bincode" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bincode."1.3.3" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "bytemuck" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytemuck."1.13.1" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "bytemuck" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytemuck."1.13.1" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "byteorder" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".byteorder."1.4.3" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "byteorder" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".byteorder."1.4.3" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "heed_traits" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".heed-traits."0.8.0" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "heed_traits" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".heed-traits."0.8.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "serde" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.188" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "serde" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.188" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "serde_json" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_json."1.0.105" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "serde_json" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_json."1.0.105" { inherit profileName; }).out;
}; };
}); });
@@ -2653,12 +2648,12 @@ in
registry = "registry+https://github.com/rust-lang/crates.io-index"; registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"; }; src = fetchCratesIo { inherit name version; sha256 = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"; };
features = builtins.concatLists [ features = builtins.concatLists [
[ "default" ] (lib.optional (rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/telemetry-otlp") "default")
[ "use_alloc" ] (lib.optional (rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/telemetry-otlp") "use_alloc")
[ "use_std" ] (lib.optional (rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/telemetry-otlp") "use_std")
]; ];
dependencies = { dependencies = {
either = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".either."1.9.0" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/telemetry-otlp" then "either" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".either."1.9.0" { inherit profileName; }).out;
}; };
}); });
@@ -3014,15 +3009,15 @@ in
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "bundled") (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "bundled")
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "bundled_bindings") (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "bundled_bindings")
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "cc") (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "cc")
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "default") (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite") "default")
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "min_sqlite_version_3_14_0") (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite") "min_sqlite_version_3_14_0")
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "pkg-config") (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite") "pkg-config")
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "vcpkg") (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite") "vcpkg")
]; ];
buildDependencies = { buildDependencies = {
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs" then "cc" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".cc."1.0.83" { profileName = "__noProfile"; }).out; ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs" then "cc" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".cc."1.0.83" { profileName = "__noProfile"; }).out;
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "pkg_config" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".pkg-config."0.3.27" { profileName = "__noProfile"; }).out; ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite" then "pkg_config" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".pkg-config."0.3.27" { profileName = "__noProfile"; }).out;
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "vcpkg" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".vcpkg."0.2.15" { profileName = "__noProfile"; }).out; ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite" then "vcpkg" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".vcpkg."0.2.15" { profileName = "__noProfile"; }).out;
}; };
}); });
@@ -3052,14 +3047,14 @@ in
registry = "registry+https://github.com/rust-lang/crates.io-index"; registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "61b9ce6b3be08acefa3003c57b7565377432a89ec24476bbe72e11d101f852fe"; }; src = fetchCratesIo { inherit name version; sha256 = "61b9ce6b3be08acefa3003c57b7565377432a89ec24476bbe72e11d101f852fe"; };
features = builtins.concatLists [ features = builtins.concatLists [
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "default") (lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "default")
]; ];
dependencies = { dependencies = {
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "libc" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.147" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "libc" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.147" { inherit profileName; }).out;
}; };
buildDependencies = { buildDependencies = {
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "cc" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".cc."1.0.83" { profileName = "__noProfile"; }).out; ${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "cc" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".cc."1.0.83" { profileName = "__noProfile"; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "pkg_config" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".pkg-config."0.3.27" { profileName = "__noProfile"; }).out; ${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "pkg_config" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".pkg-config."0.3.27" { profileName = "__noProfile"; }).out;
}; };
}); });
@@ -3223,11 +3218,11 @@ in
src = fetchCratesIo { inherit name version; sha256 = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a"; }; src = fetchCratesIo { inherit name version; sha256 = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a"; };
}); });
"registry+https://github.com/rust-lang/crates.io-index".netapp."0.10.0" = overridableMkRustCrate (profileName: rec { "registry+https://github.com/rust-lang/crates.io-index".netapp."0.5.2" = overridableMkRustCrate (profileName: rec {
name = "netapp"; name = "netapp";
version = "0.10.0"; version = "0.5.2";
registry = "registry+https://github.com/rust-lang/crates.io-index"; registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "0a00b76cec93e3ae68c9ed5f08e27a1507424987ee23d5ec961ebd4da820a265"; }; src = fetchCratesIo { inherit name version; sha256 = "4ffe47ac46d3b2ce2f736a70865492df082e042eb2bfdddfca3b8dd66bd9469d"; };
features = builtins.concatLists [ features = builtins.concatLists [
[ "default" ] [ "default" ]
[ "opentelemetry" ] [ "opentelemetry" ]
@@ -3249,7 +3244,7 @@ in
opentelemetry_contrib = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry-contrib."0.9.0" { inherit profileName; }).out; opentelemetry_contrib = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry-contrib."0.9.0" { inherit profileName; }).out;
pin_project = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".pin-project."1.1.3" { inherit profileName; }).out; pin_project = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".pin-project."1.1.3" { inherit profileName; }).out;
rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out; rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
rmp_serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."1.1.2" { inherit profileName; }).out; rmp_serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" { inherit profileName; }).out;
serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.188" { inherit profileName; }).out; serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.188" { inherit profileName; }).out;
tokio = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.32.0" { inherit profileName; }).out; tokio = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.32.0" { inherit profileName; }).out;
tokio_stream = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio-stream."0.1.14" { inherit profileName; }).out; tokio_stream = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio-stream."0.1.14" { inherit profileName; }).out;
@@ -3484,7 +3479,7 @@ in
[ "default" ] [ "default" ]
[ "race" ] [ "race" ]
[ "std" ] [ "std" ]
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "unstable") (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "unstable")
]; ];
}); });
@@ -3622,8 +3617,8 @@ in
registry = "registry+https://github.com/rust-lang/crates.io-index"; registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "eebde548fbbf1ea81a99b128872779c437752fb99f217c45245e1a61dcd9edcd"; }; src = fetchCratesIo { inherit name version; sha256 = "eebde548fbbf1ea81a99b128872779c437752fb99f217c45245e1a61dcd9edcd"; };
dependencies = { dependencies = {
${ if (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") && hostPlatform.isUnix then "libc" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.147" { inherit profileName; }).out; ${ if (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") && hostPlatform.isUnix then "libc" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.147" { inherit profileName; }).out;
${ if (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") && hostPlatform.isWindows then "winapi" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".winapi."0.3.9" { inherit profileName; }).out; ${ if (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") && hostPlatform.isWindows then "winapi" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".winapi."0.3.9" { inherit profileName; }).out;
}; };
}); });
@@ -4314,11 +4309,11 @@ in
}; };
}); });
"registry+https://github.com/rust-lang/crates.io-index".rmp-serde."1.1.2" = overridableMkRustCrate (profileName: rec { "registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" = overridableMkRustCrate (profileName: rec {
name = "rmp-serde"; name = "rmp-serde";
version = "1.1.2"; version = "0.15.5";
registry = "registry+https://github.com/rust-lang/crates.io-index"; registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "bffea85eea980d8a74453e5d02a8d93028f3c34725de143085a844ebe953258a"; }; src = fetchCratesIo { inherit name version; sha256 = "723ecff9ad04f4ad92fe1c8ca6c20d2196d9286e9c60727c4cb5511629260e9d"; };
dependencies = { dependencies = {
byteorder = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".byteorder."1.4.3" { inherit profileName; }).out; byteorder = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".byteorder."1.4.3" { inherit profileName; }).out;
rmp = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp."0.8.12" { inherit profileName; }).out; rmp = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp."0.8.12" { inherit profileName; }).out;
@@ -4351,12 +4346,12 @@ in
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "modern_sqlite") (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "modern_sqlite")
]; ];
dependencies = { dependencies = {
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "bitflags" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bitflags."2.4.0" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite" then "bitflags" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bitflags."2.4.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "fallible_iterator" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".fallible-iterator."0.2.0" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite" then "fallible_iterator" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".fallible-iterator."0.2.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "fallible_streaming_iterator" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".fallible-streaming-iterator."0.1.9" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite" then "fallible_streaming_iterator" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".fallible-streaming-iterator."0.1.9" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "hashlink" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hashlink."0.8.3" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite" then "hashlink" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hashlink."0.8.3" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "libsqlite3_sys" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libsqlite3-sys."0.26.0" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite" then "libsqlite3_sys" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libsqlite3-sys."0.26.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "smallvec" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".smallvec."1.11.0" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite" then "smallvec" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".smallvec."1.11.0" { inherit profileName; }).out;
}; };
}); });
@ -5020,7 +5015,7 @@ in
registry = "registry+https://github.com/rust-lang/crates.io-index"; registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "3dbc01390fc626ce8d1cffe3376ded2b72a11bb70e1c75f404a210e4daa4def2"; }; src = fetchCratesIo { inherit name version; sha256 = "3dbc01390fc626ce8d1cffe3376ded2b72a11bb70e1c75f404a210e4daa4def2"; };
dependencies = { dependencies = {
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "crossbeam_queue" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crossbeam-queue."0.3.8" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "crossbeam_queue" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crossbeam-queue."0.3.8" { inherit profileName; }).out;
}; };
}); });

View file

@ -18,14 +18,14 @@ default-members = ["src/garage"]
[workspace.dependencies] [workspace.dependencies]
format_table = { version = "0.1.1", path = "src/format-table" } format_table = { version = "0.1.1", path = "src/format-table" }
garage_api = { version = "0.9.0", path = "src/api" } garage_api = { version = "0.8.4", path = "src/api" }
garage_block = { version = "0.9.0", path = "src/block" } garage_block = { version = "0.8.4", path = "src/block" }
garage_db = { version = "0.9.0", path = "src/db", default-features = false } garage_db = { version = "0.8.4", path = "src/db", default-features = false }
garage_model = { version = "0.9.0", path = "src/model", default-features = false } garage_model = { version = "0.8.4", path = "src/model", default-features = false }
garage_rpc = { version = "0.9.0", path = "src/rpc" } garage_rpc = { version = "0.8.4", path = "src/rpc" }
garage_table = { version = "0.9.0", path = "src/table" } garage_table = { version = "0.8.4", path = "src/table" }
garage_util = { version = "0.9.0", path = "src/util" } garage_util = { version = "0.8.4", path = "src/util" }
garage_web = { version = "0.9.0", path = "src/web" } garage_web = { version = "0.8.4", path = "src/web" }
k2v-client = { version = "0.0.4", path = "src/k2v-client" } k2v-client = { version = "0.0.4", path = "src/k2v-client" }
[profile.dev] [profile.dev]

View file

@ -37,7 +37,7 @@ Second, we suppose you have created a key and a bucket.
As a reminder, you can create a key for your nextcloud instance as follows: As a reminder, you can create a key for your nextcloud instance as follows:
```bash ```bash
garage key create nextcloud-key garage key new --name nextcloud-key
``` ```
Keep the Key ID and the Secret key in a pad, they will be needed later. Keep the Key ID and the Secret key in a pad, they will be needed later.
@ -139,7 +139,7 @@ a reasonable trade-off for some instances.
Create a key for Peertube: Create a key for Peertube:
```bash ```bash
garage key create peertube-key garage key new --name peertube-key
``` ```
Keep the Key ID and the Secret key in a pad, they will be needed later. Keep the Key ID and the Secret key in a pad, they will be needed later.
@ -253,7 +253,7 @@ As such, your Garage cluster should be configured appropriately for good perform
This is the usual Garage setup: This is the usual Garage setup:
```bash ```bash
garage key create mastodon-key garage key new --name mastodon-key
garage bucket create mastodon-data garage bucket create mastodon-data
garage bucket allow mastodon-data --read --write --key mastodon-key garage bucket allow mastodon-data --read --write --key mastodon-key
``` ```
@ -379,7 +379,7 @@ Supposing you have a working synapse installation, you can add the module with p
Now create a bucket and a key for your matrix instance (note your Key ID and Secret Key somewhere, they will be needed later): Now create a bucket and a key for your matrix instance (note your Key ID and Secret Key somewhere, they will be needed later):
```bash ```bash
garage key create matrix-key garage key new --name matrix-key
garage bucket create matrix garage bucket create matrix
garage bucket allow matrix --read --write --key matrix-key garage bucket allow matrix --read --write --key matrix-key
``` ```

View file

@ -54,7 +54,7 @@ how to configure this.
Create your key and bucket: Create your key and bucket:
```bash ```bash
garage key create my-key garage key new my-key
garage bucket create backup garage bucket create backup
garage bucket allow backup --read --write --key my-key garage bucket allow backup --read --write --key my-key
``` ```

View file

@ -23,7 +23,7 @@ You can configure a different target for each data type (check `[lfs]` and `[att
Let's start by creating a key and a bucket (your key id and secret will be needed later, keep them somewhere): Let's start by creating a key and a bucket (your key id and secret will be needed later, keep them somewhere):
```bash ```bash
garage key create gitea-key garage key new --name gitea-key
garage bucket create gitea garage bucket create gitea
garage bucket allow gitea --read --write --key gitea-key garage bucket allow gitea --read --write --key gitea-key
``` ```
@ -118,7 +118,7 @@ through another support, like a git repository.
As a first step, we will need to create a bucket on Garage and enable website access on it: As a first step, we will need to create a bucket on Garage and enable website access on it:
```bash ```bash
garage key create nix-key garage key new --name nix-key
garage bucket create nix.example.com garage bucket create nix.example.com
garage bucket allow nix.example.com --read --write --key nix-key garage bucket allow nix.example.com --read --write --key nix-key
garage bucket website nix.example.com --allow garage bucket website nix.example.com --allow

View file

@ -19,10 +19,9 @@ To run a real-world deployment, make sure the following conditions are met:
- You have at least three machines with sufficient storage space available. - You have at least three machines with sufficient storage space available.
- Each machine has an IP address which makes it directly reachable by all other machines. - Each machine has a public IP address which is reachable by other machines. It
In many cases, nodes will be behind a NAT and will not each have a public is highly recommended that you use IPv6 for this end-to-end connectivity. If
IPv4 address. In this case, it is recommended that you use IPv6 for this IPv6 is not available, then mesh VPNs such as
end-to-end connectivity if it is available. Otherwise, mesh VPNs such as
[Nebula](https://github.com/slackhq/nebula) or [Nebula](https://github.com/slackhq/nebula) or
[Yggdrasil](https://yggdrasil-network.github.io/) are approaches to consider [Yggdrasil](https://yggdrasil-network.github.io/) are approaches to consider
in addition to building out your own VPN tunneling. in addition to building out your own VPN tunneling.
@ -43,7 +42,7 @@ For our example, we will suppose the following infrastructure with IPv6 connecti
| Brussels | Mars | fc00:F::1 | 1.5 TB | | Brussels | Mars | fc00:F::1 | 1.5 TB |
Note that Garage will **always** store the three copies of your data on nodes at different Note that Garage will **always** store the three copies of your data on nodes at different
locations. This means that in the case of this small example, the usable capacity locations. This means that in the case of this small example, the available capacity
of the cluster is in fact only 1.5 TB, because nodes in Brussels can't store more than that. of the cluster is in fact only 1.5 TB, because nodes in Brussels can't store more than that.
This also means that nodes in Paris and London will be under-utilized. This also means that nodes in Paris and London will be under-utilized.
To make better use of the available hardware, you should ensure that the capacity To make better use of the available hardware, you should ensure that the capacity
@ -76,23 +75,28 @@ to store 2 TB of data in total.
- For the metadata storage, Garage does not do checksumming and integrity - For the metadata storage, Garage does not do checksumming and integrity
verification on its own. If you are afraid of bitrot/data corruption, verification on its own. If you are afraid of bitrot/data corruption,
put your metadata directory on a ZFS or BTRFS partition. Otherwise, just use regular put your metadata directory on a BTRFS partition. Otherwise, just use regular
EXT4 or XFS. EXT4 or XFS.
- Servers with multiple HDDs are supported natively by Garage without resorting - Having a single server with several storage drives is currently not very well
to RAID; see [our dedicated documentation page](@/documentation/operations/multi-hdd.md). supported in Garage ([#218](https://git.deuxfleurs.fr/Deuxfleurs/garage/issues/218)).
For an easy setup, just put all your drives in a RAID0 or a ZFS RAIDZ array.
If you're adventurous, you can try to format each of your disks as
a separate XFS partition, and then run one `garage` daemon per disk drive,
or use something like [`mergerfs`](https://github.com/trapexit/mergerfs) to merge
all your disks in a single union filesystem that spreads load over them.
## Get a Docker image ## Get a Docker image
Our docker image is currently named `dxflrs/garage` and is stored on the [Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated). Our docker image is currently named `dxflrs/garage` and is stored on the [Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).
We encourage you to use a fixed tag (eg. `v0.9.0`) and not the `latest` tag. We encourage you to use a fixed tag (eg. `v0.8.0`) and not the `latest` tag.
For this example, we will use the latest published version at the time of the writing which is `v0.9.0` but it's up to you For this example, we will use the latest published version at the time of the writing which is `v0.8.0` but it's up to you
to check [the most recent versions on the Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated). to check [the most recent versions on the Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).
For example: For example:
``` ```
sudo docker pull dxflrs/garage:v0.9.0 sudo docker pull dxflrs/garage:v0.8.0
``` ```
## Deploying and configuring Garage ## Deploying and configuring Garage
@ -157,13 +161,12 @@ docker run \
-v /etc/garage.toml:/etc/garage.toml \ -v /etc/garage.toml:/etc/garage.toml \
-v /var/lib/garage/meta:/var/lib/garage/meta \ -v /var/lib/garage/meta:/var/lib/garage/meta \
-v /var/lib/garage/data:/var/lib/garage/data \ -v /var/lib/garage/data:/var/lib/garage/data \
dxflrs/garage:v0.9.0 dxflrs/garage:v0.8.0
``` ```
With this command line, Garage should be started automatically at each boot. It should be restarted automatically at each reboot.
Please note that we use host networking as otherwise the network indirection Please note that we use host networking as otherwise Docker containers
added by Docker would prevent Garage nodes from communicating with one another can not communicate with IPv6.
(especially if using IPv6).
If you want to use `docker-compose`, you may use the following `docker-compose.yml` file as a reference: If you want to use `docker-compose`, you may use the following `docker-compose.yml` file as a reference:
@ -171,7 +174,7 @@ If you want to use `docker-compose`, you may use the following `docker-compose.y
version: "3" version: "3"
services: services:
garage: garage:
image: dxflrs/garage:v0.9.0 image: dxflrs/garage:v0.8.0
network_mode: "host" network_mode: "host"
restart: unless-stopped restart: unless-stopped
volumes: volumes:
@ -180,12 +183,10 @@ services:
- /var/lib/garage/data:/var/lib/garage/data - /var/lib/garage/data:/var/lib/garage/data
``` ```
If you wish to upgrade your cluster, make sure to read the corresponding Upgrading between Garage versions should be supported transparently,
[documentation page](@/documentation/operations/upgrading.md) first, as well as but please check the release notes before doing so!
the documentation relevant to your version of Garage in the case of major To upgrade, simply stop and remove this container and
upgrades. With the containerized setup proposed here, the upgrade process run the command again with a new version of Garage.
will require stopping and removing the existing container, and re-creating it
with the upgraded version.
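For reference, here is a minimal sketch of such an in-place container upgrade with plain Docker. The container name `garaged` and the `--restart`/`--network` flags are assumptions (they are not shown in the excerpt above); adapt them to how you originally started the container:

```bash
# Stop and remove the existing container (the name is an assumption)
docker stop garaged
docker rm garaged

# Re-create it from the upgraded image, reusing the same config and data volumes
docker run -d --name garaged \
  --restart unless-stopped \
  --network host \
  -v /etc/garage.toml:/etc/garage.toml \
  -v /var/lib/garage/meta:/var/lib/garage/meta \
  -v /var/lib/garage/data:/var/lib/garage/data \
  dxflrs/garage:<new-version-tag>
```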
## Controlling the daemon ## Controlling the daemon
@ -269,12 +270,12 @@ of a role that is assigned to each active cluster node.
For our example, we will suppose we have the following infrastructure For our example, we will suppose we have the following infrastructure
(Capacity, Identifier and Zone are specific values to Garage described in the following): (Capacity, Identifier and Zone are specific values to Garage described in the following):
| Location | Name | Disk Space | Identifier | Zone (`-z`) | Capacity (`-c`) | | Location | Name | Disk Space | `Capacity` | `Identifier` | `Zone` |
|----------|---------|------------|------------|-------------|-----------------| |----------|---------|------------|------------|--------------|--------------|
| Paris | Mercury | 1 TB | `563e` | `par1` | `1T` | | Paris | Mercury | 1 TB | `10` | `563e` | `par1` |
| Paris | Venus | 2 TB | `86f0` | `par1` | `2T` | | Paris | Venus | 2 TB | `20` | `86f0` | `par1` |
| London | Earth | 2 TB | `6814` | `lon1` | `2T` | | London | Earth | 2 TB | `20` | `6814` | `lon1` |
| Brussels | Mars | 1.5 TB | `212f` | `bru1` | `1.5T` | | Brussels | Mars | 1.5 TB | `15` | `212f` | `bru1` |
#### Node identifiers #### Node identifiers
@ -296,8 +297,6 @@ garage status
It will display the IP address associated with each node; It will display the IP address associated with each node;
from the IP address you will be able to recognize the node. from the IP address you will be able to recognize the node.
We will now use the `garage layout assign` command to configure the correct parameters for each node.
#### Zones #### Zones
Zones are simply a user-chosen identifier that identifies a group of servers that are grouped together logically. Zones are simply a user-chosen identifier that identifies a group of servers that are grouped together logically.
@ -307,29 +306,29 @@ In most cases, a zone will correspond to a geographical location (i.e. a datacen
Behind the scenes, Garage will use zone definitions to try to store the same data on different zones, Behind the scenes, Garage will use zone definitions to try to store the same data on different zones,
in order to provide high availability despite failure of a zone. in order to provide high availability despite failure of a zone.
Zones are passed to Garage using the `-z` flag of `garage layout assign` (see below).
#### Capacity #### Capacity
Garage needs to know the storage capacity (disk space) it can/should use on Garage reasons on an abstract metric about disk storage that is named the *capacity* of a node.
each node, to be able to correctly balance data. The capacity configured in Garage must be proportional to the disk space dedicated to the node.
Capacity values are expressed in bytes and are passed to Garage using the `-c` flag of `garage layout assign` (see below). Capacity values must be **integers** but can be given any meaning.
Here we chose that 1 unit of capacity = 100 GB.
#### Tags Note that the amount of data stored by Garage on each server may not be strictly proportional to
its capacity value, as Garage will prioritize having 3 copies of data in different zones,
You can add additional tags to nodes using the `-t` flag of `garage layout assign` (see below). even if this means that capacities will not be strictly respected. For example, in the example above,
Tags have no specific meaning for Garage and can be used at your convenience. nodes Earth and Mars will always store a copy of everything each, and the third copy will
have 66% chance of being stored by Venus and 33% chance of being stored by Mercury.
#### Injecting the topology #### Injecting the topology
Given the information above, we will configure our cluster as follows: Given the information above, we will configure our cluster as follows:
```bash ```bash
garage layout assign 563e -z par1 -c 1T -t mercury garage layout assign 563e -z par1 -c 10 -t mercury
garage layout assign 86f0 -z par1 -c 2T -t venus garage layout assign 86f0 -z par1 -c 20 -t venus
garage layout assign 6814 -z lon1 -c 2T -t earth garage layout assign 6814 -z lon1 -c 20 -t earth
garage layout assign 212f -z bru1 -c 1.5T -t mars garage layout assign 212f -z bru1 -c 15 -t mars
``` ```
At this point, the changes in the cluster layout have not yet been applied. At this point, the changes in the cluster layout have not yet been applied.
@ -339,7 +338,6 @@ To show the new layout that will be applied, call:
garage layout show garage layout show
``` ```
Make sure to read carefully the output of `garage layout show`.
Once you are satisfied with your new layout, apply it with: Once you are satisfied with your new layout, apply it with:
```bash ```bash

View file

@ -91,16 +91,6 @@ is definitely lost, then there is no other choice than to declare your S3 object
as unrecoverable, and to delete them properly from the data store. This can be done as unrecoverable, and to delete them properly from the data store. This can be done
using the `garage block purge` command. using the `garage block purge` command.
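As a sketch, a typical sequence could look as follows; the block hash is a placeholder, and the exact flags accepted by `garage block purge` should be checked with `--help`:

```bash
# List the blocks that could not be fetched from any node
garage block list-errors

# Permanently delete all objects referencing an unrecoverable block
# (placeholder hash; this is destructive and cannot be undone)
garage block purge --yes 6d468f3dcf4f9c8b
```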
## Rebalancing data directories
In [multi-HDD setups](@/documentation/operations/multi-hdd.md), to ensure that
data blocks are well balanced between storage locations, you may run a
rebalance operation using `garage repair rebalance`. This is useful when
adding storage locations or when capacities of the storage locations have been
changed. Once this is finished, Garage will know, for each block, a single
possible location where it can be, which can increase access speed. This
operation will also move out all data from locations marked as read-only.
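For instance, here is a sketch of launching and monitoring such a rebalance, assuming the same invocation style as the repair procedures listed below:

```bash
# Launch the rebalance procedure on the local node
garage repair --yes rebalance

# Follow the progress of the background worker
garage worker list
```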
# Metadata operations # Metadata operations
@ -124,3 +114,4 @@ in your cluster, you can run one of the following repair procedures:
- `garage repair versions`: checks that all versions belong to a non-deleted object, and purges any orphan version - `garage repair versions`: checks that all versions belong to a non-deleted object, and purges any orphan version
- `garage repair block_refs`: checks that all block references belong to a non-deleted object version, and purges any orphan block reference (this will then allow the blocks to be garbage-collected) - `garage repair block_refs`: checks that all block references belong to a non-deleted object version, and purges any orphan block reference (this will then allow the blocks to be garbage-collected)

View file

@ -9,30 +9,18 @@ a certain capacity, or a gateway node that does not store data and is only
used as an API entry point for faster cluster access. used as an API entry point for faster cluster access.
An introduction to building cluster layouts can be found in the [production deployment](@/documentation/cookbook/real-world.md) page. An introduction to building cluster layouts can be found in the [production deployment](@/documentation/cookbook/real-world.md) page.
In Garage, all of the data that can be stored in a given cluster is divided
into slices which we call *partitions*. Each partition is stored by
one or several nodes in the cluster
(see [`replication_mode`](@/documentation/reference-manual/configuration.md#replication-mode)).
The layout determines the correspondence between these partitions,
which exist on a logical level, and actual storage nodes.
## How cluster layouts work in Garage ## How cluster layouts work in Garage
A cluster layout is composed of the following components: In Garage, a cluster layout is composed of the following components:
- a table of roles assigned to nodes, defined by the user - a table of roles assigned to nodes
- an optimal assignation of partitions to nodes, computed by an algorithm that is run once when calling `garage layout apply` or the ApplyClusterLayout API endpoint
- a version number - a version number
Garage nodes will always use the cluster layout with the highest version number. Garage nodes will always use the cluster layout with the highest version number.
Garage nodes also maintain and synchronize between them a set of proposed role Garage nodes also maintain and synchronize between them a set of proposed role
changes that haven't yet been applied. These changes will be applied (or changes that haven't yet been applied. These changes will be applied (or
canceled) in the next version of the layout. canceled) in the next version of the layout
All operations on the layout can be performed using the `garage` CLI or using the
[administration API endpoint](@/documentation/reference-manual/admin-api.md).
We give here a description of the CLI commands; the admin API semantics are very similar.
The following commands insert modifications to the set of proposed role changes The following commands insert modifications to the set of proposed role changes
for the next layout version (but they do not create the new layout immediately): for the next layout version (but they do not create the new layout immediately):
@ -63,7 +51,7 @@ commands will fail otherwise.
## Warnings about Garage cluster layout management ## Warnings about Garage cluster layout management
**⚠️ Never make several calls to `garage layout apply` or `garage layout **Warning: never make several calls to `garage layout apply` or `garage layout
revert` with the same value of the `--version` flag. Doing so can lead to the revert` with the same value of the `--version` flag. Doing so can lead to the
creation of several different layouts with the same version number, in which creation of several different layouts with the same version number, in which
case your Garage cluster will become inconsistent until fixed.** If a call to case your Garage cluster will become inconsistent until fixed.** If a call to
@ -77,198 +65,13 @@ shell, you shouldn't have much issues as long as you run commands one after
the other and take care of checking the output of `garage layout show` the other and take care of checking the output of `garage layout show`
before applying any changes. before applying any changes.
If you are using the `garage` CLI or the admin API to script layout changes, If you are using the `garage` CLI to script layout changes, follow these recommendations:
follow these recommendations (see the command sketch after this list):
- If using the CLI, make all of your `garage` CLI calls to the same RPC host. - Make all of your `garage` CLI calls to the same RPC host. Do not use the
If using the admin API, make all of your API calls to the same Garage node. Do `garage` CLI to connect to individual nodes to send them each a piece of the
not connect to individual nodes to send them each a piece of the layout changes layout changes you are making, as the changes propagate asynchronously
you are making, as the changes propagate asynchronously between nodes and might between nodes and might not all be taken into account at the time when the
not all be taken into account at the time when the new layout is applied. new layout is applied.
- **Only call `garage layout apply`/ApplyClusterLayout once**, and call it - **Only call `garage layout apply` once**, and call it **strictly after** all
**strictly after** all of the `layout assign` and `layout remove` of the `layout assign` and `layout remove` commands have returned.
commands/UpdateClusterLayout API calls have returned.
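A sketch of a script following these recommendations is given below; the node identifiers, zones, capacities and layout version number are placeholders taken from the examples in this documentation:

```bash
#!/usr/bin/env bash
set -euo pipefail

# Always talk to the same node: use a single configuration file / RPC host
GARAGE="garage -c /etc/garage.toml"

# Stage every role change first; nothing is applied yet
$GARAGE layout assign 563e -z par1 -c 1T -t mercury
$GARAGE layout assign 86f0 -z par1 -c 2T -t venus
$GARAGE layout remove 212f

# Review the staged changes and the computed assignation
$GARAGE layout show

# Apply exactly once, strictly after all of the commands above have returned
$GARAGE layout apply --version 13
```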
## Understanding unexpected layout calculations
When adding, removing or modifying nodes in a cluster layout, sometimes
unexpected assigntations of partitions to node can occur. These assignations
are in fact normal and logical, given the objectives of the algorihtm. Indeed,
**the layout algorithm prioritizes moving less data between nodes over the fact
of achieving equal distribution of load. It also tries to use all links between
pairs of nodes in equal proportions when moving data.** This section presents
two examples and illustrates how one can control Garage's behavior to obtain
the desired results.
### Example 1
In this example, a cluster is originally composed of 3 nodes in 3 different
zones (data centers). The three nodes are of equal capacity, therefore they
are all fully exploited and all store a copy of all of the data in the cluster.
Then, a fourth node of the same size is added in the datacenter `dc1`.
As illustrated by the following, **Garage will by default not store any data on the new node**:
```
$ garage layout show
==== CURRENT CLUSTER LAYOUT ====
ID Tags Zone Capacity Usable capacity
b10c110e4e854e5a node1 dc1 1000.0 MB 1000.0 MB (100.0%)
a235ac7695e0c54d node2 dc2 1000.0 MB 1000.0 MB (100.0%)
62b218d848e86a64 node3 dc3 1000.0 MB 1000.0 MB (100.0%)
Zone redundancy: maximum
Current cluster layout version: 6
==== STAGED ROLE CHANGES ====
ID Tags Zone Capacity
a11c7cf18af29737 node4 dc1 1000.0 MB
==== NEW CLUSTER LAYOUT AFTER APPLYING CHANGES ====
ID Tags Zone Capacity Usable capacity
b10c110e4e854e5a node1 dc1 1000.0 MB 1000.0 MB (100.0%)
a11c7cf18af29737 node4 dc1 1000.0 MB 0 B (0.0%)
a235ac7695e0c54d node2 dc2 1000.0 MB 1000.0 MB (100.0%)
62b218d848e86a64 node3 dc3 1000.0 MB 1000.0 MB (100.0%)
Zone redundancy: maximum
==== COMPUTATION OF A NEW PARTITION ASSIGNATION ====
Partitions are replicated 3 times on at least 3 distinct zones.
Optimal partition size: 3.9 MB (3.9 MB in previous layout)
Usable capacity / total cluster capacity: 3.0 GB / 4.0 GB (75.0 %)
Effective capacity (replication factor 3): 1000.0 MB
A total of 0 new copies of partitions need to be transferred.
dc1 Tags Partitions Capacity Usable capacity
b10c110e4e854e5a node1 256 (0 new) 1000.0 MB 1000.0 MB (100.0%)
a11c7cf18af29737 node4 0 (0 new) 1000.0 MB 0 B (0.0%)
TOTAL 256 (256 unique) 2.0 GB 1000.0 MB (50.0%)
dc2 Tags Partitions Capacity Usable capacity
a235ac7695e0c54d node2 256 (0 new) 1000.0 MB 1000.0 MB (100.0%)
TOTAL 256 (256 unique) 1000.0 MB 1000.0 MB (100.0%)
dc3 Tags Partitions Capacity Usable capacity
62b218d848e86a64 node3 256 (0 new) 1000.0 MB 1000.0 MB (100.0%)
TOTAL 256 (256 unique) 1000.0 MB 1000.0 MB (100.0%)
```
While unexpected, this is logical because of the following facts:
- storing some data on the new node does not help increase the total quantity
of data that can be stored on the cluster, as the two other zones (`dc2` and
`dc3`) still need to store a full copy of everything, and their capacity is
still the same;
- there is therefore no need to move any data to the new node as this would be pointless;
- moving data to the new node has a cost which the algorithm decides to not pay if not necessary.
This distribution of data might, however, not be what the administrator wanted: if
they added a new node to `dc1`, it might be because the existing node is too
slow, and they wish to divide its load in half. In that case, what they need to
do to force Garage to distribute the data between the two nodes is to assign
only half of the capacity to each node in `dc1` (in our example, 500M instead of 1G).
In that case, Garage would determine that to be able to store 1G in total, it
would need to store 500M on the old node and 500M on the added one.
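As a sketch, using the node identifiers from the example output above (the capacity strings and the `--version` value are assumptions; the version following the currently applied layout 6 would be 7):

```bash
# Give each dc1 node half of the capacity wanted for the zone
garage layout assign b10c110e -z dc1 -c 500M -t node1
garage layout assign a11c7cf1 -z dc1 -c 500M -t node4

# Review the newly computed assignation, then apply it
garage layout show
garage layout apply --version 7
```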
### Example 2
The following example is a slightly different scenario, where `dc1` had two
nodes that were used at 50%, and `dc2` and `dc3` each have one node that is
100% used. All node capacities are the same.
Then, a node from `dc1` is moved into `dc3`. One could expect that the roles of
`dc1` and `dc3` would simply be swapped: the remaining node in `dc1` would be
used at 100%, and the two nodes now in `dc3` would be used at 50%. Instead,
this happens:
```
==== CURRENT CLUSTER LAYOUT ====
ID Tags Zone Capacity Usable capacity
b10c110e4e854e5a node1 dc1 1000.0 MB 500.0 MB (50.0%)
a11c7cf18af29737 node4 dc1 1000.0 MB 500.0 MB (50.0%)
a235ac7695e0c54d node2 dc2 1000.0 MB 1000.0 MB (100.0%)
62b218d848e86a64 node3 dc3 1000.0 MB 1000.0 MB (100.0%)
Zone redundancy: maximum
Current cluster layout version: 8
==== STAGED ROLE CHANGES ====
ID Tags Zone Capacity
a11c7cf18af29737 node4 dc3 1000.0 MB
==== NEW CLUSTER LAYOUT AFTER APPLYING CHANGES ====
ID Tags Zone Capacity Usable capacity
b10c110e4e854e5a node1 dc1 1000.0 MB 1000.0 MB (100.0%)
a235ac7695e0c54d node2 dc2 1000.0 MB 1000.0 MB (100.0%)
62b218d848e86a64 node3 dc3 1000.0 MB 753.9 MB (75.4%)
a11c7cf18af29737 node4 dc3 1000.0 MB 246.1 MB (24.6%)
Zone redundancy: maximum
==== COMPUTATION OF A NEW PARTITION ASSIGNATION ====
Partitions are replicated 3 times on at least 3 distinct zones.
Optimal partition size: 3.9 MB (3.9 MB in previous layout)
Usable capacity / total cluster capacity: 3.0 GB / 4.0 GB (75.0 %)
Effective capacity (replication factor 3): 1000.0 MB
A total of 128 new copies of partitions need to be transferred.
dc1 Tags Partitions Capacity Usable capacity
b10c110e4e854e5a node1 256 (128 new) 1000.0 MB 1000.0 MB (100.0%)
TOTAL 256 (256 unique) 1000.0 MB 1000.0 MB (100.0%)
dc2 Tags Partitions Capacity Usable capacity
a235ac7695e0c54d node2 256 (0 new) 1000.0 MB 1000.0 MB (100.0%)
TOTAL 256 (256 unique) 1000.0 MB 1000.0 MB (100.0%)
dc3 Tags Partitions Capacity Usable capacity
62b218d848e86a64 node3 193 (0 new) 1000.0 MB 753.9 MB (75.4%)
a11c7cf18af29737 node4 63 (0 new) 1000.0 MB 246.1 MB (24.6%)
TOTAL 256 (256 unique) 2.0 GB 1000.0 MB (50.0%)
```
As we can see, the node that was moved to `dc3` (node4) is only used at 25% (approximately),
whereas the node that was already in `dc3` (node3) is used at 75%.
This can be explained by the following:
- node1 will now be the only node remaining in `dc1`, thus it has to store all
of the data in the cluster. Since it was storing only half of it before, it has
to retrieve the other half from other nodes in the cluster.
- The data which it does not have is entirely stored by the other node that was
in `dc1` and that is now in `dc3` (node4). There is also a copy of it on node2
and node3 since both these nodes have a copy of everything.
- node3 and node4 are the two nodes that will now be in a datacenter that is
under-utilized (`dc3`); this means that those are the two candidates from which
data can be removed to be moved to node1.
- Garage will move data in equal proportions from all possible sources, in this
case it means that it will transfer 25% of the entire data set from node3 to
node1 and another 25% from node4 to node1.
This explains why node3 ends with 75% utilization (100% from before minus 25%
that is moved to node1), and node4 ends with 25% (50% from before minus 25%
that is moved to node1).
This illustrates the second principle of the layout computation: **if there is
a choice in moving data out of some nodes, then all links between pairs of
nodes are used in equal proportions** (this is approximately true, there is
randomness in the algorithm to achieve this so there might be some small
fluctuations, as we see above).

View file

@ -1,101 +0,0 @@
+++
title = "Multi-HDD support"
weight = 15
+++
Since v0.9, Garage natively supports nodes that have several storage drives
for storing data blocks (not for metadata storage).
## Initial setup
To set up a new Garage storage node with multiple HDDs,
format and mount all your drives in different directories,
and use a Garage configuration as follows:
```toml
data_dir = [
{ path = "/path/to/hdd1", capacity = "2T" },
{ path = "/path/to/hdd2", capacity = "4T" },
]
```
Garage will automatically balance all blocks stored by the node
among the different specified directories, proportionally to the
specified capacities.
## Updating the list of storage locations
If you add new storage locations to your `data_dir`,
Garage will not rebalance existing data between storage locations.
Newly written blocks will be balanced proportionally to the specified capacities,
and existing data may be moved between drives to improve balancing,
but only opportunistically when a data block is re-written (e.g. an object
is re-uploaded, or an object with a duplicate block is uploaded).
To understand precisely what is happening, we need to dive into how Garage
splits data among the different storage locations.
First of all, Garage divides the set of all possible block hashes
in a fixed number of slices (currently 1024), and assigns
to each slice a primary storage location among the specified data directories.
The number of slices having their primary location in each data directory
is proportional to the capacity specified in the config file.
When Garage receives a block to write, it will always write it in the primary
directory of the slice that contains its hash.
Now, to be able to not lose existing data blocks when storage locations
are added, Garage also keeps a list of secondary data directories
for all of the hash slices. Secondary data directories for a slice indicate
storage locations that once were primary directories for that slice, i.e. where
Garage knows that data blocks of that slice might be stored.
When Garage is requested to read a certain data block,
it will first look in the primary storage directory of its slice,
and if it doesn't find it there it goes through all of the secondary storage
locations until it finds it. This allows Garage to continue operating
normally when storage locations are added, without having to shuffle
files between drives to place them in the correct location.
This relatively simple strategy works well but does not ensure that data
is correctly balanced among drives according to their capacity.
To rebalance data, two strategies can be used:
- Lazy rebalancing: when a block is re-written (e.g. the object is re-uploaded),
Garage checks whether the existing copy is in the primary directory of the slice
or in a secondary directory. If the current copy is in a secondary directory,
Garage re-writes a copy in the primary directory and deletes the one from the
secondary directory. This might never end up rebalancing everything if there
are data blocks that are only read and never written.
- Active rebalancing: an operator of a Garage node can explicitly launch a repair
procedure that rebalances the data directories, moving all blocks to their
primary location. Once done, all secondary locations for all hash slices are
removed so that they won't be checked anymore when looking for a data block.
## Read-only storage locations
If you would like to move all data blocks from an existing data directory to one
or several new data directories, mark the old directory as read-only:
```toml
data_dir = [
{ path = "/path/to/old_data", read_only = true },
{ path = "/path/to/new_hdd1", capacity = "2T" },
{ path = "/path/to/new_hdd2", capacity = "4T" },
]
```
Garage will be able to read requested blocks from the read-only directory.
Garage will also move data out of the read-only directory either progressively
(lazy rebalancing) or if requested explicitly (active rebalancing).
Once an active rebalancing has finished, your read-only directory should be empty:
it might still contain subdirectories, but no data files. You can check that
it contains no files using:
```bash
find /path/to/old_data -type f  # should not print anything
```
at which point it can be removed from the `data_dir` list in your config file.

View file

@ -80,6 +80,6 @@ The entire procedure would look something like this:
5. If any specific migration procedure is required, it is usually in one of the two cases: 5. If any specific migration procedure is required, it is usually in one of the two cases:
- It can be run on online nodes after the new version has started, during regular cluster operation. - It can be run on online nodes after the new version has started, during regular cluster operation.
- it has to be run offline, in which case you will have to again take all nodes offline one after the other to run the repair - it has to be run offline
For this last step, please refer to the specific documentation pertaining to the version upgrade you are doing. For this last step, please refer to the specific documentation pertaining to the version upgrade you are doing.

View file

@ -84,8 +84,9 @@ admin_token = "$(openssl rand -base64 32)"
EOF EOF
``` ```
Now that your configuration file has been created, you may save it to the directory of your choice. Now that your configuration file has been created, you can put
By default, Garage looks for **`/etc/garage.toml`.** it in the right place. By default, garage looks at **`/etc/garage.toml`.**
You can also store it somewhere else, but you will have to specify `-c path/to/garage.toml` You can also store it somewhere else, but you will have to specify `-c path/to/garage.toml`
at each invocation of the `garage` binary (for example: `garage -c ./garage.toml server`, `garage -c ./garage.toml status`). at each invocation of the `garage` binary (for example: `garage -c ./garage.toml server`, `garage -c ./garage.toml status`).
@ -102,14 +103,12 @@ your data to be persisted properly.
### Launching the Garage server ### Launching the Garage server
Use the following command to launch the Garage server: Use the following command to launch the Garage server with our configuration file:
``` ```
garage -c path/to/garage.toml server garage server
``` ```
If you have placed the `garage.toml` file in `/etc` (its default location), you can simply run `garage server`.
You can tune Garage's verbosity as follows (from less verbose to more verbose): You can tune Garage's verbosity as follows (from less verbose to more verbose):
``` ```
@ -127,7 +126,7 @@ Log level `debug` can help you check why your S3 API calls are not working.
The `garage` utility is also used as a CLI tool to configure your Garage deployment. The `garage` utility is also used as a CLI tool to configure your Garage deployment.
It uses values from the TOML configuration file to find the Garage daemon running on the It uses values from the TOML configuration file to find the Garage daemon running on the
local node, therefore if your configuration file is not at `/etc/garage.toml` you will local node, therefore if your configuration file is not at `/etc/garage.toml` you will
again have to specify `-c path/to/garage.toml` at each invocation. again have to specify `-c path/to/garage.toml`.
If the `garage` CLI is able to correctly detect the parameters of your local Garage node, If the `garage` CLI is able to correctly detect the parameters of your local Garage node,
the following command should be enough to show the status of your cluster: the following command should be enough to show the status of your cluster:
@ -141,7 +140,7 @@ This should show something like this:
``` ```
==== HEALTHY NODES ==== ==== HEALTHY NODES ====
ID Hostname Address Tag Zone Capacity ID Hostname Address Tag Zone Capacity
563e1ac825ee3323 linuxbox 127.0.0.1:3901 NO ROLE ASSIGNED 563e1ac825ee3323 linuxbox 127.0.0.1:3901 NO ROLE ASSIGNED
``` ```
## Creating a cluster layout ## Creating a cluster layout
@ -154,12 +153,12 @@ For our test deployment, we are using only one node. The way in which we configu
it does not matter, you can simply write: it does not matter, you can simply write:
```bash ```bash
garage layout assign -z dc1 -c 1G <node_id> garage layout assign -z dc1 -c 1 <node_id>
``` ```
where `<node_id>` corresponds to the identifier of the node shown by `garage status` (first column). where `<node_id>` corresponds to the identifier of the node shown by `garage status` (first column).
You can enter simply a prefix of that identifier. You can enter simply a prefix of that identifier.
For instance here you could write just `garage layout assign -z dc1 -c 1G 563e`. For instance here you could write just `garage layout assign -z dc1 -c 1 563e`.
The layout then has to be applied to the cluster, using: The layout then has to be applied to the cluster, using:
@ -210,7 +209,7 @@ one key can access multiple buckets, multiple keys can access one bucket.
Create an API key using the following command: Create an API key using the following command:
``` ```
garage key create nextcloud-app-key garage key new --name nextcloud-app-key
``` ```
The output should look as follows: The output should look as follows:

View file

@ -10,8 +10,6 @@ Here is an example `garage.toml` configuration file that illustrates all of the
```toml ```toml
metadata_dir = "/var/lib/garage/meta" metadata_dir = "/var/lib/garage/meta"
data_dir = "/var/lib/garage/data" data_dir = "/var/lib/garage/data"
metadata_fsync = true
data_fsync = false
db_engine = "lmdb" db_engine = "lmdb"
@ -92,19 +90,6 @@ This folder can be placed on an HDD. The space available for `data_dir`
should be counted to determine a node's capacity should be counted to determine a node's capacity
when [adding it to the cluster layout](@/documentation/cookbook/real-world.md). when [adding it to the cluster layout](@/documentation/cookbook/real-world.md).
Since `v0.9.0`, Garage supports multiple data directories with the following syntax:
```toml
data_dir = [
{ path = "/path/to/old_data", read_only = true },
{ path = "/path/to/new_hdd1", capacity = "2T" },
{ path = "/path/to/new_hdd2", capacity = "4T" },
]
```
See [the dedicated documentation page](@/documentation/operations/multi-hdd.md)
on how to operate Garage in such a setup.
### `db_engine` (since `v0.8.0`) ### `db_engine` (since `v0.8.0`)
By default, Garage uses the Sled embedded database library By default, Garage uses the Sled embedded database library
@ -146,49 +131,6 @@ convert-db -a <input db engine> -i <input db path> \
Make sure to specify the full database path as presented in the table above, Make sure to specify the full database path as presented in the table above,
and not just the path to the metadata directory. and not just the path to the metadata directory.
### `metadata_fsync`
Whether to enable synchronous mode for the database engine or not.
This is disabled (`false`) by default.
This reduces the risk of metadata corruption in case of power failures,
at the cost of a significant drop in write performance,
as Garage will have to pause to sync data to disk much more often
(several times for API calls such as PutObject).
Using this option reduces the risk of simultaneous metadata corruption on several
cluster nodes, which could lead to data loss.
If multi-site replication is used, this option is most likely not necessary, as
it is extremely unlikely that two nodes in different locations will have a
power failure at the exact same time.
(Metadata corruption on a single node is not an issue, the corrupted data file
can always be deleted and reconstructed from the other nodes in the cluster.)
Here is how this option impacts the different database engines:
| Database | `metadata_fsync = false` (default) | `metadata_fsync = true` |
|----------|------------------------------------|-------------------------------|
| Sled | default options | *unsupported* |
| Sqlite | `PRAGMA synchronous = OFF` | `PRAGMA synchronous = NORMAL` |
| LMDB | `MDB_NOMETASYNC` + `MDB_NOSYNC` | `MDB_NOMETASYNC` |
Note that the Sqlite database is always run in `WAL` mode (`PRAGMA journal_mode = WAL`).
### `data_fsync`
Whether to `fsync` data blocks and their containing directory after they are
saved to disk.
This is disabled (`false`) by default.
This might reduce the risk that a data block is lost in rare
situations such as several nodes simultaneously losing power,
at the cost of a moderate drop in write performance.
Similarly to `metadata_fsync`, this is likely not necessary
if geographical replication is used.
### `block_size` ### `block_size`
Garage splits stored objects in consecutive chunks of size `block_size` Garage splits stored objects in consecutive chunks of size `block_size`

View file

@ -76,13 +76,16 @@ but these endpoints are documented in [Red Hat Ceph Storage - Chapter 2. Ceph Ob
| Endpoint | Garage | [Openstack Swift](https://docs.openstack.org/swift/latest/s3_compat.html) | [Ceph Object Gateway](https://docs.ceph.com/en/latest/radosgw/s3/) | [Riak CS](https://docs.riak.com/riak/cs/2.1.1/references/apis/storage/s3/index.html) | [OpenIO](https://docs.openio.io/latest/source/arch-design/s3_compliancy.html) | | Endpoint | Garage | [Openstack Swift](https://docs.openstack.org/swift/latest/s3_compat.html) | [Ceph Object Gateway](https://docs.ceph.com/en/latest/radosgw/s3/) | [Riak CS](https://docs.riak.com/riak/cs/2.1.1/references/apis/storage/s3/index.html) | [OpenIO](https://docs.openio.io/latest/source/arch-design/s3_compliancy.html) |
|------------------------------|----------------------------------|-----------------|---------------|---------|-----| |------------------------------|----------------------------------|-----------------|---------------|---------|-----|
| [AbortMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) | ✅ Implemented | ✅ | ✅ | ✅ | ✅ | | [AbortMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) | ✅ Implemented | ✅ | ✅ | ✅ | ✅ |
| [CompleteMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) | ✅ Implemented | ✅ | ✅ | ✅ | ✅ | | [CompleteMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) | ✅ Implemented (see details below) | ✅ | ✅ | ✅ | ✅ |
| [CreateMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) | ✅ Implemented | ✅| ✅ | ✅ | ✅ | | [CreateMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) | ✅ Implemented | ✅| ✅ | ✅ | ✅ |
| [ListMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUpload.html) | ✅ Implemented | ✅ | ✅ | ✅ | ✅ | | [ListMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUpload.html) | ✅ Implemented | ✅ | ✅ | ✅ | ✅ |
| [ListParts](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) | ✅ Implemented | ✅ | ✅ | ✅ | ✅ | | [ListParts](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) | ✅ Implemented | ✅ | ✅ | ✅ | ✅ |
| [UploadPart](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) | ✅ Implemented | ✅ | ✅| ✅ | ✅ | | [UploadPart](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) | ✅ Implemented (see details below) | ✅ | ✅| ✅ | ✅ |
| [UploadPartCopy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) | ✅ Implemented | ✅ | ✅ | ✅ | ✅ | | [UploadPartCopy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) | ✅ Implemented | ✅ | ✅ | ✅ | ✅ |
Our implementation of Multipart Upload is currently a bit more restrictive than Amazon's one in some edge cases.
For more information, please refer to our [issue tracker](https://git.deuxfleurs.fr/Deuxfleurs/garage/issues/204).
### Website endpoints ### Website endpoints
| Endpoint | Garage | [Openstack Swift](https://docs.openstack.org/swift/latest/s3_compat.html) | [Ceph Object Gateway](https://docs.ceph.com/en/latest/radosgw/s3/) | [Riak CS](https://docs.riak.com/riak/cs/2.1.1/references/apis/storage/s3/index.html) | [OpenIO](https://docs.openio.io/latest/source/arch-design/s3_compliancy.html) | | Endpoint | Garage | [Openstack Swift](https://docs.openstack.org/swift/latest/s3_compat.html) | [Ceph Object Gateway](https://docs.ceph.com/en/latest/radosgw/s3/) | [Riak CS](https://docs.riak.com/riak/cs/2.1.1/references/apis/storage/s3/index.html) | [OpenIO](https://docs.openio.io/latest/source/arch-design/s3_compliancy.html) |
@ -124,22 +127,15 @@ If you need this feature, please [share your use case in our dedicated issue](ht
| Endpoint | Garage | [Openstack Swift](https://docs.openstack.org/swift/latest/s3_compat.html) | [Ceph Object Gateway](https://docs.ceph.com/en/latest/radosgw/s3/) | [Riak CS](https://docs.riak.com/riak/cs/2.1.1/references/apis/storage/s3/index.html) | [OpenIO](https://docs.openio.io/latest/source/arch-design/s3_compliancy.html) | | Endpoint | Garage | [Openstack Swift](https://docs.openstack.org/swift/latest/s3_compat.html) | [Ceph Object Gateway](https://docs.ceph.com/en/latest/radosgw/s3/) | [Riak CS](https://docs.riak.com/riak/cs/2.1.1/references/apis/storage/s3/index.html) | [OpenIO](https://docs.openio.io/latest/source/arch-design/s3_compliancy.html) |
|------------------------------|----------------------------------|-----------------|---------------|---------|-----| |------------------------------|----------------------------------|-----------------|---------------|---------|-----|
| [DeleteBucketLifecycle](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) | ✅ Implemented | ❌| ✅| ❌| ✅| | [DeleteBucketLifecycle](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) | ❌ Missing | ❌| ✅| ❌| ✅|
| [GetBucketLifecycleConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) | ✅ Implemented | ❌| ✅ | ❌| ✅| | [GetBucketLifecycleConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) | ❌ Missing | ❌| ✅ | ❌| ✅|
| [PutBucketLifecycleConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) | ⚠ Partially implemented (see below) | ❌| ✅ | ❌| ✅| | [PutBucketLifecycleConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) | ❌ Missing | ❌| ✅ | ❌| ✅|
| [GetBucketVersioning](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) | ❌ Stub (see below) | ✅| ✅ | ❌| ✅| | [GetBucketVersioning](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) | ❌ Stub (see below) | ✅| ✅ | ❌| ✅|
| [ListObjectVersions](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectVersions.html) | ❌ Missing | ❌| ✅ | ❌| ✅| | [ListObjectVersions](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectVersions.html) | ❌ Missing | ❌| ✅ | ❌| ✅|
| [PutBucketVersioning](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html) | ❌ Missing | ❌| ✅| ❌| ✅| | [PutBucketVersioning](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html) | ❌ Missing | ❌| ✅| ❌| ✅|
**PutBucketLifecycleConfiguration:** The only actions supported are
`AbortIncompleteMultipartUpload` and `Expiration` (without the
`ExpiredObjectDeleteMarker` field). All other operations are dependent on
either bucket versioning or storage classes, which Garage currently does not
implement. The deprecated `Prefix` member directly in the `Rule`
structure/XML tag is not supported; specified prefixes must be inside the
`Filter` structure/XML tag.
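As an illustration, here is a hedged sketch of a lifecycle configuration using only the supported actions, sent with the `aws` CLI; the bucket name is a placeholder and the endpoint assumes Garage's S3 API listening on port 3900:

```bash
aws --endpoint-url http://localhost:3900 s3api put-bucket-lifecycle-configuration \
  --bucket my-bucket \
  --lifecycle-configuration '{
    "Rules": [
      {
        "ID": "abort-stale-multipart-uploads",
        "Status": "Enabled",
        "Filter": { "Prefix": "" },
        "AbortIncompleteMultipartUpload": { "DaysAfterInitiation": 7 }
      },
      {
        "ID": "expire-tmp-objects",
        "Status": "Enabled",
        "Filter": { "Prefix": "tmp/" },
        "Expiration": { "Days": 30 }
      }
    ]
  }'
```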
**GetBucketVersioning:** Stub implementation which always returns "versioning not enabled", since Garage does not yet support bucket versioning. **GetBucketVersioning:** Stub implementation (Garage does not yet support versioning so this always returns "versioning not enabled").
### Replication endpoints ### Replication endpoints

View file

@ -1,72 +0,0 @@
+++
title = "Migrating from 0.8 to 0.9"
weight = 12
+++
**This guide explains how to migrate to 0.9 if you have an existing 0.8 cluster.
We don't recommend trying to migrate to 0.9 directly from 0.7 or older.**
This migration procedure has been tested on several clusters without issues.
However, it is still a *critical procedure* that might cause issues.
**Make sure to back up all your data before attempting it!**
You might also want to read our [general documentation on upgrading Garage](@/documentation/operations/upgrading.md).
The following are **breaking changes** in Garage v0.9 that require your attention when migrating:
- LMDB is now the default metadata db engine and Sled is deprecated. If you were using Sled, make sure to specify `db_engine = "sled"` in your configuration file, or take the time to [convert your database](https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db-engine-since-v0-8-0).
- Capacity values are now in actual byte units. The translation from the old layout will assign 1 capacity = 1 GB by default, which might be wrong for your cluster. This does not cause any data to be moved around, but you might want to re-assign correct capacity values post-migration.
- Multipart uploads that were started in Garage v0.8 will not be visible in Garage v0.9 and will have to be restarted from scratch.
- Changes to the admin API: some `v0/` endpoints have been replaced by `v1/` counterparts with updated/uniformized syntax. All other endpoints have also moved to `v1/` by default, without syntax changes, but are still available under `v0/` for compatibility.
## Simple migration procedure (takes cluster offline for a while)
The migration steps are as follows:
1. Disable API and web access. You may do this by stopping your reverse proxy or by commenting out
the `api_bind_addr` values in your `config.toml` file and restarting Garage.
2. Do `garage repair --all-nodes --yes tables` and `garage repair --all-nodes --yes blocks`,
check the logs and check that all data seems to be synced correctly between
nodes. If you have time, do additional checks (`versions`, `block_refs`, etc.)
3. Check that the block resync queue and Merkle queue are empty:
run `garage stats -a` to query them or inspect metrics in the Grafana dashboard.
4. Turn off Garage v0.8
5. **Backup the metadata folder of all your nodes!** For instance, use the following command
if your metadata directory is `/var/lib/garage/meta`: `cd /var/lib/garage ; tar -acf meta-v0.8.tar.zst meta/`
6. Install Garage v0.9
7. Update your configuration file if necessary.
8. Turn on Garage v0.9
9. Do `garage repair --all-nodes --yes tables` and `garage repair --all-nodes --yes blocks`.
Wait for a full table sync to run.
10. Your upgraded cluster should be in a working state. Re-enable API and Web
access and check that everything went well.
11. Monitor your cluster in the next hours to see if it works well under your production load, report any issue.
12. You might want to assign correct capacity values to all your nodes. Doing so might cause data to be moved
in your cluster, which should also be monitored carefully.
## Minimal downtime migration procedure
The migration to Garage v0.9 can be done with almost no downtime,
by restarting all nodes at once in the new version.
The migration steps are as follows:
1. Do `garage repair --all-nodes --yes tables` and `garage repair --all-nodes --yes blocks`,
check the logs and check that all data seems to be synced correctly between
nodes. If you have time, do additional checks (`versions`, `block_refs`, etc.)
2. Turn off each node individually; back up its metadata folder (see above); turn it back on again.
This will allow you to take a backup of all nodes without impacting global cluster availability.
You can do all nodes of a single zone at once as this does not impact the availability of Garage.
3. Prepare your binaries and configuration files for Garage v0.9
4. Shut down all v0.8 nodes simultaneously, and restart them all simultaneously in v0.9.
Use your favorite deployment tool (Ansible, Kubernetes, Nomad) to achieve this as fast as possible.
Garage v0.9 should be in a working state as soon as it starts.
5. Proceed with repair and monitoring as described in steps 9-12 above.

View file

@ -52,11 +52,11 @@ Returns an HTTP status 200 if the node is ready to answer user's requests,
and an HTTP status 503 (Service Unavailable) if there are some partitions and an HTTP status 503 (Service Unavailable) if there are some partitions
for which a quorum of nodes is not available. for which a quorum of nodes is not available.
A simple textual message is also returned in a body with content-type `text/plain`. A simple textual message is also returned in a body with content-type `text/plain`.
See `/v1/health` for an API that also returns JSON output. See `/v0/health` for an API that also returns JSON output.
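For example, assuming the admin API is bound on port 3903 as in this documentation's configuration examples, and using the configured `admin_token` as a bearer token (the path prefix, `/v0/` or `/v1/`, depends on your Garage version):

```bash
# Plain-text readiness check (whether a token is required may depend on your setup)
curl -i http://localhost:3903/health

# JSON health report, authenticated with the admin bearer token
curl -s -H "Authorization: Bearer $GARAGE_ADMIN_TOKEN" \
  http://localhost:3903/v1/health
```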
### Cluster operations ### Cluster operations
#### GetClusterStatus `GET /v1/status` #### GetClusterStatus `GET /v0/status`
Returns the cluster's current status in JSON, including: Returns the cluster's current status in JSON, including:
@ -70,112 +70,86 @@ Example response body:
```json ```json
{ {
"node": "ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f", "node": "ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f",
"garageVersion": "git:v0.9.0-dev", "garage_version": "git:v0.8.0",
"garageFeatures": [ "knownNodes": {
"k2v", "ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f": {
"sled",
"lmdb",
"sqlite",
"metrics",
"bundled-libs"
],
"rustVersion": "1.68.0",
"dbEngine": "LMDB (using Heed crate)",
"knownNodes": [
{
"id": "ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f",
"addr": "10.0.0.11:3901", "addr": "10.0.0.11:3901",
"isUp": true, "is_up": true,
"lastSeenSecsAgo": 9, "last_seen_secs_ago": 9,
"hostname": "node1" "hostname": "node1"
}, },
{ "4a6ae5a1d0d33bf895f5bb4f0a418b7dc94c47c0dd2eb108d1158f3c8f60b0ff": {
"id": "4a6ae5a1d0d33bf895f5bb4f0a418b7dc94c47c0dd2eb108d1158f3c8f60b0ff",
"addr": "10.0.0.12:3901", "addr": "10.0.0.12:3901",
"isUp": true, "is_up": true,
"lastSeenSecsAgo": 1, "last_seen_secs_ago": 1,
"hostname": "node2" "hostname": "node2"
}, },
{ "23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27": {
"id": "23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27",
"addr": "10.0.0.21:3901", "addr": "10.0.0.21:3901",
"isUp": true, "is_up": true,
"lastSeenSecsAgo": 7, "last_seen_secs_ago": 7,
"hostname": "node3" "hostname": "node3"
}, },
{ "e2ee7984ee65b260682086ec70026165903c86e601a4a5a501c1900afe28d84b": {
"id": "e2ee7984ee65b260682086ec70026165903c86e601a4a5a501c1900afe28d84b",
"addr": "10.0.0.22:3901", "addr": "10.0.0.22:3901",
"isUp": true, "is_up": true,
"lastSeenSecsAgo": 1, "last_seen_secs_ago": 1,
"hostname": "node4" "hostname": "node4"
} }
], },
"layout": { "layout": {
"version": 12, "version": 12,
"roles": [ "roles": {
{ "ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f": {
"id": "ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f",
"zone": "dc1", "zone": "dc1",
"capacity": 10737418240, "capacity": 4,
"tags": [ "tags": [
"node1" "node1"
] ]
}, },
{ "4a6ae5a1d0d33bf895f5bb4f0a418b7dc94c47c0dd2eb108d1158f3c8f60b0ff": {
"id": "4a6ae5a1d0d33bf895f5bb4f0a418b7dc94c47c0dd2eb108d1158f3c8f60b0ff",
"zone": "dc1", "zone": "dc1",
"capacity": 10737418240, "capacity": 6,
"tags": [ "tags": [
"node2" "node2"
] ]
}, },
{ "23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27": {
"id": "23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27",
"zone": "dc2", "zone": "dc2",
"capacity": 10737418240, "capacity": 10,
"tags": [ "tags": [
"node3" "node3"
] ]
} }
], },
"stagedRoleChanges": [ "stagedRoleChanges": {
{ "e2ee7984ee65b260682086ec70026165903c86e601a4a5a501c1900afe28d84b": {
"id": "e2ee7984ee65b260682086ec70026165903c86e601a4a5a501c1900afe28d84b",
"remove": false,
"zone": "dc2", "zone": "dc2",
"capacity": 10737418240, "capacity": 5,
"tags": [ "tags": [
"node4" "node4"
] ]
} }
{
"id": "23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27",
"remove": true,
"zone": null,
"capacity": null,
"tags": null,
} }
]
} }
} }
``` ```
#### GetClusterHealth `GET /v1/health` #### GetClusterHealth `GET /v0/health`
Returns the cluster's current health in JSON format, with the following variables: Returns the cluster's current health in JSON format, with the following variables:
- `status`: one of `healthy`, `degraded` or `unavailable`: - `status`: one of `Healthy`, `Degraded` or `Unavailable`:
- healthy: Garage node is connected to all storage nodes - Healthy: Garage node is connected to all storage nodes
- degraded: Garage node is not connected to all storage nodes, but a quorum of write nodes is available for all partitions - Degraded: Garage node is not connected to all storage nodes, but a quorum of write nodes is available for all partitions
- unavailable: a quorum of write nodes is not available for some partitions - Unavailable: a quorum of write nodes is not available for some partitions
- `knownNodes`: the number of nodes this Garage node has had a TCP connection to since the daemon started - `known_nodes`: the number of nodes this Garage node has had a TCP connection to since the daemon started
- `connectedNodes`: the number of nodes this Garage node currently has an open connection to - `connected_nodes`: the number of nodes this Garage node currently has an open connection to
- `storageNodes`: the number of storage nodes currently registered in the cluster layout - `storage_nodes`: the number of storage nodes currently registered in the cluster layout
- `storageNodesOk`: the number of storage nodes to which a connection is currently open - `storage_nodes_ok`: the number of storage nodes to which a connection is currently open
- `partitions`: the total number of partitions of the data (currently always 256) - `partitions`: the total number of partitions of the data (currently always 256)
- `partitionsQuorum`: the number of partitions for which a quorum of write nodes is available - `partitions_quorum`: the number of partitions for which a quorum of write nodes is available
- `partitionsAllOk`: the number of partitions for which we are connected to all storage nodes responsible for storing it - `partitions_all_ok`: the number of partitions for which we are connected to all storage nodes responsible for storing it
Contrarily to `GET /health`, this endpoint always returns a 200 OK HTTP response code. Contrarily to `GET /health`, this endpoint always returns a 200 OK HTTP response code.
@ -183,18 +157,18 @@ Example response body:
```json ```json
{ {
"status": "degraded", "status": "Degraded",
"knownNodes": 3, "known_nodes": 3,
"connectedNodes": 3, "connected_nodes": 2,
"storageNodes": 4, "storage_nodes": 3,
"storageNodesOk": 3, "storage_nodes_ok": 2,
"partitions": 256, "partitions": 256,
"partitionsQuorum": 256, "partitions_quorum": 256,
"partitionsAllOk": 64 "partitions_all_ok": 0
} }
``` ```
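For instance, the new `/v1/health` endpoint shown above can be queried with any HTTP client. In the sketch below, the host, the port (3903) and the bearer token taken from `$GARAGE_ADMIN_TOKEN` are assumptions that depend on the `[admin]` section of your configuration file.

```bash
# Query the v1 health endpoint (illustrative; adjust host, port and token).
curl -s -H "Authorization: Bearer $GARAGE_ADMIN_TOKEN" \
  http://localhost:3903/v1/health
```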
#### ConnectClusterNodes `POST /v1/connect` #### ConnectClusterNodes `POST /v0/connect`
Instructs this Garage node to connect to other Garage nodes at specified addresses. Instructs this Garage node to connect to other Garage nodes at specified addresses.
@ -224,7 +198,7 @@ Example response:
] ]
``` ```
#### GetClusterLayout `GET /v1/layout` #### GetClusterLayout `GET /v0/layout`
Returns the cluster's current layout in JSON, including: Returns the cluster's current layout in JSON, including:
@ -238,54 +212,42 @@ Example response body:
```json ```json
{ {
"version": 12, "version": 12,
"roles": [ "roles": {
{ "ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f": {
"id": "ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f",
"zone": "dc1", "zone": "dc1",
"capacity": 10737418240, "capacity": 4,
"tags": [ "tags": [
"node1" "node1"
] ]
}, },
{ "4a6ae5a1d0d33bf895f5bb4f0a418b7dc94c47c0dd2eb108d1158f3c8f60b0ff": {
"id": "4a6ae5a1d0d33bf895f5bb4f0a418b7dc94c47c0dd2eb108d1158f3c8f60b0ff",
"zone": "dc1", "zone": "dc1",
"capacity": 10737418240, "capacity": 6,
"tags": [ "tags": [
"node2" "node2"
] ]
}, },
{ "23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27": {
"id": "23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27",
"zone": "dc2", "zone": "dc2",
"capacity": 10737418240, "capacity": 10,
"tags": [ "tags": [
"node3" "node3"
] ]
} }
], },
"stagedRoleChanges": [ "stagedRoleChanges": {
{ "e2ee7984ee65b260682086ec70026165903c86e601a4a5a501c1900afe28d84b": {
"id": "e2ee7984ee65b260682086ec70026165903c86e601a4a5a501c1900afe28d84b",
"remove": false,
"zone": "dc2", "zone": "dc2",
"capacity": 10737418240, "capacity": 5,
"tags": [ "tags": [
"node4" "node4"
] ]
} }
{
"id": "23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27",
"remove": true,
"zone": null,
"capacity": null,
"tags": null,
} }
]
} }
``` ```
#### UpdateClusterLayout `POST /v1/layout` #### UpdateClusterLayout `POST /v0/layout`
Send modifications to the cluster layout. These modifications will Send modifications to the cluster layout. These modifications will
be included in the staged role changes, visible in subsequent calls be included in the staged role changes, visible in subsequent calls
@ -297,9 +259,8 @@ the layout.
Request body format: Request body format:
```json ```json
[ {
{ <node_id>: {
"id": <node_id>,
"capacity": <new_capacity>, "capacity": <new_capacity>,
"zone": <new_zone>, "zone": <new_zone>,
"tags": [ "tags": [
@ -307,22 +268,17 @@ Request body format:
... ...
] ]
}, },
{ <node_id_to_remove>: null,
"id": <node_id_to_remove>, ...
"remove": true }
}
]
``` ```
Contrary to the CLI that may update only a subset of the fields Contrary to the CLI that may update only a subset of the fields
`capacity`, `zone` and `tags`, when calling this API all of these `capacity`, `zone` and `tags`, when calling this API all of these
values must be specified. values must be specified.
This returns the new cluster layout with the proposed staged changes,
as returned by GetClusterLayout.
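As an illustration of the new v1 request format, the sketch below stages a role change for the example node used in this document and then applies it with the incremented layout version (13, since the example layout is at version 12). The host, port and token are assumptions, as in the previous sketch, and the `{"version": ...}` body follows the ApplyClusterLayout description below.

```bash
# Stage a role change, then apply the staged layout (illustrative sketch).
curl -s -X POST -H "Authorization: Bearer $GARAGE_ADMIN_TOKEN" \
  -H "Content-Type: application/json" \
  -d '[{"id": "e2ee7984ee65b260682086ec70026165903c86e601a4a5a501c1900afe28d84b",
        "zone": "dc2", "capacity": 10737418240, "tags": ["node4"]}]' \
  http://localhost:3903/v1/layout

curl -s -X POST -H "Authorization: Bearer $GARAGE_ADMIN_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"version": 13}' \
  http://localhost:3903/v1/layout/apply
```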
#### ApplyClusterLayout `POST /v0/layout/apply`
#### ApplyClusterLayout `POST /v1/layout/apply`
Applies to the cluster the layout changes currently registered as Applies to the cluster the layout changes currently registered as
staged layout changes. staged layout changes.
@ -339,10 +295,7 @@ Similarly to the CLI, the body must include the version of the new layout
that will be created, which MUST be 1 + the value of the currently that will be created, which MUST be 1 + the value of the currently
existing layout in the cluster. existing layout in the cluster.
This returns the message describing all the calculations done to compute the new #### RevertClusterLayout `POST /v0/layout/revert`
layout, as well as the description of the layout as returned by GetClusterLayout.
#### RevertClusterLayout `POST /v1/layout/revert`
Clears all of the staged layout changes. Clears all of the staged layout changes.
@ -360,13 +313,10 @@ Similarly to the CLI, the body must include the incremented
version number, which MUST be 1 + the value of the currently version number, which MUST be 1 + the value of the currently
existing layout in the cluster. existing layout in the cluster.
This returns the new cluster layout with all changes reverted,
as returned by GetClusterLayout.
### Access key operations ### Access key operations
#### ListKeys `GET /v1/key` #### ListKeys `GET /v0/key`
Returns all API access keys in the cluster. Returns all API access keys in the cluster.
@ -385,8 +335,34 @@ Example response:
] ]
``` ```
#### GetKeyInfo `GET /v1/key?id=<access key id>` #### CreateKey `POST /v0/key`
#### GetKeyInfo `GET /v1/key?search=<pattern>`
Creates a new API access key.
Request body format:
```json
{
"name": "NameOfMyKey"
}
```
#### ImportKey `POST /v0/key/import`
Imports an existing API key.
Request body format:
```json
{
"accessKeyId": "GK31c2f218a2e44f485b94239e",
"secretAccessKey": "b892c0665f0ada8a4755dae98baa3b133590e11dae3bcc1f9d769d67f16c3835",
"name": "NameOfMyKey"
}
```
#### GetKeyInfo `GET /v0/key?id=<acces key id>`
#### GetKeyInfo `GET /v0/key?search=<pattern>`
Returns information about the requested API access key. Returns information about the requested API access key.
@ -394,9 +370,6 @@ If `id` is set, the key is looked up using its exact identifier (faster).
If `search` is set, the key is looked up using its name or prefix If `search` is set, the key is looked up using its name or prefix
of identifier (slower, all keys are enumerated to do this). of identifier (slower, all keys are enumerated to do this).
Optionally, the query parameter `showSecretKey=true` can be set to reveal the
associated secret access key.
Example response: Example response:
```json ```json
@ -460,40 +433,11 @@ Example response:
} }
``` ```
#### CreateKey `POST /v1/key` #### DeleteKey `DELETE /v0/key?id=<acces key id>`
Creates a new API access key. Deletes an API access key.
Request body format: #### UpdateKey `POST /v0/key?id=<acces key id>`
```json
{
"name": "NameOfMyKey"
}
```
This returns the key info, including the created secret key,
in the same format as the result of GetKeyInfo.
#### ImportKey `POST /v1/key/import`
Imports an existing API key.
This will check that the imported key is in the valid format, i.e.
is a key that could have been generated by Garage.
Request body format:
```json
{
"accessKeyId": "GK31c2f218a2e44f485b94239e",
"secretAccessKey": "b892c0665f0ada8a4755dae98baa3b133590e11dae3bcc1f9d769d67f16c3835",
"name": "NameOfMyKey"
}
```
This returns the key info in the same format as the result of GetKeyInfo.
#### UpdateKey `POST /v1/key?id=<access key id>`
Updates information about the specified API access key. Updates information about the specified API access key.
@ -513,16 +457,10 @@ All fields (`name`, `allow` and `deny`) are optional.
If they are present, the corresponding modifications are applied to the key, otherwise nothing is changed. If they are present, the corresponding modifications are applied to the key, otherwise nothing is changed.
The possible flags in `allow` and `deny` are: `createBucket`. The possible flags in `allow` and `deny` are: `createBucket`.
This returns the key info in the same format as the result of GetKeyInfo.
#### DeleteKey `DELETE /v1/key?id=<access key id>`
Deletes an API access key.
### Bucket operations ### Bucket operations
#### ListBuckets `GET /v1/bucket` #### ListBuckets `GET /v0/bucket`
Returns all storage buckets in the cluster. Returns all storage buckets in the cluster.
@ -564,8 +502,8 @@ Example response:
] ]
``` ```
#### GetBucketInfo `GET /v1/bucket?id=<bucket id>` #### GetBucketInfo `GET /v0/bucket?id=<bucket id>`
#### GetBucketInfo `GET /v1/bucket?globalAlias=<alias>` #### GetBucketInfo `GET /v0/bucket?globalAlias=<alias>`
Returns information about the requested storage bucket. Returns information about the requested storage bucket.
@ -597,10 +535,7 @@ Example response:
], ],
"objects": 14827, "objects": 14827,
"bytes": 13189855625, "bytes": 13189855625,
"unfinishedUploads": 1, "unfinshedUploads": 0,
"unfinishedMultipartUploads": 1,
"unfinishedMultipartUploadParts": 11,
"unfinishedMultipartUploadBytes": 41943040,
"quotas": { "quotas": {
"maxSize": null, "maxSize": null,
"maxObjects": null "maxObjects": null
@ -608,7 +543,7 @@ Example response:
} }
``` ```
#### CreateBucket `POST /v1/bucket` #### CreateBucket `POST /v0/bucket`
Creates a new storage bucket. Creates a new storage bucket.
@ -648,7 +583,13 @@ or no alias at all.
Technically, you can also specify both `globalAlias` and `localAlias` and that would create Technically, you can also specify both `globalAlias` and `localAlias` and that would create
two aliases, but I don't see why you would want to do that. two aliases, but I don't see why you would want to do that.
#### UpdateBucket `PUT /v1/bucket?id=<bucket id>` #### DeleteBucket `DELETE /v0/bucket?id=<bucket id>`
Deletes a storage bucket. A bucket cannot be deleted if it is not empty.
Warning: this will delete all aliases associated with the bucket!
#### UpdateBucket `PUT /v0/bucket?id=<bucket id>`
Updates configuration of the given bucket. Updates configuration of the given bucket.
@ -680,16 +621,9 @@ In `quotas`: new values of `maxSize` and `maxObjects` must both be specified, or
to remove the quotas. An absent value will be considered the same as a `null`. It is not possible to remove the quotas. An absent value will be considered the same as a `null`. It is not possible
to change only one of the two quotas. to change only one of the two quotas.
#### DeleteBucket `DELETE /v1/bucket?id=<bucket id>`
Deletes a storage bucket. A bucket cannot be deleted if it is not empty.
Warning: this will delete all aliases associated with the bucket!
### Operations on permissions for keys on buckets ### Operations on permissions for keys on buckets
#### BucketAllowKey `POST /v1/bucket/allow` #### BucketAllowKey `POST /v0/bucket/allow`
Allows a key to do read/write/owner operations on a bucket. Allows a key to do read/write/owner operations on a bucket.
@ -710,7 +644,7 @@ Request body format:
Flags in `permissions` which have the value `true` will be activated. Flags in `permissions` which have the value `true` will be activated.
Other flags will remain unchanged. Other flags will remain unchanged.
#### BucketDenyKey `POST /v1/bucket/deny` #### BucketDenyKey `POST /v0/bucket/deny`
Denies a key from doing read/write/owner operations on a bucket. Denies a key from doing read/write/owner operations on a bucket.
@ -734,19 +668,19 @@ Other flags will remain unchanged.
### Operations on bucket aliases ### Operations on bucket aliases
#### GlobalAliasBucket `PUT /v1/bucket/alias/global?id=<bucket id>&alias=<global alias>` #### GlobalAliasBucket `PUT /v0/bucket/alias/global?id=<bucket id>&alias=<global alias>`
Empty body. Creates a global alias for a bucket. Empty body. Creates a global alias for a bucket.
#### GlobalUnaliasBucket `DELETE /v1/bucket/alias/global?id=<bucket id>&alias=<global alias>` #### GlobalUnaliasBucket `DELETE /v0/bucket/alias/global?id=<bucket id>&alias=<global alias>`
Removes a global alias for a bucket. Removes a global alias for a bucket.
#### LocalAliasBucket `PUT /v1/bucket/alias/local?id=<bucket id>&accessKeyId=<access key ID>&alias=<local alias>` #### LocalAliasBucket `PUT /v0/bucket/alias/local?id=<bucket id>&accessKeyId=<access key ID>&alias=<local alias>`
Empty body. Creates a local alias for a bucket in the namespace of a specific access key. Empty body. Creates a local alias for a bucket in the namespace of a specific access key.
#### LocalUnaliasBucket `DELETE /v1/bucket/alias/local?id=<bucket id>&accessKeyId=<access key ID>&alias=<local alias>` #### LocalUnaliasBucket `DELETE /v0/bucket/alias/local?id=<bucket id>&accessKeyId=<access key ID>&alias=<local alias>`
Removes a local alias for a bucket in the namespace of a specific access key. Removes a local alias for a bucket in the namespace of a specific access key.
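Putting the key endpoints documented above together, a minimal sketch of creating a key and then retrieving it with its secret through the v1 API could look as follows (host, port and token are assumptions, as in the previous sketches):

```bash
# Create an API key, then look it up by name revealing its secret key
# (illustrative sketch; adjust host, port and admin token).
curl -s -X POST -H "Authorization: Bearer $GARAGE_ADMIN_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"name": "NameOfMyKey"}' \
  http://localhost:3903/v1/key

curl -s -H "Authorization: Bearer $GARAGE_ADMIN_TOKEN" \
  "http://localhost:3903/v1/key?search=NameOfMyKey&showSecretKey=true"
```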

@ -1,13 +0,0 @@
optimal_layout.aux
optimal_layout.log
optimal_layout.synctex.gz
optimal_layout.bbl
optimal_layout.blg
geodistrib.aux
geodistrib.bbl
geodistrib.blg
geodistrib.log
geodistrib.out
geodistrib.synctex.gz

[Five binary image diffs suppressed (before sizes: 161 KiB, 560 KiB, 287 KiB, 112 KiB, 270 KiB).]
@ -1,317 +0,0 @@
\documentclass[]{article}
\usepackage{amsmath,amssymb}
\usepackage{amsthm}
\usepackage{stmaryrd}
\usepackage{graphicx,xcolor}
\usepackage{hyperref}
\usepackage{algorithm,algpseudocode,float}
\renewcommand\thesubsubsection{\Alph{subsubsection})}
\newtheorem{proposition}{Proposition}
%opening
\title{An algorithm for geo-distributed and redundant storage in Garage}
\author{Mendes Oulamara \\ \emph{mendes@deuxfleurs.fr}}
\date{}
\begin{document}
\maketitle
\begin{abstract}
Garage
\end{abstract}
\section{Introduction}
Garage\footnote{\url{https://garagehq.deuxfleurs.fr/}} is an open-source distributed object storage service tailored for self-hosting. It was designed by the Deuxfleurs association\footnote{\url{https://deuxfleurs.fr/}} to enable small structures (associations, collectives, small companies) to share storage resources to reliably self-host their data, possibly with old and non-reliable machines.
To achieve these reliability and availability goals, the data is broken into \emph{partitions} and every partition is replicated over 3 different machines (that we call \emph{nodes}). When the data is queried, a consensus algorithm makes it possible to fetch it from one of the nodes. A \emph{replication factor} of 3 ensures the best guarantees in the consensus algorithm \cite{ADD RREF}, but this parameter can be different.
Moreover, if the nodes are spread over different \emph{zones} (different houses, offices, cities\dots), we can ask the data to be replicated over nodes belonging to different zones, to improve the storage robustness against zone failure (such as power outage). To do so, we set a \emph{redundancy parameter}, that is no more than the replication factor, and we ask that any partition is replicated over this number of zones at least.
In this work, we propose a repartition algorithm that, given the nodes specifications and the replication and redundancy parameters, computes an optimal assignation of partitions to nodes. We say that the assignation is optimal in the sense that it maximizes the size of the partitions, and hence the effective storage capacity of the system.
Moreover, when a former assignation exists, which is not optimal anymore due to nodes or zones updates, our algorithm computes a new optimal assignation that minimizes the amount of data to be transferred during the assignation update (the \emph{transfer load}).
We call the set of nodes cooperating to store the data a \emph{cluster}, and a description of the nodes, zones and the assignation of partitions to nodes a \emph{cluster layout}.
\subsection{Notations}
Let $k$ be some fixed parameter value, typically 8, that we call the ``partition bits''.
Every object to be stored in the system is split into data blocks of fixed size. We compute a hash $h(\mathbf{b})$ of every such block $\mathbf{b}$, and we define the $k$ last bits of this hash to be the partition number $p(\mathbf{b})$ of the block. This label can take $P=2^k$ different values, and hence there are $P$ different partitions. We denote $\mathbf{P}$ the set of partition labels (i.e. $\mathbf{P}=\llbracket1,P\rrbracket$).
We are given a set $\mathbf{N}$ of $N$ nodes and a set $\mathbf{Z}$ of $Z$ zones. Every node $n$ has a non-negative storage capacity $c_n\ge 0$ and belongs to a zone $z_n\in \mathbf{Z}$. We are also given a replication parameter $\rho_\mathbf{N}$ and a redundancy parameter $\rho_\mathbf{Z}$ such that $1\le \rho_\mathbf{Z} \le \rho_\mathbf{N}$ (typical values would be $\rho_N=3$ and $\rho_Z=2$).
Our goal is to compute an assignment $\alpha = (\alpha_p^1, \ldots, \alpha_p^{\rho_\mathbf{N}})_{p\in \mathbf{P}}$ such that every partition $p$ is associated to $\rho_\mathbf{N}$ distinct nodes $\alpha_p^1, \ldots, \alpha_p^{\rho_\mathbf{N}} \in \mathbf{N}$ and these nodes belong to at least $\rho_\mathbf{Z}$ distinct zones. Among the possible assignations, we choose one that \emph{maximizes} the effective storage capacity of the cluster. If the layout contained a previous assignment $\alpha'$, we \emph{minimize} the amount of data to transfer during the layout update by making $\alpha$ as close as possible to $\alpha'$. These maximization and minimization are described more formally in the following section.
\subsection{Optimization parameters}
To link the effective storage capacity of the cluster to partition assignment, we make the following assumption:
\begin{equation}
\tag{H1}
\text{\emph{All partitions have the same size $s$.}}
\end{equation}
This assumption is justified by the dispersion of the hashing function, when the number of partitions is small relative to the number of stored blocks.
Every node $n$ will store some number $p_n$ of partitions (it is the number of partitions $p$ such that $n$ appears in $\alpha_p$). Hence the partitions stored by $n$ (and hence all partitions by our assumption) have their size bounded by $c_n/p_n$. This remark leads us to define the optimal size that we will want to maximize:
\begin{equation}
\label{eq:optimal}
\tag{OPT}
s^* = \min_{n \in N} \frac{c_n}{p_n}.
\end{equation}
When the capacities of the nodes are updated (this includes adding or removing a node), we want to update the assignment as well. However, transferring the data between nodes has a cost and we would like to limit the number of changes in the assignment. We make the following assumption:
\begin{equation}
\tag{H2}
\text{\emph{Nodes updates happen rarely relatively to block operations.}}
\end{equation}
This assumption justifies that when we compute the new assignment $\alpha$, it is worth optimizing the partition size \eqref{eq:optimal} first, and then, among the possible optimal solutions, trying to minimize the number of partition transfers. More formally, we minimize the distance between two assignments defined by
\begin{equation}
d(\alpha, \alpha') := \#\{ (n,p) \in \mathbf{N}\times\mathbf{P} ~|~ n\in \alpha_p \triangle \alpha'_p \}
\end{equation}
where the symmetric difference $\alpha_p \triangle \alpha'_p$ denotes the nodes appearing in one of the assignations but not in both.
\section{Computation of an optimal assignment}
The algorithm that we propose takes as inputs the cluster layout parameters $\mathbf{N}$, $\mathbf{Z}$, $\mathbf{P}$, $(c_n)_{n\in \mathbf{N}}$, $\rho_\mathbf{N}$, $\rho_\mathbf{Z}$, that we defined in the introduction, together with the former assignation $\alpha'$ (if any). The computation of the new optimal assignation $\alpha^*$ is done in three successive steps that will be detailed in the following sections. The first step computes the largest partition size $s^*$ that an assignation can achieve. The second step computes an optimal candidate assignment $\alpha$ that achieves $s^*$; a heuristic is used in the computation to make it hopefully close to $\alpha'$. The third step modifies $\alpha$ iteratively to reduce $d(\alpha, \alpha')$ and yields an assignation $\alpha^*$ achieving $s^*$, and minimizing $d(\cdot, \alpha')$ among such assignations.
We will explain in the next section how to represent an assignment $\alpha$ by a flow $f$ on a weighted graph $G$ to enable the use of flow and graph algorithms. The main function of the algorithm can be written as follows.
\subsubsection*{Algorithm}
\begin{algorithmic}[1]
\Function{Compute Layout}{$\mathbf{N}$, $\mathbf{Z}$, $\mathbf{P}$, $(c_n)_{n\in \mathbf{N}}$, $\rho_\mathbf{N}$, $\rho_\mathbf{Z}$, $\alpha'$}
\State $s^* \leftarrow$ \Call{Compute Partition Size}{$\mathbf{N}$, $\mathbf{Z}$, $\mathbf{P}$, $(c_n)_{n\in \mathbf{N}}$, $\rho_\mathbf{N}$, $\rho_\mathbf{Z}$}
\State $G \leftarrow G(s^*)$
\State $f \leftarrow$ \Call{Compute Candidate Assignment}{$G$, $\alpha'$}
\State $f^* \leftarrow$ \Call{Minimize transfer load}{$G$, $f$, $\alpha'$}
\State Build $\alpha^*$ from $f^*$
\State \Return $\alpha^*$
\EndFunction
\end{algorithmic}
\subsubsection*{Complexity}
As we will see in the next sections, the worst-case complexity of this algorithm is $O(P^2 N^2)$. The minimization of transfer load is the most expensive step, and it can run with a timeout since it is only an optimization step. Without this step (or with a smart timeout), the worst-case complexity can be $O((PN)^{3/2}\log C)$ where $C$ is the total storage capacity of the cluster.
\subsection{Determination of the partition size $s^*$}
We will represent an assignment $\alpha$ as a flow in a specific graph $G$. We will not compute the optimal partition size $s^*$ a priori, but we will determine it by dichotomy, as the largest size $s$ such that the maximal flow achievable on $G=G(s)$ has value $\rho_\mathbf{N}P$. We will assume that the capacities are given in a small enough unit (say, Megabytes), and we will determine $s^*$ at the precision of the given unit.
Given some candidate size value $s$, we describe the oriented weighted graph $G=(V,E)$ with vertex set $V$ and arc set $E$ (see Figure \ref{fig:flowgraph}).
The set of vertices $V$ contains the source $\mathbf{s}$, the sink $\mathbf{t}$, vertices
$\mathbf{p^+, p^-}$ for every partition $p$, vertices $\mathbf{x}_{p,z}$ for every partition $p$ and zone $z$, and vertices $\mathbf{n}$ for every node $n$.
The set of arcs $E$ contains:
\begin{itemize}
\item ($\mathbf{s}$,$\mathbf{p}^+$, $\rho_\mathbf{Z}$) for every partition $p$;
\item ($\mathbf{s}$,$\mathbf{p}^-$, $\rho_\mathbf{N}-\rho_\mathbf{Z}$) for every partition $p$;
\item ($\mathbf{p}^+$,$\mathbf{x}_{p,z}$, 1) for every partition $p$ and zone $z$;
\item ($\mathbf{p}^-$,$\mathbf{x}_{p,z}$, $\rho_\mathbf{N}-\rho_\mathbf{Z}$) for every partition $p$ and zone $z$;
\item ($\mathbf{x}_{p,z}$,$\mathbf{n}$, 1) for every partition $p$, zone $z$ and node $n\in z$;
\item ($\mathbf{n}$, $\mathbf{t}$, $\lfloor c_n/s \rfloor$) for every node $n$.
\end{itemize}
\begin{figure}
\centering
\includegraphics[width=\linewidth]{figures/flow_graph_param}
\caption{An example of graph $G(s)$. Arcs are oriented from left to right, and unlabeled arcs have capacity 1. In this example, nodes $n_1,n_2,n_3$ belong to zone $z_1$, and nodes $n_4,n_5$ belong to zone $z_2$.}
\label{fig:flowgraph}
\end{figure}
In the following complexity calculations, we will use the number of vertices and edges of $G$. Remark from now that $\# V = O(PZ)$ and $\# E = O(PN)$.
\begin{proposition}
An assignment $\alpha$ is realizable with partition size $s$ and the redundancy constraints $(\rho_\mathbf{N},\rho_\mathbf{Z})$ if and only if there exists a maximal flow function $f$ in $G$ with total flow $\rho_\mathbf{N}P$, such that the arcs ($\mathbf{x}_{p,z}$,$\mathbf{n}$, 1) used are exactly those for which $p$ is associated to $n$ in $\alpha$.
\end{proposition}
\begin{proof}
Given such flow $f$, we can reconstruct a candidate $\alpha$. In $f$, the flow passing through $\mathbf{p^+}$ and $\mathbf{p^-}$ is $\rho_\mathbf{N}$, and since the outgoing capacity of every $\mathbf{x}_{p,z}$ is 1, every partition is associated to $\rho_\mathbf{N}$ distinct nodes. The fraction $\rho_\mathbf{Z}$ of the flow passing through every $\mathbf{p^+}$ must be spread over as many distinct zones as every arc outgoing from $\mathbf{p^+}$ has capacity 1. So the reconstructed $\alpha$ verifies the redundancy constraints. For every node $n$, the flow between $\mathbf{n}$ and $\mathbf{t}$ corresponds to the number of partitions associated to $n$. By construction of $f$, this does not exceed $\lfloor c_n/s \rfloor$. We assumed that the partition size is $s$, hence this association does not exceed the storage capacity of the nodes.
In the other direction, given an assignment $\alpha$, one can similarly check that the facts that $\alpha$ respects the redundancy constraints and the storage capacities of the nodes are necessary conditions to construct a maximal flow function $f$.
\end{proof}
\textbf{Implementation remark:} In the flow algorithm, while exploring the graph, we explore the neighbours of every vertex in a random order to heuristically spread the associations between nodes and partitions.
\subsubsection*{Algorithm}
With this result in mind, we can describe the first step of our algorithm. All divisions are supposed to be integer divisions.
\begin{algorithmic}[1]
\Function{Compute Partition Size}{$\mathbf{N}$, $\mathbf{Z}$, $\mathbf{P}$, $(c_n)_{n\in \mathbf{N}}$, $\rho_\mathbf{N}$, $\rho_\mathbf{Z}$}
\State Build the graph $G=G(s=1)$
\State $ f \leftarrow$ \Call{Maximal flow}{$G$}
\If{$f.\mathrm{total flow} < \rho_\mathbf{N}P$}
\State \Return Error: capacities too small or constraints too strong.
\EndIf
\State $s^- \leftarrow 1$
\State $s^+ \leftarrow 1+\frac{1}{\rho_\mathbf{N}}\sum_{n \in \mathbf{N}} c_n$
\While{$s^-+1 < s^+$}
\State Build the graph $G=G(s=(s^-+s^+)/2)$
\State $ f \leftarrow$ \Call{Maximal flow}{$G$}
\If{$f.\mathrm{total flow} < \rho_\mathbf{N}P$}
\State $s^+ \leftarrow (s^- + s^+)/2$
\Else
\State $s^- \leftarrow (s^- + s^+)/2$
\EndIf
\EndWhile
\State \Return $s^-$
\EndFunction
\end{algorithmic}
\subsubsection*{Complexity}
To compute the maximal flow, we use Dinic's algorithm. Its complexity on general graphs is $O(\#V^2 \#E)$, but on graphs with edge capacity bounded by a constant, it turns out to be $O(\#E^{3/2})$. The graph $G$ does not fall in this case since the capacities of the arcs incoming to $\mathbf{t}$ are far from bounded. However, the proof of this complexity bound works readily for graphs where we only ask the edges \emph{not} incoming to the sink $\mathbf{t}$ to have their capacities bounded by a constant. One can find the proof of this claim in \cite[Section 2]{even1975network}.
The dichotomy adds a logarithmic factor $\log (C)$ where $C=\sum_{n \in \mathbf{N}} c_n$ is the total capacity of the cluster. The total complexity of this first function is hence
$O(\#E^{3/2}\log C ) = O\big((PN)^{3/2} \log C\big)$.
\subsubsection*{Metrics}
We can display the discrepancy between the computed $s^*$ and the best size we could have hoped for the given total capacity, that is $C/\rho_\mathbf{N}$.
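For concreteness, here is a purely illustrative toy instance (the numbers are chosen for this example only). Take $\rho_\mathbf{N}=3$, $\rho_\mathbf{Z}=2$, $P=256$, and three nodes of capacity $1000$ (in the capacity unit), two of them in zone $z_1$ and one in zone $z_2$. Every partition must then be assigned to all three nodes, so each node stores $P=256$ partitions, and the dichotomy returns the largest integer $s$ such that $\lfloor 1000/s \rfloor \ge 256$, namely
\[
s^* = 3, \qquad P s^* = 768 \le \frac{C}{\rho_\mathbf{N}} = \frac{3000}{3} = 1000,
\]
the gap between the last two quantities being the discrepancy mentioned above.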
\subsection{Computation of a candidate assignment}
Now that we have the optimal partition size $s^*$, to compute a candidate assignment it would be enough to compute a maximal flow function $f$ on $G(s^*)$. This is what we do if there is no former assignation $\alpha'$.
If there is some $\alpha'$, we add a step that will heuristically help to obtain a candidate $\alpha$ closer to $\alpha'$. We first compute a flow function $\tilde{f}$ that uses only the partition-to-node associations appearing in $\alpha'$. Most likely, $\tilde{f}$ will not be a maximal flow of $G(s^*)$. In Dinic's algorithm, we can start from a non-maximal flow function and then discover improving paths. This is what we do by starting from $\tilde{f}$. The hope\footnote{This is only a hope, because one can find examples where the construction of $f$ from $\tilde{f}$ produces an assignment $\alpha$ that is not as close as possible to $\alpha'$.} is that the final flow function $f$ will tend to keep the associations appearing in $\tilde{f}$.
More formally, we construct the graph $G_{|\alpha'}$ from $G$ by removing all the arcs $(\mathbf{x}_{p,z},\mathbf{n}, 1)$ where $p$ is not associated to $n$ in $\alpha'$. We compute a maximal flow function $\tilde{f}$ in $G_{|\alpha'}$. The flow $\tilde{f}$ is also a valid (most likely non maximal) flow function on $G$. We compute a maximal flow function $f$ on $G$ by starting Dinic's algorithm on $\tilde{f}$.
\subsubsection*{Algorithm}
\begin{algorithmic}[1]
\Function{Compute Candidate Assignment}{$G$, $\alpha'$}
\State Build the graph $G_{|\alpha'}$
\State $ \tilde{f} \leftarrow$ \Call{Maximal flow}{$G_{|\alpha'}$}
\State $ f \leftarrow$ \Call{Maximal flow from flow}{$G$, $\tilde{f}$}
\State \Return $f$
\EndFunction
\end{algorithmic}
~
\textbf{Remark:} The function ``Maximal flow'' can be just seen as the function ``Maximal flow from flow'' called with the zero flow function as starting flow.
\subsubsection*{Complexity}
With the considerations of the last section, the complexity of Dinic's algorithm is $O(\#E^{3/2}) = O((PN)^{3/2})$.
\subsubsection*{Metrics}
We can display the flow value of $\tilde{f}$, which is an upper bound of the distance between $\alpha$ and $\alpha'$. It might be more a Debug level display than Info.
\subsection{Minimization of the transfer load}
Now that we have a candidate flow function $f$, we want to modify it to make its corresponding assignation $\alpha$ as close as possible to $\alpha'$. Denote by $f'$ the maximal flow corresponding to $\alpha'$, and let $d(f, \alpha')=d(f, f'):=d(\alpha,\alpha')$\footnote{It is the number of arcs of type $(\mathbf{x}_{p,z},\mathbf{n})$ saturated in one flow and not in the other.}.
We want to build a sequence $f=f_0, f_1, f_2 \dots$ of maximal flows such that $d(f_i, \alpha')$ decreases as $i$ increases. The distance being a non-negative integer, this sequence of flow functions must be finite. We now explain how to find some improving $f_{i+1}$ from $f_i$.
For any maximal flow $f$ in $G$, we define the oriented weighted graph $G_f=(V, E_f)$ as follows. The vertices of $G_f$ are the same as the vertices of $G$. $E_f$ contains the arc $(v_1,v_2, w)$ between vertices $v_1,v_2\in V$ with weight $w$ if and only if the arc $(v_1,v_2)$ is not saturated in $f$ (i.e. $c(v_1,v_2)-f(v_1,v_2) \ge 1$, we also consider reversed arcs). The weight $w$ is:
\begin{itemize}
\item $-1$ if $(v_1,v_2)$ is of type $(\mathbf{x}_{p,z},\mathbf{n})$ or $(\mathbf{n},\mathbf{x}_{p,z})$ and is saturated in only one of the two flows $f,f'$;
\item $+1$ if $(v_1,v_2)$ is of type $(\mathbf{x}_{p,z},\mathbf{n})$ or $(\mathbf{n},\mathbf{x}_{p,z})$ and is saturated in either both or none of the two flows $f,f'$;
\item $0$ otherwise.
\end{itemize}
If $\gamma$ is a simple cycle of arcs in $G_f$, we define its weight $w(\gamma)$ as the sum of the weights of its arcs. We can add $+1$ to the value of $f$ on the arcs of $\gamma$, and by construction of $G_f$ and the fact that $\gamma$ is a cycle, the function that we get is still a valid flow function on $G$, it is maximal as it has the same flow value as $f$. We denote this new function $f+\gamma$.
\begin{proposition}
Given a maximal flow $f$ and a simple cycle $\gamma$ in $G_f$, we have $d(f+\gamma, f') - d(f,f') = w(\gamma)$.
\end{proposition}
\begin{proof}
Let $X$ be the set of arcs of type $(\mathbf{x}_{p,z},\mathbf{n})$. Then we can express $d(f,f')$ as
\begin{align*}
d(f,f') & = \#\{e\in X ~|~ f(e)\neq f'(e)\}
= \sum_{e\in X} 1_{f(e)\neq f'(e)} \\
& = \frac{1}{2}\big( \#X + \sum_{e\in X} 1_{f(e)\neq f'(e)} - 1_{f(e)= f'(e)} \big).
\end{align*}
We can express the cycle weight as
\begin{align*}
w(\gamma) & = \sum_{e\in X, e\in \gamma} - 1_{f(e)\neq f'(e)} + 1_{f(e)= f'(e)}.
\end{align*}
Remark that since we pushed one unit of flow along $\gamma$ to construct $f+\gamma$, we have, for any $e\in X$ on the cycle $\gamma$, $f(e)=f'(e)$ if and only if $(f+\gamma)(e) \neq f'(e)$.
Hence
\begin{align*}
w(\gamma) & = \frac{1}{2}(w(\gamma) + w(\gamma)) \\
&= \frac{1}{2} \Big(
\sum_{e\in X, e\in \gamma} - 1_{f(e)\neq f'(e)} + 1_{f(e)= f'(e)} \\
& \qquad +
\sum_{e\in X, e\in \gamma} 1_{(f+\gamma)(e)\neq f'(e)} + 1_{(f+\gamma)(e)= f'(e)}
\Big).
\end{align*}
Plugging this in the previous equation, we find that
$$d(f,f')+w(\gamma) = d(f+\gamma, f').$$
\end{proof}
This result suggests that given some flow $f_i$, we just need to find a negative cycle $\gamma$ in $G_{f_i}$ to construct $f_{i+1}$ as $f_i+\gamma$. The following proposition ensures that this greedy strategy reaches an optimal flow.
\begin{proposition}
For any maximal flow $f$, $G_f$ contains a negative cycle if and only if there exists a maximal flow $f^*$ in $G$ such that $d(f^*, f') < d(f, f')$.
\end{proposition}
\begin{proof}
Suppose that there is such flow $f^*$. Define the oriented multigraph $M_{f,f^*}=(V,E_M)$ with the same vertex set $V$ as in $G$, and for every $v_1,v_2 \in V$, $E_M$ contains $(f^*(v_1,v_2) - f(v_1,v_2))_+$ copies of the arc $(v_1,v_2)$. For every vertex $v$, its total degree (meaning its outer degree minus its inner degree) is equal to
\begin{align*}
\deg v & = \sum_{u\in V} (f^*(v,u) - f(v,u))_+ - \sum_{u\in V} (f^*(u,v) - f(u,v))_+ \\
& = \sum_{u\in V} f^*(v,u) - f(v,u) = \sum_{u\in V} f^*(v,u) - \sum_{u\in V} f(v,u).
\end{align*}
The last two sums are zero for any inner vertex since $f,f^*$ are flows, and they are equal on the source and sink since the two flows are both maximal and have hence the same value. Thus, $\deg v = 0$ for every vertex $v$.
This implies that the multigraph $M_{f,f^*}$ is the union of disjoint simple cycles. $f$ can be transformed into $f^*$ by pushing a mass 1 along all these cycles in any order. Since $d(f^*, f')<d(f,f')$, there must exist one of these simple cycles $\gamma$ with $d(f+\gamma, f') < d(f, f')$. Finally, since we can push a mass in $f$ along $\gamma$, it must appear in $G_f$. Hence $\gamma$ is a cycle of $G_f$ with negative weight.
\end{proof}
In the next section we describe the corresponding algorithm. Instead of discovering only one cycle, we are allowed to discover a set $\Gamma$ of disjoint negative cycles.
\subsubsection*{Algorithm}
\begin{algorithmic}[1]
\Function{Minimize transfer load}{$G$, $f$, $\alpha'$}
\State Build the graph $G_f$
\State $\Gamma \leftarrow$ \Call{Detect Negative Cycles}{$G_f$}
\While{$\Gamma \neq \emptyset$}
\ForAll{$\gamma \in \Gamma$}
\State $f \leftarrow f+\gamma$
\EndFor
\State Update $G_f$
\State $\Gamma \leftarrow$ \Call{Detect Negative Cycles}{$G_f$}
\EndWhile
\State \Return $f$
\EndFunction
\end{algorithmic}
\subsubsection*{Complexity}
The distance $d(f,f')$ is bounded by the maximal number of differences in the associated assignment. If these assignments are totally disjoint, this distance is $2\rho_N P$. At every iteration of the While loop, the distance decreases, so there are at most $O(\rho_N P) = O(P)$ iterations.
The detection of negative cycles is done with the Bellman-Ford algorithm, whose complexity should normally be $O(\#E\#V)$. In our case, it amounts to $O(P^2ZN)$. Multiplied by the number of iterations of the outer loop, it amounts to $O(P^3ZN)$, which is a lot when the number of partitions and nodes starts to be large. To avoid that, we adapt the Bellman-Ford algorithm.
The Bellman-Ford algorithm runs $\#V$ iterations of an outer loop, and an inner loop over $E$. The idea is to compute the shortest paths from a source vertex $v$ to all other vertices. After $k$ iterations of the outer loop, the algorithm has computed all shortest path of length at most $k$. All simple paths have length at most $\#V-1$, so if there is an update in the last iteration of the loop, it means that there is a negative cycle in the graph. The observation that will enable us to improve the complexity is the following:
\begin{proposition}
In the graph $G_f$ (and $G$), all simple paths have a length at most $4N$.
\end{proposition}
\begin{proof}
Since $f$ is a maximal flow, there is no outgoing edge from $\mathbf{s}$ in $G_f$. One can thus check that any simple path of length 4 must contain at least two nodes of type $\mathbf{n}$. Hence, on a path, at most 4 arcs separate two successive nodes of type $\mathbf{n}$.
\end{proof}
Thus, in the absence of negative cycles, shortest paths in $G_f$ have length at most $4N$. So we can do only $4N+1$ iterations of the outer loop in the Bellman-Ford algorithm. This makes the complexity of the detection of one set of cycles $O(N\#E) = O(N^2 P)$.
With this improvement, the complexity of the whole algorithm is, in the worst case, $O(N^2P^2)$. However, since we detect several cycles at once and we start with a flow that might be close to the previous one, the number of iterations of the outer loop might be smaller in practice.
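To give an order of magnitude (with made-up numbers): the graph has $\#V = 2 + 2P + PZ + N$ vertices, so for $P=256$, $Z=3$ and $N=10$ the standard Bellman-Ford algorithm would run $\#V = 1292$ iterations of its outer loop, whereas the truncated version above only needs $4N+1 = 41$.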
\subsubsection*{Metrics}
We can display the node and zone utilization ratios, obtained by dividing the flow passing through them by their outgoing capacity. In particular, we can pinpoint saturated nodes and zones (i.e. those used at their full potential).
We can display the distance to the previous assignment, and the number of partition transfers.
\bibliography{optimal_layout}
\bibliographystyle{ieeetr}
\end{document}

@ -1,11 +0,0 @@
@article{even1975network,
title={Network flow and testing graph connectivity},
author={Even, Shimon and Tarjan, R Endre},
journal={SIAM journal on computing},
volume={4},
number={4},
pages={507--518},
year={1975},
publisher={SIAM}
}

@ -1,709 +0,0 @@
\documentclass[]{article}
\usepackage{amsmath,amssymb}
\usepackage{amsthm}
\usepackage{graphicx,xcolor}
\usepackage{algorithm,algpseudocode,float}
\renewcommand\thesubsubsection{\Alph{subsubsection})}
\newtheorem{proposition}{Proposition}
%opening
\title{Optimal partition assignment in Garage}
\author{Mendes}
\begin{document}
\maketitle
\section{Introduction}
\subsection{Context}
Garage is an open-source distributed storage service blablabla$\dots$
Every object to be stored in the system falls in a partition given by the last $k$ bits of its hash. There are $P=2^k$ partitions. Every partition will be stored on distinct nodes of the system. The goal of the assignment of partitions to nodes is to ensure (nodes and zone) redundancy and to be as efficient as possible.
\subsection{Formal description of the problem}
We are given a set of nodes $\mathbf{N}$ and a set of zones $\mathbf{Z}$. Every node $n$ has a non-negative storage capacity $c_n\ge 0$ and belongs to a zone $z\in \mathbf{Z}$. We are also given a number of partitions $P>0$ (typically $P=256$).
We would like to compute an assignment of nodes to partitions. We will impose some redundancy constraints to this assignment, and under these constraints, we want our system to have the largest storage capacity possible. To link storage capacity to partition assignment, we make the following assumption:
\begin{equation}
\tag{H1}
\text{\emph{All partitions have the same size $s$.}}
\end{equation}
This assumption is justified by the dispersion of the hashing function, when the number of partitions is small relative to the number of stored large objects.
Every node $n$ will store some number $k_n$ of partitions. Hence the partitions stored by $n$ (and hence all partitions by our assumption) have their size bounded by $c_n/k_n$. This remark leads us to define the optimal size that we will want to maximize:
\begin{equation}
\label{eq:optimal}
\tag{OPT}
s^* = \min_{n \in N} \frac{c_n}{k_n}.
\end{equation}
When the capacities of the nodes are updated (this includes adding or removing a node), we want to update the assignment as well. However, transferring the data between nodes has a cost and we would like to limit the number of changes in the assignment. We make the following assumption:
\begin{equation}
\tag{H2}
\text{\emph{Updates of capacity happen rarely relative to object storing.}}
\end{equation}
This assumption justifies that when we compute the new assignment, it is worth optimizing the partition size \eqref{eq:optimal} first, and then, among the possible optimal solutions, trying to minimize the number of partition transfers.
For now, in the following, we ask the following redundancy constraint:
\textbf{Parametric node and zone redundancy:} Given two integer parameters $1\le \rho_\mathbf{Z} \le \rho_\mathbf{N}$, we ask every partition to be stored on $\rho_\mathbf{N}$ distinct nodes, and these nodes must belong to at least $\rho_\mathbf{Z}$ distinct zones.
\textbf{Mode 3-strict:} every partition needs to be assigned to three nodes belonging to three different zones.
\textbf{Mode 3:} every partition needs to be assigned to three nodes. We try to spread the three nodes over different zones as much as possible.
\textbf{Warning:} This is a working document written incrementally. The last version of the algorithm is the \textbf{parametric assignment} described in the next section.
\section{Computation of a parametric assignment}
\textbf{Attention : }We change notations in this section.
Notations : let $P$ be the number of partitions, $N$ the number of nodes, $Z$ the number of zones. Let $\mathbf{P,N,Z}$ be the label sets of, respectively, partitions, nodes and zones.
Let $s^*$ be the largest partition size achievable with the redundancy constraints. Let $(c_n)_{n\in \mathbf{N}}$ be the storage capacity of every node.
In this section, we propose a third specification of the problem. The user inputs two redundancy parameters $1\le \rho_\mathbf{Z} \le \rho_\mathbf{N}$. We compute an assignment $\alpha = (\alpha_p^1, \ldots, \alpha_p^{\rho_\mathbf{N}})_{p\in \mathbf{P}}$ such that every partition $p$ is associated to $\rho_\mathbf{N}$ distinct nodes $\alpha_p^1, \ldots, \alpha_p^{\rho_\mathbf{N}}$ and these nodes belong to at least $\rho_\mathbf{Z}$ distinct zones.
If the layout contained a previous assignment $\alpha'$, we try to minimize the amount of data to transfer during the layout update by making $\alpha$ as close as possible to $\alpha'$.
In the following subsections, we describe the successive steps of the algorithm we propose to compute $\alpha$.
\subsubsection*{Algorithm}
\begin{algorithmic}[1]
\Function{Compute Layout}{$\mathbf{N}$, $\mathbf{Z}$, $\mathbf{P}$, $(c_n)_{n\in \mathbf{N}}$, $\rho_\mathbf{N}$, $\rho_\mathbf{Z}$, $\alpha'$}
\State $s^* \leftarrow$ \Call{Compute Partition Size}{$\mathbf{N}$, $\mathbf{Z}$, $\mathbf{P}$, $(c_n)_{n\in \mathbf{N}}$, $\rho_\mathbf{N}$, $\rho_\mathbf{Z}$}
\State $G \leftarrow G(s^*)$
\State $f \leftarrow$ \Call{Compute Candidate Assignment}{$G$, $\alpha'$}
\State $f^* \leftarrow$ \Call{Minimize transfer load}{$G$, $f$, $\alpha'$}
\State Build $\alpha^*$ from $f^*$
\State \Return $\alpha^*$
\EndFunction
\end{algorithmic}
\subsubsection*{Complexity}
As we will see in the next sections, the worst-case complexity of this algorithm is $O(P^2 N^2)$. The minimization of transfer load is the most expensive step, and it can run with a timeout since it is only an optimization step. Without this step (or with a smart timeout), the worst-case complexity can be $O((PN)^{3/2}\log C)$ where $C$ is the total storage capacity of the cluster.
\subsection{Determination of the partition size $s^*$}
Again, we will represent an assignment $\alpha$ as a flow in a specific graph $G$. We will not compute the optimal partition size $s^*$ a priori, but we will determine it by dichotomy, as the largest size $s$ such that the maximal flow achievable on $G=G(s)$ has value $\rho_\mathbf{N}P$. We will assume that the capacities are given in a small enough unit (say, Megabytes), and we will determine $s^*$ at the precision of the given unit.
Given some candidate size value $s$, we describe the oriented weighted graph $G=(V,E)$ with vertex set $V$ and arc set $E$.
The set of vertices $V$ contains the source $\mathbf{s}$, the sink $\mathbf{t}$, vertices
$\mathbf{p^+, p^-}$ for every partition $p$, vertices $\mathbf{x}_{p,z}$ for every partition $p$ and zone $z$, and vertices $\mathbf{n}$ for every node $n$.
The set of arcs $E$ contains:
\begin{itemize}
\item ($\mathbf{s}$,$\mathbf{p}^+$, $\rho_\mathbf{Z}$) for every partition $p$;
\item ($\mathbf{s}$,$\mathbf{p}^-$, $\rho_\mathbf{N}-\rho_\mathbf{Z}$) for every partition $p$;
\item ($\mathbf{p}^+$,$\mathbf{x}_{p,z}$, 1) for every partition $p$ and zone $z$;
\item ($\mathbf{p}^-$,$\mathbf{x}_{p,z}$, $\rho_\mathbf{N}-\rho_\mathbf{Z}$) for every partition $p$ and zone $z$;
\item ($\mathbf{x}_{p,z}$,$\mathbf{n}$, 1) for every partition $p$, zone $z$ and node $n\in z$;
\item ($\mathbf{n}$, $\mathbf{t}$, $\lfloor c_n/s \rfloor$) for every node $n$.
\end{itemize}
In the following complexity calculations, we will use the number of vertices and edges of $G$. Remark from now that $\# V = O(PZ)$ and $\# E = O(PN)$.
\begin{proposition}
An assignment $\alpha$ is realizable with partition size $s$ and the redundancy constraints $(\rho_\mathbf{N},\rho_\mathbf{Z})$ if and only if there exists a maximal flow function $f$ in $G$ with total flow $\rho_\mathbf{N}P$, such that the arcs ($\mathbf{x}_{p,z}$,$\mathbf{n}$, 1) used are exactly those for which $p$ is associated to $n$ in $\alpha$.
\end{proposition}
\begin{proof}
Given such flow $f$, we can reconstruct a candidate $\alpha$. In $f$, the flow passing through $\mathbf{p^+}$ and $\mathbf{p^-}$ is $\rho_\mathbf{N}$, and since the outgoing capacity of every $\mathbf{x}_{p,z}$ is 1, every partition is associated to $\rho_\mathbf{N}$ distinct nodes. The fraction $\rho_\mathbf{Z}$ of the flow passing through every $\mathbf{p^+}$ must be spread over as many distinct zones as every arc outgoing from $\mathbf{p^+}$ has capacity 1. So the reconstructed $\alpha$ verifies the redundancy constraints. For every node $n$, the flow between $\mathbf{n}$ and $\mathbf{t}$ corresponds to the number of partitions associated to $n$. By construction of $f$, this does not exceed $\lfloor c_n/s \rfloor$. We assumed that the partition size is $s$, hence this association does not exceed the storage capacity of the nodes.
In the other direction, given an assignment $\alpha$, one can similarly check that the facts that $\alpha$ respects the redundancy constraints and the storage capacities of the nodes are necessary conditions to construct a maximal flow function $f$.
\end{proof}
\textbf{Implementation remark:} In the flow algorithm, while exploring the graph, we explore the neighbours of every vertex in a random order to heuristically spread the association between nodes and partitions.
\subsubsection*{Algorithm}
With this result in mind, we can describe the first step of our algorithm. All divisions are supposed to be integer divisions.
\begin{algorithmic}[1]
\Function{Compute Partition Size}{$\mathbf{N}$, $\mathbf{Z}$, $\mathbf{P}$, $(c_n)_{n\in \mathbf{N}}$, $\rho_\mathbf{N}$, $\rho_\mathbf{Z}$}
\State Build the graph $G=G(s=1)$
\State $ f \leftarrow$ \Call{Maximal flow}{$G$}
\If{$f.\mathrm{total flow} < \rho_\mathbf{N}P$}
\State \Return Error: capacities too small or constraints too strong.
\EndIf
\State $s^- \leftarrow 1$
\State $s^+ \leftarrow 1+\frac{1}{\rho_\mathbf{N}}\sum_{n \in \mathbf{N}} c_n$
\While{$s^-+1 < s^+$}
\State Build the graph $G=G(s=(s^-+s^+)/2)$
\State $ f \leftarrow$ \Call{Maximal flow}{$G$}
\If{$f.\mathrm{total flow} < \rho_\mathbf{N}P$}
\State $s^+ \leftarrow (s^- + s^+)/2$
\Else
\State $s^- \leftarrow (s^- + s^+)/2$
\EndIf
\EndWhile
\State \Return $s^-$
\EndFunction
\end{algorithmic}
\subsubsection*{Complexity}
To compute the maximal flow, we use Dinic's algorithm. Its complexity on general graphs is $O(\#V^2 \#E)$, but on graphs with edge capacity bounded by a constant, it turns out to be $O(\#E^{3/2})$. The graph $G$ does not fall in this case since the capacities of the arcs incoming to $\mathbf{t}$ are far from bounded. However, the proof of this complexity works readily for graphs where we only ask the edges \emph{not} incoming to the sink $\mathbf{t}$ to have their capacities bounded by a constant. One can find the proof of this claim in \cite[Section 2]{even1975network}.
The dichotomy adds a logarithmic factor $\log (C)$ where $C=\sum_{n \in \mathbf{N}} c_n$ is the total capacity of the cluster. The total complexity of this first function is hence
$O(\#E^{3/2}\log C ) = O\big((PN)^{3/2} \log C\big)$.
\subsubsection*{Metrics}
We can display the discrepancy between the computed $s^*$ and the best size we could hope for a given total capacity, that is $C/\rho_\mathbf{N}$.
\subsection{Computation of a candidate assignment}
Now that we have the optimal partition size $s^*$, to compute a candidate assignment, it would be enough to compute a maximal flow function $f$ on $G(s^*)$. This is what we do if there was no previous assignment $\alpha'$.
If there was some $\alpha'$, we add a step that will heuristically help to obtain a candidate $\alpha$ closer to $\alpha'$. To do so, we first compute a flow function $\tilde{f}$ that uses only the partition-to-node associations appearing in $\alpha'$. Most likely, $\tilde{f}$ will not be a maximal flow of $G(s^*)$. In Dinic's algorithm, we can start from a non-maximal flow function and then discover improving paths. This is what we do by starting from $\tilde{f}$. The hope\footnote{This is only a hope, because one can find examples where the construction of $f$ from $\tilde{f}$ produces an assignment $\alpha$ that is not as close as possible to $\alpha'$.} is that the final flow function $f$ will tend to keep the associations appearing in $\tilde{f}$.
More formally, we construct the graph $G_{|\alpha'}$ from $G$ by removing all the arcs $(\mathbf{x}_{p,z},\mathbf{n}, 1)$ where $p$ is not associated to $n$ in $\alpha'$. We compute a maximal flow function $\tilde{f}$ in $G_{|\alpha'}$. $\tilde{f}$ is also a valid (most likely non maximal) flow function in $G$. We compute a maximal flow function $f$ on $G$ by starting Dinic's algorithm on $\tilde{f}$.
\subsubsection*{Algorithm}
\begin{algorithmic}[1]
\Function{Compute Candidate Assignment}{$G$, $\alpha'$}
\State Build the graph $G_{|\alpha'}$
\State $ \tilde{f} \leftarrow$ \Call{Maximal flow}{$G_{|\alpha'}$}
\State $ f \leftarrow$ \Call{Maximal flow from flow}{$G$, $\tilde{f}$}
\State \Return $f$
\EndFunction
\end{algorithmic}
\textbf{Remark:} The function ``Maximal flow'' can be just seen as the function ``Maximal flow from flow'' called with the zero flow function as starting flow.
\subsubsection*{Complexity}
From the considerations of the last section, the complexity of Dinic's algorithm is $O(\#E^{3/2}) = O((PN)^{3/2})$.
\subsubsection*{Metrics}
We can display the flow value of $\tilde{f}$, which is an upper bound of the distance between $\alpha$ and $\alpha'$. It might be more a Debug level display than Info.
\subsection{Minimization of the transfer load}
Now that we have a candidate flow function $f$, we want to modify it to make its associated assignment as close as possible to $\alpha'$. Denote by $f'$ the maximal flow associated to $\alpha'$, and let $d(f, f')$ be the distance between the associated assignments\footnote{It is the number of arcs of type $(\mathbf{x}_{p,z},\mathbf{n})$ saturated in one flow and not in the other.}.
We want to build a sequence $f=f_0, f_1, f_2 \dots$ of maximal flows such that $d(f_i, \alpha')$ decreases as $i$ increases. The distance being a non-negative integer, this sequence of flow functions must be finite. We now explain how to find some improving $f_{i+1}$ from $f_i$.
For any maximal flow $f$ in $G$, we define the oriented weighted graph $G_f=(V, E_f)$ as follows. The vertices of $G_f$ are the same as the vertices of $G$. $E_f$ contains the arc $(v_1,v_2, w)$ between vertices $v_1,v_2\in V$ with weight $w$ if and only if the arc $(v_1,v_2)$ is not saturated in $f$ (i.e. $c(v_1,v_2)-f(v_1,v_2) \ge 1$, we also consider reversed arcs). The weight $w$ is:
\begin{itemize}
\item $-1$ if $(v_1,v_2)$ is of type $(\mathbf{x}_{p,z},\mathbf{n})$ or $(\mathbf{n},\mathbf{x}_{p,z})$ and the corresponding arc $(\mathbf{x}_{p,z},\mathbf{n})$ is saturated in only one of the two flows $f,f'$;
\item $+1$ if $(v_1,v_2)$ is of type $(\mathbf{x}_{p,z},\mathbf{n})$ or $(\mathbf{n},\mathbf{x}_{p,z})$ and the corresponding arc $(\mathbf{x}_{p,z},\mathbf{n})$ is saturated in either both or none of the two flows $f,f'$;
\item $0$ otherwise.
\end{itemize}
If $\gamma$ is a simple cycle of arcs in $G_f$, we define its weight $w(\gamma)$ as the sum of the weights of its arcs. We can add $+1$ to the value of $f$ on the arcs of $\gamma$; by construction of $G_f$ and the fact that $\gamma$ is a cycle, the function that we get is still a valid flow function on $G$, and it is maximal since it has the same flow value as $f$. We denote this new function $f+\gamma$.
\begin{proposition}
Given a maximal flow $f$ and a simple cycle $\gamma$ in $G_f$, we have $d(f+\gamma, f') - d(f,f') = w(\gamma)$.
\end{proposition}
\begin{proof}
Let $X$ be the set of arcs of type $(\mathbf{x}_{p,z},\mathbf{n})$. Then we can express $d(f,f')$ as
\begin{align*}
d(f,f') & = \#\{e\in X ~|~ f(e)\neq f'(e)\}
= \sum_{e\in X} 1_{f(e)\neq f'(e)} \\
& = \frac{1}{2}\Big( \#X + \sum_{e\in X} \big(1_{f(e)\neq f'(e)} - 1_{f(e)= f'(e)}\big) \Big).
\end{align*}
We can express the cycle weight as
\begin{align*}
w(\gamma) & = \sum_{e\in X, e\in \gamma} \big(- 1_{f(e)\neq f'(e)} + 1_{f(e)= f'(e)}\big).
\end{align*}
Remark that since we pushed one unit of flow along $\gamma$ to construct $f+\gamma$, we have for any $e\in X$, $f(e)=f'(e)$ if and only if $(f+\gamma)(e) \neq f'(e)$.
Hence
\begin{align*}
w(\gamma) & = \frac{1}{2}(w(\gamma) + w(\gamma)) \\
&= \frac{1}{2} \Big(
\sum_{e\in X, e\in \gamma} \big(- 1_{f(e)\neq f'(e)} + 1_{f(e)= f'(e)}\big) \\
& \qquad +
\sum_{e\in X, e\in \gamma} \big(1_{(f+\gamma)(e)\neq f'(e)} - 1_{(f+\gamma)(e)= f'(e)}\big)
\Big).
\end{align*}
Plugging this in the previous equation, we find that
$$d(f,f')+w(\gamma) = d(f+\gamma, f').$$
\end{proof}
This result suggests that given some flow $f_i$, we just need to find a negative cycle $\gamma$ in $G_{f_i}$ to construct $f_{i+1}$ as $f_i+\gamma$. The following proposition ensures that this greedy strategy reaches an optimal flow.
\begin{proposition}
For any maximal flow $f$, $G_f$ contains a negative cycle if and only if there exists a maximal flow $f^*$ in $G$ such that $d(f^*, f') < d(f, f')$.
\end{proposition}
\begin{proof}
Suppose that there is such a flow $f^*$. Define the oriented multigraph $M_{f,f^*}=(V,E_M)$ with the same vertex set $V$ as in $G$, and for every $v_1,v_2 \in V$, $E_M$ contains $(f^*(v_1,v_2) - f(v_1,v_2))_+$ copies of the arc $(v_1,v_2)$. For every vertex $v$, its total degree (its outgoing degree minus its incoming degree) is equal to
\begin{align*}
\deg v & = \sum_{u\in V} (f^*(v,u) - f(v,u))_+ - \sum_{u\in V} (f^*(u,v) - f(u,v))_+ \\
& = \sum_{u\in V} f^*(v,u) - f(v,u) = \sum_{u\in V} f^*(v,u) - \sum_{u\in V} f(v,u).
\end{align*}
The last two sums are zero for any inner vertex since $f,f^*$ are flows, and they are equal on the source and sink since the two flows are both maximal and hence have the same value. Thus, $\deg v = 0$ for every vertex $v$.
This implies that the multigraph $M_{f,f^*}$ is the union of disjoint simple cycles. $f$ can be transformed into $f^*$ by pushing a mass 1 along all these cycles in any order. Since $d(f^*, f')<d(f,f')$, there must exist one of these simple cycles $\gamma$ with $d(f+\gamma, f') < d(f, f')$. Finally, since we can push a mass in $f$ along $\gamma$, it must appear in $G_f$. Hence $\gamma$ is a cycle of $G_f$ with negative weight.
\end{proof}
In the next section we describe the corresponding algorithm. Instead of discovering only one cycle, we are allowed to discover a set $\Gamma$ of disjoint negative cycles.
\subsubsection*{Algorithm}
\begin{algorithmic}[1]
\Function{Minimize transfer load}{$G$, $f$, $\alpha'$}
\State Build the graph $G_f$
\State $\Gamma \leftarrow$ \Call{Detect Negative Cycles}{$G_f$}
\While{$\Gamma \neq \emptyset$}
\ForAll{$\gamma \in \Gamma$}
\State $f \leftarrow f+\gamma$
\EndFor
\State Update $G_f$
\State $\Gamma \leftarrow$ \Call{Detect Negative Cycles}{$G_f$}
\EndWhile
\State \Return $f$
\EndFunction
\end{algorithmic}
\subsubsection*{Complexity}
The distance $d(f,f')$ is bounded by the maximal number of differences in the associated assignments. If these assignments are totally disjoint, this distance is $2\rho_\mathbf{N} P$. At every iteration of the While loop, the distance decreases, so there are at most $O(\rho_\mathbf{N} P) = O(P)$ iterations.
The detection of negative cycles is done with the Bellman-Ford algorithm, whose complexity should normally be $O(\#E\#V)$. In our case, it amounts to $O(P^2ZN)$. Multiplied by the complexity of the outer loop, it amounts to $O(P^3ZN)$, which is a lot when the number of partitions and nodes becomes large. To avoid that, we adapt the Bellman-Ford algorithm.
The Bellman-Ford algorithm runs $\#V$ iterations of an outer loop, and an inner loop over $E$. The idea is to compute the shortest paths from a source vertex $v$ to all other vertices. After $k$ iterations of the outer loop, the algorithm has computed all shortest paths of length at most $k$. All simple paths have length at most $\#V-1$, so if there is an update in the last iteration of the loop, it means that there is a negative cycle in the graph. The observation that will enable us to improve the complexity is the following:
\begin{proposition}
In the graph $G_f$ (and $G$), all simple paths have a length at most $4N$.
\end{proposition}
\begin{proof}
Since $f$ is a maximal flow, there is no outgoing edge from $\mathbf{s}$ in $G_f$. One can thus check that any simple path of length 4 must contain at least two nodes of type $\mathbf{n}$. Hence on a path, at most 4 arcs separate two successive nodes of type $\mathbf{n}$.
\end{proof}
Thus, in the absence of negative cycles, shortest paths in $G_f$ have length at most $4N$. So we can do only $4N+1$ iterations of the outer loop in the Bellman-Ford algorithm. This makes the complexity of the detection of one set of cycles $O(N\#E) = O(N^2 P)$.
With this improvement, the complexity of the whole algorithm is, in the worst case, $O(N^2P^2)$. However, since we detect several cycles at once and we start with a flow that might be close to the previous one, the number of iterations of the outer loop might be smaller in practice.
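To make the gain concrete, here is a rough order-of-magnitude computation with arbitrary illustrative values (not taken from any particular deployment): for $P=256$ partitions, $N=10$ nodes and $Z=3$ zones, the naive bound gives
$$O(P^3ZN) \approx 256^3\times 3\times 10 \approx 5\times 10^{8},$$
while the improved bound gives
$$O(N^2P^2) \approx 10^2\times 256^2 \approx 6.5\times 10^{6},$$
i.e. roughly a $75\times$ reduction of the worst case.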
\subsubsection*{Metrics}
We can display the node and zone utilization ratios, obtained by dividing the flow passing through them by their outgoing capacity. In particular, we can pinpoint saturated nodes and zones (i.e. used at their full potential).
We can display the distance to the previous assignment, and the number of partition transfers.
\section{Properties of an optimal 3-strict assignment}
\subsection{Optimal assignment}
\label{sec:opt_assign}
For every zone $z\in Z$, define the zone capacity $c_z = \sum_{v, z_v=z} c_v$ and define $C = \sum_v c_v = \sum_z c_z$.
One can check that the best we could do to maximize $s^*$ would be to use the nodes proportionally to their capacity. This would yield $s^*=C/(3N)$. This is not possible because of (i) redundancy constraints and (ii) integer rounding, but it gives an upper bound.
\subsubsection*{Optimal utilization}
We call a \emph{utilization} a collection of non-negative integers $(n_v)_{v\in V}$ such that $\sum_v n_v = 3N$ and for every zone $z$, $\sum_{v\in z} n_v \le N$. We call such a utilization \emph{optimal} if it maximizes $s^*$.
We start by computing a node sub-utilization $(\hat{n}_v)_{v\in V}$ such that for every zone $z$, $\sum_{v\in z} \hat{n}_v \le N$ and we show that there is an optimal utilization respecting the constraints and such that $\hat{n}_v \le n_v$ for every node.
Assume that there is a zone $z_0$ such that $c_{z_0}/C \ge 1/3$. Then for any $v\in z_0$, we define
$$\hat{n}_v = \left\lfloor\frac{c_v}{c_{z_0}}N\right\rfloor.$$
This choice ensures for any such $v$ that
$$
\frac{c_v}{\hat{n}_v} \ge \frac{c_{z_0}}{N} \ge \frac{C}{3N}
$$
which is the universal upper bound on $s^*$. Hence any optimal utilization $(n_v)$ can be modified to another optimal utilization such that $n_v\ge \hat{n}_v$.
Because $z_0$ cannot store more than $N$ partition occurrences, in any assignment, at least $2N$ partitions must be assigned to the zones $Z\setminus\{z_0\}$. Let $C_0 = C-c_{z_0}$. Suppose that there exists a zone $z_1\neq z_0$ such that $c_{z_1}/C_0 \ge 1/2$. Then, with the same argument as for $z_0$, we can define
$$\hat{n}_v = \left\lfloor\frac{c_v}{c_{z_1}}N\right\rfloor$$
for every $v\in z_1$.
Now we can assign the remaining partitions. Let $(\hat{N}, \hat{C})$ be
\begin{itemize}
\item $(3N,C)$ if we did not find any $z_0$;
\item $(2N,C-c_{z_0})$ if there was a $z_0$ but no $z_1$;
\item $(N,C-c_{z_0}-c_{z_1})$ if there was a $z_0$ and a $z_1$.
\end{itemize}
Then at least $\hat{N}$ partitions must be spread among the remaining zones. Hence $s^*$ is upper bounded by $\hat{C}/\hat{N}$ and without loss of generality, we can define, for every node that is not in $z_0$ nor $z_1$,
$$\hat{n}_v = \left\lfloor\frac{c_v}{\hat{C}}\hat{N}\right\rfloor.$$
We constructed a sub-utilization $\hat{n}_v$. Now notice that $3N-\sum_v \hat{n}_v \le \# V$ where $\# V$ denotes the number of nodes. We can iteratively pick a node $v^*$ such that
\begin{itemize}
\item $\sum_{v\in z_{v^*}} \hat{n}_v < N$ where $z_{v^*}$ is the zone of $v^*$;
\item $v^*$ maximizes the quantity $c_v/(\hat{n}_v+1)$ among the vertices satisfying the first condition (i.e. not in a saturated zone).
\end{itemize}
We iterate these instructions until $\sum_v \hat{n}_v= 3N$, and at this stage we define $(n_v) = (\hat{n}_v)$. It is easy to prove by induction that at every step, there is an optimal utilization that is pointwise larger than $\hat{n}_v$, and in particular, that $(n_v)$ is optimal.
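To illustrate the whole procedure, here is a small worked example with made-up capacities. Take $N=16$ and four nodes spread over three zones: $z_1$ with a single node of capacity $100$, $z_2$ with two nodes of capacities $40$ and $20$, and $z_3$ with a single node of capacity $40$, so that $C=200$. Since $c_{z_1}/C = 1/2 \ge 1/3$, the node of $z_1$ gets $\hat{n}_v = \lfloor (100/100)\times 16\rfloor = 16$. Then $C_0 = 100$ and $c_{z_2}/C_0 = 3/5 \ge 1/2$, so the nodes of $z_2$ get $\lfloor (40/60)\times 16\rfloor = 10$ and $\lfloor (20/60)\times 16\rfloor = 5$. Finally $(\hat{N},\hat{C}) = (N, 40)$ and the node of $z_3$ gets $\lfloor (40/40)\times 16\rfloor = 16$. The sub-utilizations sum to $47 < 3N = 48$; the only non-saturated zone is $z_2$, where the node of capacity $40$ maximizes $c_v/(\hat{n}_v+1)$, so it is incremented to $11$. The resulting utilization $(16,11,5,16)$ allows a partition size of $\min_v c_v/n_v = 40/16 = 2.5$, to be compared with the unreachable ideal $C/(3N)\approx 4.17$.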
\subsubsection*{Existence of an optimal assignment}
So far, the \emph{optimal utilization} that we obtained is just a vector of numbers, and it is not clear that it can be realized as the utilization of some concrete assignment. Here is a way to obtain one.
Define $3N$ tokens $t_1,\ldots, t_{3N}\in V$ as follows:
\begin{itemize}
\item Enumerate the zones $z$ of $Z$ in any order;
\item enumerate the nodes $v$ of $z$ in any order;
\item repeat $n_v$ times the token $v$.
\end{itemize}
Then for $1\le i \le N$, define the triplet $T_i$ to be
$(t_i, t_{i+N}, t_{i+2N})$. Since the same nodes of a zone appear contiguously, the three nodes of a triplet must belong to three distinct zones.
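As a small illustration with made-up numbers, take $N=3$ and zones $z_1=\{a\}$, $z_2=\{b,c\}$, $z_3=\{d\}$ with utilization $n_a=3$, $n_b=2$, $n_c=1$, $n_d=3$. The token sequence is $(a,a,a,b,b,c,d,d,d)$ and the triplets are
$$T_1 = (t_1,t_4,t_7) = (a,b,d),\qquad T_2 = (t_2,t_5,t_8)=(a,b,d),\qquad T_3 = (t_3,t_6,t_9)=(a,c,d),$$
each of them indeed spanning three distinct zones (note that $a$ and $d$ end up together in every triplet, which is precisely the kind of clustering discussed below).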
However simple, this solution to go from a utilization to an assignment has the drawback of not spreading the triplets: a node will tend to be associated to the same two other nodes for many partitions. Hence, during data transfer, it will tend to use only two links, instead of spreading the bandwidth usage over many links to other nodes. To achieve a better spread, we will reframe the search for an assignment as a flow problem, and in the flow algorithm we will introduce randomness in the order of exploration. This will be sufficient to obtain a good dispersion of the triplets.
\begin{figure}
\centering
\includegraphics[width=0.9\linewidth]{figures/naive}
\caption{On the left, the creation of a concrete assignment with the naive approach of repeating tokens. On the right, the zones containing the nodes.}
\end{figure}
\subsubsection*{Assignment as a maximum flow problem}
We describe the flow problem via its graph $(X,E)$ where $X$ is a set of vertices and $E$ a set of directed weighted edges between them. For every zone $z$, define $n_z=\sum_{v\in z} n_v$.
The set of vertices $X$ contains the source $\mathbf{s}$ and the sink $\mathbf{t}$; a vertex $\mathbf{x}_z$ for every zone $z\in Z$, and a vertex $\mathbf{y}_i$ for every partition index $1\le i\le N$.
The set of edges $E$ contains
\begin{itemize}
\item the edge $(\mathbf{s}, \mathbf{x}_z, n_z)$ for every zone $z\in Z$;
\item the edge $(\mathbf{x}_z, \mathbf{y}_i, 1)$ for every zone $z\in Z$ and partition $1\le i\le N$;
\item the edge $(\mathbf{y}_i, \mathbf{t}, 3)$ for every partition $1\le i\le N$.
\end{itemize}
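On the small example introduced above ($N=3$, $n_{z_1}=n_{z_2}=n_{z_3}=3$), the graph thus has the edges $(\mathbf{s},\mathbf{x}_z,3)$ for each of the three zones, $(\mathbf{x}_z,\mathbf{y}_i,1)$ for each zone--partition pair, and $(\mathbf{y}_i,\mathbf{t},3)$ for each partition. A maximum flow has value $3N=9$: every $\mathbf{y}_i$ then receives exactly one unit from each of the three $\mathbf{x}_z$, i.e. every partition is assigned to three distinct zones.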
\begin{figure}[b]
\centering
\includegraphics[width=0.6\linewidth]{figures/flow}
\caption{Flow problem to compute an optimal assignment.}
\end{figure}
We first show the equivalence between this problem and the construction of an assignment. Given some optimal assignment $(n_v)$, define the flow $f:E\to \mathbb{N}$ that saturates every edge from $\mathbf{s}$ or to $\mathbf{t}$, takes value $1$ on the edge between $\mathbf{x}_z$ and $\mathbf{y}_i$ if partition $i$ is stored in some node of the zone $z$, and $0$ otherwise. One can easily check that $f$ thus defined is indeed a flow and is maximum.
Conversely, since such maximum flows constructed from optimal assignments exist, any maximum flow must saturate the edges linked to the source or the sink. It can only take value 0 or 1 on the other edges, so every partition vertex is associated to exactly three distinct zone vertices, and every zone is associated to exactly $n_z$ partitions.
A maximum flow can be constructed using, for instance, Dinic's algorithm. This algorithm works by discovering augmenting paths to iteratively increase the flow. During the exploration of the graph to find augmenting paths, we can shuffle the order of enumeration of the neighbours to spread the associations between zones and partitions.
Once we have such an association, we can randomly distribute the $n_z$ edges picked for every zone $z$ to its nodes $v\in z$ so that every such $v$ gets $n_v$ edges. This defines an optimal assignment of partitions to nodes, as illustrated below.
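For instance, on the small example above, zone $z_2=\{b,c\}$ receives $n_{z_2}=3$ selected zone--partition edges (one per partition); they are then split among its nodes so that $b$ receives $n_b=2$ of them and $c$ receives $n_c=1$, e.g. partitions $1$ and $2$ on $b$ and partition $3$ on $c$.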
\subsection{Minimal transfer}
Assume that there was a previous assignment $(T'_i)_{1\le i\le N}$ corresponding to utilizations $(n'_v)_{v\in V}$. We would like the new computed assignment $(T_i)_{1\le i\le N}$ from some $(n_v)_{v\in V}$ to minimize the number of partitions that need to be transferred. We can imagine two different objectives corresponding to different hypotheses:
\begin{equation}
\tag{H3A}
\label{hyp:A}
\text{\emph{Transfers between different zones cost much more than inside a zone.}}
\end{equation}
\begin{equation}
\tag{H3B}
\label{hyp:B}
\text{\emph{Changing zone is not the largest cost when transferring a partition.}}
\end{equation}
In case $A$, our goal will be to minimize the number of changes of zone in the assignment of partitions to zones. More formally, we will maximize the quantity
$$
Q_Z :=
\sum_{1\le i\le N}
\#\{z\in Z ~|~ z\cap T_i \neq \emptyset, z\cap T'_i \neq \emptyset \}
.$$
In case $B$, our goal will be to minimize the number of changes of nodes in the assignment of partitions to nodes. We will maximize the quantity
$$
Q_V :=
\sum_{1\le i\le N} \#(T_i \cap T'_i).
$$
It is tempting to hope that there is a way to maximize both quantities, that having the least discrepancy in terms of nodes will lead to the least discrepancy in terms of zones. But this is actually wrong! We propose the following counter-example to convince the reader:
We consider eight nodes $a, a', b, c, d, d', e, e'$ belonging to five different zones $\{a,a'\}, \{b\}, \{c\}, \{d,d'\}, \{e, e'\}$. We take three partitions ($N=3$), that are originally assigned with some utilization $(n'_v)_{v\in V}$ as follows:
$$
T'_1=(a,b,c) \qquad
T'_2=(a',b,d) \qquad
T'_3=(b,c,e).
$$
This assignment, with updated utilization $(n_v)_{v\in V}$, minimizes the number of zone changes:
$$
T_1=(d,b,c) \qquad
T_2=(a,b,d) \qquad
T_3=(b,c,e').
$$
This one, with the same utilization of the zones, minimizes the number of node changes:
$$
T_1=(a,b,c) \qquad
T_2=(e',b,d) \qquad
T_3=(b,c,d').
$$
One can check that, in this case, it is impossible to simultaneously minimize the number of zone changes and the number of node changes.
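Indeed, a direct computation on the displayed triplets gives $(Q_Z, Q_V) = (8,6)$ for the first updated assignment and $(Q_Z, Q_V) = (7,7)$ for the second one: each of the two assignments trades one unit of one criterion for one unit of the other.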
Because of the redundancy constraint, we cannot use a greedy algorithm to just replace nodes in the triplets to try to reach the new utilization: this could lead to a blocking situation where there is still a hole to fill in a triplet but no available node satisfies the zone separation constraint. To circumvent this issue, we propose an algorithm based on finding cycles in a graph encoding of the assignment. As in Section \ref{sec:opt_assign}, we can explore the neighbours in a random order in the graph algorithms, to spread the triplet distribution.
\subsubsection{Minimizing the zone discrepancy}
First, notice that, given an assignment of partitions to \emph{zones}, it is easy to deduce an assignment to \emph{nodes} that minimizes the number of transfers for this zone assignment: for every zone $z$ and every node $v\in z$, pick in any way a set $P_v$ of partitions that were assigned to $v$ in $T'$ and to $z_v$ in $T$, with the cardinality of $P_v$ at most $n_v$. Once all these sets are chosen, complement the assignment to reach the right utilization for every node. If $\#P_v < n_v$, it means that all the partitions that could stay in $v$ (i.e. that were already in $v$ and are still assigned to its zone) do stay in $v$. If $\#P_v = n_v$, then $n_v$ partitions stay in $v$, which is the number of partitions that need to be in $v$ in the end. In both cases, we could not hope for better given the partition to zone assignment.
Our goal now is to find an assignment of partitions to zones that minimizes the number of zone transfers. To do so we are going to represent an assignment as a graph.
Let $G_T=(X,E_T)$ be the directed weighted graph with vertices $(\mathbf{x}_i)_{1\le i\le N}$ and $(\mathbf{y}_z)_{z\in Z}$. For any $1\le i\le N$ and $z\in Z$, $E_T$ contains the arc:
\begin{itemize}
\item $(\mathbf{x}_i, \mathbf{y}_z, +1)$, if $z$ appears in $T_i'$ and $T_i$;
\item $(\mathbf{x}_i, \mathbf{y}_z, -1)$, if $z$ appears in $T_i$ but not in $T'_i$;
\item $(\mathbf{y}_z, \mathbf{x}_i, -1)$, if $z$ appears in $T'_i$ but not in $T_i$;
\item $(\mathbf{y}_z, \mathbf{x}_i, +1)$, if $z$ does not appear in $T'_i$ nor in $T_i$.
\end{itemize}
In other words, the orientation of the arc encodes whether partition $i$ is stored in zone $z$ in the assignment $T$ and the weight $\pm 1$ encodes whether this corresponds to what happens in the assignment $T'$.
\begin{figure}[t]
\centering
\begin{minipage}{.40\linewidth}
\centering
\includegraphics[width=.8\linewidth]{figures/mini_zone}
\end{minipage}
\begin{minipage}{.55\linewidth}
\centering
\includegraphics[width=.8\linewidth]{figures/mini_node}
\end{minipage}
\caption{On the left: the graph $G_T$ encoding an assignment to minimize the zone discrepancy. On the right: the graph $G_T$ encoding an assignment to minimize the node discrepancy.}
\end{figure}
Notice that every partition vertex has three outgoing arcs, and every zone vertex has $n_z$ incoming arcs. Moreover, if $w(e)$ is the weight of an arc $e$, define the weight of $G_T$ by
\begin{align*}
w(G_T) := \sum_{e\in E} w(e) &= \#Z \times N - 4 \sum_{1\le i\le N} \#\{z\in Z ~|~ z\cap T_i = \emptyset, z\cap T'_i \neq \emptyset\} \\
&=\#Z \times N - 4 \sum_{1\le i\le N} \big(3- \#\{z\in Z ~|~ z\cap T_i \neq \emptyset, z\cap T'_i \neq \emptyset\}\big) \\
&= (\#Z-12)N + 4 Q_Z.
\end{align*}
Hence maximizing $Q_Z$ is equivalent to maximizing $w(G_T)$.
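As a sanity check on the counter-example above: the zone-minimizing assignment has $\#Z=5$, $N=3$ and $Q_Z=8$, hence $w(G_T) = (5-12)\times 3 + 4\times 8 = 11$, which matches a direct count of the arcs of $G_T$: $13$ arcs of weight $+1$ and $2$ arcs of weight $-1$ (the two zone changes of $T_1$).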
Assume that there exists some assignment $T^*$ with the same utilization $(n_v)_{v\in V}$. Define $G_{T^*}$ similarly and consider the set $E_\mathrm{Diff} = E_T \setminus E_{T^*}$ of arcs that appear only in $G_T$. Since all vertices have the same number of incoming arcs in $G_T$ and $G_{T^*}$, the vertices of the graph $(X, E_\mathrm{Diff})$ must all have the same number of incoming and outgoing arcs. So $E_\mathrm{Diff}$ can be expressed as a union of disjoint cycles. Moreover, the arcs of $E_\mathrm{Diff}$ must appear in $E_{T^*}$ with reversed orientation and opposite weight. Hence, we have
$$
w(G_T) - w(G_{T^*}) = 2 \sum_{e\in E_\mathrm{Diff}} w(e).
$$
Hence, if $T$ is not optimal, there exists some $T^*$ with $w(G_T) < w(G_{T^*})$, and by the considerations above, there must exist a cycle in $E_\mathrm{Diff}$, and hence in $G_T$, with negative weight. If we reverse the edges and weights along this cycle, we obtain some graph. Since we did not change the incoming degree of any vertex, this is the graph encoding of some valid assignment $T^+$ such that $w(G_{T^+}) > w(G_T)$. We can iterate this operation until there is no other assignment $T^*$ with larger weight, that is until we obtain an optimal assignment.
\subsubsection{Minimizing the node discrepancy}
We will follow an approach similar to the one where we minimize the zone discrepancy. Here we will directly obtain a node assignment from a graph encoding.
Let $G_T=(X,E_T)$ be the directed weighted graph with vertices $(\mathbf{x}_i)_{1\le i\le N}$, $(\mathbf{y}_{z,i})_{z\in Z, 1\le i\le N}$ and $(\mathbf{u}_v)_{v\in V}$. For any $1\le i\le N$ and $z\in Z$, $E_T$ contains the arc:
\begin{itemize}
\item $(\mathbf{x}_i, \mathbf{y}_{z,i}, 0)$, if $z$ appears in $T_i$;
\item $(\mathbf{y}_{z,i}, \mathbf{x}_i, 0)$, if $z$ does not appear in $T_i$.
\end{itemize}
For any $1\le i\le N$ and $v\in V$, $E_T$ contains the arc:
\begin{itemize}
\item $(\mathbf{y}_{z_v,i}, \mathbf{u}_v, +1)$, if $v$ appears in $T_i'$ and $T_i$;
\item $(\mathbf{y}_{z_v,i}, \mathbf{u}_v, -1)$, if $v$ appears in $T_i$ but not in $T'_i$;
\item $(\mathbf{u}_v, \mathbf{y}_{z_v,i}, -1)$, if $v$ appears in $T'_i$ but not in $T_i$;
\item $(\mathbf{u}_v, \mathbf{y}_{z_v,i}, +1)$, if $v$ does not appear in $T'_i$ nor in $T_i$.
\end{itemize}
Every vertex $\mathbf{x}_i$ has outgoing degree 3, every vertex $\mathbf{y}_{z,i}$ has outgoing degree 1, and every vertex $\mathbf{u}_v$ has incoming degree $n_v$.
Remark that any graph respecting these degree constraints is the encoding of a valid assignment with utilizations $(n_v)_{v\in V}$, in particular no partition is stored in two nodes of the same zone.
We define $w(G_T)$ similarly:
\begin{align*}
w(G_T) := \sum_{e\in E_T} w(e) &= \#V \times N - 4\sum_{1\le i\le N} \big(3-\#(T_i\cap T'_i)\big) \\
&= (\#V-12)N + 4Q_V.
\end{align*}
Exactly like in the previous section, the existence of an assignment with larger weight implies the existence of a negatively weighted cycle in $G_T$. Reversing this cycle gives us the encoding of a valid assignment with a larger weight. Iterating this operation yields an optimal assignment.
\subsubsection{Linear combination of both criteria}
In the graph $G_T$ defined in the previous section, instead of having weights $0$ and $\pm 1$, we could use weights $\pm\alpha$ between $\mathbf{x}$ and $\mathbf{y}$ vertices, and weights $\pm\beta$ between $\mathbf{y}$ and $\mathbf{u}$ vertices, for some $\alpha,\beta>0$ (the weight is positive if the assignment corresponds to $T'$ and negative otherwise). Then
\begin{align*}
w(G_T) &= \sum_{e\in E_T} w(e) =
\alpha \big( (\#Z-12)N + 4 Q_Z\big) +
\beta \big( (\#V-12)N + 4 Q_V\big) \\
&= \mathrm{const}+ 4(\alpha Q_Z + \beta Q_V).
\end{align*}
So maximizing the weight of such graph encoding would be equivalent to maximizing a linear combination of $Q_Z$ and $Q_V$.
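On the counter-example above, the two displayed assignments achieve $(Q_Z,Q_V)=(8,6)$ and $(7,7)$. Restricting attention to these two candidates, $\alpha=\beta$ makes them equivalent ($\alpha Q_Z+\beta Q_V = 14$ in both cases), $\alpha=2\beta$ favors the zone-minimizing one ($22$ against $21$), and $\beta=2\alpha$ favors the node-minimizing one ($21$ against $20$): the ratio $\alpha/\beta$ is what arbitrates between the two criteria.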
\subsection{Algorithm}
We give a high level description of the algorithm to compute an optimal 3-strict assignment. The operations appearing at lines 1,2,4 are respectively described by Algorithms \ref{alg:util},\ref{alg:opt} and \ref{alg:mini}.
\begin{algorithm}[H]
\caption{Optimal 3-strict assignment}
\label{alg:total}
\begin{algorithmic}[1]
\Function{Optimal 3-strict assignment}{$N$, $(c_v)_{v\in V}$, $T'$}
\State $(n_v)_{v\in V} \leftarrow$ \Call{Compute optimal utilization}{$N$, $(c_v)_{v\in V}$}
\State $(T_i)_{1\le i\le N} \leftarrow$ \Call{Compute candidate assignment}{$N$, $(n_v)_{v\in V}$}
\If {there was a previous assignment $T'$}
\State $T \leftarrow$ \Call{Minimization of transfers}{$(T_i)_{1\le i\le N}$, $(T'_i)_{1\le i\le N}$}
\EndIf
\State \Return $T$.
\EndFunction
\end{algorithmic}
\end{algorithm}
We give some worst-case complexity considerations for these algorithms. In the following, we assume $N>\#V>\#Z$. The complexity of Algorithm \ref{alg:total} is $O(N^3\# Z)$ if we assume \eqref{hyp:A} and $O(N^3 \#Z \#V)$ if we assume \eqref{hyp:B}.
Algorithm \ref{alg:util} can be implemented with complexity $O(\#V^2)$. The complexity of the function call at line \ref{lin:subutil} is $O(\#V)$. The difference between the sum of the subutilizations and $3N$ is at most the sum of the rounding errors when computing the $\hat{n}_v$. Hence it is bounded by $\#V$ and the loop at line \ref{lin:loopsub} is iterated at most $\#V$ times. Finding the minimizing $v$ at line \ref{lin:findmin} takes $O(\#V)$ operations (naively, we could also use a heap).
Algorithm \ref{alg:opt} can be implemented with complexity $O(N^3\times \#Z)$. The flow graph has $O(N+\#Z)$ vertices and $O(N\times \#Z)$ edges. Dinic's algorithm has complexity $O(\#\mathrm{Vertices}^2\#\mathrm{Edges})$ hence in our case it is $O(N^3\times \#Z)$.
Algorithm \ref{alg:mini} can be implemented with complexity $O(N^3\# Z)$ under \eqref{hyp:A} and $O(N^3 \#Z \#V)$ under \eqref{hyp:B}.
The graph $G_T$ has $O(N)$ vertices and $O(N\times \#Z)$ edges under assumption \eqref{hyp:A} and respectively $O(N\times \#Z)$ vertices and $O(N\times \#V)$ edges under assumption \eqref{hyp:B}. The loop at line \ref{lin:repeat} is iterated at most $N$ times since the distance between $T$ and $T'$ decreases at every iteration. Bellman-Ford algorithm has complexity $O(\#\mathrm{Vertices}\#\mathrm{Edges})$, which in our case amounts to $O(N^2\# Z)$ under \eqref{hyp:A} and $O(N^2 \#Z \#V)$ under \eqref{hyp:B}.
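As a rough numerical illustration with arbitrary values (not taken from any particular deployment), for $N=256$ partitions, $\#V=10$ nodes and $\#Z=3$ zones, the dominant term is about $N^3\,\#Z \approx 5\times 10^{7}$ elementary operations under \eqref{hyp:A} and $N^3\,\#Z\,\#V \approx 5\times 10^{8}$ under \eqref{hyp:B}.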
\begin{algorithm}
\caption{Computation of the optimal utilization}
\label{alg:util}
\begin{algorithmic}[1]
\Function{Compute optimal utilization}{$N$, $(c_v)_{v\in V}$}
\State $(\hat{n}_v)_{v\in V} \leftarrow $ \Call{Compute subutilization}{$N$, $(c_v)_{v\in V}$} \label{lin:subutil}
\While{$\sum_{v\in V} \hat{n}_v < 3N$} \label{lin:loopsub}
\State Pick $v\in V$ minimizing $\frac{c_v}{\hat{n}_v+1}$ and such that
$\sum_{v'\in z_v} \hat{n}_{v'} < N$ \label{lin:findmin}
\State $\hat{n}_v \leftarrow \hat{n}_v+1$
\EndWhile
\State \Return $(\hat{n}_v)_{v\in V}$
\EndFunction
\State
\Function{Compute subutilization}{$N$, $(c_v)_{v\in V}$}
\State $R \leftarrow 3$
\For{$v\in V$}
\State $\hat{n}_v \leftarrow \mathrm{unset}$
\EndFor
\For{$z\in Z$}
\State $c_z \leftarrow \sum_{v\in z} c_v$
\EndFor
\State $C \leftarrow \sum_{z\in Z} c_z$
\While{$\exists z \in Z$ such that $R\times c_{z} > C$}
\For{$v\in z$}
\State $\hat{n}_v \leftarrow \left\lfloor \frac{c_v}{c_z} N \right\rfloor$
\EndFor
\State $C \leftarrow C-c_z$
\State $R\leftarrow R-1$
\EndWhile
\For{$v\in V$}
\If{$\hat{n}_v = \mathrm{unset}$}
\State $\hat{n}_v \leftarrow \left\lfloor \frac{Rc_v}{C} N \right\rfloor$
\EndIf
\EndFor
\State \Return $(\hat{n}_v)_{v\in V}$
\EndFunction
\end{algorithmic}
\end{algorithm}
\begin{algorithm}
\caption{Computation of a candidate assignment}
\label{alg:opt}
\begin{algorithmic}[1]
\Function{Compute candidate assignment}{$N$, $(n_v)_{v\in V}$}
\State Compute the flow graph $G$
\State Compute the maximal flow $f$ using Dinic's algorithm with randomized neighbours enumeration
\State Construct the assignment $(T_i)_{1\le i\le N}$ from $f$
\State \Return $(T_i)_{1\le i\le N}$
\EndFunction
\end{algorithmic}
\end{algorithm}
\begin{algorithm}
\caption{Minimization of the number of transfers}
\label{alg:mini}
\begin{algorithmic}[1]
\Function{Minimization of transfers}{$(T_i)_{1\le i\le N}$, $(T'_i)_{1\le i\le N}$}
\State Construct the graph encoding $G_T$
\Repeat \label{lin:repeat}
\State Find a negative cycle $\gamma$ using Bellman-Ford algorithm on $G_T$
\State Reverse the orientations and weights of edges in $\gamma$
\Until{no negative cycle is found}
\State Update $(T_i)_{1\le i\le N}$ from $G_T$
\State \Return $(T_i)_{1\le i\le N}$
\EndFunction
\end{algorithmic}
\end{algorithm}
\newpage
\section{Computation of a 3-non-strict assignment}
\subsection{Choices of optimality}
In this mode, we primarily want to store every partition on three nodes, and only secondarily try to spread the nodes among different zones. So we choose not to take the zone repartition into account in the criterion of optimality.
We try to maximize $s^*$ defined in \eqref{eq:optimal}. So we can compute the optimal utilizations $(n_v)_{v\in V}$ with the only constraint that $n_v \le N$ for every node $v$. As in the previous section, we start with a sub-utilization proportional to $c_v$ (and capped at $N$), and we iteratively increase the $\hat{n}_v$ that is less than $N$ and maximizes the quantity $c_v/(\hat{n}_v+1)$, until the total sum is $3N$.
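As a small worked example with made-up capacities, take $N=4$ and four nodes of capacities $(16,8,4,4)$, so that $C=32$ and $3N=12$. The capped proportional sub-utilization is $(\min(N,\lfloor 16\cdot 12/32\rfloor), \lfloor 8\cdot 12/32\rfloor, \lfloor 4\cdot 12/32\rfloor, \lfloor 4\cdot 12/32\rfloor) = (4,3,1,1)$, which sums to $9$. Three increments are then needed (ties broken arbitrarily): the second node reaches $4$, then each of the two small nodes reaches $2$, giving the utilization $(4,4,2,2)$ and a partition size $\min_v c_v/n_v = 2$, against the ideal $C/(3N)\approx 2.67$.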
\subsection{Computation of a candidate assignment}
To compute a candidate assignment (that does not yet optimize zone spreading nor distance to a previous assignment), we can use the following flow problem.
Define the oriented weighted graph $(X,E)$. The set of vertices $X$ contains the source $\mathbf{s}$, the sink $\mathbf{t}$, vertices
$\mathbf{x}_p, \mathbf{u}^+_p, \mathbf{u}^-_p$ for every partition $p$, vertices $\mathbf{y}_{p,z}$ for every partition $p$ and zone $z$, and vertices $\mathbf{z}_v$ for every node $v$.
The set of edges is composed of the following arcs:
\begin{itemize}
\item ($\mathbf{s}$,$\mathbf{x}_p$, 3) for every partition $p$;
\item ($\mathbf{x}_p$,$\mathbf{u}^+_p$, 3) for every partition $p$;
\item ($\mathbf{x}_p$,$\mathbf{u}^-_p$, 2) for every partition $p$;
\item ($\mathbf{u}^+_p$,$\mathbf{y}_{p,z}$, 1) for every partition $p$ and zone $z$;
\item ($\mathbf{u}^-_p$,$\mathbf{y}_{p,z}$, 2) for every partition $p$ and zone $z$;
\item ($\mathbf{y}_{p,z}$,$\mathbf{z}_v$, 1) for every partition $p$, zone $z$ and node $v\in z$;
\item ($\mathbf{z}_v$, $\mathbf{t}$, $n_v$) for every node $v$;
\end{itemize}
One can check that any maximal flow in this graph corresponds to an assignment of partitions to nodes. In such a flow, all the arcs from $\mathbf{s}$ and to $\mathbf{t}$ are saturated. The arc from $\mathbf{y}_{p,z}$ to $\mathbf{z}_v$ is saturated if and only if $p$ is associated to~$v$.
Finally the flow from $\mathbf{x}_p$ to $\mathbf{y}_{p,z}$ can go either through $\mathbf{u}^+_p$ or $\mathbf{u}^-_p$.
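These capacities can be checked on the extreme cases. If all three copies of $p$ end up in a single zone $z$, the three units of flow leaving $\mathbf{x}_p$ can reach $\mathbf{y}_{p,z}$ only as $1+2$ units through $\mathbf{u}^+_p$ and $\mathbf{u}^-_p$ respectively, and must then use three distinct arcs $(\mathbf{y}_{p,z},\mathbf{z}_v,1)$, hence three distinct nodes of $z$. Conversely, if $p$ is spread over three zones, one unit can reach each of the three $\mathbf{y}_{p,z}$ through $\mathbf{u}^+_p$ alone.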
\subsection{Maximal spread and minimal transfers}
Notice that if the arc $\mathbf{u}_p^+\mathbf{y}_{p,z}$ is not saturated but there is some flow in $\mathbf{u}_p^-\mathbf{y}_{p,z}$, then it is possible to transfer a unit of flow from the path $\mathbf{x}_p\mathbf{u}_p^-\mathbf{y}_{p,z}$ to the path $\mathbf{x}_p\mathbf{u}_p^+\mathbf{y}_{p,z}$. So we can always find an equivalent maximal flow $f^*$ that uses the path through $\mathbf{u}_p^-$ only if the path through $\mathbf{u}_p^+$ is saturated.
We will use this fact to consider the amount of flow going through the vertices $\mathbf{u}^+$ as a measure of how well the partitions are spread over nodes belonging to different zones. If the partition $p$ is associated to 3 different zones, then a flow of 3 will cross $\mathbf{u}_p^+$ in $f^*$ (i.e. a flow of 0 will cross $\mathbf{u}_p^-$). If $p$ is associated to two zones, a flow of $2$ will cross $\mathbf{u}_p^+$. If $p$ is associated to a single zone, a flow of $1$ will cross $\mathbf{u}_p^+$.
Let $N_1, N_2, N_3$ be the number of partitions associated to respectively 1,2 and 3 distinct zones. We will optimize a linear combination of these variables using the discovery of positively weighted circuits in a graph.
At the same step, we will also optimize the distance to a previous assignment $T'$. Let $\alpha> \beta> \gamma \ge 0$ be three parameters.
Given the flow $f$, let $G_f=(X',E_f)$ be the multi-graph where $X' = X\setminus\{\mathbf{s},\mathbf{t}\}$. The set $E_f$ is composed of the arcs:
\begin{itemize}
\item As many arcs from $(\mathbf{x}_p, \mathbf{u}^+_p,\alpha), (\mathbf{x}_p, \mathbf{u}^+_p,\beta), (\mathbf{x}_p, \mathbf{u}^+_p,\gamma)$ (selected in this order) as there is flow crossing $\mathbf{u}^+_p$ in $f$;
\item As many arcs from $(\mathbf{u}^+_p, \mathbf{x}_p,-\gamma), (\mathbf{u}^+_p, \mathbf{x}_p,-\beta), (\mathbf{u}^+_p, \mathbf{x}_p,-\alpha)$ (selected in this order) as there is flow crossing $\mathbf{u}^-_p$ in $f$;
\item As many copies of $(\mathbf{x}_p, \mathbf{u}^-_p,0)$ as there is flow through $\mathbf{u}^-_p$;
\item As many copies of $(\mathbf{u}^-_p,\mathbf{x}_p,0)$ as needed so that the total number of arcs between these two vertices is 2;
\item $(\mathbf{u}^+_p,\mathbf{y}_{p,z}, 0)$ if the flow between these vertices is 1, and the opposite arc otherwise;
\item as many copies of $(\mathbf{u}^-_p,\mathbf{y}_{p,z}, 0)$ as the flow between these vertices, and as many copies of the opposite arc as 2~$-$~the flow;
\item $(\mathbf{y}_{p,z},\mathbf{z}_v, \pm1)$ if it is saturated in $f$, with $+1$ if $v\in T'_p$ and $-1$ otherwise;
\item $(\mathbf{z}_v,\mathbf{y}_{p,z}, \pm1)$ if it is not saturated in $f$, with $+1$ if $v\notin T'_p$ and $-1$ otherwise.
\end{itemize}
To summarize, arcs are oriented left to right if they correspond to a presence of flow in $f$, and right to left if they correspond to an absence of flow. They are positively weighted if we want them to stay in their current state, and negatively if we want them to switch. Let us compute the weight of such a graph.
\begin{multline*}
w(G_f) = \sum_{e\in E_f} w(e) \\
=
(\alpha - \beta -\gamma) N_1 + (\alpha +\beta - \gamma) N_2 + (\alpha+\beta+\gamma) N_3
\\ +
\#V\times N - 4 \sum_p \big(3-\#(T_p\cap T'_p)\big) \\
=(\#V-12+\alpha-\beta-\gamma)\times N + 4Q_V + 2\beta N_2 + 2(\beta+\gamma) N_3 \\
\end{multline*}
As in the 3-strict mode, one can check that the difference of two such graphs corresponding to the same $(n_v)$ is always Eulerian. Hence we can navigate in this class with the same greedy algorithm that discovers positive cycles and flips them.
The function that we optimize is
$$
2Q_V + \beta N_2 + (\beta+\gamma) N_3.
$$
The choice of parameters $\beta$ and $\gamma$ should be led by the following questions: for $\beta$, where do we put the tradeoff between zone dispersion and distance to the previous configuration? For $\gamma$, do we prefer to have more partitions spread between 2 zones, or fewer spread between at least 2 zones but more between 3 zones?
The quantity $Q_V$ varies between $0$ and $3N$; it should be of order $N$. The quantity $N_2+N_3$ should also be of order $N$ (it is exactly $N$ in the strict mode). So the two terms of the function are comparable.
\bibliography{optimal_layout}
\bibliographystyle{ieeetr}
\end{document}

View file

@ -189,7 +189,7 @@ let
rootFeatures = if features != null then rootFeatures = if features != null then
features features
else else
([ "garage/bundled-libs" "garage/sled" "garage/lmdb" "garage/k2v" ] ++ (if release then [ ([ "garage/bundled-libs" "garage/sled" "garage/k2v" ] ++ (if release then [
"garage/consul-discovery" "garage/consul-discovery"
"garage/kubernetes-discovery" "garage/kubernetes-discovery"
"garage/metrics" "garage/metrics"

View file

@ -9,22 +9,11 @@ GARAGE_RELEASE="${REPO_FOLDER}/target/release/"
NIX_RELEASE="${REPO_FOLDER}/result/bin/" NIX_RELEASE="${REPO_FOLDER}/result/bin/"
PATH="${GARAGE_DEBUG}:${GARAGE_RELEASE}:${NIX_RELEASE}:$PATH" PATH="${GARAGE_DEBUG}:${GARAGE_RELEASE}:${NIX_RELEASE}:$PATH"
if [ -z "$GARAGE_BIN" ]; then garage -c /tmp/config.1.toml bucket create eprouvette
GARAGE_BIN=$(which garage || exit 1) KEY_INFO=$(garage -c /tmp/config.1.toml key new --name opérateur)
echo -en "Found garage at: ${GARAGE_BIN}\n"
else
echo -en "Using garage binary at: ${GARAGE_BIN}\n"
fi
$GARAGE_BIN -c /tmp/config.1.toml bucket create eprouvette
if [ "$GARAGE_08" = "1" ]; then
KEY_INFO=$($GARAGE_BIN -c /tmp/config.1.toml key new --name opérateur)
else
KEY_INFO=$($GARAGE_BIN -c /tmp/config.1.toml key create opérateur)
fi
ACCESS_KEY=`echo $KEY_INFO|grep -Po 'GK[a-f0-9]+'` ACCESS_KEY=`echo $KEY_INFO|grep -Po 'GK[a-f0-9]+'`
SECRET_KEY=`echo $KEY_INFO|grep -Po 'Secret key: [a-f0-9]+'|grep -Po '[a-f0-9]+$'` SECRET_KEY=`echo $KEY_INFO|grep -Po 'Secret key: [a-f0-9]+'|grep -Po '[a-f0-9]+$'`
$GARAGE_BIN -c /tmp/config.1.toml bucket allow eprouvette --read --write --owner --key $ACCESS_KEY garage -c /tmp/config.1.toml bucket allow eprouvette --read --write --owner --key $ACCESS_KEY
echo "$ACCESS_KEY $SECRET_KEY" > /tmp/garage.s3 echo "$ACCESS_KEY $SECRET_KEY" > /tmp/garage.s3
echo "Bucket s3://eprouvette created. Credentials stored in /tmp/garage.s3." echo "Bucket s3://eprouvette created. Credentials stored in /tmp/garage.s3."

View file

@ -11,16 +11,11 @@ PATH="${GARAGE_DEBUG}:${GARAGE_RELEASE}:${NIX_RELEASE}:$PATH"
FANCYCOLORS=("41m" "42m" "44m" "45m" "100m" "104m") FANCYCOLORS=("41m" "42m" "44m" "45m" "100m" "104m")
export RUST_BACKTRACE=1 export RUST_BACKTRACE=1
export RUST_LOG=garage=info,garage_api=debug export RUST_LOG=garage=info,garage_api=debug,netapp=trace
MAIN_LABEL="\e[${FANCYCOLORS[0]}[main]\e[49m" MAIN_LABEL="\e[${FANCYCOLORS[0]}[main]\e[49m"
if [ -z "$GARAGE_BIN" ]; then WHICH_GARAGE=$(which garage || exit 1)
GARAGE_BIN=$(which garage || exit 1) echo -en "${MAIN_LABEL} Found garage at: ${WHICH_GARAGE}\n"
echo -en "${MAIN_LABEL} Found garage at: ${GARAGE_BIN}\n"
else
echo -en "${MAIN_LABEL} Using garage binary at: ${GARAGE_BIN}\n"
fi
$GARAGE_BIN --version
NETWORK_SECRET="$(openssl rand -hex 32)" NETWORK_SECRET="$(openssl rand -hex 32)"
@ -33,7 +28,6 @@ LABEL="\e[${FANCYCOLORS[$count]}[$count]\e[49m"
cat > $CONF_PATH <<EOF cat > $CONF_PATH <<EOF
block_size = 1048576 # objects are split in blocks of maximum this number of bytes block_size = 1048576 # objects are split in blocks of maximum this number of bytes
metadata_dir = "/tmp/garage-meta-$count" metadata_dir = "/tmp/garage-meta-$count"
db_engine = "lmdb"
data_dir = "/tmp/garage-data-$count" data_dir = "/tmp/garage-data-$count"
rpc_bind_addr = "0.0.0.0:$((3900+$count))" # the port other Garage nodes will use to talk to this node rpc_bind_addr = "0.0.0.0:$((3900+$count))" # the port other Garage nodes will use to talk to this node
rpc_public_addr = "127.0.0.1:$((3900+$count))" rpc_public_addr = "127.0.0.1:$((3900+$count))"
@ -57,7 +51,7 @@ EOF
echo -en "$LABEL configuration written to $CONF_PATH\n" echo -en "$LABEL configuration written to $CONF_PATH\n"
($GARAGE_BIN -c /tmp/config.$count.toml server 2>&1|while read r; do echo -en "$LABEL $r\n"; done) & (garage -c /tmp/config.$count.toml server 2>&1|while read r; do echo -en "$LABEL $r\n"; done) &
done done
# >>>>>>>>>>>>>>>> END FOR LOOP ON NODES # >>>>>>>>>>>>>>>> END FOR LOOP ON NODES
@ -79,14 +73,14 @@ fi
sleep 3 sleep 3
# Establish connections between nodes # Establish connections between nodes
for count in $(seq 1 3); do for count in $(seq 1 3); do
NODE=$($GARAGE_BIN -c /tmp/config.$count.toml node id -q) NODE=$(garage -c /tmp/config.$count.toml node id -q)
for count2 in $(seq 1 3); do for count2 in $(seq 1 3); do
$GARAGE_BIN -c /tmp/config.$count2.toml node connect $NODE garage -c /tmp/config.$count2.toml node connect $NODE
done done
done done
RETRY=120 RETRY=120
until $GARAGE_BIN -c /tmp/config.1.toml status 2>&1|grep -q HEALTHY ; do until garage -c /tmp/config.1.toml status 2>&1|grep -q HEALTHY ; do
(( RETRY-- )) (( RETRY-- ))
if (( RETRY <= 0 )); then if (( RETRY <= 0 )); then
echo -en "${MAIN_LABEL} Garage did not start" echo -en "${MAIN_LABEL} Garage did not start"

View file

@ -9,17 +9,9 @@ GARAGE_RELEASE="${REPO_FOLDER}/target/release/"
NIX_RELEASE="${REPO_FOLDER}/result/bin/" NIX_RELEASE="${REPO_FOLDER}/result/bin/"
PATH="${GARAGE_DEBUG}:${GARAGE_RELEASE}:${NIX_RELEASE}:$PATH" PATH="${GARAGE_DEBUG}:${GARAGE_RELEASE}:${NIX_RELEASE}:$PATH"
if [ -z "$GARAGE_BIN" ]; then
GARAGE_BIN=$(which garage || exit 1)
echo -en "Found garage at: ${GARAGE_BIN}\n"
else
echo -en "Using garage binary at: ${GARAGE_BIN}\n"
fi
$GARAGE_BIN --version
sleep 5 sleep 5
RETRY=120 RETRY=120
until $GARAGE_BIN -c /tmp/config.1.toml status 2>&1|grep -q HEALTHY ; do until garage -c /tmp/config.1.toml status 2>&1|grep -q HEALTHY ; do
(( RETRY-- )) (( RETRY-- ))
if (( RETRY <= 0 )); then if (( RETRY <= 0 )); then
echo "garage did not start in time, failing." echo "garage did not start in time, failing."
@ -29,20 +21,11 @@ until $GARAGE_BIN -c /tmp/config.1.toml status 2>&1|grep -q HEALTHY ; do
sleep 1 sleep 1
done done
if [ "$GARAGE_08" = "1" ]; then garage -c /tmp/config.1.toml status \
$GARAGE_BIN -c /tmp/config.1.toml status \
| grep 'NO ROLE' \ | grep 'NO ROLE' \
| grep -Po '^[0-9a-f]+' \ | grep -Po '^[0-9a-f]+' \
| while read id; do | while read id; do
$GARAGE_BIN -c /tmp/config.1.toml layout assign $id -z dc1 -c 1 garage -c /tmp/config.1.toml layout assign $id -z dc1 -c 1
done done
else
$GARAGE_BIN -c /tmp/config.1.toml status \
| grep 'NO ROLE' \
| grep -Po '^[0-9a-f]+' \
| while read id; do
$GARAGE_BIN -c /tmp/config.1.toml layout assign $id -z dc1 -c 1G
done
fi
$GARAGE_BIN -c /tmp/config.1.toml layout apply --version 1 garage -c /tmp/config.1.toml layout apply --version 1

View file

@ -21,4 +21,4 @@ version: 0.4.1
# incremented each time you make changes to the application. Versions are not expected to # incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using. # follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes. # It is recommended to use it with quotes.
appVersion: "v0.9.0" appVersion: "v0.8.4"

View file

@ -1,138 +0,0 @@
#!/usr/bin/env bash
: '
This script tests part renumbering on an S3 remote (here configured for Minio).
On Minio:
The results confirm that if I upload parts with number 1, 4, 5 and 6,
they are renumbered to 1, 2, 3 and 4 after CompleteMultipartUpload.
Thus, specifying partNumber=4 on a GetObject/HeadObject should return
information on the part I originally uploaded with part number
On S3: not tested
Sample output (on Minio):
f07e1404cc527d494242824ded3a616b part1
78974cd4d0f622eb3426ea7cd22f5a1c part4
f9cc379f8baa61645558d9ba7e6351fa part5
1bd2383eebbac1f8e7143575ba5b1f4a part6
Upload ID: 6838b813-d0ca-400b-9d28-ec8b2b5cd004
PART 1 ETag: "f07e1404cc527d494242824ded3a616b"
PART 4 ETag: "78974cd4d0f622eb3426ea7cd22f5a1c"
PART 5 ETag: "f9cc379f8baa61645558d9ba7e6351fa"
PART 6 ETag: "1bd2383eebbac1f8e7143575ba5b1f4a"
======================================== LIST ====
{
"Parts": [
{
"PartNumber": 1,
"LastModified": "2023-04-25T10:21:54.350000+00:00",
"ETag": "\"f07e1404cc527d494242824ded3a616b\"",
"Size": 20971520
},
{
"PartNumber": 4,
"LastModified": "2023-04-25T10:21:54.350000+00:00",
"ETag": "\"78974cd4d0f622eb3426ea7cd22f5a1c\"",
"Size": 20971520
},
{
"PartNumber": 5,
"LastModified": "2023-04-25T10:21:54.350000+00:00",
"ETag": "\"f9cc379f8baa61645558d9ba7e6351fa\"",
"Size": 20971520
},
{
"PartNumber": 6,
"LastModified": "2023-04-25T10:21:54.350000+00:00",
"ETag": "\"1bd2383eebbac1f8e7143575ba5b1f4a\"",
"Size": 20971520
}
],
"ChecksumAlgorithm": "",
"Initiator": {
"ID": "02d6176db174dc93cb1b899f7c6078f08654445fe8cf1b6ce98d8855f66bdbf4",
"DisplayName": "02d6176db174dc93cb1b899f7c6078f08654445fe8cf1b6ce98d8855f66bdbf4"
},
"Owner": {
"DisplayName": "02d6176db174dc93cb1b899f7c6078f08654445fe8cf1b6ce98d8855f66bdbf4",
"ID": "02d6176db174dc93cb1b899f7c6078f08654445fe8cf1b6ce98d8855f66bdbf4"
},
"StorageClass": "STANDARD"
}
======================================== COMPLETE ====
{
"Location": "http://localhost:9000/test/upload",
"Bucket": "test",
"Key": "upload",
"ETag": "\"8e817c8ccd442f9a79c77b58fe808c43-4\""
}
======================================== LIST ====
An error occurred (NoSuchUpload) when calling the ListParts operation: The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.
======================================== GET PART 4 ====
{
"AcceptRanges": "bytes",
"LastModified": "2023-04-25T10:21:59+00:00",
"ContentLength": 20971520,
"ETag": "\"8e817c8ccd442f9a79c77b58fe808c43-4\"",
"ContentRange": "bytes 62914560-83886079/83886080",
"ContentType": "binary/octet-stream",
"Metadata": {},
"PartsCount": 4
}
1bd2383eebbac1f8e7143575ba5b1f4a get-part4
Conclusions:
- Parts are indeed renumbered with consecutive numbers
- ListParts only applies to multipart uploads in progress,
it cannot be used once the multipart upload has been completed
'
export AWS_ACCESS_KEY_ID=1D8Pk2k4oQSoh1BU
export AWS_SECRET_ACCESS_KEY=4B46SR8U7FUgY0raB8Zuxg1NLyLTvbNV
function aws { command aws --endpoint-url http://localhost:9000 $@ ; }
aws --version
aws s3 mb s3://test
for NUM in 1 4 5 6; do
dd if=/dev/urandom of=part$NUM bs=1M count=10
done
md5sum part*
UPLOAD=$(aws s3api create-multipart-upload --bucket test --key 'upload' | jq -r ".UploadId")
echo "Upload ID: $UPLOAD"
PARTS=""
for NUM in 1 4 5 6; do
ETAG=$(aws s3api upload-part --bucket test --key 'upload' --part-number $NUM \
--body "part$NUM" --upload-id "$UPLOAD" | jq -r ".ETag")
echo "PART $NUM ETag: $ETAG"
if [ -n "$PARTS" ]; then
PARTS="$PARTS,"
fi
PARTS="$PARTS {\"ETag\":$ETAG,\"PartNumber\":$NUM}"
done
echo "======================================== LIST ===="
aws s3api list-parts --bucket test --key upload --upload-id "$UPLOAD" | jq
echo "======================================== COMPLETE ===="
echo "{\"Parts\":[$PARTS]}" > mpu
aws s3api complete-multipart-upload --multipart-upload file://mpu \
--bucket test --key 'upload' --upload-id "$UPLOAD"
echo "======================================== LIST ===="
aws s3api list-parts --bucket test --key upload --upload-id "$UPLOAD" | jq
echo "======================================== GET PART 4 ===="
aws s3api get-object --bucket test --key upload --part-number 4 get-part4
md5sum get-part4

View file

@ -1,103 +0,0 @@
#!/usr/bin/env bash
: '
This script tests whether uploaded parts can be skipped in a
CompleteMultipartUpoad
On Minio: yes, parts can be skipped
On S3: not tested
Sample output (on Minio):
f23911bcd1230f5ebe8887cbf5bc396e part1
a2657143167eaf647c40473e78a091dc part4
72f72c02c5163bc81024b28ac818c5e0 part5
e29cf500d20498218904b8df8806caa2 part6
Upload ID: e8fe7b83-9800-46fb-ae90-9d7ccd42fe76
PART 1 ETag: "f23911bcd1230f5ebe8887cbf5bc396e"
PART 4 ETag: "a2657143167eaf647c40473e78a091dc"
PART 5 ETag: "72f72c02c5163bc81024b28ac818c5e0"
PART 6 ETag: "e29cf500d20498218904b8df8806caa2"
======================================== COMPLETE ====
{
"Location": "http://localhost:9000/test/upload",
"Bucket": "test",
"Key": "upload",
"ETag": "\"48246e44d4b38bdc2f3c10ee25b1af17-3\""
}
======================================== GET FULL ====
{
"AcceptRanges": "bytes",
"LastModified": "2023-04-25T10:54:35+00:00",
"ContentLength": 31457280,
"ETag": "\"48246e44d4b38bdc2f3c10ee25b1af17-3\"",
"ContentType": "binary/octet-stream",
"Metadata": {}
}
97fb904da7ad310699a6afab0eb6e061 get-full
97fb904da7ad310699a6afab0eb6e061 -
======================================== GET PART 3 ====
{
"AcceptRanges": "bytes",
"LastModified": "2023-04-25T10:54:35+00:00",
"ContentLength": 10485760,
"ETag": "\"48246e44d4b38bdc2f3c10ee25b1af17-3\"",
"ContentRange": "bytes 20971520-31457279/31457280",
"ContentType": "binary/octet-stream",
"Metadata": {},
"PartsCount": 3
}
e29cf500d20498218904b8df8806caa2 get-part3
Conclusions:
- Skipping a part in a CompleteMultipartUpoad call is OK
- The part is simply not included in the stored object
- Sequential part renumbering counts only non-skipped parts
'
export AWS_ACCESS_KEY_ID=1D8Pk2k4oQSoh1BU
export AWS_SECRET_ACCESS_KEY=4B46SR8U7FUgY0raB8Zuxg1NLyLTvbNV
function aws { command aws --endpoint-url http://localhost:9000 $@ ; }
aws --version
aws s3 mb s3://test
for NUM in 1 4 5 6; do
dd if=/dev/urandom of=part$NUM bs=1M count=10
done
md5sum part*
UPLOAD=$(aws s3api create-multipart-upload --bucket test --key 'upload' | jq -r ".UploadId")
echo "Upload ID: $UPLOAD"
PARTS=""
for NUM in 1 4 5 6; do
ETAG=$(aws s3api upload-part --bucket test --key 'upload' --part-number $NUM \
--body "part$NUM" --upload-id "$UPLOAD" | jq -r ".ETag")
echo "PART $NUM ETag: $ETAG"
if [ "$NUM" != "5" ]; then
if [ -n "$PARTS" ]; then
PARTS="$PARTS,"
fi
PARTS="$PARTS {\"ETag\":$ETAG,\"PartNumber\":$NUM}"
fi
done
echo "======================================== COMPLETE ===="
echo "{\"Parts\":[$PARTS]}" > mpu
aws s3api complete-multipart-upload --multipart-upload file://mpu \
--bucket test --key 'upload' --upload-id "$UPLOAD"
echo "======================================== GET FULL ===="
aws s3api get-object --bucket test --key upload get-full
md5sum get-full
cat part1 part4 part6 | md5sum
echo "======================================== GET PART 3 ===="
aws s3api get-object --bucket test --key upload --part-number 3 get-part3
md5sum get-part3

View file

@ -31,11 +31,6 @@ dd if=/dev/urandom of=/tmp/garage.1.rnd bs=1k count=2 # No multipart, inline sto
dd if=/dev/urandom of=/tmp/garage.2.rnd bs=1M count=5 # No multipart but file will be chunked dd if=/dev/urandom of=/tmp/garage.2.rnd bs=1M count=5 # No multipart but file will be chunked
dd if=/dev/urandom of=/tmp/garage.3.rnd bs=1M count=10 # by default, AWS starts using multipart at 8MB dd if=/dev/urandom of=/tmp/garage.3.rnd bs=1M count=10 # by default, AWS starts using multipart at 8MB
dd if=/dev/urandom of=/tmp/garage.part1.rnd bs=1M count=5
dd if=/dev/urandom of=/tmp/garage.part2.rnd bs=1M count=5
dd if=/dev/urandom of=/tmp/garage.part3.rnd bs=1M count=5
dd if=/dev/urandom of=/tmp/garage.part4.rnd bs=1M count=5
# data of lower entropy, to test compression # data of lower entropy, to test compression
dd if=/dev/urandom bs=1k count=2 | base64 -w0 > /tmp/garage.1.b64 dd if=/dev/urandom bs=1k count=2 | base64 -w0 > /tmp/garage.1.b64
dd if=/dev/urandom bs=1M count=5 | base64 -w0 > /tmp/garage.2.b64 dd if=/dev/urandom bs=1M count=5 | base64 -w0 > /tmp/garage.2.b64
@ -45,7 +40,7 @@ echo "🧪 S3 API testing..."
# AWS # AWS
if [ -z "$SKIP_AWS" ]; then if [ -z "$SKIP_AWS" ]; then
echo "🛠️ Testing with awscli (aws s3)" echo "🛠️ Testing with awscli"
source ${SCRIPT_FOLDER}/dev-env-aws.sh source ${SCRIPT_FOLDER}/dev-env-aws.sh
aws s3 ls aws s3 ls
for idx in {1..3}.{rnd,b64}; do for idx in {1..3}.{rnd,b64}; do
@ -56,36 +51,8 @@ if [ -z "$SKIP_AWS" ]; then
rm /tmp/garage.$idx.dl rm /tmp/garage.$idx.dl
aws s3 rm "s3://eprouvette/&+-é\"/garage.$idx.aws" aws s3 rm "s3://eprouvette/&+-é\"/garage.$idx.aws"
done done
echo "🛠️ Testing multipart uploads with awscli (aws s3api)"
UPLOAD=$(aws s3api create-multipart-upload --bucket eprouvette --key 'upload' | jq -r ".UploadId")
echo "Upload ID: $UPLOAD"
ETAG3=$(aws s3api upload-part --bucket eprouvette --key 'upload' \
--part-number 3 --body "/tmp/garage.part1.rnd" --upload-id "$UPLOAD" \
| jq -r ".ETag")
ETAG2=$(aws s3api upload-part --bucket eprouvette --key 'upload' \
--part-number 2 --body "/tmp/garage.part2.rnd" --upload-id "$UPLOAD" \
| jq -r ".ETag")
ETAG3=$(aws s3api upload-part --bucket eprouvette --key 'upload' \
--part-number 3 --body "/tmp/garage.part3.rnd" --upload-id "$UPLOAD" \
| jq -r ".ETag")
ETAG6=$(aws s3api upload-part --bucket eprouvette --key 'upload' \
--part-number 6 --body "/tmp/garage.part4.rnd" --upload-id "$UPLOAD" \
| jq -r ".ETag")
MPU="{\"Parts\":[{\"PartNumber\":2,\"ETag\":$ETAG2}, {\"PartNumber\":3,\"ETag\":$ETAG3}, {\"PartNumber\":6,\"ETag\":$ETAG6}]}"
echo $MPU > /tmp/garage.mpu.json
aws s3api complete-multipart-upload --multipart-upload file:///tmp/garage.mpu.json \
--bucket eprouvette --key 'upload' --upload-id "$UPLOAD"
aws s3api get-object --bucket eprouvette --key upload /tmp/garage.mpu.get
if [ "$(md5sum /tmp/garage.mpu.get | cut -d ' ' -f 1)" != "$(cat /tmp/garage.part{2,3,4}.rnd | md5sum | cut -d ' ' -f 1)" ]; then
echo "Invalid multipart upload"
exit 1
fi
fi fi
echo "OK!!"
exit 0
# S3CMD # S3CMD
if [ -z "$SKIP_S3CMD" ]; then if [ -z "$SKIP_S3CMD" ]; then
echo "🛠️ Testing with s3cmd" echo "🛠️ Testing with s3cmd"
@ -174,7 +141,6 @@ rm eprouvette/winscp
EOF EOF
fi fi
rm /tmp/garage.part{1..4}.rnd
rm /tmp/garage.{1..3}.{rnd,b64} rm /tmp/garage.{1..3}.{rnd,b64}
echo "🏁 Teardown" echo "🏁 Teardown"

View file

@ -1,75 +0,0 @@
#!/usr/bin/env bash
set -ex
export LC_ALL=C.UTF-8
export LANG=C.UTF-8
SCRIPT_FOLDER="`dirname \"$0\"`"
REPO_FOLDER="${SCRIPT_FOLDER}/../"
GARAGE_DEBUG="${REPO_FOLDER}/target/debug/"
GARAGE_RELEASE="${REPO_FOLDER}/target/release/"
NIX_RELEASE="${REPO_FOLDER}/result/bin/:${REPO_FOLDER}/result-bin/bin/"
PATH="${GARAGE_DEBUG}:${GARAGE_RELEASE}:${NIX_RELEASE}:$PATH"
OLD_VERSION="$1"
ARCH="$2"
echo "Downloading old garage binary..."
curl https://garagehq.deuxfleurs.fr/_releases/$OLD_VERSION/$ARCH/garage > /tmp/old_garage
chmod +x /tmp/old_garage
echo "============= insert data into old version cluster ================="
export GARAGE_BIN=/tmp/old_garage
if echo $OLD_VERSION | grep 'v0\.8\.'; then
echo "Detected Garage v0.8.x"
export GARAGE_08=1
fi
echo "⏳ Setup cluster using old version"
$GARAGE_BIN --version
${SCRIPT_FOLDER}/dev-clean.sh
${SCRIPT_FOLDER}/dev-cluster.sh > /tmp/garage.log 2>&1 &
sleep 6
${SCRIPT_FOLDER}/dev-configure.sh
${SCRIPT_FOLDER}/dev-bucket.sh
echo "🛠️ Inserting data in old cluster"
source ${SCRIPT_FOLDER}/dev-env-rclone.sh
rclone copy "${SCRIPT_FOLDER}/../.git/" garage:eprouvette/test_dotgit --stats=1s --stats-log-level=NOTICE --stats-one-line
echo "🏁 Stopping old cluster"
killall -INT old_garage
sleep 2
killall -9 old_garage || true
echo "🏁 Removing old garage version"
rm -rv $GARAGE_BIN
export -n GARAGE_BIN
export -n GARAGE_08
echo "================ read data from new cluster ==================="
echo "⏳ Setup cluster using new version"
pwd
ls
export GARAGE_BIN=$(which garage)
$GARAGE_BIN --version
${SCRIPT_FOLDER}/dev-cluster.sh >> /tmp/garage.log 2>&1 &
sleep 3
echo "🛠️ Retrieving data from old cluster"
rclone copy garage:eprouvette/test_dotgit /tmp/test_dotgit --stats=1s --stats-log-level=NOTICE --stats-one-line --fast-list
if ! diff <(find "${SCRIPT_FOLDER}/../.git" -type f | xargs md5sum | cut -d ' ' -f 1 | sort) <(find /tmp/test_dotgit -type f | xargs md5sum | cut -d ' ' -f 1 | sort); then
echo "TEST FAILURE: directories are different"
exit 1
fi
rm -r /tmp/test_dotgit
echo "🏁 Teardown"
rm -rf /tmp/garage-{data,meta}-*
rm -rf /tmp/config.*.toml
echo "✅ Success"

View file

@ -1,6 +1,6 @@
[package] [package]
name = "garage_api" name = "garage_api"
version = "0.9.0" version = "0.8.4"
authors = ["Alex Auvolat <alex@adnab.me>"] authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"

View file

@ -25,8 +25,7 @@ use crate::admin::bucket::*;
use crate::admin::cluster::*; use crate::admin::cluster::*;
use crate::admin::error::*; use crate::admin::error::*;
use crate::admin::key::*; use crate::admin::key::*;
use crate::admin::router_v0; use crate::admin::router::{Authorization, Endpoint};
use crate::admin::router_v1::{Authorization, Endpoint};
use crate::helpers::host_to_bucket; use crate::helpers::host_to_bucket;
pub struct AdminApiServer { pub struct AdminApiServer {
@ -230,13 +229,8 @@ impl ApiHandler for AdminApiServer {
type Error = Error; type Error = Error;
fn parse_endpoint(&self, req: &Request<Body>) -> Result<Endpoint, Error> { fn parse_endpoint(&self, req: &Request<Body>) -> Result<Endpoint, Error> {
if req.uri().path().starts_with("/v0/") {
let endpoint_v0 = router_v0::Endpoint::from_request(req)?;
Endpoint::from_v0(endpoint_v0)
} else {
Endpoint::from_request(req) Endpoint::from_request(req)
} }
}
async fn handle( async fn handle(
&self, &self,
@ -282,13 +276,8 @@ impl ApiHandler for AdminApiServer {
Endpoint::RevertClusterLayout => handle_revert_cluster_layout(&self.garage, req).await, Endpoint::RevertClusterLayout => handle_revert_cluster_layout(&self.garage, req).await,
// Keys // Keys
Endpoint::ListKeys => handle_list_keys(&self.garage).await, Endpoint::ListKeys => handle_list_keys(&self.garage).await,
Endpoint::GetKeyInfo { Endpoint::GetKeyInfo { id, search } => {
id, handle_get_key_info(&self.garage, id, search).await
search,
show_secret_key,
} => {
let show_secret_key = show_secret_key.map(|x| x == "true").unwrap_or(false);
handle_get_key_info(&self.garage, id, search, show_secret_key).await
} }
Endpoint::CreateKey => handle_create_key(&self.garage, req).await, Endpoint::CreateKey => handle_create_key(&self.garage, req).await,
Endpoint::ImportKey => handle_import_key(&self.garage, req).await, Endpoint::ImportKey => handle_import_key(&self.garage, req).await,

View file

@ -14,7 +14,6 @@ use garage_model::bucket_alias_table::*;
use garage_model::bucket_table::*; use garage_model::bucket_table::*;
use garage_model::garage::Garage; use garage_model::garage::Garage;
use garage_model::permission::*; use garage_model::permission::*;
use garage_model::s3::mpu_table;
use garage_model::s3::object_table::*; use garage_model::s3::object_table::*;
use crate::admin::error::*; use crate::admin::error::*;
@ -125,14 +124,6 @@ async fn bucket_info_results(
.map(|x| x.filtered_values(&garage.system.ring.borrow())) .map(|x| x.filtered_values(&garage.system.ring.borrow()))
.unwrap_or_default(); .unwrap_or_default();
let mpu_counters = garage
.mpu_counter_table
.table
.get(&bucket_id, &EmptyKey)
.await?
.map(|x| x.filtered_values(&garage.system.ring.borrow()))
.unwrap_or_default();
let mut relevant_keys = HashMap::new(); let mut relevant_keys = HashMap::new();
for (k, _) in bucket for (k, _) in bucket
.state .state
@ -217,12 +208,12 @@ async fn bucket_info_results(
} }
}) })
.collect::<Vec<_>>(), .collect::<Vec<_>>(),
objects: *counters.get(OBJECTS).unwrap_or(&0), objects: counters.get(OBJECTS).cloned().unwrap_or_default(),
bytes: *counters.get(BYTES).unwrap_or(&0), bytes: counters.get(BYTES).cloned().unwrap_or_default(),
unfinished_uploads: *counters.get(UNFINISHED_UPLOADS).unwrap_or(&0), unfinished_uploads: counters
unfinished_multipart_uploads: *mpu_counters.get(mpu_table::UPLOADS).unwrap_or(&0), .get(UNFINISHED_UPLOADS)
unfinished_multipart_upload_parts: *mpu_counters.get(mpu_table::PARTS).unwrap_or(&0), .cloned()
unfinished_multipart_upload_bytes: *mpu_counters.get(mpu_table::BYTES).unwrap_or(&0), .unwrap_or_default(),
quotas: ApiBucketQuotas { quotas: ApiBucketQuotas {
max_size: quotas.max_size, max_size: quotas.max_size,
max_objects: quotas.max_objects, max_objects: quotas.max_objects,
@ -244,9 +235,6 @@ struct GetBucketInfoResult {
objects: i64, objects: i64,
bytes: i64, bytes: i64,
unfinished_uploads: i64, unfinished_uploads: i64,
unfinished_multipart_uploads: i64,
unfinished_multipart_upload_parts: i64,
unfinished_multipart_upload_bytes: i64,
quotas: ApiBucketQuotas, quotas: ApiBucketQuotas,
} }

View file

@ -1,13 +1,14 @@
use std::collections::HashMap;
use std::net::SocketAddr; use std::net::SocketAddr;
use std::sync::Arc; use std::sync::Arc;
use hyper::{Body, Request, Response}; use hyper::{Body, Request, Response, StatusCode};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use garage_util::crdt::*; use garage_util::crdt::*;
use garage_util::data::*; use garage_util::data::*;
use garage_rpc::layout; use garage_rpc::layout::*;
use garage_model::garage::Garage; use garage_model::garage::Garage;
@ -25,37 +26,26 @@ pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<
.system .system
.get_known_nodes() .get_known_nodes()
.into_iter() .into_iter()
.map(|i| KnownNodeResp { .map(|i| {
id: hex::encode(i.id), (
hex::encode(i.id),
KnownNodeResp {
addr: i.addr, addr: i.addr,
is_up: i.is_up, is_up: i.is_up,
last_seen_secs_ago: i.last_seen_secs_ago, last_seen_secs_ago: i.last_seen_secs_ago,
hostname: i.status.hostname, hostname: i.status.hostname,
},
)
}) })
.collect(), .collect(),
layout: format_cluster_layout(&garage.system.get_cluster_layout()), layout: get_cluster_layout(garage),
}; };
Ok(json_ok_response(&res)?) Ok(json_ok_response(&res)?)
} }
pub async fn handle_get_cluster_health(garage: &Arc<Garage>) -> Result<Response<Body>, Error> { pub async fn handle_get_cluster_health(garage: &Arc<Garage>) -> Result<Response<Body>, Error> {
use garage_rpc::system::ClusterHealthStatus;
let health = garage.system.health(); let health = garage.system.health();
let health = ClusterHealth {
status: match health.status {
ClusterHealthStatus::Healthy => "healthy",
ClusterHealthStatus::Degraded => "degraded",
ClusterHealthStatus::Unavailable => "unavailable",
},
known_nodes: health.known_nodes,
connected_nodes: health.connected_nodes,
storage_nodes: health.storage_nodes,
storage_nodes_ok: health.storage_nodes_ok,
partitions: health.partitions,
partitions_quorum: health.partitions_quorum,
partitions_all_ok: health.partitions_all_ok,
};
Ok(json_ok_response(&health)?) Ok(json_ok_response(&health)?)
} }
@ -84,68 +74,33 @@ pub async fn handle_connect_cluster_nodes(
} }
pub async fn handle_get_cluster_layout(garage: &Arc<Garage>) -> Result<Response<Body>, Error> { pub async fn handle_get_cluster_layout(garage: &Arc<Garage>) -> Result<Response<Body>, Error> {
let res = format_cluster_layout(&garage.system.get_cluster_layout()); let res = get_cluster_layout(garage);
Ok(json_ok_response(&res)?) Ok(json_ok_response(&res)?)
} }
fn format_cluster_layout(layout: &layout::ClusterLayout) -> GetClusterLayoutResponse { fn get_cluster_layout(garage: &Arc<Garage>) -> GetClusterLayoutResponse {
let roles = layout let layout = garage.system.get_cluster_layout();
.roles
.items()
.iter()
.filter_map(|(k, _, v)| v.0.clone().map(|x| (k, x)))
.map(|(k, v)| NodeRoleResp {
id: hex::encode(k),
zone: v.zone.clone(),
capacity: v.capacity,
tags: v.tags.clone(),
})
.collect::<Vec<_>>();
let staged_role_changes = layout
.staging_roles
.items()
.iter()
.filter(|(k, _, v)| layout.roles.get(k) != Some(v))
.map(|(k, _, v)| match &v.0 {
None => NodeRoleChange {
id: hex::encode(k),
action: NodeRoleChangeEnum::Remove { remove: true },
},
Some(r) => NodeRoleChange {
id: hex::encode(k),
action: NodeRoleChangeEnum::Update {
zone: r.zone.clone(),
capacity: r.capacity,
tags: r.tags.clone(),
},
},
})
.collect::<Vec<_>>();
GetClusterLayoutResponse { GetClusterLayoutResponse {
version: layout.version, version: layout.version,
roles, roles: layout
staged_role_changes, .roles
.items()
.iter()
.filter(|(_, _, v)| v.0.is_some())
.map(|(k, _, v)| (hex::encode(k), v.0.clone()))
.collect(),
staged_role_changes: layout
.staging
.items()
.iter()
.filter(|(k, _, v)| layout.roles.get(k) != Some(v))
.map(|(k, _, v)| (hex::encode(k), v.0.clone()))
.collect(),
} }
} }
// ----
#[derive(Debug, Clone, Copy, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct ClusterHealth {
status: &'static str,
known_nodes: usize,
connected_nodes: usize,
storage_nodes: usize,
storage_nodes_ok: usize,
partitions: usize,
partitions_quorum: usize,
partitions_all_ok: usize,
}
#[derive(Serialize)] #[derive(Serialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
struct GetClusterStatusResponse { struct GetClusterStatusResponse {
@ -154,19 +109,11 @@ struct GetClusterStatusResponse {
garage_features: Option<&'static [&'static str]>, garage_features: Option<&'static [&'static str]>,
rust_version: &'static str, rust_version: &'static str,
db_engine: String, db_engine: String,
known_nodes: Vec<KnownNodeResp>, known_nodes: HashMap<String, KnownNodeResp>,
layout: GetClusterLayoutResponse, layout: GetClusterLayoutResponse,
} }
#[derive(Serialize)] #[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct ApplyClusterLayoutResponse {
message: Vec<String>,
layout: GetClusterLayoutResponse,
}
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct ConnectClusterNodesResponse { struct ConnectClusterNodesResponse {
success: bool, success: bool,
error: Option<String>, error: Option<String>,
@ -176,31 +123,18 @@ struct ConnectClusterNodesResponse {
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
struct GetClusterLayoutResponse { struct GetClusterLayoutResponse {
version: u64, version: u64,
roles: Vec<NodeRoleResp>, roles: HashMap<String, Option<NodeRole>>,
staged_role_changes: Vec<NodeRoleChange>, staged_role_changes: HashMap<String, Option<NodeRole>>,
} }
#[derive(Serialize)] #[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct NodeRoleResp {
id: String,
zone: String,
capacity: Option<u64>,
tags: Vec<String>,
}
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct KnownNodeResp { struct KnownNodeResp {
id: String,
addr: SocketAddr, addr: SocketAddr,
is_up: bool, is_up: bool,
last_seen_secs_ago: Option<u64>, last_seen_secs_ago: Option<u64>,
hostname: String, hostname: String,
} }
// ---- update functions ----
pub async fn handle_update_cluster_layout( pub async fn handle_update_cluster_layout(
garage: &Arc<Garage>, garage: &Arc<Garage>,
req: Request<Body>, req: Request<Body>,
@ -210,35 +144,22 @@ pub async fn handle_update_cluster_layout(
let mut layout = garage.system.get_cluster_layout(); let mut layout = garage.system.get_cluster_layout();
let mut roles = layout.roles.clone(); let mut roles = layout.roles.clone();
roles.merge(&layout.staging_roles); roles.merge(&layout.staging);
for change in updates { for (node, role) in updates {
let node = hex::decode(&change.id).ok_or_bad_request("Invalid node identifier")?; let node = hex::decode(node).ok_or_bad_request("Invalid node identifier")?;
let node = Uuid::try_from(&node).ok_or_bad_request("Invalid node identifier")?; let node = Uuid::try_from(&node).ok_or_bad_request("Invalid node identifier")?;
let new_role = match change.action {
NodeRoleChangeEnum::Remove { remove: true } => None,
NodeRoleChangeEnum::Update {
zone,
capacity,
tags,
} => Some(layout::NodeRole {
zone,
capacity,
tags,
}),
_ => return Err(Error::bad_request("Invalid layout change")),
};
layout layout
.staging_roles .staging
.merge(&roles.update_mutator(node, layout::NodeRoleV(new_role))); .merge(&roles.update_mutator(node, NodeRoleV(role)));
} }
garage.system.update_cluster_layout(&layout).await?; garage.system.update_cluster_layout(&layout).await?;
let res = format_cluster_layout(&layout); Ok(Response::builder()
Ok(json_ok_response(&res)?) .status(StatusCode::NO_CONTENT)
.body(Body::empty())?)
} }
pub async fn handle_apply_cluster_layout( pub async fn handle_apply_cluster_layout(
@ -248,15 +169,12 @@ pub async fn handle_apply_cluster_layout(
let param = parse_json_body::<ApplyRevertLayoutRequest>(req).await?; let param = parse_json_body::<ApplyRevertLayoutRequest>(req).await?;
let layout = garage.system.get_cluster_layout(); let layout = garage.system.get_cluster_layout();
let (layout, msg) = layout.apply_staged_changes(Some(param.version))?; let layout = layout.apply_staged_changes(Some(param.version))?;
garage.system.update_cluster_layout(&layout).await?; garage.system.update_cluster_layout(&layout).await?;
let res = ApplyClusterLayoutResponse { Ok(Response::builder()
message: msg, .status(StatusCode::NO_CONTENT)
layout: format_cluster_layout(&layout), .body(Body::empty())?)
};
Ok(json_ok_response(&res)?)
} }
pub async fn handle_revert_cluster_layout( pub async fn handle_revert_cluster_layout(
@ -269,39 +187,14 @@ pub async fn handle_revert_cluster_layout(
let layout = layout.revert_staged_changes(Some(param.version))?; let layout = layout.revert_staged_changes(Some(param.version))?;
garage.system.update_cluster_layout(&layout).await?; garage.system.update_cluster_layout(&layout).await?;
let res = format_cluster_layout(&layout); Ok(Response::builder()
Ok(json_ok_response(&res)?) .status(StatusCode::NO_CONTENT)
.body(Body::empty())?)
} }
// ---- type UpdateClusterLayoutRequest = HashMap<String, Option<NodeRole>>;
type UpdateClusterLayoutRequest = Vec<NodeRoleChange>;
#[derive(Deserialize)] #[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct ApplyRevertLayoutRequest { struct ApplyRevertLayoutRequest {
version: u64, version: u64,
} }
// ----
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct NodeRoleChange {
id: String,
#[serde(flatten)]
action: NodeRoleChangeEnum,
}
#[derive(Serialize, Deserialize)]
#[serde(untagged)]
enum NodeRoleChangeEnum {
#[serde(rename_all = "camelCase")]
Remove { remove: bool },
#[serde(rename_all = "camelCase")]
Update {
zone: String,
capacity: Option<u64>,
tags: Vec<String>,
},
}
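For reference, a minimal standalone sketch of the wire format implied by the NodeRoleChange / NodeRoleChangeEnum definitions shown on the left-hand side above. This mirrors the serde attributes as written (camelCase rename, flattened id, untagged enum) but is not the Garage code itself; it assumes the serde and serde_json crates, and the node ids are made up for illustration.

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct NodeRoleChange {
    id: String,
    #[serde(flatten)]
    action: NodeRoleChangeEnum,
}

#[derive(Serialize, Deserialize)]
#[serde(untagged)]
enum NodeRoleChangeEnum {
    #[serde(rename_all = "camelCase")]
    Remove { remove: bool },
    #[serde(rename_all = "camelCase")]
    Update {
        zone: String,
        capacity: Option<u64>,
        tags: Vec<String>,
    },
}

fn main() {
    // One update and one removal, as they would appear in an
    // UpdateClusterLayoutRequest body (a Vec<NodeRoleChange>).
    let body = vec![
        NodeRoleChange {
            id: "6a8e08af2aab1083".into(),
            action: NodeRoleChangeEnum::Update {
                zone: "dc1".into(),
                capacity: Some(100_000_000_000),
                tags: vec!["ssd".into()],
            },
        },
        NodeRoleChange {
            id: "e2ee7984ee65b260".into(),
            action: NodeRoleChangeEnum::Remove { remove: true },
        },
    ];
    // Prints roughly:
    // [{"id":"6a8e08af2aab1083","zone":"dc1","capacity":100000000000,"tags":["ssd"]},
    //  {"id":"e2ee7984ee65b260","remove":true}]
    println!("{}", serde_json::to_string(&body).unwrap());
}

Because the enum is untagged and flattened into the struct, an update and a removal are distinguished purely by which fields are present in the JSON object.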

View file

@ -10,7 +10,7 @@ use garage_model::garage::Garage;
use garage_model::key_table::*; use garage_model::key_table::*;
use crate::admin::error::*; use crate::admin::error::*;
use crate::helpers::{is_default, json_ok_response, parse_json_body}; use crate::helpers::{json_ok_response, parse_json_body};
pub async fn handle_list_keys(garage: &Arc<Garage>) -> Result<Response<Body>, Error> { pub async fn handle_list_keys(garage: &Arc<Garage>) -> Result<Response<Body>, Error> {
let res = garage let res = garage
@ -34,7 +34,6 @@ pub async fn handle_list_keys(garage: &Arc<Garage>) -> Result<Response<Body>, Er
} }
#[derive(Serialize)] #[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct ListKeyResultItem { struct ListKeyResultItem {
id: String, id: String,
name: String, name: String,
@ -44,7 +43,6 @@ pub async fn handle_get_key_info(
garage: &Arc<Garage>, garage: &Arc<Garage>,
id: Option<String>, id: Option<String>,
search: Option<String>, search: Option<String>,
show_secret_key: bool,
) -> Result<Response<Body>, Error> { ) -> Result<Response<Body>, Error> {
let key = if let Some(id) = id { let key = if let Some(id) = id {
garage.key_helper().get_existing_key(&id).await? garage.key_helper().get_existing_key(&id).await?
@ -57,7 +55,7 @@ pub async fn handle_get_key_info(
unreachable!(); unreachable!();
}; };
key_info_results(garage, key, show_secret_key).await key_info_results(garage, key).await
} }
pub async fn handle_create_key( pub async fn handle_create_key(
@ -66,16 +64,15 @@ pub async fn handle_create_key(
) -> Result<Response<Body>, Error> { ) -> Result<Response<Body>, Error> {
let req = parse_json_body::<CreateKeyRequest>(req).await?; let req = parse_json_body::<CreateKeyRequest>(req).await?;
let key = Key::new(req.name.as_deref().unwrap_or("Unnamed key")); let key = Key::new(&req.name);
garage.key_table.insert(&key).await?; garage.key_table.insert(&key).await?;
key_info_results(garage, key, true).await key_info_results(garage, key).await
} }
#[derive(Deserialize)] #[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct CreateKeyRequest { struct CreateKeyRequest {
name: Option<String>, name: String,
} }
pub async fn handle_import_key( pub async fn handle_import_key(
@ -89,15 +86,10 @@ pub async fn handle_import_key(
return Err(Error::KeyAlreadyExists(req.access_key_id.to_string())); return Err(Error::KeyAlreadyExists(req.access_key_id.to_string()));
} }
let imported_key = Key::import( let imported_key = Key::import(&req.access_key_id, &req.secret_access_key, &req.name);
&req.access_key_id,
&req.secret_access_key,
req.name.as_deref().unwrap_or("Imported key"),
)
.ok_or_bad_request("Invalid key format")?;
garage.key_table.insert(&imported_key).await?; garage.key_table.insert(&imported_key).await?;
key_info_results(garage, imported_key, false).await key_info_results(garage, imported_key).await
} }
#[derive(Deserialize)] #[derive(Deserialize)]
@ -105,7 +97,7 @@ pub async fn handle_import_key(
struct ImportKeyRequest { struct ImportKeyRequest {
access_key_id: String, access_key_id: String,
secret_access_key: String, secret_access_key: String,
name: Option<String>, name: String,
} }
pub async fn handle_update_key( pub async fn handle_update_key(
@ -135,11 +127,10 @@ pub async fn handle_update_key(
garage.key_table.insert(&key).await?; garage.key_table.insert(&key).await?;
key_info_results(garage, key, false).await key_info_results(garage, key).await
} }
#[derive(Deserialize)] #[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct UpdateKeyRequest { struct UpdateKeyRequest {
name: Option<String>, name: Option<String>,
allow: Option<KeyPerm>, allow: Option<KeyPerm>,
@ -158,11 +149,7 @@ pub async fn handle_delete_key(garage: &Arc<Garage>, id: String) -> Result<Respo
.body(Body::empty())?) .body(Body::empty())?)
} }
async fn key_info_results( async fn key_info_results(garage: &Arc<Garage>, key: Key) -> Result<Response<Body>, Error> {
garage: &Arc<Garage>,
key: Key,
show_secret: bool,
) -> Result<Response<Body>, Error> {
let mut relevant_buckets = HashMap::new(); let mut relevant_buckets = HashMap::new();
let key_state = key.state.as_option().unwrap(); let key_state = key.state.as_option().unwrap();
@ -191,11 +178,7 @@ async fn key_info_results(
let res = GetKeyInfoResult { let res = GetKeyInfoResult {
name: key_state.name.get().clone(), name: key_state.name.get().clone(),
access_key_id: key.key_id.clone(), access_key_id: key.key_id.clone(),
secret_access_key: if show_secret { secret_access_key: key_state.secret_key.clone(),
Some(key_state.secret_key.clone())
} else {
None
},
permissions: KeyPerm { permissions: KeyPerm {
create_bucket: *key_state.allow_create_bucket.get(), create_bucket: *key_state.allow_create_bucket.get(),
}, },
@ -241,8 +224,7 @@ async fn key_info_results(
struct GetKeyInfoResult { struct GetKeyInfoResult {
name: String, name: String,
access_key_id: String, access_key_id: String,
#[serde(skip_serializing_if = "is_default")] secret_access_key: String,
secret_access_key: Option<String>,
permissions: KeyPerm, permissions: KeyPerm,
buckets: Vec<KeyInfoBucketResult>, buckets: Vec<KeyInfoBucketResult>,
} }
@ -264,7 +246,6 @@ struct KeyInfoBucketResult {
} }
#[derive(Serialize, Deserialize, Default)] #[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
pub(crate) struct ApiBucketKeyPerm { pub(crate) struct ApiBucketKeyPerm {
#[serde(default)] #[serde(default)]
pub(crate) read: bool, pub(crate) read: bool,

View file

@ -1,7 +1,6 @@
pub mod api_server; pub mod api_server;
mod error; mod error;
mod router_v0; mod router;
mod router_v1;
mod bucket; mod bucket;
mod cluster; mod cluster;

View file

@ -5,6 +5,12 @@ use hyper::{Method, Request};
use crate::admin::error::*; use crate::admin::error::*;
use crate::router_macros::*; use crate::router_macros::*;
pub enum Authorization {
None,
MetricsToken,
AdminToken,
}
router_match! {@func router_match! {@func
/// List of all Admin API endpoints. /// List of all Admin API endpoints.
@ -128,6 +134,15 @@ impl Endpoint {
Ok(res) Ok(res)
} }
/// Get the kind of authorization which is required to perform the operation.
pub fn authorization_type(&self) -> Authorization {
match self {
Self::Health => Authorization::None,
Self::CheckDomain => Authorization::None,
Self::Metrics => Authorization::MetricsToken,
_ => Authorization::AdminToken,
}
}
} }
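As a side note, a minimal standalone sketch of how a handler might gate requests on the Authorization kind returned by authorization_type. This is a hypothetical helper, not the actual AdminApiServer verification code; the token comparison and the choice to accept the admin token for metrics are invented for illustration.

enum Authorization {
    None,
    MetricsToken,
    AdminToken,
}

// Returns true when the request's bearer token (if any) is sufficient
// for the required authorization level.
fn is_authorized(
    required: &Authorization,
    bearer: Option<&str>,
    metrics_token: &str,
    admin_token: &str,
) -> bool {
    match required {
        Authorization::None => true,
        // In this sketch the admin token is accepted wherever the metrics token is.
        Authorization::MetricsToken => {
            matches!(bearer, Some(t) if t == metrics_token || t == admin_token)
        }
        Authorization::AdminToken => matches!(bearer, Some(t) if t == admin_token),
    }
}

fn main() {
    assert!(is_authorized(&Authorization::None, None, "m", "a"));
    assert!(is_authorized(&Authorization::MetricsToken, Some("a"), "m", "a"));
    assert!(!is_authorized(&Authorization::AdminToken, Some("m"), "m", "a"));
    println!("ok");
}
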
generateQueryParameters! { generateQueryParameters! {

View file

@ -1,235 +0,0 @@
use std::borrow::Cow;
use hyper::{Method, Request};
use crate::admin::error::*;
use crate::admin::router_v0;
use crate::router_macros::*;
pub enum Authorization {
None,
MetricsToken,
AdminToken,
}
router_match! {@func
/// List of all Admin API endpoints.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Endpoint {
Options,
CheckDomain,
Health,
Metrics,
GetClusterStatus,
GetClusterHealth,
ConnectClusterNodes,
// Layout
GetClusterLayout,
UpdateClusterLayout,
ApplyClusterLayout,
RevertClusterLayout,
// Keys
ListKeys,
CreateKey,
ImportKey,
GetKeyInfo {
id: Option<String>,
search: Option<String>,
show_secret_key: Option<String>,
},
DeleteKey {
id: String,
},
UpdateKey {
id: String,
},
// Buckets
ListBuckets,
CreateBucket,
GetBucketInfo {
id: Option<String>,
global_alias: Option<String>,
},
DeleteBucket {
id: String,
},
UpdateBucket {
id: String,
},
// Bucket-Key Permissions
BucketAllowKey,
BucketDenyKey,
// Bucket aliases
GlobalAliasBucket {
id: String,
alias: String,
},
GlobalUnaliasBucket {
id: String,
alias: String,
},
LocalAliasBucket {
id: String,
access_key_id: String,
alias: String,
},
LocalUnaliasBucket {
id: String,
access_key_id: String,
alias: String,
},
}}
impl Endpoint {
/// Determine which S3 endpoint a request is for using the request, and a bucket which was
/// possibly extracted from the Host header.
/// Returns Self plus bucket name, if endpoint is not Endpoint::ListBuckets
pub fn from_request<T>(req: &Request<T>) -> Result<Self, Error> {
let uri = req.uri();
let path = uri.path();
let query = uri.query();
let mut query = QueryParameters::from_query(query.unwrap_or_default())?;
let res = router_match!(@gen_path_parser (req.method(), path, query) [
OPTIONS _ => Options,
GET "/check" => CheckDomain,
GET "/health" => Health,
GET "/metrics" => Metrics,
GET "/v1/status" => GetClusterStatus,
GET "/v1/health" => GetClusterHealth,
POST "/v1/connect" => ConnectClusterNodes,
// Layout endpoints
GET "/v1/layout" => GetClusterLayout,
POST "/v1/layout" => UpdateClusterLayout,
POST "/v1/layout/apply" => ApplyClusterLayout,
POST "/v1/layout/revert" => RevertClusterLayout,
// API key endpoints
GET "/v1/key" if id => GetKeyInfo (query_opt::id, query_opt::search, query_opt::show_secret_key),
GET "/v1/key" if search => GetKeyInfo (query_opt::id, query_opt::search, query_opt::show_secret_key),
POST "/v1/key" if id => UpdateKey (query::id),
POST "/v1/key" => CreateKey,
POST "/v1/key/import" => ImportKey,
DELETE "/v1/key" if id => DeleteKey (query::id),
GET "/v1/key" => ListKeys,
// Bucket endpoints
GET "/v1/bucket" if id => GetBucketInfo (query_opt::id, query_opt::global_alias),
GET "/v1/bucket" if global_alias => GetBucketInfo (query_opt::id, query_opt::global_alias),
GET "/v1/bucket" => ListBuckets,
POST "/v1/bucket" => CreateBucket,
DELETE "/v1/bucket" if id => DeleteBucket (query::id),
PUT "/v1/bucket" if id => UpdateBucket (query::id),
// Bucket-key permissions
POST "/v1/bucket/allow" => BucketAllowKey,
POST "/v1/bucket/deny" => BucketDenyKey,
// Bucket aliases
PUT "/v1/bucket/alias/global" => GlobalAliasBucket (query::id, query::alias),
DELETE "/v1/bucket/alias/global" => GlobalUnaliasBucket (query::id, query::alias),
PUT "/v1/bucket/alias/local" => LocalAliasBucket (query::id, query::access_key_id, query::alias),
DELETE "/v1/bucket/alias/local" => LocalUnaliasBucket (query::id, query::access_key_id, query::alias),
]);
if let Some(message) = query.nonempty_message() {
debug!("Unused query parameter: {}", message)
}
Ok(res)
}
/// Some endpoints work exactly the same in their v1/ version as they did in their v0/ version.
/// For these endpoints, we can convert a v0/ call to its equivalent as if it was made using
/// its v1/ URL.
pub fn from_v0(v0_endpoint: router_v0::Endpoint) -> Result<Self, Error> {
match v0_endpoint {
// Cluster endpoints
router_v0::Endpoint::ConnectClusterNodes => Ok(Self::ConnectClusterNodes),
// - GetClusterStatus: response format changed
// - GetClusterHealth: response format changed
// Layout endpoints
router_v0::Endpoint::RevertClusterLayout => Ok(Self::RevertClusterLayout),
// - GetClusterLayout: response format changed
// - UpdateClusterLayout: query format changed
// - ApplyClusterLayout: response format changed
// Key endpoints
router_v0::Endpoint::ListKeys => Ok(Self::ListKeys),
router_v0::Endpoint::CreateKey => Ok(Self::CreateKey),
router_v0::Endpoint::GetKeyInfo { id, search } => Ok(Self::GetKeyInfo {
id,
search,
show_secret_key: Some("true".into()),
}),
router_v0::Endpoint::DeleteKey { id } => Ok(Self::DeleteKey { id }),
// - UpdateKey: response format changed (secret key no longer returned)
// Bucket endpoints
router_v0::Endpoint::GetBucketInfo { id, global_alias } => {
Ok(Self::GetBucketInfo { id, global_alias })
}
router_v0::Endpoint::ListBuckets => Ok(Self::ListBuckets),
router_v0::Endpoint::CreateBucket => Ok(Self::CreateBucket),
router_v0::Endpoint::DeleteBucket { id } => Ok(Self::DeleteBucket { id }),
router_v0::Endpoint::UpdateBucket { id } => Ok(Self::UpdateBucket { id }),
// Bucket-key permissions
router_v0::Endpoint::BucketAllowKey => Ok(Self::BucketAllowKey),
router_v0::Endpoint::BucketDenyKey => Ok(Self::BucketDenyKey),
// Bucket alias endpoints
router_v0::Endpoint::GlobalAliasBucket { id, alias } => {
Ok(Self::GlobalAliasBucket { id, alias })
}
router_v0::Endpoint::GlobalUnaliasBucket { id, alias } => {
Ok(Self::GlobalUnaliasBucket { id, alias })
}
router_v0::Endpoint::LocalAliasBucket {
id,
access_key_id,
alias,
} => Ok(Self::LocalAliasBucket {
id,
access_key_id,
alias,
}),
router_v0::Endpoint::LocalUnaliasBucket {
id,
access_key_id,
alias,
} => Ok(Self::LocalUnaliasBucket {
id,
access_key_id,
alias,
}),
// For endpoints that have different body content syntax, issue
// deprecation warning
_ => Err(Error::bad_request(format!(
"v0/ endpoint is no longer supported: {}",
v0_endpoint.name()
))),
}
}
/// Get the kind of authorization which is required to perform the operation.
pub fn authorization_type(&self) -> Authorization {
match self {
Self::Health => Authorization::None,
Self::CheckDomain => Authorization::None,
Self::Metrics => Authorization::MetricsToken,
_ => Authorization::AdminToken,
}
}
}
generateQueryParameters! {
keywords: [],
fields: [
"format" => format,
"id" => id,
"search" => search,
"globalAlias" => global_alias,
"alias" => alias,
"accessKeyId" => access_key_id,
"showSecretKey" => show_secret_key
]
}

View file

@ -152,10 +152,6 @@ pub fn json_ok_response<T: Serialize>(res: &T) -> Result<Response<Body>, Error>
.body(Body::from(resp_json))?) .body(Body::from(resp_json))?)
} }
pub fn is_default<T: Default + PartialEq>(v: &T) -> bool {
*v == T::default()
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;

View file

@ -26,7 +26,6 @@ macro_rules! router_match {
$($meth:ident $path:pat $(if $required:ident)? => $api:ident $(($($conv:ident :: $param:ident),*))?,)* $($meth:ident $path:pat $(if $required:ident)? => $api:ident $(($($conv:ident :: $param:ident),*))?,)*
]) => {{ ]) => {{
{ {
#[allow(unused_parens)]
match ($method, $reqpath) { match ($method, $reqpath) {
$( $(
(&Method::$meth, $path) if true $(&& $query.$required.is_some())? => Endpoint::$api { (&Method::$meth, $path) if true $(&& $query.$required.is_some())? => Endpoint::$api {
@ -129,6 +128,12 @@ macro_rules! router_match {
} }
} }
}; };
(@if ($($cond:tt)+) then ($($then:tt)*) else ($($else:tt)*)) => {
$($then)*
};
(@if () then ($($then:tt)*) else ($($else:tt)*)) => {
$($else)*
};
} }
/// This macro is used to generate part of the code in this module. It must be called only once, and /// This macro is used to generate part of the code in this module. It must be called only once, and

View file

@ -26,9 +26,7 @@ use crate::s3::copy::*;
use crate::s3::cors::*; use crate::s3::cors::*;
use crate::s3::delete::*; use crate::s3::delete::*;
use crate::s3::get::*; use crate::s3::get::*;
use crate::s3::lifecycle::*;
use crate::s3::list::*; use crate::s3::list::*;
use crate::s3::multipart::*;
use crate::s3::post_object::handle_post_object; use crate::s3::post_object::handle_post_object;
use crate::s3::put::*; use crate::s3::put::*;
use crate::s3::router::Endpoint; use crate::s3::router::Endpoint;
@ -258,7 +256,7 @@ impl ApiHandler for S3ApiServer {
bucket_name, bucket_name,
bucket_id, bucket_id,
delimiter: delimiter.map(|d| d.to_string()), delimiter: delimiter.map(|d| d.to_string()),
page_size: max_keys.unwrap_or(1000).clamp(1, 1000), page_size: max_keys.map(|p| p.clamp(1, 1000)).unwrap_or(1000),
prefix: prefix.unwrap_or_default(), prefix: prefix.unwrap_or_default(),
urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false), urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false),
}, },
@ -288,7 +286,7 @@ impl ApiHandler for S3ApiServer {
bucket_name, bucket_name,
bucket_id, bucket_id,
delimiter: delimiter.map(|d| d.to_string()), delimiter: delimiter.map(|d| d.to_string()),
page_size: max_keys.unwrap_or(1000).clamp(1, 1000), page_size: max_keys.map(|p| p.clamp(1, 1000)).unwrap_or(1000),
urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false), urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false),
prefix: prefix.unwrap_or_default(), prefix: prefix.unwrap_or_default(),
}, },
@ -321,7 +319,7 @@ impl ApiHandler for S3ApiServer {
bucket_name, bucket_name,
bucket_id, bucket_id,
delimiter: delimiter.map(|d| d.to_string()), delimiter: delimiter.map(|d| d.to_string()),
page_size: max_uploads.unwrap_or(1000).clamp(1, 1000), page_size: max_uploads.map(|p| p.clamp(1, 1000)).unwrap_or(1000),
prefix: prefix.unwrap_or_default(), prefix: prefix.unwrap_or_default(),
urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false), urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false),
}, },
@ -345,7 +343,7 @@ impl ApiHandler for S3ApiServer {
key, key,
upload_id, upload_id,
part_number_marker: part_number_marker.map(|p| p.clamp(1, 10000)), part_number_marker: part_number_marker.map(|p| p.clamp(1, 10000)),
max_parts: max_parts.unwrap_or(1000).clamp(1, 1000), max_parts: max_parts.map(|p| p.clamp(1, 1000)).unwrap_or(1000),
}, },
) )
.await .await
@ -355,21 +353,14 @@ impl ApiHandler for S3ApiServer {
} }
Endpoint::GetBucketWebsite {} => handle_get_website(&bucket).await, Endpoint::GetBucketWebsite {} => handle_get_website(&bucket).await,
Endpoint::PutBucketWebsite {} => { Endpoint::PutBucketWebsite {} => {
handle_put_website(garage, bucket.clone(), req, content_sha256).await handle_put_website(garage, bucket_id, req, content_sha256).await
} }
Endpoint::DeleteBucketWebsite {} => handle_delete_website(garage, bucket.clone()).await, Endpoint::DeleteBucketWebsite {} => handle_delete_website(garage, bucket_id).await,
Endpoint::GetBucketCors {} => handle_get_cors(&bucket).await, Endpoint::GetBucketCors {} => handle_get_cors(&bucket).await,
Endpoint::PutBucketCors {} => { Endpoint::PutBucketCors {} => {
handle_put_cors(garage, bucket.clone(), req, content_sha256).await handle_put_cors(garage, bucket_id, req, content_sha256).await
}
Endpoint::DeleteBucketCors {} => handle_delete_cors(garage, bucket.clone()).await,
Endpoint::GetBucketLifecycleConfiguration {} => handle_get_lifecycle(&bucket).await,
Endpoint::PutBucketLifecycleConfiguration {} => {
handle_put_lifecycle(garage, bucket.clone(), req, content_sha256).await
}
Endpoint::DeleteBucketLifecycle {} => {
handle_delete_lifecycle(garage, bucket.clone()).await
} }
Endpoint::DeleteBucketCors {} => handle_delete_cors(garage, bucket_id).await,
endpoint => Err(Error::NotImplemented(endpoint.name().to_owned())), endpoint => Err(Error::NotImplemented(endpoint.name().to_owned())),
}; };

View file

@ -2,7 +2,7 @@ use std::pin::Pin;
use std::sync::Arc; use std::sync::Arc;
use std::time::{Duration, SystemTime, UNIX_EPOCH}; use std::time::{Duration, SystemTime, UNIX_EPOCH};
use futures::{stream, stream::Stream, StreamExt}; use futures::{stream, stream::Stream, StreamExt, TryFutureExt};
use md5::{Digest as Md5Digest, Md5}; use md5::{Digest as Md5Digest, Md5};
use bytes::Bytes; use bytes::Bytes;
@ -18,14 +18,12 @@ use garage_util::time::*;
use garage_model::garage::Garage; use garage_model::garage::Garage;
use garage_model::key_table::Key; use garage_model::key_table::Key;
use garage_model::s3::block_ref_table::*; use garage_model::s3::block_ref_table::*;
use garage_model::s3::mpu_table::*;
use garage_model::s3::object_table::*; use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*; use garage_model::s3::version_table::*;
use crate::helpers::parse_bucket_key; use crate::helpers::parse_bucket_key;
use crate::s3::error::*; use crate::s3::error::*;
use crate::s3::multipart; use crate::s3::put::{decode_upload_id, get_headers};
use crate::s3::put::get_headers;
use crate::s3::xml::{self as s3_xml, xmlns_tag}; use crate::s3::xml::{self as s3_xml, xmlns_tag};
pub async fn handle_copy( pub async fn handle_copy(
@ -94,10 +92,7 @@ pub async fn handle_copy(
let tmp_dest_object_version = ObjectVersion { let tmp_dest_object_version = ObjectVersion {
uuid: new_uuid, uuid: new_uuid,
timestamp: new_timestamp, timestamp: new_timestamp,
state: ObjectVersionState::Uploading { state: ObjectVersionState::Uploading(new_meta.headers.clone()),
headers: new_meta.headers.clone(),
multipart: false,
},
}; };
let tmp_dest_object = Object::new( let tmp_dest_object = Object::new(
dest_bucket_id, dest_bucket_id,
@ -110,14 +105,8 @@ pub async fn handle_copy(
// this means that the BlockRef entries linked to this version cannot be // this means that the BlockRef entries linked to this version cannot be
// marked as deleted (they are marked as deleted only if the Version // marked as deleted (they are marked as deleted only if the Version
// doesn't exist or is marked as deleted). // doesn't exist or is marked as deleted).
let mut dest_version = Version::new( let mut dest_version =
new_uuid, Version::new(new_uuid, dest_bucket_id, dest_key.to_string(), false);
VersionBacklink::Object {
bucket_id: dest_bucket_id,
key: dest_key.to_string(),
},
false,
);
garage.version_table.insert(&dest_version).await?; garage.version_table.insert(&dest_version).await?;
// Fill in block list for version and insert block refs // Fill in block list for version and insert block refs
@ -190,13 +179,17 @@ pub async fn handle_upload_part_copy(
) -> Result<Response<Body>, Error> { ) -> Result<Response<Body>, Error> {
let copy_precondition = CopyPreconditionHeaders::parse(req)?; let copy_precondition = CopyPreconditionHeaders::parse(req)?;
let dest_upload_id = multipart::decode_upload_id(upload_id)?; let dest_version_uuid = decode_upload_id(upload_id)?;
let dest_key = dest_key.to_string(); let dest_key = dest_key.to_string();
let (source_object, (_, _, mut dest_mpu)) = futures::try_join!( let (source_object, dest_object) = futures::try_join!(
get_copy_source(&garage, api_key, req), get_copy_source(&garage, api_key, req),
multipart::get_upload(&garage, &dest_bucket_id, &dest_key, &dest_upload_id) garage
.object_table
.get(&dest_bucket_id, &dest_key)
.map_err(Error::from),
)?; )?;
let dest_object = dest_object.ok_or(Error::NoSuchKey)?;
let (source_object_version, source_version_data, source_version_meta) = let (source_object_version, source_version_data, source_version_meta) =
extract_source_info(&source_object)?; extract_source_info(&source_object)?;
@ -224,6 +217,15 @@ pub async fn handle_upload_part_copy(
}, },
}; };
// Check destination version is indeed in uploading state
if !dest_object
.versions()
.iter()
.any(|v| v.uuid == dest_version_uuid && v.is_uploading())
{
return Err(Error::NoSuchUpload);
}
// Check source version is not inlined // Check source version is not inlined
match source_version_data { match source_version_data {
ObjectVersionData::DeleteMarker => unreachable!(), ObjectVersionData::DeleteMarker => unreachable!(),
@ -240,11 +242,23 @@ pub async fn handle_upload_part_copy(
// Fetch source version with its block list, // and destination version to check part hasn't yet been uploaded
// and destination version to check part hasn't yet been uploaded // and destination version to check part hasn't yet been uploaded
let source_version = garage let (source_version, dest_version) = futures::try_join!(
garage
.version_table .version_table
.get(&source_object_version.uuid, &EmptyKey) .get(&source_object_version.uuid, &EmptyKey),
.await? garage.version_table.get(&dest_version_uuid, &EmptyKey),
.ok_or(Error::NoSuchKey)?; )?;
let source_version = source_version.ok_or(Error::NoSuchKey)?;
// Check this part number hasn't yet been uploaded
if let Some(dv) = dest_version {
if dv.has_part_number(part_number) {
return Err(Error::bad_request(format!(
"Part number {} has already been uploaded",
part_number
)));
}
}
// We want to reuse blocks from the source version as much as possible. // We want to reuse blocks from the source version as much as possible.
// However, we still need to get the data from these blocks // However, we still need to get the data from these blocks
@ -285,33 +299,6 @@ pub async fn handle_upload_part_copy(
current_offset = block_end; current_offset = block_end;
} }
// Calculate the identity of destination part: timestamp, version id
let dest_version_id = gen_uuid();
let dest_mpu_part_key = MpuPartKey {
part_number,
timestamp: dest_mpu.next_timestamp(part_number),
};
// Create the uploaded part
dest_mpu.parts.clear();
dest_mpu.parts.put(
dest_mpu_part_key,
MpuPart {
version: dest_version_id,
etag: None,
size: None,
},
);
garage.mpu_table.insert(&dest_mpu).await?;
let mut dest_version = Version::new(
dest_version_id,
VersionBacklink::MultipartUpload {
upload_id: dest_upload_id,
},
false,
);
// Now, actually copy the blocks // Now, actually copy the blocks
let mut md5hasher = Md5::new(); let mut md5hasher = Md5::new();
@ -361,8 +348,8 @@ pub async fn handle_upload_part_copy(
let must_upload = existing_block_hash.is_none(); let must_upload = existing_block_hash.is_none();
let final_hash = existing_block_hash.unwrap_or_else(|| blake2sum(&data[..])); let final_hash = existing_block_hash.unwrap_or_else(|| blake2sum(&data[..]));
dest_version.blocks.clear(); let mut version = Version::new(dest_version_uuid, dest_bucket_id, dest_key.clone(), false);
dest_version.blocks.put( version.blocks.put(
VersionBlockKey { VersionBlockKey {
part_number, part_number,
offset: current_offset, offset: current_offset,
@ -376,7 +363,7 @@ pub async fn handle_upload_part_copy(
let block_ref = BlockRef { let block_ref = BlockRef {
block: final_hash, block: final_hash,
version: dest_version_id, version: dest_version_uuid,
deleted: false.into(), deleted: false.into(),
}; };
@ -391,33 +378,23 @@ pub async fn handle_upload_part_copy(
Ok(()) Ok(())
} }
}, },
async {
// Thing 2: we need to insert the block in the version // Thing 2: we need to insert the block in the version
garage.version_table.insert(&dest_version).await?; garage.version_table.insert(&version),
// Thing 3: we need to add a block reference // Thing 3: we need to add a block reference
garage.block_ref_table.insert(&block_ref).await garage.block_ref_table.insert(&block_ref),
},
// Thing 4: we need to prefetch the next block // Thing 4: we need to prefetch the next block
defragmenter.next(), defragmenter.next(),
)?; )?;
next_block = res.2; next_block = res.3;
} }
assert_eq!(current_offset, source_range.length);
let data_md5sum = md5hasher.finalize(); let data_md5sum = md5hasher.finalize();
let etag = hex::encode(data_md5sum); let etag = hex::encode(data_md5sum);
// Put the part's ETag in the Versiontable // Put the part's ETag in the Versiontable
dest_mpu.parts.put( let mut version = Version::new(dest_version_uuid, dest_bucket_id, dest_key.clone(), false);
dest_mpu_part_key, version.parts_etags.put(part_number, etag.clone());
MpuPart { garage.version_table.insert(&version).await?;
version: dest_version_id,
etag: Some(etag.clone()),
size: Some(current_offset),
},
);
garage.mpu_table.insert(&dest_mpu).await?;
// LGTM // LGTM
let resp_xml = s3_xml::to_xml_with_header(&CopyPartResult { let resp_xml = s3_xml::to_xml_with_header(&CopyPartResult {

View file

@ -44,11 +44,14 @@ pub async fn handle_get_cors(bucket: &Bucket) -> Result<Response<Body>, Error> {
pub async fn handle_delete_cors( pub async fn handle_delete_cors(
garage: Arc<Garage>, garage: Arc<Garage>,
mut bucket: Bucket, bucket_id: Uuid,
) -> Result<Response<Body>, Error> { ) -> Result<Response<Body>, Error> {
let param = bucket let mut bucket = garage
.params_mut() .bucket_helper()
.ok_or_internal_error("Bucket should not be deleted at this point")?; .get_existing_bucket(bucket_id)
.await?;
let param = bucket.params_mut().unwrap();
param.cors_config.update(None); param.cors_config.update(None);
garage.bucket_table.insert(&bucket).await?; garage.bucket_table.insert(&bucket).await?;
@ -60,7 +63,7 @@ pub async fn handle_delete_cors(
pub async fn handle_put_cors( pub async fn handle_put_cors(
garage: Arc<Garage>, garage: Arc<Garage>,
mut bucket: Bucket, bucket_id: Uuid,
req: Request<Body>, req: Request<Body>,
content_sha256: Option<Hash>, content_sha256: Option<Hash>,
) -> Result<Response<Body>, Error> { ) -> Result<Response<Body>, Error> {
@ -70,9 +73,12 @@ pub async fn handle_put_cors(
verify_signed_content(content_sha256, &body[..])?; verify_signed_content(content_sha256, &body[..])?;
} }
let param = bucket let mut bucket = garage
.params_mut() .bucket_helper()
.ok_or_internal_error("Bucket should not be deleted at this point")?; .get_existing_bucket(bucket_id)
.await?;
let param = bucket.params_mut().unwrap();
let conf: CorsConfiguration = from_reader(&body as &[u8])?; let conf: CorsConfiguration = from_reader(&body as &[u8])?;
conf.validate()?; conf.validate()?;

View file

@ -149,6 +149,7 @@ pub async fn handle_head(
let (part_offset, part_end) = let (part_offset, part_end) =
calculate_part_bounds(&version, pn).ok_or(Error::InvalidPart)?; calculate_part_bounds(&version, pn).ok_or(Error::InvalidPart)?;
let n_parts = version.parts_etags.items().len();
Ok(object_headers(object_version, version_meta) Ok(object_headers(object_version, version_meta)
.header(CONTENT_LENGTH, format!("{}", part_end - part_offset)) .header(CONTENT_LENGTH, format!("{}", part_end - part_offset))
@ -161,7 +162,7 @@ pub async fn handle_head(
version_meta.size version_meta.size
), ),
) )
.header(X_AMZ_MP_PARTS_COUNT, format!("{}", version.n_parts()?)) .header(X_AMZ_MP_PARTS_COUNT, format!("{}", n_parts))
.status(StatusCode::PARTIAL_CONTENT) .status(StatusCode::PARTIAL_CONTENT)
.body(Body::empty())?) .body(Body::empty())?)
} }
@ -375,6 +376,7 @@ async fn handle_get_part(
let (begin, end) = let (begin, end) =
calculate_part_bounds(&version, part_number).ok_or(Error::InvalidPart)?; calculate_part_bounds(&version, part_number).ok_or(Error::InvalidPart)?;
let n_parts = version.parts_etags.items().len();
let body = body_from_blocks_range(garage, version.blocks.items(), begin, end); let body = body_from_blocks_range(garage, version.blocks.items(), begin, end);
@ -384,7 +386,7 @@ async fn handle_get_part(
CONTENT_RANGE, CONTENT_RANGE,
format!("bytes {}-{}/{}", begin, end - 1, version_meta.size), format!("bytes {}-{}/{}", begin, end - 1, version_meta.size),
) )
.header(X_AMZ_MP_PARTS_COUNT, format!("{}", version.n_parts()?)) .header(X_AMZ_MP_PARTS_COUNT, format!("{}", n_parts))
.body(body)?) .body(body)?)
} }
_ => unreachable!(), _ => unreachable!(),

View file

@ -1,401 +0,0 @@
use quick_xml::de::from_reader;
use std::sync::Arc;
use hyper::{Body, Request, Response, StatusCode};
use serde::{Deserialize, Serialize};
use crate::s3::error::*;
use crate::s3::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
use crate::signature::verify_signed_content;
use garage_model::bucket_table::{
parse_lifecycle_date, Bucket, LifecycleExpiration as GarageLifecycleExpiration,
LifecycleFilter as GarageLifecycleFilter, LifecycleRule as GarageLifecycleRule,
};
use garage_model::garage::Garage;
use garage_util::data::*;
pub async fn handle_get_lifecycle(bucket: &Bucket) -> Result<Response<Body>, Error> {
let param = bucket
.params()
.ok_or_internal_error("Bucket should not be deleted at this point")?;
if let Some(lifecycle) = param.lifecycle_config.get() {
let wc = LifecycleConfiguration::from_garage_lifecycle_config(lifecycle);
let xml = to_xml_with_header(&wc)?;
Ok(Response::builder()
.status(StatusCode::OK)
.header(http::header::CONTENT_TYPE, "application/xml")
.body(Body::from(xml))?)
} else {
Ok(Response::builder()
.status(StatusCode::NO_CONTENT)
.body(Body::empty())?)
}
}
pub async fn handle_delete_lifecycle(
garage: Arc<Garage>,
mut bucket: Bucket,
) -> Result<Response<Body>, Error> {
let param = bucket
.params_mut()
.ok_or_internal_error("Bucket should not be deleted at this point")?;
param.lifecycle_config.update(None);
garage.bucket_table.insert(&bucket).await?;
Ok(Response::builder()
.status(StatusCode::NO_CONTENT)
.body(Body::empty())?)
}
pub async fn handle_put_lifecycle(
garage: Arc<Garage>,
mut bucket: Bucket,
req: Request<Body>,
content_sha256: Option<Hash>,
) -> Result<Response<Body>, Error> {
let body = hyper::body::to_bytes(req.into_body()).await?;
if let Some(content_sha256) = content_sha256 {
verify_signed_content(content_sha256, &body[..])?;
}
let param = bucket
.params_mut()
.ok_or_internal_error("Bucket should not be deleted at this point")?;
let conf: LifecycleConfiguration = from_reader(&body as &[u8])?;
let config = conf
.validate_into_garage_lifecycle_config()
.ok_or_bad_request("Invalid lifecycle configuration")?;
param.lifecycle_config.update(Some(config));
garage.bucket_table.insert(&bucket).await?;
Ok(Response::builder()
.status(StatusCode::OK)
.body(Body::empty())?)
}
// ---- SERIALIZATION AND DESERIALIZATION TO/FROM S3 XML ----
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
pub struct LifecycleConfiguration {
#[serde(serialize_with = "xmlns_tag", skip_deserializing)]
pub xmlns: (),
#[serde(rename = "Rule")]
pub lifecycle_rules: Vec<LifecycleRule>,
}
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
pub struct LifecycleRule {
#[serde(rename = "ID")]
pub id: Option<Value>,
#[serde(rename = "Status")]
pub status: Value,
#[serde(rename = "Filter", default)]
pub filter: Option<Filter>,
#[serde(rename = "Expiration", default)]
pub expiration: Option<Expiration>,
#[serde(rename = "AbortIncompleteMultipartUpload", default)]
pub abort_incomplete_mpu: Option<AbortIncompleteMpu>,
}
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Default)]
pub struct Filter {
#[serde(rename = "And")]
pub and: Option<Box<Filter>>,
#[serde(rename = "Prefix")]
pub prefix: Option<Value>,
#[serde(rename = "ObjectSizeGreaterThan")]
pub size_gt: Option<IntValue>,
#[serde(rename = "ObjectSizeLessThan")]
pub size_lt: Option<IntValue>,
}
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
pub struct Expiration {
#[serde(rename = "Days")]
pub days: Option<IntValue>,
#[serde(rename = "Date")]
pub at_date: Option<Value>,
}
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
pub struct AbortIncompleteMpu {
#[serde(rename = "DaysAfterInitiation")]
pub days: IntValue,
}
impl LifecycleConfiguration {
pub fn validate_into_garage_lifecycle_config(
self,
) -> Result<Vec<GarageLifecycleRule>, &'static str> {
let mut ret = vec![];
for rule in self.lifecycle_rules {
ret.push(rule.validate_into_garage_lifecycle_rule()?);
}
Ok(ret)
}
pub fn from_garage_lifecycle_config(config: &[GarageLifecycleRule]) -> Self {
Self {
xmlns: (),
lifecycle_rules: config
.iter()
.map(LifecycleRule::from_garage_lifecycle_rule)
.collect(),
}
}
}
impl LifecycleRule {
pub fn validate_into_garage_lifecycle_rule(self) -> Result<GarageLifecycleRule, &'static str> {
let enabled = match self.status.0.as_str() {
"Enabled" => true,
"Disabled" => false,
_ => return Err("invalid value for <Status>"),
};
let filter = self
.filter
.map(Filter::validate_into_garage_lifecycle_filter)
.transpose()?
.unwrap_or_default();
let abort_incomplete_mpu_days = self.abort_incomplete_mpu.map(|x| x.days.0 as usize);
let expiration = self
.expiration
.map(Expiration::validate_into_garage_lifecycle_expiration)
.transpose()?;
Ok(GarageLifecycleRule {
id: self.id.map(|x| x.0),
enabled,
filter,
abort_incomplete_mpu_days,
expiration,
})
}
pub fn from_garage_lifecycle_rule(rule: &GarageLifecycleRule) -> Self {
Self {
id: rule.id.as_deref().map(Value::from),
status: if rule.enabled {
Value::from("Enabled")
} else {
Value::from("Disabled")
},
filter: Filter::from_garage_lifecycle_filter(&rule.filter),
abort_incomplete_mpu: rule
.abort_incomplete_mpu_days
.map(|days| AbortIncompleteMpu {
days: IntValue(days as i64),
}),
expiration: rule
.expiration
.as_ref()
.map(Expiration::from_garage_lifecycle_expiration),
}
}
}
impl Filter {
pub fn count(&self) -> i32 {
fn count<T>(x: &Option<T>) -> i32 {
x.as_ref().map(|_| 1).unwrap_or(0)
}
count(&self.prefix) + count(&self.size_gt) + count(&self.size_lt)
}
pub fn validate_into_garage_lifecycle_filter(
self,
) -> Result<GarageLifecycleFilter, &'static str> {
if self.count() > 0 && self.and.is_some() {
Err("Filter tag cannot contain both <And> and another condition")
} else if let Some(and) = self.and {
if and.and.is_some() {
return Err("Nested <And> tags");
}
Ok(and.internal_into_garage_lifecycle_filter())
} else if self.count() > 1 {
Err("Multiple Filter conditions must be wrapped in an <And> tag")
} else {
Ok(self.internal_into_garage_lifecycle_filter())
}
}
fn internal_into_garage_lifecycle_filter(self) -> GarageLifecycleFilter {
GarageLifecycleFilter {
prefix: self.prefix.map(|x| x.0),
size_gt: self.size_gt.map(|x| x.0 as u64),
size_lt: self.size_lt.map(|x| x.0 as u64),
}
}
pub fn from_garage_lifecycle_filter(rule: &GarageLifecycleFilter) -> Option<Self> {
let filter = Filter {
and: None,
prefix: rule.prefix.as_deref().map(Value::from),
size_gt: rule.size_gt.map(|x| IntValue(x as i64)),
size_lt: rule.size_lt.map(|x| IntValue(x as i64)),
};
match filter.count() {
0 => None,
1 => Some(filter),
_ => Some(Filter {
and: Some(Box::new(filter)),
..Default::default()
}),
}
}
}
impl Expiration {
pub fn validate_into_garage_lifecycle_expiration(
self,
) -> Result<GarageLifecycleExpiration, &'static str> {
match (self.days, self.at_date) {
(Some(_), Some(_)) => Err("cannot have both <Days> and <Date> in <Expiration>"),
(None, None) => Err("<Expiration> must contain either <Days> or <Date>"),
(Some(days), None) => Ok(GarageLifecycleExpiration::AfterDays(days.0 as usize)),
(None, Some(date)) => {
parse_lifecycle_date(&date.0)?;
Ok(GarageLifecycleExpiration::AtDate(date.0))
}
}
}
pub fn from_garage_lifecycle_expiration(exp: &GarageLifecycleExpiration) -> Self {
match exp {
GarageLifecycleExpiration::AfterDays(days) => Expiration {
days: Some(IntValue(*days as i64)),
at_date: None,
},
GarageLifecycleExpiration::AtDate(date) => Expiration {
days: None,
at_date: Some(Value(date.to_string())),
},
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use quick_xml::de::from_str;
#[test]
fn test_deserialize_lifecycle_config() -> Result<(), Error> {
let message = r#"<?xml version="1.0" encoding="UTF-8"?>
<LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Rule>
<ID>id1</ID>
<Status>Enabled</Status>
<Filter>
<Prefix>documents/</Prefix>
</Filter>
<AbortIncompleteMultipartUpload>
<DaysAfterInitiation>7</DaysAfterInitiation>
</AbortIncompleteMultipartUpload>
</Rule>
<Rule>
<ID>id2</ID>
<Status>Enabled</Status>
<Filter>
<And>
<Prefix>logs/</Prefix>
<ObjectSizeGreaterThan>1000000</ObjectSizeGreaterThan>
</And>
</Filter>
<Expiration>
<Days>365</Days>
</Expiration>
</Rule>
</LifecycleConfiguration>"#;
let conf: LifecycleConfiguration = from_str(message).unwrap();
let ref_value = LifecycleConfiguration {
xmlns: (),
lifecycle_rules: vec![
LifecycleRule {
id: Some("id1".into()),
status: "Enabled".into(),
filter: Some(Filter {
prefix: Some("documents/".into()),
..Default::default()
}),
expiration: None,
abort_incomplete_mpu: Some(AbortIncompleteMpu { days: IntValue(7) }),
},
LifecycleRule {
id: Some("id2".into()),
status: "Enabled".into(),
filter: Some(Filter {
and: Some(Box::new(Filter {
prefix: Some("logs/".into()),
size_gt: Some(IntValue(1000000)),
..Default::default()
})),
..Default::default()
}),
expiration: Some(Expiration {
days: Some(IntValue(365)),
at_date: None,
}),
abort_incomplete_mpu: None,
},
],
};
assert_eq! {
ref_value,
conf
};
let message2 = to_xml_with_header(&ref_value)?;
let cleanup = |c: &str| c.replace(char::is_whitespace, "");
assert_eq!(cleanup(message), cleanup(&message2));
// Check validation
let validated = ref_value
.validate_into_garage_lifecycle_config()
.ok_or_bad_request("invalid xml config")?;
let ref_config = vec![
GarageLifecycleRule {
id: Some("id1".into()),
enabled: true,
filter: GarageLifecycleFilter {
prefix: Some("documents/".into()),
..Default::default()
},
expiration: None,
abort_incomplete_mpu_days: Some(7),
},
GarageLifecycleRule {
id: Some("id2".into()),
enabled: true,
filter: GarageLifecycleFilter {
prefix: Some("logs/".into()),
size_gt: Some(1000000),
..Default::default()
},
expiration: Some(GarageLifecycleExpiration::AfterDays(365)),
abort_incomplete_mpu_days: None,
},
];
assert_eq!(validated, ref_config);
let message3 = to_xml_with_header(&LifecycleConfiguration::from_garage_lifecycle_config(
&validated,
))?;
assert_eq!(cleanup(message), cleanup(&message3));
Ok(())
}
}

View file

@ -1,3 +1,4 @@
use std::cmp::Ordering;
use std::collections::{BTreeMap, BTreeSet}; use std::collections::{BTreeMap, BTreeSet};
use std::iter::{Iterator, Peekable}; use std::iter::{Iterator, Peekable};
use std::sync::Arc; use std::sync::Arc;
@ -10,15 +11,15 @@ use garage_util::error::Error as GarageError;
use garage_util::time::*; use garage_util::time::*;
use garage_model::garage::Garage; use garage_model::garage::Garage;
use garage_model::s3::mpu_table::*;
use garage_model::s3::object_table::*; use garage_model::s3::object_table::*;
use garage_model::s3::version_table::Version;
use garage_table::EnumerationOrder; use garage_table::{EmptyKey, EnumerationOrder};
use crate::encoding::*; use crate::encoding::*;
use crate::helpers::key_after_prefix; use crate::helpers::key_after_prefix;
use crate::s3::error::*; use crate::s3::error::*;
use crate::s3::multipart as s3_multipart; use crate::s3::put as s3_put;
use crate::s3::xml as s3_xml; use crate::s3::xml as s3_xml;
const DUMMY_NAME: &str = "Dummy Key"; const DUMMY_NAME: &str = "Dummy Key";
@ -175,9 +176,7 @@ pub async fn handle_list_multipart_upload(
t.get_range( t.get_range(
&bucket, &bucket,
key, key,
Some(ObjectFilter::IsUploading { Some(ObjectFilter::IsUploading),
check_multipart: Some(true),
}),
count, count,
EnumerationOrder::Forward, EnumerationOrder::Forward,
) )
@ -273,26 +272,24 @@ pub async fn handle_list_parts(
) -> Result<Response<Body>, Error> { ) -> Result<Response<Body>, Error> {
debug!("ListParts {:?}", query); debug!("ListParts {:?}", query);
let upload_id = s3_multipart::decode_upload_id(&query.upload_id)?; let upload_id = s3_put::decode_upload_id(&query.upload_id)?;
let (_, _, mpu) = let (object, version) = futures::try_join!(
s3_multipart::get_upload(&garage, &query.bucket_id, &query.key, &upload_id).await?; garage.object_table.get(&query.bucket_id, &query.key),
garage.version_table.get(&upload_id, &EmptyKey),
)?;
let (info, next) = fetch_part_info(query, &mpu)?; let (info, next) = fetch_part_info(query, object, version, upload_id)?;
let result = s3_xml::ListPartsResult { let result = s3_xml::ListPartsResult {
xmlns: (), xmlns: (),
// Query parameters
bucket: s3_xml::Value(query.bucket_name.to_string()), bucket: s3_xml::Value(query.bucket_name.to_string()),
key: s3_xml::Value(query.key.to_string()), key: s3_xml::Value(query.key.to_string()),
upload_id: s3_xml::Value(query.upload_id.to_string()), upload_id: s3_xml::Value(query.upload_id.to_string()),
part_number_marker: query.part_number_marker.map(|e| s3_xml::IntValue(e as i64)), part_number_marker: query.part_number_marker.map(|e| s3_xml::IntValue(e as i64)),
max_parts: s3_xml::IntValue(query.max_parts as i64),
// Result values
next_part_number_marker: next.map(|e| s3_xml::IntValue(e as i64)), next_part_number_marker: next.map(|e| s3_xml::IntValue(e as i64)),
is_truncated: s3_xml::Value(format!("{}", next.is_some())), max_parts: s3_xml::IntValue(query.max_parts as i64),
is_truncated: s3_xml::Value(next.map(|_| "true").unwrap_or("false").to_string()),
parts: info parts: info
.iter() .iter()
.map(|part| s3_xml::PartItem { .map(|part| s3_xml::PartItem {
@ -302,8 +299,6 @@ pub async fn handle_list_parts(
size: s3_xml::IntValue(part.size as i64), size: s3_xml::IntValue(part.size as i64),
}) })
.collect(), .collect(),
// Dummy result values (unsupported features)
initiator: s3_xml::Initiator { initiator: s3_xml::Initiator {
display_name: s3_xml::Value(DUMMY_NAME.to_string()), display_name: s3_xml::Value(DUMMY_NAME.to_string()),
id: s3_xml::Value(DUMMY_KEY.to_string()), id: s3_xml::Value(DUMMY_KEY.to_string()),
@ -340,8 +335,8 @@ struct UploadInfo {
} }
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
struct PartInfo<'a> { struct PartInfo {
etag: &'a str, etag: String,
timestamp: u64, timestamp: u64,
part_number: u64, part_number: u64,
size: u64, size: u64,
@ -461,51 +456,107 @@ where
} }
} }
fn fetch_part_info<'a>( fn fetch_part_info(
query: &ListPartsQuery, query: &ListPartsQuery,
mpu: &'a MultipartUpload, object: Option<Object>,
) -> Result<(Vec<PartInfo<'a>>, Option<u64>), Error> { version: Option<Version>,
assert!((1..=1000).contains(&query.max_parts)); // see s3/api_server.rs upload_id: Uuid,
) -> Result<(Vec<PartInfo>, Option<u64>), Error> {
// Check results
let object = object.ok_or(Error::NoSuchKey)?;
// Parse multipart upload part list, removing parts not yet finished let obj_version = object
// and failed part uploads that were overwritten .versions()
let mut parts: Vec<PartInfo<'a>> = Vec::with_capacity(mpu.parts.items().len()); .iter()
for (pk, p) in mpu.parts.items().iter() { .find(|v| v.uuid == upload_id && v.is_uploading())
if let (Some(etag), Some(size)) = (&p.etag, p.size) { .ok_or(Error::NoSuchUpload)?;
let part_info = PartInfo {
part_number: pk.part_number,
timestamp: pk.timestamp,
etag,
size,
};
match parts.last_mut() {
Some(lastpart) if lastpart.part_number == pk.part_number => {
*lastpart = part_info;
}
_ => {
parts.push(part_info);
}
}
}
}
// Cut the beginning if we have a marker let version = version.ok_or(Error::NoSuchKey)?;
if let Some(marker) = &query.part_number_marker {
// Cut the beginning of our 2 vectors if required
let (etags, blocks) = match &query.part_number_marker {
Some(marker) => {
let next = marker + 1; let next = marker + 1;
let part_idx = parts
.binary_search_by(|part| part.part_number.cmp(&next)) let part_idx = into_ok_or_err(
.unwrap_or_else(|x| x); version
parts = parts.split_off(part_idx); .parts_etags
.items()
.binary_search_by(|(part_num, _)| part_num.cmp(&next)),
);
let parts = &version.parts_etags.items()[part_idx..];
let block_idx = into_ok_or_err(
version
.blocks
.items()
.binary_search_by(|(vkey, _)| vkey.part_number.cmp(&next)),
);
let blocks = &version.blocks.items()[block_idx..];
(parts, blocks)
}
None => (version.parts_etags.items(), version.blocks.items()),
};
// Use the block vector to compute a (part_number, size) vector
let mut size = Vec::<(u64, u64)>::new();
blocks.iter().for_each(|(key, val)| {
let mut new_size = val.size;
match size.pop() {
Some((part_number, size)) if part_number == key.part_number => new_size += size,
Some(v) => size.push(v),
None => (),
}
size.push((key.part_number, new_size))
});
// Merge the etag vector and size vector to build a PartInfo vector
let max_parts = query.max_parts as usize;
let (mut etag_iter, mut size_iter) = (etags.iter().peekable(), size.iter().peekable());
let mut info = Vec::<PartInfo>::with_capacity(max_parts);
while info.len() < max_parts {
match (etag_iter.peek(), size_iter.peek()) {
(Some((ep, etag)), Some((sp, size))) => match ep.cmp(sp) {
Ordering::Less => {
debug!("ETag information ignored due to missing corresponding block information. Query: {:?}", query);
etag_iter.next();
}
Ordering::Equal => {
info.push(PartInfo {
etag: etag.to_string(),
timestamp: obj_version.timestamp,
part_number: *ep,
size: *size,
});
etag_iter.next();
size_iter.next();
}
Ordering::Greater => {
debug!("Block information ignored due to missing corresponding ETag information. Query: {:?}", query);
size_iter.next();
}
},
(None, None) => return Ok((info, None)),
_ => {
debug!(
"Additional block or ETag information ignored. Query: {:?}",
query
);
return Ok((info, None));
}
}
} }
// Cut the end if we have too many parts match info.last() {
if parts.len() > query.max_parts as usize { Some(part_info) => {
parts.truncate(query.max_parts as usize); let pagination = Some(part_info.part_number);
let pagination = Some(parts.last().unwrap().part_number); Ok((info, pagination))
return Ok((parts, pagination)); }
None => Ok((info, None)),
} }
Ok((parts, None))
} }
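The mpu-table variant of fetch_part_info above first drops parts that never finished uploading (no etag/size yet) and, when the same part number was uploaded several times, keeps only the most recent attempt before applying the marker and max_parts cut-offs. A standalone sketch of that dedup step, with a flattened (part_number, Option<etag>) list standing in for mpu.parts.items():

#[derive(Debug, PartialEq)]
struct PartInfo<'a> {
    part_number: u64,
    etag: &'a str,
}

// Keep only finished parts (those with an etag), and when a part number was
// re-uploaded, keep the latest entry. Input is sorted by (part_number, timestamp).
fn finished_parts<'a>(items: &'a [(u64, Option<&'a str>)]) -> Vec<PartInfo<'a>> {
    let mut parts: Vec<PartInfo<'a>> = Vec::with_capacity(items.len());
    for (part_number, etag) in items {
        if let Some(etag) = *etag {
            let info = PartInfo { part_number: *part_number, etag };
            match parts.last_mut() {
                Some(last) if last.part_number == *part_number => *last = info,
                _ => parts.push(info),
            }
        }
    }
    parts
}

fn main() {
    // Part 1 was uploaded twice; the second attempt wins. Part 2 never finished.
    let items = [(1, Some("old")), (1, Some("new")), (2, None), (3, Some("etag3"))];
    let parts = finished_parts(&items);
    assert_eq!(parts.len(), 2);
    assert_eq!(parts[0].etag, "new");
    assert_eq!(parts[1].part_number, 3);
}

Since the items are sorted by part number (and by timestamp within a part), comparing only against the last pushed entry is enough to keep the latest attempt per part.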
/* /*
@ -600,7 +651,7 @@ impl ListMultipartUploadsQuery {
}), }),
uuid => Ok(RangeBegin::AfterUpload { uuid => Ok(RangeBegin::AfterUpload {
key: key_marker.to_string(), key: key_marker.to_string(),
upload: s3_multipart::decode_upload_id(uuid)?, upload: s3_put::decode_upload_id(uuid)?,
}), }),
}, },
@ -792,7 +843,7 @@ impl ExtractAccumulator for UploadAccumulator {
let mut uploads_for_key = object let mut uploads_for_key = object
.versions() .versions()
.iter() .iter()
.filter(|x| x.is_uploading(Some(true))) .filter(|x| x.is_uploading())
.collect::<Vec<&ObjectVersion>>(); .collect::<Vec<&ObjectVersion>>();
// S3 logic requires lexicographically sorted upload ids. // S3 logic requires lexicographically sorted upload ids.
@ -867,6 +918,14 @@ impl ExtractAccumulator for UploadAccumulator {
* Utility functions * Utility functions
*/ */
/// This is a stub for Result::into_ok_or_err that is not yet in Rust stable
fn into_ok_or_err<T>(r: Result<T, T>) -> T {
match r {
Ok(r) => r,
Err(r) => r,
}
}
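into_ok_or_err above collapses the Result<usize, usize> returned by slice::binary_search_by, so that whether or not the requested part-number marker exists, the code gets the index at which the listing should resume; the other variant of the code obtains the same index with .unwrap_or_else(|x| x). A minimal standalone sketch of the idiom, on made-up part numbers:

fn into_ok_or_err<T>(r: Result<T, T>) -> T {
    match r {
        Ok(v) => v,
        Err(v) => v,
    }
}

fn main() {
    // Sorted list of (part_number, etag) pairs, as stored in parts_etags.
    let parts = [(1u64, "etag1"), (3, "etag2"), (5, "etag3"), (8, "etag4")];

    // Resume listing strictly after part 3: search for 4.
    let next = 4u64;
    let idx = into_ok_or_err(parts.binary_search_by(|(n, _)| n.cmp(&next)));
    assert_eq!(idx, 2); // 4 is absent, the insertion point is right before part 5

    // Searching for an existing part number lands on it directly.
    let idx = into_ok_or_err(parts.binary_search_by(|(n, _)| n.cmp(&5)));
    assert_eq!(idx, 2);
}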
/// Returns the common prefix of the object given the query prefix and delimiter /// Returns the common prefix of the object given the query prefix and delimiter
fn common_prefix<'a>(object: &'a Object, query: &ListQueryCommon) -> Option<&'a str> { fn common_prefix<'a>(object: &'a Object, query: &ListQueryCommon) -> Option<&'a str> {
match &query.delimiter { match &query.delimiter {
@ -892,6 +951,7 @@ fn uriencode_maybe(s: &str, yes: bool) -> s3_xml::Value {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use garage_model::s3::version_table::*;
use garage_util::*; use garage_util::*;
use std::iter::FromIterator; use std::iter::FromIterator;
@ -931,13 +991,10 @@ mod tests {
ObjectVersion { ObjectVersion {
uuid: Uuid::from(uuid), uuid: Uuid::from(uuid),
timestamp: TS, timestamp: TS,
state: ObjectVersionState::Uploading { state: ObjectVersionState::Uploading(ObjectVersionHeaders {
multipart: true,
headers: ObjectVersionHeaders {
content_type: "text/plain".to_string(), content_type: "text/plain".to_string(),
other: BTreeMap::<String, String>::new(), other: BTreeMap::<String, String>::new(),
}, }),
},
} }
} }
@ -1112,77 +1169,83 @@ mod tests {
Ok(()) Ok(())
} }
fn mpu() -> MultipartUpload { fn version() -> Version {
let uuid = Uuid::from([0x08; 32]); let uuid = Uuid::from([0x08; 32]);
let parts = vec![ let blocks = vec![
( (
MpuPartKey { VersionBlockKey {
part_number: 1, part_number: 1,
timestamp: TS, offset: 1,
}, },
MpuPart { VersionBlock {
version: uuid, hash: uuid,
size: Some(3), size: 3,
etag: Some("etag1".into()),
}, },
), ),
( (
MpuPartKey { VersionBlockKey {
part_number: 1,
offset: 2,
},
VersionBlock {
hash: uuid,
size: 2,
},
),
(
VersionBlockKey {
part_number: 2, part_number: 2,
timestamp: TS, offset: 1,
}, },
MpuPart { VersionBlock {
version: uuid, hash: uuid,
size: None, size: 8,
etag: None,
}, },
), ),
( (
MpuPartKey { VersionBlockKey {
part_number: 3,
timestamp: TS,
},
MpuPart {
version: uuid,
size: Some(10),
etag: Some("etag2".into()),
},
),
(
MpuPartKey {
part_number: 5, part_number: 5,
timestamp: TS, offset: 1,
}, },
MpuPart { VersionBlock {
version: uuid, hash: uuid,
size: Some(7), size: 7,
etag: Some("etag3".into()),
}, },
), ),
( (
MpuPartKey { VersionBlockKey {
part_number: 8, part_number: 8,
timestamp: TS, offset: 1,
}, },
MpuPart { VersionBlock {
version: uuid, hash: uuid,
size: Some(5), size: 5,
etag: Some("etag4".into()),
}, },
), ),
]; ];
let etags = vec![
(1, "etag1".to_string()),
(3, "etag2".to_string()),
(5, "etag3".to_string()),
(8, "etag4".to_string()),
(9, "etag5".to_string()),
];
MultipartUpload { Version {
upload_id: uuid,
timestamp: TS,
deleted: false.into(),
parts: crdt::Map::<MpuPartKey, MpuPart>::from_iter(parts),
bucket_id: uuid, bucket_id: uuid,
key: "a".into(), key: "a".to_string(),
uuid,
deleted: false.into(),
blocks: crdt::Map::<VersionBlockKey, VersionBlock>::from_iter(blocks),
parts_etags: crdt::Map::<u64, String>::from_iter(etags),
} }
} }
fn obj() -> Object {
Object::new(bucket(), "d".to_string(), vec![objup_version([0x08; 32])])
}
#[test] #[test]
fn test_fetch_part_info() -> Result<(), Error> { fn test_fetch_part_info() -> Result<(), Error> {
let uuid = Uuid::from([0x08; 32]); let uuid = Uuid::from([0x08; 32]);
@ -1195,85 +1258,82 @@ mod tests {
max_parts: 2, max_parts: 2,
}; };
let mpu = mpu(); assert!(
fetch_part_info(&query, None, None, uuid).is_err(),
"No object and version should fail"
);
assert!(
fetch_part_info(&query, Some(obj()), None, uuid).is_err(),
"No version should faild"
);
assert!(
fetch_part_info(&query, None, Some(version()), uuid).is_err(),
"No object should fail"
);
// Start from the beginning but with limited size to trigger pagination // Start from the beginning but with limited size to trigger pagination
let (info, pagination) = fetch_part_info(&query, &mpu)?; let (info, pagination) = fetch_part_info(&query, Some(obj()), Some(version()), uuid)?;
assert_eq!(pagination.unwrap(), 3); assert_eq!(pagination.unwrap(), 5);
assert_eq!( assert_eq!(
info, info,
vec![ vec![
PartInfo { PartInfo {
etag: "etag1", etag: "etag1".to_string(),
timestamp: TS, timestamp: TS,
part_number: 1, part_number: 1,
size: 3 size: 5
}, },
PartInfo { PartInfo {
etag: "etag2", etag: "etag3".to_string(),
timestamp: TS, timestamp: TS,
part_number: 3, part_number: 5,
size: 10 size: 7
}, },
] ]
); );
// Use previous pagination to make a new request // Use previous pagination to make a new request
query.part_number_marker = Some(pagination.unwrap()); query.part_number_marker = Some(pagination.unwrap());
let (info, pagination) = fetch_part_info(&query, &mpu)?; let (info, pagination) = fetch_part_info(&query, Some(obj()), Some(version()), uuid)?;
assert!(pagination.is_none()); assert!(pagination.is_none());
assert_eq!( assert_eq!(
info, info,
vec![ vec![PartInfo {
PartInfo { etag: "etag4".to_string(),
etag: "etag3",
timestamp: TS,
part_number: 5,
size: 7
},
PartInfo {
etag: "etag4",
timestamp: TS, timestamp: TS,
part_number: 8, part_number: 8,
size: 5 size: 5
}, },]
]
); );
// Trying to access a part that is way larger than registered ones // Trying to access a part that is way larger than registered ones
query.part_number_marker = Some(9999); query.part_number_marker = Some(9999);
let (info, pagination) = fetch_part_info(&query, &mpu)?; let (info, pagination) = fetch_part_info(&query, Some(obj()), Some(version()), uuid)?;
assert!(pagination.is_none()); assert!(pagination.is_none());
assert_eq!(info, vec![]); assert_eq!(info, vec![]);
// Try without any limitation // Try without any limitation
query.max_parts = 1000; query.max_parts = 1000;
query.part_number_marker = None; query.part_number_marker = None;
let (info, pagination) = fetch_part_info(&query, &mpu)?; let (info, pagination) = fetch_part_info(&query, Some(obj()), Some(version()), uuid)?;
assert!(pagination.is_none()); assert!(pagination.is_none());
assert_eq!( assert_eq!(
info, info,
vec![ vec![
PartInfo { PartInfo {
etag: "etag1", etag: "etag1".to_string(),
timestamp: TS, timestamp: TS,
part_number: 1, part_number: 1,
size: 3 size: 5
}, },
PartInfo { PartInfo {
etag: "etag2", etag: "etag3".to_string(),
timestamp: TS,
part_number: 3,
size: 10
},
PartInfo {
etag: "etag3",
timestamp: TS, timestamp: TS,
part_number: 5, part_number: 5,
size: 7 size: 7
}, },
PartInfo { PartInfo {
etag: "etag4", etag: "etag4".to_string(),
timestamp: TS, timestamp: TS,
part_number: 8, part_number: 8,
size: 5 size: 5

View file

@ -6,9 +6,7 @@ mod copy;
pub mod cors; pub mod cors;
mod delete; mod delete;
pub mod get; pub mod get;
mod lifecycle;
mod list; mod list;
mod multipart;
mod post_object; mod post_object;
mod put; mod put;
mod website; mod website;

View file

@ -1,465 +0,0 @@
use std::collections::HashMap;
use std::sync::Arc;
use futures::prelude::*;
use hyper::body::Body;
use hyper::{Request, Response};
use md5::{Digest as Md5Digest, Md5};
use garage_table::*;
use garage_util::async_hash::*;
use garage_util::data::*;
use garage_util::time::*;
use garage_model::bucket_table::Bucket;
use garage_model::garage::Garage;
use garage_model::s3::block_ref_table::*;
use garage_model::s3::mpu_table::*;
use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*;
use crate::s3::error::*;
use crate::s3::put::*;
use crate::s3::xml as s3_xml;
use crate::signature::verify_signed_content;
// ----
pub async fn handle_create_multipart_upload(
garage: Arc<Garage>,
req: &Request<Body>,
bucket_name: &str,
bucket_id: Uuid,
key: &str,
) -> Result<Response<Body>, Error> {
let upload_id = gen_uuid();
let timestamp = now_msec();
let headers = get_headers(req.headers())?;
// Create object in object table
let object_version = ObjectVersion {
uuid: upload_id,
timestamp,
state: ObjectVersionState::Uploading {
multipart: true,
headers,
},
};
let object = Object::new(bucket_id, key.to_string(), vec![object_version]);
garage.object_table.insert(&object).await?;
// Create multipart upload in mpu table
// This multipart upload will hold references to uploaded parts
// (which are entries in the Version table)
let mpu = MultipartUpload::new(upload_id, timestamp, bucket_id, key.into(), false);
garage.mpu_table.insert(&mpu).await?;
// Send success response
let result = s3_xml::InitiateMultipartUploadResult {
xmlns: (),
bucket: s3_xml::Value(bucket_name.to_string()),
key: s3_xml::Value(key.to_string()),
upload_id: s3_xml::Value(hex::encode(upload_id)),
};
let xml = s3_xml::to_xml_with_header(&result)?;
Ok(Response::new(Body::from(xml.into_bytes())))
}
pub async fn handle_put_part(
garage: Arc<Garage>,
req: Request<Body>,
bucket_id: Uuid,
key: &str,
part_number: u64,
upload_id: &str,
content_sha256: Option<Hash>,
) -> Result<Response<Body>, Error> {
let upload_id = decode_upload_id(upload_id)?;
let content_md5 = match req.headers().get("content-md5") {
Some(x) => Some(x.to_str()?.to_string()),
None => None,
};
// Read first chunk, and at the same time try to get the object to see if it exists
let key = key.to_string();
let body = req.into_body().map_err(Error::from);
let mut chunker = StreamChunker::new(body, garage.config.block_size);
let ((_, _, mut mpu), first_block) = futures::try_join!(
get_upload(&garage, &bucket_id, &key, &upload_id),
chunker.next(),
)?;
// Check object is valid and part can be accepted
let first_block = first_block.ok_or_bad_request("Empty body")?;
// Calculate part identity: timestamp, version id
let version_uuid = gen_uuid();
let mpu_part_key = MpuPartKey {
part_number,
timestamp: mpu.next_timestamp(part_number),
};
// The following consists in many steps that can each fail.
// Keep track that some cleanup will be needed if things fail
// before everything is finished (cleanup is done using the Drop trait).
let mut interrupted_cleanup = InterruptedCleanup(Some(InterruptedCleanupInner {
garage: garage.clone(),
upload_id,
version_uuid,
}));
// Create version and link version from MPU
mpu.parts.clear();
mpu.parts.put(
mpu_part_key,
MpuPart {
version: version_uuid,
etag: None,
size: None,
},
);
garage.mpu_table.insert(&mpu).await?;
let version = Version::new(
version_uuid,
VersionBacklink::MultipartUpload { upload_id },
false,
);
garage.version_table.insert(&version).await?;
// Copy data to version
let first_block_hash = async_blake2sum(first_block.clone()).await;
let (total_size, data_md5sum, data_sha256sum) = read_and_put_blocks(
&garage,
&version,
part_number,
first_block,
first_block_hash,
&mut chunker,
)
.await?;
// Verify that checksums map
ensure_checksum_matches(
data_md5sum.as_slice(),
data_sha256sum,
content_md5.as_deref(),
content_sha256,
)?;
// Store part etag in version
let data_md5sum_hex = hex::encode(data_md5sum);
mpu.parts.put(
mpu_part_key,
MpuPart {
version: version_uuid,
etag: Some(data_md5sum_hex.clone()),
size: Some(total_size),
},
);
garage.mpu_table.insert(&mpu).await?;
// We were not interrupted, everything went fine.
// We won't have to clean up on drop.
interrupted_cleanup.cancel();
let response = Response::builder()
.header("ETag", format!("\"{}\"", data_md5sum_hex))
.body(Body::empty())
.unwrap();
Ok(response)
}
struct InterruptedCleanup(Option<InterruptedCleanupInner>);
struct InterruptedCleanupInner {
garage: Arc<Garage>,
upload_id: Uuid,
version_uuid: Uuid,
}
impl InterruptedCleanup {
fn cancel(&mut self) {
drop(self.0.take());
}
}
impl Drop for InterruptedCleanup {
fn drop(&mut self) {
if let Some(info) = self.0.take() {
tokio::spawn(async move {
let version = Version::new(
info.version_uuid,
VersionBacklink::MultipartUpload {
upload_id: info.upload_id,
},
true,
);
if let Err(e) = info.garage.version_table.insert(&version).await {
warn!("Cannot cleanup after aborted UploadPart: {}", e);
}
});
}
}
}
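InterruptedCleanup above is a drop guard: the cleanup payload sits in an Option, the Drop impl spawns the abort task if the payload is still there, and cancel() disarms the guard once every fallible step has succeeded. A reduced sketch of the pattern, with the async cleanup replaced by a print and the payload by a plain String; names here are illustrative:

struct CleanupGuard(Option<String>);

impl CleanupGuard {
    fn cancel(&mut self) {
        // Taking the payload disarms the guard.
        drop(self.0.take());
    }
}

impl Drop for CleanupGuard {
    fn drop(&mut self) {
        if let Some(what) = self.0.take() {
            // In the real code: tokio::spawn(async move { mark the version aborted })
            eprintln!("cleaning up after interrupted operation: {}", what);
        }
    }
}

fn main() {
    {
        let _guard = CleanupGuard(Some("upload 1".to_string()));
        // An early return or `?` here would leave the guard armed,
        // so Drop performs the cleanup.
    } // prints the cleanup message for upload 1

    let mut guard = CleanupGuard(Some("upload 2".to_string()));
    // Everything succeeded: disarm.
    guard.cancel();
} // nothing printed for upload 2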
pub async fn handle_complete_multipart_upload(
garage: Arc<Garage>,
req: Request<Body>,
bucket_name: &str,
bucket: &Bucket,
key: &str,
upload_id: &str,
content_sha256: Option<Hash>,
) -> Result<Response<Body>, Error> {
let body = hyper::body::to_bytes(req.into_body()).await?;
if let Some(content_sha256) = content_sha256 {
verify_signed_content(content_sha256, &body[..])?;
}
let body_xml = roxmltree::Document::parse(std::str::from_utf8(&body)?)?;
let body_list_of_parts = parse_complete_multipart_upload_body(&body_xml)
.ok_or_bad_request("Invalid CompleteMultipartUpload XML")?;
debug!(
"CompleteMultipartUpload list of parts: {:?}",
body_list_of_parts
);
let upload_id = decode_upload_id(upload_id)?;
// Get object and multipart upload
let key = key.to_string();
let (_, mut object_version, mpu) = get_upload(&garage, &bucket.id, &key, &upload_id).await?;
if mpu.parts.is_empty() {
return Err(Error::bad_request("No data was uploaded"));
}
let headers = match object_version.state {
ObjectVersionState::Uploading { headers, .. } => headers,
_ => unreachable!(),
};
// Check that part numbers are an increasing sequence.
// (it doesn't need to start at 1 nor to be a continuous sequence,
// see discussion in #192)
if body_list_of_parts.is_empty() {
return Err(Error::EntityTooSmall);
}
if !body_list_of_parts
.iter()
.zip(body_list_of_parts.iter().skip(1))
.all(|(p1, p2)| p1.part_number < p2.part_number)
{
return Err(Error::InvalidPartOrder);
}
// Check that the list of parts they gave us corresponds to parts we have here
debug!("Parts stored in multipart upload: {:?}", mpu.parts.items());
let mut have_parts = HashMap::new();
for (pk, pv) in mpu.parts.items().iter() {
have_parts.insert(pk.part_number, pv);
}
let mut parts = vec![];
for req_part in body_list_of_parts.iter() {
match have_parts.get(&req_part.part_number) {
Some(part) if part.etag.as_ref() == Some(&req_part.etag) && part.size.is_some() => {
parts.push(*part)
}
_ => return Err(Error::InvalidPart),
}
}
let grg = &garage;
let parts_versions = futures::future::try_join_all(parts.iter().map(|p| async move {
grg.version_table
.get(&p.version, &EmptyKey)
.await?
.ok_or_internal_error("Part version missing from version table")
}))
.await?;
// Create final version and block refs
let mut final_version = Version::new(
upload_id,
VersionBacklink::Object {
bucket_id: bucket.id,
key: key.to_string(),
},
false,
);
for (part_number, part_version) in parts_versions.iter().enumerate() {
if part_version.deleted.get() {
return Err(Error::InvalidPart);
}
for (vbk, vb) in part_version.blocks.items().iter() {
final_version.blocks.put(
VersionBlockKey {
part_number: (part_number + 1) as u64,
offset: vbk.offset,
},
*vb,
);
}
}
garage.version_table.insert(&final_version).await?;
let block_refs = final_version.blocks.items().iter().map(|(_, b)| BlockRef {
block: b.hash,
version: upload_id,
deleted: false.into(),
});
garage.block_ref_table.insert_many(block_refs).await?;
// Calculate etag of final object
// To understand how etags are calculated, read more here:
// https://teppen.io/2018/06/23/aws_s3_etags/
let mut etag_md5_hasher = Md5::new();
for part in parts.iter() {
etag_md5_hasher.update(part.etag.as_ref().unwrap().as_bytes());
}
let etag = format!(
"{}-{}",
hex::encode(etag_md5_hasher.finalize()),
parts.len()
);
// Calculate total size of final object
let total_size = parts.iter().map(|x| x.size.unwrap()).sum();
if let Err(e) = check_quotas(&garage, bucket, &key, total_size).await {
object_version.state = ObjectVersionState::Aborted;
let final_object = Object::new(bucket.id, key.clone(), vec![object_version]);
garage.object_table.insert(&final_object).await?;
return Err(e);
}
// Write final object version
object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock(
ObjectVersionMeta {
headers,
size: total_size,
etag: etag.clone(),
},
final_version.blocks.items()[0].1.hash,
));
let final_object = Object::new(bucket.id, key.clone(), vec![object_version]);
garage.object_table.insert(&final_object).await?;
// Send response saying ok we're done
let result = s3_xml::CompleteMultipartUploadResult {
xmlns: (),
location: None,
bucket: s3_xml::Value(bucket_name.to_string()),
key: s3_xml::Value(key),
etag: s3_xml::Value(format!("\"{}\"", etag)),
};
let xml = s3_xml::to_xml_with_header(&result)?;
Ok(Response::new(Body::from(xml.into_bytes())))
}
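The aggregate ETag computed above is the MD5 of the concatenated per-part ETag strings, suffixed with "-" and the number of parts (see the linked teppen.io article for background on S3 multipart ETags). A standalone sketch using the same md5 and hex crates the handler imports; the part ETags are made-up values:

use md5::{Digest, Md5};

// Sketch of the aggregate ETag construction used in handle_complete_multipart_upload:
// hash the per-part ETag strings in order and append the part count.
fn multipart_etag(part_etags: &[&str]) -> String {
    let mut hasher = Md5::new();
    for etag in part_etags {
        hasher.update(etag.as_bytes());
    }
    format!("{}-{}", hex::encode(hasher.finalize()), part_etags.len())
}

fn main() {
    let etag = multipart_etag(&["etag1", "etag2", "etag3"]);
    // Always ends in "-<number of parts>", e.g. "...-3".
    assert!(etag.ends_with("-3"));
    println!("{}", etag);
}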
pub async fn handle_abort_multipart_upload(
garage: Arc<Garage>,
bucket_id: Uuid,
key: &str,
upload_id: &str,
) -> Result<Response<Body>, Error> {
let upload_id = decode_upload_id(upload_id)?;
let (_, mut object_version, _) =
get_upload(&garage, &bucket_id, &key.to_string(), &upload_id).await?;
object_version.state = ObjectVersionState::Aborted;
let final_object = Object::new(bucket_id, key.to_string(), vec![object_version]);
garage.object_table.insert(&final_object).await?;
Ok(Response::new(Body::from(vec![])))
}
// ======== helpers ============
#[allow(clippy::ptr_arg)]
pub(crate) async fn get_upload(
garage: &Garage,
bucket_id: &Uuid,
key: &String,
upload_id: &Uuid,
) -> Result<(Object, ObjectVersion, MultipartUpload), Error> {
let (object, mpu) = futures::try_join!(
garage.object_table.get(bucket_id, key).map_err(Error::from),
garage
.mpu_table
.get(upload_id, &EmptyKey)
.map_err(Error::from),
)?;
let object = object.ok_or(Error::NoSuchUpload)?;
let mpu = mpu.ok_or(Error::NoSuchUpload)?;
let object_version = object
.versions()
.iter()
.find(|v| v.uuid == *upload_id && v.is_uploading(Some(true)))
.ok_or(Error::NoSuchUpload)?
.clone();
Ok((object, object_version, mpu))
}
pub fn decode_upload_id(id: &str) -> Result<Uuid, Error> {
let id_bin = hex::decode(id).map_err(|_| Error::NoSuchUpload)?;
if id_bin.len() != 32 {
return Err(Error::NoSuchUpload);
}
let mut uuid = [0u8; 32];
uuid.copy_from_slice(&id_bin[..]);
Ok(Uuid::from(uuid))
}
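decode_upload_id is the inverse of the hex::encode(upload_id) used when the upload is created: a valid upload id is exactly 64 hex characters decoding to the 32-byte version UUID, and anything else maps to NoSuchUpload. A round-trip sketch, with the error type simplified to a string and using the same hex crate:

fn decode_upload_id(id: &str) -> Result<[u8; 32], &'static str> {
    let id_bin = hex::decode(id).map_err(|_| "NoSuchUpload")?;
    if id_bin.len() != 32 {
        return Err("NoSuchUpload");
    }
    let mut uuid = [0u8; 32];
    uuid.copy_from_slice(&id_bin[..]);
    Ok(uuid)
}

fn main() {
    let uuid = [0x08u8; 32];
    let upload_id = hex::encode(uuid); // what InitiateMultipartUpload returns
    assert_eq!(decode_upload_id(&upload_id).unwrap(), uuid);
    assert!(decode_upload_id("not-hex").is_err());
    assert!(decode_upload_id("abcd").is_err()); // too short
}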
#[derive(Debug)]
struct CompleteMultipartUploadPart {
etag: String,
part_number: u64,
}
fn parse_complete_multipart_upload_body(
xml: &roxmltree::Document,
) -> Option<Vec<CompleteMultipartUploadPart>> {
let mut parts = vec![];
let root = xml.root();
let cmu = root.first_child()?;
if !cmu.has_tag_name("CompleteMultipartUpload") {
return None;
}
for item in cmu.children() {
// Only parse <Part> nodes
if !item.is_element() {
continue;
}
if item.has_tag_name("Part") {
let etag = item.children().find(|e| e.has_tag_name("ETag"))?.text()?;
let part_number = item
.children()
.find(|e| e.has_tag_name("PartNumber"))?
.text()?;
parts.push(CompleteMultipartUploadPart {
etag: etag.trim_matches('"').to_string(),
part_number: part_number.parse().ok()?,
});
} else {
return None;
}
}
Some(parts)
}
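A usage sketch for parse_complete_multipart_upload_body, assuming it is exercised from within this module (the XML body and ETag values are made up); it relies on the same roxmltree crate as the handler:

#[test]
fn parse_minimal_complete_multipart_upload() {
    let body = r#"<CompleteMultipartUpload>
  <Part><PartNumber>1</PartNumber><ETag>"aaa"</ETag></Part>
  <Part><PartNumber>2</PartNumber><ETag>"bbb"</ETag></Part>
</CompleteMultipartUpload>"#;

    let doc = roxmltree::Document::parse(body).unwrap();
    let parts = parse_complete_multipart_upload_body(&doc).unwrap();
    assert_eq!(parts.len(), 2);
    assert_eq!(parts[0].part_number, 1);
    assert_eq!(parts[0].etag, "aaa"); // surrounding quotes are trimmed
    assert_eq!(parts[1].part_number, 2);
}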

View file

@ -1,4 +1,4 @@
use std::collections::{BTreeMap, HashMap}; use std::collections::{BTreeMap, BTreeSet, HashMap};
use std::sync::Arc; use std::sync::Arc;
use base64::prelude::*; use base64::prelude::*;
@ -30,6 +30,8 @@ use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*; use garage_model::s3::version_table::*;
use crate::s3::error::*; use crate::s3::error::*;
use crate::s3::xml as s3_xml;
use crate::signature::verify_signed_content;
pub async fn handle_put( pub async fn handle_put(
garage: Arc<Garage>, garage: Arc<Garage>,
@ -121,23 +123,20 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
// The following consists in many steps that can each fail. // The following consists in many steps that can each fail.
// Keep track that some cleanup will be needed if things fail // Keep track that some cleanup will be needed if things fail
// before everything is finished (cleanup is done using the Drop trait). // before everything is finished (cleanup is done using the Drop trait).
let mut interrupted_cleanup = InterruptedCleanup(Some(InterruptedCleanupInner { let mut interrupted_cleanup = InterruptedCleanup(Some((
garage: garage.clone(), garage.clone(),
bucket_id: bucket.id, bucket.id,
key: key.into(), key.into(),
version_uuid, version_uuid,
version_timestamp, version_timestamp,
})); )));
// Write version identifier in object table so that we have a trace // Write version identifier in object table so that we have a trace
// that we are uploading something // that we are uploading something
let mut object_version = ObjectVersion { let mut object_version = ObjectVersion {
uuid: version_uuid, uuid: version_uuid,
timestamp: version_timestamp, timestamp: version_timestamp,
state: ObjectVersionState::Uploading { state: ObjectVersionState::Uploading(headers.clone()),
headers: headers.clone(),
multipart: false,
},
}; };
let object = Object::new(bucket.id, key.into(), vec![object_version.clone()]); let object = Object::new(bucket.id, key.into(), vec![object_version.clone()]);
garage.object_table.insert(&object).await?; garage.object_table.insert(&object).await?;
@ -146,14 +145,7 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
// Write this entry now, even with empty block list, // Write this entry now, even with empty block list,
// to prevent block_ref entries from being deleted (they can be deleted // to prevent block_ref entries from being deleted (they can be deleted
// if they reference a version that isn't found in the version table) // if they reference a version that isn't found in the version table)
let version = Version::new( let version = Version::new(version_uuid, bucket.id, key.into(), false);
version_uuid,
VersionBacklink::Object {
bucket_id: bucket.id,
key: key.into(),
},
false,
);
garage.version_table.insert(&version).await?; garage.version_table.insert(&version).await?;
// Transfer data and verify checksum // Transfer data and verify checksum
@ -200,7 +192,7 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
/// Validate MD5 sum against content-md5 header /// Validate MD5 sum against content-md5 header
/// and sha256sum against signed content-sha256 /// and sha256sum against signed content-sha256
pub(crate) fn ensure_checksum_matches( fn ensure_checksum_matches(
data_md5sum: &[u8], data_md5sum: &[u8],
data_sha256sum: garage_util::data::FixedBytes32, data_sha256sum: garage_util::data::FixedBytes32,
content_md5: Option<&str>, content_md5: Option<&str>,
@ -226,7 +218,7 @@ pub(crate) fn ensure_checksum_matches(
} }
/// Check that inserting this object with this size doesn't exceed bucket quotas /// Check that inserting this object with this size doesn't exceed bucket quotas
pub(crate) async fn check_quotas( async fn check_quotas(
garage: &Arc<Garage>, garage: &Arc<Garage>,
bucket: &Bucket, bucket: &Bucket,
key: &str, key: &str,
@ -283,7 +275,7 @@ pub(crate) async fn check_quotas(
Ok(()) Ok(())
} }
pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> + Unpin>( async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
garage: &Garage, garage: &Garage,
version: &Version, version: &Version,
part_number: u64, part_number: u64,
@ -389,7 +381,7 @@ async fn put_block_meta(
Ok(()) Ok(())
} }
pub(crate) struct StreamChunker<S: Stream<Item = Result<Bytes, Error>>> { struct StreamChunker<S: Stream<Item = Result<Bytes, Error>>> {
stream: S, stream: S,
read_all: bool, read_all: bool,
block_size: usize, block_size: usize,
@ -397,7 +389,7 @@ pub(crate) struct StreamChunker<S: Stream<Item = Result<Bytes, Error>>> {
} }
impl<S: Stream<Item = Result<Bytes, Error>> + Unpin> StreamChunker<S> { impl<S: Stream<Item = Result<Bytes, Error>> + Unpin> StreamChunker<S> {
pub(crate) fn new(stream: S, block_size: usize) -> Self { fn new(stream: S, block_size: usize) -> Self {
Self { Self {
stream, stream,
read_all: false, read_all: false,
@ -406,7 +398,7 @@ impl<S: Stream<Item = Result<Bytes, Error>> + Unpin> StreamChunker<S> {
} }
} }
pub(crate) async fn next(&mut self) -> Result<Option<Bytes>, Error> { async fn next(&mut self) -> Result<Option<Bytes>, Error> {
while !self.read_all && self.buf.len() < self.block_size { while !self.read_all && self.buf.len() < self.block_size {
if let Some(block) = self.stream.next().await { if let Some(block) = self.stream.next().await {
let bytes = block?; let bytes = block?;
@ -433,14 +425,7 @@ pub fn put_response(version_uuid: Uuid, md5sum_hex: String) -> Response<Body> {
.unwrap() .unwrap()
} }
struct InterruptedCleanup(Option<InterruptedCleanupInner>); struct InterruptedCleanup(Option<(Arc<Garage>, Uuid, String, Uuid, u64)>);
struct InterruptedCleanupInner {
garage: Arc<Garage>,
bucket_id: Uuid,
key: String,
version_uuid: Uuid,
version_timestamp: u64,
}
impl InterruptedCleanup { impl InterruptedCleanup {
fn cancel(&mut self) { fn cancel(&mut self) {
@ -449,15 +434,15 @@ impl InterruptedCleanup {
} }
impl Drop for InterruptedCleanup { impl Drop for InterruptedCleanup {
fn drop(&mut self) { fn drop(&mut self) {
if let Some(info) = self.0.take() { if let Some((garage, bucket_id, key, version_uuid, version_ts)) = self.0.take() {
tokio::spawn(async move { tokio::spawn(async move {
let object_version = ObjectVersion { let object_version = ObjectVersion {
uuid: info.version_uuid, uuid: version_uuid,
timestamp: info.version_timestamp, timestamp: version_ts,
state: ObjectVersionState::Aborted, state: ObjectVersionState::Aborted,
}; };
let object = Object::new(info.bucket_id, info.key, vec![object_version]); let object = Object::new(bucket_id, key, vec![object_version]);
if let Err(e) = info.garage.object_table.insert(&object).await { if let Err(e) = garage.object_table.insert(&object).await {
warn!("Cannot cleanup after aborted PutObject: {}", e); warn!("Cannot cleanup after aborted PutObject: {}", e);
} }
}); });
@ -465,9 +450,326 @@ impl Drop for InterruptedCleanup {
} }
} }
// ============ helpers ============ // ----
pub(crate) fn get_mime_type(headers: &HeaderMap<HeaderValue>) -> Result<String, Error> { pub async fn handle_create_multipart_upload(
garage: Arc<Garage>,
req: &Request<Body>,
bucket_name: &str,
bucket_id: Uuid,
key: &str,
) -> Result<Response<Body>, Error> {
let version_uuid = gen_uuid();
let headers = get_headers(req.headers())?;
// Create object in object table
let object_version = ObjectVersion {
uuid: version_uuid,
timestamp: now_msec(),
state: ObjectVersionState::Uploading(headers),
};
let object = Object::new(bucket_id, key.to_string(), vec![object_version]);
garage.object_table.insert(&object).await?;
// Insert empty version so that block_ref entries refer to something
// (they are inserted concurrently with blocks in the version table, so
// there is the possibility that they are inserted before the version table
// is created, in which case it is allowed to delete them, e.g. in repair_*)
let version = Version::new(version_uuid, bucket_id, key.into(), false);
garage.version_table.insert(&version).await?;
// Send success response
let result = s3_xml::InitiateMultipartUploadResult {
xmlns: (),
bucket: s3_xml::Value(bucket_name.to_string()),
key: s3_xml::Value(key.to_string()),
upload_id: s3_xml::Value(hex::encode(version_uuid)),
};
let xml = s3_xml::to_xml_with_header(&result)?;
Ok(Response::new(Body::from(xml.into_bytes())))
}
pub async fn handle_put_part(
garage: Arc<Garage>,
req: Request<Body>,
bucket_id: Uuid,
key: &str,
part_number: u64,
upload_id: &str,
content_sha256: Option<Hash>,
) -> Result<Response<Body>, Error> {
let version_uuid = decode_upload_id(upload_id)?;
let content_md5 = match req.headers().get("content-md5") {
Some(x) => Some(x.to_str()?.to_string()),
None => None,
};
// Read first chunk, and at the same time try to get the object to see if it exists
let key = key.to_string();
let body = req.into_body().map_err(Error::from);
let mut chunker = StreamChunker::new(body, garage.config.block_size);
let (object, version, first_block) = futures::try_join!(
garage
.object_table
.get(&bucket_id, &key)
.map_err(Error::from),
garage
.version_table
.get(&version_uuid, &EmptyKey)
.map_err(Error::from),
chunker.next(),
)?;
// Check object is valid and multipart block can be accepted
let first_block = first_block.ok_or_bad_request("Empty body")?;
let object = object.ok_or_bad_request("Object not found")?;
if !object
.versions()
.iter()
.any(|v| v.uuid == version_uuid && v.is_uploading())
{
return Err(Error::NoSuchUpload);
}
// Check part hasn't already been uploaded
if let Some(v) = version {
if v.has_part_number(part_number) {
return Err(Error::bad_request(format!(
"Part number {} has already been uploaded",
part_number
)));
}
}
// Copy block to store
let version = Version::new(version_uuid, bucket_id, key, false);
let first_block_hash = async_blake2sum(first_block.clone()).await;
let (_, data_md5sum, data_sha256sum) = read_and_put_blocks(
&garage,
&version,
part_number,
first_block,
first_block_hash,
&mut chunker,
)
.await?;
// Verify that checksums map
ensure_checksum_matches(
data_md5sum.as_slice(),
data_sha256sum,
content_md5.as_deref(),
content_sha256,
)?;
// Store part etag in version
let data_md5sum_hex = hex::encode(data_md5sum);
let mut version = version;
version
.parts_etags
.put(part_number, data_md5sum_hex.clone());
garage.version_table.insert(&version).await?;
let response = Response::builder()
.header("ETag", format!("\"{}\"", data_md5sum_hex))
.body(Body::empty())
.unwrap();
Ok(response)
}
pub async fn handle_complete_multipart_upload(
garage: Arc<Garage>,
req: Request<Body>,
bucket_name: &str,
bucket: &Bucket,
key: &str,
upload_id: &str,
content_sha256: Option<Hash>,
) -> Result<Response<Body>, Error> {
let body = hyper::body::to_bytes(req.into_body()).await?;
if let Some(content_sha256) = content_sha256 {
verify_signed_content(content_sha256, &body[..])?;
}
let body_xml = roxmltree::Document::parse(std::str::from_utf8(&body)?)?;
let body_list_of_parts = parse_complete_multipart_upload_body(&body_xml)
.ok_or_bad_request("Invalid CompleteMultipartUpload XML")?;
debug!(
"CompleteMultipartUpload list of parts: {:?}",
body_list_of_parts
);
let version_uuid = decode_upload_id(upload_id)?;
// Get object and version
let key = key.to_string();
let (object, version) = futures::try_join!(
garage.object_table.get(&bucket.id, &key),
garage.version_table.get(&version_uuid, &EmptyKey),
)?;
let object = object.ok_or(Error::NoSuchKey)?;
let mut object_version = object
.versions()
.iter()
.find(|v| v.uuid == version_uuid && v.is_uploading())
.cloned()
.ok_or(Error::NoSuchUpload)?;
let version = version.ok_or(Error::NoSuchKey)?;
if version.blocks.is_empty() {
return Err(Error::bad_request("No data was uploaded"));
}
let headers = match object_version.state {
ObjectVersionState::Uploading(headers) => headers,
_ => unreachable!(),
};
// Check that part numbers are an increasing sequence.
// (it doesn't need to start at 1 nor to be a continuous sequence,
// see discussion in #192)
if body_list_of_parts.is_empty() {
return Err(Error::EntityTooSmall);
}
if !body_list_of_parts
.iter()
.zip(body_list_of_parts.iter().skip(1))
.all(|(p1, p2)| p1.part_number < p2.part_number)
{
return Err(Error::InvalidPartOrder);
}
// Garage-specific restriction, see #204: part numbers must be
// consecutive starting at 1
if body_list_of_parts[0].part_number != 1
|| !body_list_of_parts
.iter()
.zip(body_list_of_parts.iter().skip(1))
.all(|(p1, p2)| p1.part_number + 1 == p2.part_number)
{
return Err(Error::NotImplemented("Garage does not support completing a Multipart upload with non-consecutive part numbers. This is a restriction of Garage's data model, which might be fixed in a future release. See issue #204 for more information on this topic.".into()));
}
// Check that the list of parts they gave us corresponds to the parts we have here
debug!("Expected parts from request: {:?}", body_list_of_parts);
debug!("Parts stored in version: {:?}", version.parts_etags.items());
let parts = version
.parts_etags
.items()
.iter()
.map(|pair| (&pair.0, &pair.1));
let same_parts = body_list_of_parts
.iter()
.map(|x| (&x.part_number, &x.etag))
.eq(parts);
if !same_parts {
return Err(Error::InvalidPart);
}
// Check that all blocks belong to one of the parts
let block_parts = version
.blocks
.items()
.iter()
.map(|(bk, _)| bk.part_number)
.collect::<BTreeSet<_>>();
let same_parts = body_list_of_parts
.iter()
.map(|x| x.part_number)
.eq(block_parts.into_iter());
if !same_parts {
return Err(Error::bad_request(
"Part numbers in block list and part list do not match. This can happen if a part was partially uploaded. Please abort the multipart upload and try again."
));
}
// Calculate etag of final object
// To understand how etags are calculated, read more here:
// https://teppen.io/2018/06/23/aws_s3_etags/
let num_parts = body_list_of_parts.len();
let mut etag_md5_hasher = Md5::new();
for (_, etag) in version.parts_etags.items().iter() {
etag_md5_hasher.update(etag.as_bytes());
}
let etag = format!("{}-{}", hex::encode(etag_md5_hasher.finalize()), num_parts);
// Calculate total size of final object
let total_size = version.blocks.items().iter().map(|x| x.1.size).sum();
if let Err(e) = check_quotas(&garage, bucket, &key, total_size).await {
object_version.state = ObjectVersionState::Aborted;
let final_object = Object::new(bucket.id, key.clone(), vec![object_version]);
garage.object_table.insert(&final_object).await?;
return Err(e);
}
// Write final object version
object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock(
ObjectVersionMeta {
headers,
size: total_size,
etag: etag.clone(),
},
version.blocks.items()[0].1.hash,
));
let final_object = Object::new(bucket.id, key.clone(), vec![object_version]);
garage.object_table.insert(&final_object).await?;
// Send response saying ok we're done
let result = s3_xml::CompleteMultipartUploadResult {
xmlns: (),
location: None,
bucket: s3_xml::Value(bucket_name.to_string()),
key: s3_xml::Value(key),
etag: s3_xml::Value(format!("\"{}\"", etag)),
};
let xml = s3_xml::to_xml_with_header(&result)?;
Ok(Response::new(Body::from(xml.into_bytes())))
}
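The handler above applies two ordering checks to the client-supplied part list: the S3 rule that part numbers must be strictly increasing, and the Garage-specific restriction from issue #204 that they must be consecutive starting at 1. A standalone sketch of both checks on a plain list of part numbers:

fn strictly_increasing(parts: &[u64]) -> bool {
    parts.iter().zip(parts.iter().skip(1)).all(|(p1, p2)| p1 < p2)
}

fn consecutive_from_one(parts: &[u64]) -> bool {
    !parts.is_empty()
        && parts[0] == 1
        && parts.iter().zip(parts.iter().skip(1)).all(|(p1, p2)| p1 + 1 == *p2)
}

fn main() {
    assert!(strictly_increasing(&[2, 5, 9]));        // accepted by the S3 ordering rule
    assert!(!consecutive_from_one(&[2, 5, 9]));      // but rejected by Garage (#204)
    assert!(consecutive_from_one(&[1, 2, 3, 4]));
    assert!(!strictly_increasing(&[1, 3, 2]));       // InvalidPartOrder
}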
pub async fn handle_abort_multipart_upload(
garage: Arc<Garage>,
bucket_id: Uuid,
key: &str,
upload_id: &str,
) -> Result<Response<Body>, Error> {
let version_uuid = decode_upload_id(upload_id)?;
let object = garage
.object_table
.get(&bucket_id, &key.to_string())
.await?;
let object = object.ok_or(Error::NoSuchKey)?;
let object_version = object
.versions()
.iter()
.find(|v| v.uuid == version_uuid && v.is_uploading());
let mut object_version = match object_version {
None => return Err(Error::NoSuchUpload),
Some(x) => x.clone(),
};
object_version.state = ObjectVersionState::Aborted;
let final_object = Object::new(bucket_id, key.to_string(), vec![object_version]);
garage.object_table.insert(&final_object).await?;
Ok(Response::new(Body::from(vec![])))
}
fn get_mime_type(headers: &HeaderMap<HeaderValue>) -> Result<String, Error> {
Ok(headers Ok(headers
.get(hyper::header::CONTENT_TYPE) .get(hyper::header::CONTENT_TYPE)
.map(|x| x.to_str()) .map(|x| x.to_str())
@ -519,3 +821,54 @@ pub(crate) fn get_headers(headers: &HeaderMap<HeaderValue>) -> Result<ObjectVers
other, other,
}) })
} }
pub fn decode_upload_id(id: &str) -> Result<Uuid, Error> {
let id_bin = hex::decode(id).map_err(|_| Error::NoSuchUpload)?;
if id_bin.len() != 32 {
return Err(Error::NoSuchUpload);
}
let mut uuid = [0u8; 32];
uuid.copy_from_slice(&id_bin[..]);
Ok(Uuid::from(uuid))
}
#[derive(Debug)]
struct CompleteMultipartUploadPart {
etag: String,
part_number: u64,
}
fn parse_complete_multipart_upload_body(
xml: &roxmltree::Document,
) -> Option<Vec<CompleteMultipartUploadPart>> {
let mut parts = vec![];
let root = xml.root();
let cmu = root.first_child()?;
if !cmu.has_tag_name("CompleteMultipartUpload") {
return None;
}
for item in cmu.children() {
// Only parse <Part> nodes
if !item.is_element() {
continue;
}
if item.has_tag_name("Part") {
let etag = item.children().find(|e| e.has_tag_name("ETag"))?.text()?;
let part_number = item
.children()
.find(|e| e.has_tag_name("PartNumber"))?
.text()?;
parts.push(CompleteMultipartUploadPart {
etag: etag.trim_matches('"').to_string(),
part_number: part_number.parse().ok()?,
});
} else {
return None;
}
}
Some(parts)
}

View file

@ -43,11 +43,14 @@ pub async fn handle_get_website(bucket: &Bucket) -> Result<Response<Body>, Error
pub async fn handle_delete_website( pub async fn handle_delete_website(
garage: Arc<Garage>, garage: Arc<Garage>,
mut bucket: Bucket, bucket_id: Uuid,
) -> Result<Response<Body>, Error> { ) -> Result<Response<Body>, Error> {
let param = bucket let mut bucket = garage
.params_mut() .bucket_helper()
.ok_or_internal_error("Bucket should not be deleted at this point")?; .get_existing_bucket(bucket_id)
.await?;
let param = bucket.params_mut().unwrap();
param.website_config.update(None); param.website_config.update(None);
garage.bucket_table.insert(&bucket).await?; garage.bucket_table.insert(&bucket).await?;
@ -59,7 +62,7 @@ pub async fn handle_delete_website(
pub async fn handle_put_website( pub async fn handle_put_website(
garage: Arc<Garage>, garage: Arc<Garage>,
mut bucket: Bucket, bucket_id: Uuid,
req: Request<Body>, req: Request<Body>,
content_sha256: Option<Hash>, content_sha256: Option<Hash>,
) -> Result<Response<Body>, Error> { ) -> Result<Response<Body>, Error> {
@ -69,9 +72,12 @@ pub async fn handle_put_website(
verify_signed_content(content_sha256, &body[..])?; verify_signed_content(content_sha256, &body[..])?;
} }
let param = bucket let mut bucket = garage
.params_mut() .bucket_helper()
.ok_or_internal_error("Bucket should not be deleted at this point")?; .get_existing_bucket(bucket_id)
.await?;
let param = bucket.params_mut().unwrap();
let conf: WebsiteConfiguration = from_reader(&body as &[u8])?; let conf: WebsiteConfiguration = from_reader(&body as &[u8])?;
conf.validate()?; conf.validate()?;

View file

@ -1,6 +1,6 @@
[package] [package]
name = "garage_block" name = "garage_block"
version = "0.9.0" version = "0.8.4"
authors = ["Alex Auvolat <alex@adnab.me>"] authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"
@ -24,7 +24,6 @@ opentelemetry = "0.17"
arc-swap = "1.5" arc-swap = "1.5"
async-trait = "0.1.7" async-trait = "0.1.7"
bytes = "1.0" bytes = "1.0"
bytesize = "1.2"
hex = "0.4" hex = "0.4"
tracing = "0.1" tracing = "0.1"
rand = "0.8" rand = "0.8"

View file

@ -1,5 +1,3 @@
use std::path::PathBuf;
use bytes::Bytes; use bytes::Bytes;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use zstd::stream::{decode_all as zstd_decode, Encoder}; use zstd::stream::{decode_all as zstd_decode, Encoder};
@ -21,14 +19,6 @@ pub enum DataBlock {
Compressed(Bytes), Compressed(Bytes),
} }
#[derive(Debug)]
pub enum DataBlockPath {
/// Uncompressed data file
Plain(PathBuf),
/// Compressed data file
Compressed(PathBuf),
}
impl DataBlock { impl DataBlock {
/// Query whether this block is compressed /// Query whether this block is compressed
pub fn is_compressed(&self) -> bool { pub fn is_compressed(&self) -> bool {

View file

@ -1,337 +0,0 @@
use std::path::PathBuf;
use serde::{Deserialize, Serialize};
use garage_util::config::DataDirEnum;
use garage_util::data::Hash;
use garage_util::error::{Error, OkOrMessage};
use garage_util::migrate::*;
type Idx = u16;
const DRIVE_NPART: usize = 1024;
const HASH_DRIVE_BYTES: (usize, usize) = (2, 3);
#[derive(Serialize, Deserialize, Debug, Clone)]
pub(crate) struct DataLayout {
pub(crate) data_dirs: Vec<DataDir>,
/// Primary storage location (index in data_dirs) for each partition
/// = the location where the data is supposed to be, blocks are always
/// written there (copies in other dirs may be deleted if they exist)
pub(crate) part_prim: Vec<Idx>,
/// Secondary storage locations for each partition = locations
/// where data blocks might be, we check from these dirs when reading
pub(crate) part_sec: Vec<Vec<Idx>>,
}
#[derive(Serialize, Deserialize, Debug, Clone, Eq, PartialEq)]
pub(crate) struct DataDir {
pub(crate) path: PathBuf,
pub(crate) state: DataDirState,
}
#[derive(Serialize, Deserialize, Debug, Clone, Copy, Eq, PartialEq)]
pub(crate) enum DataDirState {
Active { capacity: u64 },
ReadOnly,
}
impl DataLayout {
pub(crate) fn initialize(dirs: &DataDirEnum) -> Result<Self, Error> {
let data_dirs = make_data_dirs(dirs)?;
// Split partitions proportionally to capacity across all drives
// to assign the primary storage location
let total_cap = data_dirs.iter().filter_map(|x| x.capacity()).sum::<u64>();
assert!(total_cap > 0);
let mut part_prim = Vec::with_capacity(DRIVE_NPART);
let mut cum_cap = 0;
for (i, dd) in data_dirs.iter().enumerate() {
if let DataDirState::Active { capacity } = dd.state {
cum_cap += capacity;
let n_total = (cum_cap * DRIVE_NPART as u64) / total_cap;
part_prim.resize(n_total as usize, i as Idx);
}
}
assert_eq!(cum_cap, total_cap);
assert_eq!(part_prim.len(), DRIVE_NPART);
// If any of the storage locations is non-empty, it probably existed before
// this algorithm was added, so add it as a secondary storage location for all partitions
// to make sure existing files are not lost
let mut part_sec = vec![vec![]; DRIVE_NPART];
for (i, dd) in data_dirs.iter().enumerate() {
if dir_not_empty(&dd.path)? {
for (sec, prim) in part_sec.iter_mut().zip(part_prim.iter()) {
if *prim != i as Idx {
sec.push(i as Idx);
}
}
}
}
Ok(Self {
data_dirs,
part_prim,
part_sec,
})
}
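initialize above splits the DRIVE_NPART primary slots proportionally to drive capacity by walking the drives in order and growing part_prim up to floor(cumulative_capacity * DRIVE_NPART / total_capacity) at each step. A standalone sketch of that assignment with made-up capacities:

const DRIVE_NPART: usize = 1024;

fn assign_primaries(capacities: &[u64]) -> Vec<u16> {
    let total_cap: u64 = capacities.iter().sum();
    assert!(total_cap > 0);
    let mut part_prim = Vec::with_capacity(DRIVE_NPART);
    let mut cum_cap = 0u64;
    for (i, cap) in capacities.iter().enumerate() {
        cum_cap += cap;
        // Drive i owns every partition index below this cumulative threshold.
        let n_total = (cum_cap * DRIVE_NPART as u64) / total_cap;
        part_prim.resize(n_total as usize, i as u16);
    }
    assert_eq!(part_prim.len(), DRIVE_NPART);
    part_prim
}

fn main() {
    // A 1 TB drive and a 3 TB drive: expect roughly a 1:3 split of the 1024 partitions.
    let prim = assign_primaries(&[1_000, 3_000]);
    let on_first = prim.iter().filter(|&&d| d == 0).count();
    assert_eq!(on_first, 256);
    assert_eq!(prim.len() - on_first, 768);
}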
pub(crate) fn update(&mut self, dirs: &DataDirEnum) -> Result<(), Error> {
// Make list of new data directories, exit if nothing changed
let data_dirs = make_data_dirs(dirs)?;
if data_dirs == self.data_dirs {
return Ok(());
}
let total_cap = data_dirs.iter().filter_map(|x| x.capacity()).sum::<u64>();
assert!(total_cap > 0);
// Compute mapping of old indices to new indices
let old2new = self
.data_dirs
.iter()
.map(|x| {
data_dirs
.iter()
.position(|y| y.path == x.path)
.map(|x| x as Idx)
})
.collect::<Vec<_>>();
// Compute secondary location list for partitions based on existing
// folders, translating indices from old to new
let mut part_sec = self
.part_sec
.iter()
.map(|dl| {
dl.iter()
.filter_map(|old| old2new.get(*old as usize).copied().flatten())
.collect::<Vec<_>>()
})
.collect::<Vec<_>>();
// Compute a vector that, for each data dir,
// contains the list of partitions primarily stored on that drive
let mut dir_prim = vec![vec![]; data_dirs.len()];
for (ipart, prim) in self.part_prim.iter().enumerate() {
if let Some(new) = old2new.get(*prim as usize).copied().flatten() {
dir_prim[new as usize].push(ipart);
}
}
// Compute the target number of partitions per data directory
let mut cum_cap = 0;
let mut npart_per_dir = vec![0; data_dirs.len()];
for (idir, dd) in data_dirs.iter().enumerate() {
if let DataDirState::Active { capacity } = dd.state {
let begin = (cum_cap * DRIVE_NPART as u64) / total_cap;
cum_cap += capacity;
let end = (cum_cap * DRIVE_NPART as u64) / total_cap;
npart_per_dir[idir] = (end - begin) as usize;
}
}
assert_eq!(cum_cap, total_cap);
assert_eq!(npart_per_dir.iter().sum::<usize>(), DRIVE_NPART);
// For all directories that have too many primary partitions,
// move that partition to secondary
for (idir, (parts, tgt_npart)) in dir_prim.iter_mut().zip(npart_per_dir.iter()).enumerate()
{
while parts.len() > *tgt_npart {
let part = parts.pop().unwrap();
if !part_sec[part].contains(&(idir as Idx)) {
part_sec[part].push(idir as Idx);
}
}
}
// Calculate the vector of primary partition dir index
let mut part_prim = vec![None; DRIVE_NPART];
for (idir, parts) in dir_prim.iter().enumerate() {
for part in parts.iter() {
assert!(part_prim[*part].is_none());
part_prim[*part] = Some(idir as Idx)
}
}
// Calculate a vector of unassigned partitions
let mut unassigned = part_prim
.iter()
.enumerate()
.filter(|(_, dir)| dir.is_none())
.map(|(ipart, _)| ipart)
.collect::<Vec<_>>();
// For all directories that don't have enough primary partitions,
// add partitions from unassigned
for (idir, (parts, tgt_npart)) in dir_prim.iter_mut().zip(npart_per_dir.iter()).enumerate()
{
if parts.len() < *tgt_npart {
let required = *tgt_npart - parts.len();
assert!(unassigned.len() >= required);
for _ in 0..required {
let new_part = unassigned.pop().unwrap();
part_prim[new_part] = Some(idir as Idx);
part_sec[new_part].retain(|x| *x != idir as Idx);
}
}
}
// Sanity checks
assert!(part_prim.iter().all(|x| x.is_some()));
assert!(unassigned.is_empty());
// Transform part_prim from vec of Option<Idx> to vec of Idx
let part_prim = part_prim
.into_iter()
.map(|x| x.unwrap())
.collect::<Vec<_>>();
assert!(part_prim.iter().all(|p| data_dirs
.get(*p as usize)
.and_then(|x| x.capacity())
.unwrap_or(0)
> 0));
// If any of the newly added storage locations is non-empty,
// it might have been removed and added again and might contain data,
// so add it as a secondary storage location for all partitions
// to make sure existing files are not lost
for (i, dd) in data_dirs.iter().enumerate() {
if self.data_dirs.iter().any(|ed| ed.path == dd.path) {
continue;
}
if dir_not_empty(&dd.path)? {
for (sec, prim) in part_sec.iter_mut().zip(part_prim.iter()) {
if *prim != i as Idx && !sec.contains(&(i as Idx)) {
sec.push(i as Idx);
}
}
}
}
// Apply newly generated config
*self = Self {
data_dirs,
part_prim,
part_sec,
};
Ok(())
}
pub(crate) fn primary_block_dir(&self, hash: &Hash) -> PathBuf {
let ipart = self.partition_from(hash);
let idir = self.part_prim[ipart] as usize;
self.block_dir_from(hash, &self.data_dirs[idir].path)
}
pub(crate) fn secondary_block_dirs<'a>(
&'a self,
hash: &'a Hash,
) -> impl Iterator<Item = PathBuf> + 'a {
let ipart = self.partition_from(hash);
self.part_sec[ipart]
.iter()
.map(move |idir| self.block_dir_from(hash, &self.data_dirs[*idir as usize].path))
}
fn partition_from(&self, hash: &Hash) -> usize {
u16::from_be_bytes([
hash.as_slice()[HASH_DRIVE_BYTES.0],
hash.as_slice()[HASH_DRIVE_BYTES.1],
]) as usize % DRIVE_NPART
}
fn block_dir_from(&self, hash: &Hash, dir: &PathBuf) -> PathBuf {
let mut path = dir.clone();
path.push(hex::encode(&hash.as_slice()[0..1]));
path.push(hex::encode(&hash.as_slice()[1..2]));
path
}
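The two mappings above route a block to storage: bytes 2 and 3 of its hash select the partition (mod DRIVE_NPART), while the hex encodings of bytes 0 and 1 select the two-level directory under the chosen data dir. A standalone sketch with a made-up hash, using the same hex crate:

use std::path::PathBuf;

const DRIVE_NPART: usize = 1024;
const HASH_DRIVE_BYTES: (usize, usize) = (2, 3);

fn partition_from(hash: &[u8; 32]) -> usize {
    u16::from_be_bytes([hash[HASH_DRIVE_BYTES.0], hash[HASH_DRIVE_BYTES.1]]) as usize % DRIVE_NPART
}

fn block_dir_from(hash: &[u8; 32], dir: &PathBuf) -> PathBuf {
    let mut path = dir.clone();
    path.push(hex::encode(&hash[0..1]));
    path.push(hex::encode(&hash[1..2]));
    path
}

fn main() {
    let mut hash = [0u8; 32];
    hash[0] = 0xab;
    hash[1] = 0xcd;
    hash[2] = 0x12;
    hash[3] = 0x34;

    assert_eq!(partition_from(&hash), 0x1234 % DRIVE_NPART); // = 0x234 = 564
    let dir = block_dir_from(&hash, &PathBuf::from("/mnt/hdd1/garage-data"));
    assert_eq!(dir, PathBuf::from("/mnt/hdd1/garage-data/ab/cd"));
}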
pub(crate) fn without_secondary_locations(&self) -> Self {
Self {
data_dirs: self.data_dirs.clone(),
part_prim: self.part_prim.clone(),
part_sec: self.part_sec.iter().map(|_| vec![]).collect::<Vec<_>>(),
}
}
}
impl InitialFormat for DataLayout {
const VERSION_MARKER: &'static [u8] = b"G09bmdl";
}
impl DataDir {
pub fn capacity(&self) -> Option<u64> {
match self.state {
DataDirState::Active { capacity } => Some(capacity),
_ => None,
}
}
}
fn make_data_dirs(dirs: &DataDirEnum) -> Result<Vec<DataDir>, Error> {
let mut data_dirs = vec![];
match dirs {
DataDirEnum::Single(path) => data_dirs.push(DataDir {
path: path.clone(),
state: DataDirState::Active {
capacity: 1_000_000_000, // whatever, doesn't matter
},
}),
DataDirEnum::Multiple(dirs) => {
let mut ok = false;
for dir in dirs.iter() {
let state = match &dir.capacity {
Some(cap) if dir.read_only == false => {
let capacity = cap.parse::<bytesize::ByteSize>()
.ok_or_message("invalid capacity value")?.as_u64();
if capacity == 0 {
return Err(Error::Message(format!("data directory {} should have non-zero capacity", dir.path.to_string_lossy())));
}
ok = true;
DataDirState::Active {
capacity,
}
}
None if dir.read_only == true => {
DataDirState::ReadOnly
}
_ => return Err(Error::Message(format!("data directories in data_dir should have a capacity value or be marked read_only, not the case for {}", dir.path.to_string_lossy()))),
};
data_dirs.push(DataDir {
path: dir.path.clone(),
state,
});
}
if !ok {
return Err(Error::Message(
"incorrect data_dir configuration, no primary writable directory specified"
.into(),
));
}
}
}
Ok(data_dirs)
}
fn dir_not_empty(path: &PathBuf) -> Result<bool, Error> {
for entry in std::fs::read_dir(&path)? {
let dir = entry?;
if dir.file_type()?.is_dir()
&& dir
.file_name()
.into_string()
.ok()
.and_then(|hex| hex::decode(&hex).ok())
.is_some()
{
return Ok(true);
}
}
Ok(false)
}

View file

@ -6,6 +6,5 @@ pub mod repair;
pub mod resync; pub mod resync;
mod block; mod block;
mod layout;
mod metrics; mod metrics;
mod rc; mod rc;

View file

@ -3,7 +3,7 @@ use std::pin::Pin;
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use arc_swap::{ArcSwap, ArcSwapOption}; use arc_swap::ArcSwapOption;
use async_trait::async_trait; use async_trait::async_trait;
use bytes::Bytes; use bytes::Bytes;
use rand::prelude::*; use rand::prelude::*;
@ -25,11 +25,10 @@ use garage_rpc::rpc_helper::netapp::stream::{stream_asyncread, ByteStream};
use garage_db as db; use garage_db as db;
use garage_util::background::{vars, BackgroundRunner}; use garage_util::background::{vars, BackgroundRunner};
use garage_util::config::DataDirEnum;
use garage_util::data::*; use garage_util::data::*;
use garage_util::error::*; use garage_util::error::*;
use garage_util::metrics::RecordDuration; use garage_util::metrics::RecordDuration;
use garage_util::persister::{Persister, PersisterShared}; use garage_util::persister::PersisterShared;
use garage_util::time::msec_to_rfc3339; use garage_util::time::msec_to_rfc3339;
use garage_rpc::rpc_helper::OrderTag; use garage_rpc::rpc_helper::OrderTag;
@ -39,7 +38,6 @@ use garage_rpc::*;
use garage_table::replication::{TableReplication, TableShardedReplication}; use garage_table::replication::{TableReplication, TableShardedReplication};
use crate::block::*; use crate::block::*;
use crate::layout::*;
use crate::metrics::*; use crate::metrics::*;
use crate::rc::*; use crate::rc::*;
use crate::repair::*; use crate::repair::*;
@ -79,16 +77,12 @@ impl Rpc for BlockRpc {
pub struct BlockManager { pub struct BlockManager {
/// Replication strategy, allowing to find on which node blocks should be located /// Replication strategy, allowing to find on which node blocks should be located
pub replication: TableShardedReplication, pub replication: TableShardedReplication,
/// Directory in which block are stored
pub data_dir: PathBuf,
/// Data layout
pub(crate) data_layout: ArcSwap<DataLayout>,
/// Data layout persister
pub(crate) data_layout_persister: Persister<DataLayout>,
data_fsync: bool,
compression_level: Option<i32>, compression_level: Option<i32>,
mutation_lock: Vec<Mutex<BlockManagerLocked>>, mutation_lock: [Mutex<BlockManagerLocked>; 256],
pub(crate) rc: BlockRc, pub(crate) rc: BlockRc,
pub resync: BlockResyncManager, pub resync: BlockResyncManager,
@ -111,9 +105,6 @@ pub struct BlockResyncErrorInfo {
pub next_try: u64, pub next_try: u64,
} }
// The number of different mutexes used to parallelize write access to data blocks
const MUTEX_COUNT: usize = 256;
// This custom struct contains functions that must only be run // This custom struct contains functions that must only be run
// when the lock is held. We ensure that it is the case by storing // when the lock is held. We ensure that it is the case by storing
// it INSIDE a Mutex. // it INSIDE a Mutex.
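Both variants above stripe block mutations across 256 mutexes, so writes to different blocks rarely contend while concurrent writes to the same block serialize on one lock. A minimal sketch of the scheme; which hash byte selects the stripe is not shown in this hunk, so byte 0 is an assumption here:

use std::sync::Mutex;

const MUTEX_COUNT: usize = 256;

struct Manager {
    mutation_lock: Vec<Mutex<()>>,
}

impl Manager {
    fn new() -> Self {
        Self {
            mutation_lock: (0..MUTEX_COUNT).map(|_| Mutex::new(())).collect(),
        }
    }

    // Pick the stripe from one byte of the block hash (assumed: byte 0).
    fn lock_mutate(&self, hash: &[u8; 32]) -> std::sync::MutexGuard<'_, ()> {
        self.mutation_lock[hash[0] as usize].lock().unwrap()
    }
}

fn main() {
    let m = Manager::new();
    let h1 = [0x00u8; 32];
    let mut h2 = [0x00u8; 32];
    h2[0] = 0x01;
    let _g1 = m.lock_mutate(&h1);
    let _g2 = m.lock_mutate(&h2); // different stripe: does not block
}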
@ -122,29 +113,11 @@ struct BlockManagerLocked();
impl BlockManager { impl BlockManager {
pub fn new( pub fn new(
db: &db::Db, db: &db::Db,
data_dir: DataDirEnum, data_dir: PathBuf,
data_fsync: bool,
compression_level: Option<i32>, compression_level: Option<i32>,
replication: TableShardedReplication, replication: TableShardedReplication,
system: Arc<System>, system: Arc<System>,
) -> Result<Arc<Self>, Error> { ) -> Arc<Self> {
// Load or compute layout, i.e. assignment of data blocks to the different data directories
let data_layout_persister: Persister<DataLayout> =
Persister::new(&system.metadata_dir, "data_layout");
let data_layout = match data_layout_persister.load() {
Ok(mut layout) => {
layout
.update(&data_dir)
.ok_or_message("invalid data_dir config")?;
layout
}
Err(_) => DataLayout::initialize(&data_dir).ok_or_message("invalid data_dir config")?,
};
data_layout_persister
.save(&data_layout)
.expect("cannot save data_layout");
// Open metadata tables
let rc = db let rc = db
.open_tree("block_local_rc") .open_tree("block_local_rc")
.expect("Unable to open block_local_rc tree"); .expect("Unable to open block_local_rc tree");
@ -167,14 +140,9 @@ impl BlockManager {
let block_manager = Arc::new(Self { let block_manager = Arc::new(Self {
replication, replication,
data_layout: ArcSwap::new(Arc::new(data_layout)), data_dir,
data_layout_persister,
data_fsync,
compression_level, compression_level,
mutation_lock: vec![(); MUTEX_COUNT] mutation_lock: [(); 256].map(|_| Mutex::new(BlockManagerLocked())),
.iter()
.map(|_| Mutex::new(BlockManagerLocked()))
.collect::<Vec<_>>(),
rc, rc,
resync, resync,
system, system,
@ -186,7 +154,7 @@ impl BlockManager {
block_manager.endpoint.set_handler(block_manager.clone()); block_manager.endpoint.set_handler(block_manager.clone());
block_manager.scrub_persister.set_with(|_| ()).unwrap(); block_manager.scrub_persister.set_with(|_| ()).unwrap();
Ok(block_manager) block_manager
} }
pub fn spawn_workers(self: &Arc<Self>, bg: &BackgroundRunner) { pub fn spawn_workers(self: &Arc<Self>, bg: &BackgroundRunner) {
@ -233,37 +201,6 @@ impl BlockManager {
hash: &Hash, hash: &Hash,
order_tag: Option<OrderTag>, order_tag: Option<OrderTag>,
) -> Result<(DataBlockHeader, ByteStream), Error> { ) -> Result<(DataBlockHeader, ByteStream), Error> {
self.rpc_get_raw_block_internal(hash, order_tag, |header, stream| async move {
Ok((header, stream))
})
.await
}
/// Ask nodes that might have a (possibly compressed) block for it
/// Return its entire body
pub(crate) async fn rpc_get_raw_block(
&self,
hash: &Hash,
order_tag: Option<OrderTag>,
) -> Result<DataBlock, Error> {
self.rpc_get_raw_block_internal(hash, order_tag, |header, stream| async move {
read_stream_to_end(stream)
.await
.map(|data| DataBlock::from_parts(header, data))
})
.await
}
async fn rpc_get_raw_block_internal<F, Fut, T>(
&self,
hash: &Hash,
order_tag: Option<OrderTag>,
f: F,
) -> Result<T, Error>
where
F: Fn(DataBlockHeader, ByteStream) -> Fut,
Fut: futures::Future<Output = Result<T, Error>>,
{
let who = self.replication.read_nodes(hash); let who = self.replication.read_nodes(hash);
let who = self.system.rpc.request_order(&who); let who = self.system.rpc.request_order(&who);
@ -279,41 +216,81 @@ impl BlockManager {
let res = match res { let res = match res {
Ok(res) => res, Ok(res) => res,
Err(e) => { Err(e) => {
debug!("Get block {:?}: node {:?} could not be contacted: {}", hash, node, e); debug!("Node {:?} returned error: {}", node, e);
continue; continue;
} }
}; };
let (header, stream) = match res.into_parts() { let (header, stream) = match res.into_parts() {
(Ok(BlockRpc::PutBlock { hash: _, header }), Some(stream)) => (header, stream), (Ok(BlockRpc::PutBlock { hash: _, header }), Some(stream)) => (header, stream),
(Ok(_), _) => { _ => {
debug!("Get block {:?}: node {:?} returned a malformed response", hash, node); debug!("Node {:?} returned a malformed response", node);
continue;
}
(Err(e), _) => {
debug!("Get block {:?}: node {:?} returned error: {}", hash, node, e);
continue; continue;
} }
}; };
match f(header, stream).await { return Ok((header, stream));
Ok(ret) => return Ok(ret),
Err(e) => {
debug!("Get block {:?}: error reading stream from node {:?}: {}", hash, node, e);
} }
}
}
// TODO: sleep less long (fail early), initiate a second request earlier
// if the first one doesn't succeed rapidly
// TODO: keep first request running when initiating a new one and take the
// one that finishes earlier
_ = tokio::time::sleep(self.system.rpc.rpc_timeout()) => { _ = tokio::time::sleep(self.system.rpc.rpc_timeout()) => {
debug!("Get block {:?}: node {:?} didn't return block in time, trying next.", hash, node); debug!("Node {:?} didn't return block in time, trying next.", node);
} }
}; };
} }
let msg = format!("Get block {:?}: no node returned a valid block", hash); Err(Error::Message(format!(
debug!("{}", msg); "Unable to read block {:?}: no node returned a valid block",
Err(Error::Message(msg)) hash
)))
}
/// Ask nodes that might have a (possibly compressed) block for it
/// Return its entire body
pub(crate) async fn rpc_get_raw_block(
&self,
hash: &Hash,
order_tag: Option<OrderTag>,
) -> Result<DataBlock, Error> {
let who = self.replication.read_nodes(hash);
let who = self.system.rpc.request_order(&who);
for node in who.iter() {
let node_id = NodeID::from(*node);
let rpc = self.endpoint.call_streaming(
&node_id,
BlockRpc::GetBlock(*hash, order_tag),
PRIO_NORMAL | PRIO_SECONDARY,
);
tokio::select! {
res = rpc => {
let res = match res {
Ok(res) => res,
Err(e) => {
debug!("Node {:?} returned error: {}", node, e);
continue;
}
};
let (header, stream) = match res.into_parts() {
(Ok(BlockRpc::PutBlock { hash: _, header }), Some(stream)) => (header, stream),
_ => {
debug!("Node {:?} returned a malformed response", node);
continue;
}
};
match read_stream_to_end(stream).await {
Ok(bytes) => return Ok(DataBlock::from_parts(header, bytes)),
Err(e) => {
debug!("Error reading stream from node {:?}: {}", node, e);
}
}
}
_ = tokio::time::sleep(self.system.rpc.rpc_timeout()) => {
debug!("Node {:?} didn't return block in time, trying next.", node);
}
};
}
Err(Error::Message(format!(
"Unable to read block {:?}: no node returned a valid block",
hash
)))
} }
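The loop above walks the candidate nodes returned by read_nodes, bounding each attempt by the RPC timeout before falling through to the next node, and only errors out once every node has been tried; the TODO notes sketch later refinements (failing over sooner, racing a second request). A minimal standalone sketch of that per-node timeout-and-fallback pattern, using tokio and placeholder names (fetch_from and get_with_fallback are illustrative, not Garage APIs):

use std::time::Duration;

// Placeholder for the real streaming RPC to one node (illustrative only).
async fn fetch_from(node: u32, key: u64) -> Result<Vec<u8>, String> {
    let _ = (node, key);
    Err("not implemented in this sketch".into())
}

// Try each candidate node in order, giving each at most `per_node` time,
// and return the first successful answer; error out only when all fail.
async fn get_with_fallback(
    nodes: &[u32],
    key: u64,
    per_node: Duration,
) -> Result<Vec<u8>, String> {
    for &node in nodes {
        match tokio::time::timeout(per_node, fetch_from(node, key)).await {
            Ok(Ok(data)) => return Ok(data),
            Ok(Err(e)) => eprintln!("node {} returned error: {}", node, e),
            Err(_elapsed) => eprintln!("node {} did not answer in time, trying next", node),
        }
    }
    Err(format!("no node returned a valid block for key {:#x}", key))
}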
// ---- Public interface ---- // ---- Public interface ----
@ -491,6 +468,8 @@ impl BlockManager {
pub(crate) async fn write_block(&self, hash: &Hash, data: &DataBlock) -> Result<(), Error> { pub(crate) async fn write_block(&self, hash: &Hash, data: &DataBlock) -> Result<(), Error> {
let tracer = opentelemetry::global::tracer("garage"); let tracer = opentelemetry::global::tracer("garage");
let write_size = data.inner_buffer().len() as u64;
self.lock_mutate(hash) self.lock_mutate(hash)
.await .await
.write_block(hash, data, self) .write_block(hash, data, self)
@ -500,6 +479,8 @@ impl BlockManager {
)) ))
.await?; .await?;
self.metrics.bytes_written.add(write_size);
Ok(()) Ok(())
} }
@ -526,42 +507,36 @@ impl BlockManager {
/// Read block from disk, verifying its integrity /// Read block from disk, verifying its integrity
pub(crate) async fn read_block(&self, hash: &Hash) -> Result<DataBlock, Error> { pub(crate) async fn read_block(&self, hash: &Hash) -> Result<DataBlock, Error> {
let tracer = opentelemetry::global::tracer("garage"); let data = self
async { .read_block_internal(hash)
match self.find_block(hash).await { .bound_record_duration(&self.metrics.block_read_duration)
Some(p) => self.read_block_from(hash, &p).await, .await?;
None => {
self.metrics
.bytes_read
.add(data.inner_buffer().len() as u64);
Ok(data)
}
async fn read_block_internal(&self, hash: &Hash) -> Result<DataBlock, Error> {
let mut path = self.block_path(hash);
let compressed = match self.is_block_compressed(hash).await {
Ok(c) => c,
Err(e) => {
// Not found but maybe we should have had it ?? // Not found but maybe we should have had it ??
self.resync self.resync
.put_to_resync(hash, 2 * self.system.rpc.rpc_timeout())?; .put_to_resync(hash, 2 * self.system.rpc.rpc_timeout())?;
return Err(Error::Message(format!( return Err(Into::into(e));
"block {:?} not found on node",
hash
)));
} }
}
}
.bound_record_duration(&self.metrics.block_read_duration)
.with_context(Context::current_with_span(
tracer.start("BlockManager::read_block"),
))
.await
}
pub(crate) async fn read_block_from(
&self,
hash: &Hash,
block_path: &DataBlockPath,
) -> Result<DataBlock, Error> {
let (path, compressed) = match block_path {
DataBlockPath::Plain(p) => (p, false),
DataBlockPath::Compressed(p) => (p, true),
}; };
if compressed {
path.set_extension("zst");
}
let mut f = fs::File::open(&path).await?; let mut f = fs::File::open(&path).await?;
let mut data = vec![]; let mut data = vec![];
f.read_to_end(&mut data).await?; f.read_to_end(&mut data).await?;
self.metrics.bytes_read.add(data.len() as u64);
drop(f); drop(f);
let data = if compressed { let data = if compressed {
@ -573,27 +548,29 @@ impl BlockManager {
if data.verify(*hash).is_err() { if data.verify(*hash).is_err() {
self.metrics.corruption_counter.add(1); self.metrics.corruption_counter.add(1);
warn!(
"Block {:?} is corrupted. Renaming to .corrupted and resyncing.",
hash
);
self.lock_mutate(hash) self.lock_mutate(hash)
.await .await
.move_block_to_corrupted(block_path) .move_block_to_corrupted(hash, self)
.await?; .await?;
self.resync.put_to_resync(hash, Duration::from_millis(0))?; self.resync.put_to_resync(hash, Duration::from_millis(0))?;
return Err(Error::CorruptData(*hash)); return Err(Error::CorruptData(*hash));
} }
Ok(data) Ok(data)
} }
/// Check if this node has a block and whether it needs it
pub(crate) async fn check_block_status(&self, hash: &Hash) -> Result<BlockStatus, Error> {
self.lock_mutate(hash)
.await
.check_block_status(hash, self)
.await
}
/// Check if this node should have a block, but don't actually have it /// Check if this node should have a block, but don't actually have it
async fn need_block(&self, hash: &Hash) -> Result<bool, Error> { async fn need_block(&self, hash: &Hash) -> Result<bool, Error> {
let rc = self.rc.get_block_rc(hash)?; let BlockStatus { exists, needed } = self.check_block_status(hash).await?;
let exists = self.find_block(hash).await.is_some(); Ok(needed.is_nonzero() && !exists)
Ok(rc.is_nonzero() && !exists)
} }
/// Delete block if it is not needed anymore /// Delete block if it is not needed anymore
@ -604,65 +581,59 @@ impl BlockManager {
.await .await
} }
/// Find the path where a block is currently stored /// Utility: gives the path of the directory in which a block should be found
pub(crate) async fn find_block(&self, hash: &Hash) -> Option<DataBlockPath> { fn block_dir(&self, hash: &Hash) -> PathBuf {
let data_layout = self.data_layout.load_full(); let mut path = self.data_dir.clone();
let dirs = Some(data_layout.primary_block_dir(hash)) path.push(hex::encode(&hash.as_slice()[0..1]));
.into_iter() path.push(hex::encode(&hash.as_slice()[1..2]));
.chain(data_layout.secondary_block_dirs(hash)); path
let filename = hex::encode(hash.as_ref()); }
for dir in dirs { /// Utility: give the full path where a block should be found, minus extension if block is
let mut path = dir; /// compressed
path.push(&filename); fn block_path(&self, hash: &Hash) -> PathBuf {
let mut path = self.block_dir(hash);
path.push(hex::encode(hash.as_ref()));
path
}
/// Utility: check if block is stored compressed. Error if block is not stored
async fn is_block_compressed(&self, hash: &Hash) -> Result<bool, Error> {
let mut path = self.block_path(hash);
if self.compression_level.is_none() {
// If compression is disabled on node - check for the raw block // If compression is disabled on node - check for the raw block
// first and then a compressed one (as compression may have been // first and then a compressed one (as compression may have been
// previously enabled). // previously enabled).
match self.compression_level {
None => {
if fs::metadata(&path).await.is_ok() { if fs::metadata(&path).await.is_ok() {
return Some(DataBlockPath::Plain(path)); return Ok(false);
} }
path.set_extension("zst"); path.set_extension("zst");
if fs::metadata(&path).await.is_ok() {
return Some(DataBlockPath::Compressed(path)); fs::metadata(&path).await.map(|_| true).map_err(Into::into)
} }
} else { _ => {
path.set_extension("zst"); path.set_extension("zst");
if fs::metadata(&path).await.is_ok() { if fs::metadata(&path).await.is_ok() {
return Some(DataBlockPath::Compressed(path)); return Ok(true);
} }
path.set_extension(""); path.set_extension("");
if fs::metadata(&path).await.is_ok() {
return Some(DataBlockPath::Plain(path));
}
}
}
None fs::metadata(&path).await.map(|_| false).map_err(Into::into)
}
} }
/// Rewrite a block at the primary location for its path and delete the old path.
/// Returns the number of bytes read/written
pub(crate) async fn fix_block_location(
&self,
hash: &Hash,
wrong_path: DataBlockPath,
) -> Result<usize, Error> {
self.lock_mutate(hash)
.await
.fix_block_location(hash, wrong_path, self)
.await
} }
async fn lock_mutate(&self, hash: &Hash) -> MutexGuard<'_, BlockManagerLocked> { async fn lock_mutate(&self, hash: &Hash) -> MutexGuard<'_, BlockManagerLocked> {
let tracer = opentelemetry::global::tracer("garage"); let tracer = opentelemetry::global::tracer("garage");
let ilock = u16::from_be_bytes([hash.as_slice()[0], hash.as_slice()[1]]) as usize self.mutation_lock[hash.as_slice()[0] as usize]
% self.mutation_lock.len();
self.mutation_lock[ilock]
.lock() .lock()
.with_context(Context::current_with_span( .with_context(Context::current_with_span(
tracer.start(format!("Acquire mutation_lock #{}", ilock)), tracer.start("Acquire mutation_lock"),
)) ))
.await .await
} }
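In the left-hand (newer) version, lock_mutate derives the mutex index from the first two bytes of the block hash modulo MUTEX_COUNT, instead of indexing a fixed array of 256 locks by the first byte only. A small sketch of that hash-sharded locking idea (the ShardedLock type here is illustrative, not Garage's):

use tokio::sync::{Mutex, MutexGuard};

// A pool of mutexes sharded by key, so writes to unrelated blocks rarely contend.
struct ShardedLock {
    shards: Vec<Mutex<()>>,
}

impl ShardedLock {
    fn new(n: usize) -> Self {
        Self {
            shards: (0..n).map(|_| Mutex::new(())).collect(),
        }
    }

    // Derive the shard index from the first two bytes of a 32-byte hash,
    // mirroring the modulo-based selection in the newer lock_mutate.
    async fn lock(&self, hash: &[u8; 32]) -> MutexGuard<'_, ()> {
        let idx = u16::from_be_bytes([hash[0], hash[1]]) as usize % self.shards.len();
        self.shards[idx].lock().await
    }
}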
@ -675,7 +646,7 @@ impl StreamingEndpointHandler<BlockRpc> for BlockManager {
BlockRpc::PutBlock { hash, header } => Resp::new( BlockRpc::PutBlock { hash, header } => Resp::new(
self.handle_put_block(*hash, *header, message.take_stream()) self.handle_put_block(*hash, *header, message.take_stream())
.await .await
.map(|()| BlockRpc::Ok), .map(|_| BlockRpc::Ok),
), ),
BlockRpc::GetBlock(h, order_tag) => self.handle_get_block(h, *order_tag).await, BlockRpc::GetBlock(h, order_tag) => self.handle_get_block(h, *order_tag).await,
BlockRpc::NeedBlockQuery(h) => { BlockRpc::NeedBlockQuery(h) => {
@ -686,78 +657,66 @@ impl StreamingEndpointHandler<BlockRpc> for BlockManager {
} }
} }
pub(crate) struct BlockStatus {
pub(crate) exists: bool,
pub(crate) needed: RcEntry,
}
impl BlockManagerLocked { impl BlockManagerLocked {
async fn check_block_status(
&self,
hash: &Hash,
mgr: &BlockManager,
) -> Result<BlockStatus, Error> {
let exists = mgr.is_block_compressed(hash).await.is_ok();
let needed = mgr.rc.get_block_rc(hash)?;
Ok(BlockStatus { exists, needed })
}
async fn write_block( async fn write_block(
&self, &self,
hash: &Hash, hash: &Hash,
data: &DataBlock, data: &DataBlock,
mgr: &BlockManager, mgr: &BlockManager,
) -> Result<(), Error> {
let existing_path = mgr.find_block(hash).await;
self.write_block_inner(hash, data, mgr, existing_path).await
}
async fn write_block_inner(
&self,
hash: &Hash,
data: &DataBlock,
mgr: &BlockManager,
existing_path: Option<DataBlockPath>,
) -> Result<(), Error> { ) -> Result<(), Error> {
let compressed = data.is_compressed(); let compressed = data.is_compressed();
let data = data.inner_buffer(); let data = data.inner_buffer();
let directory = mgr.data_layout.load().primary_block_dir(hash); let mut path = mgr.block_dir(hash);
let directory = path.clone();
let mut tgt_path = directory.clone(); path.push(hex::encode(hash));
tgt_path.push(hex::encode(hash));
if compressed {
tgt_path.set_extension("zst");
}
let to_delete = match (existing_path, compressed) {
// If the block is stored in the wrong directory,
// write it again at the correct path and delete the old path
(Some(DataBlockPath::Plain(p)), false) if p != tgt_path => Some(p),
(Some(DataBlockPath::Compressed(p)), true) if p != tgt_path => Some(p),
// If the block is already stored not compressed but we have a compressed
// copy, write the compressed copy and delete the uncompressed one
(Some(DataBlockPath::Plain(plain_path)), true) => Some(plain_path),
// If the block is already stored compressed,
// keep the stored copy, we have nothing to do
(Some(DataBlockPath::Compressed(_)), _) => return Ok(()),
// If the block is already stored not compressed,
// and we don't have a compressed copy either,
// keep the stored copy, we have nothing to do
(Some(DataBlockPath::Plain(_)), false) => return Ok(()),
// If the block isn't stored already, just store what is given to us
(None, _) => None,
};
assert!(to_delete.as_ref() != Some(&tgt_path));
let mut path_tmp = tgt_path.clone();
let tmp_extension = format!("tmp{}", hex::encode(thread_rng().gen::<[u8; 4]>()));
path_tmp.set_extension(tmp_extension);
fs::create_dir_all(&directory).await?; fs::create_dir_all(&directory).await?;
let to_delete = match (mgr.is_block_compressed(hash).await, compressed) {
(Ok(true), _) => return Ok(()),
(Ok(false), false) => return Ok(()),
(Ok(false), true) => {
let path_to_delete = path.clone();
path.set_extension("zst");
Some(path_to_delete)
}
(Err(_), compressed) => {
if compressed {
path.set_extension("zst");
}
None
}
};
let mut path_tmp = path.clone();
let tmp_extension = format!("tmp{}", hex::encode(thread_rng().gen::<[u8; 4]>()));
path_tmp.set_extension(tmp_extension);
let mut delete_on_drop = DeleteOnDrop(Some(path_tmp.clone())); let mut delete_on_drop = DeleteOnDrop(Some(path_tmp.clone()));
let mut f = fs::File::create(&path_tmp).await?; let mut f = fs::File::create(&path_tmp).await?;
f.write_all(data).await?; f.write_all(data).await?;
mgr.metrics.bytes_written.add(data.len() as u64);
if mgr.data_fsync {
f.sync_all().await?; f.sync_all().await?;
}
drop(f); drop(f);
fs::rename(path_tmp, tgt_path).await?; fs::rename(path_tmp, path).await?;
delete_on_drop.cancel(); delete_on_drop.cancel();
@ -765,7 +724,6 @@ impl BlockManagerLocked {
fs::remove_file(to_delete).await?; fs::remove_file(to_delete).await?;
} }
if mgr.data_fsync {
// We want to ensure that when this function returns, data is properly persisted // We want to ensure that when this function returns, data is properly persisted
// to disk. The first step is the sync_all above that does an fsync on the data file. // to disk. The first step is the sync_all above that does an fsync on the data file.
// Now, we do an fsync on the containing directory, to ensure that the rename // Now, we do an fsync on the containing directory, to ensure that the rename
@ -778,54 +736,40 @@ impl BlockManagerLocked {
.await?; .await?;
dir.sync_all().await?; dir.sync_all().await?;
drop(dir); drop(dir);
}
Ok(()) Ok(())
} }
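Both versions of the locked write path rely on the same durability recipe: write into a randomly named .tmp* sibling, fsync it (only when data_fsync is enabled, in the newer code), rename it over the final path, then fsync the containing directory so the rename itself survives a crash. A generic sketch of that write-then-rename pattern with the standard library (POSIX semantics assumed; do_fsync stands in for the data_fsync flag):

use std::fs::{self, File, OpenOptions};
use std::io::{self, Write};
use std::path::Path;

// Durably replace `target` with `data`: write a temporary sibling file,
// flush it, rename it into place, then fsync the parent directory so the
// new directory entry is persisted as well.
fn write_atomically(target: &Path, data: &[u8], do_fsync: bool) -> io::Result<()> {
    let tmp = target.with_extension("tmp");

    let mut f = OpenOptions::new()
        .write(true)
        .create(true)
        .truncate(true)
        .open(&tmp)?;
    f.write_all(data)?;
    if do_fsync {
        f.sync_all()?; // fsync the data file itself
    }
    drop(f);

    fs::rename(&tmp, target)?; // atomic replacement of the final path

    if do_fsync {
        let dir = File::open(target.parent().unwrap_or_else(|| Path::new(".")))?;
        dir.sync_all()?; // fsync the containing directory
    }
    Ok(())
}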
async fn move_block_to_corrupted(&self, block_path: &DataBlockPath) -> Result<(), Error> { async fn move_block_to_corrupted(&self, hash: &Hash, mgr: &BlockManager) -> Result<(), Error> {
let (path, path2) = match block_path { warn!(
DataBlockPath::Plain(p) => { "Block {:?} is corrupted. Renaming to .corrupted and resyncing.",
let mut p2 = p.clone(); hash
p2.set_extension("corrupted"); );
(p, p2) let mut path = mgr.block_path(hash);
let mut path2 = path.clone();
if mgr.is_block_compressed(hash).await? {
path.set_extension("zst");
path2.set_extension("zst.corrupted");
} else {
path2.set_extension("corrupted");
} }
DataBlockPath::Compressed(p) => {
let mut p2 = p.clone();
p2.set_extension("zst.corrupted");
(p, p2)
}
};
fs::rename(path, path2).await?; fs::rename(path, path2).await?;
Ok(()) Ok(())
} }
async fn delete_if_unneeded(&self, hash: &Hash, mgr: &BlockManager) -> Result<(), Error> { async fn delete_if_unneeded(&self, hash: &Hash, mgr: &BlockManager) -> Result<(), Error> {
let rc = mgr.rc.get_block_rc(hash)?; let BlockStatus { exists, needed } = self.check_block_status(hash, mgr).await?;
if rc.is_deletable() {
while let Some(path) = mgr.find_block(hash).await { if exists && needed.is_deletable() {
let path = match path { let mut path = mgr.block_path(hash);
DataBlockPath::Plain(p) | DataBlockPath::Compressed(p) => p, if mgr.is_block_compressed(hash).await? {
}; path.set_extension("zst");
}
fs::remove_file(path).await?; fs::remove_file(path).await?;
mgr.metrics.delete_counter.add(1); mgr.metrics.delete_counter.add(1);
} }
}
Ok(()) Ok(())
} }
async fn fix_block_location(
&self,
hash: &Hash,
wrong_path: DataBlockPath,
mgr: &BlockManager,
) -> Result<usize, Error> {
let data = mgr.read_block_from(hash, &wrong_path).await?;
self.write_block_inner(hash, &data, mgr, Some(wrong_path))
.await?;
Ok(data.inner_buffer().len())
}
} }
async fn read_stream_to_end(mut stream: ByteStream) -> Result<Bytes, Error> { async fn read_stream_to_end(mut stream: ByteStream) -> Result<Bytes, Error> {

View file

@ -56,7 +56,7 @@ impl BlockRc {
/// deletion time has passed /// deletion time has passed
pub(crate) fn clear_deleted_block_rc(&self, hash: &Hash) -> Result<(), Error> { pub(crate) fn clear_deleted_block_rc(&self, hash: &Hash) -> Result<(), Error> {
let now = now_msec(); let now = now_msec();
self.rc.db().transaction(|tx| { self.rc.db().transaction(|mut tx| {
let rcval = RcEntry::parse_opt(tx.get(&self.rc, hash)?); let rcval = RcEntry::parse_opt(tx.get(&self.rc, hash)?);
match rcval { match rcval {
RcEntry::Deletable { at_time } if now > at_time => { RcEntry::Deletable { at_time } if now > at_time => {
@ -64,7 +64,7 @@ impl BlockRc {
} }
_ => (), _ => (),
}; };
Ok(()) tx.commit(())
})?; })?;
Ok(()) Ok(())
} }

View file

@ -17,7 +17,6 @@ use garage_util::persister::PersisterShared;
use garage_util::time::*; use garage_util::time::*;
use garage_util::tranquilizer::Tranquilizer; use garage_util::tranquilizer::Tranquilizer;
use crate::block::*;
use crate::manager::*; use crate::manager::*;
// Full scrub every 25 days with a random element of 10 days mixed in below // Full scrub every 25 days with a random element of 10 days mixed in below
@ -137,7 +136,7 @@ impl Worker for RepairWorker {
// Lists all blocks on disk and adds them to the resync queue. // Lists all blocks on disk and adds them to the resync queue.
// This allows us to find blocks we are storing but don't actually need, // This allows us to find blocks we are storing but don't actually need,
// so that we can offload them if necessary and then delete them locally. // so that we can offload them if necessary and then delete them locally.
if let Some((_path, hash)) = bi.next().await? { if let Some(hash) = bi.next().await? {
self.manager self.manager
.resync .resync
.put_to_resync(&hash, Duration::from_secs(0))?; .put_to_resync(&hash, Duration::from_secs(0))?;
@ -176,9 +175,7 @@ mod v081 {
} }
mod v082 { mod v082 {
use garage_util::data::Hash;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::path::PathBuf;
use super::v081; use super::v081;
@ -188,27 +185,6 @@ mod v082 {
pub(crate) time_last_complete_scrub: u64, pub(crate) time_last_complete_scrub: u64,
pub(crate) time_next_run_scrub: u64, pub(crate) time_next_run_scrub: u64,
pub(crate) corruptions_detected: u64, pub(crate) corruptions_detected: u64,
#[serde(default)]
pub(crate) checkpoint: Option<BlockStoreIterator>,
}
#[derive(Serialize, Deserialize, Clone)]
pub struct BlockStoreIterator {
pub todo: Vec<BsiTodo>,
}
#[derive(Serialize, Deserialize, Clone)]
pub enum BsiTodo {
Directory {
path: PathBuf,
progress_min: u64,
progress_max: u64,
},
File {
path: PathBuf,
hash: Hash,
progress: u64,
},
} }
impl garage_util::migrate::Migrate for ScrubWorkerPersisted { impl garage_util::migrate::Migrate for ScrubWorkerPersisted {
@ -223,7 +199,6 @@ mod v082 {
time_last_complete_scrub: old.time_last_complete_scrub, time_last_complete_scrub: old.time_last_complete_scrub,
time_next_run_scrub: randomize_next_scrub_run_time(old.time_last_complete_scrub), time_next_run_scrub: randomize_next_scrub_run_time(old.time_last_complete_scrub),
corruptions_detected: old.corruptions_detected, corruptions_detected: old.corruptions_detected,
checkpoint: None,
} }
} }
} }
@ -260,23 +235,14 @@ impl Default for ScrubWorkerPersisted {
time_next_run_scrub: randomize_next_scrub_run_time(now_msec()), time_next_run_scrub: randomize_next_scrub_run_time(now_msec()),
tranquility: INITIAL_SCRUB_TRANQUILITY, tranquility: INITIAL_SCRUB_TRANQUILITY,
corruptions_detected: 0, corruptions_detected: 0,
checkpoint: None,
} }
} }
} }
#[derive(Default)] #[derive(Default)]
enum ScrubWorkerState { enum ScrubWorkerState {
Running { Running(BlockStoreIterator),
iterator: BlockStoreIterator, Paused(BlockStoreIterator, u64), // u64 = time when to resume scrub
// time of the last checkpoint
t_cp: u64,
},
Paused {
iterator: BlockStoreIterator,
// time at which the scrub should be resumed
t_resume: u64,
},
#[default] #[default]
Finished, Finished,
} }
@ -295,17 +261,10 @@ impl ScrubWorker {
rx_cmd: mpsc::Receiver<ScrubWorkerCommand>, rx_cmd: mpsc::Receiver<ScrubWorkerCommand>,
persister: PersisterShared<ScrubWorkerPersisted>, persister: PersisterShared<ScrubWorkerPersisted>,
) -> Self { ) -> Self {
let work = match persister.get_with(|x| x.checkpoint.clone()) {
None => ScrubWorkerState::Finished,
Some(iterator) => ScrubWorkerState::Running {
iterator,
t_cp: now_msec(),
},
};
Self { Self {
manager, manager,
rx_cmd, rx_cmd,
work, work: ScrubWorkerState::Finished,
tranquilizer: Tranquilizer::new(30), tranquilizer: Tranquilizer::new(30),
persister, persister,
} }
@ -318,16 +277,7 @@ impl ScrubWorker {
ScrubWorkerState::Finished => { ScrubWorkerState::Finished => {
info!("Scrub worker initializing, now performing datastore scrub"); info!("Scrub worker initializing, now performing datastore scrub");
let iterator = BlockStoreIterator::new(&self.manager); let iterator = BlockStoreIterator::new(&self.manager);
if let Err(e) = self ScrubWorkerState::Running(iterator)
.persister
.set_with(|x| x.checkpoint = Some(iterator.clone()))
{
error!("Could not save scrub checkpoint: {}", e);
}
ScrubWorkerState::Running {
iterator,
t_cp: now_msec(),
}
} }
work => { work => {
error!("Cannot start scrub worker: already running!"); error!("Cannot start scrub worker: already running!");
@ -337,18 +287,8 @@ impl ScrubWorker {
} }
ScrubWorkerCommand::Pause(dur) => { ScrubWorkerCommand::Pause(dur) => {
self.work = match std::mem::take(&mut self.work) { self.work = match std::mem::take(&mut self.work) {
ScrubWorkerState::Running { iterator, .. } ScrubWorkerState::Running(it) | ScrubWorkerState::Paused(it, _) => {
| ScrubWorkerState::Paused { iterator, .. } => { ScrubWorkerState::Paused(it, now_msec() + dur.as_millis() as u64)
if let Err(e) = self
.persister
.set_with(|x| x.checkpoint = Some(iterator.clone()))
{
error!("Could not save scrub checkpoint: {}", e);
}
ScrubWorkerState::Paused {
iterator,
t_resume: now_msec() + dur.as_millis() as u64,
}
} }
work => { work => {
error!("Cannot pause scrub worker: not running!"); error!("Cannot pause scrub worker: not running!");
@ -358,10 +298,7 @@ impl ScrubWorker {
} }
ScrubWorkerCommand::Resume => { ScrubWorkerCommand::Resume => {
self.work = match std::mem::take(&mut self.work) { self.work = match std::mem::take(&mut self.work) {
ScrubWorkerState::Paused { iterator, .. } => ScrubWorkerState::Running { ScrubWorkerState::Paused(it, _) => ScrubWorkerState::Running(it),
iterator,
t_cp: now_msec(),
},
work => { work => {
error!("Cannot resume scrub worker: not paused!"); error!("Cannot resume scrub worker: not paused!");
work work
@ -370,10 +307,7 @@ impl ScrubWorker {
} }
ScrubWorkerCommand::Cancel => { ScrubWorkerCommand::Cancel => {
self.work = match std::mem::take(&mut self.work) { self.work = match std::mem::take(&mut self.work) {
ScrubWorkerState::Running { .. } | ScrubWorkerState::Paused { .. } => { ScrubWorkerState::Running(_) | ScrubWorkerState::Paused(_, _) => {
if let Err(e) = self.persister.set_with(|x| x.checkpoint = None) {
error!("Could not save scrub checkpoint: {}", e);
}
ScrubWorkerState::Finished ScrubWorkerState::Finished
} }
work => { work => {
@ -409,15 +343,12 @@ impl Worker for ScrubWorker {
..Default::default() ..Default::default()
}; };
match &self.work { match &self.work {
ScrubWorkerState::Running { iterator, .. } => { ScrubWorkerState::Running(bsi) => {
s.progress = Some(format!("{:.2}%", iterator.progress() * 100.)); s.progress = Some(format!("{:.2}%", bsi.progress() * 100.));
} }
ScrubWorkerState::Paused { iterator, t_resume } => { ScrubWorkerState::Paused(bsi, rt) => {
s.progress = Some(format!("{:.2}%", iterator.progress() * 100.)); s.progress = Some(format!("{:.2}%", bsi.progress() * 100.));
s.freeform = vec![format!( s.freeform = vec![format!("Scrub paused, resumes at {}", msec_to_rfc3339(*rt))];
"Scrub paused, resumes at {}",
msec_to_rfc3339(*t_resume)
)];
} }
ScrubWorkerState::Finished => { ScrubWorkerState::Finished => {
s.freeform = vec![ s.freeform = vec![
@ -443,11 +374,9 @@ impl Worker for ScrubWorker {
}; };
match &mut self.work { match &mut self.work {
ScrubWorkerState::Running { iterator, t_cp } => { ScrubWorkerState::Running(bsi) => {
self.tranquilizer.reset(); self.tranquilizer.reset();
let now = now_msec(); if let Some(hash) = bsi.next().await? {
if let Some((_path, hash)) = iterator.next().await? {
match self.manager.read_block(&hash).await { match self.manager.read_block(&hash).await {
Err(Error::CorruptData(_)) => { Err(Error::CorruptData(_)) => {
error!("Found corrupt data block during scrub: {:?}", hash); error!("Found corrupt data block during scrub: {:?}", hash);
@ -456,23 +385,16 @@ impl Worker for ScrubWorker {
Err(e) => return Err(e), Err(e) => return Err(e),
_ => (), _ => (),
}; };
if now - *t_cp > 60 * 1000 {
self.persister
.set_with(|p| p.checkpoint = Some(iterator.clone()))?;
*t_cp = now;
}
Ok(self Ok(self
.tranquilizer .tranquilizer
.tranquilize_worker(self.persister.get_with(|p| p.tranquility))) .tranquilize_worker(self.persister.get_with(|p| p.tranquility)))
} else { } else {
let now = now_msec();
let next_scrub_timestamp = randomize_next_scrub_run_time(now); let next_scrub_timestamp = randomize_next_scrub_run_time(now);
self.persister.set_with(|p| { self.persister.set_with(|p| {
p.time_last_complete_scrub = now; p.time_last_complete_scrub = now;
p.time_next_run_scrub = next_scrub_timestamp; p.time_next_run_scrub = next_scrub_timestamp;
p.checkpoint = None;
})?; })?;
self.work = ScrubWorkerState::Finished; self.work = ScrubWorkerState::Finished;
self.tranquilizer.clear(); self.tranquilizer.clear();
@ -491,8 +413,8 @@ impl Worker for ScrubWorker {
async fn wait_for_work(&mut self) -> WorkerState { async fn wait_for_work(&mut self) -> WorkerState {
let (wait_until, command) = match &self.work { let (wait_until, command) = match &self.work {
ScrubWorkerState::Running { .. } => return WorkerState::Busy, ScrubWorkerState::Running(_) => return WorkerState::Busy,
ScrubWorkerState::Paused { t_resume, .. } => (*t_resume, ScrubWorkerCommand::Resume), ScrubWorkerState::Paused(_, resume_time) => (*resume_time, ScrubWorkerCommand::Resume),
ScrubWorkerState::Finished => ( ScrubWorkerState::Finished => (
self.persister.get_with(|p| p.time_next_run_scrub), self.persister.get_with(|p| p.time_next_run_scrub),
ScrubWorkerCommand::Start, ScrubWorkerCommand::Start,
@ -515,250 +437,110 @@ impl Worker for ScrubWorker {
} }
match &self.work { match &self.work {
ScrubWorkerState::Running { .. } => WorkerState::Busy, ScrubWorkerState::Running(_) => WorkerState::Busy,
_ => WorkerState::Idle, _ => WorkerState::Idle,
} }
} }
} }
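The newer scrub worker keeps a t_cp timestamp and persists its iterator as a checkpoint at most once per minute, so a restarted daemon resumes roughly where the previous scrub stopped instead of starting over. A minimal sketch of that periodic-checkpoint pattern for a long scan (the save callback is a placeholder, not Garage's persister):

use std::time::{Duration, Instant};

// Walk `items`, processing each one, and call `save` with the next resume
// position at most once per `interval`, plus once at the very end.
fn scan_with_checkpoints<T>(
    items: &[T],
    interval: Duration,
    mut process: impl FnMut(&T),
    mut save: impl FnMut(usize),
) {
    let mut last_checkpoint = Instant::now();
    for (pos, item) in items.iter().enumerate() {
        process(item);
        if last_checkpoint.elapsed() > interval {
            save(pos + 1); // index to resume from after a restart
            last_checkpoint = Instant::now();
        }
    }
    save(items.len()); // final checkpoint: scan complete
}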
// ---- ---- ----
// THIRD KIND OF REPAIR: REBALANCING DATA BLOCKS
// between multiple storage locations.
// This is a one-shot repair operation that can be launched,
// checks everything, and then exits.
// ---- ---- ----
pub struct RebalanceWorker {
manager: Arc<BlockManager>,
block_iter: BlockStoreIterator,
t_started: u64,
t_finished: Option<u64>,
moved: usize,
moved_bytes: u64,
}
impl RebalanceWorker {
pub fn new(manager: Arc<BlockManager>) -> Self {
let block_iter = BlockStoreIterator::new(&manager);
Self {
manager,
block_iter,
t_started: now_msec(),
t_finished: None,
moved: 0,
moved_bytes: 0,
}
}
}
#[async_trait]
impl Worker for RebalanceWorker {
fn name(&self) -> String {
"Block rebalance worker".into()
}
fn status(&self) -> WorkerStatus {
let t_cur = self.t_finished.unwrap_or_else(|| now_msec());
let rate = self.moved_bytes / std::cmp::max(1, (t_cur - self.t_started) / 1000);
let mut freeform = vec![
format!("Blocks moved: {}", self.moved),
format!(
"Bytes moved: {} ({}/s)",
bytesize::ByteSize::b(self.moved_bytes),
bytesize::ByteSize::b(rate)
),
format!("Started: {}", msec_to_rfc3339(self.t_started)),
];
if let Some(t_fin) = self.t_finished {
freeform.push(format!("Finished: {}", msec_to_rfc3339(t_fin)))
}
WorkerStatus {
progress: Some(format!("{:.2}%", self.block_iter.progress() * 100.)),
freeform,
..Default::default()
}
}
async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
if let Some((path, hash)) = self.block_iter.next().await? {
let prim_loc = self.manager.data_layout.load().primary_block_dir(&hash);
if path.ancestors().all(|x| x != prim_loc) {
let block_path = match path.extension() {
None => DataBlockPath::Plain(path.clone()),
Some(x) if x.to_str() == Some("zst") => DataBlockPath::Compressed(path.clone()),
_ => {
warn!("not rebalancing file: {}", path.to_string_lossy());
return Ok(WorkerState::Busy);
}
};
// block is not in its primary location,
// move it there (reading and re-writing does the trick)
debug!("rebalance: moving block {:?} => {:?}", block_path, prim_loc);
let block_len = self.manager.fix_block_location(&hash, block_path).await?;
self.moved += 1;
self.moved_bytes += block_len as u64;
}
Ok(WorkerState::Busy)
} else {
// all blocks are in their primary location:
// - the ones we moved now are
// - the ones written in the meantime always were, because we only
// write to primary locations
// so we can safely remove all secondary locations from the data layout
let new_layout = self
.manager
.data_layout
.load_full()
.without_secondary_locations();
self.manager
.data_layout_persister
.save_async(&new_layout)
.await?;
self.manager.data_layout.store(Arc::new(new_layout));
self.t_finished = Some(now_msec());
Ok(WorkerState::Done)
}
}
async fn wait_for_work(&mut self) -> WorkerState {
unreachable!()
}
}
// ---- ---- ---- // ---- ---- ----
// UTILITY FOR ENUMERATING THE BLOCK STORE // UTILITY FOR ENUMERATING THE BLOCK STORE
// ---- ---- ---- // ---- ---- ----
const PROGRESS_FP: u64 = 1_000_000_000; struct BlockStoreIterator {
path: Vec<ReadingDir>,
}
enum ReadingDir {
Pending(PathBuf),
Read {
subpaths: Vec<fs::DirEntry>,
pos: usize,
},
}
impl BlockStoreIterator { impl BlockStoreIterator {
fn new(manager: &BlockManager) -> Self { fn new(manager: &BlockManager) -> Self {
let data_layout = manager.data_layout.load_full(); let root_dir = manager.data_dir.clone();
Self {
let mut dir_cap = vec![0; data_layout.data_dirs.len()]; path: vec![ReadingDir::Pending(root_dir)],
for prim in data_layout.part_prim.iter() {
dir_cap[*prim as usize] += 1;
} }
for sec_vec in data_layout.part_sec.iter() {
for sec in sec_vec.iter() {
dir_cap[*sec as usize] += 1;
}
}
let sum_cap = dir_cap.iter().sum::<usize>() as u64;
let mut cum_cap = 0;
let mut todo = vec![];
for (dir, cap) in data_layout.data_dirs.iter().zip(dir_cap.into_iter()) {
let progress_min = (cum_cap * PROGRESS_FP) / sum_cap;
let progress_max = ((cum_cap + cap as u64) * PROGRESS_FP) / sum_cap;
cum_cap += cap as u64;
todo.push(BsiTodo::Directory {
path: dir.path.clone(),
progress_min,
progress_max,
});
}
// entries are processed back-to-front (because of .pop()),
// so reverse entries to process them in increasing progress bounds
todo.reverse();
let ret = Self { todo };
debug_assert!(ret.progress_invariant());
ret
} }
/// Returns progress done, between 0 and 1 /// Returns progress done, between 0 and 1
fn progress(&self) -> f32 { fn progress(&self) -> f32 {
self.todo if self.path.is_empty() {
.last() 1.0
.map(|x| match x { } else {
BsiTodo::Directory { progress_min, .. } => *progress_min, let mut ret = 0.0;
BsiTodo::File { progress, .. } => *progress, let mut next_div = 1;
}) for p in self.path.iter() {
.map(|x| x as f32 / PROGRESS_FP as f32) match p {
.unwrap_or(1.0) ReadingDir::Pending(_) => break,
ReadingDir::Read { subpaths, pos } => {
next_div *= subpaths.len();
ret += ((*pos - 1) as f32) / (next_div as f32);
}
}
}
ret
}
} }
async fn next(&mut self) -> Result<Option<(PathBuf, Hash)>, Error> { async fn next(&mut self) -> Result<Option<Hash>, Error> {
loop { loop {
match self.todo.pop() { let last_path = match self.path.last_mut() {
None => return Ok(None), None => return Ok(None),
Some(BsiTodo::Directory { Some(lp) => lp,
path, };
progress_min,
progress_max,
}) => {
let istart = self.todo.len();
if let ReadingDir::Pending(path) = last_path {
let mut reader = fs::read_dir(&path).await?; let mut reader = fs::read_dir(&path).await?;
let mut subpaths = vec![];
while let Some(ent) = reader.next_entry().await? { while let Some(ent) = reader.next_entry().await? {
let name = if let Ok(n) = ent.file_name().into_string() { subpaths.push(ent);
}
*last_path = ReadingDir::Read { subpaths, pos: 0 };
}
let (subpaths, pos) = match *last_path {
ReadingDir::Read {
ref subpaths,
ref mut pos,
} => (subpaths, pos),
ReadingDir::Pending(_) => unreachable!(),
};
let data_dir_ent = match subpaths.get(*pos) {
None => {
self.path.pop();
continue;
}
Some(ent) => {
*pos += 1;
ent
}
};
let name = data_dir_ent.file_name();
let name = if let Ok(n) = name.into_string() {
n n
} else { } else {
continue; continue;
}; };
let ft = ent.file_type().await?; let ent_type = data_dir_ent.file_type().await?;
if ft.is_dir() && hex::decode(&name).is_ok() {
self.todo.push(BsiTodo::Directory { let name = name.strip_suffix(".zst").unwrap_or(&name);
path: ent.path(), if name.len() == 2 && hex::decode(name).is_ok() && ent_type.is_dir() {
progress_min: 0, let path = data_dir_ent.path();
progress_max: 0, self.path.push(ReadingDir::Pending(path));
}); } else if name.len() == 64 {
} else if ft.is_file() { if let Ok(h) = hex::decode(name) {
let filename = name.split_once('.').map(|(f, _)| f).unwrap_or(&name);
if filename.len() == 64 {
if let Ok(h) = hex::decode(filename) {
let mut hash = [0u8; 32]; let mut hash = [0u8; 32];
hash.copy_from_slice(&h); hash.copy_from_slice(&h);
self.todo.push(BsiTodo::File { return Ok(Some(hash.into()));
path: ent.path(),
hash: hash.into(),
progress: 0,
});
} }
} }
} }
} }
let count = self.todo.len() - istart;
for (i, ent) in self.todo[istart..].iter_mut().enumerate() {
let p1 = progress_min
+ ((progress_max - progress_min) * i as u64) / count as u64;
let p2 = progress_min
+ ((progress_max - progress_min) * (i + 1) as u64) / count as u64;
match ent {
BsiTodo::Directory {
progress_min,
progress_max,
..
} => {
*progress_min = p1;
*progress_max = p2;
}
BsiTodo::File { progress, .. } => {
*progress = p1;
}
}
}
self.todo[istart..].reverse();
debug_assert!(self.progress_invariant());
}
Some(BsiTodo::File { path, hash, .. }) => {
return Ok(Some((path, hash)));
}
}
}
}
// for debug_assert!
fn progress_invariant(&self) -> bool {
let iter = self.todo.iter().map(|x| match x {
BsiTodo::Directory { progress_min, .. } => progress_min,
BsiTodo::File { progress, .. } => progress,
});
let iter_1 = iter.clone().skip(1);
iter.zip(iter_1).all(|(prev, next)| prev >= next)
}
} }
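In the newer iterator, every todo entry carries progress_min/progress_max bounds expressed as fixed-point fractions of PROGRESS_FP, and reading a directory subdivides its parent range evenly among the entries found inside it, which is what progress_invariant checks. A standalone sketch of that subdivision (same fixed-point constant, simplified to plain index ranges):

// Fixed-point denominator used for progress bounds, as in the diff above.
const PROGRESS_FP: u64 = 1_000_000_000;

// Split a parent entry's progress range [min, max) into `count` equal
// sub-ranges, one per child entry discovered inside it.
fn split_progress(min: u64, max: u64, count: u64) -> Vec<(u64, u64)> {
    (0..count)
        .map(|i| {
            let lo = min + (max - min) * i / count;
            let hi = min + (max - min) * (i + 1) / count;
            (lo, hi)
        })
        .collect()
}

fn main() {
    // The whole store spans [0, PROGRESS_FP); split it among four data dirs.
    for (i, (lo, hi)) in split_progress(0, PROGRESS_FP, 4).iter().enumerate() {
        println!(
            "dir {}: {:.1}% .. {:.1}%",
            i,
            *lo as f64 / PROGRESS_FP as f64 * 100.0,
            *hi as f64 / PROGRESS_FP as f64 * 100.0
        );
    }
}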

View file

@ -41,7 +41,7 @@ pub(crate) const RESYNC_RETRY_DELAY: Duration = Duration::from_secs(60);
pub(crate) const RESYNC_RETRY_DELAY_MAX_BACKOFF_POWER: u64 = 6; pub(crate) const RESYNC_RETRY_DELAY_MAX_BACKOFF_POWER: u64 = 6;
// No more than 8 resync workers can be running in the system // No more than 4 resync workers can be running in the system
pub(crate) const MAX_RESYNC_WORKERS: usize = 8; pub(crate) const MAX_RESYNC_WORKERS: usize = 4;
// Resync tranquility is initially set to 2, but can be changed in the CLI // Resync tranquility is initially set to 2, but can be changed in the CLI
// and the updated version is persisted over Garage restarts // and the updated version is persisted over Garage restarts
const INITIAL_RESYNC_TRANQUILITY: u32 = 2; const INITIAL_RESYNC_TRANQUILITY: u32 = 2;
@ -359,23 +359,20 @@ impl BlockResyncManager {
} }
async fn resync_block(&self, manager: &BlockManager, hash: &Hash) -> Result<(), Error> { async fn resync_block(&self, manager: &BlockManager, hash: &Hash) -> Result<(), Error> {
let existing_path = manager.find_block(hash).await; let BlockStatus { exists, needed } = manager.check_block_status(hash).await?;
let exists = existing_path.is_some();
let rc = manager.rc.get_block_rc(hash)?;
if exists != rc.is_needed() || exists != rc.is_nonzero() { if exists != needed.is_needed() || exists != needed.is_nonzero() {
debug!( debug!(
"Resync block {:?}: exists {}, nonzero rc {}, deletable {}", "Resync block {:?}: exists {}, nonzero rc {}, deletable {}",
hash, hash,
exists, exists,
rc.is_nonzero(), needed.is_nonzero(),
rc.is_deletable(), needed.is_deletable(),
); );
} }
if exists && rc.is_deletable() { if exists && needed.is_deletable() {
info!("Resync block {:?}: offloading and deleting", hash); info!("Resync block {:?}: offloading and deleting", hash);
let existing_path = existing_path.unwrap();
let mut who = manager.replication.write_nodes(hash); let mut who = manager.replication.write_nodes(hash);
if who.len() < manager.replication.write_quorum() { if who.len() < manager.replication.write_quorum() {
@ -422,7 +419,7 @@ impl BlockResyncManager {
.add(1, &[KeyValue::new("to", format!("{:?}", node))]); .add(1, &[KeyValue::new("to", format!("{:?}", node))]);
} }
let block = manager.read_block_from(hash, &existing_path).await?; let block = manager.read_block(hash).await?;
let (header, bytes) = block.into_parts(); let (header, bytes) = block.into_parts();
let put_block_message = Req::new(BlockRpc::PutBlock { let put_block_message = Req::new(BlockRpc::PutBlock {
hash: *hash, hash: *hash,
@ -454,7 +451,7 @@ impl BlockResyncManager {
manager.rc.clear_deleted_block_rc(hash)?; manager.rc.clear_deleted_block_rc(hash)?;
} }
if rc.is_nonzero() && !exists { if needed.is_nonzero() && !exists {
info!( info!(
"Resync block {:?}: fetching absent but needed block (refcount > 0)", "Resync block {:?}: fetching absent but needed block (refcount > 0)",
hash hash

View file

@ -1,6 +1,6 @@
[package] [package]
name = "garage_db" name = "garage_db"
version = "0.9.0" version = "0.8.4"
authors = ["Alex Auvolat <alex@adnab.me>"] authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"
@ -11,6 +11,11 @@ readme = "../../README.md"
[lib] [lib]
path = "lib.rs" path = "lib.rs"
[[bin]]
name = "convert"
path = "bin/convert.rs"
required-features = ["cli"]
[dependencies] [dependencies]
err-derive = "0.3" err-derive = "0.3"
hexdump = "0.1" hexdump = "0.1"
@ -28,7 +33,7 @@ pretty_env_logger = { version = "0.5", optional = true }
mktemp = "0.5" mktemp = "0.5"
[features] [features]
default = [ "sled", "lmdb", "sqlite" ] default = [ "sled" ]
bundled-libs = [ "rusqlite?/bundled" ] bundled-libs = [ "rusqlite?/bundled" ]
cli = ["clap", "pretty_env_logger"] cli = ["clap", "pretty_env_logger"]
lmdb = [ "heed" ] lmdb = [ "heed" ]

src/db/bin/convert.rs (new file, 69 lines)
View file

@ -0,0 +1,69 @@
use std::path::PathBuf;
use garage_db::*;
use clap::Parser;
/// Database conversion command line interface
#[derive(Parser, Debug)]
#[clap(author, version, about, long_about = None)]
struct Args {
/// Input DB path
#[clap(short = 'i')]
input_path: PathBuf,
/// Input DB engine
#[clap(short = 'a')]
input_engine: String,
/// Output DB path
#[clap(short = 'o')]
output_path: PathBuf,
/// Output DB engine
#[clap(short = 'b')]
output_engine: String,
}
fn main() {
let args = Args::parse();
pretty_env_logger::init();
match do_conversion(args) {
Ok(()) => println!("Success!"),
Err(e) => eprintln!("Error: {}", e),
}
}
fn do_conversion(args: Args) -> Result<()> {
let input = open_db(args.input_path, args.input_engine)?;
let output = open_db(args.output_path, args.output_engine)?;
output.import(&input)?;
Ok(())
}
fn open_db(path: PathBuf, engine: String) -> Result<Db> {
match engine.as_str() {
"sled" => {
let db = sled_adapter::sled::Config::default().path(&path).open()?;
Ok(sled_adapter::SledDb::init(db))
}
"sqlite" | "sqlite3" | "rusqlite" => {
let db = sqlite_adapter::rusqlite::Connection::open(&path)?;
Ok(sqlite_adapter::SqliteDb::init(db))
}
"lmdb" | "heed" => {
std::fs::create_dir_all(&path).map_err(|e| {
Error(format!("Unable to create LMDB data directory: {}", e).into())
})?;
let map_size = lmdb_adapter::recommended_map_size();
let db = lmdb_adapter::heed::EnvOpenOptions::new()
.max_dbs(100)
.map_size(map_size)
.open(&path)
.unwrap();
Ok(lmdb_adapter::LmdbDb::init(db))
}
e => Err(Error(format!("Invalid DB engine: {}", e).into())),
}
}

View file

@ -85,7 +85,7 @@ impl CountedTree {
let old_some = expected_old.is_some(); let old_some = expected_old.is_some();
let new_some = new.is_some(); let new_some = new.is_some();
let tx_res = self.0.tree.db().transaction(|tx| { let tx_res = self.0.tree.db().transaction(|mut tx| {
let old_val = tx.get(&self.0.tree, &key)?; let old_val = tx.get(&self.0.tree, &key)?;
let is_same = match (&old_val, &expected_old) { let is_same = match (&old_val, &expected_old) {
(None, None) => true, (None, None) => true,
@ -101,9 +101,9 @@ impl CountedTree {
tx.remove(&self.0.tree, &key)?; tx.remove(&self.0.tree, &key)?;
} }
} }
Ok(()) tx.commit(())
} else { } else {
Err(TxError::Abort(())) tx.abort(())
} }
}); });

View file

@ -2,6 +2,9 @@
#[cfg(feature = "sqlite")] #[cfg(feature = "sqlite")]
extern crate tracing; extern crate tracing;
#[cfg(not(any(feature = "lmdb", feature = "sled", feature = "sqlite")))]
compile_error!("Must activate the Cargo feature for at least one DB engine: lmdb, sled or sqlite.");
#[cfg(feature = "lmdb")] #[cfg(feature = "lmdb")]
pub mod lmdb_adapter; pub mod lmdb_adapter;
#[cfg(feature = "sled")] #[cfg(feature = "sled")]
@ -22,15 +25,10 @@ use std::sync::Arc;
use err_derive::Error; use err_derive::Error;
pub(crate) type OnCommit = Vec<Box<dyn FnOnce()>>;
#[derive(Clone)] #[derive(Clone)]
pub struct Db(pub(crate) Arc<dyn IDb>); pub struct Db(pub(crate) Arc<dyn IDb>);
pub struct Transaction<'a> { pub struct Transaction<'a>(&'a mut dyn ITx);
tx: &'a mut dyn ITx,
on_commit: OnCommit,
}
#[derive(Clone)] #[derive(Clone)]
pub struct Tree(Arc<dyn IDb>, usize); pub struct Tree(Arc<dyn IDb>, usize);
@ -90,7 +88,7 @@ impl Db {
pub fn transaction<R, E, F>(&self, fun: F) -> TxResult<R, E> pub fn transaction<R, E, F>(&self, fun: F) -> TxResult<R, E>
where where
F: Fn(&mut Transaction<'_>) -> TxResult<R, E>, F: Fn(Transaction<'_>) -> TxResult<R, E>,
{ {
let f = TxFn { let f = TxFn {
function: fun, function: fun,
@ -103,17 +101,14 @@ impl Db {
.expect("Transaction did not store result"); .expect("Transaction did not store result");
match tx_res { match tx_res {
Ok(on_commit) => match ret { Ok(()) => {
Ok(value) => { assert!(matches!(ret, Ok(_)));
on_commit.into_iter().for_each(|f| f()); ret
Ok(value) }
Err(TxError::Abort(())) => {
assert!(matches!(ret, Err(TxError::Abort(_))));
ret
} }
_ => unreachable!(),
},
Err(TxError::Abort(())) => match ret {
Err(TxError::Abort(e)) => Err(TxError::Abort(e)),
_ => unreachable!(),
},
Err(TxError::Db(e2)) => match ret { Err(TxError::Db(e2)) => match ret {
// Ok was stored -> the error occurred when finalizing // Ok was stored -> the error occurred when finalizing
// transaction // transaction
@ -147,7 +142,7 @@ impl Db {
let ex_tree = other.open_tree(&name)?; let ex_tree = other.open_tree(&name)?;
let tx_res = self.transaction(|tx| { let tx_res = self.transaction(|mut tx| {
let mut i = 0; let mut i = 0;
for item in ex_tree.iter().map_err(TxError::Abort)? { for item in ex_tree.iter().map_err(TxError::Abort)? {
let (k, v) = item.map_err(TxError::Abort)?; let (k, v) = item.map_err(TxError::Abort)?;
@ -157,7 +152,7 @@ impl Db {
println!("{}: imported {}", name, i); println!("{}: imported {}", name, i);
} }
} }
Ok(i) tx.commit(i)
}); });
let total = match tx_res { let total = match tx_res {
Err(TxError::Db(e)) => return Err(e), Err(TxError::Db(e)) => return Err(e),
@ -257,11 +252,11 @@ impl Tree {
impl<'a> Transaction<'a> { impl<'a> Transaction<'a> {
#[inline] #[inline]
pub fn get<T: AsRef<[u8]>>(&self, tree: &Tree, key: T) -> TxOpResult<Option<Value>> { pub fn get<T: AsRef<[u8]>>(&self, tree: &Tree, key: T) -> TxOpResult<Option<Value>> {
self.tx.get(tree.1, key.as_ref()) self.0.get(tree.1, key.as_ref())
} }
#[inline] #[inline]
pub fn len(&self, tree: &Tree) -> TxOpResult<usize> { pub fn len(&self, tree: &Tree) -> TxOpResult<usize> {
self.tx.len(tree.1) self.0.len(tree.1)
} }
/// Returns the old value if there was one /// Returns the old value if there was one
@ -272,21 +267,21 @@ impl<'a> Transaction<'a> {
key: T, key: T,
value: U, value: U,
) -> TxOpResult<Option<Value>> { ) -> TxOpResult<Option<Value>> {
self.tx.insert(tree.1, key.as_ref(), value.as_ref()) self.0.insert(tree.1, key.as_ref(), value.as_ref())
} }
/// Returns the old value if there was one /// Returns the old value if there was one
#[inline] #[inline]
pub fn remove<T: AsRef<[u8]>>(&mut self, tree: &Tree, key: T) -> TxOpResult<Option<Value>> { pub fn remove<T: AsRef<[u8]>>(&mut self, tree: &Tree, key: T) -> TxOpResult<Option<Value>> {
self.tx.remove(tree.1, key.as_ref()) self.0.remove(tree.1, key.as_ref())
} }
#[inline] #[inline]
pub fn iter(&self, tree: &Tree) -> TxOpResult<TxValueIter<'_>> { pub fn iter(&self, tree: &Tree) -> TxOpResult<TxValueIter<'_>> {
self.tx.iter(tree.1) self.0.iter(tree.1)
} }
#[inline] #[inline]
pub fn iter_rev(&self, tree: &Tree) -> TxOpResult<TxValueIter<'_>> { pub fn iter_rev(&self, tree: &Tree) -> TxOpResult<TxValueIter<'_>> {
self.tx.iter_rev(tree.1) self.0.iter_rev(tree.1)
} }
#[inline] #[inline]
@ -297,7 +292,7 @@ impl<'a> Transaction<'a> {
{ {
let sb = range.start_bound(); let sb = range.start_bound();
let eb = range.end_bound(); let eb = range.end_bound();
self.tx.range(tree.1, get_bound(sb), get_bound(eb)) self.0.range(tree.1, get_bound(sb), get_bound(eb))
} }
#[inline] #[inline]
pub fn range_rev<K, R>(&self, tree: &Tree, range: R) -> TxOpResult<TxValueIter<'_>> pub fn range_rev<K, R>(&self, tree: &Tree, range: R) -> TxOpResult<TxValueIter<'_>>
@ -307,12 +302,19 @@ impl<'a> Transaction<'a> {
{ {
let sb = range.start_bound(); let sb = range.start_bound();
let eb = range.end_bound(); let eb = range.end_bound();
self.tx.range_rev(tree.1, get_bound(sb), get_bound(eb)) self.0.range_rev(tree.1, get_bound(sb), get_bound(eb))
}
// ----
#[inline]
pub fn abort<R, E>(self, e: E) -> TxResult<R, E> {
Err(TxError::Abort(e))
} }
#[inline] #[inline]
pub fn on_commit<F: FnOnce() + 'static>(&mut self, f: F) { pub fn commit<R, E>(self, r: R) -> TxResult<R, E> {
self.on_commit.push(Box::new(f)); Ok(r)
} }
} }
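The newer Transaction carries an on_commit list of closures; the engine adapters return it through TxFnResult::Ok(on_commit) and the hooks run only once the underlying transaction has actually committed. A reduced sketch of that deferred-side-effect pattern, independent of the garage_db traits (Tx and transaction here are illustrative):

// Deferred side effects that must run only if the transaction commits.
struct Tx {
    on_commit: Vec<Box<dyn FnOnce()>>,
}

impl Tx {
    fn new() -> Self {
        Self { on_commit: Vec::new() }
    }

    // Register a closure that is dropped, not run, if the transaction aborts.
    fn on_commit<F: FnOnce() + 'static>(&mut self, f: F) {
        self.on_commit.push(Box::new(f));
    }
}

// Run `body` as a pretend transaction: fire the hooks on success, drop them on error.
fn transaction<R, E>(body: impl FnOnce(&mut Tx) -> Result<R, E>) -> Result<R, E> {
    let mut tx = Tx::new();
    let res = body(&mut tx);
    if res.is_ok() {
        for hook in tx.on_commit {
            hook();
        }
    }
    res
}

fn main() {
    let res: Result<u32, ()> = transaction(|tx| {
        tx.on_commit(|| println!("side effect after successful commit"));
        Ok(12)
    });
    assert_eq!(res, Ok(12));
}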
@ -349,7 +351,7 @@ pub(crate) trait IDb: Send + Sync {
high: Bound<&'r [u8]>, high: Bound<&'r [u8]>,
) -> Result<ValueIter<'_>>; ) -> Result<ValueIter<'_>>;
fn transaction(&self, f: &dyn ITxFn) -> TxResult<OnCommit, ()>; fn transaction(&self, f: &dyn ITxFn) -> TxResult<(), ()>;
} }
pub(crate) trait ITx { pub(crate) trait ITx {
@ -381,14 +383,14 @@ pub(crate) trait ITxFn {
} }
pub(crate) enum TxFnResult { pub(crate) enum TxFnResult {
Ok(OnCommit), Ok,
Abort, Abort,
DbErr, DbErr,
} }
struct TxFn<F, R, E> struct TxFn<F, R, E>
where where
F: Fn(&mut Transaction<'_>) -> TxResult<R, E>, F: Fn(Transaction<'_>) -> TxResult<R, E>,
{ {
function: F, function: F,
result: Cell<Option<TxResult<R, E>>>, result: Cell<Option<TxResult<R, E>>>,
@ -396,16 +398,12 @@ where
impl<F, R, E> ITxFn for TxFn<F, R, E> impl<F, R, E> ITxFn for TxFn<F, R, E>
where where
F: Fn(&mut Transaction<'_>) -> TxResult<R, E>, F: Fn(Transaction<'_>) -> TxResult<R, E>,
{ {
fn try_on(&self, tx: &mut dyn ITx) -> TxFnResult { fn try_on(&self, tx: &mut dyn ITx) -> TxFnResult {
let mut tx = Transaction { let res = (self.function)(Transaction(tx));
tx,
on_commit: vec![],
};
let res = (self.function)(&mut tx);
let res2 = match &res { let res2 = match &res {
Ok(_) => TxFnResult::Ok(tx.on_commit), Ok(_) => TxFnResult::Ok,
Err(TxError::Abort(_)) => TxFnResult::Abort, Err(TxError::Abort(_)) => TxFnResult::Abort,
Err(TxError::Db(_)) => TxFnResult::DbErr, Err(TxError::Db(_)) => TxFnResult::DbErr,
}; };

View file

@ -9,8 +9,8 @@ use heed::types::ByteSlice;
use heed::{BytesDecode, Env, RoTxn, RwTxn, UntypedDatabase as Database}; use heed::{BytesDecode, Env, RoTxn, RwTxn, UntypedDatabase as Database};
use crate::{ use crate::{
Db, Error, IDb, ITx, ITxFn, OnCommit, Result, TxError, TxFnResult, TxOpError, TxOpResult, Db, Error, IDb, ITx, ITxFn, Result, TxError, TxFnResult, TxOpError, TxOpResult, TxResult,
TxResult, TxValueIter, Value, ValueIter, TxValueIter, Value, ValueIter,
}; };
pub use heed; pub use heed;
@ -186,7 +186,7 @@ impl IDb for LmdbDb {
// ---- // ----
fn transaction(&self, f: &dyn ITxFn) -> TxResult<OnCommit, ()> { fn transaction(&self, f: &dyn ITxFn) -> TxResult<(), ()> {
let trees = self.trees.read().unwrap(); let trees = self.trees.read().unwrap();
let mut tx = LmdbTx { let mut tx = LmdbTx {
trees: &trees.0[..], trees: &trees.0[..],
@ -199,9 +199,9 @@ impl IDb for LmdbDb {
let res = f.try_on(&mut tx); let res = f.try_on(&mut tx);
match res { match res {
TxFnResult::Ok(on_commit) => { TxFnResult::Ok => {
tx.tx.commit().map_err(Error::from).map_err(TxError::Db)?; tx.tx.commit().map_err(Error::from).map_err(TxError::Db)?;
Ok(on_commit) Ok(())
} }
TxFnResult::Abort => { TxFnResult::Abort => {
tx.tx.abort().map_err(Error::from).map_err(TxError::Db)?; tx.tx.abort().map_err(Error::from).map_err(TxError::Db)?;

View file

@ -10,8 +10,8 @@ use sled::transaction::{
}; };
use crate::{ use crate::{
Db, Error, IDb, ITx, ITxFn, OnCommit, Result, TxError, TxFnResult, TxOpError, TxOpResult, Db, Error, IDb, ITx, ITxFn, Result, TxError, TxFnResult, TxOpError, TxOpResult, TxResult,
TxResult, TxValueIter, Value, ValueIter, TxValueIter, Value, ValueIter,
}; };
pub use sled; pub use sled;
@ -38,15 +38,7 @@ pub struct SledDb {
} }
impl SledDb { impl SledDb {
#[deprecated(
since = "0.9.0",
note = "The Sled database is now deprecated and will be removed in Garage v1.0. Please migrate to LMDB or Sqlite as soon as possible."
)]
pub fn init(db: sled::Db) -> Db { pub fn init(db: sled::Db) -> Db {
tracing::warn!("-------------------- IMPORTANT WARNING !!! ----------------------");
tracing::warn!("The Sled database is now deprecated and will be removed in Garage v1.0.");
tracing::warn!("Please migrate to LMDB or Sqlite as soon as possible.");
tracing::warn!("-----------------------------------------------------------------------");
let s = Self { let s = Self {
db, db,
trees: RwLock::new((Vec::new(), HashMap::new())), trees: RwLock::new((Vec::new(), HashMap::new())),
@ -166,7 +158,7 @@ impl IDb for SledDb {
// ---- // ----
fn transaction(&self, f: &dyn ITxFn) -> TxResult<OnCommit, ()> { fn transaction(&self, f: &dyn ITxFn) -> TxResult<(), ()> {
let trees = self.trees.read().unwrap(); let trees = self.trees.read().unwrap();
let res = trees.0.transaction(|txtrees| { let res = trees.0.transaction(|txtrees| {
let mut tx = SledTx { let mut tx = SledTx {
@ -174,9 +166,9 @@ impl IDb for SledDb {
err: Cell::new(None), err: Cell::new(None),
}; };
match f.try_on(&mut tx) { match f.try_on(&mut tx) {
TxFnResult::Ok(on_commit) => { TxFnResult::Ok => {
assert!(tx.err.into_inner().is_none()); assert!(tx.err.into_inner().is_none());
Ok(on_commit) Ok(())
} }
TxFnResult::Abort => { TxFnResult::Abort => {
assert!(tx.err.into_inner().is_none()); assert!(tx.err.into_inner().is_none());
@ -189,7 +181,7 @@ impl IDb for SledDb {
} }
}); });
match res { match res {
Ok(on_commit) => Ok(on_commit), Ok(()) => Ok(()),
Err(TransactionError::Abort(())) => Err(TxError::Abort(())), Err(TransactionError::Abort(())) => Err(TxError::Abort(())),
Err(TransactionError::Storage(s)) => Err(TxError::Db(s.into())), Err(TransactionError::Storage(s)) => Err(TxError::Db(s.into())),
} }

View file

@ -9,8 +9,8 @@ use std::sync::{Arc, Mutex, MutexGuard};
use rusqlite::{params, Connection, Rows, Statement, Transaction}; use rusqlite::{params, Connection, Rows, Statement, Transaction};
use crate::{ use crate::{
Db, Error, IDb, ITx, ITxFn, OnCommit, Result, TxError, TxFnResult, TxOpError, TxOpResult, Db, Error, IDb, ITx, ITxFn, Result, TxError, TxFnResult, TxOpError, TxOpResult, TxResult,
TxResult, TxValueIter, Value, ValueIter, TxValueIter, Value, ValueIter,
}; };
pub use rusqlite; pub use rusqlite;
@ -261,7 +261,7 @@ impl IDb for SqliteDb {
// ---- // ----
fn transaction(&self, f: &dyn ITxFn) -> TxResult<OnCommit, ()> { fn transaction(&self, f: &dyn ITxFn) -> TxResult<(), ()> {
trace!("transaction: lock db"); trace!("transaction: lock db");
let mut this = self.0.lock().unwrap(); let mut this = self.0.lock().unwrap();
trace!("transaction: lock acquired"); trace!("transaction: lock acquired");
@ -277,9 +277,9 @@ impl IDb for SqliteDb {
trees: &this_mut_ref.trees, trees: &this_mut_ref.trees,
}; };
let res = match f.try_on(&mut tx) { let res = match f.try_on(&mut tx) {
TxFnResult::Ok(on_commit) => { TxFnResult::Ok => {
tx.tx.commit().map_err(Error::from).map_err(TxError::Db)?; tx.tx.commit().map_err(Error::from).map_err(TxError::Db)?;
Ok(on_commit) Ok(())
} }
TxFnResult::Abort => { TxFnResult::Abort => {
tx.tx.rollback().map_err(Error::from).map_err(TxError::Db)?; tx.tx.rollback().map_err(Error::from).map_err(TxError::Db)?;

View file

@ -13,26 +13,26 @@ fn test_suite(db: Db) {
assert!(tree.insert(ka, va).unwrap().is_none()); assert!(tree.insert(ka, va).unwrap().is_none());
assert_eq!(tree.get(ka).unwrap().unwrap(), va); assert_eq!(tree.get(ka).unwrap().unwrap(), va);
let res = db.transaction::<_, (), _>(|tx| { let res = db.transaction::<_, (), _>(|mut tx| {
assert_eq!(tx.get(&tree, ka).unwrap().unwrap(), va); assert_eq!(tx.get(&tree, ka).unwrap().unwrap(), va);
assert_eq!(tx.insert(&tree, ka, vb).unwrap().unwrap(), va); assert_eq!(tx.insert(&tree, ka, vb).unwrap().unwrap(), va);
assert_eq!(tx.get(&tree, ka).unwrap().unwrap(), vb); assert_eq!(tx.get(&tree, ka).unwrap().unwrap(), vb);
Ok(12) tx.commit(12)
}); });
assert!(matches!(res, Ok(12))); assert!(matches!(res, Ok(12)));
assert_eq!(tree.get(ka).unwrap().unwrap(), vb); assert_eq!(tree.get(ka).unwrap().unwrap(), vb);
let res = db.transaction::<(), _, _>(|tx| { let res = db.transaction::<(), _, _>(|mut tx| {
assert_eq!(tx.get(&tree, ka).unwrap().unwrap(), vb); assert_eq!(tx.get(&tree, ka).unwrap().unwrap(), vb);
assert_eq!(tx.insert(&tree, ka, vc).unwrap().unwrap(), vb); assert_eq!(tx.insert(&tree, ka, vc).unwrap().unwrap(), vb);
assert_eq!(tx.get(&tree, ka).unwrap().unwrap(), vc); assert_eq!(tx.get(&tree, ka).unwrap().unwrap(), vc);
Err(TxError::Abort(42)) tx.abort(42)
}); });
assert!(matches!(res, Err(TxError::Abort(42)))); assert!(matches!(res, Err(TxError::Abort(42))));
assert_eq!(tree.get(ka).unwrap().unwrap(), vb); assert_eq!(tree.get(ka).unwrap().unwrap(), vb);

View file

@ -1,6 +1,6 @@
[package] [package]
name = "garage" name = "garage"
version = "0.9.0" version = "0.8.4"
authors = ["Alex Auvolat <alex@adnab.me>"] authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"
@ -53,7 +53,7 @@ futures = "0.3"
futures-util = "0.3" futures-util = "0.3"
tokio = { version = "1.0", default-features = false, features = ["rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] } tokio = { version = "1.0", default-features = false, features = ["rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] }
netapp = "0.10" netapp = "0.5"
opentelemetry = { version = "0.17", features = [ "rt-tokio" ] } opentelemetry = { version = "0.17", features = [ "rt-tokio" ] }
opentelemetry-prometheus = { version = "0.10", optional = true } opentelemetry-prometheus = { version = "0.10", optional = true }
@ -78,7 +78,7 @@ k2v-client.workspace = true
[features] [features]
default = [ "bundled-libs", "metrics", "sled", "lmdb", "sqlite", "k2v" ] default = [ "bundled-libs", "metrics", "sled", "k2v" ]
k2v = [ "garage_util/k2v", "garage_api/k2v" ] k2v = [ "garage_util/k2v", "garage_api/k2v" ]

View file

@ -34,7 +34,6 @@ impl AdminRpcHandler {
.get_range(&hash, None, None, 10000, Default::default()) .get_range(&hash, None, None, 10000, Default::default())
.await?; .await?;
let mut versions = vec![]; let mut versions = vec![];
let mut uploads = vec![];
for br in block_refs { for br in block_refs {
if let Some(v) = self if let Some(v) = self
.garage .garage
@ -42,11 +41,6 @@ impl AdminRpcHandler {
.get(&br.version, &EmptyKey) .get(&br.version, &EmptyKey)
.await? .await?
{ {
if let VersionBacklink::MultipartUpload { upload_id } = &v.backlink {
if let Some(u) = self.garage.mpu_table.get(upload_id, &EmptyKey).await? {
uploads.push(u);
}
}
versions.push(Ok(v)); versions.push(Ok(v));
} else { } else {
versions.push(Err(br.version)); versions.push(Err(br.version));
@ -56,7 +50,6 @@ impl AdminRpcHandler {
hash, hash,
refcount, refcount,
versions, versions,
uploads,
}) })
} }
@ -100,7 +93,6 @@ impl AdminRpcHandler {
} }
let mut obj_dels = 0; let mut obj_dels = 0;
let mut mpu_dels = 0;
let mut ver_dels = 0; let mut ver_dels = 0;
for hash in blocks { for hash in blocks {
@ -113,80 +105,56 @@ impl AdminRpcHandler {
.await?; .await?;
for br in block_refs { for br in block_refs {
if let Some(version) = self let version = match self
.garage .garage
.version_table .version_table
.get(&br.version, &EmptyKey) .get(&br.version, &EmptyKey)
.await? .await?
{ {
self.handle_block_purge_version_backlink( Some(v) => v,
&version, None => continue,
&mut obj_dels, };
&mut mpu_dels,
) if let Some(object) = self
.await?; .garage
.object_table
.get(&version.bucket_id, &version.key)
.await?
{
let ov = object.versions().iter().rev().find(|v| v.is_complete());
if let Some(ov) = ov {
if ov.uuid == br.version {
let del_uuid = gen_uuid();
let deleted_object = Object::new(
version.bucket_id,
version.key.clone(),
vec![ObjectVersion {
uuid: del_uuid,
timestamp: ov.timestamp + 1,
state: ObjectVersionState::Complete(
ObjectVersionData::DeleteMarker,
),
}],
);
self.garage.object_table.insert(&deleted_object).await?;
obj_dels += 1;
}
}
}
if !version.deleted.get() { if !version.deleted.get() {
let deleted_version = Version::new(version.uuid, version.backlink, true); let deleted_version =
Version::new(version.uuid, version.bucket_id, version.key.clone(), true);
self.garage.version_table.insert(&deleted_version).await?; self.garage.version_table.insert(&deleted_version).await?;
ver_dels += 1; ver_dels += 1;
} }
} }
} }
}
Ok(AdminRpc::Ok(format!( Ok(AdminRpc::Ok(format!(
"Purged {} blocks, {} versions, {} objects, {} multipart uploads", "{} blocks were purged: {} object deletion markers added, {} versions marked deleted",
blocks.len(), blocks.len(),
ver_dels,
obj_dels, obj_dels,
mpu_dels, ver_dels
))) )))
} }
async fn handle_block_purge_version_backlink(
&self,
version: &Version,
obj_dels: &mut usize,
mpu_dels: &mut usize,
) -> Result<(), Error> {
let (bucket_id, key, ov_id) = match &version.backlink {
VersionBacklink::Object { bucket_id, key } => (*bucket_id, key.clone(), version.uuid),
VersionBacklink::MultipartUpload { upload_id } => {
if let Some(mut mpu) = self.garage.mpu_table.get(upload_id, &EmptyKey).await? {
if !mpu.deleted.get() {
mpu.parts.clear();
mpu.deleted.set();
self.garage.mpu_table.insert(&mpu).await?;
*mpu_dels += 1;
}
(mpu.bucket_id, mpu.key.clone(), *upload_id)
} else {
return Ok(());
}
}
};
if let Some(object) = self.garage.object_table.get(&bucket_id, &key).await? {
let ov = object.versions().iter().rev().find(|v| v.is_complete());
if let Some(ov) = ov {
if ov.uuid == ov_id {
let del_uuid = gen_uuid();
let deleted_object = Object::new(
bucket_id,
key,
vec![ObjectVersion {
uuid: del_uuid,
timestamp: ov.timestamp + 1,
state: ObjectVersionState::Complete(ObjectVersionData::DeleteMarker),
}],
);
self.garage.object_table.insert(&deleted_object).await?;
*obj_dels += 1;
}
}
}
Ok(())
}
} }


@ -73,15 +73,6 @@ impl AdminRpcHandler {
.map(|x| x.filtered_values(&self.garage.system.ring.borrow())) .map(|x| x.filtered_values(&self.garage.system.ring.borrow()))
.unwrap_or_default(); .unwrap_or_default();
let mpu_counters = self
.garage
.mpu_counter_table
.table
.get(&bucket_id, &EmptyKey)
.await?
.map(|x| x.filtered_values(&self.garage.system.ring.borrow()))
.unwrap_or_default();
let mut relevant_keys = HashMap::new(); let mut relevant_keys = HashMap::new();
for (k, _) in bucket for (k, _) in bucket
.state .state
@ -121,7 +112,6 @@ impl AdminRpcHandler {
bucket, bucket,
relevant_keys, relevant_keys,
counters, counters,
mpu_counters,
}) })
} }


@ -2,7 +2,7 @@ use std::collections::HashMap;
use garage_table::*; use garage_table::*;
use garage_model::helper::error::*; use garage_model::helper::error::Error;
use garage_model::key_table::*; use garage_model::key_table::*;
use crate::cli::*; use crate::cli::*;
@ -14,7 +14,7 @@ impl AdminRpcHandler {
match cmd { match cmd {
KeyOperation::List => self.handle_list_keys().await, KeyOperation::List => self.handle_list_keys().await,
KeyOperation::Info(query) => self.handle_key_info(query).await, KeyOperation::Info(query) => self.handle_key_info(query).await,
KeyOperation::Create(query) => self.handle_create_key(query).await, KeyOperation::New(query) => self.handle_create_key(query).await,
KeyOperation::Rename(query) => self.handle_rename_key(query).await, KeyOperation::Rename(query) => self.handle_rename_key(query).await,
KeyOperation::Delete(query) => self.handle_delete_key(query).await, KeyOperation::Delete(query) => self.handle_delete_key(query).await,
KeyOperation::Allow(query) => self.handle_allow_key(query).await, KeyOperation::Allow(query) => self.handle_allow_key(query).await,
@ -41,17 +41,12 @@ impl AdminRpcHandler {
Ok(AdminRpc::KeyList(key_ids)) Ok(AdminRpc::KeyList(key_ids))
} }
async fn handle_key_info(&self, query: &KeyInfoOpt) -> Result<AdminRpc, Error> { async fn handle_key_info(&self, query: &KeyOpt) -> Result<AdminRpc, Error> {
let mut key = self let key = self
.garage .garage
.key_helper() .key_helper()
.get_existing_matching_key(&query.key_pattern) .get_existing_matching_key(&query.key_pattern)
.await?; .await?;
if !query.show_secret {
key.state.as_option_mut().unwrap().secret_key = "(redacted)".into();
}
self.key_info_result(key).await self.key_info_result(key).await
} }
@ -123,17 +118,11 @@ impl AdminRpcHandler {
} }
async fn handle_import_key(&self, query: &KeyImportOpt) -> Result<AdminRpc, Error> { async fn handle_import_key(&self, query: &KeyImportOpt) -> Result<AdminRpc, Error> {
if !query.yes {
return Err(Error::BadRequest("This command is intended to re-import keys that were previously generated by Garage. If you want to create a new key, use `garage key new` instead. Add the --yes flag if you really want to re-import a key.".to_string()));
}
let prev_key = self.garage.key_table.get(&EmptyKey, &query.key_id).await?; let prev_key = self.garage.key_table.get(&EmptyKey, &query.key_id).await?;
if prev_key.is_some() { if prev_key.is_some() {
return Err(Error::BadRequest(format!("Key {} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. Sorry.", query.key_id))); return Err(Error::BadRequest(format!("Key {} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. Sorry.", query.key_id)));
} }
let imported_key = Key::import(&query.key_id, &query.secret_key, &query.name);
let imported_key = Key::import(&query.key_id, &query.secret_key, &query.name)
.ok_or_bad_request("Invalid key format")?;
self.garage.key_table.insert(&imported_key).await?; self.garage.key_table.insert(&imported_key).await?;
self.key_info_result(imported_key).await self.key_info_result(imported_key).await


@ -28,7 +28,6 @@ use garage_model::garage::Garage;
use garage_model::helper::error::{Error, OkOrBadRequest}; use garage_model::helper::error::{Error, OkOrBadRequest};
use garage_model::key_table::*; use garage_model::key_table::*;
use garage_model::migrate::Migrate; use garage_model::migrate::Migrate;
use garage_model::s3::mpu_table::MultipartUpload;
use garage_model::s3::version_table::Version; use garage_model::s3::version_table::Version;
use crate::cli::*; use crate::cli::*;
@ -54,7 +53,6 @@ pub enum AdminRpc {
bucket: Bucket, bucket: Bucket,
relevant_keys: HashMap<String, Key>, relevant_keys: HashMap<String, Key>,
counters: HashMap<String, i64>, counters: HashMap<String, i64>,
mpu_counters: HashMap<String, i64>,
}, },
KeyList(Vec<(String, String)>), KeyList(Vec<(String, String)>),
KeyInfo(Key, HashMap<Uuid, Bucket>), KeyInfo(Key, HashMap<Uuid, Bucket>),
@ -69,7 +67,6 @@ pub enum AdminRpc {
hash: Hash, hash: Hash,
refcount: u64, refcount: u64,
versions: Vec<Result<Version, Uuid>>, versions: Vec<Result<Version, Uuid>>,
uploads: Vec<MultipartUpload>,
}, },
} }
@ -277,7 +274,7 @@ impl AdminRpcHandler {
// Gather storage node and free space statistics // Gather storage node and free space statistics
let layout = &self.garage.system.ring.borrow().layout; let layout = &self.garage.system.ring.borrow().layout;
let mut node_partition_count = HashMap::<Uuid, u64>::new(); let mut node_partition_count = HashMap::<Uuid, u64>::new();
for short_id in layout.ring_assignment_data.iter() { for short_id in layout.ring_assignation_data.iter() {
let id = layout.node_id_vec[*short_id as usize]; let id = layout.node_id_vec[*short_id as usize];
*node_partition_count.entry(id).or_default() += 1; *node_partition_count.entry(id).or_default() += 1;
} }


@ -85,7 +85,7 @@ pub async fn cmd_status(rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID) ->
)); ));
} }
_ => { _ => {
let new_role = match layout.staging_roles.get(&adv.id) { let new_role = match layout.staging.get(&adv.id) {
Some(NodeRoleV(Some(_))) => "(pending)", Some(NodeRoleV(Some(_))) => "(pending)",
_ => "NO ROLE ASSIGNED", _ => "NO ROLE ASSIGNED",
}; };
@ -190,9 +190,8 @@ pub async fn cmd_admin(
bucket, bucket,
relevant_keys, relevant_keys,
counters, counters,
mpu_counters,
} => { } => {
print_bucket_info(&bucket, &relevant_keys, &counters, &mpu_counters); print_bucket_info(&bucket, &relevant_keys, &counters);
} }
AdminRpc::KeyList(kl) => { AdminRpc::KeyList(kl) => {
print_key_list(kl); print_key_list(kl);
@ -216,9 +215,8 @@ pub async fn cmd_admin(
hash, hash,
refcount, refcount,
versions, versions,
uploads,
} => { } => {
print_block_info(hash, refcount, versions, uploads); print_block_info(hash, refcount, versions);
} }
r => { r => {
error!("Unexpected response: {:?}", r); error!("Unexpected response: {:?}", r);


@ -1,69 +0,0 @@
use std::path::PathBuf;
use structopt::StructOpt;
use garage_db::*;
/// K2V command line interface
#[derive(StructOpt, Debug)]
pub struct ConvertDbOpt {
/// Input database path (not the same as metadata_dir, see
/// https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db-engine-since-v0-8-0)
#[structopt(short = "i")]
input_path: PathBuf,
/// Input database engine (sled, lmdb or sqlite; limited by db engines
/// enabled in this build)
#[structopt(short = "a")]
input_engine: String,
/// Output database path
#[structopt(short = "o")]
output_path: PathBuf,
/// Output database engine
#[structopt(short = "b")]
output_engine: String,
}
pub(crate) fn do_conversion(args: ConvertDbOpt) -> Result<()> {
let input = open_db(args.input_path, args.input_engine)?;
let output = open_db(args.output_path, args.output_engine)?;
output.import(&input)?;
Ok(())
}
fn open_db(path: PathBuf, engine: String) -> Result<Db> {
match engine.as_str() {
#[cfg(feature = "sled")]
"sled" => {
let db = sled_adapter::sled::Config::default().path(&path).open()?;
Ok(sled_adapter::SledDb::init(db))
}
#[cfg(feature = "sqlite")]
"sqlite" | "sqlite3" | "rusqlite" => {
let db = sqlite_adapter::rusqlite::Connection::open(&path)?;
db.pragma_update(None, "journal_mode", &"WAL")?;
db.pragma_update(None, "synchronous", &"NORMAL")?;
Ok(sqlite_adapter::SqliteDb::init(db))
}
#[cfg(feature = "lmdb")]
"lmdb" | "heed" => {
std::fs::create_dir_all(&path).map_err(|e| {
Error(format!("Unable to create LMDB data directory: {}", e).into())
})?;
let map_size = lmdb_adapter::recommended_map_size();
let mut env_builder = lmdb_adapter::heed::EnvOpenOptions::new();
env_builder.max_dbs(100);
env_builder.map_size(map_size);
unsafe {
env_builder.flag(lmdb_adapter::heed::flags::Flags::MdbNoMetaSync);
}
let db = env_builder.open(&path)?;
Ok(lmdb_adapter::LmdbDb::init(db))
}
e => Err(Error(
format!("Invalid or unsupported DB engine: {}", e).into(),
)),
}
}


@ -1,5 +1,3 @@
use bytesize::ByteSize;
use format_table::format_table; use format_table::format_table;
use garage_util::crdt::Crdt; use garage_util::crdt::Crdt;
use garage_util::error::*; use garage_util::error::*;
@ -16,8 +14,8 @@ pub async fn cli_layout_command_dispatch(
rpc_host: NodeID, rpc_host: NodeID,
) -> Result<(), Error> { ) -> Result<(), Error> {
match cmd { match cmd {
LayoutOperation::Assign(assign_opt) => { LayoutOperation::Assign(configure_opt) => {
cmd_assign_role(system_rpc_endpoint, rpc_host, assign_opt).await cmd_assign_role(system_rpc_endpoint, rpc_host, configure_opt).await
} }
LayoutOperation::Remove(remove_opt) => { LayoutOperation::Remove(remove_opt) => {
cmd_remove_role(system_rpc_endpoint, rpc_host, remove_opt).await cmd_remove_role(system_rpc_endpoint, rpc_host, remove_opt).await
@ -29,9 +27,6 @@ pub async fn cli_layout_command_dispatch(
LayoutOperation::Revert(revert_opt) => { LayoutOperation::Revert(revert_opt) => {
cmd_revert_layout(system_rpc_endpoint, rpc_host, revert_opt).await cmd_revert_layout(system_rpc_endpoint, rpc_host, revert_opt).await
} }
LayoutOperation::Config(config_opt) => {
cmd_config_layout(system_rpc_endpoint, rpc_host, config_opt).await
}
} }
} }
@ -65,14 +60,14 @@ pub async fn cmd_assign_role(
.collect::<Result<Vec<_>, _>>()?; .collect::<Result<Vec<_>, _>>()?;
let mut roles = layout.roles.clone(); let mut roles = layout.roles.clone();
roles.merge(&layout.staging_roles); roles.merge(&layout.staging);
for replaced in args.replace.iter() { for replaced in args.replace.iter() {
let replaced_node = find_matching_node(layout.node_ids().iter().cloned(), replaced)?; let replaced_node = find_matching_node(layout.node_ids().iter().cloned(), replaced)?;
match roles.get(&replaced_node) { match roles.get(&replaced_node) {
Some(NodeRoleV(Some(_))) => { Some(NodeRoleV(Some(_))) => {
layout layout
.staging_roles .staging
.merge(&roles.update_mutator(replaced_node, NodeRoleV(None))); .merge(&roles.update_mutator(replaced_node, NodeRoleV(None)));
} }
_ => { _ => {
@ -88,7 +83,7 @@ pub async fn cmd_assign_role(
return Err(Error::Message( return Err(Error::Message(
"-c and -g are mutually exclusive, please configure node either with c>0 to act as a storage node or with -g to act as a gateway node".into())); "-c and -g are mutually exclusive, please configure node either with c>0 to act as a storage node or with -g to act as a gateway node".into()));
} }
if args.capacity == Some(ByteSize::b(0)) { if args.capacity == Some(0) {
return Err(Error::Message("Invalid capacity value: 0".into())); return Err(Error::Message("Invalid capacity value: 0".into()));
} }
@ -96,7 +91,7 @@ pub async fn cmd_assign_role(
let new_entry = match roles.get(&added_node) { let new_entry = match roles.get(&added_node) {
Some(NodeRoleV(Some(old))) => { Some(NodeRoleV(Some(old))) => {
let capacity = match args.capacity { let capacity = match args.capacity {
Some(c) => Some(c.as_u64()), Some(c) => Some(c),
None if args.gateway => None, None if args.gateway => None,
None => old.capacity, None => old.capacity,
}; };
@ -113,7 +108,7 @@ pub async fn cmd_assign_role(
} }
_ => { _ => {
let capacity = match args.capacity { let capacity = match args.capacity {
Some(c) => Some(c.as_u64()), Some(c) => Some(c),
None if args.gateway => None, None if args.gateway => None,
None => return Err(Error::Message( None => return Err(Error::Message(
"Please specify a capacity with the -c flag, or set node explicitly as gateway with -g".into())), "Please specify a capacity with the -c flag, or set node explicitly as gateway with -g".into())),
@ -130,7 +125,7 @@ pub async fn cmd_assign_role(
}; };
layout layout
.staging_roles .staging
.merge(&roles.update_mutator(added_node, NodeRoleV(Some(new_entry)))); .merge(&roles.update_mutator(added_node, NodeRoleV(Some(new_entry))));
} }
@ -150,13 +145,13 @@ pub async fn cmd_remove_role(
let mut layout = fetch_layout(rpc_cli, rpc_host).await?; let mut layout = fetch_layout(rpc_cli, rpc_host).await?;
let mut roles = layout.roles.clone(); let mut roles = layout.roles.clone();
roles.merge(&layout.staging_roles); roles.merge(&layout.staging);
let deleted_node = let deleted_node =
find_matching_node(roles.items().iter().map(|(id, _, _)| *id), &args.node_id)?; find_matching_node(roles.items().iter().map(|(id, _, _)| *id), &args.node_id)?;
layout layout
.staging_roles .staging
.merge(&roles.update_mutator(deleted_node, NodeRoleV(None))); .merge(&roles.update_mutator(deleted_node, NodeRoleV(None)));
send_layout(rpc_cli, rpc_host, layout).await?; send_layout(rpc_cli, rpc_host, layout).await?;
@ -171,45 +166,40 @@ pub async fn cmd_show_layout(
rpc_cli: &Endpoint<SystemRpc, ()>, rpc_cli: &Endpoint<SystemRpc, ()>,
rpc_host: NodeID, rpc_host: NodeID,
) -> Result<(), Error> { ) -> Result<(), Error> {
let layout = fetch_layout(rpc_cli, rpc_host).await?; let mut layout = fetch_layout(rpc_cli, rpc_host).await?;
println!("==== CURRENT CLUSTER LAYOUT ===="); println!("==== CURRENT CLUSTER LAYOUT ====");
print_cluster_layout(&layout, "No nodes currently have a role in the cluster.\nSee `garage status` to view available nodes."); if !print_cluster_layout(&layout) {
println!("No nodes currently have a role in the cluster.");
println!("See `garage status` to view available nodes.");
}
println!(); println!();
println!("Current cluster layout version: {}", layout.version); println!("Current cluster layout version: {}", layout.version);
let has_role_changes = print_staging_role_changes(&layout); if print_staging_role_changes(&layout) {
if has_role_changes { layout.roles.merge(&layout.staging);
let v = layout.version;
let res_apply = layout.apply_staged_changes(Some(v + 1)); println!();
println!("==== NEW CLUSTER LAYOUT AFTER APPLYING CHANGES ====");
if !print_cluster_layout(&layout) {
println!("No nodes have a role in the new layout.");
}
println!();
// this will print the stats of what partitions // this will print the stats of what partitions
// will move around when we apply // will move around when we apply
match res_apply { if layout.calculate_partition_assignation() {
Ok((layout, msg)) => {
println!();
println!("==== NEW CLUSTER LAYOUT AFTER APPLYING CHANGES ====");
print_cluster_layout(&layout, "No nodes have a role in the new layout.");
println!();
for line in msg.iter() {
println!("{}", line);
}
println!("To enact the staged role changes, type:"); println!("To enact the staged role changes, type:");
println!(); println!();
println!(" garage layout apply --version {}", v + 1); println!(" garage layout apply --version {}", layout.version + 1);
println!(); println!();
println!( println!(
"You can also revert all proposed changes with: garage layout revert --version {}", "You can also revert all proposed changes with: garage layout revert --version {}",
v + 1) layout.version + 1
} );
Err(e) => { } else {
println!("Error while trying to compute the assignment: {}", e); println!("Not enough nodes have an assigned role to maintain enough copies of data.");
println!("This new layout cannot yet be applied."); println!("This new layout cannot yet be applied.");
println!(
"You can also revert all proposed changes with: garage layout revert --version {}",
v + 1)
}
} }
} }
@ -223,14 +213,11 @@ pub async fn cmd_apply_layout(
) -> Result<(), Error> { ) -> Result<(), Error> {
let layout = fetch_layout(rpc_cli, rpc_host).await?; let layout = fetch_layout(rpc_cli, rpc_host).await?;
let (layout, msg) = layout.apply_staged_changes(apply_opt.version)?; let layout = layout.apply_staged_changes(apply_opt.version)?;
for line in msg.iter() {
println!("{}", line);
}
send_layout(rpc_cli, rpc_host, layout).await?; send_layout(rpc_cli, rpc_host, layout).await?;
println!("New cluster layout with updated role assignment has been applied in cluster."); println!("New cluster layout with updated role assignation has been applied in cluster.");
println!("Data will now be moved around between nodes accordingly."); println!("Data will now be moved around between nodes accordingly.");
Ok(()) Ok(())
@ -251,52 +238,6 @@ pub async fn cmd_revert_layout(
Ok(()) Ok(())
} }
pub async fn cmd_config_layout(
rpc_cli: &Endpoint<SystemRpc, ()>,
rpc_host: NodeID,
config_opt: ConfigLayoutOpt,
) -> Result<(), Error> {
let mut layout = fetch_layout(rpc_cli, rpc_host).await?;
let mut did_something = false;
match config_opt.redundancy {
None => (),
Some(r_str) => {
let r = r_str
.parse::<ZoneRedundancy>()
.ok_or_message("invalid zone redundancy value")?;
if let ZoneRedundancy::AtLeast(r_int) = r {
if r_int > layout.replication_factor {
return Err(Error::Message(format!(
"The zone redundancy must be smaller or equal to the \
replication factor ({}).",
layout.replication_factor
)));
} else if r_int < 1 {
return Err(Error::Message(
"The zone redundancy must be at least 1.".into(),
));
}
}
layout
.staging_parameters
.update(LayoutParameters { zone_redundancy: r });
println!("The zone redundancy parameter has been set to '{}'.", r);
did_something = true;
}
}
if !did_something {
return Err(Error::Message(
"Please specify an action for `garage layout config`".into(),
));
}
send_layout(rpc_cli, rpc_host, layout).await?;
Ok(())
}
// --- utility --- // --- utility ---
pub async fn fetch_layout( pub async fn fetch_layout(
@ -327,27 +268,14 @@ pub async fn send_layout(
Ok(()) Ok(())
} }
pub fn print_cluster_layout(layout: &ClusterLayout, empty_msg: &str) { pub fn print_cluster_layout(layout: &ClusterLayout) -> bool {
let mut table = vec!["ID\tTags\tZone\tCapacity\tUsable capacity".to_string()]; let mut table = vec!["ID\tTags\tZone\tCapacity".to_string()];
for (id, _, role) in layout.roles.items().iter() { for (id, _, role) in layout.roles.items().iter() {
let role = match &role.0 { let role = match &role.0 {
Some(r) => r, Some(r) => r,
_ => continue, _ => continue,
}; };
let tags = role.tags.join(","); let tags = role.tags.join(",");
let usage = layout.get_node_usage(id).unwrap_or(0);
let capacity = layout.get_node_capacity(id).unwrap_or(0);
if capacity > 0 {
table.push(format!(
"{:?}\t{}\t{}\t{}\t{} ({:.1}%)",
id,
tags,
role.zone,
role.capacity_string(),
ByteSize::b(usage as u64 * layout.partition_size).to_string_as(false),
(100.0 * usage as f32 * layout.partition_size as f32) / (capacity as f32)
));
} else {
table.push(format!( table.push(format!(
"{:?}\t{}\t{}\t{}", "{:?}\t{}\t{}\t{}",
id, id,
@ -355,31 +283,27 @@ pub fn print_cluster_layout(layout: &ClusterLayout, empty_msg: &str) {
role.zone, role.zone,
role.capacity_string() role.capacity_string()
)); ));
};
} }
if table.len() > 1 { if table.len() == 1 {
format_table(table); false
println!();
println!("Zone redundancy: {}", layout.parameters.zone_redundancy);
} else { } else {
println!("{}", empty_msg); format_table(table);
true
} }
} }
pub fn print_staging_role_changes(layout: &ClusterLayout) -> bool { pub fn print_staging_role_changes(layout: &ClusterLayout) -> bool {
let has_role_changes = layout let has_changes = layout
.staging_roles .staging
.items() .items()
.iter() .iter()
.any(|(k, _, v)| layout.roles.get(k) != Some(v)); .any(|(k, _, v)| layout.roles.get(k) != Some(v));
let has_layout_changes = *layout.staging_parameters.get() != layout.parameters;
if has_role_changes || has_layout_changes { if has_changes {
println!(); println!();
println!("==== STAGED ROLE CHANGES ===="); println!("==== STAGED ROLE CHANGES ====");
if has_role_changes {
let mut table = vec!["ID\tTags\tZone\tCapacity".to_string()]; let mut table = vec!["ID\tTags\tZone\tCapacity".to_string()];
for (id, _, role) in layout.staging_roles.items().iter() { for (id, _, role) in layout.staging.items().iter() {
if layout.roles.get(id) == Some(role) { if layout.roles.get(id) == Some(role) {
continue; continue;
} }
@ -397,14 +321,6 @@ pub fn print_staging_role_changes(layout: &ClusterLayout) -> bool {
} }
} }
format_table(table); format_table(table);
println!();
}
if has_layout_changes {
println!(
"Zone redundancy: {}",
layout.staging_parameters.get().zone_redundancy
);
}
true true
} else { } else {
false false


@ -4,8 +4,6 @@ pub(crate) mod layout;
pub(crate) mod structs; pub(crate) mod structs;
pub(crate) mod util; pub(crate) mod util;
pub(crate) mod convert_db;
pub(crate) use cmd::*; pub(crate) use cmd::*;
pub(crate) use init::*; pub(crate) use init::*;
pub(crate) use layout::*; pub(crate) use layout::*;


@ -3,8 +3,6 @@ use structopt::StructOpt;
use garage_util::version::garage_version; use garage_util::version::garage_version;
use crate::cli::convert_db;
#[derive(StructOpt, Debug)] #[derive(StructOpt, Debug)]
pub enum Command { pub enum Command {
/// Run Garage server /// Run Garage server
@ -19,7 +17,7 @@ pub enum Command {
#[structopt(name = "node", version = garage_version())] #[structopt(name = "node", version = garage_version())]
Node(NodeOperation), Node(NodeOperation),
/// Operations on the assignment of node roles in the cluster layout /// Operations on the assignation of node roles in the cluster layout
#[structopt(name = "layout", version = garage_version())] #[structopt(name = "layout", version = garage_version())]
Layout(LayoutOperation), Layout(LayoutOperation),
@ -56,10 +54,6 @@ pub enum Command {
/// Low-level debug operations on data blocks /// Low-level debug operations on data blocks
#[structopt(name = "block", version = garage_version())] #[structopt(name = "block", version = garage_version())]
Block(BlockOperation), Block(BlockOperation),
/// Convert metadata db between database engine formats
#[structopt(name = "convert-db", version = garage_version())]
ConvertDb(convert_db::ConvertDbOpt),
} }
#[derive(StructOpt, Debug)] #[derive(StructOpt, Debug)]
@ -97,10 +91,6 @@ pub enum LayoutOperation {
#[structopt(name = "remove", version = garage_version())] #[structopt(name = "remove", version = garage_version())]
Remove(RemoveRoleOpt), Remove(RemoveRoleOpt),
/// Configure parameters value for the layout computation
#[structopt(name = "config", version = garage_version())]
Config(ConfigLayoutOpt),
/// Show roles currently assigned to nodes and changes staged for commit /// Show roles currently assigned to nodes and changes staged for commit
#[structopt(name = "show", version = garage_version())] #[structopt(name = "show", version = garage_version())]
Show, Show,
@ -124,9 +114,9 @@ pub struct AssignRoleOpt {
#[structopt(short = "z", long = "zone")] #[structopt(short = "z", long = "zone")]
pub(crate) zone: Option<String>, pub(crate) zone: Option<String>,
/// Storage capacity, in bytes (supported suffixes: B, KB, MB, GB, TB, PB) /// Capacity (in relative terms, use 1 to represent your smallest server)
#[structopt(short = "c", long = "capacity")] #[structopt(short = "c", long = "capacity")]
pub(crate) capacity: Option<bytesize::ByteSize>, pub(crate) capacity: Option<u32>,
/// Gateway-only node /// Gateway-only node
#[structopt(short = "g", long = "gateway")] #[structopt(short = "g", long = "gateway")]
@ -147,13 +137,6 @@ pub struct RemoveRoleOpt {
pub(crate) node_id: String, pub(crate) node_id: String,
} }
#[derive(StructOpt, Debug)]
pub struct ConfigLayoutOpt {
/// Zone redundancy parameter ('none'/'max' or integer)
#[structopt(short = "r", long = "redundancy")]
pub(crate) redundancy: Option<String>,
}
#[derive(StructOpt, Debug)] #[derive(StructOpt, Debug)]
pub struct ApplyLayoutOpt { pub struct ApplyLayoutOpt {
/// Version number of new configuration: this command will fail if /// Version number of new configuration: this command will fail if
@ -334,11 +317,11 @@ pub enum KeyOperation {
/// Get key info /// Get key info
#[structopt(name = "info", version = garage_version())] #[structopt(name = "info", version = garage_version())]
Info(KeyInfoOpt), Info(KeyOpt),
/// Create new key /// Create new key
#[structopt(name = "create", version = garage_version())] #[structopt(name = "new", version = garage_version())]
Create(KeyNewOpt), New(KeyNewOpt),
/// Rename key /// Rename key
#[structopt(name = "rename", version = garage_version())] #[structopt(name = "rename", version = garage_version())]
@ -362,18 +345,15 @@ pub enum KeyOperation {
} }
#[derive(Serialize, Deserialize, StructOpt, Debug)] #[derive(Serialize, Deserialize, StructOpt, Debug)]
pub struct KeyInfoOpt { pub struct KeyOpt {
/// ID or name of the key /// ID or name of the key
pub key_pattern: String, pub key_pattern: String,
/// Whether to display the secret key
#[structopt(long = "show-secret")]
pub show_secret: bool,
} }
#[derive(Serialize, Deserialize, StructOpt, Debug)] #[derive(Serialize, Deserialize, StructOpt, Debug)]
pub struct KeyNewOpt { pub struct KeyNewOpt {
/// Name of the key /// Name of the key
#[structopt(default_value = "Unnamed key")] #[structopt(long = "name", default_value = "Unnamed key")]
pub name: String, pub name: String,
} }
@ -417,10 +397,6 @@ pub struct KeyImportOpt {
/// Key name /// Key name
#[structopt(short = "n", default_value = "Imported key")] #[structopt(short = "n", default_value = "Imported key")]
pub name: String, pub name: String,
/// Confirm key import
#[structopt(long = "yes")]
pub yes: bool,
} }
#[derive(Serialize, Deserialize, StructOpt, Debug, Clone)] #[derive(Serialize, Deserialize, StructOpt, Debug, Clone)]
@ -456,30 +432,24 @@ pub struct RepairOpt {
#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)] #[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)]
pub enum RepairWhat { pub enum RepairWhat {
/// Do a full sync of metadata tables /// Only do a full sync of metadata tables
#[structopt(name = "tables", version = garage_version())] #[structopt(name = "tables", version = garage_version())]
Tables, Tables,
/// Repair (resync/rebalance) the set of stored blocks in the cluster /// Only repair (resync/rebalance) the set of stored blocks
#[structopt(name = "blocks", version = garage_version())] #[structopt(name = "blocks", version = garage_version())]
Blocks, Blocks,
/// Repropagate object deletions to the version table /// Only redo the propagation of object deletions to the version table (slow)
#[structopt(name = "versions", version = garage_version())] #[structopt(name = "versions", version = garage_version())]
Versions, Versions,
/// Repropagate object deletions to the multipart upload table /// Only redo the propagation of version deletions to the block ref table (extremely slow)
#[structopt(name = "mpu", version = garage_version())]
MultipartUploads,
/// Repropagate version deletions to the block ref table
#[structopt(name = "block_refs", version = garage_version())] #[structopt(name = "block_refs", version = garage_version())]
BlockRefs, BlockRefs,
/// Verify integrity of all blocks on disc /// Verify integrity of all blocks on disc (extremely slow, i/o intensive)
#[structopt(name = "scrub", version = garage_version())] #[structopt(name = "scrub", version = garage_version())]
Scrub { Scrub {
#[structopt(subcommand)] #[structopt(subcommand)]
cmd: ScrubCmd, cmd: ScrubCmd,
}, },
/// Rebalance data blocks among HDDs on individual nodes
#[structopt(name = "rebalance", version = garage_version())]
Rebalance,
} }
#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)] #[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)]
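A side note on the `-c`/`--capacity` flag of AssignRoleOpt above: in the left-hand column it is an absolute byte size parsed by the bytesize crate (hence the suffix list in the help text), while in the right-hand column it is a bare relative integer. A rough sketch of the byte-size form, under the assumption that structopt goes through bytesize's FromStr implementation; the helper function below is illustrative and not part of this diff.

// Illustrative helper mirroring the checks done in cmd_assign_role above:
// reject a zero capacity, otherwise keep the value as a raw byte count.
fn parse_capacity(arg: &str) -> Option<u64> {
    let cap: bytesize::ByteSize = arg.parse().ok()?; // assumes bytesize's FromStr
    if cap == bytesize::ByteSize::b(0) {
        return None; // "Invalid capacity value: 0"
    }
    Some(cap.as_u64())
}
// e.g. parse_capacity("1G") and parse_capacity("500MB") succeed, parse_capacity("0") does not.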


@ -12,9 +12,8 @@ use garage_block::manager::BlockResyncErrorInfo;
use garage_model::bucket_table::*; use garage_model::bucket_table::*;
use garage_model::key_table::*; use garage_model::key_table::*;
use garage_model::s3::mpu_table::{self, MultipartUpload}; use garage_model::s3::object_table::{BYTES, OBJECTS, UNFINISHED_UPLOADS};
use garage_model::s3::object_table; use garage_model::s3::version_table::Version;
use garage_model::s3::version_table::*;
use crate::cli::structs::WorkerListOpt; use crate::cli::structs::WorkerListOpt;
@ -136,7 +135,6 @@ pub fn print_bucket_info(
bucket: &Bucket, bucket: &Bucket,
relevant_keys: &HashMap<String, Key>, relevant_keys: &HashMap<String, Key>,
counters: &HashMap<String, i64>, counters: &HashMap<String, i64>,
mpu_counters: &HashMap<String, i64>,
) { ) {
let key_name = |k| { let key_name = |k| {
relevant_keys relevant_keys
@ -150,7 +148,7 @@ pub fn print_bucket_info(
Deletable::Deleted => println!("Bucket is deleted."), Deletable::Deleted => println!("Bucket is deleted."),
Deletable::Present(p) => { Deletable::Present(p) => {
let size = let size =
bytesize::ByteSize::b(*counters.get(object_table::BYTES).unwrap_or(&0) as u64); bytesize::ByteSize::b(counters.get(BYTES).cloned().unwrap_or_default() as u64);
println!( println!(
"\nSize: {} ({})", "\nSize: {} ({})",
size.to_string_as(true), size.to_string_as(true),
@ -158,22 +156,14 @@ pub fn print_bucket_info(
); );
println!( println!(
"Objects: {}", "Objects: {}",
*counters.get(object_table::OBJECTS).unwrap_or(&0) counters.get(OBJECTS).cloned().unwrap_or_default()
);
println!(
"Unfinished uploads (multipart and non-multipart): {}",
*counters.get(object_table::UNFINISHED_UPLOADS).unwrap_or(&0)
); );
println!( println!(
"Unfinished multipart uploads: {}", "Unfinished multipart uploads: {}",
*mpu_counters.get(mpu_table::UPLOADS).unwrap_or(&0) counters
); .get(UNFINISHED_UPLOADS)
let mpu_size = .cloned()
bytesize::ByteSize::b(*mpu_counters.get(mpu_table::BYTES).unwrap_or(&0) as u64); .unwrap_or_default()
println!(
"Size of unfinished multipart uploads: {} ({})",
mpu_size.to_string_as(true),
mpu_size.to_string_as(false),
); );
println!("\nWebsite access: {}", p.website_config.get().is_some()); println!("\nWebsite access: {}", p.website_config.get().is_some());
@ -400,49 +390,29 @@ pub fn print_block_error_list(el: Vec<BlockResyncErrorInfo>) {
format_table(table); format_table(table);
} }
pub fn print_block_info( pub fn print_block_info(hash: Hash, refcount: u64, versions: Vec<Result<Version, Uuid>>) {
hash: Hash,
refcount: u64,
versions: Vec<Result<Version, Uuid>>,
uploads: Vec<MultipartUpload>,
) {
println!("Block hash: {}", hex::encode(hash.as_slice())); println!("Block hash: {}", hex::encode(hash.as_slice()));
println!("Refcount: {}", refcount); println!("Refcount: {}", refcount);
println!(); println!();
let mut table = vec!["Version\tBucket\tKey\tMPU\tDeleted".into()]; let mut table = vec!["Version\tBucket\tKey\tDeleted".into()];
let mut nondeleted_count = 0; let mut nondeleted_count = 0;
for v in versions.iter() { for v in versions.iter() {
match v { match v {
Ok(ver) => { Ok(ver) => {
match &ver.backlink {
VersionBacklink::Object { bucket_id, key } => {
table.push(format!( table.push(format!(
"{:?}\t{:?}\t{}\t\t{:?}", "{:?}\t{:?}\t{}\t{:?}",
ver.uuid, ver.uuid,
bucket_id, ver.bucket_id,
key, ver.key,
ver.deleted.get() ver.deleted.get()
)); ));
}
VersionBacklink::MultipartUpload { upload_id } => {
let upload = uploads.iter().find(|x| x.upload_id == *upload_id);
table.push(format!(
"{:?}\t{:?}\t{}\t{:?}\t{:?}",
ver.uuid,
upload.map(|u| u.bucket_id).unwrap_or_default(),
upload.map(|u| u.key.as_str()).unwrap_or_default(),
upload_id,
ver.deleted.get()
));
}
}
if !ver.deleted.get() { if !ver.deleted.get() {
nondeleted_count += 1; nondeleted_count += 1;
} }
} }
Err(vh) => { Err(vh) => {
table.push(format!("{:?}\t\t\t\tyes", vh)); table.push(format!("{:?}\t\t\tyes", vh));
} }
} }
} }


@ -17,9 +17,6 @@ compile_error!("Either bundled-libs or system-libs Cargo feature must be enabled
#[cfg(all(feature = "bundled-libs", feature = "system-libs"))] #[cfg(all(feature = "bundled-libs", feature = "system-libs"))]
compile_error!("Only one of bundled-libs and system-libs Cargo features must be enabled"); compile_error!("Only one of bundled-libs and system-libs Cargo features must be enabled");
#[cfg(not(any(feature = "lmdb", feature = "sled", feature = "sqlite")))]
compile_error!("Must activate the Cargo feature for at least one DB engine: lmdb, sled or sqlite.");
use std::net::SocketAddr; use std::net::SocketAddr;
use std::path::PathBuf; use std::path::PathBuf;
@ -176,9 +173,6 @@ async fn main() {
Command::OfflineRepair(repair_opt) => { Command::OfflineRepair(repair_opt) => {
repair::offline::offline_repair(opt.config_file, opt.secrets, repair_opt).await repair::offline::offline_repair(opt.config_file, opt.secrets, repair_opt).await
} }
Command::ConvertDb(conv_opt) => {
cli::convert_db::do_conversion(conv_opt).map_err(From::from)
}
Command::Node(NodeOperation::NodeId(node_id_opt)) => { Command::Node(NodeOperation::NodeId(node_id_opt)) => {
node_id_command(opt.config_file, node_id_opt.quiet) node_id_command(opt.config_file, node_id_opt.quiet)
} }


@ -5,16 +5,11 @@ use async_trait::async_trait;
use tokio::sync::watch; use tokio::sync::watch;
use garage_block::repair::ScrubWorkerCommand; use garage_block::repair::ScrubWorkerCommand;
use garage_model::garage::Garage; use garage_model::garage::Garage;
use garage_model::s3::block_ref_table::*; use garage_model::s3::block_ref_table::*;
use garage_model::s3::mpu_table::*;
use garage_model::s3::object_table::*; use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*; use garage_model::s3::version_table::*;
use garage_table::replication::*;
use garage_table::*; use garage_table::*;
use garage_util::background::*; use garage_util::background::*;
use garage_util::error::Error; use garage_util::error::Error;
use garage_util::migrate::Migrate; use garage_util::migrate::Migrate;
@ -37,15 +32,11 @@ pub async fn launch_online_repair(
} }
RepairWhat::Versions => { RepairWhat::Versions => {
info!("Repairing the versions table"); info!("Repairing the versions table");
bg.spawn_worker(TableRepairWorker::new(garage.clone(), RepairVersions)); bg.spawn_worker(RepairVersionsWorker::new(garage.clone()));
}
RepairWhat::MultipartUploads => {
info!("Repairing the multipart uploads table");
bg.spawn_worker(TableRepairWorker::new(garage.clone(), RepairMpu));
} }
RepairWhat::BlockRefs => { RepairWhat::BlockRefs => {
info!("Repairing the block refs table"); info!("Repairing the block refs table");
bg.spawn_worker(TableRepairWorker::new(garage.clone(), RepairBlockRefs)); bg.spawn_worker(RepairBlockrefsWorker::new(garage.clone()));
} }
RepairWhat::Blocks => { RepairWhat::Blocks => {
info!("Repairing the stored blocks"); info!("Repairing the stored blocks");
@ -70,82 +61,76 @@ pub async fn launch_online_repair(
info!("Sending command to scrub worker: {:?}", cmd); info!("Sending command to scrub worker: {:?}", cmd);
garage.block_manager.send_scrub_command(cmd).await?; garage.block_manager.send_scrub_command(cmd).await?;
} }
RepairWhat::Rebalance => {
info!("Rebalancing the stored blocks among storage locations");
bg.spawn_worker(garage_block::repair::RebalanceWorker::new(
garage.block_manager.clone(),
));
}
} }
Ok(()) Ok(())
} }
// ---- // ----
#[async_trait] struct RepairVersionsWorker {
trait TableRepair: Send + Sync + 'static {
type T: TableSchema;
fn table(garage: &Garage) -> &Table<Self::T, TableShardedReplication>;
async fn process(
&mut self,
garage: &Garage,
entry: <<Self as TableRepair>::T as TableSchema>::E,
) -> Result<bool, Error>;
}
struct TableRepairWorker<T: TableRepair> {
garage: Arc<Garage>, garage: Arc<Garage>,
pos: Vec<u8>, pos: Vec<u8>,
counter: usize, counter: usize,
repairs: usize,
inner: T,
} }
impl<R: TableRepair> TableRepairWorker<R> { impl RepairVersionsWorker {
fn new(garage: Arc<Garage>, inner: R) -> Self { fn new(garage: Arc<Garage>) -> Self {
Self { Self {
garage, garage,
inner,
pos: vec![], pos: vec![],
counter: 0, counter: 0,
repairs: 0,
} }
} }
} }
#[async_trait] #[async_trait]
impl<R: TableRepair> Worker for TableRepairWorker<R> { impl Worker for RepairVersionsWorker {
fn name(&self) -> String { fn name(&self) -> String {
format!("{} repair worker", R::T::TABLE_NAME) "Version repair worker".into()
} }
fn status(&self) -> WorkerStatus { fn status(&self) -> WorkerStatus {
WorkerStatus { WorkerStatus {
progress: Some(format!("{} ({})", self.counter, self.repairs)), progress: Some(self.counter.to_string()),
..Default::default() ..Default::default()
} }
} }
async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> { async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
let (item_bytes, next_pos) = match R::table(&self.garage).data.store.get_gt(&self.pos)? { let (item_bytes, next_pos) = match self.garage.version_table.data.store.get_gt(&self.pos)? {
Some((k, v)) => (v, k), Some((k, v)) => (v, k),
None => { None => {
info!( info!("repair_versions: finished, done {}", self.counter);
"{}: finished, done {}, fixed {}",
self.name(),
self.counter,
self.repairs
);
return Ok(WorkerState::Done); return Ok(WorkerState::Done);
} }
}; };
let entry = <R::T as TableSchema>::E::decode(&item_bytes) let version = Version::decode(&item_bytes).ok_or_message("Cannot decode Version")?;
.ok_or_message("Cannot decode table entry")?; if !version.deleted.get() {
if self.inner.process(&self.garage, entry).await? { let object = self
self.repairs += 1; .garage
.object_table
.get(&version.bucket_id, &version.key)
.await?;
let version_exists = match object {
Some(o) => o
.versions()
.iter()
.any(|x| x.uuid == version.uuid && x.state != ObjectVersionState::Aborted),
None => false,
};
if !version_exists {
info!("Repair versions: marking version as deleted: {:?}", version);
self.garage
.version_table
.insert(&Version::new(
version.uuid,
version.bucket_id,
version.key,
true,
))
.await?;
}
} }
self.counter += 1; self.counter += 1;
@ -161,124 +146,77 @@ impl<R: TableRepair> Worker for TableRepairWorker<R> {
// ---- // ----
struct RepairVersions; struct RepairBlockrefsWorker {
garage: Arc<Garage>,
pos: Vec<u8>,
counter: usize,
}
#[async_trait] impl RepairBlockrefsWorker {
impl TableRepair for RepairVersions { fn new(garage: Arc<Garage>) -> Self {
type T = VersionTable; Self {
garage,
fn table(garage: &Garage) -> &Table<Self::T, TableShardedReplication> { pos: vec![],
&garage.version_table counter: 0,
} }
async fn process(&mut self, garage: &Garage, version: Version) -> Result<bool, Error> {
if !version.deleted.get() {
let ref_exists = match &version.backlink {
VersionBacklink::Object { bucket_id, key } => garage
.object_table
.get(bucket_id, key)
.await?
.map(|o| {
o.versions().iter().any(|x| {
x.uuid == version.uuid && x.state != ObjectVersionState::Aborted
})
})
.unwrap_or(false),
VersionBacklink::MultipartUpload { upload_id } => garage
.mpu_table
.get(upload_id, &EmptyKey)
.await?
.map(|u| !u.deleted.get())
.unwrap_or(false),
};
if !ref_exists {
info!("Repair versions: marking version as deleted: {:?}", version);
garage
.version_table
.insert(&Version::new(version.uuid, version.backlink, true))
.await?;
return Ok(true);
}
}
Ok(false)
} }
} }
// ----
struct RepairBlockRefs;
#[async_trait] #[async_trait]
impl TableRepair for RepairBlockRefs { impl Worker for RepairBlockrefsWorker {
type T = BlockRefTable; fn name(&self) -> String {
"Block refs repair worker".into()
fn table(garage: &Garage) -> &Table<Self::T, TableShardedReplication> {
&garage.block_ref_table
} }
async fn process(&mut self, garage: &Garage, mut block_ref: BlockRef) -> Result<bool, Error> { fn status(&self) -> WorkerStatus {
WorkerStatus {
progress: Some(self.counter.to_string()),
..Default::default()
}
}
async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
let (item_bytes, next_pos) =
match self.garage.block_ref_table.data.store.get_gt(&self.pos)? {
Some((k, v)) => (v, k),
None => {
info!("repair_block_ref: finished, done {}", self.counter);
return Ok(WorkerState::Done);
}
};
let block_ref = BlockRef::decode(&item_bytes).ok_or_message("Cannot decode BlockRef")?;
if !block_ref.deleted.get() { if !block_ref.deleted.get() {
let ref_exists = garage let version = self
.garage
.version_table .version_table
.get(&block_ref.version, &EmptyKey) .get(&block_ref.version, &EmptyKey)
.await? .await?;
.map(|v| !v.deleted.get()) // The version might not exist if it has been GC'ed
.unwrap_or(false); let ref_exists = version.map(|v| !v.deleted.get()).unwrap_or(false);
if !ref_exists { if !ref_exists {
info!( info!(
"Repair block ref: marking block_ref as deleted: {:?}", "Repair block ref: marking block_ref as deleted: {:?}",
block_ref block_ref
); );
block_ref.deleted.set(); self.garage
garage.block_ref_table.insert(&block_ref).await?; .block_ref_table
return Ok(true); .insert(&BlockRef {
} block: block_ref.block,
} version: block_ref.version,
deleted: true.into(),
Ok(false)
}
}
// ----
struct RepairMpu;
#[async_trait]
impl TableRepair for RepairMpu {
type T = MultipartUploadTable;
fn table(garage: &Garage) -> &Table<Self::T, TableShardedReplication> {
&garage.mpu_table
}
async fn process(&mut self, garage: &Garage, mut mpu: MultipartUpload) -> Result<bool, Error> {
if !mpu.deleted.get() {
let ref_exists = garage
.object_table
.get(&mpu.bucket_id, &mpu.key)
.await?
.map(|o| {
o.versions()
.iter()
.any(|x| x.uuid == mpu.upload_id && x.is_uploading(Some(true)))
}) })
.unwrap_or(false); .await?;
if !ref_exists {
info!(
"Repair multipart uploads: marking mpu as deleted: {:?}",
mpu
);
mpu.parts.clear();
mpu.deleted.set();
garage.mpu_table.insert(&mpu).await?;
return Ok(true);
} }
} }
Ok(false) self.counter += 1;
self.pos = next_pos;
Ok(WorkerState::Busy)
}
async fn wait_for_work(&mut self) -> WorkerState {
unreachable!()
} }
} }


@ -52,7 +52,6 @@ impl Instance {
r#" r#"
metadata_dir = "{path}/meta" metadata_dir = "{path}/meta"
data_dir = "{path}/data" data_dir = "{path}/data"
db_engine = "lmdb"
replication_mode = "1" replication_mode = "1"
@ -142,7 +141,7 @@ api_bind_addr = "127.0.0.1:{admin_port}"
self.command() self.command()
.args(["layout", "assign"]) .args(["layout", "assign"])
.arg(node_short_id) .arg(node_short_id)
.args(["-c", "1G", "-z", "unzonned"]) .args(["-c", "1", "-z", "unzonned"])
.quiet() .quiet()
.expect_success_status("Could not assign garage node layout"); .expect_success_status("Could not assign garage node layout");
self.command() self.command()
@ -187,9 +186,9 @@ api_bind_addr = "127.0.0.1:{admin_port}"
let mut key = Key::default(); let mut key = Key::default();
let mut cmd = self.command(); let mut cmd = self.command();
let base = cmd.args(["key", "create"]); let base = cmd.args(["key", "new"]);
let with_name = match maybe_name { let with_name = match maybe_name {
Some(name) => base.args([name]), Some(name) => base.args(["--name", name]),
None => base, None => base,
}; };


@ -44,7 +44,6 @@ async fn test_items_and_indices() {
let content = format!("{}: hello world", sk).into_bytes(); let content = format!("{}: hello world", sk).into_bytes();
let content2 = format!("{}: hello universe", sk).into_bytes(); let content2 = format!("{}: hello universe", sk).into_bytes();
let content3 = format!("{}: concurrent value", sk).into_bytes(); let content3 = format!("{}: concurrent value", sk).into_bytes();
eprintln!("test iteration {}: {}", i, sk);
// Put initially, no causality token // Put initially, no causality token
let res = ctx let res = ctx
@ -90,7 +89,7 @@ async fn test_items_and_indices() {
assert_eq!(res_body, content); assert_eq!(res_body, content);
// ReadIndex -- now there should be some stuff // ReadIndex -- now there should be some stuff
tokio::time::sleep(Duration::from_millis(100)).await; tokio::time::sleep(Duration::from_secs(1)).await;
let res = ctx let res = ctx
.k2v .k2v
.request .request
@ -159,7 +158,7 @@ async fn test_items_and_indices() {
assert_eq!(res_body, content2); assert_eq!(res_body, content2);
// ReadIndex -- now there should be some stuff // ReadIndex -- now there should be some stuff
tokio::time::sleep(Duration::from_millis(100)).await; tokio::time::sleep(Duration::from_secs(1)).await;
let res = ctx let res = ctx
.k2v .k2v
.request .request
@ -231,7 +230,7 @@ async fn test_items_and_indices() {
); );
// ReadIndex -- now there should be some stuff // ReadIndex -- now there should be some stuff
tokio::time::sleep(Duration::from_millis(100)).await; tokio::time::sleep(Duration::from_secs(1)).await;
let res = ctx let res = ctx
.k2v .k2v
.request .request
@ -300,7 +299,7 @@ async fn test_items_and_indices() {
assert_eq!(res.status(), StatusCode::NO_CONTENT); assert_eq!(res.status(), StatusCode::NO_CONTENT);
// ReadIndex -- now there should be some stuff // ReadIndex -- now there should be some stuff
tokio::time::sleep(Duration::from_millis(100)).await; tokio::time::sleep(Duration::from_secs(1)).await;
let res = ctx let res = ctx
.k2v .k2v
.request .request


@ -5,190 +5,6 @@ use aws_sdk_s3::types::{CompletedMultipartUpload, CompletedPart};
const SZ_5MB: usize = 5 * 1024 * 1024; const SZ_5MB: usize = 5 * 1024 * 1024;
const SZ_10MB: usize = 10 * 1024 * 1024; const SZ_10MB: usize = 10 * 1024 * 1024;
#[tokio::test]
async fn test_multipart_upload() {
let ctx = common::context();
let bucket = ctx.create_bucket("testmpu");
let u1 = vec![0x11; SZ_5MB];
let u2 = vec![0x22; SZ_5MB];
let u3 = vec![0x33; SZ_5MB];
let u4 = vec![0x44; SZ_5MB];
let u5 = vec![0x55; SZ_5MB];
let up = ctx
.client
.create_multipart_upload()
.bucket(&bucket)
.key("a")
.send()
.await
.unwrap();
assert!(up.upload_id.is_some());
let uid = up.upload_id.as_ref().unwrap();
let p3 = ctx
.client
.upload_part()
.bucket(&bucket)
.key("a")
.upload_id(uid)
.part_number(3)
.body(ByteStream::from(u3.clone()))
.send()
.await
.unwrap();
let _p1 = ctx
.client
.upload_part()
.bucket(&bucket)
.key("a")
.upload_id(uid)
.part_number(1)
.body(ByteStream::from(u1))
.send()
.await
.unwrap();
let _p4 = ctx
.client
.upload_part()
.bucket(&bucket)
.key("a")
.upload_id(uid)
.part_number(4)
.body(ByteStream::from(u4))
.send()
.await
.unwrap();
let p1bis = ctx
.client
.upload_part()
.bucket(&bucket)
.key("a")
.upload_id(uid)
.part_number(1)
.body(ByteStream::from(u2.clone()))
.send()
.await
.unwrap();
let p6 = ctx
.client
.upload_part()
.bucket(&bucket)
.key("a")
.upload_id(uid)
.part_number(6)
.body(ByteStream::from(u5.clone()))
.send()
.await
.unwrap();
{
let r = ctx
.client
.list_parts()
.bucket(&bucket)
.key("a")
.upload_id(uid)
.send()
.await
.unwrap();
assert_eq!(r.parts.unwrap().len(), 4);
}
let cmp = CompletedMultipartUpload::builder()
.parts(
CompletedPart::builder()
.part_number(1)
.e_tag(p1bis.e_tag.unwrap())
.build(),
)
.parts(
CompletedPart::builder()
.part_number(3)
.e_tag(p3.e_tag.unwrap())
.build(),
)
.parts(
CompletedPart::builder()
.part_number(6)
.e_tag(p6.e_tag.unwrap())
.build(),
)
.build();
ctx.client
.complete_multipart_upload()
.bucket(&bucket)
.key("a")
.upload_id(uid)
.multipart_upload(cmp)
.send()
.await
.unwrap();
// The multipart upload must not appear anymore
assert!(ctx
.client
.list_parts()
.bucket(&bucket)
.key("a")
.upload_id(uid)
.send()
.await
.is_err());
{
// The object must appear as a regular object
let r = ctx
.client
.head_object()
.bucket(&bucket)
.key("a")
.send()
.await
.unwrap();
assert_eq!(r.content_length, (SZ_5MB * 3) as i64);
}
{
let o = ctx
.client
.get_object()
.bucket(&bucket)
.key("a")
.send()
.await
.unwrap();
assert_bytes_eq!(o.body, &[&u2[..], &u3[..], &u5[..]].concat());
}
{
for (part_number, data) in [(1, &u2), (2, &u3), (3, &u5)] {
let o = ctx
.client
.get_object()
.bucket(&bucket)
.key("a")
.part_number(part_number)
.send()
.await
.unwrap();
eprintln!("get_object with part_number = {}", part_number);
assert_eq!(o.content_length, SZ_5MB as i64);
assert_bytes_eq!(o.body, data);
}
}
}
#[tokio::test] #[tokio::test]
async fn test_uploadlistpart() { async fn test_uploadlistpart() {
let ctx = common::context(); let ctx = common::context();
@ -249,8 +65,7 @@ async fn test_uploadlistpart() {
let ps = r.parts.unwrap(); let ps = r.parts.unwrap();
assert_eq!(ps.len(), 1); assert_eq!(ps.len(), 1);
assert_eq!(ps[0].part_number, 2); let fp = ps.iter().find(|x| x.part_number == 2).unwrap();
let fp = &ps[0];
assert!(fp.last_modified.is_some()); assert!(fp.last_modified.is_some());
assert_eq!( assert_eq!(
fp.e_tag.as_ref().unwrap(), fp.e_tag.as_ref().unwrap(),
@ -285,24 +100,13 @@ async fn test_uploadlistpart() {
let ps = r.parts.unwrap(); let ps = r.parts.unwrap();
assert_eq!(ps.len(), 2); assert_eq!(ps.len(), 2);
let fp = ps.iter().find(|x| x.part_number == 1).unwrap();
assert_eq!(ps[0].part_number, 1);
let fp = &ps[0];
assert!(fp.last_modified.is_some()); assert!(fp.last_modified.is_some());
assert_eq!( assert_eq!(
fp.e_tag.as_ref().unwrap(), fp.e_tag.as_ref().unwrap(),
"\"3c484266f9315485694556e6c693bfa2\"" "\"3c484266f9315485694556e6c693bfa2\""
); );
assert_eq!(fp.size, SZ_5MB as i64); assert_eq!(fp.size, SZ_5MB as i64);
assert_eq!(ps[1].part_number, 2);
let sp = &ps[1];
assert!(sp.last_modified.is_some());
assert_eq!(
sp.e_tag.as_ref().unwrap(),
"\"3366bb9dcf710d6801b5926467d02e19\""
);
assert_eq!(sp.size, SZ_5MB as i64);
} }
{ {
@ -319,19 +123,12 @@ async fn test_uploadlistpart() {
.unwrap(); .unwrap();
assert!(r.part_number_marker.is_none()); assert!(r.part_number_marker.is_none());
assert_eq!(r.next_part_number_marker.as_deref(), Some("1")); assert!(r.next_part_number_marker.is_some());
assert_eq!(r.max_parts, 1_i32); assert_eq!(r.max_parts, 1_i32);
assert!(r.is_truncated); assert!(r.is_truncated);
assert_eq!(r.key.unwrap(), "a"); assert_eq!(r.key.unwrap(), "a");
assert_eq!(r.upload_id.unwrap().as_str(), uid.as_str()); assert_eq!(r.upload_id.unwrap().as_str(), uid.as_str());
let parts = r.parts.unwrap(); assert_eq!(r.parts.unwrap().len(), 1);
assert_eq!(parts.len(), 1);
let fp = &parts[0];
assert_eq!(fp.part_number, 1);
assert_eq!(
fp.e_tag.as_ref().unwrap(),
"\"3c484266f9315485694556e6c693bfa2\""
);
let r2 = ctx let r2 = ctx
.client .client
@ -350,18 +147,10 @@ async fn test_uploadlistpart() {
r.next_part_number_marker.as_ref().unwrap() r.next_part_number_marker.as_ref().unwrap()
); );
assert_eq!(r2.max_parts, 1_i32); assert_eq!(r2.max_parts, 1_i32);
assert!(r2.is_truncated);
assert_eq!(r2.key.unwrap(), "a"); assert_eq!(r2.key.unwrap(), "a");
assert_eq!(r2.upload_id.unwrap().as_str(), uid.as_str()); assert_eq!(r2.upload_id.unwrap().as_str(), uid.as_str());
let parts = r2.parts.unwrap(); assert_eq!(r2.parts.unwrap().len(), 1);
assert_eq!(parts.len(), 1);
let fp = &parts[0];
assert_eq!(fp.part_number, 2);
assert_eq!(
fp.e_tag.as_ref().unwrap(),
"\"3366bb9dcf710d6801b5926467d02e19\""
);
//assert!(r2.is_truncated); // WHY? (this was the test before)
assert!(!r2.is_truncated);
} }
let cmp = CompletedMultipartUpload::builder() let cmp = CompletedMultipartUpload::builder()


@ -1,6 +1,6 @@
[package] [package]
name = "garage_model" name = "garage_model"
version = "0.9.0" version = "0.8.4"
authors = ["Alex Auvolat <alex@adnab.me>"] authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"
@ -23,7 +23,6 @@ garage_util.workspace = true
async-trait = "0.1.7" async-trait = "0.1.7"
arc-swap = "1.0" arc-swap = "1.0"
blake2 = "0.10" blake2 = "0.10"
chrono = "0.4"
err-derive = "0.3" err-derive = "0.3"
hex = "0.4" hex = "0.4"
base64 = "0.21" base64 = "0.21"
@ -39,10 +38,10 @@ futures-util = "0.3"
tokio = { version = "1.0", default-features = false, features = ["rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] } tokio = { version = "1.0", default-features = false, features = ["rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] }
opentelemetry = "0.17" opentelemetry = "0.17"
netapp = "0.10" netapp = "0.5"
[features] [features]
default = [ "sled", "lmdb", "sqlite" ] default = [ "sled" ]
k2v = [ "garage_util/k2v" ] k2v = [ "garage_util/k2v" ]
lmdb = [ "garage_db/lmdb" ] lmdb = [ "garage_db/lmdb" ]
sled = [ "garage_db/sled" ] sled = [ "garage_db/sled" ]


@@ -48,9 +48,6 @@ mod v08 {
 		pub website_config: crdt::Lww<Option<WebsiteConfig>>,
 		/// CORS rules
 		pub cors_config: crdt::Lww<Option<Vec<CorsRule>>>,
-		/// Lifecycle configuration
-		#[serde(default)]
-		pub lifecycle_config: crdt::Lww<Option<Vec<LifecycleRule>>>,
 		/// Bucket quotas
 		#[serde(default)]
 		pub quotas: crdt::Lww<BucketQuotas>,
@@ -72,42 +69,6 @@ mod v08 {
 		pub expose_headers: Vec<String>,
 	}

-	/// Lifecycle configuration rule
-	#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
-	pub struct LifecycleRule {
-		/// The ID of the rule
-		pub id: Option<String>,
-		/// Whether the rule is active
-		pub enabled: bool,
-		/// The filter to check whether rule applies to a given object
-		pub filter: LifecycleFilter,
-		/// Number of days after which incomplete multipart uploads are aborted
-		pub abort_incomplete_mpu_days: Option<usize>,
-		/// Expiration policy for stored objects
-		pub expiration: Option<LifecycleExpiration>,
-	}
-
-	/// A lifecycle filter is a set of conditions that must all be true.
-	/// For each condition, if it is None, it is not verified (always true),
-	/// and if it is Some(x), then it is verified for value x
-	#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize, Default)]
-	pub struct LifecycleFilter {
-		/// If Some(x), object key has to start with prefix x
-		pub prefix: Option<String>,
-		/// If Some(x), object size has to be more than x
-		pub size_gt: Option<u64>,
-		/// If Some(x), object size has to be less than x
-		pub size_lt: Option<u64>,
-	}
-
-	#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
-	pub enum LifecycleExpiration {
-		/// Objects expire x days after they were created
-		AfterDays(usize),
-		/// Objects expire at date x (must be in yyyy-mm-dd format)
-		AtDate(String),
-	}
-
 	#[derive(Default, PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
 	pub struct BucketQuotas {
 		/// Maximum size in bytes (bucket size = sum of sizes of objects in the bucket)
@@ -127,7 +88,7 @@ impl AutoCrdt for BucketQuotas {
 impl BucketParams {
 	/// Create an empty BucketParams with no authorized keys and no website accesss
-	fn new() -> Self {
+	pub fn new() -> Self {
 		BucketParams {
 			creation_date: now_msec(),
 			authorized_keys: crdt::Map::new(),
@@ -135,7 +96,6 @@ impl BucketParams {
 			local_aliases: crdt::LwwMap::new(),
 			website_config: crdt::Lww::new(None),
 			cors_config: crdt::Lww::new(None),
-			lifecycle_config: crdt::Lww::new(None),
 			quotas: crdt::Lww::new(BucketQuotas::default()),
 		}
 	}
@@ -151,25 +111,10 @@ impl Crdt for BucketParams {
 		self.website_config.merge(&o.website_config);
 		self.cors_config.merge(&o.cors_config);
-		self.lifecycle_config.merge(&o.lifecycle_config);
 		self.quotas.merge(&o.quotas);
 	}
 }

-pub fn parse_lifecycle_date(date: &str) -> Result<chrono::NaiveDate, &'static str> {
-	use chrono::prelude::*;
-
-	if let Ok(datetime) = NaiveDateTime::parse_from_str(date, "%Y-%m-%dT%H:%M:%SZ") {
-		if datetime.time() == NaiveTime::MIN {
-			Ok(datetime.date())
-		} else {
-			Err("date must be at midnight")
-		}
-	} else {
-		NaiveDate::parse_from_str(date, "%Y-%m-%d").map_err(|_| "date has invalid format")
-	}
-}
-
 impl Default for Bucket {
 	fn default() -> Self {
 		Self::new()
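For reference, a minimal usage sketch of the `parse_lifecycle_date` helper shown in the hunk above (it assumes chrono 0.4, the dependency dropped from garage_model's Cargo.toml; the `main` wrapper and the sample date strings are illustrative only, not taken from the diff). The helper accepts either a plain `yyyy-mm-dd` date or a timestamp that falls exactly at midnight:

use chrono::prelude::*;

pub fn parse_lifecycle_date(date: &str) -> Result<NaiveDate, &'static str> {
	// Accept a full timestamp only if it is exactly at midnight...
	if let Ok(datetime) = NaiveDateTime::parse_from_str(date, "%Y-%m-%dT%H:%M:%SZ") {
		if datetime.time() == NaiveTime::MIN {
			Ok(datetime.date())
		} else {
			Err("date must be at midnight")
		}
	} else {
		// ...otherwise fall back to a plain yyyy-mm-dd date.
		NaiveDate::parse_from_str(date, "%Y-%m-%d").map_err(|_| "date has invalid format")
	}
}

fn main() {
	assert!(parse_lifecycle_date("2023-09-01").is_ok());
	assert!(parse_lifecycle_date("2023-09-01T00:00:00Z").is_ok());
	assert_eq!(
		parse_lifecycle_date("2023-09-01T12:30:00Z"),
		Err("date must be at midnight")
	);
	assert_eq!(
		parse_lifecycle_date("not-a-date"),
		Err("date has invalid format")
	);
}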

@@ -7,7 +7,6 @@ use garage_db as db;
 use garage_util::background::*;
 use garage_util::config::*;
 use garage_util::error::*;
-use garage_util::persister::PersisterShared;

 use garage_rpc::replication_mode::ReplicationMode;
 use garage_rpc::system::System;
@@ -18,8 +17,6 @@ use garage_table::replication::TableShardedReplication;
 use garage_table::*;

 use crate::s3::block_ref_table::*;
-use crate::s3::lifecycle_worker;
-use crate::s3::mpu_table::*;
 use crate::s3::object_table::*;
 use crate::s3::version_table::*;
@@ -60,18 +57,11 @@ pub struct Garage {
 	pub object_table: Arc<Table<ObjectTable, TableShardedReplication>>,
 	/// Counting table containing object counters
 	pub object_counter_table: Arc<IndexCounter<Object>>,
-	/// Table containing S3 multipart uploads
-	pub mpu_table: Arc<Table<MultipartUploadTable, TableShardedReplication>>,
-	/// Counting table containing multipart object counters
-	pub mpu_counter_table: Arc<IndexCounter<MultipartUpload>>,
 	/// Table containing S3 object versions
 	pub version_table: Arc<Table<VersionTable, TableShardedReplication>>,
 	/// Table containing S3 block references (not blocks themselves)
 	pub block_ref_table: Arc<Table<BlockRefTable, TableShardedReplication>>,
-	/// Persister for lifecycle worker info
-	pub lifecycle_persister: PersisterShared<lifecycle_worker::LifecycleWorkerPersisted>,

 	#[cfg(feature = "k2v")]
 	pub k2v: GarageK2V,
 }
@@ -92,22 +82,8 @@ impl Garage {
 		// Create meta dir and data dir if they don't exist already
 		std::fs::create_dir_all(&config.metadata_dir)
 			.ok_or_message("Unable to create Garage metadata directory")?;
-		match &config.data_dir {
-			DataDirEnum::Single(data_dir) => {
-				std::fs::create_dir_all(data_dir).ok_or_message(format!(
-					"Unable to create Garage data directory: {}",
-					data_dir.to_string_lossy()
-				))?;
-			}
-			DataDirEnum::Multiple(data_dirs) => {
-				for dir in data_dirs {
-					std::fs::create_dir_all(&dir.path).ok_or_message(format!(
-						"Unable to create Garage data directory: {}",
-						dir.path.to_string_lossy()
-					))?;
-				}
-			}
-		}
+		std::fs::create_dir_all(&config.data_dir)
+			.ok_or_message("Unable to create Garage data directory")?;

 		info!("Opening database...");
 		let mut db_path = config.metadata_dir.clone();
@@ -115,11 +91,6 @@ impl Garage {
 			// ---- Sled DB ----
 			#[cfg(feature = "sled")]
 			"sled" => {
-				if config.metadata_fsync {
-					return Err(Error::Message(format!(
-						"`metadata_fsync = true` is not supported with the Sled database engine"
-					)));
-				}
 				db_path.push("db");
 				info!("Opening Sled database at: {}", db_path.display());
 				let db = db::sled_adapter::sled::Config::default()
@@ -138,15 +109,6 @@ impl Garage {
 				db_path.push("db.sqlite");
 				info!("Opening Sqlite database at: {}", db_path.display());
 				let db = db::sqlite_adapter::rusqlite::Connection::open(db_path)
-					.and_then(|db| {
-						db.pragma_update(None, "journal_mode", &"WAL")?;
-						if config.metadata_fsync {
-							db.pragma_update(None, "synchronous", &"NORMAL")?;
-						} else {
-							db.pragma_update(None, "synchronous", &"OFF")?;
-						}
-						Ok(db)
-					})
 					.ok_or_message("Unable to open sqlite DB")?;
 				db::sqlite_adapter::SqliteDb::init(db)
 			}
@@ -174,10 +136,8 @@ impl Garage {
 				env_builder.max_readers(500);
 				env_builder.map_size(map_size);
 				unsafe {
-					env_builder.flag(heed::flags::Flags::MdbNoMetaSync);
-					if !config.metadata_fsync {
-						env_builder.flag(heed::flags::Flags::MdbNoSync);
-					}
+					env_builder.flag(heed::flags::Flags::MdbNoSync);
+					env_builder.flag(heed::flags::Flags::MdbNoMetaSync);
 				}
 				let db = match env_builder.open(&db_path) {
 					Err(heed::Error::Io(e)) if e.kind() == std::io::ErrorKind::OutOfMemory => {
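The two hunks above both revolve around the `metadata_fsync` setting that exists only on the newer side of this compare. As a rough summary of what is visible in the diff (the `MetaEngine` enum and `durability_summary` function below are made-up names for illustration, not Garage APIs):

// Illustrative-only summary of the durability knobs shown in the hunks above.
enum MetaEngine {
	Sqlite,
	Lmdb,
}

fn durability_summary(engine: MetaEngine, metadata_fsync: bool) -> &'static str {
	match (engine, metadata_fsync) {
		// Sqlite: WAL journaling in both cases, `synchronous` depends on fsync.
		(MetaEngine::Sqlite, true) => "journal_mode=WAL, synchronous=NORMAL",
		(MetaEngine::Sqlite, false) => "journal_mode=WAL, synchronous=OFF",
		// LMDB: MdbNoMetaSync is always set; MdbNoSync only when fsync is disabled.
		(MetaEngine::Lmdb, true) => "MdbNoMetaSync",
		(MetaEngine::Lmdb, false) => "MdbNoMetaSync + MdbNoSync",
	}
}

fn main() {
	println!("{}", durability_summary(MetaEngine::Sqlite, true));
	println!("{}", durability_summary(MetaEngine::Lmdb, false));
}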
@@ -222,9 +182,6 @@ impl Garage {
 		let replication_mode = ReplicationMode::parse(&config.replication_mode)
 			.ok_or_message("Invalid replication_mode in config file.")?;

-		info!("Initialize background variable system...");
-		let mut bg_vars = vars::BgVars::new();
-
 		info!("Initialize membership management system...");
 		let system = System::new(network_key, replication_mode, &config)?;
@@ -251,12 +208,10 @@ impl Garage {
 		let block_manager = BlockManager::new(
 			&db,
 			config.data_dir.clone(),
-			config.data_fsync,
 			config.compression_level,
 			data_rep_param,
 			system.clone(),
-		)?;
-		block_manager.register_bg_vars(&mut bg_vars);
+		);

 		// ---- admin tables ----
 		info!("Initialize bucket_table...");
@@ -293,20 +248,6 @@ impl Garage {
 			&db,
 		);

-		info!("Initialize multipart upload counter table...");
-		let mpu_counter_table = IndexCounter::new(system.clone(), meta_rep_param.clone(), &db);
-
-		info!("Initialize multipart upload table...");
-		let mpu_table = Table::new(
-			MultipartUploadTable {
-				version_table: version_table.clone(),
-				mpu_counter_table: mpu_counter_table.clone(),
-			},
-			meta_rep_param.clone(),
-			system.clone(),
-			&db,
-		);
-
 		info!("Initialize object counter table...");
 		let object_counter_table = IndexCounter::new(system.clone(), meta_rep_param.clone(), &db);
@@ -315,7 +256,6 @@ impl Garage {
 		let object_table = Table::new(
 			ObjectTable {
 				version_table: version_table.clone(),
-				mpu_table: mpu_table.clone(),
 				object_counter_table: object_counter_table.clone(),
 			},
 			meta_rep_param.clone(),
@@ -323,15 +263,14 @@ impl Garage {
 			&db,
 		);

-		info!("Load lifecycle worker state...");
-		let lifecycle_persister =
-			PersisterShared::new(&system.metadata_dir, "lifecycle_worker_state");
-		lifecycle_worker::register_bg_vars(&lifecycle_persister, &mut bg_vars);
-
 		// ---- K2V ----
 		#[cfg(feature = "k2v")]
 		let k2v = GarageK2V::new(system.clone(), &db, meta_rep_param);

+		// Initialize bg vars
+		let mut bg_vars = vars::BgVars::new();
+		block_manager.register_bg_vars(&mut bg_vars);
+
 		// -- done --
 		Ok(Arc::new(Self {
 			config,
@@ -345,17 +284,14 @@ impl Garage {
 			key_table,
 			object_table,
 			object_counter_table,
-			mpu_table,
-			mpu_counter_table,
 			version_table,
 			block_ref_table,
-			lifecycle_persister,

 			#[cfg(feature = "k2v")]
 			k2v,
 		}))
 	}
-	pub fn spawn_workers(self: &Arc<Self>, bg: &BackgroundRunner) {
+	pub fn spawn_workers(&self, bg: &BackgroundRunner) {
 		self.block_manager.spawn_workers(bg);

 		self.bucket_table.spawn_workers(bg);
@@ -364,16 +300,9 @@ impl Garage {
 		self.object_table.spawn_workers(bg);
 		self.object_counter_table.spawn_workers(bg);
-		self.mpu_table.spawn_workers(bg);
-		self.mpu_counter_table.spawn_workers(bg);
 		self.version_table.spawn_workers(bg);
 		self.block_ref_table.spawn_workers(bg);

-		bg.spawn_worker(lifecycle_worker::LifecycleWorker::new(
-			self.clone(),
-			self.lifecycle_persister.clone(),
-		));
-
 		#[cfg(feature = "k2v")]
 		self.k2v.spawn_workers(bg);
 	}
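One detail worth noting in the last hunk: `spawn_workers` changes its receiver from `&self` to `self: &Arc<Self>` because, on the newer side, it hands an owned `Arc` clone to the lifecycle worker it spawns (`self.clone()` above). A minimal standalone sketch of that pattern, using simplified stand-in types rather than Garage's real `BackgroundRunner`:

use std::sync::Arc;

struct Garage;

impl Garage {
	// Taking `self: &Arc<Self>` lets the method clone the Arc itself,
	// giving a spawned worker shared ownership of the instance.
	fn spawn_workers(self: &Arc<Self>) {
		let for_worker: Arc<Garage> = self.clone(); // clones the Arc, not Garage
		let handle = std::thread::spawn(move || {
			let _garage = for_worker; // the worker keeps the instance alive
		});
		handle.join().unwrap();
	}
}

fn main() {
	let garage = Arc::new(Garage);
	garage.spawn_workers(); // auto-ref to &Arc<Garage> matches the receiver
}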

@@ -478,9 +478,7 @@ impl<'a> BucketHelper<'a> {
 	// ----

 	/// Deletes all incomplete multipart uploads that are older than a certain time.
-	/// Returns the number of uploads aborted.
-	/// This will also include non-multipart uploads, which may be lingering
-	/// after a node crash
+	/// Returns the number of uploads aborted
 	pub async fn cleanup_incomplete_uploads(
 		&self,
 		bucket_id: &Uuid,
@@ -498,9 +496,7 @@ impl<'a> BucketHelper<'a> {
 			.get_range(
 				bucket_id,
 				start,
-				Some(ObjectFilter::IsUploading {
-					check_multipart: None,
-				}),
+				Some(ObjectFilter::IsUploading),
 				1000,
 				EnumerationOrder::Forward,
 			)
@@ -512,7 +508,7 @@ impl<'a> BucketHelper<'a> {
 			let aborted_versions = object
 				.versions()
 				.iter()
-				.filter(|v| v.is_uploading(None) && v.timestamp < older_than)
+				.filter(|v| v.is_uploading() && v.timestamp < older_than)
 				.map(|v| ObjectVersion {
 					state: ObjectVersionState::Aborted,
 					uuid: v.uuid,
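For readers comparing the two sides of the `cleanup_incomplete_uploads` hunks: on the newer side, `is_uploading` takes a `check_multipart: Option<bool>` argument, and passing `None` (as above) appears to match any in-progress upload, multipart or not, which matches the doc comment it adds. A simplified stand-in predicate illustrating that reading (the `VersionState` type below is hypothetical, not Garage's real `ObjectVersionState`):

struct VersionState {
	uploading: bool,
	multipart: bool,
}

impl VersionState {
	// None = any in-progress upload; Some(m) = only uploads whose multipart flag is m.
	fn is_uploading(&self, check_multipart: Option<bool>) -> bool {
		self.uploading && check_multipart.map(|m| m == self.multipart).unwrap_or(true)
	}
}

fn main() {
	let v = VersionState { uploading: true, multipart: false };
	assert!(v.is_uploading(None));        // any kind of upload
	assert!(v.is_uploading(Some(false))); // non-multipart only
	assert!(!v.is_uploading(Some(true))); // multipart only
}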

@@ -294,7 +294,7 @@ impl<T: CountedItem> IndexCounter<T> {
 			let counter_entry = local_counter.into_counter_entry(self.this_node);
 			self.local_counter
 				.db()
-				.transaction(|tx| self.table.queue_insert(tx, &counter_entry))?;
+				.transaction(|mut tx| self.table.queue_insert(&mut tx, &counter_entry))?;

 			next_start = Some(local_counter_k);
 		}
@@ -360,7 +360,7 @@ impl<T: CountedItem> IndexCounter<T> {
 			let counter_entry = local_counter.into_counter_entry(self.this_node);
 			self.local_counter
 				.db()
-				.transaction(|tx| self.table.queue_insert(tx, &counter_entry))?;
+				.transaction(|mut tx| self.table.queue_insert(&mut tx, &counter_entry))?;

 			next_start = Some(counted_entry_k);
 		}

@@ -149,19 +149,11 @@ impl Key {
 	}

 	/// Import a key from it's parts
-	pub fn import(key_id: &str, secret_key: &str, name: &str) -> Result<Self, &'static str> {
-		if key_id.len() != 26 || &key_id[..2] != "GK" || hex::decode(&key_id[2..]).is_err() {
-			return Err("The specified key ID is not a valid Garage key ID (starts with `GK`, followed by 12 hex-encoded bytes)");
-		}
-		if secret_key.len() != 64 || hex::decode(&secret_key).is_err() {
-			return Err("The specified secret key is not a valid Garage secret key (composed of 32 hex-encoded bytes)");
-		}
-
-		Ok(Self {
+	pub fn import(key_id: &str, secret_key: &str, name: &str) -> Self {
+		Self {
 			key_id: key_id.to_string(),
 			state: crdt::Deletable::present(KeyParams::new(secret_key, name)),
-		})
+		}
 	}

 	/// Create a new Key which can me merged to mark an existing key deleted
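The validation added to `Key::import` on the newer side boils down to two shape checks: the key ID must be 26 characters, start with `GK`, and end in 12 hex-encoded bytes, and the secret key must be 32 hex-encoded bytes (64 characters). A standalone sketch of those checks (the function names and the sample key ID are illustrative; it assumes the `hex` crate already listed in garage_model's dependencies):

fn is_valid_key_id(key_id: &str) -> bool {
	key_id.len() == 26 && &key_id[..2] == "GK" && hex::decode(&key_id[2..]).is_ok()
}

fn is_valid_secret_key(secret_key: &str) -> bool {
	secret_key.len() == 64 && hex::decode(secret_key).is_ok()
}

fn main() {
	assert!(is_valid_key_id("GK31c2f218a2e44f485b94239e")); // GK + 24 hex chars
	assert!(!is_valid_key_id("not-a-key-id"));
	assert!(is_valid_secret_key(&"0".repeat(64)));
	assert!(!is_valid_secret_key("too-short"));
}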

Some files were not shown because too many files have changed in this diff.