forked from Deuxfleurs/garage

Compare commits: main...db-no-unsa (1 commit)
Commit: a46c3d2502
96 changed files with 1440 additions and 3284 deletions
@@ -34,9 +34,7 @@ steps:
   - ./result/bin/garage_util-*
   - ./result/bin/garage_web-*
   - ./result/bin/garage-*
-  - GARAGE_TEST_INTEGRATION_DB_ENGINE=lmdb ./result/bin/integration-* || (cat tmp-garage-integration/stderr.log; false)
+  - ./result/bin/integration-* || (cat tmp-garage-integration/stderr.log; false)
-  - nix-shell --attr ci --run "killall -9 garage" || true
-  - GARAGE_TEST_INTEGRATION_DB_ENGINE=sqlite ./result/bin/integration-* || (cat tmp-garage-integration/stderr.log; false)
   - rm result
   - rm -rv tmp-garage-integration
 
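The removed steps ran the integration suite once per database engine, selected through the GARAGE_TEST_INTEGRATION_DB_ENGINE variable; the remaining step runs it a single time. A minimal sketch of how a test harness might read that variable; the helper name and the lmdb default are assumptions for illustration, not taken from this diff:

    use std::env;

    // Hypothetical helper, not the actual garage test harness: pick the DB
    // engine for integration tests from the environment, as the removed CI
    // lines did via GARAGE_TEST_INTEGRATION_DB_ENGINE=lmdb / =sqlite.
    fn test_db_engine() -> String {
        env::var("GARAGE_TEST_INTEGRATION_DB_ENGINE")
            .unwrap_or_else(|_| "lmdb".to_string()) // assumed default engine
    }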
Cargo.lock (generated), 144 lines changed
@@ -74,6 +74,12 @@ dependencies = [
  "memchr",
 ]
 
+[[package]]
+name = "aliasable"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "250f629c0161ad8107cf89319e990051fae62832fd343083bea452d93e2205fd"
+
 [[package]]
 name = "allocator-api2"
 version = "0.2.16"
@@ -905,9 +911,9 @@ dependencies = [
 
 [[package]]
 name = "crc32fast"
-version = "1.4.0"
+version = "1.3.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa"
+checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d"
 dependencies = [
  "cfg-if",
 ]
@@ -1304,7 +1310,7 @@ dependencies = [
 
 [[package]]
 name = "garage"
-version = "1.0.0"
+version = "0.10.0"
 dependencies = [
  "assert-json-diff",
  "async-trait",
@@ -1346,11 +1352,9 @@ dependencies = [
  "serde",
  "serde_bytes",
  "serde_json",
- "sha1",
  "sha2",
  "static_init",
  "structopt",
- "syslog-tracing",
  "timeago",
  "tokio",
  "toml",
@@ -1360,7 +1364,7 @@ dependencies = [
 
 [[package]]
 name = "garage_api"
-version = "1.0.0"
+version = "0.10.0"
 dependencies = [
  "aes-gcm",
  "argon2",
@@ -1369,8 +1373,6 @@ dependencies = [
  "base64 0.21.7",
  "bytes",
  "chrono",
- "crc32c",
- "crc32fast",
  "crypto-common",
  "err-derive",
  "form_urlencoded",
@@ -1404,7 +1406,6 @@ dependencies = [
  "serde",
  "serde_bytes",
  "serde_json",
- "sha1",
  "sha2",
  "tokio",
  "tokio-stream",
@@ -1415,7 +1416,7 @@ dependencies = [
 
 [[package]]
 name = "garage_block"
-version = "1.0.0"
+version = "0.10.0"
 dependencies = [
  "arc-swap",
  "async-compression",
@@ -1442,21 +1443,20 @@ dependencies = [
 
 [[package]]
 name = "garage_db"
-version = "1.0.0"
+version = "0.10.0"
 dependencies = [
  "err-derive",
  "heed",
  "hexdump",
  "mktemp",
- "r2d2",
- "r2d2_sqlite",
+ "ouroboros",
  "rusqlite",
  "tracing",
 ]
 
 [[package]]
 name = "garage_model"
-version = "1.0.0"
+version = "0.10.0"
 dependencies = [
  "arc-swap",
  "async-trait",
@@ -1475,7 +1475,6 @@ dependencies = [
  "hex",
  "http 1.0.0",
  "opentelemetry",
- "parse_duration",
  "rand",
  "serde",
  "serde_bytes",
@@ -1486,7 +1485,7 @@ dependencies = [
 
 [[package]]
 name = "garage_net"
-version = "1.0.0"
+version = "0.10.0"
 dependencies = [
  "arc-swap",
  "async-trait",
@@ -1512,7 +1511,7 @@ dependencies = [
 
 [[package]]
 name = "garage_rpc"
-version = "1.0.0"
+version = "0.10.0"
 dependencies = [
  "arc-swap",
  "async-trait",
@@ -1547,7 +1546,7 @@ dependencies = [
 
 [[package]]
 name = "garage_table"
-version = "1.0.0"
+version = "0.10.0"
 dependencies = [
  "arc-swap",
  "async-trait",
@@ -1569,7 +1568,7 @@ dependencies = [
 
 [[package]]
 name = "garage_util"
-version = "1.0.0"
+version = "0.10.0"
 dependencies = [
  "arc-swap",
  "async-trait",
@@ -1603,7 +1602,7 @@ dependencies = [
 
 [[package]]
 name = "garage_web"
-version = "1.0.0"
+version = "0.10.0"
 dependencies = [
  "err-derive",
  "futures",
@@ -1755,9 +1754,9 @@ dependencies = [
 
 [[package]]
 name = "hashlink"
-version = "0.9.0"
+version = "0.8.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "692eaaf7f7607518dd3cef090f1474b61edc5301d8012f09579920df68b725ee"
+checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7"
 dependencies = [
  "hashbrown 0.14.3",
 ]
@@ -2423,9 +2422,9 @@ dependencies = [
 
 [[package]]
 name = "libsqlite3-sys"
-version = "0.28.0"
+version = "0.27.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0c10584274047cb335c23d3e61bcef8e323adae7c5c8c760540f73610177fc3f"
+checksum = "cf4e226dcd58b4be396f7bd3c20da8fdee2911400705297ba7d2d7cc2c30f716"
 dependencies = [
  "cc",
  "pkg-config",
@@ -2783,6 +2782,31 @@ dependencies = [
  "num-traits",
 ]
 
+[[package]]
+name = "ouroboros"
+version = "0.18.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "97b7be5a8a3462b752f4be3ff2b2bf2f7f1d00834902e46be2a4d68b87b0573c"
+dependencies = [
+ "aliasable",
+ "ouroboros_macro",
+ "static_assertions",
+]
+
+[[package]]
+name = "ouroboros_macro"
+version = "0.18.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b645dcde5f119c2c454a92d0dfa271a2a3b205da92e4292a68ead4bdbfde1f33"
+dependencies = [
+ "heck 0.4.1",
+ "itertools 0.12.1",
+ "proc-macro2",
+ "proc-macro2-diagnostics",
+ "quote",
+ "syn 2.0.48",
+]
+
 [[package]]
 name = "outref"
 version = "0.5.1"
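ouroboros, added here along with its helpers (aliasable, static_assertions, and the proc-macro2-diagnostics entry below), generates safe self-referential structs: a value can own some data together with a field that borrows from it, with no hand-written unsafe. A minimal sketch of the pattern; this is the canonical owned-string example, not the actual garage_db code, which plausibly applies it to keep a SQLite statement and its row cursor together (an assumption based on this dependency change):

    use ouroboros::self_referencing;

    // Sketch only: own a String and a slice borrowing from it in one struct.
    #[self_referencing]
    struct OwnedSlice {
        text: String,
        #[borrows(text)]
        slice: &'this str,
    }

    fn main() {
        let v = OwnedSliceBuilder {
            text: "hello world".to_string(),
            slice_builder: |text| &text[..5], // borrows from the owned field
        }
        .build();
        assert_eq!(*v.borrow_slice(), "hello");
    }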
@@ -3110,6 +3134,19 @@ dependencies = [
  "unicode-ident",
 ]
 
+[[package]]
+name = "proc-macro2-diagnostics"
+version = "0.10.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.48",
+ "version_check",
+ "yansi",
+]
+
 [[package]]
 name = "prometheus"
 version = "0.13.3"
@@ -3203,28 +3240,6 @@ dependencies = [
  "proc-macro2",
 ]
 
-[[package]]
-name = "r2d2"
-version = "0.8.10"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "51de85fb3fb6524929c8a2eb85e6b6d363de4e8c48f9e2c2eac4944abc181c93"
-dependencies = [
- "log",
- "parking_lot 0.12.1",
- "scheduled-thread-pool",
-]
-
-[[package]]
-name = "r2d2_sqlite"
-version = "0.24.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6a982edf65c129796dba72f8775b292ef482b40d035e827a9825b3bc07ccc5f2"
-dependencies = [
- "r2d2",
- "rusqlite",
- "uuid",
-]
-
 [[package]]
 name = "rand"
 version = "0.8.5"
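For reference, the r2d2/r2d2_sqlite pair dropped here provided a pool of SQLite connections on the old side of the diff; the new side replaces that pooling approach with the ouroboros-based one above. A minimal sketch of the pooled style (the path, table, and function are illustrative, not from garage):

    use r2d2_sqlite::SqliteConnectionManager;

    // Illustrative only: open a pool of SQLite connections.
    fn open_pool(path: &str) -> Result<r2d2::Pool<SqliteConnectionManager>, r2d2::Error> {
        let manager = SqliteConnectionManager::file(path);
        r2d2::Pool::new(manager) // default pool size
    }

    fn main() -> Result<(), Box<dyn std::error::Error>> {
        let pool = open_pool("db.sqlite")?;
        let conn = pool.get()?; // checks out a pooled rusqlite::Connection
        conn.execute_batch("CREATE TABLE IF NOT EXISTS kv (k TEXT PRIMARY KEY, v BLOB);")?;
        Ok(())
    }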
@@ -3418,9 +3433,9 @@ checksum = "3cd14fd5e3b777a7422cca79358c57a8f6e3a703d9ac187448d0daf220c2407f"
 
 [[package]]
 name = "rusqlite"
-version = "0.31.0"
+version = "0.30.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b838eba278d213a8beaf485bd313fd580ca4505a00d5871caeb1457c55322cae"
+checksum = "a78046161564f5e7cd9008aff3b2990b3850dc8e0349119b98e8f251e099f24d"
 dependencies = [
  "bitflags 2.4.2",
  "fallible-iterator",
@@ -3585,15 +3600,6 @@ dependencies = [
  "windows-sys 0.52.0",
 ]
 
-[[package]]
-name = "scheduled-thread-pool"
-version = "0.2.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19"
-dependencies = [
- "parking_lot 0.12.1",
-]
-
 [[package]]
 name = "schemars"
 version = "0.8.16"
@@ -3873,6 +3879,12 @@ dependencies = [
  "der",
 ]
 
+[[package]]
+name = "static_assertions"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
+
 [[package]]
 name = "static_init"
 version = "1.0.3"
@@ -3986,17 +3998,6 @@ dependencies = [
  "unicode-xid",
 ]
 
-[[package]]
-name = "syslog-tracing"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "340b1540dcdb6b066bc2966e7974f977ab1a38f21b2be189014ffb0cc2405768"
-dependencies = [
- "libc",
- "tracing-core",
- "tracing-subscriber",
-]
-
 [[package]]
 name = "system-configuration"
 version = "0.5.1"
@@ -4552,7 +4553,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "79daa5ed5740825c40b389c5e50312b9c86df53fccd33f281df655642b43869d"
 dependencies = [
  "getrandom",
- "rand",
 ]
 
 [[package]]
@@ -4895,6 +4895,12 @@ version = "0.8.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "53be06678ed9e83edb1745eb72efc0bbcd7b5c3c35711a860906aed827a13d61"
 
+[[package]]
+name = "yansi"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6c2861d76f58ec8fc95708b9b1e417f7b12fd72ad33c01fa6886707092dea0d3"
+
 [[package]]
 name = "zerocopy"
 version = "0.7.32"
Cargo.nix, 396 lines changed
@@ -34,7 +34,7 @@ args@{
   ignoreLockHash,
 }:
 let
-  nixifiedLockHash = "1ccd5eb25a83962821e0e9da4ce6df31717b2b97a5b3a0c80c9e0e0759710143";
+  nixifiedLockHash = "17e620fad07e725e301488a3746ca3512d8785ce3077f634adde82e9e4eab2bb";
   workspaceSrc = if args.workspaceSrc == null then ./. else args.workspaceSrc;
   currentLockHash = builtins.hashFile "sha256" (workspaceSrc + /Cargo.lock);
   lockHashIgnored = if ignoreLockHash
@@ -58,17 +58,17 @@ in
 {
   cargo2nixVersion = "0.11.0";
   workspace = {
-    garage_db = rustPackages.unknown.garage_db."1.0.0";
-    garage_util = rustPackages.unknown.garage_util."1.0.0";
-    garage_net = rustPackages.unknown.garage_net."1.0.0";
-    garage_rpc = rustPackages.unknown.garage_rpc."1.0.0";
+    garage_db = rustPackages.unknown.garage_db."0.10.0";
+    garage_util = rustPackages.unknown.garage_util."0.10.0";
+    garage_net = rustPackages.unknown.garage_net."0.10.0";
+    garage_rpc = rustPackages.unknown.garage_rpc."0.10.0";
     format_table = rustPackages.unknown.format_table."0.1.1";
-    garage_table = rustPackages.unknown.garage_table."1.0.0";
-    garage_block = rustPackages.unknown.garage_block."1.0.0";
-    garage_model = rustPackages.unknown.garage_model."1.0.0";
-    garage_api = rustPackages.unknown.garage_api."1.0.0";
-    garage_web = rustPackages.unknown.garage_web."1.0.0";
-    garage = rustPackages.unknown.garage."1.0.0";
+    garage_table = rustPackages.unknown.garage_table."0.10.0";
+    garage_block = rustPackages.unknown.garage_block."0.10.0";
+    garage_model = rustPackages.unknown.garage_model."0.10.0";
+    garage_api = rustPackages.unknown.garage_api."0.10.0";
+    garage_web = rustPackages.unknown.garage_web."0.10.0";
+    garage = rustPackages.unknown.garage."0.10.0";
     k2v-client = rustPackages.unknown.k2v-client."0.0.4";
   };
   "registry+https://github.com/rust-lang/crates.io-index".addr2line."0.21.0" = overridableMkRustCrate (profileName: rec {
@@ -152,13 +152,13 @@ in
       (lib.optional (rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "std")
     ];
     dependencies = {
-      ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "cfg_if" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".cfg-if."1.0.0" { inherit profileName; }).out;
+      ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "cfg_if" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".cfg-if."1.0.0" { inherit profileName; }).out;
       ${ if rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "getrandom" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".getrandom."0.2.12" { inherit profileName; }).out;
-      ${ if (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") && !((hostPlatform.parsed.cpu.name == "armv6l" || hostPlatform.parsed.cpu.name == "armv7l") && hostPlatform.parsed.kernel.name == "none") then "once_cell" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".once_cell."1.19.0" { inherit profileName; }).out;
+      ${ if (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") && !((hostPlatform.parsed.cpu.name == "armv6l" || hostPlatform.parsed.cpu.name == "armv7l") && hostPlatform.parsed.kernel.name == "none") then "once_cell" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".once_cell."1.19.0" { inherit profileName; }).out;
-      ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "zerocopy" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".zerocopy."0.7.32" { inherit profileName; }).out;
+      ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "zerocopy" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".zerocopy."0.7.32" { inherit profileName; }).out;
     };
     buildDependencies = {
-      ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "version_check" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".version_check."0.9.4" { profileName = "__noProfile"; }).out;
+      ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "version_check" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".version_check."0.9.4" { profileName = "__noProfile"; }).out;
     };
   });
 
@@ -176,13 +176,24 @@ in
     };
   });
 
+  "registry+https://github.com/rust-lang/crates.io-index".aliasable."0.1.3" = overridableMkRustCrate (profileName: rec {
+    name = "aliasable";
+    version = "0.1.3";
+    registry = "registry+https://github.com/rust-lang/crates.io-index";
+    src = fetchCratesIo { inherit name version; sha256 = "250f629c0161ad8107cf89319e990051fae62832fd343083bea452d93e2205fd"; };
+    features = builtins.concatLists [
+      [ "alloc" ]
+      [ "default" ]
+    ];
+  });
+
   "registry+https://github.com/rust-lang/crates.io-index".allocator-api2."0.2.16" = overridableMkRustCrate (profileName: rec {
     name = "allocator-api2";
     version = "0.2.16";
     registry = "registry+https://github.com/rust-lang/crates.io-index";
     src = fetchCratesIo { inherit name version; sha256 = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5"; };
     features = builtins.concatLists [
-      (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "alloc")
+      (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "alloc")
     ];
   });
 
@@ -674,7 +685,7 @@ in
       aws_smithy_types = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".aws-smithy-types."1.1.4" { inherit profileName; }).out;
       bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.5.0" { inherit profileName; }).out;
       crc32c = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crc32c."0.6.4" { inherit profileName; }).out;
-      crc32fast = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crc32fast."1.4.0" { inherit profileName; }).out;
+      crc32fast = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crc32fast."1.3.2" { inherit profileName; }).out;
       hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
       http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.11" { inherit profileName; }).out;
       http_body = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http-body."0.4.6" { inherit profileName; }).out;
@@ -694,7 +705,7 @@ in
     dependencies = {
       aws_smithy_types = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".aws-smithy-types."1.1.4" { inherit profileName; }).out;
       bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.5.0" { inherit profileName; }).out;
-      crc32fast = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crc32fast."1.4.0" { inherit profileName; }).out;
+      crc32fast = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crc32fast."1.3.2" { inherit profileName; }).out;
     };
   });
 
@@ -1287,11 +1298,11 @@ in
     };
   });
 
-  "registry+https://github.com/rust-lang/crates.io-index".crc32fast."1.4.0" = overridableMkRustCrate (profileName: rec {
+  "registry+https://github.com/rust-lang/crates.io-index".crc32fast."1.3.2" = overridableMkRustCrate (profileName: rec {
     name = "crc32fast";
-    version = "1.4.0";
+    version = "1.3.2";
     registry = "registry+https://github.com/rust-lang/crates.io-index";
-    src = fetchCratesIo { inherit name version; sha256 = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa"; };
+    src = fetchCratesIo { inherit name version; sha256 = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d"; };
     features = builtins.concatLists [
       [ "default" ]
       [ "std" ]
@@ -1684,8 +1695,8 @@ in
     registry = "registry+https://github.com/rust-lang/crates.io-index";
     src = fetchCratesIo { inherit name version; sha256 = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649"; };
     features = builtins.concatLists [
-      (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "alloc")
+      (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "alloc")
-      (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "default")
+      (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "default")
     ];
   });
 
@@ -1910,9 +1921,9 @@ in
     };
   });
 
-  "unknown".garage."1.0.0" = overridableMkRustCrate (profileName: rec {
+  "unknown".garage."0.10.0" = overridableMkRustCrate (profileName: rec {
     name = "garage";
-    version = "1.0.0";
+    version = "0.10.0";
     registry = "unknown";
     src = fetchCrateLocal (workspaceSrc + "/src/garage");
     features = builtins.concatLists [
@@ -1927,8 +1938,6 @@ in
       (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-prometheus") "opentelemetry-prometheus")
       (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/prometheus") "prometheus")
       (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite") "sqlite")
-      (lib.optional (rootFeatures' ? "garage/syslog") "syslog")
-      (lib.optional (rootFeatures' ? "garage/syslog" || rootFeatures' ? "garage/syslog-tracing") "syslog-tracing")
       (lib.optional (rootFeatures' ? "garage/system-libs") "system-libs")
       (lib.optional (rootFeatures' ? "garage/telemetry-otlp") "telemetry-otlp")
     ];
@@ -1940,15 +1949,15 @@ in
       format_table = (rustPackages."unknown".format_table."0.1.1" { inherit profileName; }).out;
       futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
       futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.30" { inherit profileName; }).out;
-      garage_api = (rustPackages."unknown".garage_api."1.0.0" { inherit profileName; }).out;
-      garage_block = (rustPackages."unknown".garage_block."1.0.0" { inherit profileName; }).out;
-      garage_db = (rustPackages."unknown".garage_db."1.0.0" { inherit profileName; }).out;
-      garage_model = (rustPackages."unknown".garage_model."1.0.0" { inherit profileName; }).out;
-      garage_net = (rustPackages."unknown".garage_net."1.0.0" { inherit profileName; }).out;
-      garage_rpc = (rustPackages."unknown".garage_rpc."1.0.0" { inherit profileName; }).out;
-      garage_table = (rustPackages."unknown".garage_table."1.0.0" { inherit profileName; }).out;
-      garage_util = (rustPackages."unknown".garage_util."1.0.0" { inherit profileName; }).out;
-      garage_web = (rustPackages."unknown".garage_web."1.0.0" { inherit profileName; }).out;
+      garage_api = (rustPackages."unknown".garage_api."0.10.0" { inherit profileName; }).out;
+      garage_block = (rustPackages."unknown".garage_block."0.10.0" { inherit profileName; }).out;
+      garage_db = (rustPackages."unknown".garage_db."0.10.0" { inherit profileName; }).out;
+      garage_model = (rustPackages."unknown".garage_model."0.10.0" { inherit profileName; }).out;
+      garage_net = (rustPackages."unknown".garage_net."0.10.0" { inherit profileName; }).out;
+      garage_rpc = (rustPackages."unknown".garage_rpc."0.10.0" { inherit profileName; }).out;
+      garage_table = (rustPackages."unknown".garage_table."0.10.0" { inherit profileName; }).out;
+      garage_util = (rustPackages."unknown".garage_util."0.10.0" { inherit profileName; }).out;
+      garage_web = (rustPackages."unknown".garage_web."0.10.0" { inherit profileName; }).out;
       git_version = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".git-version."0.3.9" { inherit profileName; }).out;
       hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
       sodiumoxide = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".kuska-sodiumoxide."0.2.5-0" { inherit profileName; }).out;
@@ -1960,9 +1969,7 @@ in
       rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
       serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.196" { inherit profileName; }).out;
       serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.14" { inherit profileName; }).out;
-      sha1 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".sha1."0.10.6" { inherit profileName; }).out;
       structopt = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".structopt."0.3.26" { inherit profileName; }).out;
-      ${ if rootFeatures' ? "garage/syslog" || rootFeatures' ? "garage/syslog-tracing" then "syslog_tracing" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".syslog-tracing."0.3.0" { inherit profileName; }).out;
       timeago = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".timeago."0.4.2" { inherit profileName; }).out;
       tokio = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.36.0" { inherit profileName; }).out;
       toml = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".toml."0.8.10" { inherit profileName; }).out;
@@ -1988,9 +1995,9 @@ in
     };
   });
 
-  "unknown".garage_api."1.0.0" = overridableMkRustCrate (profileName: rec {
+  "unknown".garage_api."0.10.0" = overridableMkRustCrate (profileName: rec {
     name = "garage_api";
-    version = "1.0.0";
+    version = "0.10.0";
     registry = "unknown";
     src = fetchCrateLocal (workspaceSrc + "/src/api");
     features = builtins.concatLists [
@@ -2007,19 +2014,17 @@ in
       base64 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".base64."0.21.7" { inherit profileName; }).out;
       bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.5.0" { inherit profileName; }).out;
       chrono = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".chrono."0.4.33" { inherit profileName; }).out;
-      crc32c = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crc32c."0.6.4" { inherit profileName; }).out;
-      crc32fast = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crc32fast."1.4.0" { inherit profileName; }).out;
       crypto_common = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crypto-common."0.1.6" { inherit profileName; }).out;
       err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
       form_urlencoded = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".form_urlencoded."1.2.1" { inherit profileName; }).out;
       futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
       futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.30" { inherit profileName; }).out;
-      garage_block = (rustPackages."unknown".garage_block."1.0.0" { inherit profileName; }).out;
-      garage_model = (rustPackages."unknown".garage_model."1.0.0" { inherit profileName; }).out;
-      garage_net = (rustPackages."unknown".garage_net."1.0.0" { inherit profileName; }).out;
-      garage_rpc = (rustPackages."unknown".garage_rpc."1.0.0" { inherit profileName; }).out;
-      garage_table = (rustPackages."unknown".garage_table."1.0.0" { inherit profileName; }).out;
-      garage_util = (rustPackages."unknown".garage_util."1.0.0" { inherit profileName; }).out;
+      garage_block = (rustPackages."unknown".garage_block."0.10.0" { inherit profileName; }).out;
+      garage_model = (rustPackages."unknown".garage_model."0.10.0" { inherit profileName; }).out;
+      garage_net = (rustPackages."unknown".garage_net."0.10.0" { inherit profileName; }).out;
+      garage_rpc = (rustPackages."unknown".garage_rpc."0.10.0" { inherit profileName; }).out;
+      garage_table = (rustPackages."unknown".garage_table."0.10.0" { inherit profileName; }).out;
+      garage_util = (rustPackages."unknown".garage_util."0.10.0" { inherit profileName; }).out;
       hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
       hmac = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hmac."0.12.1" { inherit profileName; }).out;
       http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."1.0.0" { inherit profileName; }).out;
@@ -2042,7 +2047,6 @@ in
       serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.196" { inherit profileName; }).out;
       serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.14" { inherit profileName; }).out;
       serde_json = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_json."1.0.113" { inherit profileName; }).out;
-      sha1 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".sha1."0.10.6" { inherit profileName; }).out;
       sha2 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".sha2."0.10.8" { inherit profileName; }).out;
       tokio = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.36.0" { inherit profileName; }).out;
       tokio_stream = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio-stream."0.1.14" { inherit profileName; }).out;
@@ -2052,9 +2056,9 @@ in
     };
   });
 
-  "unknown".garage_block."1.0.0" = overridableMkRustCrate (profileName: rec {
+  "unknown".garage_block."0.10.0" = overridableMkRustCrate (profileName: rec {
     name = "garage_block";
-    version = "1.0.0";
+    version = "0.10.0";
     registry = "unknown";
     src = fetchCrateLocal (workspaceSrc + "/src/block");
     features = builtins.concatLists [
@@ -2068,11 +2072,11 @@ in
       bytesize = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytesize."1.3.0" { inherit profileName; }).out;
       futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
       futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.30" { inherit profileName; }).out;
-      garage_db = (rustPackages."unknown".garage_db."1.0.0" { inherit profileName; }).out;
-      garage_net = (rustPackages."unknown".garage_net."1.0.0" { inherit profileName; }).out;
-      garage_rpc = (rustPackages."unknown".garage_rpc."1.0.0" { inherit profileName; }).out;
-      garage_table = (rustPackages."unknown".garage_table."1.0.0" { inherit profileName; }).out;
-      garage_util = (rustPackages."unknown".garage_util."1.0.0" { inherit profileName; }).out;
+      garage_db = (rustPackages."unknown".garage_db."0.10.0" { inherit profileName; }).out;
+      garage_net = (rustPackages."unknown".garage_net."0.10.0" { inherit profileName; }).out;
+      garage_rpc = (rustPackages."unknown".garage_rpc."0.10.0" { inherit profileName; }).out;
+      garage_table = (rustPackages."unknown".garage_table."0.10.0" { inherit profileName; }).out;
+      garage_util = (rustPackages."unknown".garage_util."0.10.0" { inherit profileName; }).out;
       hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
       opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
       rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
@@ -2085,9 +2089,9 @@ in
     };
   });
 
-  "unknown".garage_db."1.0.0" = overridableMkRustCrate (profileName: rec {
+  "unknown".garage_db."0.10.0" = overridableMkRustCrate (profileName: rec {
     name = "garage_db";
-    version = "1.0.0";
+    version = "0.10.0";
     registry = "unknown";
     src = fetchCrateLocal (workspaceSrc + "/src/db");
     features = builtins.concatLists [
@@ -2095,8 +2099,6 @@ in
       (lib.optional (rootFeatures' ? "garage_db/default") "default")
       (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "heed")
       (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "lmdb")
-      (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "r2d2")
-      (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "r2d2_sqlite")
      (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "rusqlite")
       (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "sqlite")
     ];
@@ -2104,9 +2106,8 @@ in
       err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
       ${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "heed" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".heed."0.11.0" { inherit profileName; }).out;
       hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out;
-      ${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "r2d2" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".r2d2."0.8.10" { inherit profileName; }).out;
-      ${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "r2d2_sqlite" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".r2d2_sqlite."0.24.0" { inherit profileName; }).out;
-      ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "rusqlite" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rusqlite."0.31.0" { inherit profileName; }).out;
+      ouroboros = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".ouroboros."0.18.3" { inherit profileName; }).out;
+      ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "rusqlite" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rusqlite."0.30.0" { inherit profileName; }).out;
       tracing = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tracing."0.1.40" { inherit profileName; }).out;
     };
     devDependencies = {
@@ -2114,9 +2115,9 @@ in
    };
  });

- "unknown".garage_model."1.0.0" = overridableMkRustCrate (profileName: rec {
+ "unknown".garage_model."0.10.0" = overridableMkRustCrate (profileName: rec {
    name = "garage_model";
-   version = "1.0.0";
+   version = "0.10.0";
    registry = "unknown";
    src = fetchCrateLocal (workspaceSrc + "/src/model");
    features = builtins.concatLists [
@@ -2134,16 +2135,15 @@ in
      err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
      futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
      futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.30" { inherit profileName; }).out;
-     garage_block = (rustPackages."unknown".garage_block."1.0.0" { inherit profileName; }).out;
+     garage_block = (rustPackages."unknown".garage_block."0.10.0" { inherit profileName; }).out;
-     garage_db = (rustPackages."unknown".garage_db."1.0.0" { inherit profileName; }).out;
+     garage_db = (rustPackages."unknown".garage_db."0.10.0" { inherit profileName; }).out;
-     garage_net = (rustPackages."unknown".garage_net."1.0.0" { inherit profileName; }).out;
+     garage_net = (rustPackages."unknown".garage_net."0.10.0" { inherit profileName; }).out;
-     garage_rpc = (rustPackages."unknown".garage_rpc."1.0.0" { inherit profileName; }).out;
+     garage_rpc = (rustPackages."unknown".garage_rpc."0.10.0" { inherit profileName; }).out;
-     garage_table = (rustPackages."unknown".garage_table."1.0.0" { inherit profileName; }).out;
+     garage_table = (rustPackages."unknown".garage_table."0.10.0" { inherit profileName; }).out;
-     garage_util = (rustPackages."unknown".garage_util."1.0.0" { inherit profileName; }).out;
+     garage_util = (rustPackages."unknown".garage_util."0.10.0" { inherit profileName; }).out;
      hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
      http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."1.0.0" { inherit profileName; }).out;
      opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
-     parse_duration = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".parse_duration."2.1.1" { inherit profileName; }).out;
      rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
      serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.196" { inherit profileName; }).out;
      serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.14" { inherit profileName; }).out;
@@ -2153,9 +2153,9 @@ in
    };
  });

- "unknown".garage_net."1.0.0" = overridableMkRustCrate (profileName: rec {
+ "unknown".garage_net."0.10.0" = overridableMkRustCrate (profileName: rec {
    name = "garage_net";
-   version = "1.0.0";
+   version = "0.10.0";
    registry = "unknown";
    src = fetchCrateLocal (workspaceSrc + "/src/net");
    features = builtins.concatLists [
@@ -2190,9 +2190,9 @@ in
    };
  });

- "unknown".garage_rpc."1.0.0" = overridableMkRustCrate (profileName: rec {
+ "unknown".garage_rpc."0.10.0" = overridableMkRustCrate (profileName: rec {
    name = "garage_rpc";
-   version = "1.0.0";
+   version = "0.10.0";
    registry = "unknown";
    src = fetchCrateLocal (workspaceSrc + "/src/rpc");
    features = builtins.concatLists [
@@ -2214,9 +2214,9 @@ in
      format_table = (rustPackages."unknown".format_table."0.1.1" { inherit profileName; }).out;
      futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
      futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.30" { inherit profileName; }).out;
-     garage_db = (rustPackages."unknown".garage_db."1.0.0" { inherit profileName; }).out;
+     garage_db = (rustPackages."unknown".garage_db."0.10.0" { inherit profileName; }).out;
-     garage_net = (rustPackages."unknown".garage_net."1.0.0" { inherit profileName; }).out;
+     garage_net = (rustPackages."unknown".garage_net."0.10.0" { inherit profileName; }).out;
-     garage_util = (rustPackages."unknown".garage_util."1.0.0" { inherit profileName; }).out;
+     garage_util = (rustPackages."unknown".garage_util."0.10.0" { inherit profileName; }).out;
      gethostname = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".gethostname."0.4.3" { inherit profileName; }).out;
      hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
      itertools = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".itertools."0.12.1" { inherit profileName; }).out;
@@ -2238,9 +2238,9 @@ in
    };
  });

- "unknown".garage_table."1.0.0" = overridableMkRustCrate (profileName: rec {
+ "unknown".garage_table."0.10.0" = overridableMkRustCrate (profileName: rec {
    name = "garage_table";
-   version = "1.0.0";
+   version = "0.10.0";
    registry = "unknown";
    src = fetchCrateLocal (workspaceSrc + "/src/table");
    dependencies = {
@@ -2249,9 +2249,9 @@ in
      bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.5.0" { inherit profileName; }).out;
      futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
      futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.30" { inherit profileName; }).out;
-     garage_db = (rustPackages."unknown".garage_db."1.0.0" { inherit profileName; }).out;
+     garage_db = (rustPackages."unknown".garage_db."0.10.0" { inherit profileName; }).out;
-     garage_rpc = (rustPackages."unknown".garage_rpc."1.0.0" { inherit profileName; }).out;
+     garage_rpc = (rustPackages."unknown".garage_rpc."0.10.0" { inherit profileName; }).out;
-     garage_util = (rustPackages."unknown".garage_util."1.0.0" { inherit profileName; }).out;
+     garage_util = (rustPackages."unknown".garage_util."0.10.0" { inherit profileName; }).out;
      hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
      hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out;
      opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
@@ -2263,9 +2263,9 @@ in
    };
  });

- "unknown".garage_util."1.0.0" = overridableMkRustCrate (profileName: rec {
+ "unknown".garage_util."0.10.0" = overridableMkRustCrate (profileName: rec {
    name = "garage_util";
-   version = "1.0.0";
+   version = "0.10.0";
    registry = "unknown";
    src = fetchCrateLocal (workspaceSrc + "/src/util");
    features = builtins.concatLists [
@@ -2281,8 +2281,8 @@ in
      digest = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".digest."0.10.7" { inherit profileName; }).out;
      err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
      futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
-     garage_db = (rustPackages."unknown".garage_db."1.0.0" { inherit profileName; }).out;
+     garage_db = (rustPackages."unknown".garage_db."0.10.0" { inherit profileName; }).out;
-     garage_net = (rustPackages."unknown".garage_net."1.0.0" { inherit profileName; }).out;
+     garage_net = (rustPackages."unknown".garage_net."0.10.0" { inherit profileName; }).out;
      hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
      hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out;
      http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."1.0.0" { inherit profileName; }).out;
@@ -2307,18 +2307,18 @@ in
    };
  });

- "unknown".garage_web."1.0.0" = overridableMkRustCrate (profileName: rec {
+ "unknown".garage_web."0.10.0" = overridableMkRustCrate (profileName: rec {
    name = "garage_web";
-   version = "1.0.0";
+   version = "0.10.0";
    registry = "unknown";
    src = fetchCrateLocal (workspaceSrc + "/src/web");
    dependencies = {
      err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
      futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
-     garage_api = (rustPackages."unknown".garage_api."1.0.0" { inherit profileName; }).out;
+     garage_api = (rustPackages."unknown".garage_api."0.10.0" { inherit profileName; }).out;
-     garage_model = (rustPackages."unknown".garage_model."1.0.0" { inherit profileName; }).out;
+     garage_model = (rustPackages."unknown".garage_model."0.10.0" { inherit profileName; }).out;
-     garage_table = (rustPackages."unknown".garage_table."1.0.0" { inherit profileName; }).out;
+     garage_table = (rustPackages."unknown".garage_table."0.10.0" { inherit profileName; }).out;
-     garage_util = (rustPackages."unknown".garage_util."1.0.0" { inherit profileName; }).out;
+     garage_util = (rustPackages."unknown".garage_util."0.10.0" { inherit profileName; }).out;
      http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."1.0.0" { inherit profileName; }).out;
      http_body_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http-body-util."0.1.0" { inherit profileName; }).out;
      hyper = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."1.1.0" { inherit profileName; }).out;
@@ -2484,25 +2484,25 @@ in
    registry = "registry+https://github.com/rust-lang/crates.io-index";
    src = fetchCratesIo { inherit name version; sha256 = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604"; };
    features = builtins.concatLists [
-     (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "ahash")
+     (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "ahash")
-     (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "allocator-api2")
+     (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "allocator-api2")
-     (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "default")
+     (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "default")
-     (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "inline-more")
+     (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "inline-more")
      [ "raw" ]
    ];
    dependencies = {
-     ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "ahash" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".ahash."0.8.7" { inherit profileName; }).out;
+     ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "ahash" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".ahash."0.8.7" { inherit profileName; }).out;
-     ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "allocator_api2" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".allocator-api2."0.2.16" { inherit profileName; }).out;
+     ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "allocator_api2" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".allocator-api2."0.2.16" { inherit profileName; }).out;
    };
  });

- "registry+https://github.com/rust-lang/crates.io-index".hashlink."0.9.0" = overridableMkRustCrate (profileName: rec {
+ "registry+https://github.com/rust-lang/crates.io-index".hashlink."0.8.4" = overridableMkRustCrate (profileName: rec {
    name = "hashlink";
-   version = "0.9.0";
+   version = "0.8.4";
    registry = "registry+https://github.com/rust-lang/crates.io-index";
-   src = fetchCratesIo { inherit name version; sha256 = "692eaaf7f7607518dd3cef090f1474b61edc5301d8012f09579920df68b725ee"; };
+   src = fetchCratesIo { inherit name version; sha256 = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7"; };
    dependencies = {
-     ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "hashbrown" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hashbrown."0.14.3" { inherit profileName; }).out;
+     ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "hashbrown" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hashbrown."0.14.3" { inherit profileName; }).out;
    };
  });

@@ -2522,7 +2522,7 @@ in
    registry = "registry+https://github.com/rust-lang/crates.io-index";
    src = fetchCratesIo { inherit name version; sha256 = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"; };
    features = builtins.concatLists [
-     (lib.optional (rootFeatures' ? "k2v-client/clap" || rootFeatures' ? "k2v-client/cli") "default")
+     [ "default" ]
    ];
  });

@@ -3426,24 +3426,24 @@ in
    };
  });

- "registry+https://github.com/rust-lang/crates.io-index".libsqlite3-sys."0.28.0" = overridableMkRustCrate (profileName: rec {
+ "registry+https://github.com/rust-lang/crates.io-index".libsqlite3-sys."0.27.0" = overridableMkRustCrate (profileName: rec {
    name = "libsqlite3-sys";
-   version = "0.28.0";
+   version = "0.27.0";
    registry = "registry+https://github.com/rust-lang/crates.io-index";
-   src = fetchCratesIo { inherit name version; sha256 = "0c10584274047cb335c23d3e61bcef8e323adae7c5c8c760540f73610177fc3f"; };
+   src = fetchCratesIo { inherit name version; sha256 = "cf4e226dcd58b4be396f7bd3c20da8fdee2911400705297ba7d2d7cc2c30f716"; };
    features = builtins.concatLists [
      (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "bundled")
      (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "bundled_bindings")
      (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "cc")
-     (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "default")
+     (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "default")
-     (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "min_sqlite_version_3_14_0")
+     (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "min_sqlite_version_3_14_0")
-     (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "pkg-config")
+     (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "pkg-config")
-     (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "vcpkg")
+     (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "vcpkg")
    ];
    buildDependencies = {
      ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs" then "cc" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".cc."1.0.83" { profileName = "__noProfile"; }).out;
-     ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "pkg_config" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".pkg-config."0.3.29" { profileName = "__noProfile"; }).out;
+     ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "pkg_config" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".pkg-config."0.3.29" { profileName = "__noProfile"; }).out;
-     ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "vcpkg" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".vcpkg."0.2.15" { profileName = "__noProfile"; }).out;
+     ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "vcpkg" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".vcpkg."0.2.15" { profileName = "__noProfile"; }).out;
    };
  });

@@ -3970,6 +3970,40 @@ in
    };
  });

+ "registry+https://github.com/rust-lang/crates.io-index".ouroboros."0.18.3" = overridableMkRustCrate (profileName: rec {
+   name = "ouroboros";
+   version = "0.18.3";
+   registry = "registry+https://github.com/rust-lang/crates.io-index";
+   src = fetchCratesIo { inherit name version; sha256 = "97b7be5a8a3462b752f4be3ff2b2bf2f7f1d00834902e46be2a4d68b87b0573c"; };
+   features = builtins.concatLists [
+     [ "default" ]
+     [ "std" ]
+   ];
+   dependencies = {
+     aliasable = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".aliasable."0.1.3" { inherit profileName; }).out;
+     ouroboros_macro = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".ouroboros_macro."0.18.3" { profileName = "__noProfile"; }).out;
+     static_assertions = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".static_assertions."1.1.0" { inherit profileName; }).out;
+   };
+ });
+
+ "registry+https://github.com/rust-lang/crates.io-index".ouroboros_macro."0.18.3" = overridableMkRustCrate (profileName: rec {
+   name = "ouroboros_macro";
+   version = "0.18.3";
+   registry = "registry+https://github.com/rust-lang/crates.io-index";
+   src = fetchCratesIo { inherit name version; sha256 = "b645dcde5f119c2c454a92d0dfa271a2a3b205da92e4292a68ead4bdbfde1f33"; };
+   features = builtins.concatLists [
+     [ "std" ]
+   ];
+   dependencies = {
+     heck = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".heck."0.4.1" { inherit profileName; }).out;
+     itertools = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".itertools."0.12.1" { inherit profileName; }).out;
+     proc_macro2 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".proc-macro2."1.0.78" { inherit profileName; }).out;
+     proc_macro2_diagnostics = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".proc-macro2-diagnostics."0.10.1" { inherit profileName; }).out;
+     quote = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".quote."1.0.35" { inherit profileName; }).out;
+     syn = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".syn."2.0.48" { inherit profileName; }).out;
+   };
+ });
+
  "registry+https://github.com/rust-lang/crates.io-index".outref."0.5.1" = overridableMkRustCrate (profileName: rec {
    name = "outref";
    version = "0.5.1";
@@ -4039,11 +4073,11 @@ in
    registry = "registry+https://github.com/rust-lang/crates.io-index";
    src = fetchCratesIo { inherit name version; sha256 = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f"; };
    features = builtins.concatLists [
-     (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "default")
+     (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "default")
    ];
    dependencies = {
-     ${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "lock_api" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".lock_api."0.4.11" { inherit profileName; }).out;
+     ${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "lock_api" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".lock_api."0.4.11" { inherit profileName; }).out;
-     ${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "parking_lot_core" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".parking_lot_core."0.9.9" { inherit profileName; }).out;
+     ${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "parking_lot_core" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".parking_lot_core."0.9.9" { inherit profileName; }).out;
    };
  });

@@ -4068,11 +4102,11 @@ in
    registry = "registry+https://github.com/rust-lang/crates.io-index";
    src = fetchCratesIo { inherit name version; sha256 = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e"; };
    dependencies = {
-     ${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "cfg_if" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".cfg-if."1.0.0" { inherit profileName; }).out;
+     ${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "cfg_if" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".cfg-if."1.0.0" { inherit profileName; }).out;
-     ${ if (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") && hostPlatform.isUnix then "libc" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.153" { inherit profileName; }).out;
+     ${ if (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") && hostPlatform.isUnix then "libc" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.153" { inherit profileName; }).out;
-     ${ if (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") && hostPlatform.parsed.kernel.name == "redox" then "syscall" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".redox_syscall."0.4.1" { inherit profileName; }).out;
+     ${ if (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") && hostPlatform.parsed.kernel.name == "redox" then "syscall" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".redox_syscall."0.4.1" { inherit profileName; }).out;
-     ${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "smallvec" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".smallvec."1.13.1" { inherit profileName; }).out;
+     ${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "smallvec" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".smallvec."1.13.1" { inherit profileName; }).out;
-     ${ if (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") && hostPlatform.isWindows then "windows_targets" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".windows-targets."0.48.5" { inherit profileName; }).out;
+     ${ if (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") && hostPlatform.isWindows then "windows_targets" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".windows-targets."0.48.5" { inherit profileName; }).out;
    };
  });

@@ -4406,6 +4440,27 @@ in
    };
  });

+ "registry+https://github.com/rust-lang/crates.io-index".proc-macro2-diagnostics."0.10.1" = overridableMkRustCrate (profileName: rec {
+   name = "proc-macro2-diagnostics";
+   version = "0.10.1";
+   registry = "registry+https://github.com/rust-lang/crates.io-index";
+   src = fetchCratesIo { inherit name version; sha256 = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8"; };
+   features = builtins.concatLists [
+     [ "colors" ]
+     [ "default" ]
+     [ "yansi" ]
+   ];
+   dependencies = {
+     proc_macro2 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".proc-macro2."1.0.78" { inherit profileName; }).out;
+     quote = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".quote."1.0.35" { inherit profileName; }).out;
+     syn = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".syn."2.0.48" { inherit profileName; }).out;
+     yansi = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".yansi."1.0.0" { inherit profileName; }).out;
+   };
+   buildDependencies = {
+     version_check = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".version_check."0.9.4" { profileName = "__noProfile"; }).out;
+   };
+ });
+
  "registry+https://github.com/rust-lang/crates.io-index".prometheus."0.13.3" = overridableMkRustCrate (profileName: rec {
    name = "prometheus";
    version = "0.13.3";
@@ -4527,30 +4582,6 @@ in
    };
  });

- "registry+https://github.com/rust-lang/crates.io-index".r2d2."0.8.10" = overridableMkRustCrate (profileName: rec {
-   name = "r2d2";
-   version = "0.8.10";
-   registry = "registry+https://github.com/rust-lang/crates.io-index";
-   src = fetchCratesIo { inherit name version; sha256 = "51de85fb3fb6524929c8a2eb85e6b6d363de4e8c48f9e2c2eac4944abc181c93"; };
-   dependencies = {
-     ${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "log" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".log."0.4.20" { inherit profileName; }).out;
-     ${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "parking_lot" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".parking_lot."0.12.1" { inherit profileName; }).out;
-     ${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "scheduled_thread_pool" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".scheduled-thread-pool."0.2.7" { inherit profileName; }).out;
-   };
- });
-
- "registry+https://github.com/rust-lang/crates.io-index".r2d2_sqlite."0.24.0" = overridableMkRustCrate (profileName: rec {
-   name = "r2d2_sqlite";
-   version = "0.24.0";
-   registry = "registry+https://github.com/rust-lang/crates.io-index";
-   src = fetchCratesIo { inherit name version; sha256 = "6a982edf65c129796dba72f8775b292ef482b40d035e827a9825b3bc07ccc5f2"; };
-   dependencies = {
-     ${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "r2d2" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".r2d2."0.8.10" { inherit profileName; }).out;
-     ${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "rusqlite" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rusqlite."0.31.0" { inherit profileName; }).out;
-     ${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "uuid" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".uuid."1.4.1" { inherit profileName; }).out;
-   };
- });
-
  "registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" = overridableMkRustCrate (profileName: rec {
    name = "rand";
    version = "0.8.5";
@ -4618,7 +4649,7 @@ in
|
||||||
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
||||||
src = fetchCratesIo { inherit name version; sha256 = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa"; };
|
src = fetchCratesIo { inherit name version; sha256 = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa"; };
|
||||||
dependencies = {
|
dependencies = {
|
||||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "bitflags" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bitflags."1.3.2" { inherit profileName; }).out;
|
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "bitflags" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bitflags."1.3.2" { inherit profileName; }).out;
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
|
|
||||||
|
@ -4879,23 +4910,22 @@ in
|
||||||
];
|
];
|
||||||
});
|
});
|
||||||
|
|
||||||
"registry+https://github.com/rust-lang/crates.io-index".rusqlite."0.31.0" = overridableMkRustCrate (profileName: rec {
|
"registry+https://github.com/rust-lang/crates.io-index".rusqlite."0.30.0" = overridableMkRustCrate (profileName: rec {
|
||||||
name = "rusqlite";
|
name = "rusqlite";
|
||||||
version = "0.31.0";
|
version = "0.30.0";
|
||||||
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
||||||
src = fetchCratesIo { inherit name version; sha256 = "b838eba278d213a8beaf485bd313fd580ca4505a00d5871caeb1457c55322cae"; };
|
src = fetchCratesIo { inherit name version; sha256 = "a78046161564f5e7cd9008aff3b2990b3850dc8e0349119b98e8f251e099f24d"; };
|
||||||
features = builtins.concatLists [
|
features = builtins.concatLists [
|
||||||
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "backup")
|
|
||||||
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "bundled")
|
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "bundled")
|
||||||
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "modern_sqlite")
|
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "modern_sqlite")
|
||||||
];
|
];
|
||||||
dependencies = {
|
dependencies = {
|
||||||
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "bitflags" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bitflags."2.4.2" { inherit profileName; }).out;
|
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "bitflags" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bitflags."2.4.2" { inherit profileName; }).out;
|
||||||
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "fallible_iterator" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".fallible-iterator."0.3.0" { inherit profileName; }).out;
|
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "fallible_iterator" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".fallible-iterator."0.3.0" { inherit profileName; }).out;
|
||||||
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "fallible_streaming_iterator" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".fallible-streaming-iterator."0.1.9" { inherit profileName; }).out;
|
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "fallible_streaming_iterator" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".fallible-streaming-iterator."0.1.9" { inherit profileName; }).out;
|
||||||
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "hashlink" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hashlink."0.9.0" { inherit profileName; }).out;
|
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "hashlink" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hashlink."0.8.4" { inherit profileName; }).out;
|
||||||
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "libsqlite3_sys" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libsqlite3-sys."0.28.0" { inherit profileName; }).out;
|
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "libsqlite3_sys" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libsqlite3-sys."0.27.0" { inherit profileName; }).out;
|
||||||
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "smallvec" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".smallvec."1.13.1" { inherit profileName; }).out;
|
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "smallvec" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".smallvec."1.13.1" { inherit profileName; }).out;
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
|
|
||||||
|
@ -5109,16 +5139,6 @@ in
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
|
|
||||||
"registry+https://github.com/rust-lang/crates.io-index".scheduled-thread-pool."0.2.7" = overridableMkRustCrate (profileName: rec {
|
|
||||||
name = "scheduled-thread-pool";
|
|
||||||
version = "0.2.7";
|
|
||||||
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
|
||||||
src = fetchCratesIo { inherit name version; sha256 = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19"; };
|
|
||||||
dependencies = {
|
|
||||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "parking_lot" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".parking_lot."0.12.1" { inherit profileName; }).out;
|
|
||||||
};
|
|
||||||
});
|
|
||||||
|
|
||||||
"registry+https://github.com/rust-lang/crates.io-index".schemars."0.8.16" = overridableMkRustCrate (profileName: rec {
|
"registry+https://github.com/rust-lang/crates.io-index".schemars."0.8.16" = overridableMkRustCrate (profileName: rec {
|
||||||
name = "schemars";
|
name = "schemars";
|
||||||
version = "0.8.16";
|
version = "0.8.16";
|
||||||
|
@ -5513,6 +5533,13 @@ in
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
|
|
||||||
|
"registry+https://github.com/rust-lang/crates.io-index".static_assertions."1.1.0" = overridableMkRustCrate (profileName: rec {
|
||||||
|
name = "static_assertions";
|
||||||
|
version = "1.1.0";
|
||||||
|
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
||||||
|
src = fetchCratesIo { inherit name version; sha256 = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"; };
|
||||||
|
});
|
||||||
|
|
||||||
"registry+https://github.com/rust-lang/crates.io-index".static_init."1.0.3" = overridableMkRustCrate (profileName: rec {
|
"registry+https://github.com/rust-lang/crates.io-index".static_init."1.0.3" = overridableMkRustCrate (profileName: rec {
|
||||||
name = "static_init";
|
name = "static_init";
|
||||||
version = "1.0.3";
|
version = "1.0.3";
|
||||||
|
@ -5678,18 +5705,6 @@ in
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
|
|
||||||
"registry+https://github.com/rust-lang/crates.io-index".syslog-tracing."0.3.0" = overridableMkRustCrate (profileName: rec {
|
|
||||||
name = "syslog-tracing";
|
|
||||||
version = "0.3.0";
|
|
||||||
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
|
||||||
src = fetchCratesIo { inherit name version; sha256 = "340b1540dcdb6b066bc2966e7974f977ab1a38f21b2be189014ffb0cc2405768"; };
|
|
||||||
dependencies = {
|
|
||||||
${ if rootFeatures' ? "garage/syslog" || rootFeatures' ? "garage/syslog-tracing" then "libc" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.153" { inherit profileName; }).out;
|
|
||||||
${ if rootFeatures' ? "garage/syslog" || rootFeatures' ? "garage/syslog-tracing" then "tracing_core" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tracing-core."0.1.32" { inherit profileName; }).out;
|
|
||||||
${ if rootFeatures' ? "garage/syslog" || rootFeatures' ? "garage/syslog-tracing" then "tracing_subscriber" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tracing-subscriber."0.3.18" { inherit profileName; }).out;
|
|
||||||
};
|
|
||||||
});
|
|
||||||
|
|
||||||
"registry+https://github.com/rust-lang/crates.io-index".system-configuration."0.5.1" = overridableMkRustCrate (profileName: rec {
|
"registry+https://github.com/rust-lang/crates.io-index".system-configuration."0.5.1" = overridableMkRustCrate (profileName: rec {
|
||||||
name = "system-configuration";
|
name = "system-configuration";
|
||||||
version = "0.5.1";
|
version = "0.5.1";
|
||||||
|
@ -6496,16 +6511,13 @@ in
|
||||||
src = fetchCratesIo { inherit name version; sha256 = "79daa5ed5740825c40b389c5e50312b9c86df53fccd33f281df655642b43869d"; };
|
src = fetchCratesIo { inherit name version; sha256 = "79daa5ed5740825c40b389c5e50312b9c86df53fccd33f281df655642b43869d"; };
|
||||||
features = builtins.concatLists [
|
features = builtins.concatLists [
|
||||||
[ "default" ]
|
[ "default" ]
|
||||||
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "fast-rng")
|
|
||||||
[ "getrandom" ]
|
[ "getrandom" ]
|
||||||
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "rand")
|
|
||||||
[ "rng" ]
|
[ "rng" ]
|
||||||
[ "std" ]
|
[ "std" ]
|
||||||
[ "v4" ]
|
[ "v4" ]
|
||||||
];
|
];
|
||||||
dependencies = {
|
dependencies = {
|
||||||
getrandom = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".getrandom."0.2.12" { inherit profileName; }).out;
|
getrandom = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".getrandom."0.2.12" { inherit profileName; }).out;
|
||||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "rand" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
|
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
|
|
||||||
|
@ -7019,13 +7031,25 @@ in
|
||||||
];
|
];
|
||||||
});
|
});
|
||||||
|
|
||||||
|
"registry+https://github.com/rust-lang/crates.io-index".yansi."1.0.0" = overridableMkRustCrate (profileName: rec {
|
||||||
|
name = "yansi";
|
||||||
|
version = "1.0.0";
|
||||||
|
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
||||||
|
src = fetchCratesIo { inherit name version; sha256 = "6c2861d76f58ec8fc95708b9b1e417f7b12fd72ad33c01fa6886707092dea0d3"; };
|
||||||
|
features = builtins.concatLists [
|
||||||
|
[ "alloc" ]
|
||||||
|
[ "default" ]
|
||||||
|
[ "std" ]
|
||||||
|
];
|
||||||
|
});
|
||||||
|
|
||||||
"registry+https://github.com/rust-lang/crates.io-index".zerocopy."0.7.32" = overridableMkRustCrate (profileName: rec {
|
"registry+https://github.com/rust-lang/crates.io-index".zerocopy."0.7.32" = overridableMkRustCrate (profileName: rec {
|
||||||
name = "zerocopy";
|
name = "zerocopy";
|
||||||
version = "0.7.32";
|
version = "0.7.32";
|
||||||
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
||||||
src = fetchCratesIo { inherit name version; sha256 = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be"; };
|
src = fetchCratesIo { inherit name version; sha256 = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be"; };
|
||||||
features = builtins.concatLists [
|
features = builtins.concatLists [
|
||||||
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "simd")
|
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "simd")
|
||||||
];
|
];
|
||||||
dependencies = {
|
dependencies = {
|
||||||
${ if false then "zerocopy_derive" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".zerocopy-derive."0.7.32" { profileName = "__noProfile"; }).out;
|
${ if false then "zerocopy_derive" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".zerocopy-derive."0.7.32" { profileName = "__noProfile"; }).out;
|
||||||
|
@ -7038,9 +7062,9 @@ in
|
||||||
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
||||||
src = fetchCratesIo { inherit name version; sha256 = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6"; };
|
src = fetchCratesIo { inherit name version; sha256 = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6"; };
|
||||||
dependencies = {
|
dependencies = {
|
||||||
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "proc_macro2" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".proc-macro2."1.0.78" { inherit profileName; }).out;
|
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "proc_macro2" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".proc-macro2."1.0.78" { inherit profileName; }).out;
|
||||||
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "quote" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".quote."1.0.35" { inherit profileName; }).out;
|
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "quote" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".quote."1.0.35" { inherit profileName; }).out;
|
||||||
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "syn" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".syn."2.0.48" { inherit profileName; }).out;
|
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "syn" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".syn."2.0.48" { inherit profileName; }).out;
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
|
|
||||||
|
|
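For context: `Cargo.nix` is generated from `Cargo.lock`, so the hunks above simply mirror the dependency changes in this commit: rusqlite pinned back from 0.31.0 to 0.30.0, and the r2d2/r2d2_sqlite pooling crates dropped. A hedged sketch of how such a pin could be reproduced with plain cargo (hypothetical commands, not the actual history of this commit; the `Cargo.nix` regeneration step is not shown in this diff):

```bash
# Pin rusqlite back to 0.30.0 in Cargo.lock (hypothetical reproduction)
cargo update -p rusqlite --precise 0.30.0

# Once no workspace crate depends on r2d2/r2d2_sqlite anymore, they drop out
# of the lock file; these inverted-tree queries should then come back empty
cargo tree -i r2d2 || true
cargo tree -i r2d2_sqlite || true
```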
27 Cargo.toml

@@ -21,15 +21,15 @@ default-members = ["src/garage"]
 
 # Internal Garage crates
 format_table = { version = "0.1.1", path = "src/format-table" }
-garage_api = { version = "1.0.0", path = "src/api" }
-garage_block = { version = "1.0.0", path = "src/block" }
-garage_db = { version = "1.0.0", path = "src/db", default-features = false }
-garage_model = { version = "1.0.0", path = "src/model", default-features = false }
-garage_net = { version = "1.0.0", path = "src/net" }
-garage_rpc = { version = "1.0.0", path = "src/rpc" }
-garage_table = { version = "1.0.0", path = "src/table" }
-garage_util = { version = "1.0.0", path = "src/util" }
-garage_web = { version = "1.0.0", path = "src/web" }
+garage_api = { version = "0.10.0", path = "src/api" }
+garage_block = { version = "0.10.0", path = "src/block" }
+garage_db = { version = "0.10.0", path = "src/db", default-features = false }
+garage_model = { version = "0.10.0", path = "src/model", default-features = false }
+garage_net = { version = "0.10.0", path = "src/net" }
+garage_rpc = { version = "0.10.0", path = "src/rpc" }
+garage_table = { version = "0.10.0", path = "src/table" }
+garage_util = { version = "0.10.0", path = "src/util" }
+garage_web = { version = "0.10.0", path = "src/web" }
 k2v-client = { version = "0.0.4", path = "src/k2v-client" }
 
 # External crates from crates.io
@@ -43,8 +43,6 @@ bytes = "1.0"
 bytesize = "1.1"
 cfg-if = "1.0"
 chrono = "0.4"
-crc32fast = "1.4"
-crc32c = "0.6"
 crypto-common = "0.1"
 digest = "0.10"
 err-derive = "0.3"
@@ -60,11 +58,11 @@ md-5 = "0.10"
 mktemp = "0.5"
 nix = { version = "0.27", default-features = false, features = ["fs"] }
 nom = "7.1"
+ouroboros = "0.18"
 parse_duration = "2.1"
 pin-project = "1.0.12"
 pnet_datalink = "0.34"
 rand = "0.8"
-sha1 = "0.10"
 sha2 = "0.10"
 timeago = { version = "0.4", default-features = false }
 xxhash-rust = { version = "0.8", default-features = false, features = ["xxh3"] }
@@ -76,14 +74,11 @@ kuska-handshake = { version = "0.2.0", features = ["default", "async_std"] }
 clap = { version = "4.1", features = ["derive", "env"] }
 pretty_env_logger = "0.5"
 structopt = { version = "0.3", default-features = false }
-syslog-tracing = "0.3"
 tracing = "0.1"
 tracing-subscriber = { version = "0.3", features = ["env-filter"] }
 
 heed = { version = "0.11", default-features = false, features = ["lmdb"] }
-rusqlite = "0.31.0"
-r2d2 = "0.8"
-r2d2_sqlite = "0.24"
+rusqlite = "0.30.0"
 
 async-compression = { version = "0.4", features = ["tokio", "zstd"] }
 zstd = { version = "0.13", default-features = false }
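After editing the workspace manifest like this, a quick sanity check is to build the affected member crate against each metadata backend. A sketch, assuming the feature names that appear in the `rootFeatures'` guards of `Cargo.nix` above (`garage_db/sqlite`, `garage_db/bundled-libs`):

```bash
# Check that garage_db still compiles with only the sqlite backend enabled
cargo check -p garage_db --no-default-features --features sqlite

# ... and with the bundled (vendored) libsqlite3 build
cargo check -p garage_db --no-default-features --features "sqlite,bundled-libs"
```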
@@ -259,7 +259,7 @@ duck --delete garage:/my-files/an-object.txt
 
 ## WinSCP (libs3) {#winscp}
 
-*You can find instructions on how to use the GUI in french [in our wiki](https://guide.deuxfleurs.fr/prise_en_main/winscp/).*
+*You can find instructions on how to use the GUI in french [in our wiki](https://wiki.deuxfleurs.fr/fr/Guide/Garage/WinSCP).*
 
 How to use `winscp.com`, the CLI interface of WinSCP:
 
@@ -53,43 +53,20 @@ and that's also why your nodes have super long identifiers.
 
 Adding TLS support built into Garage is not currently planned.
 
-## Garage stores data in plain text on the filesystem or encrypted using customer keys (SSE-C)
-
-For standard S3 API requests, Garage does not encrypt data at rest by itself.
-For the most generic at rest encryption of data, we recommend setting up your
-storage partitions on encrypted LUKS devices.
-
-If you are developping your own client software that makes use of S3 storage,
-we recommend implementing data encryption directly on the client side and never
-transmitting plaintext data to Garage. This makes it easy to use an external
-untrusted storage provider if necessary.
-
-Garage does support [SSE-C
-encryption](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html),
-an encryption mode of Amazon S3 where data is encrypted at rest using
-encryption keys given by the client. The encryption keys are passed to the
-server in a header in each request, to encrypt or decrypt data at the moment of
-reading or writing. The server discards the key as soon as it has finished
-using it for the request. This mode allows the data to be encrypted at rest by
-Garage itself, but it requires support in the client software. It is also not
-adapted to a model where the server is not trusted or assumed to be
-compromised, as the server can easily know the encryption keys. Note however
-that when using SSE-C encryption, the only Garage node that knows the
-encryption key passed in a given request is the node to which the request is
-directed (which can be a gateway node), so it is easy to have untrusted nodes
-in the cluster as long as S3 API requests containing SSE-C encryption keys are
-not directed to them.
-
-Implementing automatic data encryption directly in Garage without client-side
-management of keys (something like
-[SSE-S3](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingServerSideEncryption.html))
-could make things simpler for end users that don't want to setup LUKS, but also
-raises many more questions, especially around key management: for encryption of
-data, where could Garage get the encryption keys from? If we encrypt data but
-keep the keys in a plaintext file next to them, it's useless. We probably don't
-want to have to manage secrets in Garage as it would be very hard to do in a
-secure way. At the time of speaking, there are no plans to implement this in
-Garage.
+## Garage stores data in plain text on the filesystem
+
+Garage does not handle data encryption at rest by itself, and instead delegates
+to the user to add encryption, either at the storage layer (LUKS, etc) or on
+the client side (or both). There are no current plans to add data encryption
+directly in Garage.
+
+Implementing data encryption directly in Garage might make things simpler for
+end users, but also raises many more questions, especially around key
+management: for encryption of data, where could Garage get the encryption keys
+from ? If we encrypt data but keep the keys in a plaintext file next to them,
+it's useless. We probably don't want to have to manage secrets in garage as it
+would be very hard to do in a secure way. Maybe integrate with an external
+system such as Hashicorp Vault?
 
 # Adding data encryption using external tools
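To illustrate the SSE-C mode described in the removed (left-hand) text, here is a hedged sketch using the AWS CLI against a Garage S3 endpoint; the endpoint URL, bucket name and key file are placeholders:

```bash
# Generate a random 256-bit customer key; the client keeps it, the server
# only sees it for the duration of each request
openssl rand -out sse-c.key 32

# Upload, then download, an object encrypted at rest with that key
aws --endpoint-url http://localhost:3900 s3 cp secret.txt s3://my-bucket/secret.txt \
    --sse-c AES256 --sse-c-key fileb://sse-c.key
aws --endpoint-url http://localhost:3900 s3 cp s3://my-bucket/secret.txt out.txt \
    --sse-c AES256 --sse-c-key fileb://sse-c.key
```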
@@ -90,6 +90,5 @@ The following feature flags are available in v0.8.0:
 | `kubernetes-discovery` | optional | Enable automatic registration and discovery<br>of cluster nodes through the Kubernetes API |
 | `metrics` | *by default* | Enable collection of metrics in Prometheus format on the admin API |
 | `telemetry-otlp` | optional | Enable collection of execution traces using OpenTelemetry |
-| `syslog` | optional | Enable logging to Syslog |
 | `lmdb` | *by default* | Enable using LMDB to store Garage's metadata |
-| `sqlite` | *by default* | Enable using Sqlite3 to store Garage's metadata |
+| `sqlite` | optional | Enable using Sqlite3 to store Garage's metadata |
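Since the table documents compile-time flags, a hedged example of selecting them explicitly when building from source (feature names taken from the table above; the exact set you want will differ):

```bash
# Build Garage with only the LMDB and Sqlite backends plus Prometheus metrics
cargo build --release --no-default-features --features "lmdb,sqlite,metrics"
```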
@@ -53,9 +53,9 @@ to store 2 TB of data in total.
 
 ### Best practices
 
-- If you have reasonably fast networking between all your nodes, and are planing to store
-  mostly large files, bump the `block_size` configuration parameter to 10 MB
-  (`block_size = "10M"`).
+- If you have fast dedicated networking between all your nodes, and are planing to store
+  very large files, bump the `block_size` configuration parameter to 10 MB
+  (`block_size = 10485760`).
 
 - Garage stores its files in two locations: it uses a metadata directory to store frequently-accessed
   small metadata items, and a data directory to store data blocks of uploaded objects.
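A sketch of applying the tuning advice above; `"10M"` is the newer string syntax from the left-hand side, `10485760` the older raw-byte form:

```bash
# Append the larger block size to the node's configuration (path assumed)
cat >> /etc/garage.toml <<EOF
block_size = "10M"
EOF
```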
@@ -68,42 +68,30 @@ to store 2 TB of data in total.
   EXT4 is not recommended as it has more strict limitations on the number of inodes,
   which might cause issues with Garage when large numbers of objects are stored.
 
-- Servers with multiple HDDs are supported natively by Garage without resorting
-  to RAID, see [our dedicated documentation page](@/documentation/operations/multi-hdd.md).
+- If you only have an HDD and no SSD, it's fine to put your metadata alongside the data
+  on the same drive. Having lots of RAM for your kernel to cache the metadata will
+  help a lot with performance. The default LMDB database engine is the most tested
+  and has good performance.
 
 - For the metadata storage, Garage does not do checksumming and integrity
-  verification on its own, so it is better to use a robust filesystem such as
-  BTRFS or ZFS. Users have reported that when using the LMDB database engine
-  (the default), database files have a tendency of becoming corrupted after an
-  unclean shutdown (e.g. a power outage), so you should take regular snapshots
-  to be able to recover from such a situation. This can be done using Garage's
-  built-in automatic snapshotting (since v0.9.4), or by using filesystem level
-  snapshots. If you cannot do so, you might want to switch to Sqlite which is
-  more robust.
+  verification on its own. If you are afraid of bitrot/data corruption,
+  put your metadata directory on a ZFS or BTRFS partition. Otherwise, just use regular
+  EXT4 or XFS.
 
-- LMDB is the fastest and most tested database engine, but it has the following
-  weaknesses: 1/ data files are not architecture-independent, you cannot simply
-  move a Garage metadata directory between nodes running different architectures,
-  and 2/ LMDB is not suited for 32-bit platforms. Sqlite is a viable alternative
-  if any of these are of concern.
-
-- If you only have an HDD and no SSD, it's fine to put your metadata alongside
-  the data on the same drive, but then consider your filesystem choice wisely
-  (see above). Having lots of RAM for your kernel to cache the metadata will
-  help a lot with performance. The default LMDB database engine is the most
-  tested and has good performance.
+- Servers with multiple HDDs are supported natively by Garage without resorting
+  to RAID, see [our dedicated documentation page](@/documentation/operations/multi-hdd.md).
 
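Where the removed text recommends snapshots to recover from LMDB corruption, filesystem-level snapshots look roughly like this (a sketch; the ZFS dataset name and the assumption that the metadata directory is a BTRFS subvolume are placeholders for your own layout):

```bash
# ZFS: snapshot the dataset holding the metadata directory
zfs snapshot tank/garage-meta@backup-$(date -u +%Y%m%d)

# BTRFS: read-only snapshot of the metadata subvolume
mkdir -p /var/lib/garage/meta-snapshots
btrfs subvolume snapshot -r /var/lib/garage/meta \
    /var/lib/garage/meta-snapshots/backup-$(date -u +%Y%m%d)
```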
 ## Get a Docker image
 
 Our docker image is currently named `dxflrs/garage` and is stored on the [Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).
-We encourage you to use a fixed tag (eg. `v1.0.0`) and not the `latest` tag.
-For this example, we will use the latest published version at the time of the writing which is `v1.0.0` but it's up to you
+We encourage you to use a fixed tag (eg. `v0.9.3`) and not the `latest` tag.
+For this example, we will use the latest published version at the time of the writing which is `v0.9.3` but it's up to you
 to check [the most recent versions on the Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).
 
 For example:
 
 ```
-sudo docker pull dxflrs/garage:v1.0.0
+sudo docker pull dxflrs/garage:v0.9.3
 ```
 
 ## Deploying and configuring Garage
@@ -126,7 +114,6 @@ A valid `/etc/garage.toml` for our cluster would look as follows:
 metadata_dir = "/var/lib/garage/meta"
 data_dir = "/var/lib/garage/data"
 db_engine = "lmdb"
-metadata_auto_snapshot_interval = "6h"
 
 replication_factor = 3
 
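For reference, a fuller sketch of such an `/etc/garage.toml` (only keys shown elsewhere in this documentation; the RPC secret and public address are placeholders you must fill in):

```bash
cat > /etc/garage.toml <<EOF
metadata_dir = "/var/lib/garage/meta"
data_dir = "/var/lib/garage/data"
db_engine = "lmdb"

replication_factor = 3

rpc_bind_addr = "[::]:3901"
rpc_public_addr = "<this node's public IP>:3901"
rpc_secret = "<same value on all nodes, e.g. from: openssl rand -hex 32>"
EOF
```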
@@ -169,7 +156,7 @@ docker run \
   -v /etc/garage.toml:/etc/garage.toml \
   -v /var/lib/garage/meta:/var/lib/garage/meta \
   -v /var/lib/garage/data:/var/lib/garage/data \
-  dxflrs/garage:v1.0.0
+  dxflrs/garage:v0.9.3
 ```
 
 With this command line, Garage should be started automatically at each boot.
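The top of the `docker run` command is cut off by the hunk; a complete sketch consistent with the compose file shown next (the `-d --name --restart --network host` flags are assumptions, not part of this diff):

```bash
docker run -d --name garaged \
  --restart always --network host \
  -v /etc/garage.toml:/etc/garage.toml \
  -v /var/lib/garage/meta:/var/lib/garage/meta \
  -v /var/lib/garage/data:/var/lib/garage/data \
  dxflrs/garage:v1.0.0
```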
@@ -183,7 +170,7 @@ If you want to use `docker-compose`, you may use the following `docker-compose.y
 version: "3"
 services:
   garage:
-    image: dxflrs/garage:v1.0.0
+    image: dxflrs/garage:v0.9.3
     network_mode: "host"
     restart: unless-stopped
     volumes:
@@ -199,7 +186,7 @@ upgrades. With the containerized setup proposed here, the upgrade process
 will require stopping and removing the existing container, and re-creating it
 with the upgraded version.
 
-## Controlling the daemon
+## Controling the daemon
 
 The `garage` binary has two purposes:
 - it acts as a daemon when launched with `garage server`
@@ -257,7 +244,7 @@ You can then instruct nodes to connect to one another as follows:
 Venus$ garage node connect 563e1ac825ee3323aa441e72c26d1030d6d4414aeb3dd25287c531e7fc2bc95d@[fc00:1::1]:3901
 ```
 
-You don't need to instruct all node to connect to all other nodes:
+You don't nead to instruct all node to connect to all other nodes:
 nodes will discover one another transitively.
 
 Now if your run `garage status` on any node, you should have an output that looks as follows:
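A sketch of the full bootstrap sequence on a fresh cluster, using the commands already referenced in this page:

```bash
# On each node, print the local identifier (<node_id>@<ip>:3901)
garage node id

# On any one node, connect to the others using their printed identifiers
garage node connect 563e1ac825ee3323aa441e72c26d1030d6d4414aeb3dd25287c531e7fc2bc95d@[fc00:1::1]:3901

# Verify that all nodes now see each other
garage status
```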
@@ -19,7 +19,7 @@ connecting to. To run on all nodes, add the `-a` flag as follows:
 
 # Data block operations
 
-## Data store scrub {#scrub}
+## Data store scrub
 
 Scrubbing the data store means examining each individual data block to check that
 their content is correct, by verifying their hash. Any block found to be corrupted
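A hedged sketch of driving a scrub by hand (subcommand names are an assumption to be checked against `garage repair --help` on your version):

```bash
# Start a scrub on all nodes, then watch the background worker
garage repair -a --yes scrub start
garage worker list
```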
@@ -104,24 +104,6 @@ operation will also move out all data from locations marked as read-only.
 
 # Metadata operations
 
-## Metadata snapshotting
-
-It is good practice to setup automatic snapshotting of your metadata database
-file, to recover from situations where it becomes corrupted on disk. This can
-be done at the filesystem level if you are using ZFS or BTRFS.
-
-Since Garage v0.9.4, Garage is able to take snapshots of the metadata database
-itself. This basically amounts to copying the database file, except that it can
-be run live while Garage is running without the risk of corruption or
-inconsistencies. This can be setup to run automatically on a schedule using
-[`metadata_auto_snapshot_interval`](@/documentation/reference-manual/configuration.md#metadata_auto_snapshot_interval).
-A snapshot can also be triggered manually using the `garage meta snapshot`
-command. Note that taking a snapshot using this method is very intensive as it
-requires making a full copy of the database file, so you might prefer using
-filesystem-level snapshots if possible. To recover a corrupted node from such a
-snapshot, read the instructions
-[here](@/documentation/operations/recovering.md#corrupted_meta).
-
 ## Metadata table resync
 
 Garage automatically resyncs all entries stored in the metadata tables every hour,
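The snapshot commands named in the removed text, for quick reference (available since Garage v0.9.4):

```bash
garage meta snapshot          # snapshot the local node's metadata DB
garage meta snapshot --all    # snapshot every node in the cluster
```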
@@ -141,7 +123,4 @@ blocks may still be held by Garage. If you suspect that such corruption has occu
 in your cluster, you can run one of the following repair procedures:
 
 - `garage repair versions`: checks that all versions belong to a non-deleted object, and purges any orphan version
-
-- `garage repair block-refs`: checks that all block references belong to a non-deleted object version, and purges any orphan block reference (this will then allow the blocks to be garbage-collected)
-
-- `garage repair block-rc`: checks that the reference counters for blocks are in sync with the actual number of non-deleted entries in the block reference table
+- `garage repair block_refs`: checks that all block references belong to a non-deleted object version, and purges any orphan block reference (this will then allow the blocks to be garbage-collected)
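A sketch of running these procedures cluster-wide, reusing the `-a --yes` form that this documentation uses for `garage repair tables` (note that the right-hand text spells the second command `block_refs` on older releases):

```bash
garage repair -a --yes versions
garage repair -a --yes block-refs
garage repair -a --yes block-rc
```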
@@ -108,57 +108,3 @@ garage layout apply # once satisfied, apply the changes
 
 Garage will then start synchronizing all required data on the new node.
 This process can be monitored using the `garage stats -a` command.
-
-## Replacement scenario 3: corrupted metadata {#corrupted_meta}
-
-In some cases, your metadata DB file might become corrupted, for instance if
-your node suffered a power outage and did not shut down properly. In this case,
-you can recover without having to change the node ID and rebuilding a cluster
-layout. This means that data blocks will not need to be shuffled around, you
-must simply find a way to repair the metadata file. The best way is generally
-to discard the corrupted file and recover it from another source.
-
-First of all, start by locating the database file in your metadata directory,
-which [depends on your `db_engine`
-choice](@/documentation/reference-manual/configuration.md#db_engine). Then,
-your recovery options are as follows:
-
-- **Option 1: resyncing from other nodes.** In case your cluster is replicated
-  with two or three copies, you can simply delete the database file, and Garage
-  will resync from other nodes. To do so, stop Garage, delete the database file
-  or directory, and restart Garage. Then, do a full table repair by calling
-  `garage repair -a --yes tables`. This will take a bit of time to complete as
-  the new node will need to receive copies of the metadata tables from the
-  network.
-
-- **Option 2: restoring a snapshot taken by Garage.** Since v0.9.4, Garage can
-  [automatically take regular
-  snapshots](@/documentation/reference-manual/configuration.md#metadata_auto_snapshot_interval)
-  of your metadata DB file. This file or directory should be located under
-  `<metadata_dir>/snapshots`, and is named according to the UTC time at which it
-  was taken. Stop Garage, discard the database file/directory and replace it by the
-  snapshot you want to use. For instance, in the case of LMDB:
-
-  ```bash
-  cd $METADATA_DIR
-  mv db.lmdb db.lmdb.bak
-  cp -r snapshots/2024-03-15T12:13:52Z db.lmdb
-  ```
-
-  And for Sqlite:
-
-  ```bash
-  cd $METADATA_DIR
-  mv db.sqlite db.sqlite.bak
-  cp snapshots/2024-03-15T12:13:52Z db.sqlite
-  ```
-
-  Then, restart Garage and run a full table repair by calling `garage repair -a
-  --yes tables`. This should run relatively fast as only the changes that
-  occurred since the snapshot was taken will need to be resynchronized. Of
-  course, if your cluster is not replicated, you will lose all changes that
-  occurred since the snapshot was taken.
-
-- **Option 3: restoring a filesystem-level snapshot.** If you are using ZFS or
-  BTRFS to snapshot your metadata partition, refer to their specific
-  documentation on rolling back or copying files from an old snapshot.
@@ -73,18 +73,6 @@ The entire procedure would look something like this:
 You can do all of the nodes in a single zone at once as that won't impact global cluster availability.
 Do not try to make a backup of the metadata folder of a running node.
 
-**Since Garage v0.9.4,** you can use the `garage meta snapshot --all` command
-to take a simultaneous snapshot of the metadata database files of all your
-nodes. This avoids the tedious process of having to take them down one by
-one before upgrading. Be careful that if automatic snapshotting is enabled,
-Garage only keeps the last two snapshots and deletes older ones, so you might
-want to disable automatic snapshotting in your upgraded configuration file
-until you have confirmed that the upgrade ran successfully. In addition to
-snapshotting the metadata databases of your nodes, you should back-up at
-least the `cluster_layout` file of one of your Garage instances (this file
-should be the same on all nodes and you can copy it safely while Garage is
-running).
-
 3. Prepare your binaries and configuration files for the new Garage version
 
 4. Restart all nodes simultaneously in the new version
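A sketch of that pre-upgrade backup (the metadata path is an assumption; adjust to your `metadata_dir`):

```bash
# Snapshot the metadata database on every node at once
garage meta snapshot --all

# Keep a copy of the cluster layout from any one node (safe while running)
cp /var/lib/garage/meta/cluster_layout /root/cluster_layout.bak
```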
@@ -57,7 +57,7 @@ to generate unique and private secrets for security reasons:
 cat > garage.toml <<EOF
 metadata_dir = "/tmp/meta"
 data_dir = "/tmp/data"
-db_engine = "sqlite"
+db_engine = "lmdb"
 
 replication_factor = 1
 
@@ -15,13 +15,10 @@ metadata_dir = "/var/lib/garage/meta"
 data_dir = "/var/lib/garage/data"
 metadata_fsync = true
 data_fsync = false
-disable_scrub = false
-metadata_auto_snapshot_interval = "6h"
 
 db_engine = "lmdb"
 
 block_size = "1M"
-block_ram_buffer_max = "256MiB"
 
 lmdb_map_size = "1T"
 
@@ -32,8 +29,6 @@ rpc_bind_addr = "[::]:3901"
 rpc_bind_outgoing = false
 rpc_public_addr = "[fc00:1::1]:3901"
 
-allow_world_readable_secrets = false
-
 bootstrap_peers = [
   "563e1ac825ee3323aa441e72c26d1030d6d4414aeb3dd25287c531e7fc2bc95d@[fc00:1::1]:3901",
   "86f0f26ae4afbd59aaf9cfb059eefac844951efd5b8caeec0d53f4ed6c85f332@[fc00:1::2]:3901",
@@ -84,20 +79,14 @@ The following gives details about each available configuration option.
 
 ### Index
 
-[Environment variables](#env_variables).
-
 Top-level configuration options:
-[`allow_world_readable_secrets`](#allow_world_readable_secrets),
-[`block_ram_buffer_max`](#block_ram_buffer_max),
 [`block_size`](#block_size),
 [`bootstrap_peers`](#bootstrap_peers),
 [`compression_level`](#compression_level),
 [`data_dir`](#data_dir),
 [`data_fsync`](#data_fsync),
 [`db_engine`](#db_engine),
-[`disable_scrub`](#disable_scrub),
 [`lmdb_map_size`](#lmdb_map_size),
-[`metadata_auto_snapshot_interval`](#metadata_auto_snapshot_interval),
 [`metadata_dir`](#metadata_dir),
 [`metadata_fsync`](#metadata_fsync),
 [`replication_factor`](#replication_factor),
@@ -139,23 +128,6 @@ The `[admin]` section:
 [`admin_token`/`admin_token_file`](#admin_token),
 [`trace_sink`](#admin_trace_sink),
 
-### Environment variables {#env_variables}
-
-The following configuration parameter must be specified as an environment
-variable, it does not exist in the configuration file:
-
-- `GARAGE_LOG_TO_SYSLOG` (since v0.9.4): set this to `1` or `true` to make the
-  Garage daemon send its logs to `syslog` (using the libc `syslog` function)
-  instead of printing to stderr.
-
-The following environment variables can be used to override the corresponding
-values in the configuration file:
-
-- [`GARAGE_ALLOW_WORLD_READABLE_SECRETS`](#allow_world_readable_secrets)
-- [`GARAGE_RPC_SECRET` and `GARAGE_RPC_SECRET_FILE`](#rpc_secret)
-- [`GARAGE_ADMIN_TOKEN` and `GARAGE_ADMIN_TOKEN_FILE`](#admin_token)
-- [`GARAGE_METRICS_TOKEN` and `GARAGE_METRICS_TOKEN`](#admin_metrics_token)
-
 ### Top-level configuration options
 
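A sketch of the removed environment-variable mechanism in use (variable names as listed above; values are placeholders):

```bash
# Secrets injected through the environment instead of the config file
export GARAGE_RPC_SECRET="$(openssl rand -hex 32)"
export GARAGE_ADMIN_TOKEN="$(openssl rand -base64 32)"

# Send daemon logs to syslog instead of stderr (since v0.9.4)
GARAGE_LOG_TO_SYSLOG=1 garage server
```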
@ -300,38 +272,23 @@ Since `v0.8.0`, Garage can use alternative storage backends as follows:
|
||||||
| [Sled](https://sled.rs) (old default, removed since `v1.0`) | `"sled"` | `<metadata_dir>/db/` |
|
| [Sled](https://sled.rs) (old default, removed since `v1.0`) | `"sled"` | `<metadata_dir>/db/` |
|
||||||
|
|
||||||
Sled was supported until Garage v0.9.x, and was removed in Garage v1.0.
|
Sled was supported until Garage v0.9.x, and was removed in Garage v1.0.
|
||||||
You can still use an older binary of Garage (e.g. v0.9.4) to migrate
|
You can still use an older binary of Garage (e.g. v0.9.3) to migrate
|
||||||
old Sled metadata databases to another engine.
|
old Sled metadata databases to another engine.
|
||||||
|
|
||||||
Performance characteristics of the different DB engines are as follows:
|
Performance characteristics of the different DB engines are as follows:
|
||||||
|
|
||||||
- LMDB: the recommended database engine for high-performance distributed clusters.
|
- LMDB: the recommended database engine on 64-bit systems, much more
|
||||||
LMDB works very well, but is known to have the following limitations:
|
space-efficient and slightly faster. Note that the data format of LMDB is not
|
||||||
|
portable between architectures, so for instance the Garage database of an
|
||||||
- The data format of LMDB is not portable between architectures, so for
|
x86-64 node cannot be moved to an ARM64 node. Also note that, while LMDB can
|
||||||
instance the Garage database of an x86-64 node cannot be moved to an ARM64
|
technically be used on 32-bit systems, this will limit your node to very
|
||||||
node.
|
small database sizes due to how LMDB works; it is therefore not recommended.
|
||||||
|
|
||||||
- While LMDB can technically be used on 32-bit systems, this will limit your
|
|
||||||
node to very small database sizes due to how LMDB works; it is therefore
|
|
||||||
not recommended.
|
|
||||||
|
|
||||||
- Several users have reported corrupted LMDB database files after an unclean
|
|
||||||
shutdown (e.g. a power outage). This situation can generally be recovered
|
|
||||||
from if your cluster is geo-replicated (by rebuilding your metadata db from
|
|
||||||
other nodes), or if you have saved regular snapshots at the filesystem
|
|
||||||
level.
|
|
||||||
|
|
||||||
- Keys in LMDB are limited to 511 bytes. This limit translates to limits on
|
|
||||||
object keys in S3 and sort keys in K2V that are limted to 479 bytes.
|
|
||||||
|
|
||||||
- Sqlite: Garage supports Sqlite as an alternative storage backend for
|
- Sqlite: Garage supports Sqlite as an alternative storage backend for
|
||||||
metadata, which does not have the issues listed above for LMDB.
|
metadata, and although it has not been tested as much, it is expected to work
|
||||||
On versions 0.8.x and earlier, Sqlite should be avoided due to abysmal
|
satisfactorily. Since Garage v0.9.0, performance issues have largely been
|
||||||
performance, which was fixed with the addition of `metadata_fsync`.
|
fixed by allowing for a no-fsync mode (see `metadata_fsync`). Sqlite does not
|
||||||
Sqlite is still probably slower than LMDB due to the way we use it,
|
have the database size limitation of LMDB on 32-bit systems.
|
||||||
so it is not the best choice for high-performance storage clusters,
|
|
||||||
but it should work fine in many cases.
|
|
||||||
|
|
||||||
It is possible to convert Garage's metadata directory from one format to another
|
It is possible to convert Garage's metadata directory from one format to another
|
||||||
using the `garage convert-db` command, which should be used as follows:
|
using the `garage convert-db` command, which should be used as follows:
|
||||||
|
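For readers weighing the two engines discussed above, here is a minimal sketch of the corresponding `garage.toml` lines; the directory path and the choice of `"lmdb"` are illustrative assumptions, not values taken from this diff:

```toml
# Hypothetical excerpt of a garage.toml; adjust the path to your deployment.
metadata_dir = "/var/lib/garage/meta"
db_engine = "lmdb"      # "sqlite" is the alternative discussed above
```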
@ -386,43 +343,6 @@ at the cost of a moderate drop in write performance.
Similarly to `metadata_fsync`, this is likely not necessary
if geographical replication is used.

-#### `metadata_auto_snapshot_interval` (since Garage v0.9.4) {#metadata_auto_snapshot_interval}
-
-If this value is set, Garage will automatically take a snapshot of the metadata
-DB file at a regular interval and save it in the metadata directory.
-This parameter can take any duration string that can be parsed by
-the [`parse_duration`](https://docs.rs/parse_duration/latest/parse_duration/#syntax) crate.
-
-Snapshots allow recovering from situations where the metadata DB file is
-corrupted, for instance after an unclean shutdown. See [this
-page](@/documentation/operations/recovering.md#corrupted_meta) for details.
-Garage keeps only the two most recent snapshots of the metadata DB and deletes
-older ones automatically.
-
-Note that taking a metadata snapshot is a relatively intensive operation as the
-entire data file is copied. Taking a snapshot might impact the performance of
-the Garage node while it is running. If the cluster is under heavy
-write load when a snapshot operation is running, this might also cause the
-database file to grow in size significantly as pages cannot be recycled easily.
-For this reason, it might be better to use filesystem-level snapshots instead
-if possible.
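As an illustration of the parameter being removed here, this is how it would be set in `garage.toml`; the six-hour interval is a made-up example:

```toml
# Hypothetical example: snapshot the metadata DB every six hours.
# The value can be any duration string the parse_duration crate understands.
metadata_auto_snapshot_interval = "6h"
```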
-#### `disable_scrub` {#disable_scrub}
-
-By default, Garage runs a scrub of the data directory approximately once per
-month, with a random delay to avoid all nodes running at the same time. When
-it scrubs the data directory, Garage will read all of the data files stored on
-disk to check their integrity, and will rebuild any data files that it finds
-corrupted, using the remaining valid copies stored on other nodes.
-See [this page](@/documentation/operations/durability-repairs.md#scrub) for details.
-
-Set the `disable_scrub` configuration value to `true` if you don't need Garage
-to scrub the data directory, for instance if you are already scrubbing at the
-filesystem level. Note that in this case, if you find a corrupted data file,
-you should delete it from the data directory and then call `garage repair
-blocks` on the node to ensure that it re-obtains a copy from another node on
-the network.
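For reference, disabling the built-in scrub is a one-line setting; the sketch below assumes integrity checking is already handled at the filesystem level (e.g. by ZFS scrubs):

```toml
# Hypothetical example: skip Garage's monthly data-directory scrub
# because the underlying filesystem already verifies block integrity.
disable_scrub = true
```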
#### `block_size` {#block_size}

Garage splits stored objects in consecutive chunks of size `block_size`
@ -438,37 +358,6 @@ files will remain available. This however means that chunks from existing files
will not be deduplicated with chunks from newly uploaded files, meaning you
might use more storage space than is optimally possible.
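To make the trade-off concrete, here is a sketch of tuning the chunk size in `garage.toml`; the 10 MiB value is an arbitrary example, not a recommendation from the diff:

```toml
# Hypothetical example: store objects in 10 MiB chunks; larger chunks mean
# fewer blocks to track per object, but coarser-grained deduplication.
block_size = 10485760
```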
-#### `block_ram_buffer_max` (since v0.9.4) {#block_ram_buffer_max}
-
-A limit on the total size of data blocks kept in RAM by S3 API nodes while they
-wait to be sent to storage nodes asynchronously.
-
-Explanation: since Garage wants to tolerate node failures, it uses quorum
-writes to send data blocks to storage nodes: try to write the block to three
-nodes, and return ok as soon as two writes complete. So even if all three nodes
-are online, the third write always completes asynchronously. In general, there
-are not many writes to a cluster, and the third asynchronous write can
-terminate early enough so as to not cause unbounded RAM growth. However, if
-the S3 API node is continuously receiving large quantities of data and the
-third node is never able to catch up, many data blocks will be kept buffered in
-RAM as they are awaiting transfer to the third node.
-
-The `block_ram_buffer_max` parameter sets a limit on the size of the buffers
-that can be kept in RAM in this process. When the limit is reached, backpressure
-is applied back to the S3 client.
-
-Note that this only counts buffers that have reached a certain stage of
-processing (received from the client + encrypted and/or compressed as
-necessary) and are ready to send to the storage nodes. Many other buffers will
-not be counted and this is not a hard limit on RAM consumption. In particular,
-if many clients send requests simultaneously with large objects, the RAM
-consumption will always grow linearly with the number of concurrent requests,
-as each request will use a few buffers of size `block_size` for receiving and
-intermediate processing before even trying to send the data to the storage
-node.
-
-The default value is 256MiB.
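As an illustration of the removed parameter, here is a sketch of raising the buffer limit on a heavily loaded gateway node; the 1 GiB figure is an assumption for the example:

```toml
# Hypothetical example: allow up to 1 GiB of blocks to sit in RAM while
# they await the asynchronous write to the slowest storage node.
block_ram_buffer_max = "1GiB"
```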
#### `lmdb_map_size` {#lmdb_map_size}

This parameter can be used to set the map size used by LMDB,
@ -559,7 +448,7 @@ be obtained by running `garage node id` and then included directly in the
key will be returned by `garage node id` and you will have to add the IP
yourself.
-### `allow_world_readable_secrets` or `GARAGE_ALLOW_WORLD_READABLE_SECRETS` (env) {#allow_world_readable_secrets}
+### `allow_world_readable_secrets`

Garage checks the permissions of your secret files to make sure they're not
world-readable. In some cases, the check might fail and consider your files as
@ -225,17 +225,6 @@ block_bytes_read 120586322022
block_bytes_written 3386618077
```

-#### `block_ram_buffer_free_kb` (gauge)
-
-Kibibytes available for buffering blocks that have to be sent to remote nodes.
-When clients send too much data to this node and a storage node is not receiving
-data fast enough due to slower network conditions, this will decrease down to
-zero and backpressure will be applied.
-
-```
-block_ram_buffer_free_kb 219829
-```

#### `block_compression_level` (counter)

Exposes the block compression level configured for the Garage node.
@ -1,77 +0,0 @@
+++
title = "Migrating from 0.9 to 1.0"
weight = 11
+++

**This guide explains how to migrate to 1.0 if you have an existing 0.9 cluster.
We don't recommend trying to migrate to 1.0 directly from 0.8 or older.**

This migration procedure has been tested on several clusters without issues.
However, it is still a *critical procedure* that could cause problems.
**Make sure to back up all your data before attempting it!**

You might also want to read our [general documentation on upgrading Garage](@/documentation/operations/upgrading.md).

## Changes introduced in v1.0

The following are **breaking changes** in Garage v1.0 that require your attention when migrating:

- The Sled metadata db engine has been **removed**. If your cluster was still
  using Sled, you will need to **use a Garage v0.9.x binary** to convert the
  database using the `garage convert-db` subcommand. See
  [here](@/documentation/reference-manual/configuration.md#db_engine) for the
  details of the procedure.

The following syntax changes have been made to the configuration file:

- The `replication_mode` parameter has been split into two parameters:
  [`replication_factor`](@/documentation/reference-manual/configuration.md#replication_factor)
  and
  [`consistency_mode`](@/documentation/reference-manual/configuration.md#consistency_mode).
  The old syntax using `replication_mode` is still accepted for backwards
  compatibility; a before/after sketch follows this list.

- The parameters `sled_cache_capacity` and `sled_flush_every_ms` have been removed.
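To make this syntax change concrete, here is a minimal before/after sketch in TOML; the replication factor of 3 is an arbitrary example, and `consistency_mode = "consistent"` is shown with what we understand to be its default value, not something mandated by the diff:

```toml
# Old syntax (Garage v0.9, still accepted by v1.0):
replication_mode = "3"

# Equivalent new syntax (Garage v1.0):
replication_factor = 3
consistency_mode = "consistent"
```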
## Migration procedure

The migration to Garage v1.0 can be done with almost no downtime,
by restarting all nodes at once in the new version.

The migration steps are as follows:

1. Do a `garage repair --all-nodes --yes tables`, check the logs and check that
   all data seems to be synced correctly between nodes. If you have time, do
   additional `garage repair` procedures (`blocks`, `versions`, `block_refs`,
   etc.)

2. Ensure you have a snapshot of your Garage installation that you can restore
   to in case the upgrade goes wrong:

   - If you are running Garage v0.9.4 or later, use the `garage meta snapshot
     --all` command to take a backup snapshot of the metadata directory of each
     node, and also save a copy of the following files from those directories:
     `cluster_layout`, `data_layout`, `node_key`, `node_key.pub`.

   - If you are running a filesystem such as ZFS or BTRFS that supports
     snapshotting, you can create a filesystem-level snapshot to be used as a
     restoration point if needed.

   - In other cases, make a backup using the old procedure: turn off each node
     individually; back up its metadata folder (for instance, use the following
     command if your metadata directory is `/var/lib/garage/meta`: `cd
     /var/lib/garage ; tar -acf meta-v0.9.tar.zst meta/`); turn it back on
     again. This will allow you to take a backup of all nodes without
     impacting global cluster availability. You can do all nodes of a single
     zone at once as this does not impact the availability of Garage.

3. Prepare your updated binaries and configuration files for Garage v1.0.

4. Shut down all v0.9 nodes simultaneously, and restart them all simultaneously
   in v1.0. Use your favorite deployment tool (Ansible, Kubernetes, Nomad) to
   achieve this as fast as possible. Garage v1.0 should be in a working state
   as soon as enough nodes have started.

5. Monitor your cluster in the following hours to see if it works well under
   your production load.
@ -70,7 +70,7 @@ Example response body:
```json
{
  "node": "b10c110e4e854e5aa3f4637681befac755154b20059ec163254ddbfae86b09df",
- "garageVersion": "v1.0.0",
+ "garageVersion": "v0.10.0",
  "garageFeatures": [
    "k2v",
    "lmdb",
@ -76,7 +76,6 @@
    # import the full shell using `nix develop .#full`
    full = shellWithPackages (with pkgs; [
      rustfmt
-     rust-analyzer
      clang
      mold
      # ---- extra packages for dev tasks ----
k2v_test.py (new executable file, +158 lines)
@ -0,0 +1,158 @@
#!/usr/bin/env python

import os
import requests
from datetime import datetime

# let's talk to our AWS Elasticsearch cluster
#from requests_aws4auth import AWS4Auth
#auth = AWS4Auth('GK31c2f218a2e44f485b94239e',
#                'b892c0665f0ada8a4755dae98baa3b133590e11dae3bcc1f9d769d67f16c3835',
#                'us-east-1',
#                's3')

from aws_requests_auth.aws_auth import AWSRequestsAuth
auth = AWSRequestsAuth(aws_access_key='GK31c2f218a2e44f485b94239e',
                       aws_secret_access_key='b892c0665f0ada8a4755dae98baa3b133590e11dae3bcc1f9d769d67f16c3835',
                       aws_host='localhost:3812',
                       aws_region='us-east-1',
                       aws_service='k2v')


print("-- ReadIndex")
response = requests.get('http://localhost:3812/alex',
                        auth=auth)
print(response.headers)
print(response.text)


sort_keys = ["a", "b", "c", "d"]

for sk in sort_keys:
    print("-- (%s) Put initial (no CT)"%sk)
    response = requests.put('http://localhost:3812/alex/root?sort_key=%s'%sk,
                            auth=auth,
                            data='{}: Hello, world!'.format(datetime.timestamp(datetime.now())))
    print(response.headers)
    print(response.text)

    print("-- Get")
    response = requests.get('http://localhost:3812/alex/root?sort_key=%s'%sk,
                            auth=auth)
    print(response.headers)
    print(response.text)
    ct = response.headers["x-garage-causality-token"]

    print("-- ReadIndex")
    response = requests.get('http://localhost:3812/alex',
                            auth=auth)
    print(response.headers)
    print(response.text)

    print("-- Put with CT")
    response = requests.put('http://localhost:3812/alex/root?sort_key=%s'%sk,
                            auth=auth,
                            headers={'x-garage-causality-token': ct},
                            data='{}: Good bye, world!'.format(datetime.timestamp(datetime.now())))
    print(response.headers)
    print(response.text)

    print("-- Get")
    response = requests.get('http://localhost:3812/alex/root?sort_key=%s'%sk,
                            auth=auth)
    print(response.headers)
    print(response.text)

    print("-- Put again with same CT (concurrent)")
    response = requests.put('http://localhost:3812/alex/root?sort_key=%s'%sk,
                            auth=auth,
                            headers={'x-garage-causality-token': ct},
                            data='{}: Concurrent value, oops'.format(datetime.timestamp(datetime.now())))
    print(response.headers)
    print(response.text)

for sk in sort_keys:
    print("-- (%s) Get"%sk)
    response = requests.get('http://localhost:3812/alex/root?sort_key=%s'%sk,
                            auth=auth)
    print(response.headers)
    print(response.text)
    ct = response.headers["x-garage-causality-token"]

    print("-- Delete")
    response = requests.delete('http://localhost:3812/alex/root?sort_key=%s'%sk,
                               headers={'x-garage-causality-token': ct},
                               auth=auth)
    print(response.headers)
    print(response.text)

print("-- ReadIndex")
response = requests.get('http://localhost:3812/alex',
                        auth=auth)
print(response.headers)
print(response.text)

print("-- InsertBatch")
response = requests.post('http://localhost:3812/alex',
                         auth=auth,
                         data='''
[
    {"pk": "root", "sk": "a", "ct": null, "v": "aW5pdGlhbCB0ZXN0Cg=="},
    {"pk": "root", "sk": "b", "ct": null, "v": "aW5pdGlhbCB0ZXN1Cg=="},
    {"pk": "root", "sk": "c", "ct": null, "v": "aW5pdGlhbCB0ZXN2Cg=="}
]
''')
print(response.headers)
print(response.text)

print("-- ReadIndex")
response = requests.get('http://localhost:3812/alex',
                        auth=auth)
print(response.headers)
print(response.text)

for sk in sort_keys:
    print("-- (%s) Get"%sk)
    response = requests.get('http://localhost:3812/alex/root?sort_key=%s'%sk,
                            auth=auth)
    print(response.headers)
    print(response.text)
    ct = response.headers["x-garage-causality-token"]

print("-- ReadBatch")
response = requests.post('http://localhost:3812/alex?search',
                         auth=auth,
                         data='''
[
    {"partitionKey": "root"},
    {"partitionKey": "root", "tombstones": true},
    {"partitionKey": "root", "tombstones": true, "limit": 2},
    {"partitionKey": "root", "start": "c", "singleItem": true},
    {"partitionKey": "root", "start": "b", "end": "d", "tombstones": true}
]
''')
print(response.headers)
print(response.text)


print("-- DeleteBatch")
response = requests.post('http://localhost:3812/alex?delete',
                         auth=auth,
                         data='''
[
    {"partitionKey": "root", "start": "b", "end": "c"}
]
''')
print(response.headers)
print(response.text)

print("-- ReadBatch")
response = requests.post('http://localhost:3812/alex?search',
                         auth=auth,
                         data='''
[
    {"partitionKey": "root"}
]
''')
print(response.headers)
print(response.text)
@ -168,12 +168,13 @@ let
  rootFeatures = if features != null then
    features
  else
-   ([ "garage/bundled-libs" "garage/lmdb" "garage/sqlite" "garage/k2v" ] ++ (if release then [
+   ([ "garage/bundled-libs" "garage/lmdb" "garage/k2v" ] ++ (if release then [
      "garage/consul-discovery"
      "garage/kubernetes-discovery"
      "garage/metrics"
      "garage/telemetry-otlp"
-     "garage/syslog"
+     "garage/lmdb"
+     "garage/sqlite"
    ] else
      [ ]));
@ -15,10 +15,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.5.0
+version: 0.4.1

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
-appVersion: "v1.0.0"
+appVersion: "v0.9.3"
@ -11,7 +11,6 @@ spec:
  {{- if eq .Values.deployment.kind "StatefulSet" }}
  replicas: {{ .Values.deployment.replicaCount }}
  serviceName: {{ include "garage.fullname" . }}
- podManagementPolicy: {{ .Values.deployment.podManagementPolicy }}
  {{- end }}
  template:
    metadata:
@ -96,8 +96,6 @@ deployment:
  kind: StatefulSet
  # Number of StatefulSet replicas/garage nodes to start
  replicaCount: 3
- # If using statefulset, allow Parallel or OrderedReady (default)
- podManagementPolicy: OrderedReady

image:
  repository: dxflrs/amd64_garage
script/jepsen.garage/Vagrantfile (vendored, 14 changed lines)
@ -30,11 +30,11 @@ Vagrant.configure("2") do |config|
  config.vm.define "n6" do |config| vm(config, "n6", "192.168.56.26") end
  config.vm.define "n7" do |config| vm(config, "n7", "192.168.56.27") end

- #config.vm.define "n8" do |config| vm(config, "n8", "192.168.56.28") end
- #config.vm.define "n9" do |config| vm(config, "n9", "192.168.56.29") end
- #config.vm.define "n10" do |config| vm(config, "n10", "192.168.56.30") end
- #config.vm.define "n11" do |config| vm(config, "n11", "192.168.56.31") end
- #config.vm.define "n12" do |config| vm(config, "n12", "192.168.56.32") end
- #config.vm.define "n13" do |config| vm(config, "n13", "192.168.56.33") end
- #config.vm.define "n14" do |config| vm(config, "n14", "192.168.56.34") end
+ config.vm.define "n8" do |config| vm(config, "n8", "192.168.56.28") end
+ config.vm.define "n9" do |config| vm(config, "n9", "192.168.56.29") end
+ config.vm.define "n10" do |config| vm(config, "n10", "192.168.56.30") end
+ config.vm.define "n11" do |config| vm(config, "n11", "192.168.56.31") end
+ config.vm.define "n12" do |config| vm(config, "n12", "192.168.56.32") end
+ config.vm.define "n13" do |config| vm(config, "n13", "192.168.56.33") end
+ config.vm.define "n14" do |config| vm(config, "n14", "192.168.56.34") end
end
@ -3,10 +3,11 @@
set -x

#for ppatch in task3c task3a tsfix2; do
-for ppatch in v093 v1rc1; do
+for ppatch in tsfix2; do
#for psc in c cp cdp r pr cpr dpr; do
-for ptsk in reg2 set2; do
-for psc in c cp cdp r pr cpr dpr; do
+for psc in cdp r pr cpr dpr; do
+#for ptsk in reg2 set1 set2; do
+for ptsk in set1; do
for irun in $(seq 10); do
lein run test --nodes-file nodes.vagrant \
  --time-limit 60 --rate 100 --concurrency 100 --ops-per-key 100 \
@ -38,9 +38,7 @@
   "tsfix2" "c82d91c6bccf307186332b6c5c6fc0b128b1b2b1"
   "task3a" "707442f5de416fdbed4681a33b739f0a787b7834"
   "task3b" "431b28e0cfdc9cac6c649193cf602108a8b02997"
-  "task3c" "0041b013a473e3ae72f50209d8f79db75a72848b"
-  "v093" "v0.9.3"
-  "v1rc1" "v1.0.0-rc1"})
+  "task3c" "0041b013a473e3ae72f50209d8f79db75a72848b"})

(def cli-opts
  "Additional command line options."
@ -43,7 +43,7 @@
      "rpc_bind_addr = \"0.0.0.0:3901\"\n"
      "rpc_public_addr = \"" node ":3901\"\n"
      "db_engine = \"lmdb\"\n"
-     "replication_mode = \"3\"\n"
+     "replication_mode = \"2\"\n"
      "data_dir = \"" data-dir "\"\n"
      "metadata_dir = \"" meta-dir "\"\n"
      "[s3_api]\n"
@ -11,7 +11,6 @@ in
{
  # --- Dev shell inherited from flake.nix ---
  devShell = devShells.default;
- devShellFull = devShells.full;

  # --- Continuous integration shell ---
  # The shell used for all CI jobs (along with devShell)
@ -1,6 +1,6 @@
[package]
name = "garage_api"
-version = "1.0.0"
+version = "0.10.0"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
@ -28,8 +26,6 @@ async-trait.workspace = true
base64.workspace = true
bytes.workspace = true
chrono.workspace = true
-crc32fast.workspace = true
-crc32c.workspace = true
crypto-common.workspace = true
err-derive.workspace = true
hex.workspace = true
@ -39,7 +37,6 @@ tracing.workspace = true
md-5.workspace = true
nom.workspace = true
pin-project.workspace = true
-sha1.workspace = true
sha2.workspace = true

futures.workspace = true
@ -27,7 +27,7 @@ pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<
            i.id,
            NodeResp {
                id: hex::encode(i.id),
-               addr: i.addr,
+               addr: Some(i.addr),
                hostname: i.status.hostname,
                is_up: i.is_up,
                last_seen_secs_ago: i.last_seen_secs_ago,
@ -70,21 +70,18 @@ pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<
                );
            }
            Some(n) => {
+               if n.role.is_none() {
                    n.role = Some(role);
                }
            }
        }
    }
+   }

-   for ver in layout.versions().iter().rev().skip(1) {
+   for ver in layout.versions.iter().rev().skip(1) {
        for (id, _, role) in ver.roles.items().iter() {
            if let layout::NodeRoleV(Some(r)) = role {
-               if r.capacity.is_some() {
-                   if let Some(n) = nodes.get_mut(id) {
-                       if n.role.is_none() {
-                           n.draining = true;
-                       }
-                   } else {
+               if !nodes.contains_key(id) && r.capacity.is_some() {
                    nodes.insert(
                        *id,
                        NodeResp {
@ -97,7 +94,6 @@ pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<
                        }
                    }
                }
-           }

    let mut nodes = nodes.into_values().collect::<Vec<_>>();
    nodes.sort_by(|x, y| x.id.cmp(&y.id));
@ -160,7 +156,7 @@ pub async fn handle_connect_cluster_nodes(
}

pub async fn handle_get_cluster_layout(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
-   let res = format_cluster_layout(garage.system.cluster_layout().inner());
+   let res = format_cluster_layout(&garage.system.cluster_layout());

    Ok(json_ok_response(&res)?)
}
@ -299,7 +295,7 @@ pub async fn handle_update_cluster_layout(
) -> Result<Response<ResBody>, Error> {
    let updates = parse_json_body::<UpdateClusterLayoutRequest, _, Error>(req).await?;

-   let mut layout = garage.system.cluster_layout().inner().clone();
+   let mut layout = garage.system.cluster_layout().clone();

    let mut roles = layout.current().roles.clone();
    roles.merge(&layout.staging.get().roles);
@ -345,7 +341,7 @@ pub async fn handle_apply_cluster_layout(
) -> Result<Response<ResBody>, Error> {
    let param = parse_json_body::<ApplyLayoutRequest, _, Error>(req).await?;

-   let layout = garage.system.cluster_layout().inner().clone();
+   let layout = garage.system.cluster_layout().clone();
    let (layout, msg) = layout.apply_staged_changes(Some(param.version))?;

    garage
@ -364,7 +360,7 @@ pub async fn handle_apply_cluster_layout(
pub async fn handle_revert_cluster_layout(
    garage: &Arc<Garage>,
) -> Result<Response<ResBody>, Error> {
-   let layout = garage.system.cluster_layout().inner().clone();
+   let layout = garage.system.cluster_layout().clone();
    let layout = layout.revert_staged_changes()?;
    garage
        .system
@ -325,7 +325,7 @@ impl ApiHandler for S3ApiServer {
                part_number_marker: part_number_marker.map(|p| p.min(10000)),
                max_parts: max_parts.unwrap_or(1000).clamp(1, 1000),
            };
-           handle_list_parts(ctx, req, &query).await
+           handle_list_parts(ctx, &query).await
        }
        Endpoint::DeleteObjects {} => handle_delete_objects(ctx, req, content_sha256).await,
        Endpoint::GetBucketWebsite {} => handle_get_website(ctx).await,
@ -1,406 +0,0 @@
use std::convert::{TryFrom, TryInto};
use std::hash::Hasher;

use base64::prelude::*;
use crc32c::Crc32cHasher as Crc32c;
use crc32fast::Hasher as Crc32;
use md5::{Digest, Md5};
use sha1::Sha1;
use sha2::Sha256;

use http::{HeaderMap, HeaderName, HeaderValue};

use garage_util::data::*;
use garage_util::error::OkOrMessage;

use garage_model::s3::object_table::*;

use crate::s3::error::*;

pub const X_AMZ_CHECKSUM_ALGORITHM: HeaderName =
    HeaderName::from_static("x-amz-checksum-algorithm");
pub const X_AMZ_CHECKSUM_MODE: HeaderName = HeaderName::from_static("x-amz-checksum-mode");
pub const X_AMZ_CHECKSUM_CRC32: HeaderName = HeaderName::from_static("x-amz-checksum-crc32");
pub const X_AMZ_CHECKSUM_CRC32C: HeaderName = HeaderName::from_static("x-amz-checksum-crc32c");
pub const X_AMZ_CHECKSUM_SHA1: HeaderName = HeaderName::from_static("x-amz-checksum-sha1");
pub const X_AMZ_CHECKSUM_SHA256: HeaderName = HeaderName::from_static("x-amz-checksum-sha256");

pub type Crc32Checksum = [u8; 4];
pub type Crc32cChecksum = [u8; 4];
pub type Md5Checksum = [u8; 16];
pub type Sha1Checksum = [u8; 20];
pub type Sha256Checksum = [u8; 32];

#[derive(Debug, Default)]
pub(crate) struct ExpectedChecksums {
    // base64-encoded md5 (content-md5 header)
    pub md5: Option<String>,
    // content_sha256 (as a Hash / FixedBytes32)
    pub sha256: Option<Hash>,
    // extra x-amz-checksum-* header
    pub extra: Option<ChecksumValue>,
}

pub(crate) struct Checksummer {
    pub crc32: Option<Crc32>,
    pub crc32c: Option<Crc32c>,
    pub md5: Option<Md5>,
    pub sha1: Option<Sha1>,
    pub sha256: Option<Sha256>,
}

#[derive(Default)]
pub(crate) struct Checksums {
    pub crc32: Option<Crc32Checksum>,
    pub crc32c: Option<Crc32cChecksum>,
    pub md5: Option<Md5Checksum>,
    pub sha1: Option<Sha1Checksum>,
    pub sha256: Option<Sha256Checksum>,
}

impl Checksummer {
    pub(crate) fn init(expected: &ExpectedChecksums, require_md5: bool) -> Self {
        let mut ret = Self {
            crc32: None,
            crc32c: None,
            md5: None,
            sha1: None,
            sha256: None,
        };

        if expected.md5.is_some() || require_md5 {
            ret.md5 = Some(Md5::new());
        }
        if expected.sha256.is_some() || matches!(&expected.extra, Some(ChecksumValue::Sha256(_))) {
            ret.sha256 = Some(Sha256::new());
        }
        if matches!(&expected.extra, Some(ChecksumValue::Crc32(_))) {
            ret.crc32 = Some(Crc32::new());
        }
        if matches!(&expected.extra, Some(ChecksumValue::Crc32c(_))) {
            ret.crc32c = Some(Crc32c::default());
        }
        if matches!(&expected.extra, Some(ChecksumValue::Sha1(_))) {
            ret.sha1 = Some(Sha1::new());
        }
        ret
    }

    pub(crate) fn add(mut self, algo: Option<ChecksumAlgorithm>) -> Self {
        match algo {
            Some(ChecksumAlgorithm::Crc32) => {
                self.crc32 = Some(Crc32::new());
            }
            Some(ChecksumAlgorithm::Crc32c) => {
                self.crc32c = Some(Crc32c::default());
            }
            Some(ChecksumAlgorithm::Sha1) => {
                self.sha1 = Some(Sha1::new());
            }
            Some(ChecksumAlgorithm::Sha256) => {
                self.sha256 = Some(Sha256::new());
            }
            None => (),
        }
        self
    }

    pub(crate) fn update(&mut self, bytes: &[u8]) {
        if let Some(crc32) = &mut self.crc32 {
            crc32.update(bytes);
        }
        if let Some(crc32c) = &mut self.crc32c {
            crc32c.write(bytes);
        }
        if let Some(md5) = &mut self.md5 {
            md5.update(bytes);
        }
        if let Some(sha1) = &mut self.sha1 {
            sha1.update(bytes);
        }
        if let Some(sha256) = &mut self.sha256 {
            sha256.update(bytes);
        }
    }

    pub(crate) fn finalize(self) -> Checksums {
        Checksums {
            crc32: self.crc32.map(|x| u32::to_be_bytes(x.finalize())),
            crc32c: self
                .crc32c
                .map(|x| u32::to_be_bytes(u32::try_from(x.finish()).unwrap())),
            md5: self.md5.map(|x| x.finalize()[..].try_into().unwrap()),
            sha1: self.sha1.map(|x| x.finalize()[..].try_into().unwrap()),
            sha256: self.sha256.map(|x| x.finalize()[..].try_into().unwrap()),
        }
    }
}

impl Checksums {
    pub fn verify(&self, expected: &ExpectedChecksums) -> Result<(), Error> {
        if let Some(expected_md5) = &expected.md5 {
            match self.md5 {
                Some(md5) if BASE64_STANDARD.encode(&md5) == expected_md5.trim_matches('"') => (),
                _ => {
                    return Err(Error::InvalidDigest(
                        "MD5 checksum verification failed (from content-md5)".into(),
                    ))
                }
            }
        }
        if let Some(expected_sha256) = &expected.sha256 {
            match self.sha256 {
                Some(sha256) if &sha256[..] == expected_sha256.as_slice() => (),
                _ => {
                    return Err(Error::InvalidDigest(
                        "SHA256 checksum verification failed (from x-amz-content-sha256)".into(),
                    ))
                }
            }
        }
        if let Some(extra) = expected.extra {
            let algo = extra.algorithm();
            if self.extract(Some(algo)) != Some(extra) {
                return Err(Error::InvalidDigest(format!(
                    "Failed to validate checksum for algorithm {:?}",
                    algo
                )));
            }
        }
        Ok(())
    }

    pub fn extract(&self, algo: Option<ChecksumAlgorithm>) -> Option<ChecksumValue> {
        match algo {
            None => None,
            Some(ChecksumAlgorithm::Crc32) => Some(ChecksumValue::Crc32(self.crc32.unwrap())),
            Some(ChecksumAlgorithm::Crc32c) => Some(ChecksumValue::Crc32c(self.crc32c.unwrap())),
            Some(ChecksumAlgorithm::Sha1) => Some(ChecksumValue::Sha1(self.sha1.unwrap())),
            Some(ChecksumAlgorithm::Sha256) => Some(ChecksumValue::Sha256(self.sha256.unwrap())),
        }
    }
}

// ----

#[derive(Default)]
pub(crate) struct MultipartChecksummer {
    pub md5: Md5,
    pub extra: Option<MultipartExtraChecksummer>,
}

pub(crate) enum MultipartExtraChecksummer {
    Crc32(Crc32),
    Crc32c(Crc32c),
    Sha1(Sha1),
    Sha256(Sha256),
}

impl MultipartChecksummer {
    pub(crate) fn init(algo: Option<ChecksumAlgorithm>) -> Self {
        Self {
            md5: Md5::new(),
            extra: match algo {
                None => None,
                Some(ChecksumAlgorithm::Crc32) => {
                    Some(MultipartExtraChecksummer::Crc32(Crc32::new()))
                }
                Some(ChecksumAlgorithm::Crc32c) => {
                    Some(MultipartExtraChecksummer::Crc32c(Crc32c::default()))
                }
                Some(ChecksumAlgorithm::Sha1) => Some(MultipartExtraChecksummer::Sha1(Sha1::new())),
                Some(ChecksumAlgorithm::Sha256) => {
                    Some(MultipartExtraChecksummer::Sha256(Sha256::new()))
                }
            },
        }
    }

    pub(crate) fn update(
        &mut self,
        etag: &str,
        checksum: Option<ChecksumValue>,
    ) -> Result<(), Error> {
        self.md5
            .update(&hex::decode(&etag).ok_or_message("invalid etag hex")?);
        match (&mut self.extra, checksum) {
            (None, _) => (),
            (
                Some(MultipartExtraChecksummer::Crc32(ref mut crc32)),
                Some(ChecksumValue::Crc32(x)),
            ) => {
                crc32.update(&x);
            }
            (
                Some(MultipartExtraChecksummer::Crc32c(ref mut crc32c)),
                Some(ChecksumValue::Crc32c(x)),
            ) => {
                crc32c.write(&x);
            }
            (Some(MultipartExtraChecksummer::Sha1(ref mut sha1)), Some(ChecksumValue::Sha1(x))) => {
                sha1.update(&x);
            }
            (
                Some(MultipartExtraChecksummer::Sha256(ref mut sha256)),
                Some(ChecksumValue::Sha256(x)),
            ) => {
                sha256.update(&x);
            }
            (Some(_), b) => {
                return Err(Error::internal_error(format!(
                    "part checksum was not computed correctly, got: {:?}",
                    b
                )))
            }
        }
        Ok(())
    }

    pub(crate) fn finalize(self) -> (Md5Checksum, Option<ChecksumValue>) {
        let md5 = self.md5.finalize()[..].try_into().unwrap();
        let extra = match self.extra {
            None => None,
            Some(MultipartExtraChecksummer::Crc32(crc32)) => {
                Some(ChecksumValue::Crc32(u32::to_be_bytes(crc32.finalize())))
            }
            Some(MultipartExtraChecksummer::Crc32c(crc32c)) => Some(ChecksumValue::Crc32c(
                u32::to_be_bytes(u32::try_from(crc32c.finish()).unwrap()),
            )),
            Some(MultipartExtraChecksummer::Sha1(sha1)) => {
                Some(ChecksumValue::Sha1(sha1.finalize()[..].try_into().unwrap()))
            }
            Some(MultipartExtraChecksummer::Sha256(sha256)) => Some(ChecksumValue::Sha256(
                sha256.finalize()[..].try_into().unwrap(),
            )),
        };
        (md5, extra)
    }
}

// ----

/// Extract the value of the x-amz-checksum-algorithm header
pub(crate) fn request_checksum_algorithm(
    headers: &HeaderMap<HeaderValue>,
) -> Result<Option<ChecksumAlgorithm>, Error> {
    match headers.get(X_AMZ_CHECKSUM_ALGORITHM) {
        None => Ok(None),
        Some(x) if x == "CRC32" => Ok(Some(ChecksumAlgorithm::Crc32)),
        Some(x) if x == "CRC32C" => Ok(Some(ChecksumAlgorithm::Crc32c)),
        Some(x) if x == "SHA1" => Ok(Some(ChecksumAlgorithm::Sha1)),
        Some(x) if x == "SHA256" => Ok(Some(ChecksumAlgorithm::Sha256)),
        _ => Err(Error::bad_request("invalid checksum algorithm")),
    }
}

/// Extract the value of any of the x-amz-checksum-* headers
pub(crate) fn request_checksum_value(
    headers: &HeaderMap<HeaderValue>,
) -> Result<Option<ChecksumValue>, Error> {
    let mut ret = vec![];

    if let Some(crc32_str) = headers.get(X_AMZ_CHECKSUM_CRC32) {
        let crc32 = BASE64_STANDARD
            .decode(&crc32_str)
            .ok()
            .and_then(|x| x.try_into().ok())
            .ok_or_bad_request("invalid x-amz-checksum-crc32 header")?;
        ret.push(ChecksumValue::Crc32(crc32))
    }
    if let Some(crc32c_str) = headers.get(X_AMZ_CHECKSUM_CRC32C) {
        let crc32c = BASE64_STANDARD
            .decode(&crc32c_str)
            .ok()
            .and_then(|x| x.try_into().ok())
            .ok_or_bad_request("invalid x-amz-checksum-crc32c header")?;
        ret.push(ChecksumValue::Crc32c(crc32c))
    }
    if let Some(sha1_str) = headers.get(X_AMZ_CHECKSUM_SHA1) {
        let sha1 = BASE64_STANDARD
            .decode(&sha1_str)
            .ok()
            .and_then(|x| x.try_into().ok())
            .ok_or_bad_request("invalid x-amz-checksum-sha1 header")?;
        ret.push(ChecksumValue::Sha1(sha1))
    }
    if let Some(sha256_str) = headers.get(X_AMZ_CHECKSUM_SHA256) {
        let sha256 = BASE64_STANDARD
            .decode(&sha256_str)
            .ok()
            .and_then(|x| x.try_into().ok())
            .ok_or_bad_request("invalid x-amz-checksum-sha256 header")?;
        ret.push(ChecksumValue::Sha256(sha256))
    }

    if ret.len() > 1 {
        return Err(Error::bad_request(
            "multiple x-amz-checksum-* headers given",
        ));
    }
    Ok(ret.pop())
}

/// Checks for the presence of x-amz-checksum-algorithm
/// if so extract the corresponding x-amz-checksum-* value
pub(crate) fn request_checksum_algorithm_value(
    headers: &HeaderMap<HeaderValue>,
) -> Result<Option<ChecksumValue>, Error> {
    match headers.get(X_AMZ_CHECKSUM_ALGORITHM) {
        Some(x) if x == "CRC32" => {
            let crc32 = headers
                .get(X_AMZ_CHECKSUM_CRC32)
                .and_then(|x| BASE64_STANDARD.decode(&x).ok())
                .and_then(|x| x.try_into().ok())
                .ok_or_bad_request("invalid x-amz-checksum-crc32 header")?;
            Ok(Some(ChecksumValue::Crc32(crc32)))
        }
        Some(x) if x == "CRC32C" => {
            let crc32c = headers
                .get(X_AMZ_CHECKSUM_CRC32C)
                .and_then(|x| BASE64_STANDARD.decode(&x).ok())
                .and_then(|x| x.try_into().ok())
                .ok_or_bad_request("invalid x-amz-checksum-crc32c header")?;
            Ok(Some(ChecksumValue::Crc32c(crc32c)))
        }
        Some(x) if x == "SHA1" => {
            let sha1 = headers
                .get(X_AMZ_CHECKSUM_SHA1)
                .and_then(|x| BASE64_STANDARD.decode(&x).ok())
                .and_then(|x| x.try_into().ok())
                .ok_or_bad_request("invalid x-amz-checksum-sha1 header")?;
            Ok(Some(ChecksumValue::Sha1(sha1)))
        }
        Some(x) if x == "SHA256" => {
            let sha256 = headers
                .get(X_AMZ_CHECKSUM_SHA256)
                .and_then(|x| BASE64_STANDARD.decode(&x).ok())
                .and_then(|x| x.try_into().ok())
                .ok_or_bad_request("invalid x-amz-checksum-sha256 header")?;
            Ok(Some(ChecksumValue::Sha256(sha256)))
        }
        Some(_) => Err(Error::bad_request("invalid x-amz-checksum-algorithm")),
        None => Ok(None),
    }
}

pub(crate) fn add_checksum_response_headers(
    checksum: &Option<ChecksumValue>,
    mut resp: http::response::Builder,
) -> http::response::Builder {
    match checksum {
        Some(ChecksumValue::Crc32(crc32)) => {
            resp = resp.header(X_AMZ_CHECKSUM_CRC32, BASE64_STANDARD.encode(&crc32));
        }
        Some(ChecksumValue::Crc32c(crc32c)) => {
            resp = resp.header(X_AMZ_CHECKSUM_CRC32C, BASE64_STANDARD.encode(&crc32c));
        }
        Some(ChecksumValue::Sha1(sha1)) => {
            resp = resp.header(X_AMZ_CHECKSUM_SHA1, BASE64_STANDARD.encode(&sha1));
        }
        Some(ChecksumValue::Sha256(sha256)) => {
            resp = resp.header(X_AMZ_CHECKSUM_SHA256, BASE64_STANDARD.encode(&sha256));
        }
        None => (),
    }
    resp
}
@ -2,6 +2,7 @@ use std::pin::Pin;
use std::time::{Duration, SystemTime, UNIX_EPOCH};

use futures::{stream, stream::Stream, StreamExt, TryStreamExt};
+use md5::{Digest as Md5Digest, Md5};

use bytes::Bytes;
use hyper::{Request, Response};
@ -22,12 +23,11 @@ use garage_model::s3::version_table::*;

use crate::helpers::*;
use crate::s3::api_server::{ReqBody, ResBody};
-use crate::s3::checksum::*;
use crate::s3::encryption::EncryptionParams;
use crate::s3::error::*;
use crate::s3::get::full_object_byte_stream;
use crate::s3::multipart;
-use crate::s3::put::{get_headers, save_stream, ChecksumMode, SaveStreamResult};
+use crate::s3::put::{get_headers, save_stream, SaveStreamResult};
use crate::s3::xml::{self as s3_xml, xmlns_tag};

// -------- CopyObject ---------
@ -39,8 +39,6 @@ pub async fn handle_copy(
) -> Result<Response<ResBody>, Error> {
    let copy_precondition = CopyPreconditionHeaders::parse(req)?;

-   let checksum_algorithm = request_checksum_algorithm(req.headers())?;
-
    let source_object = get_copy_source(&ctx, req).await?;

    let (source_version, source_version_data, source_version_meta) =
@ -50,7 +48,7 @@ pub async fn handle_copy(
    copy_precondition.check(source_version, &source_version_meta.etag)?;

    // Determine encryption parameters
-   let (source_encryption, source_object_meta_inner) =
+   let (source_encryption, source_object_headers) =
        EncryptionParams::check_decrypt_for_copy_source(
            &ctx.garage,
            req.headers(),
@ -58,54 +56,23 @@ pub async fn handle_copy(
        )?;
    let dest_encryption = EncryptionParams::new_from_headers(&ctx.garage, req.headers())?;

-   // Extract source checksum info before source_object_meta_inner is consumed
-   let source_checksum = source_object_meta_inner.checksum;
-   let source_checksum_algorithm = source_checksum.map(|x| x.algorithm());
-
-   // If source object has a checksum, the destination object must as well.
-   // The x-amz-checksum-algorithm header allows changing that algorithm,
-   // but if it is absent, we must use the same as before
-   let checksum_algorithm = checksum_algorithm.or(source_checksum_algorithm);
-
-   // Determine metadata of destination object
-   let was_multipart = source_version_meta.etag.contains('-');
-   let dest_object_meta = ObjectVersionMetaInner {
-       headers: match req.headers().get("x-amz-metadata-directive") {
+   // Determine headers of destination object
+   let dest_object_headers = match req.headers().get("x-amz-metadata-directive") {
        Some(v) if v == hyper::header::HeaderValue::from_static("REPLACE") => {
            get_headers(req.headers())?
        }
-       _ => source_object_meta_inner.into_owned().headers,
-       },
-       checksum: source_checksum,
+       _ => source_object_headers.into_owned(),
    };

    // Do actual object copying
-   //
-   // In any of the following scenarios, we need to read the whole object
-   // data and re-write it again:
-   //
-   // - the data needs to be decrypted or encrypted
-   // - the requested checksum algorithm requires us to recompute a checksum
-   // - the original object was a multipart upload and a checksum algorithm
-   //   is defined (AWS specifies that in this case, we must recompute the
-   //   checksum from scratch as if this was a single big object and not
-   //   a multipart object, as the checksums are not computed in the same way)
-   //
-   // In other cases, we can just copy the metadata and reference the same blocks.
-   //
-   // See: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
-
-   let must_recopy = !EncryptionParams::is_same(&source_encryption, &dest_encryption)
-       || source_checksum_algorithm != checksum_algorithm
-       || (was_multipart && checksum_algorithm.is_some());
-
-   let res = if !must_recopy {
-       // In most cases, we can just copy the metadata and link blocks of the
+   let res = if EncryptionParams::is_same(&source_encryption, &dest_encryption) {
+       // If source and dest are both unencrypted, or if the encryption keys
+       // are the same, we can just copy the metadata and link blocks of the
        // old object from the new object.
        handle_copy_metaonly(
            ctx,
            dest_key,
-           dest_object_meta,
+           dest_object_headers,
            dest_encryption,
            source_version,
            source_version_data,
@ -113,27 +80,16 @@ pub async fn handle_copy(
        )
        .await?
    } else {
-       let expected_checksum = ExpectedChecksums {
-           md5: None,
-           sha256: None,
-           extra: source_checksum,
-       };
-       let checksum_mode = if was_multipart || source_checksum_algorithm != checksum_algorithm {
-           ChecksumMode::Calculate(checksum_algorithm)
-       } else {
-           ChecksumMode::Verify(&expected_checksum)
-       };
        // If source and dest encryption use different keys,
        // we must decrypt content and re-encrypt, so rewrite all data blocks.
        handle_copy_reencrypt(
            ctx,
            dest_key,
-           dest_object_meta,
+           dest_object_headers,
            dest_encryption,
            source_version,
            source_version_data,
            source_encryption,
-           checksum_mode,
        )
        .await?
    };
@ -159,7 +115,7 @@ pub async fn handle_copy(
async fn handle_copy_metaonly(
    ctx: ReqCtx,
    dest_key: &str,
-   dest_object_meta: ObjectVersionMetaInner,
+   dest_object_headers: ObjectVersionHeaders,
    dest_encryption: EncryptionParams,
    source_version: &ObjectVersion,
    source_version_data: &ObjectVersionData,
@ -176,7 +132,7 @@ async fn handle_copy_metaonly(
    let new_timestamp = now_msec();

    let new_meta = ObjectVersionMeta {
-       encryption: dest_encryption.encrypt_meta(dest_object_meta)?,
+       encryption: dest_encryption.encrypt_headers(dest_object_headers)?,
        size: source_version_meta.size,
        etag: source_version_meta.etag.clone(),
    };
@ -224,7 +180,6 @@ async fn handle_copy_metaonly(
            timestamp: new_timestamp,
            state: ObjectVersionState::Uploading {
                encryption: new_meta.encryption.clone(),
-               checksum_algorithm: None,
                multipart: false,
            },
        };
@ -297,12 +252,11 @@ async fn handle_copy_metaonly(
async fn handle_copy_reencrypt(
    ctx: ReqCtx,
    dest_key: &str,
-   dest_object_meta: ObjectVersionMetaInner,
+   dest_object_headers: ObjectVersionHeaders,
    dest_encryption: EncryptionParams,
    source_version: &ObjectVersion,
    source_version_data: &ObjectVersionData,
    source_encryption: EncryptionParams,
-   checksum_mode: ChecksumMode<'_>,
) -> Result<SaveStreamResult, Error> {
    // basically we will read the source data (decrypt if necessary)
    // and save that in a new object (encrypt if necessary),
@ -316,11 +270,12 @@ async fn handle_copy_reencrypt(

    save_stream(
        &ctx,
|
||||||
dest_object_meta,
|
dest_object_headers,
|
||||||
dest_encryption,
|
dest_encryption,
|
||||||
source_stream.map_err(|e| Error::from(GarageError::from(e))),
|
source_stream.map_err(|e| Error::from(GarageError::from(e))),
|
||||||
&dest_key.to_string(),
|
&dest_key.to_string(),
|
||||||
checksum_mode,
|
None,
|
||||||
|
None,
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
}
|
}
|
||||||
|
@ -358,12 +313,8 @@ pub async fn handle_upload_part_copy(
|
||||||
req.headers(),
|
req.headers(),
|
||||||
&source_version_meta.encryption,
|
&source_version_meta.encryption,
|
||||||
)?;
|
)?;
|
||||||
let (dest_object_encryption, dest_object_checksum_algorithm) = match dest_version.state {
|
let dest_object_encryption = match dest_version.state {
|
||||||
ObjectVersionState::Uploading {
|
ObjectVersionState::Uploading { encryption, .. } => encryption,
|
||||||
encryption,
|
|
||||||
checksum_algorithm,
|
|
||||||
..
|
|
||||||
} => (encryption, checksum_algorithm),
|
|
||||||
_ => unreachable!(),
|
_ => unreachable!(),
|
||||||
};
|
};
|
||||||
let (dest_encryption, _) =
|
let (dest_encryption, _) =
|
||||||
|
@ -461,9 +412,7 @@ pub async fn handle_upload_part_copy(
|
||||||
dest_mpu_part_key,
|
dest_mpu_part_key,
|
||||||
MpuPart {
|
MpuPart {
|
||||||
version: dest_version_id,
|
version: dest_version_id,
|
||||||
// These are all filled in later (bottom of this function)
|
|
||||||
etag: None,
|
etag: None,
|
||||||
checksum: None,
|
|
||||||
size: None,
|
size: None,
|
||||||
},
|
},
|
||||||
);
|
);
|
||||||
|
@ -480,8 +429,7 @@ pub async fn handle_upload_part_copy(
|
||||||
garage.version_table.insert(&dest_version).await?;
|
garage.version_table.insert(&dest_version).await?;
|
||||||
|
|
||||||
// Now, actually copy the blocks
|
// Now, actually copy the blocks
|
||||||
let mut checksummer = Checksummer::init(&Default::default(), !dest_encryption.is_encrypted())
|
let mut md5hasher = Md5::new();
|
||||||
.add(dest_object_checksum_algorithm);
|
|
||||||
|
|
||||||
// First, create a stream that is able to read the source blocks
|
// First, create a stream that is able to read the source blocks
|
||||||
// and extract the subrange if necessary.
|
// and extract the subrange if necessary.
|
||||||
|
@ -547,24 +495,18 @@ pub async fn handle_upload_part_copy(
|
||||||
}
|
}
|
||||||
|
|
||||||
let data_len = data.len() as u64;
|
let data_len = data.len() as u64;
|
||||||
|
md5hasher.update(&data[..]);
|
||||||
|
|
||||||
let (checksummer_updated, (data_to_upload, final_hash)) =
|
let (final_data, must_upload, final_hash) = match existing_block_hash {
|
||||||
tokio::task::spawn_blocking(move || {
|
Some(hash) if same_encryption => (data, false, hash),
|
||||||
checksummer.update(&data[..]);
|
_ => tokio::task::spawn_blocking(move || {
|
||||||
|
|
||||||
let tup = match existing_block_hash {
|
|
||||||
Some(hash) if same_encryption => (None, hash),
|
|
||||||
_ => {
|
|
||||||
let data_enc = dest_encryption.encrypt_block(data)?;
|
let data_enc = dest_encryption.encrypt_block(data)?;
|
||||||
let hash = blake2sum(&data_enc);
|
let hash = blake2sum(&data_enc);
|
||||||
(Some(data_enc), hash)
|
Ok::<_, Error>((data_enc, true, hash))
|
||||||
}
|
|
||||||
};
|
|
||||||
Ok::<_, Error>((checksummer, tup))
|
|
||||||
})
|
})
|
||||||
.await
|
.await
|
||||||
.unwrap()?;
|
.unwrap()?,
|
||||||
checksummer = checksummer_updated;
|
};
|
||||||
|
|
||||||
dest_version.blocks.clear();
|
dest_version.blocks.clear();
|
||||||
dest_version.blocks.put(
|
dest_version.blocks.put(
|
||||||
|
@ -589,7 +531,7 @@ pub async fn handle_upload_part_copy(
|
||||||
// Thing 1: if the block is not exactly a block that existed before,
|
// Thing 1: if the block is not exactly a block that existed before,
|
||||||
// we need to insert that data as a new block.
|
// we need to insert that data as a new block.
|
||||||
async {
|
async {
|
||||||
if let Some(final_data) = data_to_upload {
|
if must_upload {
|
||||||
garage
|
garage
|
||||||
.block_manager
|
.block_manager
|
||||||
.rpc_put_block(final_hash, final_data, dest_encryption.is_encrypted(), None)
|
.rpc_put_block(final_hash, final_data, dest_encryption.is_encrypted(), None)
|
||||||
|
@ -610,9 +552,8 @@ pub async fn handle_upload_part_copy(
|
||||||
|
|
||||||
assert_eq!(current_offset, source_range.length);
|
assert_eq!(current_offset, source_range.length);
|
||||||
|
|
||||||
let checksums = checksummer.finalize();
|
let data_md5sum = md5hasher.finalize();
|
||||||
let etag = dest_encryption.etag_from_md5(&checksums.md5);
|
let etag = dest_encryption.etag_from_md5(&data_md5sum);
|
||||||
let checksum = checksums.extract(dest_object_checksum_algorithm);
|
|
||||||
|
|
||||||
// Put the part's ETag in the Versiontable
|
// Put the part's ETag in the Versiontable
|
||||||
dest_mpu.parts.put(
|
dest_mpu.parts.put(
|
||||||
|
@ -620,7 +561,6 @@ pub async fn handle_upload_part_copy(
|
||||||
MpuPart {
|
MpuPart {
|
||||||
version: dest_version_id,
|
version: dest_version_id,
|
||||||
etag: Some(etag.clone()),
|
etag: Some(etag.clone()),
|
||||||
checksum,
|
|
||||||
size: Some(current_offset),
|
size: Some(current_offset),
|
||||||
},
|
},
|
||||||
);
|
);
|
||||||
|
|
|
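The `must_recopy` condition removed above is the interesting part of this hunk: CopyObject can normally just relink the existing blocks, but it has to stream and rewrite the data whenever re-encryption or checksum recomputation is required. A minimal standalone sketch of that decision, with stand-in types (`EncKey` and `Algo` are placeholders, not Garage's own types):

```rust
// Sketch of the copy-path decision implemented by the removed code.
#[derive(PartialEq)]
struct EncKey(u64); // placeholder for an encryption key

type Algo = Option<&'static str>; // placeholder for a checksum algorithm

/// Returns true when CopyObject must re-read and rewrite the object's data
/// instead of only relinking its existing blocks.
fn must_recopy(
    src_key: &Option<EncKey>,
    dst_key: &Option<EncKey>,
    src_algo: Algo,
    dst_algo: Algo,
    was_multipart: bool,
) -> bool {
    src_key != dst_key // data must be decrypted and/or re-encrypted
        || src_algo != dst_algo // a different checksum must be computed
        || (was_multipart && dst_algo.is_some()) // composite -> whole-object checksum
}

fn main() {
    // Same key, same algorithm, single-part source: metadata-only copy suffices.
    assert!(!must_recopy(&None, &None, Some("sha256"), Some("sha256"), false));
    // Multipart source with a checksum algorithm: per the AWS rule cited in the
    // removed comment, the checksum must be recomputed over the whole object.
    assert!(must_recopy(&None, &None, Some("sha256"), Some("sha256"), true));
}
```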
```diff
@@ -26,10 +26,9 @@ use garage_util::error::Error as GarageError;
 use garage_util::migrate::Migrate;
 
 use garage_model::garage::Garage;
-use garage_model::s3::object_table::{ObjectVersionEncryption, ObjectVersionMetaInner};
+use garage_model::s3::object_table::{ObjectVersionEncryption, ObjectVersionHeaders};
 
 use crate::common_error::*;
-use crate::s3::checksum::Md5Checksum;
 use crate::s3::error::Error;
 
 const X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM: HeaderName =
@@ -125,7 +124,7 @@ impl EncryptionParams {
         garage: &Garage,
         headers: &HeaderMap,
         obj_enc: &'a ObjectVersionEncryption,
-    ) -> Result<(Self, Cow<'a, ObjectVersionMetaInner>), Error> {
+    ) -> Result<(Self, Cow<'a, ObjectVersionHeaders>), Error> {
         let key = parse_request_headers(
             headers,
             &X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM,
@@ -139,7 +138,7 @@ impl EncryptionParams {
         garage: &Garage,
         headers: &HeaderMap,
         obj_enc: &'a ObjectVersionEncryption,
-    ) -> Result<(Self, Cow<'a, ObjectVersionMetaInner>), Error> {
+    ) -> Result<(Self, Cow<'a, ObjectVersionHeaders>), Error> {
         let key = parse_request_headers(
             headers,
             &X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM,
@@ -153,11 +152,14 @@ impl EncryptionParams {
         garage: &Garage,
         key: Option<(Key<Aes256Gcm>, Md5Output)>,
         obj_enc: &'a ObjectVersionEncryption,
-    ) -> Result<(Self, Cow<'a, ObjectVersionMetaInner>), Error> {
+    ) -> Result<(Self, Cow<'a, ObjectVersionHeaders>), Error> {
         match (key, &obj_enc) {
             (
                 Some((client_key, client_key_md5)),
-                ObjectVersionEncryption::SseC { inner, compressed },
+                ObjectVersionEncryption::SseC {
+                    headers,
+                    compressed,
+                },
             ) => {
                 let enc = Self::SseC {
                     client_key,
@@ -168,13 +170,13 @@ impl EncryptionParams {
                         None
                     },
                 };
-                let plaintext = enc.decrypt_blob(&inner)?;
-                let inner = ObjectVersionMetaInner::decode(&plaintext)
-                    .ok_or_internal_error("Could not decode encrypted metadata")?;
-                Ok((enc, Cow::Owned(inner)))
+                let plaintext = enc.decrypt_blob(&headers)?;
+                let headers = ObjectVersionHeaders::decode(&plaintext)
+                    .ok_or_internal_error("Could not decode encrypted headers")?;
+                Ok((enc, Cow::Owned(headers)))
             }
-            (None, ObjectVersionEncryption::Plaintext { inner }) => {
-                Ok((Self::Plaintext, Cow::Borrowed(inner)))
+            (None, ObjectVersionEncryption::Plaintext { headers }) => {
+                Ok((Self::Plaintext, Cow::Borrowed(headers)))
             }
             (_, ObjectVersionEncryption::SseC { .. }) => {
                 Err(Error::bad_request("Object is encrypted"))
@@ -186,31 +188,29 @@ impl EncryptionParams {
         }
     }
 
-    pub fn encrypt_meta(
+    pub fn encrypt_headers(
         &self,
-        meta: ObjectVersionMetaInner,
+        h: ObjectVersionHeaders,
     ) -> Result<ObjectVersionEncryption, Error> {
         match self {
             Self::SseC {
                 compression_level, ..
             } => {
-                let plaintext = meta.encode().map_err(GarageError::from)?;
+                let plaintext = h.encode().map_err(GarageError::from)?;
                 let ciphertext = self.encrypt_blob(&plaintext)?;
                 Ok(ObjectVersionEncryption::SseC {
-                    inner: ciphertext.into_owned(),
+                    headers: ciphertext.into_owned(),
                     compressed: compression_level.is_some(),
                 })
             }
-            Self::Plaintext => Ok(ObjectVersionEncryption::Plaintext { inner: meta }),
+            Self::Plaintext => Ok(ObjectVersionEncryption::Plaintext { headers: h }),
         }
     }
 
     // ---- generating object Etag values ----
-    pub fn etag_from_md5(&self, md5sum: &Option<Md5Checksum>) -> String {
+    pub fn etag_from_md5(&self, md5sum: &[u8]) -> String {
         match self {
-            Self::Plaintext => md5sum
-                .map(|x| hex::encode(&x[..]))
-                .expect("md5 digest should have been computed"),
+            Self::Plaintext => hex::encode(md5sum),
             Self::SseC { .. } => {
                 // AWS specifies that for encrypted objects, the Etag is not
                 // the md5sum of the data, but doesn't say what it is.
@@ -224,7 +224,7 @@ impl EncryptionParams {
 
     // ---- generic function for encrypting / decrypting blobs ----
     // Prepends a randomly-generated nonce to the encrypted value.
-    // This is used for encrypting object metadata and inlined data for small objects.
+    // This is used for encrypting object headers and inlined data for small objects.
     // This does not compress anything.
 
     pub fn encrypt_blob<'a>(&self, blob: &'a [u8]) -> Result<Cow<'a, [u8]>, Error> {
```
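The comment kept in the last hunk describes the blob format used here: a randomly generated nonce prepended to the AES-GCM ciphertext. A self-contained sketch of that format using the aes-gcm crate directly (an illustration only, not Garage's actual wrapper, which additionally handles compression):

```rust
// Sketch of the "random nonce prepended to the ciphertext" blob format,
// assuming aes-gcm 0.10 with its default `getrandom` feature.
use aes_gcm::aead::{Aead, AeadCore, KeyInit, OsRng};
use aes_gcm::{Aes256Gcm, Key, Nonce};

fn encrypt_blob(key: &Key<Aes256Gcm>, plaintext: &[u8]) -> Result<Vec<u8>, aes_gcm::Error> {
    let cipher = Aes256Gcm::new(key);
    let nonce = Aes256Gcm::generate_nonce(&mut OsRng); // 96-bit random nonce
    let mut out = nonce.to_vec(); // nonce goes first...
    out.extend(cipher.encrypt(&nonce, plaintext)?); // ...then ciphertext + auth tag
    Ok(out)
}

fn decrypt_blob(key: &Key<Aes256Gcm>, blob: &[u8]) -> Result<Vec<u8>, aes_gcm::Error> {
    if blob.len() < 12 {
        return Err(aes_gcm::Error); // too short to contain the nonce prefix
    }
    let cipher = Aes256Gcm::new(key);
    let (nonce, ciphertext) = blob.split_at(12); // 12-byte nonce prefix
    cipher.decrypt(Nonce::from_slice(nonce), ciphertext)
}

fn main() {
    let key = Aes256Gcm::generate_key(&mut OsRng);
    let blob = encrypt_blob(&key, b"x-amz-meta-foo: bar").unwrap();
    assert_eq!(decrypt_blob(&key, &blob).unwrap(), b"x-amz-meta-foo: bar");
}
```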
```diff
@@ -69,10 +69,6 @@ pub enum Error {
     #[error(display = "Invalid encryption algorithm: {:?}, should be AES256", _0)]
     InvalidEncryptionAlgorithm(String),
 
-    /// The client sent invalid XML data
-    #[error(display = "Invalid digest: {}", _0)]
-    InvalidDigest(String),
-
     /// The client sent a request for an action not supported by garage
     #[error(display = "Unimplemented action: {}", _0)]
     NotImplemented(String),
@@ -133,7 +129,6 @@ impl Error {
             Error::NotImplemented(_) => "NotImplemented",
             Error::InvalidXml(_) => "MalformedXML",
             Error::InvalidRange(_) => "InvalidRange",
-            Error::InvalidDigest(_) => "InvalidDigest",
             Error::InvalidUtf8Str(_) | Error::InvalidUtf8String(_) => "InvalidRequest",
             Error::InvalidEncryptionAlgorithm(_) => "InvalidEncryptionAlgorithmError",
         }
@@ -153,7 +148,6 @@ impl ApiError for Error {
             | Error::InvalidPart
             | Error::InvalidPartOrder
             | Error::EntityTooSmall
-            | Error::InvalidDigest(_)
             | Error::InvalidEncryptionAlgorithm(_)
             | Error::InvalidXml(_)
             | Error::InvalidUtf8Str(_)
```
```diff
@@ -27,7 +27,6 @@ use garage_model::s3::version_table::*;
 
 use crate::helpers::*;
 use crate::s3::api_server::ResBody;
-use crate::s3::checksum::{add_checksum_response_headers, X_AMZ_CHECKSUM_MODE};
 use crate::s3::encryption::EncryptionParams;
 use crate::s3::error::*;
 
@@ -46,9 +45,8 @@ pub struct GetObjectOverrides {
 fn object_headers(
     version: &ObjectVersion,
     version_meta: &ObjectVersionMeta,
-    meta_inner: &ObjectVersionMetaInner,
+    headers: &ObjectVersionHeaders,
     encryption: EncryptionParams,
-    checksum_mode: ChecksumMode,
 ) -> http::response::Builder {
     debug!("Version meta: {:?}", version_meta);
 
@@ -67,7 +65,7 @@ fn object_headers(
     // have the same name (ignoring case) into a comma-delimited list.
     // See: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingMetadata.html
     let mut headers_by_name = BTreeMap::new();
-    for (name, value) in meta_inner.headers.iter() {
+    for (name, value) in headers.0.iter() {
         match headers_by_name.get_mut(name) {
             None => {
                 headers_by_name.insert(name, vec![value.as_str()]);
@@ -82,10 +80,6 @@ fn object_headers(
         resp = resp.header(name, values.join(","));
     }
 
-    if checksum_mode.enabled {
-        resp = add_checksum_response_headers(&meta_inner.checksum, resp);
-    }
-
     encryption.add_response_headers(&mut resp);
 
     resp
@@ -205,8 +199,6 @@ pub async fn handle_head_without_ctx(
     let (encryption, headers) =
         EncryptionParams::check_decrypt(&garage, req.headers(), &version_meta.encryption)?;
 
-    let checksum_mode = checksum_mode(&req);
-
     if let Some(pn) = part_number {
         match version_data {
             ObjectVersionData::Inline(_, _) => {
@@ -214,13 +206,8 @@ pub async fn handle_head_without_ctx(
                     return Err(Error::InvalidPart);
                 }
                 let bytes_len = version_meta.size;
-                Ok(object_headers(
-                    object_version,
-                    version_meta,
-                    &headers,
-                    encryption,
-                    checksum_mode,
-                )
+                Ok(
+                    object_headers(object_version, version_meta, &headers, encryption)
                     .header(CONTENT_LENGTH, format!("{}", bytes_len))
                     .header(
                         CONTENT_RANGE,
@@ -228,7 +215,8 @@ pub async fn handle_head_without_ctx(
                     )
                     .header(X_AMZ_MP_PARTS_COUNT, "1")
                     .status(StatusCode::PARTIAL_CONTENT)
-                    .body(empty_body())?)
+                    .body(empty_body())?,
+                )
             }
             ObjectVersionData::FirstBlock(_, _) => {
                 let version = garage
@@ -240,13 +228,8 @@ pub async fn handle_head_without_ctx(
                 let (part_offset, part_end) =
                     calculate_part_bounds(&version, pn).ok_or(Error::InvalidPart)?;
 
-                Ok(object_headers(
-                    object_version,
-                    version_meta,
-                    &headers,
-                    encryption,
-                    checksum_mode,
-                )
+                Ok(
+                    object_headers(object_version, version_meta, &headers, encryption)
                     .header(CONTENT_LENGTH, format!("{}", part_end - part_offset))
                     .header(
                         CONTENT_RANGE,
@@ -259,21 +242,18 @@ pub async fn handle_head_without_ctx(
                    )
                     .header(X_AMZ_MP_PARTS_COUNT, format!("{}", version.n_parts()?))
                     .status(StatusCode::PARTIAL_CONTENT)
-                    .body(empty_body())?)
+                    .body(empty_body())?,
+                )
             }
             _ => unreachable!(),
         }
     } else {
-        Ok(object_headers(
-            object_version,
-            version_meta,
-            &headers,
-            encryption,
-            checksum_mode,
-        )
+        Ok(
+            object_headers(object_version, version_meta, &headers, encryption)
            .header(CONTENT_LENGTH, format!("{}", version_meta.size))
            .status(StatusCode::OK)
-           .body(empty_body())?)
+           .body(empty_body())?,
+        )
     }
 }
 
@@ -327,24 +307,12 @@ pub async fn handle_get_without_ctx(
     let (enc, headers) =
         EncryptionParams::check_decrypt(&garage, req.headers(), &last_v_meta.encryption)?;
 
-    let checksum_mode = checksum_mode(&req);
-
     match (part_number, parse_range_header(req, last_v_meta.size)?) {
         (Some(_), Some(_)) => Err(Error::bad_request(
             "Cannot specify both partNumber and Range header",
         )),
         (Some(pn), None) => {
-            handle_get_part(
-                garage,
-                last_v,
-                last_v_data,
-                last_v_meta,
-                enc,
-                &headers,
-                pn,
-                checksum_mode,
-            )
-            .await
+            handle_get_part(garage, last_v, last_v_data, last_v_meta, enc, &headers, pn).await
         }
         (None, Some(range)) => {
             handle_get_range(
@@ -356,7 +324,6 @@ pub async fn handle_get_without_ctx(
                 &headers,
                 range.start,
                 range.start + range.length,
-                checksum_mode,
             )
             .await
         }
@@ -369,7 +336,6 @@ pub async fn handle_get_without_ctx(
                 enc,
                 &headers,
                 overrides,
-                checksum_mode,
             )
             .await
         }
@@ -382,17 +348,10 @@ async fn handle_get_full(
     version_data: &ObjectVersionData,
     version_meta: &ObjectVersionMeta,
     encryption: EncryptionParams,
-    meta_inner: &ObjectVersionMetaInner,
+    headers: &ObjectVersionHeaders,
     overrides: GetObjectOverrides,
-    checksum_mode: ChecksumMode,
 ) -> Result<Response<ResBody>, Error> {
-    let mut resp_builder = object_headers(
-        version,
-        version_meta,
-        &meta_inner,
-        encryption,
-        checksum_mode,
-    )
+    let mut resp_builder = object_headers(version, version_meta, &headers, encryption)
         .header(CONTENT_LENGTH, format!("{}", version_meta.size))
         .status(StatusCode::OK);
     getobject_override_headers(overrides, &mut resp_builder)?;
@@ -473,15 +432,14 @@ async fn handle_get_range(
     version_data: &ObjectVersionData,
     version_meta: &ObjectVersionMeta,
     encryption: EncryptionParams,
-    meta_inner: &ObjectVersionMetaInner,
+    headers: &ObjectVersionHeaders,
     begin: u64,
     end: u64,
-    checksum_mode: ChecksumMode,
 ) -> Result<Response<ResBody>, Error> {
     // Here we do not use getobject_override_headers because we don't
     // want to add any overridden headers (those should not be added
     // when returning PARTIAL_CONTENT)
-    let resp_builder = object_headers(version, version_meta, meta_inner, encryption, checksum_mode)
+    let resp_builder = object_headers(version, version_meta, headers, encryption)
         .header(CONTENT_LENGTH, format!("{}", end - begin))
         .header(
             CONTENT_RANGE,
@@ -522,18 +480,11 @@ async fn handle_get_part(
     version_data: &ObjectVersionData,
     version_meta: &ObjectVersionMeta,
     encryption: EncryptionParams,
-    meta_inner: &ObjectVersionMetaInner,
+    headers: &ObjectVersionHeaders,
     part_number: u64,
-    checksum_mode: ChecksumMode,
 ) -> Result<Response<ResBody>, Error> {
     // Same as for get_range, no getobject_override_headers
-    let resp_builder = object_headers(
-        object_version,
-        version_meta,
-        meta_inner,
-        encryption,
-        checksum_mode,
-    )
+    let resp_builder = object_headers(object_version, version_meta, headers, encryption)
         .status(StatusCode::PARTIAL_CONTENT);
 
     match version_data {
@@ -616,20 +567,6 @@ fn calculate_part_bounds(v: &Version, part_number: u64) -> Option<(u64, u64)> {
     None
 }
 
-struct ChecksumMode {
-    enabled: bool,
-}
-
-fn checksum_mode(req: &Request<impl Body>) -> ChecksumMode {
-    ChecksumMode {
-        enabled: req
-            .headers()
-            .get(X_AMZ_CHECKSUM_MODE)
-            .map(|x| x == "ENABLED")
-            .unwrap_or(false),
-    }
-}
-
 fn body_from_blocks_range(
     garage: Arc<Garage>,
     encryption: EncryptionParams,
```
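Independent of the checksum removal, `object_headers()` keeps its header-merging behavior in both versions: repeated user-metadata headers are folded into a single comma-delimited value, per the S3 user guide linked in the code. The merge in isolation, as a minimal sketch:

```rust
// Standalone illustration of the header merge performed by object_headers():
// values of headers that share a name are joined with commas.
use std::collections::BTreeMap;

fn merge(headers: &[(&str, &str)]) -> BTreeMap<String, String> {
    let mut by_name: BTreeMap<String, Vec<&str>> = BTreeMap::new();
    for &(name, value) in headers {
        by_name.entry(name.to_string()).or_default().push(value);
    }
    by_name
        .into_iter()
        .map(|(name, values)| (name, values.join(",")))
        .collect()
}

fn main() {
    let merged = merge(&[("x-amz-meta-color", "blue"), ("x-amz-meta-color", "red")]);
    assert_eq!(merged["x-amz-meta-color"], "blue,red");
}
```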
```diff
@@ -2,7 +2,7 @@ use std::collections::{BTreeMap, BTreeSet};
 use std::iter::{Iterator, Peekable};
 
 use base64::prelude::*;
-use hyper::{Request, Response};
+use hyper::Response;
 
 use garage_util::data::*;
 use garage_util::error::Error as GarageError;
@@ -15,8 +15,7 @@ use garage_table::EnumerationOrder;
 
 use crate::encoding::*;
 use crate::helpers::*;
-use crate::s3::api_server::{ReqBody, ResBody};
-use crate::s3::encryption::EncryptionParams;
+use crate::s3::api_server::ResBody;
 use crate::s3::error::*;
 use crate::s3::multipart as s3_multipart;
 use crate::s3::xml as s3_xml;
@@ -272,21 +271,13 @@ pub async fn handle_list_multipart_upload(
 
 pub async fn handle_list_parts(
     ctx: ReqCtx,
-    req: Request<ReqBody>,
     query: &ListPartsQuery,
 ) -> Result<Response<ResBody>, Error> {
     debug!("ListParts {:?}", query);
 
     let upload_id = s3_multipart::decode_upload_id(&query.upload_id)?;
 
-    let (_, object_version, mpu) = s3_multipart::get_upload(&ctx, &query.key, &upload_id).await?;
-
-    let object_encryption = match object_version.state {
-        ObjectVersionState::Uploading { encryption, .. } => encryption,
-        _ => unreachable!(),
-    };
-    let encryption_res =
-        EncryptionParams::check_decrypt(&ctx.garage, req.headers(), &object_encryption);
+    let (_, _, mpu) = s3_multipart::get_upload(&ctx, &query.key, &upload_id).await?;
 
     let (info, next) = fetch_part_info(query, &mpu)?;
 
@@ -305,40 +296,11 @@ pub async fn handle_list_parts(
         is_truncated: s3_xml::Value(format!("{}", next.is_some())),
         parts: info
             .iter()
-            .map(|part| {
-                // hide checksum if object is encrypted and the decryption
-                // keys are not provided
-                let checksum = part.checksum.filter(|_| encryption_res.is_ok());
-                s3_xml::PartItem {
+            .map(|part| s3_xml::PartItem {
                 etag: s3_xml::Value(format!("\"{}\"", part.etag)),
                 last_modified: s3_xml::Value(msec_to_rfc3339(part.timestamp)),
                 part_number: s3_xml::IntValue(part.part_number as i64),
                 size: s3_xml::IntValue(part.size as i64),
-                checksum_crc32: match &checksum {
-                    Some(ChecksumValue::Crc32(x)) => {
-                        Some(s3_xml::Value(BASE64_STANDARD.encode(&x)))
-                    }
-                    _ => None,
-                },
-                checksum_crc32c: match &checksum {
-                    Some(ChecksumValue::Crc32c(x)) => {
-                        Some(s3_xml::Value(BASE64_STANDARD.encode(&x)))
-                    }
-                    _ => None,
-                },
-                checksum_sha1: match &checksum {
-                    Some(ChecksumValue::Sha1(x)) => {
-                        Some(s3_xml::Value(BASE64_STANDARD.encode(&x)))
-                    }
-                    _ => None,
-                },
-                checksum_sha256: match &checksum {
-                    Some(ChecksumValue::Sha256(x)) => {
-                        Some(s3_xml::Value(BASE64_STANDARD.encode(&x)))
-                    }
-                    _ => None,
-                },
-                }
             })
             .collect(),
@@ -384,7 +346,6 @@ struct PartInfo<'a> {
     timestamp: u64,
     part_number: u64,
     size: u64,
-    checksum: Option<ChecksumValue>,
 }
 
 enum ExtractionResult {
@@ -525,7 +486,6 @@ fn fetch_part_info<'a>(
                     timestamp: pk.timestamp,
                     etag,
                     size,
-                    checksum: p.checksum,
                 };
                 match parts.last_mut() {
                     Some(lastpart) if lastpart.part_number == pk.part_number => {
@@ -985,13 +945,9 @@ mod tests {
             state: ObjectVersionState::Uploading {
                 multipart: true,
                 encryption: ObjectVersionEncryption::Plaintext {
-                    inner: ObjectVersionMetaInner {
-                        headers: vec![],
-                        checksum: None,
-                    },
+                    headers: ObjectVersionHeaders(vec![]),
                 },
-                checksum_algorithm: None,
             },
         }
     }
@@ -1179,7 +1135,6 @@ mod tests {
                     version: uuid,
                     size: Some(3),
                     etag: Some("etag1".into()),
-                    checksum: None,
                 },
             ),
             (
@@ -1191,7 +1146,6 @@ mod tests {
                     version: uuid,
                     size: None,
                     etag: None,
-                    checksum: None,
                 },
             ),
             (
@@ -1203,7 +1157,6 @@ mod tests {
                     version: uuid,
                     size: Some(10),
                     etag: Some("etag2".into()),
-                    checksum: None,
                 },
             ),
             (
@@ -1215,7 +1168,6 @@ mod tests {
                     version: uuid,
                     size: Some(7),
                     etag: Some("etag3".into()),
-                    checksum: None,
                 },
             ),
             (
@@ -1227,7 +1179,6 @@ mod tests {
                     version: uuid,
                     size: Some(5),
                     etag: Some("etag4".into()),
-                    checksum: None,
                 },
             ),
         ];
@@ -1266,14 +1217,12 @@ mod tests {
                 etag: "etag1",
                 timestamp: TS,
                 part_number: 1,
-                size: 3,
-                checksum: None,
+                size: 3
             },
             PartInfo {
                 etag: "etag2",
                 timestamp: TS,
                 part_number: 3,
-                checksum: None,
                 size: 10
             },
         ]
@@ -1289,14 +1238,12 @@ mod tests {
             PartInfo {
                 etag: "etag3",
                 timestamp: TS,
-                checksum: None,
                 part_number: 5,
                 size: 7
             },
             PartInfo {
                 etag: "etag4",
                 timestamp: TS,
-                checksum: None,
                 part_number: 8,
                 size: 5
             },
@@ -1320,28 +1267,24 @@ mod tests {
             PartInfo {
                 etag: "etag1",
                 timestamp: TS,
-                checksum: None,
                 part_number: 1,
                 size: 3
             },
             PartInfo {
                 etag: "etag2",
                 timestamp: TS,
-                checksum: None,
                 part_number: 3,
                 size: 10
             },
             PartInfo {
                 etag: "etag3",
                 timestamp: TS,
-                checksum: None,
                 part_number: 5,
                 size: 7
             },
             PartInfo {
                 etag: "etag4",
                 timestamp: TS,
-                checksum: None,
                 part_number: 8,
                 size: 5
             },
```
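The removed `PartItem` fields show how checksums travel in ListParts responses: the raw digest bytes are base64-encoded with the same `BASE64_STANDARD` engine the file already imports (not hex-encoded, as ETags are). For instance:

```rust
// How the removed code renders a raw checksum value for the XML response.
use base64::prelude::*;

fn main() {
    // A hypothetical CRC32 value, as its 4 big-endian bytes.
    let crc32: [u8; 4] = 0xcafe_babe_u32.to_be_bytes();
    // Checksum values are transported base64-encoded.
    assert_eq!(BASE64_STANDARD.encode(crc32), "yv66vg==");
}
```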
```diff
@@ -13,7 +13,6 @@ mod post_object;
 mod put;
 mod website;
 
-mod checksum;
 mod encryption;
 mod router;
 pub mod xml;
```
```diff
@@ -1,10 +1,9 @@
 use std::collections::HashMap;
-use std::convert::TryInto;
 use std::sync::Arc;
 
-use base64::prelude::*;
 use futures::prelude::*;
 use hyper::{Request, Response};
+use md5::{Digest as Md5Digest, Md5};
 
 use garage_table::*;
 use garage_util::data::*;
@@ -17,7 +16,6 @@ use garage_model::s3::version_table::*;
 
 use crate::helpers::*;
 use crate::s3::api_server::{ReqBody, ResBody};
-use crate::s3::checksum::*;
 use crate::s3::encryption::EncryptionParams;
 use crate::s3::error::*;
 use crate::s3::put::*;
@@ -43,16 +41,10 @@ pub async fn handle_create_multipart_upload(
     let timestamp = next_timestamp(existing_object.as_ref());
 
     let headers = get_headers(req.headers())?;
-    let meta = ObjectVersionMetaInner {
-        headers,
-        checksum: None,
-    };
 
     // Determine whether object should be encrypted, and if so the key
     let encryption = EncryptionParams::new_from_headers(&garage, req.headers())?;
-    let object_encryption = encryption.encrypt_meta(meta)?;
-
-    let checksum_algorithm = request_checksum_algorithm(req.headers())?;
+    let object_encryption = encryption.encrypt_headers(headers)?;
 
     // Create object in object table
     let object_version = ObjectVersion {
@@ -61,7 +53,6 @@ pub async fn handle_create_multipart_upload(
         state: ObjectVersionState::Uploading {
             multipart: true,
             encryption: object_encryption,
-            checksum_algorithm,
         },
     };
     let object = Object::new(*bucket_id, key.to_string(), vec![object_version]);
@@ -99,13 +90,9 @@ pub async fn handle_put_part(
 
     let upload_id = decode_upload_id(upload_id)?;
 
-    let expected_checksums = ExpectedChecksums {
-        md5: match req.headers().get("content-md5") {
-            Some(x) => Some(x.to_str()?.to_string()),
-            None => None,
-        },
-        sha256: content_sha256,
-        extra: request_checksum_value(req.headers())?,
+    let content_md5 = match req.headers().get("content-md5") {
+        Some(x) => Some(x.to_str()?.to_string()),
+        None => None,
     };
 
     // Read first chuck, and at the same time try to get object to see if it exists
@@ -119,12 +106,8 @@ pub async fn handle_put_part(
         futures::try_join!(get_upload(&ctx, &key, &upload_id), chunker.next(),)?;
 
     // Check encryption params
-    let (object_encryption, checksum_algorithm) = match object_version.state {
-        ObjectVersionState::Uploading {
-            encryption,
-            checksum_algorithm,
-            ..
-        } => (encryption, checksum_algorithm),
+    let object_encryption = match object_version.state {
+        ObjectVersionState::Uploading { encryption, .. } => encryption,
         _ => unreachable!(),
     };
     let (encryption, _) =
@@ -155,9 +138,7 @@ pub async fn handle_put_part(
         mpu_part_key,
         MpuPart {
             version: version_uuid,
-            // all these are filled in later, at the end of this function
             etag: None,
-            checksum: None,
             size: None,
         },
     );
@@ -171,31 +152,32 @@ pub async fn handle_put_part(
     garage.version_table.insert(&version).await?;
 
     // Copy data to version
-    let checksummer =
-        Checksummer::init(&expected_checksums, !encryption.is_encrypted()).add(checksum_algorithm);
-    let (total_size, checksums, _) = read_and_put_blocks(
+    let (total_size, data_md5sum, data_sha256sum, _) = read_and_put_blocks(
         &ctx,
         &version,
         encryption,
         part_number,
         first_block,
         &mut chunker,
-        checksummer,
     )
     .await?;
 
     // Verify that checksums map
-    checksums.verify(&expected_checksums)?;
+    ensure_checksum_matches(
+        &data_md5sum,
+        data_sha256sum,
+        content_md5.as_deref(),
+        content_sha256,
+    )?;
 
     // Store part etag in version
-    let etag = encryption.etag_from_md5(&checksums.md5);
+    let etag = encryption.etag_from_md5(&data_md5sum);
 
     mpu.parts.put(
         mpu_part_key,
         MpuPart {
             version: version_uuid,
             etag: Some(etag.clone()),
-            checksum: checksums.extract(checksum_algorithm),
             size: Some(total_size),
         },
     );
@@ -207,7 +189,6 @@ pub async fn handle_put_part(
 
     let mut resp = Response::builder().header("ETag", format!("\"{}\"", etag));
     encryption.add_response_headers(&mut resp);
-    let resp = add_checksum_response_headers(&expected_checksums.extra, resp);
     Ok(resp.body(empty_body())?)
 }
 
@@ -255,11 +236,10 @@ pub async fn handle_complete_multipart_upload(
         bucket_name,
         ..
     } = &ctx;
-    let (req_head, req_body) = req.into_parts();
 
-    let expected_checksum = request_checksum_value(&req_head.headers)?;
-
-    let body = http_body_util::BodyExt::collect(req_body).await?.to_bytes();
+    let body = http_body_util::BodyExt::collect(req.into_body())
+        .await?
+        .to_bytes();
 
     if let Some(content_sha256) = content_sha256 {
         verify_signed_content(content_sha256, &body[..])?;
@@ -283,12 +263,8 @@ pub async fn handle_complete_multipart_upload(
         return Err(Error::bad_request("No data was uploaded"));
     }
 
-    let (object_encryption, checksum_algorithm) = match object_version.state {
-        ObjectVersionState::Uploading {
-            encryption,
-            checksum_algorithm,
-            ..
-        } => (encryption, checksum_algorithm),
+    let object_encryption = match object_version.state {
+        ObjectVersionState::Uploading { encryption, .. } => encryption,
         _ => unreachable!(),
     };
 
@@ -316,13 +292,6 @@ pub async fn handle_complete_multipart_upload(
     for req_part in body_list_of_parts.iter() {
         match have_parts.get(&req_part.part_number) {
             Some(part) if part.etag.as_ref() == Some(&req_part.etag) && part.size.is_some() => {
-                // alternative version: if req_part.checksum.is_some() && part.checksum != req_part.checksum {
-                if part.checksum != req_part.checksum {
-                    return Err(Error::InvalidDigest(format!(
-                        "Invalid checksum for part {}: in request = {:?}, uploaded part = {:?}",
-                        req_part.part_number, req_part.checksum, part.checksum
-                    )));
-                }
                 parts.push(*part)
             }
             _ => return Err(Error::InvalidPart),
@@ -370,23 +339,18 @@ pub async fn handle_complete_multipart_upload(
     });
     garage.block_ref_table.insert_many(block_refs).await?;
 
-    // Calculate checksum and etag of final object
+    // Calculate etag of final object
     // To understand how etags are calculated, read more here:
-    // https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
     // https://teppen.io/2018/06/23/aws_s3_etags/
-    let mut checksummer = MultipartChecksummer::init(checksum_algorithm);
+    let mut etag_md5_hasher = Md5::new();
     for part in parts.iter() {
-        checksummer.update(part.etag.as_ref().unwrap(), part.checksum)?;
+        etag_md5_hasher.update(part.etag.as_ref().unwrap().as_bytes());
     }
-    let (checksum_md5, checksum_extra) = checksummer.finalize();
-
-    if expected_checksum.is_some() && checksum_extra != expected_checksum {
-        return Err(Error::InvalidDigest(
-            "Failed to validate x-amz-checksum-*".into(),
-        ));
-    }
-
-    let etag = format!("{}-{}", hex::encode(&checksum_md5[..]), parts.len());
+    let etag = format!(
+        "{}-{}",
+        hex::encode(etag_md5_hasher.finalize()),
+        parts.len()
+    );
 
     // Calculate total size of final object
     let total_size = parts.iter().map(|x| x.size.unwrap()).sum();
@@ -399,20 +363,6 @@ pub async fn handle_complete_multipart_upload(
         return Err(e);
     }
 
-    // If there is a checksum algorithm, update metadata with checksum
-    let object_encryption = match checksum_algorithm {
-        None => object_encryption,
-        Some(_) => {
-            let (encryption, meta) =
-                EncryptionParams::check_decrypt(&garage, &req_head.headers, &object_encryption)?;
-            let new_meta = ObjectVersionMetaInner {
-                headers: meta.into_owned().headers,
-                checksum: checksum_extra,
-            };
-            encryption.encrypt_meta(new_meta)?
-        }
-    };
-
     // Write final object version
     object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock(
         ObjectVersionMeta {
@@ -433,28 +383,10 @@ pub async fn handle_complete_multipart_upload(
         bucket: s3_xml::Value(bucket_name.to_string()),
         key: s3_xml::Value(key),
         etag: s3_xml::Value(format!("\"{}\"", etag)),
-        checksum_crc32: match &checksum_extra {
-            Some(ChecksumValue::Crc32(x)) => Some(s3_xml::Value(BASE64_STANDARD.encode(&x))),
-            _ => None,
-        },
-        checksum_crc32c: match &checksum_extra {
-            Some(ChecksumValue::Crc32c(x)) => Some(s3_xml::Value(BASE64_STANDARD.encode(&x))),
-            _ => None,
-        },
-        checksum_sha1: match &checksum_extra {
-            Some(ChecksumValue::Sha1(x)) => Some(s3_xml::Value(BASE64_STANDARD.encode(&x))),
-            _ => None,
-        },
-        checksum_sha256: match &checksum_extra {
-            Some(ChecksumValue::Sha256(x)) => Some(s3_xml::Value(BASE64_STANDARD.encode(&x))),
-            _ => None,
-        },
     };
     let xml = s3_xml::to_xml_with_header(&result)?;
 
-    let resp = Response::builder();
-    let resp = add_checksum_response_headers(&expected_checksum, resp);
-    Ok(resp.body(string_body(xml))?)
+    Ok(Response::new(string_body(xml)))
 }
 
 pub async fn handle_abort_multipart_upload(
@@ -523,7 +455,6 @@ pub fn decode_upload_id(id: &str) -> Result<Uuid, Error> {
 struct CompleteMultipartUploadPart {
     etag: String,
     part_number: u64,
-    checksum: Option<ChecksumValue>,
 }
 
 fn parse_complete_multipart_upload_body(
@@ -549,41 +480,9 @@ fn parse_complete_multipart_upload_body(
                 .children()
                 .find(|e| e.has_tag_name("PartNumber"))?
                 .text()?;
-            let checksum = if let Some(crc32) =
-                item.children().find(|e| e.has_tag_name("ChecksumCRC32"))
-            {
-                Some(ChecksumValue::Crc32(
-                    BASE64_STANDARD.decode(crc32.text()?).ok()?[..]
-                        .try_into()
-                        .ok()?,
-                ))
-            } else if let Some(crc32c) = item.children().find(|e| e.has_tag_name("ChecksumCRC32C"))
-            {
-                Some(ChecksumValue::Crc32c(
-                    BASE64_STANDARD.decode(crc32c.text()?).ok()?[..]
-                        .try_into()
-                        .ok()?,
-                ))
-            } else if let Some(sha1) = item.children().find(|e| e.has_tag_name("ChecksumSHA1")) {
-                Some(ChecksumValue::Sha1(
-                    BASE64_STANDARD.decode(sha1.text()?).ok()?[..]
-                        .try_into()
-                        .ok()?,
-                ))
-            } else if let Some(sha256) = item.children().find(|e| e.has_tag_name("ChecksumSHA256"))
-            {
-                Some(ChecksumValue::Sha256(
-                    BASE64_STANDARD.decode(sha256.text()?).ok()?[..]
-                        .try_into()
-                        .ok()?,
-                ))
-            } else {
-                None
-            };
             parts.push(CompleteMultipartUploadPart {
                 etag: etag.trim_matches('"').to_string(),
                 part_number: part_number.parse().ok()?,
-                checksum,
             });
         } else {
             return None;
```
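Both sides of this diff compute the multipart ETag the same way — hash the part ETags and append `-<part count>` — and only the checksum plumbing around it differs. A sketch of the rule as the restored code applies it, hashing the ETag strings' bytes (assumes the md-5 and hex crates, which the file's imports already pull in):

```rust
// Multipart ETag rule as applied by the restored code path above.
use md5::{Digest, Md5};

fn multipart_etag(part_etags: &[&str]) -> String {
    let mut hasher = Md5::new();
    for etag in part_etags {
        // The code feeds the hex ETag strings' bytes into the hasher.
        hasher.update(etag.as_bytes());
    }
    format!("{}-{}", hex::encode(hasher.finalize()), part_etags.len())
}

fn main() {
    // Two hypothetical part ETags -> a "-2" suffix on the final ETag.
    let etag = multipart_etag(&[
        "9bb58f26192e4ba00f01e2e7b136bbd8",
        "5d41402abc4b2a76b9719d911017c592",
    ]);
    assert!(etag.ends_with("-2"));
}
```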
@ -14,15 +14,13 @@ use multer::{Constraints, Multipart, SizeLimit};
|
||||||
use serde::Deserialize;
|
use serde::Deserialize;
|
||||||
|
|
||||||
use garage_model::garage::Garage;
|
use garage_model::garage::Garage;
|
||||||
use garage_model::s3::object_table::*;
|
|
||||||
|
|
||||||
use crate::helpers::*;
|
use crate::helpers::*;
|
||||||
use crate::s3::api_server::ResBody;
|
use crate::s3::api_server::ResBody;
|
||||||
use crate::s3::checksum::*;
|
|
||||||
use crate::s3::cors::*;
|
use crate::s3::cors::*;
|
||||||
use crate::s3::encryption::EncryptionParams;
|
use crate::s3::encryption::EncryptionParams;
|
||||||
use crate::s3::error::*;
|
use crate::s3::error::*;
|
||||||
use crate::s3::put::{get_headers, save_stream, ChecksumMode};
|
use crate::s3::put::{get_headers, save_stream};
|
||||||
use crate::s3::xml as s3_xml;
|
use crate::s3::xml as s3_xml;
|
||||||
use crate::signature::payload::{verify_v4, Authorization};
|
use crate::signature::payload::{verify_v4, Authorization};
|
||||||
|
|
||||||
|
@ -100,6 +98,10 @@ pub async fn handle_post_object(
|
||||||
.ok_or_bad_request("No policy was provided")?
|
.ok_or_bad_request("No policy was provided")?
|
||||||
.to_str()?;
|
.to_str()?;
|
||||||
let authorization = Authorization::parse_form(¶ms)?;
|
let authorization = Authorization::parse_form(¶ms)?;
|
||||||
|
let content_md5 = params
|
||||||
|
.get("content-md5")
|
||||||
|
.map(HeaderValue::to_str)
|
||||||
|
.transpose()?;
|
||||||
|
|
||||||
let key = if key.contains("${filename}") {
|
let key = if key.contains("${filename}") {
|
||||||
// if no filename is provided, don't replace. This matches the behavior of AWS.
|
// if no filename is provided, don't replace. This matches the behavior of AWS.
|
||||||
|
@ -224,21 +226,6 @@ pub async fn handle_post_object(
|
||||||
|
|
||||||
let headers = get_headers(¶ms)?;
|
let headers = get_headers(¶ms)?;
|
||||||
|
|
||||||
let expected_checksums = ExpectedChecksums {
|
|
||||||
md5: params
|
|
||||||
.get("content-md5")
|
|
||||||
.map(HeaderValue::to_str)
|
|
||||||
.transpose()?
|
|
||||||
.map(str::to_string),
|
|
||||||
sha256: None,
|
|
||||||
extra: request_checksum_algorithm_value(¶ms)?,
|
|
||||||
};
|
|
||||||
|
|
||||||
let meta = ObjectVersionMetaInner {
|
|
||||||
headers,
|
|
||||||
checksum: expected_checksums.extra,
|
|
||||||
};
|
|
||||||
|
|
||||||
let encryption = EncryptionParams::new_from_headers(&garage, ¶ms)?;
|
let encryption = EncryptionParams::new_from_headers(&garage, ¶ms)?;
|
||||||
|
|
||||||
let stream = file_field.map(|r| r.map_err(Into::into));
|
let stream = file_field.map(|r| r.map_err(Into::into));
|
||||||
|
@@ -252,11 +239,12 @@ pub async fn handle_post_object(
 
 	let res = save_stream(
 		&ctx,
-		meta,
+		headers,
 		encryption,
 		StreamLimiter::new(stream, conditions.content_length),
 		&key,
-		ChecksumMode::Verify(&expected_checksums),
+		content_md5.map(str::to_string),
+		None,
 	)
 	.await?;
 
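A note on the hunk above: the db-no-unsa side goes back to pulling the content-md5 form field out of the parsed POST parameters directly. A minimal sketch of that extraction pattern using the http crate's HeaderValue — the map type and function name here are hypothetical stand-ins, not this repository's types:

    use std::collections::HashMap;

    use http::HeaderValue;

    // Sketch: extract an optional `content-md5` field from parsed POST form
    // parameters, propagating UTF-8 errors instead of silently dropping them.
    fn content_md5_of(
        params: &HashMap<String, HeaderValue>,
    ) -> Result<Option<String>, http::header::ToStrError> {
        params
            .get("content-md5")
            .map(HeaderValue::to_str) // Option<Result<&str, _>>
            .transpose() // Result<Option<&str>, _>
            .map(|opt| opt.map(str::to_string))
    }

    fn main() {
        let mut params = HashMap::new();
        params.insert(
            "content-md5".to_string(),
            HeaderValue::from_static("XrY7u+Ae7tCTyyK7j1rNww=="),
        );
        assert_eq!(
            content_md5_of(&params).unwrap().as_deref(),
            Some("XrY7u+Ae7tCTyyK7j1rNww==")
        );
    }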
@@ -1,9 +1,12 @@
 use std::collections::HashMap;
 use std::sync::Arc;
 
+use base64::prelude::*;
 use futures::prelude::*;
 use futures::stream::FuturesOrdered;
 use futures::try_join;
+use md5::{digest::generic_array::*, Digest as Md5Digest, Md5};
+use sha2::Sha256;
 
 use tokio::sync::mpsc;
 
@@ -19,6 +22,7 @@ use opentelemetry::{
 use garage_net::bytes_buf::BytesBuf;
 use garage_rpc::rpc_helper::OrderTag;
 use garage_table::*;
+use garage_util::async_hash::*;
 use garage_util::data::*;
 use garage_util::error::Error as GarageError;
 use garage_util::time::*;
@@ -32,22 +36,16 @@ use garage_model::s3::version_table::*;
 
 use crate::helpers::*;
 use crate::s3::api_server::{ReqBody, ResBody};
-use crate::s3::checksum::*;
 use crate::s3::encryption::EncryptionParams;
 use crate::s3::error::*;
 
 const PUT_BLOCKS_MAX_PARALLEL: usize = 3;
 
-pub(crate) struct SaveStreamResult {
-	pub(crate) version_uuid: Uuid,
-	pub(crate) version_timestamp: u64,
+pub struct SaveStreamResult {
+	pub version_uuid: Uuid,
+	pub version_timestamp: u64,
 	/// Etag WITHOUT THE QUOTES (just the hex value)
-	pub(crate) etag: String,
-}
-
-pub(crate) enum ChecksumMode<'a> {
-	Verify(&'a ExpectedChecksums),
-	Calculate(Option<ChecksumAlgorithm>),
+	pub etag: String,
 }
 
 pub async fn handle_put(
@@ -60,32 +58,24 @@ pub async fn handle_put(
 	let headers = get_headers(req.headers())?;
 	debug!("Object headers: {:?}", headers);
 
-	let expected_checksums = ExpectedChecksums {
-		md5: match req.headers().get("content-md5") {
-			Some(x) => Some(x.to_str()?.to_string()),
-			None => None,
-		},
-		sha256: content_sha256,
-		extra: request_checksum_value(req.headers())?,
-	};
-
-	let meta = ObjectVersionMetaInner {
-		headers,
-		checksum: expected_checksums.extra,
-	};
-
 	// Determine whether object should be encrypted, and if so the key
 	let encryption = EncryptionParams::new_from_headers(&ctx.garage, req.headers())?;
 
+	let content_md5 = match req.headers().get("content-md5") {
+		Some(x) => Some(x.to_str()?.to_string()),
+		None => None,
+	};
+
 	let stream = body_stream(req.into_body());
 
 	let res = save_stream(
 		&ctx,
-		meta,
+		headers,
 		encryption,
 		stream,
 		key,
-		ChecksumMode::Verify(&expected_checksums),
+		content_md5,
+		content_sha256,
 	)
 	.await?;
 
@@ -93,17 +83,17 @@ pub async fn handle_put(
 		.header("x-amz-version-id", hex::encode(res.version_uuid))
 		.header("ETag", format!("\"{}\"", res.etag));
 	encryption.add_response_headers(&mut resp);
-	let resp = add_checksum_response_headers(&expected_checksums.extra, resp);
 	Ok(resp.body(empty_body())?)
 }
 
 pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
 	ctx: &ReqCtx,
-	mut meta: ObjectVersionMetaInner,
+	headers: ObjectVersionHeaders,
 	encryption: EncryptionParams,
 	body: S,
 	key: &String,
-	checksum_mode: ChecksumMode<'_>,
+	content_md5: Option<String>,
+	content_sha256: Option<FixedBytes32>,
 ) -> Result<SaveStreamResult, Error> {
 	let ReqCtx {
 		garage, bucket_id, ..
@@ -117,36 +107,32 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
 
 	let first_block = first_block_opt.unwrap_or_default();
 
+	let object_encryption = encryption.encrypt_headers(headers)?;
+
 	// Generate identity of new version
 	let version_uuid = gen_uuid();
 	let version_timestamp = next_timestamp(existing_object.as_ref());
 
-	let mut checksummer = match checksum_mode {
-		ChecksumMode::Verify(expected) => Checksummer::init(expected, !encryption.is_encrypted()),
-		ChecksumMode::Calculate(algo) => {
-			Checksummer::init(&Default::default(), !encryption.is_encrypted()).add(algo)
-		}
-	};
-
 	// If body is small enough, store it directly in the object table
 	// as "inline data". We can then return immediately.
 	if first_block.len() < INLINE_THRESHOLD {
-		checksummer.update(&first_block);
-		let checksums = checksummer.finalize();
+		let mut md5sum = Md5::new();
+		md5sum.update(&first_block[..]);
+		let data_md5sum = md5sum.finalize();
 
-		match checksum_mode {
-			ChecksumMode::Verify(expected) => {
-				checksums.verify(&expected)?;
-			}
-			ChecksumMode::Calculate(algo) => {
-				meta.checksum = checksums.extract(algo);
-			}
-		};
+		let data_sha256sum = sha256sum(&first_block[..]);
+		ensure_checksum_matches(
+			&data_md5sum,
+			data_sha256sum,
+			content_md5.as_deref(),
+			content_sha256,
+		)?;
 
 		let size = first_block.len() as u64;
 		check_quotas(ctx, size, existing_object.as_ref()).await?;
 
-		let etag = encryption.etag_from_md5(&checksums.md5);
+		let etag = encryption.etag_from_md5(&data_md5sum);
 		let inline_data = encryption.encrypt_blob(&first_block)?.to_vec();
 
 		let object_version = ObjectVersion {
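The hunk above is the small-object fast path: when the whole body fits under INLINE_THRESHOLD, Garage hashes it, checks quotas, and stores it inline in the object table instead of writing a separate data block. A rough sketch of just that decision, with made-up types standing in for the real object-table structures:

    // Hypothetical stand-ins for the real object-table types.
    enum StoredData {
        /// Body small enough: keep it in the metadata table itself.
        Inline(Vec<u8>),
        /// Otherwise: store only a reference to the first data block.
        FirstBlock([u8; 32]),
    }

    const INLINE_THRESHOLD: usize = 3072; // assumption; the real constant lives elsewhere

    fn store(body: Vec<u8>, blake2_of_first_block: [u8; 32]) -> StoredData {
        if body.len() < INLINE_THRESHOLD {
            StoredData::Inline(body)
        } else {
            StoredData::FirstBlock(blake2_of_first_block)
        }
    }

    fn main() {
        match store(vec![0u8; 100], [0u8; 32]) {
            StoredData::Inline(d) => println!("stored {} bytes inline", d.len()),
            StoredData::FirstBlock(_) => println!("stored as separate blocks"),
        }
    }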
@@ -154,7 +140,7 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
 			timestamp: version_timestamp,
 			state: ObjectVersionState::Complete(ObjectVersionData::Inline(
 				ObjectVersionMeta {
-					encryption: encryption.encrypt_meta(meta)?,
+					encryption: object_encryption,
 					size,
 					etag: etag.clone(),
 				},
@@ -189,8 +175,7 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
 		uuid: version_uuid,
 		timestamp: version_timestamp,
 		state: ObjectVersionState::Uploading {
-			encryption: encryption.encrypt_meta(meta.clone())?,
-			checksum_algorithm: None, // don't care; overwritten later
+			encryption: object_encryption.clone(),
 			multipart: false,
 		},
 	};
@@ -211,37 +196,25 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
 	);
 	garage.version_table.insert(&version).await?;
 
-	// Transfer data
-	let (total_size, checksums, first_block_hash) = read_and_put_blocks(
-		ctx,
-		&version,
-		encryption,
-		1,
-		first_block,
-		&mut chunker,
-		checksummer,
-	)
-	.await?;
+	// Transfer data and verify checksum
+	let (total_size, data_md5sum, data_sha256sum, first_block_hash) =
+		read_and_put_blocks(ctx, &version, encryption, 1, first_block, &mut chunker).await?;
 
-	// Verify checksums are ok / add calculated checksum to metadata
-	match checksum_mode {
-		ChecksumMode::Verify(expected) => {
-			checksums.verify(&expected)?;
-		}
-		ChecksumMode::Calculate(algo) => {
-			meta.checksum = checksums.extract(algo);
-		}
-	};
-
-	// Verify quotas are respsected
+	ensure_checksum_matches(
+		&data_md5sum,
+		data_sha256sum,
+		content_md5.as_deref(),
+		content_sha256,
+	)?;
 	check_quotas(ctx, total_size, existing_object.as_ref()).await?;
 
 	// Save final object state, marked as Complete
-	let etag = encryption.etag_from_md5(&checksums.md5);
+	let etag = encryption.etag_from_md5(&data_md5sum);
 
 	object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock(
 		ObjectVersionMeta {
-			encryption: encryption.encrypt_meta(meta)?,
+			encryption: object_encryption,
 			size: total_size,
 			etag: etag.clone(),
 		},
@@ -261,6 +234,33 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
 	})
 }
 
+/// Validate MD5 sum against content-md5 header
+/// and sha256sum against signed content-sha256
+pub(crate) fn ensure_checksum_matches(
+	data_md5sum: &[u8],
+	data_sha256sum: garage_util::data::FixedBytes32,
+	content_md5: Option<&str>,
+	content_sha256: Option<garage_util::data::FixedBytes32>,
+) -> Result<(), Error> {
+	if let Some(expected_sha256) = content_sha256 {
+		if expected_sha256 != data_sha256sum {
+			return Err(Error::bad_request(
+				"Unable to validate x-amz-content-sha256",
+			));
+		} else {
+			trace!("Successfully validated x-amz-content-sha256");
+		}
+	}
+	if let Some(expected_md5) = content_md5 {
+		if expected_md5.trim_matches('"') != BASE64_STANDARD.encode(data_md5sum) {
+			return Err(Error::bad_request("Unable to validate content-md5"));
+		} else {
+			trace!("Successfully validated content-md5");
+		}
+	}
+	Ok(())
+}
+
 /// Check that inserting this object with this size doesn't exceed bucket quotas
 pub(crate) async fn check_quotas(
 	ctx: &ReqCtx,
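The reintroduced ensure_checksum_matches implements the classic RFC 1864 check: Content-MD5 carries the base64 encoding of the raw 16-byte MD5 digest (possibly wrapped in quotes), while x-amz-content-sha256 is compared as raw bytes. A self-contained sketch of the MD5 half, using the same md-5 and base64 crates the function relies on:

    use base64::prelude::*;
    use md5::{Digest, Md5};

    // Returns true iff `header` (a Content-MD5 value, possibly quoted)
    // matches the MD5 digest of `body`, per RFC 1864.
    fn content_md5_ok(body: &[u8], header: &str) -> bool {
        let mut h = Md5::new();
        h.update(body);
        header.trim_matches('"') == BASE64_STANDARD.encode(h.finalize())
    }

    fn main() {
        // base64(md5("hello world")) == "XrY7u+Ae7tCTyyK7j1rNww=="
        assert!(content_md5_ok(b"hello world", "XrY7u+Ae7tCTyyK7j1rNww=="));
    }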
@@ -332,8 +332,7 @@ pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
 	part_number: u64,
 	first_block: Bytes,
 	chunker: &mut StreamChunker<S>,
-	checksummer: Checksummer,
-) -> Result<(u64, Checksums, Hash), Error> {
+) -> Result<(u64, GenericArray<u8, typenum::U16>, Hash, Hash), Error> {
 	let tracer = opentelemetry::global::tracer("garage");
 
 	let (block_tx, mut block_rx) = mpsc::channel::<Result<Bytes, Error>>(2);
@@ -361,20 +360,20 @@ pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
 
 	let (block_tx2, mut block_rx2) = mpsc::channel::<Result<Bytes, Error>>(1);
 	let hash_stream = async {
-		let mut checksummer = checksummer;
+		let md5hasher = AsyncHasher::<Md5>::new();
+		let sha256hasher = AsyncHasher::<Sha256>::new();
 		while let Some(next) = block_rx.recv().await {
 			match next {
 				Ok(block) => {
 					block_tx2.send(Ok(block.clone())).await?;
-					checksummer = tokio::task::spawn_blocking(move || {
-						checksummer.update(&block);
-						checksummer
-					})
+					futures::future::join(
+						md5hasher.update(block.clone()),
+						sha256hasher.update(block.clone()),
+					)
 					.with_context(Context::current_with_span(
 						tracer.start("Hash block (md5, sha256)"),
 					))
-					.await
-					.unwrap()
+					.await;
 				}
 				Err(e) => {
 					block_tx2.send(Err(e)).await?;
@@ -383,7 +382,10 @@ pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
 			}
 		}
 		drop(block_tx2);
-		Ok::<_, mpsc::error::SendError<_>>(checksummer)
+		Ok::<_, mpsc::error::SendError<_>>(futures::join!(
+			md5hasher.finalize(),
+			sha256hasher.finalize()
+		))
 	};
 
 	let (block_tx3, mut block_rx3) = mpsc::channel::<Result<(Bytes, u64, Hash), Error>>(1);
@@ -393,28 +395,33 @@ pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
 			match next {
 				Ok(block) => {
 					let unencrypted_len = block.len() as u64;
-					let res = tokio::task::spawn_blocking(move || {
-						let block = encryption.encrypt_block(block)?;
-						let hash = blake2sum(&block);
-						Ok((block, hash))
-					})
+					let block = if encryption.is_encrypted() {
+						let res =
+							tokio::task::spawn_blocking(move || encryption.encrypt_block(block))
 					.with_context(Context::current_with_span(
-						tracer.start("Encrypt and hash (blake2) block"),
+						tracer.start("Encrypt block"),
 					))
 					.await
 					.unwrap();
 					match res {
-						Ok((block, hash)) => {
-							if first_block_hash.is_none() {
-								first_block_hash = Some(hash);
-							}
-							block_tx3.send(Ok((block, unencrypted_len, hash))).await?;
-						}
+						Ok(b) => b,
 						Err(e) => {
 							block_tx3.send(Err(e)).await?;
 							break;
 						}
 					}
+					} else {
+						block
+					};
+					let hash = async_blake2sum(block.clone())
+						.with_context(Context::current_with_span(
+							tracer.start("Hash block (blake2)"),
+						))
+						.await;
+					if first_block_hash.is_none() {
+						first_block_hash = Some(hash);
+					}
+					block_tx3.send(Ok((block, unencrypted_len, hash))).await?;
 				}
 				Err(e) => {
 					block_tx3.send(Err(e)).await?;
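Both sides of read_and_put_blocks use the same pipeline shape: blocks flow through mpsc channels, and one task forwards each block downstream while hashing it on the side, so hashing, encryption, and network writes overlap. A stripped-down sketch of that fan-through pattern (error handling and the extra stages omitted):

    use sha2::{Digest, Sha256};
    use tokio::sync::mpsc;

    #[tokio::main]
    async fn main() {
        let (tx_in, mut rx_in) = mpsc::channel::<Vec<u8>>(2);
        let (tx_out, mut rx_out) = mpsc::channel::<Vec<u8>>(1);

        // Stage 1: hash every block, then forward it downstream unchanged.
        let hasher = tokio::spawn(async move {
            let mut sha256 = Sha256::new();
            while let Some(block) = rx_in.recv().await {
                sha256.update(&block);
                if tx_out.send(block).await.is_err() {
                    break; // downstream hung up
                }
            }
            sha256.finalize()
        });

        // Stage 2: pretend to store the blocks (stand-in for encryption + RPC).
        let writer = tokio::spawn(async move {
            let mut total = 0usize;
            while let Some(block) = rx_out.recv().await {
                total += block.len();
            }
            total
        });

        for chunk in [b"hello ".to_vec(), b"world".to_vec()] {
            tx_in.send(chunk).await.unwrap();
        }
        drop(tx_in); // closing the input ends both stages

        let digest = hasher.await.unwrap();
        let total = writer.await.unwrap();
        let hex: String = digest.iter().map(|b| format!("{:02x}", b)).collect();
        println!("{} bytes, sha256 = {}", total, hex);
    }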
@@ -486,10 +493,12 @@ pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
 	let total_size = final_result?;
 	// unwrap here is ok, because if hasher failed, it is because something failed
 	// later in the pipeline which already caused a return at the ? on previous line
+	let (data_md5sum, data_sha256sum) = stream_hash_result.unwrap();
 	let first_block_hash = block_hash_result.unwrap();
-	let checksums = stream_hash_result.unwrap().finalize();
 
-	Ok((total_size, checksums, first_block_hash))
+	let data_sha256sum = Hash::try_from(&data_sha256sum[..]).unwrap();
+
+	Ok((total_size, data_md5sum, data_sha256sum, first_block_hash))
 }
 
 async fn put_block_and_meta(
@@ -600,7 +609,7 @@ impl Drop for InterruptedCleanup {
 
 // ============ helpers ============
 
-pub(crate) fn get_headers(headers: &HeaderMap<HeaderValue>) -> Result<HeaderList, Error> {
+pub(crate) fn get_headers(headers: &HeaderMap<HeaderValue>) -> Result<ObjectVersionHeaders, Error> {
 	let mut ret = Vec::new();
 
 	// Preserve standard headers
@@ -628,7 +637,7 @@ pub(crate) fn get_headers(headers: &HeaderMap<HeaderValue>) -> Result<HeaderList, Error> {
 		}
 	}
 
-	Ok(ret)
+	Ok(ObjectVersionHeaders(ret))
 }
 
 pub(crate) fn next_timestamp(existing_object: Option<&Object>) -> u64 {
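get_headers above only changes its return type between the two sides; the logic of copying a whitelist of standard headers plus all x-amz-meta-* headers stays the same. A rough sketch of that filtering, with the whitelist shortened to two entries for illustration:

    use http::header::{HeaderMap, CACHE_CONTROL, CONTENT_TYPE};

    // Keep a few standard headers plus every `x-amz-meta-*` header,
    // as (name, value) string pairs. Whitelist shortened for the example.
    fn filter_headers(headers: &HeaderMap) -> Vec<(String, String)> {
        let mut ret = Vec::new();
        for std_header in [CONTENT_TYPE, CACHE_CONTROL] {
            if let Some(v) = headers.get(&std_header) {
                if let Ok(v) = v.to_str() {
                    ret.push((std_header.to_string(), v.to_string()));
                }
            }
        }
        for (name, value) in headers.iter() {
            if name.as_str().starts_with("x-amz-meta-") {
                if let Ok(v) = value.to_str() {
                    ret.push((name.as_str().to_string(), v.to_string()));
                }
            }
        }
        ret
    }

    fn main() {
        let headers = HeaderMap::new();
        assert!(filter_headers(&headers).is_empty());
    }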
@@ -131,14 +131,6 @@ pub struct CompleteMultipartUploadResult {
 	pub key: Value,
 	#[serde(rename = "ETag")]
 	pub etag: Value,
-	#[serde(rename = "ChecksumCRC32")]
-	pub checksum_crc32: Option<Value>,
-	#[serde(rename = "ChecksumCRC32C")]
-	pub checksum_crc32c: Option<Value>,
-	#[serde(rename = "ChecksumSHA1")]
-	pub checksum_sha1: Option<Value>,
-	#[serde(rename = "ChecksumSHA256")]
-	pub checksum_sha256: Option<Value>,
 }
 
 #[derive(Debug, Serialize, PartialEq, Eq)]
@@ -205,14 +197,6 @@ pub struct PartItem {
 	pub part_number: IntValue,
 	#[serde(rename = "Size")]
 	pub size: IntValue,
-	#[serde(rename = "ChecksumCRC32")]
-	pub checksum_crc32: Option<Value>,
-	#[serde(rename = "ChecksumCRC32C")]
-	pub checksum_crc32c: Option<Value>,
-	#[serde(rename = "ChecksumSHA1")]
-	pub checksum_sha1: Option<Value>,
-	#[serde(rename = "ChecksumSHA256")]
-	pub checksum_sha256: Option<Value>,
 }
 
 #[derive(Debug, Serialize, PartialEq, Eq)]
@@ -516,10 +500,6 @@ mod tests {
 			bucket: Value("mybucket".to_string()),
 			key: Value("a/plop".to_string()),
 			etag: Value("\"3858f62230ac3c915f300c664312c11f-9\"".to_string()),
-			checksum_crc32: None,
-			checksum_crc32c: None,
-			checksum_sha1: Some(Value("ZJAnHyG8PeKz9tI8UTcHrJos39A=".into())),
-			checksum_sha256: None,
 		};
 		assert_eq!(
 			to_xml_with_header(&result)?,
@@ -529,7 +509,6 @@ mod tests {
             <Bucket>mybucket</Bucket>\
             <Key>a/plop</Key>\
             <ETag>"3858f62230ac3c915f300c664312c11f-9"</ETag>\
-            <ChecksumSHA1>ZJAnHyG8PeKz9tI8UTcHrJos39A=</ChecksumSHA1>\
             </CompleteMultipartUploadResult>"
 		);
 		Ok(())
@@ -801,22 +780,12 @@ mod tests {
 					last_modified: Value("2010-11-10T20:48:34.000Z".to_string()),
 					part_number: IntValue(2),
 					size: IntValue(10485760),
-					checksum_crc32: None,
-					checksum_crc32c: None,
-					checksum_sha256: Some(Value(
-						"5RQ3A5uk0w7ojNjvegohch4JRBBGN/cLhsNrPzfv/hA=".into(),
-					)),
-					checksum_sha1: None,
 				},
 				PartItem {
 					etag: Value("\"aaaa18db4cc2f85cedef654fccc4a4x8\"".to_string()),
 					last_modified: Value("2010-11-10T20:48:33.000Z".to_string()),
 					part_number: IntValue(3),
 					size: IntValue(10485760),
-					checksum_sha256: None,
-					checksum_crc32c: None,
-					checksum_crc32: Some(Value("ZJAnHyG8=".into())),
-					checksum_sha1: None,
 				},
 			],
 			initiator: Initiator {
@@ -851,14 +820,12 @@ mod tests {
             <LastModified>2010-11-10T20:48:34.000Z</LastModified>\
             <PartNumber>2</PartNumber>\
             <Size>10485760</Size>\
-            <ChecksumSHA256>5RQ3A5uk0w7ojNjvegohch4JRBBGN/cLhsNrPzfv/hA=</ChecksumSHA256>\
             </Part>\
             <Part>\
             <ETag>"aaaa18db4cc2f85cedef654fccc4a4x8"</ETag>\
             <LastModified>2010-11-10T20:48:33.000Z</LastModified>\
             <PartNumber>3</PartNumber>\
             <Size>10485760</Size>\
-            <ChecksumCRC32>ZJAnHyG8=</ChecksumCRC32>\
             </Part>\
             <Initiator>\
             <DisplayName>umat-user-11116a31-17b5-4fb7-9df5-b288870f11xx</DisplayName>\
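Context for the "-9" suffix in the test's ETag above: for multipart uploads, S3-compatible servers form the ETag as the MD5 of the concatenated per-part MD5 digests, suffixed with the part count. A hedged sketch of that convention (this is the general S3 rule, not code from this repository):

    use md5::{Digest, Md5};

    // ETag for a multipart upload: md5(md5(part1) || md5(part2) || ...) + "-<n>"
    fn multipart_etag(parts: &[&[u8]]) -> String {
        let mut all_digests = Vec::new();
        for part in parts {
            let mut h = Md5::new();
            h.update(part);
            all_digests.extend_from_slice(&h.finalize());
        }
        let mut outer = Md5::new();
        outer.update(&all_digests);
        let hex: String = outer.finalize().iter().map(|b| format!("{:02x}", b)).collect();
        format!("{}-{}", hex, parts.len())
    }

    fn main() {
        println!("{}", multipart_etag(&[b"part one".as_slice(), b"part two".as_slice()]));
    }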
@@ -1,6 +1,6 @@
 [package]
 name = "garage_block"
-version = "1.0.0"
+version = "0.10.0"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -1,4 +1,3 @@
-use std::collections::HashMap;
 use std::path::PathBuf;
 
 use serde::{Deserialize, Serialize};
@@ -14,12 +13,9 @@ const DRIVE_NPART: usize = 1024;
 
 const HASH_DRIVE_BYTES: (usize, usize) = (2, 3);
 
-const MARKER_FILE_NAME: &str = "garage-marker";
-
 #[derive(Serialize, Deserialize, Debug, Clone)]
 pub(crate) struct DataLayout {
 	pub(crate) data_dirs: Vec<DataDir>,
-	markers: HashMap<PathBuf, String>,
 
 	/// Primary storage location (index in data_dirs) for each partition
 	/// = the location where the data is supposed to be, blocks are always
@@ -79,17 +75,16 @@ impl DataLayout {
 
 		Ok(Self {
 			data_dirs,
-			markers: HashMap::new(),
 			part_prim,
 			part_sec,
 		})
 	}
 
-	pub(crate) fn update(self, dirs: &DataDirEnum) -> Result<Self, Error> {
+	pub(crate) fn update(&mut self, dirs: &DataDirEnum) -> Result<(), Error> {
 		// Make list of new data directories, exit if nothing changed
 		let data_dirs = make_data_dirs(dirs)?;
 		if data_dirs == self.data_dirs {
-			return Ok(self);
+			return Ok(());
 		}
 
 		let total_cap = data_dirs.iter().filter_map(|x| x.capacity()).sum::<u64>();
@@ -219,43 +214,11 @@ impl DataLayout {
 		}
 
 		// Apply newly generated config
-		Ok(Self {
+		*self = Self {
 			data_dirs,
-			markers: self.markers,
 			part_prim,
 			part_sec,
-		})
-	}
-
-	pub(crate) fn check_markers(&mut self) -> Result<(), Error> {
-		let data_dirs = &self.data_dirs;
-		self.markers
-			.retain(|k, _| data_dirs.iter().any(|x| x.path == *k));
-
-		for dir in self.data_dirs.iter() {
-			let mut marker_path = dir.path.clone();
-			marker_path.push(MARKER_FILE_NAME);
-			let existing_marker = std::fs::read_to_string(&marker_path).ok();
-			match (existing_marker, self.markers.get(&dir.path)) {
-				(Some(m1), Some(m2)) => {
-					if m1 != *m2 {
-						return Err(Error::Message(format!("Mismatched content for marker file `{}` in data directory `{}`. If you moved data directories or changed their mountpoints, you should remove the `data_layout` file in Garage's metadata directory and restart Garage.", MARKER_FILE_NAME, dir.path.display())));
-					}
-				}
-				(None, Some(_)) => {
-					return Err(Error::Message(format!("Could not find expected marker file `{}` in data directory `{}`, make sure this data directory is mounted correctly.", MARKER_FILE_NAME, dir.path.display())));
-				}
-				(Some(mkr), None) => {
-					self.markers.insert(dir.path.clone(), mkr);
-				}
-				(None, None) => {
-					let mkr = hex::encode(garage_util::data::gen_uuid().as_slice());
-					std::fs::write(&marker_path, &mkr)?;
-					self.markers.insert(dir.path.clone(), mkr);
-				}
-			}
-		}
-
+		};
 		Ok(())
 	}
 
@@ -292,7 +255,6 @@ impl DataLayout {
 	pub(crate) fn without_secondary_locations(&self) -> Self {
 		Self {
 			data_dirs: self.data_dirs.clone(),
-			markers: self.markers.clone(),
 			part_prim: self.part_prim.clone(),
 			part_sec: self.part_sec.iter().map(|_| vec![]).collect::<Vec<_>>(),
 		}
@@ -360,12 +322,14 @@ fn make_data_dirs(dirs: &DataDirEnum) -> Result<Vec<DataDir>, Error> {
 fn dir_not_empty(path: &PathBuf) -> Result<bool, Error> {
 	for entry in std::fs::read_dir(&path)? {
 		let dir = entry?;
-		let ft = dir.file_type()?;
-		let name = dir.file_name().into_string().ok();
-		if ft.is_file() && name.as_deref() == Some(MARKER_FILE_NAME) {
-			return Ok(true);
-		}
-		if ft.is_dir() && name.and_then(|hex| hex::decode(&hex).ok()).is_some() {
+		if dir.file_type()?.is_dir()
+			&& dir
+				.file_name()
+				.into_string()
+				.ok()
+				.and_then(|hex| hex::decode(&hex).ok())
+				.is_some()
+		{
 			return Ok(true);
 		}
 	}
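The marker-file mechanism removed above is a classic guard against running with an unmounted or swapped data directory: write a random token into each data dir once, remember it in the persisted layout, and refuse to start if the two ever disagree. A minimal sketch of the idea, outside of Garage's own types (names and the dependency-free random token are assumptions of the sketch):

    use std::collections::HashMap;
    use std::fs;
    use std::io;
    use std::path::{Path, PathBuf};

    const MARKER_FILE_NAME: &str = "garage-marker"; // same name as the removed code

    // Check (or create) the marker of one data directory against what we
    // remembered in `known` the last time this directory was seen.
    fn check_marker(dir: &Path, known: &mut HashMap<PathBuf, String>) -> io::Result<()> {
        let marker_path = dir.join(MARKER_FILE_NAME);
        let on_disk = fs::read_to_string(&marker_path).ok();
        match (on_disk, known.get(dir)) {
            (Some(m1), Some(m2)) if &m1 != m2 => Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "marker mismatch: data directory was moved or remounted",
            )),
            (Some(_), Some(_)) => Ok(()),
            (None, Some(_)) => Err(io::Error::new(
                io::ErrorKind::NotFound,
                "marker missing: data directory is probably not mounted",
            )),
            (Some(m), None) => {
                known.insert(dir.to_path_buf(), m);
                Ok(())
            }
            (None, None) => {
                let marker = format!("{:016x}", rand_token());
                fs::write(&marker_path, &marker)?;
                known.insert(dir.to_path_buf(), marker);
                Ok(())
            }
        }
    }

    // Poor man's random token so the sketch stays dependency-free.
    fn rand_token() -> u64 {
        use std::time::{SystemTime, UNIX_EPOCH};
        SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_nanos() as u64
    }

    fn main() -> io::Result<()> {
        let mut known = HashMap::new();
        let dir = std::env::temp_dir();
        check_marker(&dir, &mut known)?; // first run: creates the marker
        check_marker(&dir, &mut known) // second run: verifies it
    }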
@@ -11,4 +11,3 @@ mod metrics;
 mod rc;
 
 pub use block::zstd_encode;
-pub use rc::CalculateRefcount;
@@ -1,4 +1,3 @@
-use std::convert::TryInto;
 use std::path::PathBuf;
 use std::sync::Arc;
 use std::time::Duration;
@@ -11,7 +10,7 @@ use serde::{Deserialize, Serialize};
 
 use tokio::fs;
 use tokio::io::{AsyncReadExt, AsyncWriteExt, BufReader};
-use tokio::sync::{mpsc, Mutex, MutexGuard, Semaphore};
+use tokio::sync::{mpsc, Mutex, MutexGuard};
 
 use opentelemetry::{
 	trace::{FutureExt as OtelFutureExt, TraceContextExt, Tracer},
@@ -23,7 +22,7 @@ use garage_net::stream::{read_stream_to_end, stream_asyncread, ByteStream};
 use garage_db as db;
 
 use garage_util::background::{vars, BackgroundRunner};
-use garage_util::config::Config;
+use garage_util::config::DataDirEnum;
 use garage_util::data::*;
 use garage_util::error::*;
 use garage_util::metrics::RecordDuration;
@@ -85,16 +84,14 @@ pub struct BlockManager {
 
 	data_fsync: bool,
 	compression_level: Option<i32>,
-	disable_scrub: bool,
 
 	mutation_lock: Vec<Mutex<BlockManagerLocked>>,
 
-	pub rc: BlockRc,
+	pub(crate) rc: BlockRc,
 	pub resync: BlockResyncManager,
 
 	pub(crate) system: Arc<System>,
 	pub(crate) endpoint: Arc<Endpoint<BlockRpc, Self>>,
-	buffer_kb_semaphore: Arc<Semaphore>,
 
 	pub(crate) metrics: BlockManagerMetrics,
 
@@ -122,22 +119,24 @@ struct BlockManagerLocked();
 impl BlockManager {
 	pub fn new(
 		db: &db::Db,
-		config: &Config,
+		data_dir: DataDirEnum,
+		data_fsync: bool,
+		compression_level: Option<i32>,
 		replication: TableShardedReplication,
 		system: Arc<System>,
 	) -> Result<Arc<Self>, Error> {
 		// Load or compute layout, i.e. assignment of data blocks to the different data directories
 		let data_layout_persister: Persister<DataLayout> =
 			Persister::new(&system.metadata_dir, "data_layout");
-		let mut data_layout = match data_layout_persister.load() {
-			Ok(layout) => layout
-				.update(&config.data_dir)
-				.ok_or_message("invalid data_dir config")?,
-			Err(_) => {
-				DataLayout::initialize(&config.data_dir).ok_or_message("invalid data_dir config")?
+		let data_layout = match data_layout_persister.load() {
+			Ok(mut layout) => {
+				layout
+					.update(&data_dir)
+					.ok_or_message("invalid data_dir config")?;
+				layout
 			}
+			Err(_) => DataLayout::initialize(&data_dir).ok_or_message("invalid data_dir config")?,
 		};
-		data_layout.check_markers()?;
 		data_layout_persister
 			.save(&data_layout)
 			.expect("cannot save data_layout");
@@ -154,14 +153,11 @@ impl BlockManager {
 			.netapp
 			.endpoint("garage_block/manager.rs/Rpc".to_string());
 
-		let buffer_kb_semaphore = Arc::new(Semaphore::new(config.block_ram_buffer_max / 1024));
-
 		let metrics = BlockManagerMetrics::new(
-			config.compression_level,
-			rc.rc_table.clone(),
+			compression_level,
+			rc.rc.clone(),
 			resync.queue.clone(),
 			resync.errors.clone(),
-			buffer_kb_semaphore.clone(),
 		);
 
 		let scrub_persister = PersisterShared::new(&system.metadata_dir, "scrub_info");
@@ -170,9 +166,8 @@ impl BlockManager {
 			replication,
 			data_layout: ArcSwap::new(Arc::new(data_layout)),
 			data_layout_persister,
-			data_fsync: config.data_fsync,
-			disable_scrub: config.disable_scrub,
-			compression_level: config.compression_level,
+			data_fsync,
+			compression_level,
 			mutation_lock: vec![(); MUTEX_COUNT]
 				.iter()
 				.map(|_| Mutex::new(BlockManagerLocked()))
@@ -181,7 +176,6 @@ impl BlockManager {
 			resync,
 			system,
 			endpoint,
-			buffer_kb_semaphore,
 			metrics,
 			scrub_persister,
 			tx_scrub_command: ArcSwapOption::new(None),
@@ -200,7 +194,6 @@ impl BlockManager {
 		}
 
 		// Spawn scrub worker
-		if !self.disable_scrub {
 		let (scrub_tx, scrub_rx) = mpsc::channel(1);
 		self.tx_scrub_command.store(Some(Arc::new(scrub_tx)));
 		bg.spawn_worker(ScrubWorker::new(
@@ -209,12 +202,10 @@ impl BlockManager {
 			self.scrub_persister.clone(),
 		));
 	}
-	}
 
	pub fn register_bg_vars(&self, vars: &mut vars::BgVars) {
 		self.resync.register_bg_vars(vars);
 
-		if !self.disable_scrub {
 		vars.register_rw(
 			&self.scrub_persister,
 			"scrub-tranquility",
@@ -231,28 +222,15 @@ impl BlockManager {
 			p.get_with(|x| x.corruptions_detected)
 		});
 	}
-	}
 
-	/// Initialization: set how block references are recalculated
-	/// for repair operations
-	pub fn set_recalc_rc(&self, recalc: Vec<CalculateRefcount>) {
-		self.rc.recalc_rc.store(Some(Arc::new(recalc)));
-	}
-
 	/// Ask nodes that might have a (possibly compressed) block for it
 	/// Return it as a stream with a header
 	async fn rpc_get_raw_block_streaming(
 		&self,
 		hash: &Hash,
-		priority: RequestPriority,
 		order_tag: Option<OrderTag>,
 	) -> Result<DataBlockStream, Error> {
-		self.rpc_get_raw_block_internal(
-			hash,
-			priority,
-			order_tag,
-			|stream| async move { Ok(stream) },
-		)
+		self.rpc_get_raw_block_internal(hash, order_tag, |stream| async move { Ok(stream) })
 		.await
 	}
 
@@ -261,10 +239,9 @@ impl BlockManager {
 	pub(crate) async fn rpc_get_raw_block(
 		&self,
 		hash: &Hash,
-		priority: RequestPriority,
 		order_tag: Option<OrderTag>,
 	) -> Result<DataBlock, Error> {
-		self.rpc_get_raw_block_internal(hash, priority, order_tag, |block_stream| async move {
+		self.rpc_get_raw_block_internal(hash, order_tag, |block_stream| async move {
 			let (header, stream) = block_stream.into_parts();
 			read_stream_to_end(stream)
 				.await
@@ -277,7 +254,6 @@ impl BlockManager {
	async fn rpc_get_raw_block_internal<F, Fut, T>(
 		&self,
 		hash: &Hash,
-		priority: RequestPriority,
 		order_tag: Option<OrderTag>,
 		f: F,
 	) -> Result<T, Error>
@@ -295,7 +271,7 @@ impl BlockManager {
 			let rpc = self.endpoint.call_streaming(
 				&node_id,
 				BlockRpc::GetBlock(*hash, order_tag),
-				priority,
+				PRIO_NORMAL | PRIO_SECONDARY,
 			);
 			tokio::select! {
 				res = rpc => {
@@ -334,9 +310,9 @@ impl BlockManager {
 			};
 		}
 
-		let err = Error::MissingBlock(*hash);
-		debug!("{}", err);
-		Err(err)
+		let msg = format!("Get block {:?}: no node returned a valid block", hash);
+		debug!("{}", msg);
+		Err(Error::Message(msg))
 	}
 
 	// ---- Public interface ----
@@ -347,9 +323,7 @@ impl BlockManager {
 		hash: &Hash,
 		order_tag: Option<OrderTag>,
 	) -> Result<ByteStream, Error> {
-		let block_stream = self
-			.rpc_get_raw_block_streaming(hash, PRIO_NORMAL | PRIO_SECONDARY, order_tag)
-			.await?;
+		let block_stream = self.rpc_get_raw_block_streaming(hash, order_tag).await?;
 		let (header, stream) = block_stream.into_parts();
 		match header {
 			DataBlockHeader::Plain => Ok(stream),
@@ -377,14 +351,6 @@ impl BlockManager {
 		let (header, bytes) = DataBlock::from_buffer(data, compression_level)
 			.await
 			.into_parts();
 
-		let permit = self
-			.buffer_kb_semaphore
-			.clone()
-			.acquire_many_owned((bytes.len() / 1024).try_into().unwrap())
-			.await
-			.ok_or_message("could not reserve space for buffer of data to send to remote nodes")?;
-
 		let put_block_rpc =
 			Req::new(BlockRpc::PutBlock { hash, header })?.with_stream_from_buffer(bytes);
 		let put_block_rpc = if let Some(tag) = order_tag {
@@ -400,7 +366,6 @@ impl BlockManager {
 				who.as_ref(),
 				put_block_rpc,
 				RequestStrategy::with_priority(PRIO_NORMAL | PRIO_SECONDARY)
-					.with_drop_on_completion(permit)
 					.with_quorum(self.replication.write_quorum()),
 			)
 			.await?;
@@ -410,7 +375,7 @@ impl BlockManager {
 
 	/// Get number of items in the refcount table
 	pub fn rc_len(&self) -> Result<usize, Error> {
-		Ok(self.rc.rc_table.len()?)
+		Ok(self.rc.rc.len()?)
 	}
 
 	/// Send command to start/stop/manager scrub worker
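The removed buffer_kb_semaphore illustrates a simple RAM-budgeting trick: create a tokio Semaphore with one permit per KiB of allowed buffer space, acquire_many before buffering an outgoing block, and let the permit's drop release the budget. A standalone sketch (the budget size and helper are example values, not Garage's):

    use std::sync::Arc;
    use tokio::sync::Semaphore;

    #[tokio::main]
    async fn main() {
        // 1 permit == 1 KiB of buffer budget; 256 MiB total (example value).
        let budget = Arc::new(Semaphore::new(256 * 1024));

        let bytes = vec![0u8; 1024 * 1024]; // a 1 MiB block to send
        let permits = (bytes.len() / 1024) as u32;

        // Waits if too much data is already buffered for other peers.
        let permit = budget
            .clone()
            .acquire_many_owned(permits)
            .await
            .expect("semaphore closed");

        send_to_peer(bytes).await;
        drop(permit); // budget is released once the buffer is gone
    }

    async fn send_to_peer(_data: Vec<u8>) { /* stand-in for the RPC call */ }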
@@ -1,7 +1,3 @@
-use std::sync::Arc;
-
-use tokio::sync::Semaphore;
-
 use opentelemetry::{global, metrics::*};
 
 use garage_db as db;
@@ -12,7 +8,6 @@ pub struct BlockManagerMetrics {
 	pub(crate) _rc_size: ValueObserver<u64>,
 	pub(crate) _resync_queue_len: ValueObserver<u64>,
 	pub(crate) _resync_errored_blocks: ValueObserver<u64>,
-	pub(crate) _buffer_free_kb: ValueObserver<u64>,
 
 	pub(crate) resync_counter: BoundCounter<u64>,
 	pub(crate) resync_error_counter: BoundCounter<u64>,
@@ -35,7 +30,6 @@ impl BlockManagerMetrics {
 		rc_tree: db::Tree,
 		resync_queue: db::Tree,
 		resync_errors: db::Tree,
-		buffer_semaphore: Arc<Semaphore>,
 	) -> Self {
 		let meter = global::meter("garage_model/block");
 		Self {
@@ -75,15 +69,6 @@ impl BlockManagerMetrics {
 				.with_description("Number of block hashes whose last resync resulted in an error")
 				.init(),
 
-			_buffer_free_kb: meter
-				.u64_value_observer("block.ram_buffer_free_kb", move |observer| {
-					observer.observe(buffer_semaphore.available_permits() as u64, &[])
-				})
-				.with_description(
-					"Available RAM in KiB to use for buffering data blocks to be written to remote nodes",
-				)
-				.init(),
-
 			resync_counter: meter
 				.u64_counter("block.resync_counter")
 				.with_description("Number of calls to resync_block")
@@ -1,7 +1,5 @@
 use std::convert::TryInto;
 
-use arc_swap::ArcSwapOption;
-
 use garage_db as db;
 
 use garage_util::data::*;
@@ -10,20 +8,13 @@ use garage_util::time::*;
 
 use crate::manager::BLOCK_GC_DELAY;
 
-pub type CalculateRefcount =
-	Box<dyn Fn(&db::Transaction, &Hash) -> db::TxResult<usize, Error> + Send + Sync>;
-
 pub struct BlockRc {
-	pub rc_table: db::Tree,
-	pub(crate) recalc_rc: ArcSwapOption<Vec<CalculateRefcount>>,
+	pub(crate) rc: db::Tree,
 }
 
 impl BlockRc {
 	pub(crate) fn new(rc: db::Tree) -> Self {
-		Self {
-			rc_table: rc,
-			recalc_rc: ArcSwapOption::new(None),
-		}
+		Self { rc }
 	}
 
 	/// Increment the reference counter associated to a hash.
@@ -33,9 +24,9 @@ impl BlockRc {
 		tx: &mut db::Transaction,
 		hash: &Hash,
 	) -> db::TxOpResult<bool> {
-		let old_rc = RcEntry::parse_opt(tx.get(&self.rc_table, hash)?);
+		let old_rc = RcEntry::parse_opt(tx.get(&self.rc, hash)?);
 		match old_rc.increment().serialize() {
-			Some(x) => tx.insert(&self.rc_table, hash, x)?,
+			Some(x) => tx.insert(&self.rc, hash, x)?,
 			None => unreachable!(),
 		};
 		Ok(old_rc.is_zero())
@@ -48,28 +39,28 @@ impl BlockRc {
 		tx: &mut db::Transaction,
 		hash: &Hash,
 	) -> db::TxOpResult<bool> {
-		let new_rc = RcEntry::parse_opt(tx.get(&self.rc_table, hash)?).decrement();
+		let new_rc = RcEntry::parse_opt(tx.get(&self.rc, hash)?).decrement();
 		match new_rc.serialize() {
-			Some(x) => tx.insert(&self.rc_table, hash, x)?,
-			None => tx.remove(&self.rc_table, hash)?,
+			Some(x) => tx.insert(&self.rc, hash, x)?,
+			None => tx.remove(&self.rc, hash)?,
 		};
 		Ok(matches!(new_rc, RcEntry::Deletable { .. }))
 	}
 
 	/// Read a block's reference count
 	pub(crate) fn get_block_rc(&self, hash: &Hash) -> Result<RcEntry, Error> {
-		Ok(RcEntry::parse_opt(self.rc_table.get(hash.as_ref())?))
+		Ok(RcEntry::parse_opt(self.rc.get(hash.as_ref())?))
 	}
 
 	/// Delete an entry in the RC table if it is deletable and the
 	/// deletion time has passed
 	pub(crate) fn clear_deleted_block_rc(&self, hash: &Hash) -> Result<(), Error> {
 		let now = now_msec();
-		self.rc_table.db().transaction(|tx| {
-			let rcval = RcEntry::parse_opt(tx.get(&self.rc_table, hash)?);
+		self.rc.db().transaction(|tx| {
+			let rcval = RcEntry::parse_opt(tx.get(&self.rc, hash)?);
 			match rcval {
 				RcEntry::Deletable { at_time } if now > at_time => {
-					tx.remove(&self.rc_table, hash)?;
+					tx.remove(&self.rc, hash)?;
 				}
 				_ => (),
 			};
@@ -77,58 +68,6 @@ impl BlockRc {
 		})?;
 		Ok(())
 	}
-
-	/// Recalculate the reference counter of a block
-	/// to fix potential inconsistencies
-	pub fn recalculate_rc(&self, hash: &Hash) -> Result<(usize, bool), Error> {
-		if let Some(recalc_fns) = self.recalc_rc.load().as_ref() {
-			trace!("Repair block RC for {:?}", hash);
-			let res = self
-				.rc_table
-				.db()
-				.transaction(|tx| {
-					let mut cnt = 0;
-					for f in recalc_fns.iter() {
-						cnt += f(&tx, hash)?;
-					}
-					let old_rc = RcEntry::parse_opt(tx.get(&self.rc_table, hash)?);
-					trace!(
-						"Block RC for {:?}: stored={}, calculated={}",
-						hash,
-						old_rc.as_u64(),
-						cnt
-					);
-					if cnt as u64 != old_rc.as_u64() {
-						warn!(
-							"Fixing inconsistent block RC for {:?}: was {}, should be {}",
-							hash,
-							old_rc.as_u64(),
-							cnt
-						);
-						let new_rc = if cnt > 0 {
-							RcEntry::Present { count: cnt as u64 }
-						} else {
-							RcEntry::Deletable {
-								at_time: now_msec() + BLOCK_GC_DELAY.as_millis() as u64,
-							}
-						};
-						tx.insert(&self.rc_table, hash, new_rc.serialize().unwrap())?;
-						Ok((cnt, true))
-					} else {
-						Ok((cnt, false))
-					}
-				})
-				.map_err(Error::from);
-			if let Err(e) = &res {
-				error!("Failed to fix RC for block {:?}: {}", hash, e);
-			}
-			res
-		} else {
-			Err(Error::Message(
-				"Block RC recalculation is not available at this point".into(),
-			))
-		}
-	}
 }
 
 /// Describes the state of the reference counter for a block
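Both versions of rc.rs revolve around the same RcEntry state machine: a counter that, instead of being deleted at zero, becomes "deletable at time T" so the block survives until the GC delay has passed. A compact sketch of those transitions — RcEntry here is a simplified stand-in, not the crate's actual definition:

    #[derive(Debug, Clone, Copy, PartialEq)]
    enum RcEntry {
        Absent,
        Present { count: u64 },
        Deletable { at_time: u64 }, // ms timestamp after which GC may remove the block
    }

    const BLOCK_GC_DELAY_MS: u64 = 24 * 3600 * 1000; // assumption for the sketch

    impl RcEntry {
        fn increment(self, _now: u64) -> Self {
            match self {
                RcEntry::Present { count } => RcEntry::Present { count: count + 1 },
                // A deletable or absent entry comes back to life with count 1.
                _ => RcEntry::Present { count: 1 },
            }
        }
        fn decrement(self, now: u64) -> Self {
            match self {
                RcEntry::Present { count } if count > 1 => RcEntry::Present { count: count - 1 },
                RcEntry::Present { .. } => RcEntry::Deletable { at_time: now + BLOCK_GC_DELAY_MS },
                other => other,
            }
        }
    }

    fn main() {
        let e = RcEntry::Absent.increment(0);
        assert_eq!(e, RcEntry::Present { count: 1 });
        let e = e.decrement(1_000);
        assert_eq!(e, RcEntry::Deletable { at_time: 1_000 + BLOCK_GC_DELAY_MS });
    }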
@@ -107,7 +107,7 @@ impl Worker for RepairWorker {
 			for entry in self
 				.manager
 				.rc
-				.rc_table
+				.rc
 				.range::<&[u8], _>((start_bound, Bound::Unbounded))?
 			{
 				let (hash, _) = entry?;
@@ -367,13 +367,6 @@ impl BlockResyncManager {
 		}
 
 		if exists && rc.is_deletable() {
-			if manager.rc.recalculate_rc(hash)?.0 > 0 {
-				return Err(Error::Message(format!(
-					"Refcount for block {:?} was inconsistent, retrying later",
-					hash
-				)));
-			}
-
 			info!("Resync block {:?}: offloading and deleting", hash);
 			let existing_path = existing_path.unwrap();
 
@@ -436,7 +429,7 @@ impl BlockResyncManager {
 				&manager.endpoint,
 				&need_nodes,
 				put_block_message,
-				RequestStrategy::with_priority(PRIO_BACKGROUND | PRIO_SECONDARY)
+				RequestStrategy::with_priority(PRIO_BACKGROUND)
 					.with_quorum(need_nodes.len()),
 			)
 			.await
@@ -460,17 +453,7 @@ impl BlockResyncManager {
 				hash
 			);
 
-			let block_data = manager
-				.rpc_get_raw_block(hash, PRIO_BACKGROUND | PRIO_SECONDARY, None)
-				.await;
-			if matches!(block_data, Err(Error::MissingBlock(_))) {
-				warn!(
-					"Could not fetch needed block {:?}, no node returned valid data. Checking that refcount is correct.",
-					hash
-				);
-				manager.rc.recalculate_rc(hash)?;
-			}
-			let block_data = block_data?;
+			let block_data = manager.rpc_get_raw_block(hash, None).await?;
 
 			manager.metrics.resync_recv_counter.add(1);
 
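The priority values being swapped around here (PRIO_NORMAL, PRIO_BACKGROUND, PRIO_SECONDARY) compose with bitwise OR: part of the byte encodes the urgency class, and a separate flag bit marks the request as allowed to hit secondary sources. The constants below are illustrative stand-ins, not garage_net's actual values:

    type RequestPriority = u8;

    // Illustrative values only, not garage_net's actual constants.
    const PRIO_NORMAL: RequestPriority = 0x20;
    const PRIO_BACKGROUND: RequestPriority = 0x40;
    // A flag bit that can be OR-ed onto any urgency class:
    const PRIO_SECONDARY: RequestPriority = 0x01;

    fn main() {
        let bg = PRIO_BACKGROUND | PRIO_SECONDARY;
        let fg = PRIO_NORMAL | PRIO_SECONDARY;
        assert_ne!(bg & 0xf0, fg & 0xf0); // urgency classes stay distinguishable
        assert_ne!(bg & PRIO_SECONDARY, 0); // the flag survives the OR
        println!("resync fetch: {:#04x}, interactive fetch: {:#04x}", bg, fg);
    }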
@@ -1,6 +1,6 @@
 [package]
 name = "garage_db"
-version = "1.0.0"
+version = "0.10.0"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -14,12 +14,11 @@ path = "lib.rs"
 [dependencies]
 err-derive.workspace = true
 hexdump.workspace = true
+ouroboros.workspace = true
 tracing.workspace = true
 
 heed = { workspace = true, optional = true }
-rusqlite = { workspace = true, optional = true, features = ["backup"] }
-r2d2 = { workspace = true, optional = true }
-r2d2_sqlite = { workspace = true, optional = true }
+rusqlite = { workspace = true, optional = true }
 
 [dev-dependencies]
 mktemp.workspace = true
@@ -28,4 +27,4 @@ mktemp.workspace = true
 default = [ "lmdb", "sqlite" ]
 bundled-libs = [ "rusqlite?/bundled" ]
 lmdb = [ "heed" ]
-sqlite = [ "rusqlite", "r2d2", "r2d2_sqlite" ]
+sqlite = [ "rusqlite" ]
@@ -15,7 +15,6 @@ use core::ops::{Bound, RangeBounds};
 
 use std::borrow::Cow;
 use std::cell::Cell;
-use std::path::PathBuf;
 use std::sync::Arc;
 
 use err_derive::Error;
@@ -45,12 +44,6 @@ pub type TxValueIter<'a> = Box<dyn std::iter::Iterator<Item = TxOpResult<(Value, Value)>> + 'a>;
 #[error(display = "{}", _0)]
 pub struct Error(pub Cow<'static, str>);
 
-impl From<std::io::Error> for Error {
-	fn from(e: std::io::Error) -> Error {
-		Error(format!("IO: {}", e).into())
-	}
-}
-
 pub type Result<T> = std::result::Result<T, Error>;
 
 #[derive(Debug, Error)]
@@ -133,10 +126,6 @@ impl Db {
 		}
 	}
 
-	pub fn snapshot(&self, path: &PathBuf) -> Result<()> {
-		self.0.snapshot(path)
-	}
-
 	pub fn import(&self, other: &Db) -> Result<()> {
 		let existing_trees = self.list_trees()?;
 		if !existing_trees.is_empty() {
@@ -334,7 +323,6 @@ pub(crate) trait IDb: Send + Sync {
 	fn engine(&self) -> String;
 	fn open_tree(&self, name: &str) -> Result<usize>;
 	fn list_trees(&self) -> Result<Vec<String>>;
-	fn snapshot(&self, path: &PathBuf) -> Result<()>;
 
 	fn get(&self, tree: usize, key: &[u8]) -> Result<Option<Value>>;
 	fn len(&self, tree: usize) -> Result<usize>;
@@ -3,7 +3,6 @@ use core::ptr::NonNull;
 
 use std::collections::HashMap;
 use std::convert::TryInto;
-use std::path::PathBuf;
 use std::pin::Pin;
 use std::sync::{Arc, RwLock};
 
@@ -104,15 +103,6 @@ impl IDb for LmdbDb {
 		Ok(ret2)
 	}
 
-	fn snapshot(&self, to: &PathBuf) -> Result<()> {
-		std::fs::create_dir_all(to)?;
-		let mut path = to.clone();
-		path.push("data.mdb");
-		self.db
-			.copy_to_path(path, heed::CompactionOption::Disabled)?;
-		Ok(())
-	}
-
 	// ----
 
 	fn get(&self, tree: usize, key: &[u8]) -> Result<Option<Value>> {
@@ -36,7 +36,7 @@ impl std::str::FromStr for Engine {
         match text {
             "lmdb" | "heed" => Ok(Self::Lmdb),
             "sqlite" | "sqlite3" | "rusqlite" => Ok(Self::Sqlite),
-            "sled" => Err(Error("Sled is no longer supported as a database engine. Converting your old metadata db can be done using an older Garage binary (e.g. v0.9.4).".into())),
+            "sled" => Err(Error("Sled is no longer supported as a database engine. Converting your old metadata db can be done using an older Garage binary (e.g. v0.9.3).".into())),
             kind => Err(Error(
                 format!(
                     "Invalid DB engine: {} (options are: lmdb, sqlite)",
@@ -68,8 +68,14 @@ pub fn open_db(path: &PathBuf, engine: Engine, opt: &OpenOpt) -> Result<Db> {
         #[cfg(feature = "sqlite")]
         Engine::Sqlite => {
             info!("Opening Sqlite database at: {}", path.display());
-            let manager = r2d2_sqlite::SqliteConnectionManager::file(path);
-            Ok(crate::sqlite_adapter::SqliteDb::new(manager, opt.fsync)?)
+            let db = crate::sqlite_adapter::rusqlite::Connection::open(&path)?;
+            db.pragma_update(None, "journal_mode", "WAL")?;
+            if opt.fsync {
+                db.pragma_update(None, "synchronous", "NORMAL")?;
+            } else {
+                db.pragma_update(None, "synchronous", "OFF")?;
+            }
+            Ok(crate::sqlite_adapter::SqliteDb::init(db))
         }
 
         // ---- LMDB DB ----

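Editor's note: the `-` side of this hunk checks connections out of an r2d2 pool, which this branch replaces with a single `rusqlite::Connection`. A minimal, self-contained sketch of that pooling pattern, assuming the `r2d2`, `r2d2_sqlite`, and `rusqlite` crates; the file name and table schema below are illustrative, not taken from this diff:

```rust
use r2d2::Pool;
use r2d2_sqlite::SqliteConnectionManager;

fn open_pool() -> Result<Pool<SqliteConnectionManager>, r2d2::Error> {
    // `with_init` runs once on every fresh connection the pool opens,
    // which is where per-connection pragmas like WAL mode belong.
    let manager = SqliteConnectionManager::file("db.sqlite")
        .with_init(|conn| conn.pragma_update(None, "journal_mode", "WAL"));
    let pool = Pool::builder().build(manager)?;

    // Each caller checks a connection out of the pool for its own use.
    let conn = pool.get().expect("pool checkout failed");
    conn.execute_batch("CREATE TABLE IF NOT EXISTS t (k BLOB PRIMARY KEY, v BLOB)")
        .expect("schema creation failed");
    Ok(pool)
}

fn main() {
    let _pool = open_pool().expect("could not open pool");
}
```
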
@@ -1,14 +1,10 @@
 use core::ops::Bound;
 
-use std::marker::PhantomPinned;
-use std::path::PathBuf;
-use std::pin::Pin;
-use std::ptr::NonNull;
-use std::sync::{Arc, Mutex, RwLock};
+use std::borrow::BorrowMut;
+use std::sync::{Arc, Mutex, MutexGuard};
 
-use r2d2::Pool;
-use r2d2_sqlite::SqliteConnectionManager;
-use rusqlite::{params, Rows, Statement, Transaction};
+use ouroboros::self_referencing;
+use rusqlite::{params, Connection, Rows, Statement, Transaction};
 
 use crate::{
     Db, Error, IDb, ITx, ITxFn, OnCommit, Result, TxError, TxFnResult, TxOpError, TxOpResult,
@@ -17,8 +13,6 @@ use crate::{
 
 pub use rusqlite;
 
-type Connection = r2d2::PooledConnection<SqliteConnectionManager>;
-
 // --- err
 
 impl From<rusqlite::Error> for Error {
@@ -27,12 +21,6 @@ impl From<rusqlite::Error> for Error {
     }
 }
 
-impl From<r2d2::Error> for Error {
-    fn from(e: r2d2::Error) -> Error {
-        Error(format!("Sqlite: {}", e).into())
-    }
-}
-
 impl From<rusqlite::Error> for TxOpError {
     fn from(e: rusqlite::Error) -> TxOpError {
         TxOpError(e.into())
@@ -41,47 +29,35 @@ impl From<rusqlite::Error> for TxOpError {
 
 // -- db
 
-pub struct SqliteDb {
-    db: Pool<SqliteConnectionManager>,
-    trees: RwLock<Vec<Arc<str>>>,
-    // All operations that might write on the DB must take this lock first.
-    // This emulates LMDB's approach where a single writer can be
-    // active at once.
-    write_lock: Mutex<()>,
+pub struct SqliteDb(Mutex<SqliteDbInner>);
+
+struct SqliteDbInner {
+    db: Connection,
+    trees: Vec<String>,
 }
 
 impl SqliteDb {
-    pub fn new(manager: SqliteConnectionManager, sync_mode: bool) -> Result<Db> {
-        let manager = manager.with_init(move |db| {
-            db.pragma_update(None, "journal_mode", "WAL")?;
-            if sync_mode {
-                db.pragma_update(None, "synchronous", "NORMAL")?;
-            } else {
-                db.pragma_update(None, "synchronous", "OFF")?;
-            }
-            Ok(())
-        });
-        let s = Self {
-            db: Pool::builder().build(manager)?,
-            trees: RwLock::new(vec![]),
-            write_lock: Mutex::new(()),
-        };
-        Ok(Db(Arc::new(s)))
+    pub fn init(db: rusqlite::Connection) -> Db {
+        let s = Self(Mutex::new(SqliteDbInner {
+            db,
+            trees: Vec::new(),
+        }));
+        Db(Arc::new(s))
     }
 }
 
-impl SqliteDb {
-    fn get_tree(&self, i: usize) -> Result<Arc<str>> {
+impl SqliteDbInner {
+    fn get_tree(&self, i: usize) -> Result<&'_ str> {
         self.trees
-            .read()
-            .unwrap()
             .get(i)
-            .cloned()
+            .map(String::as_str)
             .ok_or_else(|| Error("invalid tree id".into()))
     }
 
-    fn internal_get(&self, db: &Connection, tree: &str, key: &[u8]) -> Result<Option<Value>> {
-        let mut stmt = db.prepare(&format!("SELECT v FROM {} WHERE k = ?1", tree))?;
+    fn internal_get(&self, tree: &str, key: &[u8]) -> Result<Option<Value>> {
+        let mut stmt = self
+            .db
+            .prepare(&format!("SELECT v FROM {} WHERE k = ?1", tree))?;
         let mut res_iter = stmt.query([key])?;
         match res_iter.next()? {
             None => Ok(None),
@@ -97,14 +73,13 @@ impl IDb for SqliteDb {
 
     fn open_tree(&self, name: &str) -> Result<usize> {
         let name = format!("tree_{}", name.replace(':', "_COLON_"));
-        let mut trees = self.trees.write().unwrap();
+        let mut this = self.0.lock().unwrap();
 
-        if let Some(i) = trees.iter().position(|x| x.as_ref() == &name) {
+        if let Some(i) = this.trees.iter().position(|x| x == &name) {
             Ok(i)
         } else {
-            let db = self.db.get()?;
             trace!("create table {}", name);
-            db.execute(
+            this.db.execute(
                 &format!(
                     "CREATE TABLE IF NOT EXISTS {} (
                         k BLOB PRIMARY KEY,
@@ -116,8 +91,8 @@ impl IDb for SqliteDb {
             )?;
             trace!("table created: {}, unlocking", name);
 
-            let i = trees.len();
-            trees.push(name.to_string().into_boxed_str().into());
+            let i = this.trees.len();
+            this.trees.push(name.to_string());
             Ok(i)
         }
     }
@@ -125,8 +100,11 @@ impl IDb for SqliteDb {
     fn list_trees(&self) -> Result<Vec<String>> {
         let mut trees = vec![];
 
-        let db = self.db.get()?;
-        let mut stmt = db.prepare(
+        trace!("list_trees: lock db");
+        let this = self.0.lock().unwrap();
+        trace!("list_trees: lock acquired");
+
+        let mut stmt = this.db.prepare(
            "SELECT name FROM sqlite_schema WHERE type = 'table' AND name LIKE 'tree_%'",
         )?;
         let mut rows = stmt.query([])?;
@@ -139,29 +117,24 @@ impl IDb for SqliteDb {
         Ok(trees)
     }
 
-    fn snapshot(&self, to: &PathBuf) -> Result<()> {
-        fn progress(p: rusqlite::backup::Progress) {
-            let percent = (p.pagecount - p.remaining) * 100 / p.pagecount;
-            info!("Sqlite snapshot progres: {}%", percent);
-        }
-        self.db
-            .get()?
-            .backup(rusqlite::DatabaseName::Main, to, Some(progress))?;
-        Ok(())
-    }
-
     // ----
 
     fn get(&self, tree: usize, key: &[u8]) -> Result<Option<Value>> {
-        let tree = self.get_tree(tree)?;
-        self.internal_get(&self.db.get()?, &tree, key)
+        trace!("get {}: lock db", tree);
+        let this = self.0.lock().unwrap();
+        trace!("get {}: lock acquired", tree);
+
+        let tree = this.get_tree(tree)?;
+        this.internal_get(tree, key)
     }
 
     fn len(&self, tree: usize) -> Result<usize> {
-        let tree = self.get_tree(tree)?;
-        let db = self.db.get()?;
-        let mut stmt = db.prepare(&format!("SELECT COUNT(*) FROM {}", tree))?;
+        trace!("len {}: lock db", tree);
+        let this = self.0.lock().unwrap();
+        trace!("len {}: lock acquired", tree);
+
+        let tree = this.get_tree(tree)?;
+        let mut stmt = this.db.prepare(&format!("SELECT COUNT(*) FROM {}", tree))?;
         let mut res_iter = stmt.query([])?;
         match res_iter.next()? {
             None => Ok(0),
@@ -170,60 +143,69 @@ impl IDb for SqliteDb {
     }
 
     fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<Option<Value>> {
-        let tree = self.get_tree(tree)?;
-        let db = self.db.get()?;
-        let lock = self.write_lock.lock();
+        trace!("insert {}: lock db", tree);
+        let this = self.0.lock().unwrap();
+        trace!("insert {}: lock acquired", tree);
 
-        let old_val = self.internal_get(&db, &tree, key)?;
+        let tree = this.get_tree(tree)?;
+        let old_val = this.internal_get(tree, key)?;
 
         let sql = match &old_val {
             Some(_) => format!("UPDATE {} SET v = ?2 WHERE k = ?1", tree),
             None => format!("INSERT INTO {} (k, v) VALUES (?1, ?2)", tree),
         };
-        let n = db.execute(&sql, params![key, value])?;
+        let n = this.db.execute(&sql, params![key, value])?;
         assert_eq!(n, 1);
 
-        drop(lock);
         Ok(old_val)
     }
 
     fn remove(&self, tree: usize, key: &[u8]) -> Result<Option<Value>> {
-        let tree = self.get_tree(tree)?;
-        let db = self.db.get()?;
-        let lock = self.write_lock.lock();
+        trace!("remove {}: lock db", tree);
+        let this = self.0.lock().unwrap();
+        trace!("remove {}: lock acquired", tree);
 
-        let old_val = self.internal_get(&db, &tree, key)?;
+        let tree = this.get_tree(tree)?;
+        let old_val = this.internal_get(tree, key)?;
 
         if old_val.is_some() {
-            let n = db.execute(&format!("DELETE FROM {} WHERE k = ?1", tree), params![key])?;
+            let n = this
+                .db
+                .execute(&format!("DELETE FROM {} WHERE k = ?1", tree), params![key])?;
             assert_eq!(n, 1);
         }
 
-        drop(lock);
         Ok(old_val)
     }
 
     fn clear(&self, tree: usize) -> Result<()> {
-        let tree = self.get_tree(tree)?;
-        let db = self.db.get()?;
-        let lock = self.write_lock.lock();
+        trace!("clear {}: lock db", tree);
+        let this = self.0.lock().unwrap();
+        trace!("clear {}: lock acquired", tree);
 
-        db.execute(&format!("DELETE FROM {}", tree), [])?;
-        drop(lock);
+        let tree = this.get_tree(tree)?;
+        this.db.execute(&format!("DELETE FROM {}", tree), [])?;
         Ok(())
     }
 
     fn iter(&self, tree: usize) -> Result<ValueIter<'_>> {
-        let tree = self.get_tree(tree)?;
+        trace!("iter {}: lock db", tree);
+        let this = self.0.lock().unwrap();
+        trace!("iter {}: lock acquired", tree);
+
+        let tree = this.get_tree(tree)?;
         let sql = format!("SELECT k, v FROM {} ORDER BY k ASC", tree);
-        DbValueIterator::make(self.db.get()?, &sql, [])
+        make_iterator(this, &sql, [])
     }
 
     fn iter_rev(&self, tree: usize) -> Result<ValueIter<'_>> {
-        let tree = self.get_tree(tree)?;
+        trace!("iter_rev {}: lock db", tree);
+        let this = self.0.lock().unwrap();
+        trace!("iter_rev {}: lock acquired", tree);
+
+        let tree = this.get_tree(tree)?;
         let sql = format!("SELECT k, v FROM {} ORDER BY k DESC", tree);
-        DbValueIterator::make(self.db.get()?, &sql, [])
+        make_iterator(this, &sql, [])
     }
 
     fn range<'r>(
@@ -232,7 +214,11 @@ impl IDb for SqliteDb {
         low: Bound<&'r [u8]>,
         high: Bound<&'r [u8]>,
     ) -> Result<ValueIter<'_>> {
-        let tree = self.get_tree(tree)?;
+        trace!("range {}: lock db", tree);
+        let this = self.0.lock().unwrap();
+        trace!("range {}: lock acquired", tree);
+
+        let tree = this.get_tree(tree)?;
 
         let (bounds_sql, params) = bounds_sql(low, high);
         let sql = format!("SELECT k, v FROM {} {} ORDER BY k ASC", tree, bounds_sql);
@@ -242,7 +228,7 @@ impl IDb for SqliteDb {
             .map(|x| x as &dyn rusqlite::ToSql)
             .collect::<Vec<_>>();
 
-        DbValueIterator::make::<&[&dyn rusqlite::ToSql]>(self.db.get()?, &sql, params.as_ref())
+        make_iterator::<&[&dyn rusqlite::ToSql]>(this, &sql, params.as_ref())
     }
     fn range_rev<'r>(
         &self,
@@ -250,7 +236,11 @@ impl IDb for SqliteDb {
         low: Bound<&'r [u8]>,
         high: Bound<&'r [u8]>,
     ) -> Result<ValueIter<'_>> {
-        let tree = self.get_tree(tree)?;
+        trace!("range_rev {}: lock db", tree);
+        let this = self.0.lock().unwrap();
+        trace!("range_rev {}: lock acquired", tree);
+
+        let tree = this.get_tree(tree)?;
 
         let (bounds_sql, params) = bounds_sql(low, high);
         let sql = format!("SELECT k, v FROM {} {} ORDER BY k DESC", tree, bounds_sql);
@@ -260,20 +250,25 @@ impl IDb for SqliteDb {
             .map(|x| x as &dyn rusqlite::ToSql)
             .collect::<Vec<_>>();
 
-        DbValueIterator::make::<&[&dyn rusqlite::ToSql]>(self.db.get()?, &sql, params.as_ref())
+        make_iterator::<&[&dyn rusqlite::ToSql]>(this, &sql, params.as_ref())
     }
 
     // ----
 
     fn transaction(&self, f: &dyn ITxFn) -> TxResult<OnCommit, ()> {
-        let mut db = self.db.get().map_err(Error::from).map_err(TxError::Db)?;
-        let trees = self.trees.read().unwrap();
-        let lock = self.write_lock.lock();
+        trace!("transaction: lock db");
+        let mut this = self.0.lock().unwrap();
+        trace!("transaction: lock acquired");
 
-        trace!("trying transaction");
+        let this_mut_ref: &mut SqliteDbInner = this.borrow_mut();
+
         let mut tx = SqliteTx {
-            tx: db.transaction().map_err(Error::from).map_err(TxError::Db)?,
-            trees: &trees,
+            tx: this_mut_ref
+                .db
+                .transaction()
+                .map_err(Error::from)
+                .map_err(TxError::Db)?,
+            trees: &this_mut_ref.trees,
         };
         let res = match f.try_on(&mut tx) {
             TxFnResult::Ok(on_commit) => {
@@ -293,8 +288,7 @@ impl IDb for SqliteDb {
         };
 
         trace!("transaction done");
-        drop(lock);
-        return res;
+        res
     }
 }
 
@@ -302,12 +296,12 @@ impl IDb for SqliteDb {
 
 struct SqliteTx<'a> {
     tx: Transaction<'a>,
-    trees: &'a [Arc<str>],
+    trees: &'a [String],
 }
 
 impl<'a> SqliteTx<'a> {
     fn get_tree(&self, i: usize) -> TxOpResult<&'_ str> {
-        self.trees.get(i).map(Arc::as_ref).ok_or_else(|| {
+        self.trees.get(i).map(String::as_ref).ok_or_else(|| {
             TxOpError(Error(
                 "invalid tree id (it might have been openned after the transaction started)".into(),
             ))
@@ -376,12 +370,12 @@ impl<'a> ITx for SqliteTx<'a> {
     fn iter(&self, tree: usize) -> TxOpResult<TxValueIter<'_>> {
         let tree = self.get_tree(tree)?;
         let sql = format!("SELECT k, v FROM {} ORDER BY k ASC", tree);
-        TxValueIterator::make(self, &sql, [])
+        make_tx_iterator(self, &sql, [])
     }
     fn iter_rev(&self, tree: usize) -> TxOpResult<TxValueIter<'_>> {
         let tree = self.get_tree(tree)?;
         let sql = format!("SELECT k, v FROM {} ORDER BY k DESC", tree);
-        TxValueIterator::make(self, &sql, [])
+        make_tx_iterator(self, &sql, [])
     }
 
     fn range<'r>(
@@ -400,7 +394,7 @@ impl<'a> ITx for SqliteTx<'a> {
             .map(|x| x as &dyn rusqlite::ToSql)
             .collect::<Vec<_>>();
 
-        TxValueIterator::make::<&[&dyn rusqlite::ToSql]>(self, &sql, params.as_ref())
+        make_tx_iterator::<&[&dyn rusqlite::ToSql]>(self, &sql, params.as_ref())
     }
     fn range_rev<'r>(
         &self,
@@ -418,73 +412,47 @@ impl<'a> ITx for SqliteTx<'a> {
             .map(|x| x as &dyn rusqlite::ToSql)
             .collect::<Vec<_>>();
 
-        TxValueIterator::make::<&[&dyn rusqlite::ToSql]>(self, &sql, params.as_ref())
+        make_tx_iterator::<&[&dyn rusqlite::ToSql]>(self, &sql, params.as_ref())
     }
 }
 
 // ---- iterators outside transactions ----
 // complicated, they must hold the Statement and Row objects
-// therefore quite some unsafe code (it is a self-referential struct)
+// so we need a self_referencing struct
 
+// need to split in two because sequential mutable borrows are broken,
+// see https://github.com/someguynamedjosh/ouroboros/issues/100
+#[self_referencing]
+struct DbValueIterator1<'a> {
+    db: MutexGuard<'a, SqliteDbInner>,
+    #[borrows(mut db)]
+    #[covariant]
+    stmt: Statement<'this>,
+}
+
+#[self_referencing]
 struct DbValueIterator<'a> {
-    db: Connection,
-    stmt: Option<Statement<'a>>,
-    iter: Option<Rows<'a>>,
-    _pin: PhantomPinned,
+    aux: DbValueIterator1<'a>,
+    #[borrows(mut aux)]
+    #[covariant]
+    iter: Rows<'this>,
 }
 
-impl<'a> DbValueIterator<'a> {
-    fn make<P: rusqlite::Params>(db: Connection, sql: &str, args: P) -> Result<ValueIter<'a>> {
-        let res = DbValueIterator {
-            db,
-            stmt: None,
-            iter: None,
-            _pin: PhantomPinned,
-        };
-        let mut boxed = Box::pin(res);
-        trace!("make iterator with sql: {}", sql);
-
-        // This unsafe allows us to bypass lifetime checks
-        let db = unsafe { NonNull::from(&boxed.db).as_ref() };
-        let stmt = db.prepare(sql)?;
-
-        let mut_ref = Pin::as_mut(&mut boxed);
-        // This unsafe allows us to write in a field of the pinned struct
-        unsafe {
-            Pin::get_unchecked_mut(mut_ref).stmt = Some(stmt);
-        }
-
-        // This unsafe allows us to bypass lifetime checks
-        let stmt = unsafe { NonNull::from(&boxed.stmt).as_mut() };
-        let iter = stmt.as_mut().unwrap().query(args)?;
-
-        let mut_ref = Pin::as_mut(&mut boxed);
-        // This unsafe allows us to write in a field of the pinned struct
-        unsafe {
-            Pin::get_unchecked_mut(mut_ref).iter = Some(iter);
-        }
-
-        Ok(Box::new(DbValueIteratorPin(boxed)))
-    }
+fn make_iterator<'a, P: rusqlite::Params>(
+    db: MutexGuard<'a, SqliteDbInner>,
+    sql: &str,
+    args: P,
+) -> Result<ValueIter<'a>> {
+    let aux = DbValueIterator1::try_new(db, |db| db.db.prepare(sql))?;
+    let res = DbValueIterator::try_new(aux, |aux| aux.with_stmt_mut(|stmt| stmt.query(args)))?;
+    Ok(Box::new(res))
 }
 
-impl<'a> Drop for DbValueIterator<'a> {
-    fn drop(&mut self) {
-        trace!("drop iter");
-        drop(self.iter.take());
-        drop(self.stmt.take());
-    }
-}
-
-struct DbValueIteratorPin<'a>(Pin<Box<DbValueIterator<'a>>>);
-
-impl<'a> Iterator for DbValueIteratorPin<'a> {
+impl<'a> Iterator for DbValueIterator<'a> {
     type Item = Result<(Value, Value)>;
 
     fn next(&mut self) -> Option<Self::Item> {
-        let mut_ref = Pin::as_mut(&mut self.0);
-        // This unsafe allows us to mutably access the iterator field
-        let next = unsafe { Pin::get_unchecked_mut(mut_ref).iter.as_mut()?.next() };
+        let next = self.with_iter_mut(|iter| iter.next());
        iter_next_row(next)
     }
 }
@@ -493,57 +461,29 @@ impl<'a> Iterator for DbValueIteratorPin<'a> {
 // it's the same except we don't hold a mutex guard,
 // only a Statement and a Rows object
 
+#[self_referencing]
 struct TxValueIterator<'a> {
     stmt: Statement<'a>,
-    iter: Option<Rows<'a>>,
-    _pin: PhantomPinned,
+    #[borrows(mut stmt)]
+    #[covariant]
+    iter: Rows<'this>,
 }
 
-impl<'a> TxValueIterator<'a> {
-    fn make<P: rusqlite::Params>(
+fn make_tx_iterator<'a, P: rusqlite::Params>(
     tx: &'a SqliteTx<'a>,
     sql: &str,
     args: P,
 ) -> TxOpResult<TxValueIter<'a>> {
     let stmt = tx.tx.prepare(sql)?;
-        let res = TxValueIterator {
-            stmt,
-            iter: None,
-            _pin: PhantomPinned,
-        };
-        let mut boxed = Box::pin(res);
-        trace!("make iterator with sql: {}", sql);
-
-        // This unsafe allows us to bypass lifetime checks
-        let stmt = unsafe { NonNull::from(&boxed.stmt).as_mut() };
-        let iter = stmt.query(args)?;
-
-        let mut_ref = Pin::as_mut(&mut boxed);
-        // This unsafe allows us to write in a field of the pinned struct
-        unsafe {
-            Pin::get_unchecked_mut(mut_ref).iter = Some(iter);
-        }
-
-        Ok(Box::new(TxValueIteratorPin(boxed)))
-    }
+    let res = TxValueIterator::try_new(stmt, |stmt| stmt.query(args))?;
+    Ok(Box::new(res))
 }
 
-impl<'a> Drop for TxValueIterator<'a> {
-    fn drop(&mut self) {
-        trace!("drop iter");
-        drop(self.iter.take());
-    }
-}
-
-struct TxValueIteratorPin<'a>(Pin<Box<TxValueIterator<'a>>>);
-
-impl<'a> Iterator for TxValueIteratorPin<'a> {
+impl<'a> Iterator for TxValueIterator<'a> {
     type Item = TxOpResult<(Value, Value)>;
 
     fn next(&mut self) -> Option<Self::Item> {
-        let mut_ref = Pin::as_mut(&mut self.0);
-        // This unsafe allows us to mutably access the iterator field
-        let next = unsafe { Pin::get_unchecked_mut(mut_ref).iter.as_mut()?.next() };
+        let next = self.with_iter_mut(|iter| iter.next());
         iter_next_row(next)
     }
 }

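Editor's note: the `try_new` and `with_*_mut` calls on the `+` side above are not hand-written; they are generated by ouroboros's `#[self_referencing]` macro, which replaces the pinned-struct unsafe code on the `-` side. A minimal, self-contained sketch of the pattern (the `OwnedLines` type is illustrative and not part of this codebase):

```rust
use ouroboros::self_referencing;

#[self_referencing]
struct OwnedLines {
    text: String,
    // `lines` borrows from `text`, its sibling field; ouroboros
    // generates a builder, `try_new`, and `with_*` accessors so no
    // unsafe pinning code has to be written by hand.
    #[borrows(text)]
    #[covariant]
    lines: Vec<&'this str>,
}

fn main() {
    let holder = OwnedLinesBuilder {
        text: "k1\nk2\nk3".to_string(),
        lines_builder: |text| text.lines().collect(),
    }
    .build();
    // Borrowing fields are only reachable through generated closures.
    holder.with_lines(|lines| assert_eq!(lines.len(), 3));
}
```

Per the diff's own comment, the split into `DbValueIterator1` plus `DbValueIterator` works around ouroboros not supporting two chained mutable borrows (MutexGuard → Statement → Rows) in a single struct; see the linked ouroboros issue #100.
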
@@ -144,7 +144,6 @@ fn test_lmdb_db() {
 fn test_sqlite_db() {
     use crate::sqlite_adapter::SqliteDb;
 
-    let manager = r2d2_sqlite::SqliteConnectionManager::memory();
-    let db = SqliteDb::new(manager, false).unwrap();
+    let db = SqliteDb::init(rusqlite::Connection::open_in_memory().unwrap());
     test_suite(db);
 }

@@ -1,6 +1,6 @@
 [package]
 name = "garage"
-version = "1.0.0"
+version = "0.10.0"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -42,7 +42,6 @@ tracing.workspace = true
 tracing-subscriber.workspace = true
 rand.workspace = true
 async-trait.workspace = true
-sha1.workspace = true
 sodiumoxide.workspace = true
 structopt.workspace = true
 git-version.workspace = true
@@ -59,7 +58,6 @@ opentelemetry.workspace = true
 opentelemetry-prometheus = { workspace = true, optional = true }
 opentelemetry-otlp = { workspace = true, optional = true }
 prometheus = { workspace = true, optional = true }
-syslog-tracing = { workspace = true, optional = true }
 
 [dev-dependencies]
 aws-config.workspace = true
@@ -98,8 +96,6 @@ kubernetes-discovery = [ "garage_rpc/kubernetes-discovery" ]
 metrics = [ "garage_api/metrics", "opentelemetry-prometheus", "prometheus" ]
 # Exporter for the OpenTelemetry Collector.
 telemetry-otlp = [ "opentelemetry-otlp" ]
-# Logging to syslog
-syslog = [ "syslog-tracing" ]
 
 # NOTE: bundled-libs and system-libs should be treat as mutually exclusive;
 # exactly one of them should be enabled.

@@ -54,8 +54,9 @@ impl AdminRpcHandler {
         let bucket_id = self
             .garage
             .bucket_helper()
-            .admin_get_existing_matching_bucket(&query.name)
-            .await?;
+            .resolve_global_bucket_name(&query.name)
+            .await?
+            .ok_or_bad_request("Bucket not found")?;
 
         let bucket = self
             .garage
@@ -156,8 +157,9 @@ impl AdminRpcHandler {
 
         let bucket_id = helper
             .bucket()
-            .admin_get_existing_matching_bucket(&query.name)
-            .await?;
+            .resolve_global_bucket_name(&query.name)
+            .await?
+            .ok_or_bad_request("Bucket not found")?;
 
         // Get the alias, but keep in minde here the bucket name
         // given in parameter can also be directly the bucket's ID.
@@ -233,8 +235,9 @@ impl AdminRpcHandler {
 
         let bucket_id = helper
             .bucket()
-            .admin_get_existing_matching_bucket(&query.existing_bucket)
-            .await?;
+            .resolve_global_bucket_name(&query.existing_bucket)
+            .await?
+            .ok_or_bad_request("Bucket not found")?;
 
         if let Some(key_pattern) = &query.local {
             let key = helper.key().get_existing_matching_key(key_pattern).await?;
@@ -304,8 +307,9 @@ impl AdminRpcHandler {
 
         let bucket_id = helper
             .bucket()
-            .admin_get_existing_matching_bucket(&query.bucket)
-            .await?;
+            .resolve_global_bucket_name(&query.bucket)
+            .await?
+            .ok_or_bad_request("Bucket not found")?;
         let key = helper
             .key()
             .get_existing_matching_key(&query.key_pattern)
@@ -339,8 +343,9 @@ impl AdminRpcHandler {
 
         let bucket_id = helper
             .bucket()
-            .admin_get_existing_matching_bucket(&query.bucket)
-            .await?;
+            .resolve_global_bucket_name(&query.bucket)
+            .await?
+            .ok_or_bad_request("Bucket not found")?;
         let key = helper
             .key()
             .get_existing_matching_key(&query.key_pattern)
@@ -373,8 +378,9 @@ impl AdminRpcHandler {
         let bucket_id = self
             .garage
             .bucket_helper()
-            .admin_get_existing_matching_bucket(&query.bucket)
-            .await?;
+            .resolve_global_bucket_name(&query.bucket)
+            .await?
+            .ok_or_bad_request("Bucket not found")?;
 
         let mut bucket = self
             .garage
@@ -414,8 +420,9 @@ impl AdminRpcHandler {
         let bucket_id = self
             .garage
             .bucket_helper()
-            .admin_get_existing_matching_bucket(&query.bucket)
-            .await?;
+            .resolve_global_bucket_name(&query.bucket)
+            .await?
+            .ok_or_bad_request("Bucket not found")?;
 
         let mut bucket = self
             .garage
@@ -472,8 +479,9 @@ impl AdminRpcHandler {
             bucket_ids.push(
                 self.garage
                     .bucket_helper()
-                    .admin_get_existing_matching_bucket(b)
-                    .await?,
+                    .resolve_global_bucket_name(b)
+                    .await?
+                    .ok_or_bad_request(format!("Bucket not found: {}", b))?,
             );
         }
 

@@ -44,7 +44,6 @@ pub enum AdminRpc {
     Stats(StatsOpt),
     Worker(WorkerOperation),
     BlockOperation(BlockOperation),
-    MetaOperation(MetaOperation),
 
     // Replies
     Ok(String),
@@ -466,43 +465,6 @@ impl AdminRpcHandler {
             )]))
         }
     }
-
-    // ================ META DB COMMANDS ====================
-
-    async fn handle_meta_cmd(self: &Arc<Self>, mo: &MetaOperation) -> Result<AdminRpc, Error> {
-        match mo {
-            MetaOperation::Snapshot { all: true } => {
-                let to = self.garage.system.cluster_layout().all_nodes().to_vec();
-
-                let resps = futures::future::join_all(to.iter().map(|to| async move {
-                    let to = (*to).into();
-                    self.endpoint
-                        .call(
-                            &to,
-                            AdminRpc::MetaOperation(MetaOperation::Snapshot { all: false }),
-                            PRIO_NORMAL,
-                        )
-                        .await
-                }))
-                .await;
-
-                let mut ret = vec![];
-                for (to, resp) in to.iter().zip(resps.iter()) {
-                    let res_str = match resp {
-                        Ok(_) => "ok".to_string(),
-                        Err(e) => format!("error: {}", e),
-                    };
-                    ret.push(format!("{:?}\t{}", to, res_str));
-                }
-
-                Ok(AdminRpc::Ok(format_table_to_string(ret)))
-            }
-            MetaOperation::Snapshot { all: false } => {
-                garage_model::snapshot::async_snapshot_metadata(&self.garage).await?;
-                Ok(AdminRpc::Ok("Snapshot has been saved.".into()))
-            }
-        }
-    }
 }
 
 #[async_trait]
@@ -519,7 +481,6 @@ impl EndpointHandler<AdminRpc> for AdminRpcHandler {
             AdminRpc::Stats(opt) => self.handle_stats(opt.clone()).await,
             AdminRpc::Worker(wo) => self.handle_worker_cmd(wo).await,
             AdminRpc::BlockOperation(bo) => self.handle_block_cmd(bo).await,
-            AdminRpc::MetaOperation(mo) => self.handle_meta_cmd(mo).await,
             m => Err(GarageError::unexpected_rpc_message(m).into()),
         }
     }

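Editor's note: the removed `Snapshot { all: true }` handler above uses the common join_all fan-out: fire one RPC per node concurrently, then zip replies back to their targets. A runnable sketch of just that pattern, with a hypothetical `call_node` standing in for `self.endpoint.call(...)`:

```rust
use futures::future::join_all;

// Hypothetical stand-in for the RPC endpoint call.
async fn call_node(node: u32) -> Result<(), String> {
    if node % 2 == 0 {
        Ok(())
    } else {
        Err(format!("node {} unreachable", node))
    }
}

#[tokio::main]
async fn main() {
    let nodes = vec![1u32, 2, 3, 4];
    // Issue all calls concurrently; join_all preserves input order,
    // so zipping results back onto `nodes` is sound.
    let resps = join_all(nodes.iter().map(|n| call_node(*n))).await;
    for (node, resp) in nodes.iter().zip(resps.iter()) {
        let res = match resp {
            Ok(_) => "ok".to_string(),
            Err(e) => format!("error: {}", e),
        };
        println!("{}\t{}", node, res);
    }
}
```
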
@@ -41,9 +41,6 @@ pub async fn cli_command_dispatch(
         Command::Block(bo) => {
             cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::BlockOperation(bo)).await
         }
-        Command::Meta(mo) => {
-            cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::MetaOperation(mo)).await
-        }
         _ => unreachable!(),
     }
 }
@@ -57,10 +54,6 @@ pub async fn cmd_status(rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID) ->
         vec!["ID\tHostname\tAddress\tTags\tZone\tCapacity\tDataAvail".to_string()];
     for adv in status.iter().filter(|adv| adv.is_up) {
         let host = adv.status.hostname.as_deref().unwrap_or("?");
-        let addr = match adv.addr {
-            Some(addr) => addr.to_string(),
-            None => "N/A".to_string(),
-        };
         if let Some(NodeRoleV(Some(cfg))) = layout.current().roles.get(&adv.id) {
             let data_avail = match &adv.status.data_disk_avail {
                 _ if cfg.capacity.is_none() => "N/A".into(),
@@ -75,7 +68,7 @@ pub async fn cmd_status(rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID) ->
                 "{id:?}\t{host}\t{addr}\t[{tags}]\t{zone}\t{capacity}\t{data_avail}",
                 id = adv.id,
                 host = host,
-                addr = addr,
+                addr = adv.addr,
                 tags = cfg.tags.join(","),
                 zone = cfg.zone,
                 capacity = cfg.capacity_string(),
@@ -95,7 +88,7 @@ pub async fn cmd_status(rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID) ->
                 "{id:?}\t{host}\t{addr}\t[{tags}]\t{zone}\tdraining metadata...",
                 id = adv.id,
                 host = host,
-                addr = addr,
+                addr = adv.addr,
                 tags = cfg.tags.join(","),
                 zone = cfg.zone,
             ));
@@ -108,7 +101,7 @@ pub async fn cmd_status(rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID) ->
                 "{id:?}\t{h}\t{addr}\t\t\t{new_role}",
                 id = adv.id,
                 h = host,
-                addr = addr,
+                addr = adv.addr,
                 new_role = new_role,
             ));
         }
@@ -124,7 +117,8 @@ pub async fn cmd_status(rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID) ->
 
     let tf = timeago::Formatter::new();
     let mut drain_msg = false;
-    let mut failed_nodes = vec!["ID\tHostname\tTags\tZone\tCapacity\tLast seen".to_string()];
+    let mut failed_nodes =
+        vec!["ID\tHostname\tAddress\tTags\tZone\tCapacity\tLast seen".to_string()];
     let mut listed = HashSet::new();
     for ver in layout.versions.iter().rev() {
         for (node, _, role) in ver.roles.items().iter() {
@@ -145,14 +139,15 @@ pub async fn cmd_status(rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID) ->
 
             // Node is in a layout version, is not a gateway node, and is not up:
             // it is in a failed state, add proper line to the output
-            let (host, last_seen) = match adv {
+            let (host, addr, last_seen) = match adv {
                 Some(adv) => (
                     adv.status.hostname.as_deref().unwrap_or("?"),
+                    adv.addr.to_string(),
                     adv.last_seen_secs_ago
                         .map(|s| tf.convert(Duration::from_secs(s)))
                         .unwrap_or_else(|| "never seen".into()),
                 ),
-                None => ("??", "never seen".into()),
+                None => ("??", "??".into(), "never seen".into()),
             };
             let capacity = if ver.version == layout.current().version {
                 cfg.capacity_string()
@@ -161,9 +156,10 @@ pub async fn cmd_status(rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID) ->
                 "draining metadata...".to_string()
             };
             failed_nodes.push(format!(
-                "{id:?}\t{host}\t[{tags}]\t{zone}\t{capacity}\t{last_seen}",
+                "{id:?}\t{host}\t{addr}\t[{tags}]\t{zone}\t{capacity}\t{last_seen}",
                 id = node,
                 host = host,
+                addr = addr,
                 tags = cfg.tags.join(","),
                 zone = cfg.zone,
                 capacity = capacity,

@@ -48,14 +48,10 @@ pub enum Command {
     #[structopt(name = "worker", version = garage_version())]
     Worker(WorkerOperation),
 
-    /// Low-level node-local debug operations on data blocks
+    /// Low-level debug operations on data blocks
     #[structopt(name = "block", version = garage_version())]
     Block(BlockOperation),
 
-    /// Operations on the metadata db
-    #[structopt(name = "meta", version = garage_version())]
-    Meta(MetaOperation),
-
     /// Convert metadata db between database engine formats
     #[structopt(name = "convert-db", version = garage_version())]
     ConvertDb(convert_db::ConvertDbOpt),
@@ -473,11 +469,8 @@ pub enum RepairWhat {
     #[structopt(name = "mpu", version = garage_version())]
     MultipartUploads,
     /// Repropagate version deletions to the block ref table
-    #[structopt(name = "block-refs", version = garage_version())]
+    #[structopt(name = "block_refs", version = garage_version())]
     BlockRefs,
-    /// Recalculate block reference counters
-    #[structopt(name = "block-rc", version = garage_version())]
-    BlockRc,
     /// Verify integrity of all blocks on disc
     #[structopt(name = "scrub", version = garage_version())]
     Scrub {
@@ -618,14 +611,3 @@ pub enum BlockOperation {
         blocks: Vec<String>,
     },
 }
-
-#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone, Copy)]
-pub enum MetaOperation {
-    /// Save a snapshot of the metadata db file
-    #[structopt(name = "snapshot", version = garage_version())]
-    Snapshot {
-        /// Run on all nodes instead of only local node
-        #[structopt(long = "all")]
-        all: bool,
-    },
-}

@@ -451,7 +451,7 @@ pub fn print_block_info(
     if refcount != nondeleted_count {
         println!();
         println!(
-            "Warning: refcount does not match number of non-deleted versions, you should try `garage repair block-rc`."
+            "Warning: refcount does not match number of non-deleted versions (see issue #644)."
         );
     }
 }

@@ -138,8 +138,17 @@ async fn main() {
     let opt = Opt::from_clap(&Opt::clap().version(version.as_str()).get_matches());
 
     // Initialize logging as well as other libraries used in Garage
-    init_logging(&opt);
+    if std::env::var("RUST_LOG").is_err() {
+        let default_log = match &opt.cmd {
+            Command::Server => "netapp=info,garage=info",
+            _ => "netapp=warn,garage=warn",
+        };
+        std::env::set_var("RUST_LOG", default_log)
+    }
+    tracing_subscriber::fmt()
+        .with_writer(std::io::stderr)
+        .with_env_filter(tracing_subscriber::filter::EnvFilter::from_default_env())
+        .init();
     sodiumoxide::init().expect("Unable to init sodiumoxide");
 
     let res = match opt.cmd {
@@ -162,58 +171,6 @@ async fn main() {
     }
 }
 
-fn init_logging(opt: &Opt) {
-    if std::env::var("RUST_LOG").is_err() {
-        let default_log = match &opt.cmd {
-            Command::Server => "netapp=info,garage=info",
-            _ => "netapp=warn,garage=warn",
-        };
-        std::env::set_var("RUST_LOG", default_log)
-    }
-
-    let env_filter = tracing_subscriber::filter::EnvFilter::from_default_env();
-
-    if std::env::var("GARAGE_LOG_TO_SYSLOG")
-        .map(|x| x == "1" || x == "true")
-        .unwrap_or(false)
-    {
-        #[cfg(feature = "syslog")]
-        {
-            use std::ffi::CStr;
-            use syslog_tracing::{Facility, Options, Syslog};
-
-            let syslog = Syslog::new(
-                CStr::from_bytes_with_nul(b"garage\0").unwrap(),
-                Options::LOG_PID | Options::LOG_PERROR,
-                Facility::Daemon,
-            )
-            .expect("Unable to init syslog");
-
-            tracing_subscriber::fmt()
-                .with_writer(syslog)
-                .with_env_filter(env_filter)
-                .with_ansi(false) // disable ANSI escape sequences (colours)
-                .with_file(false)
-                .with_level(false)
-                .without_time()
-                .compact()
-                .init();
-
-            return;
-        }
-        #[cfg(not(feature = "syslog"))]
-        {
-            eprintln!("Syslog support is not enabled in this build.");
-            std::process::exit(1);
-        }
-    }
-
-    tracing_subscriber::fmt()
-        .with_writer(std::io::stderr)
-        .with_env_filter(env_filter)
-        .init();
-}
-
 async fn cli_command(opt: Opt) -> Result<(), Error> {
     let config = if (opt.secrets.rpc_secret.is_none() && opt.secrets.rpc_secret_file.is_none())
         || opt.rpc_host.is_none()

@@ -4,7 +4,6 @@ use std::time::Duration;
 use async_trait::async_trait;
 use tokio::sync::watch;
 
-use garage_block::manager::BlockManager;
 use garage_block::repair::ScrubWorkerCommand;
 
 use garage_model::garage::Garage;
@@ -17,14 +16,11 @@ use garage_table::replication::*;
 use garage_table::*;
 
 use garage_util::background::*;
-use garage_util::data::*;
 use garage_util::error::Error;
 use garage_util::migrate::Migrate;
 
 use crate::*;
 
-const RC_REPAIR_ITER_COUNT: usize = 64;
-
 pub async fn launch_online_repair(
     garage: &Arc<Garage>,
     bg: &BackgroundRunner,
@@ -51,13 +47,6 @@ pub async fn launch_online_repair(
             info!("Repairing the block refs table");
             bg.spawn_worker(TableRepairWorker::new(garage.clone(), RepairBlockRefs));
         }
-        RepairWhat::BlockRc => {
-            info!("Repairing the block reference counters");
-            bg.spawn_worker(BlockRcRepair::new(
-                garage.block_manager.clone(),
-                garage.block_ref_table.clone(),
-            ));
-        }
         RepairWhat::Blocks => {
             info!("Repairing the stored blocks");
             bg.spawn_worker(garage_block::repair::RepairWorker::new(
@@ -293,98 +282,3 @@ impl TableRepair for RepairMpu {
         Ok(false)
     }
 }
-
-// ===== block reference counter repair =====
-
-pub struct BlockRcRepair {
-    block_manager: Arc<BlockManager>,
-    block_ref_table: Arc<Table<BlockRefTable, TableShardedReplication>>,
-    cursor: Hash,
-    counter: u64,
-    repairs: u64,
-}
-
-impl BlockRcRepair {
-    fn new(
-        block_manager: Arc<BlockManager>,
-        block_ref_table: Arc<Table<BlockRefTable, TableShardedReplication>>,
-    ) -> Self {
-        Self {
-            block_manager,
-            block_ref_table,
-            cursor: [0u8; 32].into(),
-            counter: 0,
-            repairs: 0,
-        }
-    }
-}
-
-#[async_trait]
-impl Worker for BlockRcRepair {
-    fn name(&self) -> String {
-        format!("Block refcount repair worker")
-    }
-
-    fn status(&self) -> WorkerStatus {
-        WorkerStatus {
-            progress: Some(format!("{} ({})", self.counter, self.repairs)),
-            ..Default::default()
-        }
-    }
-
-    async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
-        for _i in 0..RC_REPAIR_ITER_COUNT {
-            let next1 = self
-                .block_manager
-                .rc
-                .rc_table
-                .range(self.cursor.as_slice()..)?
-                .next()
-                .transpose()?
-                .map(|(k, _)| Hash::try_from(k.as_slice()).unwrap());
-            let next2 = self
-                .block_ref_table
-                .data
-                .store
-                .range(self.cursor.as_slice()..)?
-                .next()
-                .transpose()?
-                .map(|(k, _)| Hash::try_from(&k[..32]).unwrap());
-            let next = match (next1, next2) {
-                (Some(k1), Some(k2)) => std::cmp::min(k1, k2),
-                (Some(k), None) | (None, Some(k)) => k,
-                (None, None) => {
-                    info!(
-                        "{}: finished, done {}, fixed {}",
-                        self.name(),
-                        self.counter,
-                        self.repairs
-                    );
-                    return Ok(WorkerState::Done);
-                }
-            };
-
-            if self.block_manager.rc.recalculate_rc(&next)?.1 {
-                self.repairs += 1;
-            }
-            self.counter += 1;
-            if let Some(next_incr) = next.increment() {
-                self.cursor = next_incr;
-            } else {
-                info!(
-                    "{}: finished, done {}, fixed {}",
-                    self.name(),
-                    self.counter,
-                    self.repairs
-                );
-                return Ok(WorkerState::Done);
-            }
-        }
-
-        Ok(WorkerState::Busy)
-    }
-
-    async fn wait_for_work(&mut self) -> WorkerState {
-        unreachable!()
-    }
-}

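Editor's note: the removed `BlockRcRepair::work` above walks two sorted key spaces (the refcount table and the block-ref table) in a single pass by always advancing to the smaller of the two next keys, so it visits the union of keys in order. A small standalone sketch of that union walk over plain integers, not tied to any Garage types:

```rust
// Walk the union of two sorted key sets: take the smaller of the two
// next keys at or past the cursor, process it, then move past it.
fn union_walk(a: &[u32], b: &[u32]) -> Vec<u32> {
    let mut out = Vec::new();
    let mut cursor = 0u32;
    loop {
        let next1 = a.iter().copied().find(|&k| k >= cursor);
        let next2 = b.iter().copied().find(|&k| k >= cursor);
        let next = match (next1, next2) {
            (Some(k1), Some(k2)) => k1.min(k2),
            (Some(k), None) | (None, Some(k)) => k,
            (None, None) => break, // both cursors exhausted
        };
        out.push(next); // the real worker recalculates the refcount here
        match next.checked_add(1) {
            Some(c) => cursor = c,
            None => break, // key space exhausted, mirrors `next.increment()`
        }
    }
    out
}

fn main() {
    assert_eq!(union_walk(&[1, 4, 9], &[2, 4, 7]), vec![1, 2, 4, 7, 9]);
}
```
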
@@ -51,7 +51,7 @@ pub async fn run_server(config_file: PathBuf, secrets: Secrets) -> Result<(), Er
     let (background, await_background_done) = BackgroundRunner::new(watch_cancel.clone());
 
     info!("Spawning Garage workers...");
-    garage.spawn_workers(&background)?;
+    garage.spawn_workers(&background);
 
     if config.admin.trace_sink.is_some() {
         info!("Initialize tracing...");

@@ -42,10 +42,6 @@ impl Instance {
             .ok()
             .unwrap_or_else(|| env::temp_dir().join(format!("garage-integ-test-{}", port)));
 
-        let db_engine = env::var("GARAGE_TEST_INTEGRATION_DB_ENGINE")
-            .ok()
-            .unwrap_or_else(|| "lmdb".into());
-
         // Clean test runtime directory
         if path.exists() {
             fs::remove_dir_all(&path).expect("Could not clean test runtime directory");
@@ -56,7 +52,7 @@ impl Instance {
             r#"
 metadata_dir = "{path}/meta"
 data_dir = "{path}/data"
-db_engine = "{db_engine}"
+db_engine = "lmdb"
 
 replication_factor = 1
 

@@ -10,7 +10,6 @@ use crate::common;
 use crate::json_body;
 
 #[tokio::test]
-#[ignore = "currently broken"]
 async fn test_poll_item() {
     let ctx = common::context();
     let bucket = ctx.create_bucket("test-k2v-poll-item");
@@ -99,7 +98,6 @@ async fn test_poll_item() {
 }
 
 #[tokio::test]
-#[ignore = "currently broken"]
 async fn test_poll_range() {
     let ctx = common::context();
     let bucket = ctx.create_bucket("test-k2v-poll-range");

@@ -1,7 +1,6 @@
 use crate::common;
 use aws_sdk_s3::primitives::ByteStream;
-use aws_sdk_s3::types::{ChecksumAlgorithm, CompletedMultipartUpload, CompletedPart};
-use base64::prelude::*;
+use aws_sdk_s3::types::{CompletedMultipartUpload, CompletedPart};
 
 const SZ_5MB: usize = 5 * 1024 * 1024;
 const SZ_10MB: usize = 10 * 1024 * 1024;
@@ -190,153 +189,6 @@ async fn test_multipart_upload() {
     }
 }
 
-#[tokio::test]
-async fn test_multipart_with_checksum() {
-    let ctx = common::context();
-    let bucket = ctx.create_bucket("testmpu-cksum");
-
-    let u1 = vec![0x11; SZ_5MB];
-    let u2 = vec![0x22; SZ_5MB];
-    let u3 = vec![0x33; SZ_5MB];
-
-    let ck1 = calculate_sha1(&u1);
-    let ck2 = calculate_sha1(&u2);
-    let ck3 = calculate_sha1(&u3);
-
-    let up = ctx
-        .client
-        .create_multipart_upload()
-        .bucket(&bucket)
-        .checksum_algorithm(ChecksumAlgorithm::Sha1)
-        .key("a")
-        .send()
-        .await
-        .unwrap();
-    assert!(up.upload_id.is_some());
-
-    let uid = up.upload_id.as_ref().unwrap();
-
-    let p1 = ctx
-        .client
-        .upload_part()
-        .bucket(&bucket)
-        .key("a")
-        .upload_id(uid)
-        .part_number(1)
-        .checksum_sha1(&ck1)
-        .body(ByteStream::from(u1.clone()))
-        .send()
-        .await
-        .unwrap();
-
-    // wrong checksum value should return an error
-    let err1 = ctx
-        .client
-        .upload_part()
-        .bucket(&bucket)
-        .key("a")
-        .upload_id(uid)
-        .part_number(2)
-        .checksum_sha1(&ck1)
-        .body(ByteStream::from(u2.clone()))
-        .send()
-        .await;
-    assert!(err1.is_err());
-
-    let p2 = ctx
-        .client
-        .upload_part()
-        .bucket(&bucket)
-        .key("a")
-        .upload_id(uid)
-        .part_number(2)
-        .checksum_sha1(&ck2)
-        .body(ByteStream::from(u2))
-        .send()
-        .await
-        .unwrap();
-
-    let p3 = ctx
-        .client
-        .upload_part()
-        .bucket(&bucket)
-        .key("a")
-        .upload_id(uid)
-        .part_number(3)
-        .checksum_sha1(&ck3)
-        .body(ByteStream::from(u3.clone()))
-        .send()
-        .await
-        .unwrap();
-
-    {
-        let r = ctx
-            .client
-            .list_parts()
-            .bucket(&bucket)
-            .key("a")
-            .upload_id(uid)
-            .send()
-            .await
-            .unwrap();
-        let parts = r.parts.unwrap();
-        assert_eq!(parts.len(), 3);
-        assert!(parts[0].checksum_crc32.is_none());
-        assert!(parts[0].checksum_crc32_c.is_none());
-        assert!(parts[0].checksum_sha256.is_none());
-        assert_eq!(parts[0].checksum_sha1.as_deref().unwrap(), ck1);
-        assert_eq!(parts[1].checksum_sha1.as_deref().unwrap(), ck2);
-        assert_eq!(parts[2].checksum_sha1.as_deref().unwrap(), ck3);
-    }
-
-    let cmp = CompletedMultipartUpload::builder()
-        .parts(
-            CompletedPart::builder()
-                .part_number(1)
-                .checksum_sha1(&ck1)
-                .e_tag(p1.e_tag.unwrap())
-                .build(),
-        )
-        .parts(
-            CompletedPart::builder()
-                .part_number(2)
-                .checksum_sha1(&ck2)
-                .e_tag(p2.e_tag.unwrap())
-                .build(),
-        )
-        .parts(
-            CompletedPart::builder()
-                .part_number(3)
-                .checksum_sha1(&ck3)
-                .e_tag(p3.e_tag.unwrap())
-                .build(),
-        )
-        .build();
-
-    let expected_checksum = calculate_sha1(
-        &vec![
-            BASE64_STANDARD.decode(&ck1).unwrap(),
-            BASE64_STANDARD.decode(&ck2).unwrap(),
-            BASE64_STANDARD.decode(&ck3).unwrap(),
-        ]
-        .concat(),
-    );
-
-    let res = ctx
-        .client
-        .complete_multipart_upload()
-        .bucket(&bucket)
-        .key("a")
-        .upload_id(uid)
-        .checksum_sha1(expected_checksum.clone())
-        .multipart_upload(cmp)
-        .send()
-        .await
-        .unwrap();
-
-    assert_eq!(res.checksum_sha1, Some(expected_checksum));
-}
-
 #[tokio::test]
 async fn test_uploadlistpart() {
     let ctx = common::context();
@@ -772,11 +624,3 @@ async fn test_uploadpartcopy() {
     assert_eq!(real_obj.len(), exp_obj.len());
     assert_eq!(real_obj, exp_obj);
 }
-
-fn calculate_sha1(bytes: &[u8]) -> String {
-    use sha1::{Digest, Sha1};
-
-    let mut hasher = Sha1::new();
-    hasher.update(bytes);
-    BASE64_STANDARD.encode(&hasher.finalize()[..])
-}

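Note on the test removed above: S3 defines a multipart object's checksum as a checksum of checksums, which is what the deleted assertions verify — the final `checksum_sha1` is the SHA-1 of the concatenated raw (binary) digests of the parts, base64-encoded. A minimal sketch of that composite computation, assuming the same `sha1` and `base64` crates the removed test used:

```rust
use base64::prelude::*;
use sha1::{Digest, Sha1};

// Composite multipart checksum: decode each part's base64 SHA-1,
// concatenate the raw digests, hash the concatenation, re-encode.
fn composite_sha1(part_checksums_b64: &[&str]) -> String {
    let mut concatenated = Vec::new();
    for ck in part_checksums_b64 {
        concatenated.extend(BASE64_STANDARD.decode(ck).unwrap());
    }
    let mut hasher = Sha1::new();
    hasher.update(&concatenated);
    BASE64_STANDARD.encode(&hasher.finalize()[..])
}
```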
@@ -1,6 +1,6 @@
 [package]
 name = "garage_model"
-version = "1.0.0"
+version = "0.10.0"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -29,7 +29,6 @@ err-derive.workspace = true
 hex.workspace = true
 http.workspace = true
 base64.workspace = true
-parse_duration.workspace = true
 tracing.workspace = true
 rand.workspace = true
 zstd.workspace = true

@@ -170,7 +170,14 @@ impl Garage {
         };
 
         info!("Initialize block manager...");
-        let block_manager = BlockManager::new(&db, &config, data_rep_param, system.clone())?;
+        let block_manager = BlockManager::new(
+            &db,
+            config.data_dir.clone(),
+            config.data_fsync,
+            config.compression_level,
+            data_rep_param,
+            system.clone(),
+        )?;
         block_manager.register_bg_vars(&mut bg_vars);
 
         // ---- admin tables ----
@@ -247,14 +254,6 @@ impl Garage {
         #[cfg(feature = "k2v")]
         let k2v = GarageK2V::new(system.clone(), &db, meta_rep_param);
 
-        // ---- setup block refcount recalculation ----
-        // this function can be used to fix inconsistencies in the RC table
-        block_manager.set_recalc_rc(vec![
-            block_ref_recount_fn(&block_ref_table),
-            // other functions could be added here if we had other tables
-            // that hold references to data blocks
-        ]);
-
         // -- done --
         Ok(Arc::new(Self {
             config,
@@ -279,7 +278,7 @@ impl Garage {
         }))
     }
 
-    pub fn spawn_workers(self: &Arc<Self>, bg: &BackgroundRunner) -> Result<(), Error> {
+    pub fn spawn_workers(self: &Arc<Self>, bg: &BackgroundRunner) {
         self.block_manager.spawn_workers(bg);
 
         self.bucket_table.spawn_workers(bg);
@@ -300,23 +299,6 @@ impl Garage {
 
         #[cfg(feature = "k2v")]
         self.k2v.spawn_workers(bg);
-
-        if let Some(itv) = self.config.metadata_auto_snapshot_interval.as_deref() {
-            let interval = parse_duration::parse(itv)
-                .ok_or_message("Invalid `metadata_auto_snapshot_interval`")?;
-            if interval < std::time::Duration::from_secs(600) {
-                return Err(Error::Message(
-                    "metadata_auto_snapshot_interval too small or negative".into(),
-                ));
-            }
-
-            bg.spawn_worker(crate::snapshot::AutoSnapshotWorker::new(
-                self.clone(),
-                interval,
-            ));
-        }
-
-        Ok(())
     }
 
     pub fn bucket_helper(&self) -> helper::bucket::BucketHelper {

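The `spawn_workers` hunk above removes the automatic metadata-snapshot scheduling; in the base branch, the interval comes from the `metadata_auto_snapshot_interval` config string, parsed with the `parse_duration` crate and rejected below a ten-minute floor. A small sketch of that validation logic, with the error type simplified:

```rust
use std::time::Duration;

// Parse a human-readable interval (e.g. "6h", "30min") and enforce the
// ten-minute minimum applied by the removed code.
fn parse_snapshot_interval(itv: &str) -> Result<Duration, String> {
    let interval = parse_duration::parse(itv)
        .map_err(|_| "Invalid `metadata_auto_snapshot_interval`".to_string())?;
    if interval < Duration::from_secs(600) {
        return Err("metadata_auto_snapshot_interval too small or negative".into());
    }
    Ok(interval)
}
```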
@@ -67,49 +67,6 @@ impl<'a> BucketHelper<'a> {
         }
     }
 
-    /// Find a bucket by its global alias or a prefix of its uuid
-    pub async fn admin_get_existing_matching_bucket(
-        &self,
-        pattern: &String,
-    ) -> Result<Uuid, Error> {
-        if let Some(uuid) = self.resolve_global_bucket_name(pattern).await? {
-            return Ok(uuid);
-        } else if pattern.len() >= 2 {
-            let hexdec = pattern
-                .get(..pattern.len() & !1)
-                .and_then(|x| hex::decode(x).ok());
-            if let Some(hex) = hexdec {
-                let mut start = [0u8; 32];
-                start
-                    .as_mut_slice()
-                    .get_mut(..hex.len())
-                    .ok_or_bad_request("invalid length")?
-                    .copy_from_slice(&hex);
-                let mut candidates = self
-                    .0
-                    .bucket_table
-                    .get_range(
-                        &EmptyKey,
-                        Some(start.into()),
-                        Some(DeletedFilter::NotDeleted),
-                        10,
-                        EnumerationOrder::Forward,
-                    )
-                    .await?
-                    .into_iter()
-                    .collect::<Vec<_>>();
-                candidates.retain(|x| hex::encode(x.id).starts_with(pattern));
-                if candidates.len() == 1 {
-                    return Ok(candidates.into_iter().next().unwrap().id);
-                }
-            }
-        }
-        Err(Error::BadRequest(format!(
-            "Bucket not found / several matching buckets: {}",
-            pattern
-        )))
-    }
-
     /// Returns a Bucket if it is present in bucket table,
     /// even if it is in deleted state. Querying a non-existing
     /// bucket ID returns an internal error.

@@ -219,11 +219,12 @@ impl K2VRpcHandler {
             },
             sort_key,
         };
+        // TODO figure this out with write sets, is it still appropriate???
         let nodes = self
             .item_table
             .data
             .replication
-            .storage_nodes(&poll_key.partition.hash());
+            .read_nodes(&poll_key.partition.hash());
 
         let rpc = self.system.rpc_helper().try_call_many(
             &self.endpoint,
@@ -238,7 +239,8 @@ impl K2VRpcHandler {
                 .send_all_at_once(true)
                 .without_timeout(),
         );
-        let timeout_duration = Duration::from_millis(timeout_msec);
+        let timeout_duration =
+            Duration::from_millis(timeout_msec) + self.system.rpc_helper().rpc_timeout();
         let resps = select! {
             r = rpc => r?,
             _ = tokio::time::sleep(timeout_duration) => return Ok(None),
@@ -280,11 +282,12 @@ impl K2VRpcHandler {
         seen.restrict(&range);
 
         // Prepare PollRange RPC to send to the storage nodes responsible for the parititon
+        // TODO figure this out with write sets, does it still work????
         let nodes = self
             .item_table
             .data
             .replication
-            .storage_nodes(&range.partition.hash());
+            .read_nodes(&range.partition.hash());
         let quorum = self.item_table.data.replication.read_quorum();
         let msg = K2VRpc::PollRange {
             range,
@@ -300,7 +303,7 @@ impl K2VRpcHandler {
             .map(|node| {
                 self.system
                     .rpc_helper()
-                    .call(&self.endpoint, *node, msg.clone(), rs.clone())
+                    .call(&self.endpoint, *node, msg.clone(), rs)
             })
             .collect::<FuturesUnordered<_>>();
 
@@ -317,7 +320,9 @@ impl K2VRpcHandler {
         // kind: all items produced by that node until time ts have been returned, so we can
         // bump the entry in the global vector clock and possibly remove some item-specific
         // vector clocks)
-        let mut deadline = Instant::now() + Duration::from_millis(timeout_msec);
+        let mut deadline = Instant::now()
+            + Duration::from_millis(timeout_msec)
+            + self.system.rpc_helper().rpc_timeout();
         let mut resps = vec![];
         let mut errors = vec![];
         loop {

@@ -15,4 +15,3 @@ pub mod s3;
 
 pub mod garage;
 pub mod helper;
-pub mod snapshot;

@@ -3,12 +3,8 @@ use std::sync::Arc;
 use garage_db as db;
 
 use garage_util::data::*;
-use garage_util::error::*;
-use garage_util::migrate::Migrate;
 
-use garage_block::CalculateRefcount;
 use garage_table::crdt::Crdt;
-use garage_table::replication::TableShardedReplication;
 use garage_table::*;
 
 use garage_block::manager::*;
@@ -88,38 +84,3 @@ impl TableSchema for BlockRefTable {
         filter.apply(entry.deleted.get())
     }
 }
-
-pub fn block_ref_recount_fn(
-    block_ref_table: &Arc<Table<BlockRefTable, TableShardedReplication>>,
-) -> CalculateRefcount {
-    let table = Arc::downgrade(block_ref_table);
-    Box::new(move |tx: &db::Transaction, block: &Hash| {
-        let table = table
-            .upgrade()
-            .ok_or_message("cannot upgrade weak ptr to block_ref_table")
-            .map_err(db::TxError::Abort)?;
-        Ok(calculate_refcount(&table, tx, block)?)
-    })
-}
-
-fn calculate_refcount(
-    block_ref_table: &Table<BlockRefTable, TableShardedReplication>,
-    tx: &db::Transaction,
-    block: &Hash,
-) -> db::TxResult<usize, Error> {
-    let mut result = 0;
-    for entry in tx.range(&block_ref_table.data.store, block.as_slice()..)? {
-        let (key, value) = entry?;
-        if &key[..32] != block.as_slice() {
-            break;
-        }
-        let value = BlockRef::decode(&value)
-            .ok_or_message("could not decode block_ref")
-            .map_err(db::TxError::Abort)?;
-        assert_eq!(value.block, *block);
-        if !value.deleted.get() {
-            result += 1;
-        }
-    }
-    Ok(result)
-}

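A side note on the removed `block_ref_recount_fn`: the callback hands the block manager a `Weak` table handle rather than an `Arc`, so the closure cannot keep the table alive and create a reference cycle between the block manager and the table. A standalone sketch of that pattern, with placeholder types (the real code scans the block_ref table where the comment indicates):

```rust
use std::sync::{Arc, Weak};

struct Table; // stand-in for the real block_ref_table

// The closure captures a Weak handle and upgrades it on every call,
// failing gracefully if the table has already been dropped.
fn make_recount_fn(table: &Arc<Table>) -> Box<dyn Fn() -> Result<usize, String>> {
    let weak: Weak<Table> = Arc::downgrade(table);
    Box::new(move || {
        let _table = weak.upgrade().ok_or("cannot upgrade weak ptr")?;
        Ok(0) // the real callback recounts references here
    })
}
```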
@@ -17,7 +17,6 @@ pub const PARTS: &str = "parts";
 pub const BYTES: &str = "bytes";
 
 mod v09 {
-    use crate::s3::object_table::ChecksumValue;
     use garage_util::crdt;
     use garage_util::data::Uuid;
     use serde::{Deserialize, Serialize};
@@ -62,9 +61,6 @@ mod v09 {
         pub version: Uuid,
         /// ETag of the content of this part (known only once done uploading)
         pub etag: Option<String>,
-        /// Checksum requested by x-amz-checksum-algorithm
-        #[serde(default)]
-        pub checksum: Option<ChecksumValue>,
         /// Size of this part (known only once done uploading)
         pub size: Option<u64>,
     }
@@ -159,11 +155,6 @@ impl Crdt for MpuPart {
             (Some(x), Some(y)) if x < y => other.size,
             (x, _) => x,
         };
-        self.checksum = match (self.checksum.take(), &other.checksum) {
-            (None, Some(_)) => other.checksum.clone(),
-            (Some(x), Some(y)) if x < *y => other.checksum.clone(),
-            (x, _) => x,
-        };
     }
 }
 

@@ -208,8 +208,6 @@ mod v010 {
         Uploading {
             /// Indicates whether this is a multipart upload
             multipart: bool,
-            /// Checksum algorithm to use
-            checksum_algorithm: Option<ChecksumAlgorithm>,
             /// Encryption params + headers to be included in the final object
             encryption: ObjectVersionEncryption,
         },
@@ -249,10 +247,10 @@ mod v010 {
     #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
     pub enum ObjectVersionEncryption {
         SseC {
-            /// Encrypted serialized ObjectVersionInner struct.
+            /// Encrypted serialized ObjectVersionHeaders struct.
             /// This is never compressed, just encrypted using AES256-GCM.
             #[serde(with = "serde_bytes")]
-            inner: Vec<u8>,
+            headers: Vec<u8>,
             /// Whether data blocks are compressed in addition to being encrypted
             /// (compression happens before encryption, whereas for non-encrypted
             /// objects, compression is handled at the level of the block manager)
@@ -260,35 +258,13 @@ mod v010 {
         },
         Plaintext {
             /// Plain-text headers
-            inner: ObjectVersionMetaInner,
+            headers: ObjectVersionHeaders,
         },
     }
 
     /// Vector of headers, as tuples of the format (header name, header value)
     #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
-    pub struct ObjectVersionMetaInner {
-        pub headers: HeaderList,
-        pub checksum: Option<ChecksumValue>,
-    }
-
-    pub type HeaderList = Vec<(String, String)>;
-
-    #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Debug, Serialize, Deserialize)]
-    pub enum ChecksumAlgorithm {
-        Crc32,
-        Crc32c,
-        Sha1,
-        Sha256,
-    }
-
-    /// Checksum value for x-amz-checksum-algorithm
-    #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Debug, Serialize, Deserialize)]
-    pub enum ChecksumValue {
-        Crc32(#[serde(with = "serde_bytes")] [u8; 4]),
-        Crc32c(#[serde(with = "serde_bytes")] [u8; 4]),
-        Sha1(#[serde(with = "serde_bytes")] [u8; 20]),
-        Sha256(#[serde(with = "serde_bytes")] [u8; 32]),
-    }
+    pub struct ObjectVersionHeaders(pub Vec<(String, String)>);
 
     impl garage_util::migrate::Migrate for Object {
         const VERSION_MARKER: &'static [u8] = b"G010s3ob";
@@ -312,7 +288,6 @@ mod v010 {
             v09::ObjectVersionState::Uploading { multipart, headers } => {
                 ObjectVersionState::Uploading {
                     multipart,
-                    checksum_algorithm: None,
                     encryption: migrate_headers(headers),
                 }
             }
@@ -356,18 +331,15 @@ mod v010 {
         }
 
         ObjectVersionEncryption::Plaintext {
-            inner: ObjectVersionMetaInner {
-                headers: new_headers,
-                checksum: None,
-            },
+            headers: ObjectVersionHeaders(new_headers),
         }
     }
 
-    // Since ObjectVersionMetaInner can now be serialized independently, for the
+    // Since ObjectVersionHeaders can now be serialized independently, for the
     // purpose of being encrypted, we need it to support migrations on its own
     // as well.
-    impl garage_util::migrate::InitialFormat for ObjectVersionMetaInner {
-        const VERSION_MARKER: &'static [u8] = b"G010s3om";
+    impl garage_util::migrate::InitialFormat for ObjectVersionHeaders {
+        const VERSION_MARKER: &'static [u8] = b"G010s3oh";
     }
 }
 
@@ -482,17 +454,6 @@ impl Entry<Uuid, String> for Object {
     }
 }
 
-impl ChecksumValue {
-    pub fn algorithm(&self) -> ChecksumAlgorithm {
-        match self {
-            ChecksumValue::Crc32(_) => ChecksumAlgorithm::Crc32,
-            ChecksumValue::Crc32c(_) => ChecksumAlgorithm::Crc32c,
-            ChecksumValue::Sha1(_) => ChecksumAlgorithm::Sha1,
-            ChecksumValue::Sha256(_) => ChecksumAlgorithm::Sha256,
-        }
-    }
-}
-
 impl Crdt for Object {
     fn merge(&mut self, other: &Self) {
         // Merge versions from other into here

@@ -1,136 +0,0 @@
-use std::fs;
-use std::path::PathBuf;
-use std::sync::Arc;
-use std::sync::Mutex;
-use std::time::{Duration, Instant};
-
-use async_trait::async_trait;
-use rand::prelude::*;
-use tokio::sync::watch;
-
-use garage_util::background::*;
-use garage_util::error::*;
-
-use crate::garage::Garage;
-
-// The two most recent snapshots are kept
-const KEEP_SNAPSHOTS: usize = 2;
-
-static SNAPSHOT_MUTEX: Mutex<()> = Mutex::new(());
-
-// ================ snapshotting logic =====================
-
-/// Run snashot_metadata in a blocking thread and async await on it
-pub async fn async_snapshot_metadata(garage: &Arc<Garage>) -> Result<(), Error> {
-    let garage = garage.clone();
-    let worker = tokio::task::spawn_blocking(move || snapshot_metadata(&garage));
-    worker.await.unwrap()?;
-    Ok(())
-}
-
-/// Take a snapshot of the metadata database, and erase older
-/// snapshots if necessary.
-/// This is not an async function, it should be spawned on a thread pool
-pub fn snapshot_metadata(garage: &Garage) -> Result<(), Error> {
-    let lock = match SNAPSHOT_MUTEX.try_lock() {
-        Ok(lock) => lock,
-        Err(_) => {
-            return Err(Error::Message(
-                "Cannot acquire lock, another snapshot might be in progress".into(),
-            ))
-        }
-    };
-
-    let mut snapshots_dir = garage.config.metadata_dir.clone();
-    snapshots_dir.push("snapshots");
-    fs::create_dir_all(&snapshots_dir)?;
-
-    let mut new_path = snapshots_dir.clone();
-    new_path.push(chrono::Utc::now().to_rfc3339_opts(chrono::SecondsFormat::Secs, true));
-
-    info!("Snapshotting metadata db to {}", new_path.display());
-    garage.db.snapshot(&new_path)?;
-    info!("Metadata db snapshot finished");
-
-    if let Err(e) = cleanup_snapshots(&snapshots_dir) {
-        error!("Failed to do cleanup in snapshots directory: {}", e);
-    }
-
-    drop(lock);
-
-    Ok(())
-}
-
-fn cleanup_snapshots(snapshots_dir: &PathBuf) -> Result<(), Error> {
-    let mut snapshots =
-        fs::read_dir(&snapshots_dir)?.collect::<Result<Vec<fs::DirEntry>, std::io::Error>>()?;
-
-    snapshots.retain(|x| x.file_name().len() > 8);
-    snapshots.sort_by_key(|x| x.file_name());
-
-    for to_delete in snapshots.iter().rev().skip(KEEP_SNAPSHOTS) {
-        let path = snapshots_dir.join(to_delete.path());
-        if to_delete.metadata()?.file_type().is_dir() {
-            for file in fs::read_dir(&path)? {
-                let file = file?;
-                if file.metadata()?.is_file() {
-                    fs::remove_file(path.join(file.path()))?;
-                }
-            }
-            std::fs::remove_dir(&path)?;
-        } else {
-            std::fs::remove_file(&path)?;
-        }
-    }
-    Ok(())
-}
-
-// ================ auto snapshot worker =====================
-
-pub struct AutoSnapshotWorker {
-    garage: Arc<Garage>,
-    next_snapshot: Instant,
-    snapshot_interval: Duration,
-}
-
-impl AutoSnapshotWorker {
-    pub(crate) fn new(garage: Arc<Garage>, snapshot_interval: Duration) -> Self {
-        Self {
-            garage,
-            snapshot_interval,
-            next_snapshot: Instant::now() + (snapshot_interval / 2),
-        }
-    }
-}
-
-#[async_trait]
-impl Worker for AutoSnapshotWorker {
-    fn name(&self) -> String {
-        "Metadata snapshot worker".into()
-    }
-    fn status(&self) -> WorkerStatus {
-        WorkerStatus {
-            freeform: vec![format!(
-                "Next snapshot: {}",
-                (chrono::Utc::now() + (self.next_snapshot - Instant::now())).to_rfc3339()
-            )],
-            ..Default::default()
-        }
-    }
-    async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
-        if Instant::now() < self.next_snapshot {
-            return Ok(WorkerState::Idle);
-        }
-
-        async_snapshot_metadata(&self.garage).await?;
-
-        let rand_factor = 1f32 + thread_rng().gen::<f32>() / 5f32;
-        self.next_snapshot = Instant::now() + self.snapshot_interval.mul_f32(rand_factor);
-
-        Ok(WorkerState::Idle)
-    }
-    async fn wait_for_work(&mut self) -> WorkerState {
-        tokio::time::sleep_until(self.next_snapshot.into()).await;
-        WorkerState::Busy
-    }
-}

@@ -1,6 +1,6 @@
 [package]
 name = "garage_net"
-version = "1.0.0"
+version = "0.10.0"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"

@@ -28,30 +28,12 @@ use crate::util::*;
 /// The same priority value is given to a request and to its associated response.
 pub type RequestPriority = u8;
 
-// Usage of priority levels in Garage:
-//
-// PRIO_HIGH
-//     for liveness check events such as pings and important
-//     reconfiguration events such as layout changes
-//
-// PRIO_NORMAL
-//     for standard interactive requests to exchange metadata
-//
-// PRIO_NORMAL | PRIO_SECONDARY
-//     for standard interactive requests to exchange block data
-//
-// PRIO_BACKGROUND
-//     for background resync requests to exchange metadata
-// PRIO_BACKGROUND | PRIO_SECONDARY
-//     for background resync requests to exchange block data
-
 /// Priority class: high
 pub const PRIO_HIGH: RequestPriority = 0x20;
 /// Priority class: normal
 pub const PRIO_NORMAL: RequestPriority = 0x40;
 /// Priority class: background
 pub const PRIO_BACKGROUND: RequestPriority = 0x80;
 
 /// Priority: primary among given class
 pub const PRIO_PRIMARY: RequestPriority = 0x00;
 /// Priority: secondary among given class (ex: `PRIO_HIGH | PRIO_SECONDARY`)

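The comment block removed above documents how a priority class combines with a sub-priority by bitwise OR. A tiny illustration using the constants visible in this hunk; `PRIO_SECONDARY`'s value is cut off in this diff, so the value below is an assumption:

```rust
type RequestPriority = u8;

const PRIO_NORMAL: RequestPriority = 0x40;
const PRIO_BACKGROUND: RequestPriority = 0x80;
const PRIO_SECONDARY: RequestPriority = 0x01; // assumed value, not shown in the hunk

// A background request moving block data ORs its class with the
// secondary sub-priority.
fn resync_block_priority() -> RequestPriority {
    PRIO_BACKGROUND | PRIO_SECONDARY
}

// Metadata exchanges use the class alone: PRIO_PRIMARY is 0x00,
// so no OR is needed.
fn metadata_exchange_priority() -> RequestPriority {
    PRIO_NORMAL
}
```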
@@ -35,10 +35,8 @@ pub type NetworkKey = sodiumoxide::crypto::auth::Key;
 /// composed of 8 bytes for Netapp version and 8 bytes for client version
 pub(crate) type VersionTag = [u8; 16];
 
-/// Value of garage_net version used in the version tag
-/// We are no longer using prefix `netapp` as garage_net is forked from the netapp crate.
-/// Since Garage v1.0, we have replaced the prefix by `grgnet` (shorthand for garage_net).
-pub(crate) const NETAPP_VERSION_TAG: u64 = 0x6772676e65740010; // grgnet 0x0010 (1.0)
+/// Value of the Netapp version used in the version tag
+pub(crate) const NETAPP_VERSION_TAG: u64 = 0x6e65746170700005; // netapp 0x0005
 
 /// HelloMessage is sent by the client on a Netapp connection to indicate
 /// that they are also a server and ready to recieve incoming connections
@@ -125,7 +123,7 @@ impl NetApp {
 
         netapp
             .hello_endpoint
-            .swap(Some(netapp.endpoint("garage_net/netapp.rs/Hello".into())));
+            .swap(Some(netapp.endpoint("__netapp/netapp.rs/Hello".into())));
         netapp
             .hello_endpoint
             .load_full()
@@ -294,7 +292,13 @@ impl NetApp {
     /// the other node with `Netapp::request`
     pub async fn try_connect(self: Arc<Self>, ip: SocketAddr, id: NodeID) -> Result<(), Error> {
         // Don't connect to ourself, we don't care
+        // but pretend we did
         if id == self.id {
+            tokio::spawn(async move {
+                if let Some(h) = self.on_connected_handler.load().as_ref() {
+                    h(id, ip, false);
+                }
+            });
             return Ok(());
         }
 
@@ -323,15 +327,9 @@ impl NetApp {
     /// Close the outgoing connection we have to a node specified by its public key,
     /// if such a connection is currently open.
     pub fn disconnect(self: &Arc<Self>, id: &NodeID) {
-        let conn = self.client_conns.write().unwrap().remove(id);
-
         // If id is ourself, we're not supposed to have a connection open
-        if *id == self.id {
-            // sanity check
-            assert!(conn.is_none(), "had a connection to local node");
-            return;
-        }
-
+        if *id != self.id {
+            let conn = self.client_conns.write().unwrap().remove(id);
         if let Some(c) = conn {
             debug!(
                 "Closing connection to {} ({})",
@@ -339,8 +337,14 @@ impl NetApp {
                 c.remote_addr
             );
             c.close();
+            } else {
+                return;
+            }
+        }
 
-        // call on_disconnected_handler immediately, since the connection was removed
+        // call on_disconnected_handler immediately, since the connection
+        // was removed
+        // (if id == self.id, we pretend we disconnected)
         let id = *id;
         let self2 = self.clone();
         tokio::spawn(async move {
@@ -349,7 +353,6 @@ impl NetApp {
             }
         });
     }
-    }
 
     // Called from conn.rs when an incoming connection is successfully established
     // Registers the connection in our list of connections

@@ -54,8 +54,12 @@ impl Message for PeerListMessage {
 
 #[derive(Debug)]
 struct PeerInfoInternal {
-    // known_addrs contains all of the addresses everyone gave us
-    known_addrs: Vec<SocketAddr>,
+    // addr is the currently connected address,
+    // or the last address we were connected to,
+    // or an arbitrary address some other peer gave us
+    addr: SocketAddr,
+    // all_addrs contains all of the addresses everyone gave us
+    all_addrs: Vec<SocketAddr>,
 
     state: PeerConnState,
     last_send_ping: Option<Instant>,
@@ -65,9 +69,10 @@ struct PeerInfoInternal {
 }
 
 impl PeerInfoInternal {
-    fn new(state: PeerConnState, known_addr: Option<SocketAddr>) -> Self {
+    fn new(addr: SocketAddr, state: PeerConnState) -> Self {
         Self {
-            known_addrs: known_addr.map(|x| vec![x]).unwrap_or_default(),
+            addr,
+            all_addrs: vec![addr],
             state,
             last_send_ping: None,
             last_seen: None,
@@ -76,8 +81,8 @@ impl PeerInfoInternal {
         }
     }
     fn add_addr(&mut self, addr: SocketAddr) -> bool {
-        if !self.known_addrs.contains(&addr) {
-            self.known_addrs.push(addr);
+        if !self.all_addrs.contains(&addr) {
+            self.all_addrs.push(addr);
             // If we are learning a new address for this node,
             // we want to retry connecting
             self.state = match self.state {
@@ -85,7 +90,7 @@ impl PeerInfoInternal {
                 PeerConnState::Waiting(_, _) | PeerConnState::Abandonned => {
                     PeerConnState::Waiting(0, Instant::now())
                 }
-                x @ (PeerConnState::Ourself | PeerConnState::Connected { .. }) => x,
+                x @ (PeerConnState::Ourself | PeerConnState::Connected) => x,
             };
             true
         } else {
@@ -99,6 +104,8 @@ impl PeerInfoInternal {
 pub struct PeerInfo {
     /// The node's identifier (its public key)
     pub id: NodeID,
+    /// The node's network address
+    pub addr: SocketAddr,
     /// The current status of our connection to this node
     pub state: PeerConnState,
     /// The last time at which the node was seen
@@ -129,7 +136,7 @@ pub enum PeerConnState {
     Ourself,
 
     /// We currently have a connection to this peer
-    Connected { addr: SocketAddr },
+    Connected,
 
     /// Our next connection tentative (the nth, where n is the first value of the tuple)
     /// will be at given Instant
@@ -145,7 +152,7 @@ pub enum PeerConnState {
 impl PeerConnState {
     /// Returns true if we can currently send requests to this peer
     pub fn is_up(&self) -> bool {
-        matches!(self, Self::Ourself | Self::Connected { .. })
+        matches!(self, Self::Ourself | Self::Connected)
     }
 }
 
@@ -157,42 +164,29 @@ struct KnownHosts {
 impl KnownHosts {
     fn new() -> Self {
         let list = HashMap::new();
-        let mut ret = Self {
-            list,
-            hash: hash::Digest::from_slice(&[0u8; 64][..]).unwrap(),
-        };
-        ret.update_hash();
-        ret
+        let hash = Self::calculate_hash(vec![]);
+        Self { list, hash }
     }
     fn update_hash(&mut self) {
-        // The hash is a value that is exchanged between nodes when they ping one
-        // another. Nodes compare their known hosts hash to know if they are connected
-        // to the same set of nodes. If the hashes differ, they are connected to
-        // different nodes and they trigger an exchange of the full list of active
-        // connections. The hash value only represents the set of node IDs and not
-        // their actual socket addresses, because nodes can be connected via different
-        // addresses and that shouldn't necessarily trigger a full peer exchange.
-        let mut list = self
-            .list
-            .iter()
-            .filter(|(_, peer)| peer.state.is_up())
-            .map(|(id, _)| *id)
-            .collect::<Vec<_>>();
-        list.sort();
-        let mut hash_state = hash::State::new();
-        for id in list {
-            hash_state.update(&id[..]);
-        }
-        self.hash = hash_state.finalize();
+        self.hash = Self::calculate_hash(self.connected_peers_vec());
     }
     fn connected_peers_vec(&self) -> Vec<(NodeID, SocketAddr)> {
-        self.list
-            .iter()
-            .filter_map(|(id, peer)| match peer.state {
-                PeerConnState::Connected { addr } => Some((*id, addr)),
-                _ => None,
-            })
-            .collect::<Vec<_>>()
+        let mut list = Vec::with_capacity(self.list.len());
+        for (id, peer) in self.list.iter() {
+            if peer.state.is_up() {
+                list.push((*id, peer.addr));
+            }
+        }
+        list
+    }
+    fn calculate_hash(mut list: Vec<(NodeID, SocketAddr)>) -> hash::Digest {
+        list.sort();
+        let mut hash_state = hash::State::new();
+        for (id, addr) in list {
+            hash_state.update(&id[..]);
+            hash_state.update(&format!("{}\n", addr).into_bytes()[..]);
+        }
+        hash_state.finalize()
     }
 }
 
@@ -226,24 +220,27 @@ impl PeeringManager {
             if id != netapp.id {
                 known_hosts.list.insert(
                     id,
-                    PeerInfoInternal::new(PeerConnState::Waiting(0, Instant::now()), Some(addr)),
+                    PeerInfoInternal::new(addr, PeerConnState::Waiting(0, Instant::now())),
                 );
             }
         }
 
+        if let Some(addr) = our_addr {
             known_hosts.list.insert(
                 netapp.id,
-                PeerInfoInternal::new(PeerConnState::Ourself, our_addr),
+                PeerInfoInternal::new(addr, PeerConnState::Ourself),
             );
             known_hosts.update_hash();
+        }
 
+        // TODO for v0.10 / v1.0 : rename the endpoint (it will break compatibility)
         let strat = Arc::new(Self {
             netapp: netapp.clone(),
             known_hosts: RwLock::new(known_hosts),
             public_peer_list: ArcSwap::new(Arc::new(Vec::new())),
             next_ping_id: AtomicU64::new(42),
-            ping_endpoint: netapp.endpoint("garage_net/peering.rs/Ping".into()),
-            peer_list_endpoint: netapp.endpoint("garage_net/peering.rs/PeerList".into()),
+            ping_endpoint: netapp.endpoint("__netapp/peering/fullmesh.rs/Ping".into()),
+            peer_list_endpoint: netapp.endpoint("__netapp/peering/fullmesh.rs/PeerList".into()),
             ping_timeout_millis: DEFAULT_PING_TIMEOUT_MILLIS.into(),
         });
 
@@ -279,7 +276,7 @@ impl PeeringManager {
         for (id, info) in known_hosts.list.iter() {
             trace!("{}, {:?}", hex::encode(&id[..8]), info);
             match info.state {
-                PeerConnState::Connected { .. } => {
+                PeerConnState::Connected => {
                     let must_ping = match info.last_send_ping {
                         None => true,
                         Some(t) => Instant::now() - t > PING_INTERVAL,
@@ -322,7 +319,7 @@ impl PeeringManager {
                     info!(
                         "Retrying connection to {} at {} ({})",
                         hex::encode(&id[..8]),
-                        h.known_addrs
+                        h.all_addrs
                             .iter()
                             .map(|x| format!("{}", x))
                             .collect::<Vec<_>>()
@@ -331,8 +328,13 @@ impl PeeringManager {
                     );
                     h.state = PeerConnState::Trying(i);
 
-                    let addresses = h.known_addrs.clone();
-                    tokio::spawn(self.clone().try_connect(id, addresses));
+                    let alternate_addrs = h
+                        .all_addrs
+                        .iter()
+                        .filter(|x| **x != h.addr)
+                        .cloned()
+                        .collect::<Vec<_>>();
+                    tokio::spawn(self.clone().try_connect(id, h.addr, alternate_addrs));
                 }
             }
         }
@@ -360,24 +362,27 @@ impl PeeringManager {
     fn update_public_peer_list(&self, known_hosts: &KnownHosts) {
         let mut pub_peer_list = Vec::with_capacity(known_hosts.list.len());
         for (id, info) in known_hosts.list.iter() {
-            if *id == self.netapp.id {
-                // sanity check
-                assert!(matches!(info.state, PeerConnState::Ourself));
-            }
             let mut pings = info.ping.iter().cloned().collect::<Vec<_>>();
             pings.sort();
             if !pings.is_empty() {
                 pub_peer_list.push(PeerInfo {
                     id: *id,
+                    addr: info.addr,
                     state: info.state,
                     last_seen: info.last_seen,
-                    avg_ping: Some(pings.iter().sum::<Duration>().div_f64(pings.len() as f64)),
+                    avg_ping: Some(
+                        pings
+                            .iter()
+                            .fold(Duration::from_secs(0), |x, y| x + *y)
+                            .div_f64(pings.len() as f64),
+                    ),
                     max_ping: pings.last().cloned(),
                     med_ping: Some(pings[pings.len() / 2]),
                 });
             } else {
                 pub_peer_list.push(PeerInfo {
                     id: *id,
+                    addr: info.addr,
                     state: info.state,
                     last_seen: info.last_seen,
                     avg_ping: None,
@@ -490,10 +495,15 @@ impl PeeringManager {
         }
     }
 
-    async fn try_connect(self: Arc<Self>, id: NodeID, addresses: Vec<SocketAddr>) {
+    async fn try_connect(
+        self: Arc<Self>,
+        id: NodeID,
+        default_addr: SocketAddr,
+        alternate_addrs: Vec<SocketAddr>,
+    ) {
         let conn_addr = {
             let mut ret = None;
-            for addr in addresses.iter() {
+            for addr in [default_addr].iter().chain(alternate_addrs.iter()) {
                 debug!("Trying address {} for peer {}", addr, hex::encode(&id[..8]));
                 match self.netapp.clone().try_connect(*addr, id).await {
                     Ok(()) => {
@@ -519,7 +529,7 @@ impl PeeringManager {
             warn!(
                 "Could not connect to peer {} ({} addresses tried)",
                 hex::encode(&id[..8]),
-                addresses.len()
+                1 + alternate_addrs.len()
             );
             let mut known_hosts = self.known_hosts.write().unwrap();
             if let Some(host) = known_hosts.list.get_mut(&id) {
@@ -539,14 +549,6 @@ impl PeeringManager {
     }
 
     fn on_connected(self: &Arc<Self>, id: NodeID, addr: SocketAddr, is_incoming: bool) {
-        if id == self.netapp.id {
-            // sanity check
-            panic!(
-                "on_connected from local node, id={:?}, addr={}, incoming={}",
-                id, addr, is_incoming
-            );
-        }
-
         let mut known_hosts = self.known_hosts.write().unwrap();
         if is_incoming {
             if let Some(host) = known_hosts.list.get_mut(&id) {
@@ -561,13 +563,13 @@ impl PeeringManager {
                 addr
             );
             if let Some(host) = known_hosts.list.get_mut(&id) {
-                host.state = PeerConnState::Connected { addr };
+                host.state = PeerConnState::Connected;
+                host.addr = addr;
                 host.add_addr(addr);
             } else {
-                known_hosts.list.insert(
-                    id,
-                    PeerInfoInternal::new(PeerConnState::Connected { addr }, Some(addr)),
-                );
+                known_hosts
+                    .list
+                    .insert(id, PeerInfoInternal::new(addr, PeerConnState::Connected));
             }
         }
         known_hosts.update_hash();
@@ -587,8 +589,12 @@ impl PeeringManager {
     }
 
     fn new_peer(&self, id: &NodeID, addr: SocketAddr) -> PeerInfoInternal {
-        assert!(*id != self.netapp.id);
-        PeerInfoInternal::new(PeerConnState::Waiting(0, Instant::now()), Some(addr))
+        let state = if *id == self.netapp.id {
+            PeerConnState::Ourself
+        } else {
+            PeerConnState::Waiting(0, Instant::now())
+        };
+        PeerInfoInternal::new(addr, state)
     }
 }
 

@@ -109,7 +109,7 @@ impl SendQueuePriority {
             let i = order_vec.iter().take_while(|o2| **o2 < order).count();
             order_vec.insert(i, order);
         }
-        self.items.push_back(item);
+        self.items.push_front(item);
     }
     fn remove(&mut self, id: RequestID) {
         if let Some(i) = self.items.iter().position(|x| x.id == id) {
@@ -128,10 +128,6 @@ impl SendQueuePriority {
         self.items.is_empty()
     }
     fn poll_next_ready(&mut self, ctx: &mut Context<'_>) -> Poll<(RequestID, DataFrame)> {
-        // in step 1: poll only streams that have sent 0 bytes, we want to send them in priority
-        // as they most likely represent small requests to be sent first
-        // in step 2: poll all streams
-        for step in 0..2 {
         for (j, item) in self.items.iter_mut().enumerate() {
             if let Some(OrderTag(stream, order)) = item.order_tag {
                 if order > *self.order.get(&stream).unwrap().front().unwrap() {
@@ -139,10 +135,6 @@ impl SendQueuePriority {
                 }
             }
 
-            if step == 0 && item.sent > 0 {
-                continue;
-            }
-
             let mut item_reader = item.data.read_exact_or_eos(MAX_CHUNK_LENGTH as usize);
             if let Poll::Ready(bytes_or_err) = Pin::new(&mut item_reader).poll(ctx) {
                 let id = item.id;
@@ -168,18 +160,21 @@ impl SendQueuePriority {
                     }
                     // Remove item from sending queue
                    self.items.remove(j);
-                } else if step == 0 {
-                    // Step 0 means that this stream had not sent any bytes yet.
-                    // Now that it has, and it was not an EOS, we know that it is bigger
-                    // than one chunk so move it at the end of the queue.
-                    let item = self.items.remove(j).unwrap();
-                    self.items.push_back(item);
+                } else {
+                    // Move item later in send queue to implement LAS scheduling
+                    // (LAS = Least Attained Service)
+                    for k in j..self.items.len() - 1 {
+                        if self.items[k].sent >= self.items[k + 1].sent {
+                            self.items.swap(k, k + 1);
+                        } else {
+                            break;
+                        }
+                    }
                 }
 
                 return Poll::Ready((id, data_frame));
             }
         }
-        }
 
         Poll::Pending
     }

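The replacement branch above implements LAS (Least Attained Service) scheduling: after an item sends a chunk, it is bubbled back past queue entries that have sent fewer bytes, so streams with little attained service get polled first and short requests finish quickly. A standalone sketch of that bubbling step over a plain `Vec`, with `sent` standing in for the per-item byte counters assumed by the diff:

```rust
// Move the item at index j back past neighbours that have sent fewer
// bytes; items with the least attained service stay near the front.
fn bubble_down(sent: &mut Vec<u64>, j: usize) {
    for k in j..sent.len().saturating_sub(1) {
        if sent[k] >= sent[k + 1] {
            sent.swap(k, k + 1);
        } else {
            break;
        }
    }
}
```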
@@ -190,7 +190,7 @@ impl RecvLoop for ServerConn {
 
         let (prio, resp_enc_result) = match ReqEnc::decode(stream).await {
             Ok(req_enc) => (req_enc.prio, self2.recv_handler_aux(req_enc).await),
-            Err(e) => (PRIO_NORMAL, Err(e)),
+            Err(e) => (PRIO_HIGH, Err(e)),
         };
 
         debug!("server: sending response to {}", id);

@@ -1,6 +1,6 @@
 [package]
 name = "garage_rpc"
-version = "1.0.0"
+version = "0.10.0"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"

@@ -1,4 +1,5 @@
 use std::collections::HashMap;
+use std::ops::Deref;
 use std::sync::atomic::{AtomicUsize, Ordering};

 use serde::{Deserialize, Serialize};
@@ -41,7 +42,6 @@ pub struct LayoutHelper {

     trackers_hash: Hash,
     staging_hash: Hash,
-    is_check_ok: bool,

     // ack lock: counts in-progress write operations for each
     // layout version ; we don't increase the ack update tracker
@@ -49,6 +49,13 @@ pub struct LayoutHelper {
     pub(crate) ack_lock: HashMap<u64, AtomicUsize>,
 }

+impl Deref for LayoutHelper {
+    type Target = LayoutHistory;
+    fn deref(&self) -> &LayoutHistory {
+        self.layout()
+    }
+}
+
 impl LayoutHelper {
     pub fn new(
         replication_factor: ReplicationFactor,
@@ -108,8 +115,6 @@ impl LayoutHelper {
             .entry(layout.current().version)
             .or_insert(AtomicUsize::new(0));

-        let is_check_ok = layout.check().is_ok();
-
         LayoutHelper {
             replication_factor,
             consistency_mode,
@@ -121,12 +126,15 @@ impl LayoutHelper {
             trackers_hash,
             staging_hash,
             ack_lock,
-            is_check_ok,
         }
     }

     // ------------------ single updating function --------------

+    fn layout(&self) -> &LayoutHistory {
+        self.layout.as_ref().unwrap()
+    }
+
     pub(crate) fn update<F>(&mut self, f: F) -> bool
     where
         F: FnOnce(&mut LayoutHistory) -> bool,
@@ -145,30 +153,10 @@ impl LayoutHelper {

     // ------------------ read helpers ---------------

-    pub fn inner(&self) -> &LayoutHistory {
-        self.layout.as_ref().unwrap()
-    }
-
-    pub fn current(&self) -> &LayoutVersion {
-        self.inner().current()
-    }
-
-    pub fn versions(&self) -> &[LayoutVersion] {
-        &self.inner().versions
-    }
-
-    pub fn is_check_ok(&self) -> bool {
-        self.is_check_ok
-    }
-
-    /// Return all nodes that have a role (gateway or storage)
-    /// in one of the currently active layout versions
     pub fn all_nodes(&self) -> &[Uuid] {
         &self.all_nodes
     }

-    /// Return all nodes that are configured to store data
-    /// in one of the currently active layout versions
     pub fn all_nongateway_nodes(&self) -> &[Uuid] {
         &self.all_nongateway_nodes
     }
@@ -183,19 +171,20 @@ impl LayoutHelper {

     pub fn sync_digest(&self) -> SyncLayoutDigest {
         SyncLayoutDigest {
-            current: self.current().version,
+            current: self.layout().current().version,
             ack_map_min: self.ack_map_min(),
-            min_stored: self.inner().min_stored(),
+            min_stored: self.layout().min_stored(),
         }
     }

     pub fn read_nodes_of(&self, position: &Hash) -> Vec<Uuid> {
         let sync_min = self.sync_map_min;
         let version = self
-            .versions()
+            .layout()
+            .versions
             .iter()
             .find(|x| x.version == sync_min)
-            .or(self.versions().last())
+            .or(self.layout().versions.last())
             .unwrap();
         version
             .nodes_of(position, version.replication_factor)
@@ -203,7 +192,8 @@ impl LayoutHelper {
     }

     pub fn storage_sets_of(&self, position: &Hash) -> Vec<Vec<Uuid>> {
-        self.versions()
+        self.layout()
+            .versions
             .iter()
             .map(|x| x.nodes_of(position, x.replication_factor).collect())
             .collect()
@@ -211,7 +201,7 @@ impl LayoutHelper {

     pub fn storage_nodes_of(&self, position: &Hash) -> Vec<Uuid> {
         let mut ret = vec![];
-        for version in self.versions().iter() {
+        for version in self.layout().versions.iter() {
             ret.extend(version.nodes_of(position, version.replication_factor));
         }
         ret.sort();
@@ -230,7 +220,7 @@ impl LayoutHelper {
     pub fn digest(&self) -> RpcLayoutDigest {
         RpcLayoutDigest {
             current_version: self.current().version,
-            active_versions: self.versions().len(),
+            active_versions: self.versions.len(),
             trackers_hash: self.trackers_hash,
             staging_hash: self.staging_hash,
         }
@@ -238,24 +228,36 @@ impl LayoutHelper {

     // ------------------ helpers for update tracking ---------------

-    pub(crate) fn update_update_trackers(&mut self, local_node_id: Uuid) {
+    pub(crate) fn update_trackers(&mut self, local_node_id: Uuid) {
         // Ensure trackers for this node's values are up-to-date

         // 1. Acknowledge the last layout version which is not currently
         // locked by an in-progress write operation
-        self.update_ack_to_max_free(local_node_id);
+        self.ack_max_free(local_node_id);

         // 2. Assume the data on this node is sync'ed up at least to
         // the first layout version in the history
-        let first_version = self.inner().min_stored();
+        self.sync_first(local_node_id);

+        // 3. Acknowledge everyone has synced up to min(self.sync_map)
+        self.sync_ack(local_node_id);
+
+        debug!("ack_map: {:?}", self.update_trackers.ack_map);
+        debug!("sync_map: {:?}", self.update_trackers.sync_map);
+        debug!("sync_ack_map: {:?}", self.update_trackers.sync_ack_map);
+    }
+
+    fn sync_first(&mut self, local_node_id: Uuid) {
+        let first_version = self.min_stored();
         self.update(|layout| {
             layout
                 .update_trackers
                 .sync_map
                 .set_max(local_node_id, first_version)
         });
+    }

-        // 3. Acknowledge everyone has synced up to min(self.sync_map)
+    fn sync_ack(&mut self, local_node_id: Uuid) {
         let sync_map_min = self.sync_map_min;
         self.update(|layout| {
             layout
@@ -263,18 +265,25 @@ impl LayoutHelper {
                 .sync_ack_map
                 .set_max(local_node_id, sync_map_min)
         });

-        debug!("ack_map: {:?}", self.inner().update_trackers.ack_map);
-        debug!("sync_map: {:?}", self.inner().update_trackers.sync_map);
-        debug!(
-            "sync_ack_map: {:?}",
-            self.inner().update_trackers.sync_ack_map
-        );
     }

-    pub(crate) fn update_ack_to_max_free(&mut self, local_node_id: Uuid) -> bool {
-        let max_free = self
-            .versions()
+    pub(crate) fn ack_max_free(&mut self, local_node_id: Uuid) -> bool {
+        let max_ack = self.max_free_ack();
+        let changed = self.update(|layout| {
+            layout
+                .update_trackers
+                .ack_map
+                .set_max(local_node_id, max_ack)
+        });
+        if changed {
+            info!("ack_until updated to {}", max_ack);
+        }
+        changed
+    }
+
+    pub(crate) fn max_free_ack(&self) -> u64 {
+        self.layout()
+            .versions
             .iter()
             .map(|x| x.version)
             .skip_while(|v| {
@@ -284,16 +293,6 @@ impl LayoutHelper {
                     .unwrap_or(true)
             })
             .next()
-            .unwrap_or(self.current().version);
-        let changed = self.update(|layout| {
-            layout
-                .update_trackers
-                .ack_map
-                .set_max(local_node_id, max_free)
-        });
-        if changed {
-            info!("ack_until updated to {}", max_free);
-        }
-        changed
+            .unwrap_or(self.current().version)
     }
 }

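Note on the hunks above: the branch trades the explicit inner() / current() / versions() accessors for an impl Deref, so a LayoutHelper can call LayoutHistory methods directly. A minimal, self-contained sketch of that mechanism, using simplified stand-in types rather than the real garage structs:

use std::ops::Deref;

struct LayoutHistory {
    versions: Vec<u64>,
}

impl LayoutHistory {
    fn current(&self) -> &u64 {
        self.versions.last().unwrap()
    }
}

struct LayoutHelper {
    layout: Option<LayoutHistory>, // Option so an update step can take and replace it
}

impl LayoutHelper {
    fn layout(&self) -> &LayoutHistory {
        self.layout.as_ref().unwrap()
    }
}

impl Deref for LayoutHelper {
    type Target = LayoutHistory;
    fn deref(&self) -> &LayoutHistory {
        self.layout()
    }
}

fn main() {
    let helper = LayoutHelper {
        layout: Some(LayoutHistory { versions: vec![1, 2] }),
    };
    // Deref coercion: a LayoutHistory method called directly on a LayoutHelper
    assert_eq!(*helper.current(), 2);
}
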
@@ -27,18 +27,14 @@ impl LayoutHistory {

     // ------------------ who stores what now? ---------------

-    /// Returns the layout version with the highest number
     pub fn current(&self) -> &LayoutVersion {
         self.versions.last().as_ref().unwrap()
     }

-    /// Returns the version number of the oldest layout version still active
     pub fn min_stored(&self) -> u64 {
         self.versions.first().as_ref().unwrap().version
     }

-    /// Calculate the set of all nodes that have a role (gateway or storage)
-    /// in one of the currently active layout versions
     pub fn get_all_nodes(&self) -> Vec<Uuid> {
         if self.versions.len() == 1 {
             self.versions[0].all_nodes().to_vec()
@@ -52,8 +48,6 @@ impl LayoutHistory {
         }
     }

-    /// Calculate the set of all nodes that are configured to store data
-    /// in one of the currently active layout versions
     pub(crate) fn get_all_nongateway_nodes(&self) -> Vec<Uuid> {
         if self.versions.len() == 1 {
             self.versions[0].nongateway_nodes().to_vec()

@@ -70,7 +70,7 @@ impl LayoutManager {
             cluster_layout,
             Default::default(),
         );
-        cluster_layout.update_update_trackers(node_id.into());
+        cluster_layout.update_trackers(node_id.into());

         let layout = Arc::new(RwLock::new(cluster_layout));
         let change_notify = Arc::new(Notify::new());
@@ -109,7 +109,7 @@ impl LayoutManager {
     }

     pub fn add_table(&self, table_name: &'static str) {
-        let first_version = self.layout().versions().first().unwrap().version;
+        let first_version = self.layout().versions.first().unwrap().version;

         self.table_sync_version
             .lock()
@@ -127,16 +127,16 @@ impl LayoutManager {
         if layout.update(|l| l.update_trackers.sync_map.set_max(self.node_id, sync_until)) {
             info!("sync_until updated to {}", sync_until);
             self.broadcast_update(SystemRpc::AdvertiseClusterLayoutTrackers(
-                layout.inner().update_trackers.clone(),
+                layout.update_trackers.clone(),
             ));
         }
     }

     fn ack_new_version(self: &Arc<Self>) {
         let mut layout = self.layout.write().unwrap();
-        if layout.update_ack_to_max_free(self.node_id) {
+        if layout.ack_max_free(self.node_id) {
             self.broadcast_update(SystemRpc::AdvertiseClusterLayoutTrackers(
-                layout.inner().update_trackers.clone(),
+                layout.update_trackers.clone(),
             ));
         }
     }
@@ -160,16 +160,16 @@ impl LayoutManager {
     fn merge_layout(&self, adv: &LayoutHistory) -> Option<LayoutHistory> {
         let mut layout = self.layout.write().unwrap();
         let prev_digest = layout.digest();
-        let prev_layout_check = layout.is_check_ok();
+        let prev_layout_check = layout.check().is_ok();

         if !prev_layout_check || adv.check().is_ok() {
             if layout.update(|l| l.merge(adv)) {
-                layout.update_update_trackers(self.node_id);
-                if prev_layout_check && !layout.is_check_ok() {
+                layout.update_trackers(self.node_id);
+                if prev_layout_check && layout.check().is_err() {
                     panic!("Merged two correct layouts and got an incorrect layout.");
                 }
                 assert!(layout.digest() != prev_digest);
-                return Some(layout.inner().clone());
+                return Some(layout.clone());
             }
         }

@@ -180,11 +180,11 @@ impl LayoutManager {
         let mut layout = self.layout.write().unwrap();
         let prev_digest = layout.digest();

-        if layout.inner().update_trackers != *adv {
+        if layout.update_trackers != *adv {
             if layout.update(|l| l.update_trackers.merge(adv)) {
-                layout.update_update_trackers(self.node_id);
+                layout.update_trackers(self.node_id);
                 assert!(layout.digest() != prev_digest);
-                return Some(layout.inner().update_trackers.clone());
+                return Some(layout.update_trackers.clone());
             }
         }

@@ -230,7 +230,7 @@ impl LayoutManager {

     /// Save cluster layout data to disk
     async fn save_cluster_layout(&self) -> Result<(), Error> {
-        let layout = self.layout.read().unwrap().inner().clone();
+        let layout = self.layout.read().unwrap().clone();
         self.persist_cluster_layout
             .save_async(&layout)
             .await
@@ -278,13 +278,13 @@ impl LayoutManager {
     }

     pub(crate) fn handle_pull_cluster_layout(&self) -> SystemRpc {
-        let layout = self.layout.read().unwrap().inner().clone();
+        let layout = self.layout.read().unwrap().clone();
         SystemRpc::AdvertiseClusterLayout(layout)
     }

     pub(crate) fn handle_pull_cluster_layout_trackers(&self) -> SystemRpc {
         let layout = self.layout.read().unwrap();
-        SystemRpc::AdvertiseClusterLayoutTrackers(layout.inner().update_trackers.clone())
+        SystemRpc::AdvertiseClusterLayoutTrackers(layout.update_trackers.clone())
     }

     pub(crate) async fn handle_advertise_cluster_layout(

@@ -26,14 +26,15 @@ use garage_util::data::*;
 use garage_util::error::Error;
 use garage_util::metrics::RecordDuration;

-use crate::layout::{LayoutHelper, LayoutVersion};
+use crate::layout::{LayoutHelper, LayoutHistory};
 use crate::metrics::RpcMetrics;

 // Default RPC timeout = 5 minutes
 const DEFAULT_TIMEOUT: Duration = Duration::from_secs(300);

 /// Strategy to apply when making RPC
-pub struct RequestStrategy<T> {
+#[derive(Copy, Clone)]
+pub struct RequestStrategy {
     /// Min number of response to consider the request successful
     rs_quorum: Option<usize>,
     /// Send all requests at once
@@ -42,8 +43,6 @@ pub struct RequestStrategy<T> {
     rs_priority: RequestPriority,
     /// Custom timeout for this request
     rs_timeout: Timeout,
-    /// Data to drop when everything completes
-    rs_drop_on_complete: T,
 }

 #[derive(Copy, Clone)]
@@ -53,19 +52,7 @@ enum Timeout {
     Custom(Duration),
 }

-impl Clone for RequestStrategy<()> {
-    fn clone(&self) -> Self {
-        RequestStrategy {
-            rs_quorum: self.rs_quorum,
-            rs_send_all_at_once: self.rs_send_all_at_once,
-            rs_priority: self.rs_priority,
-            rs_timeout: self.rs_timeout,
-            rs_drop_on_complete: (),
-        }
-    }
-}
-
-impl RequestStrategy<()> {
+impl RequestStrategy {
     /// Create a RequestStrategy with default timeout and not interrupting when quorum reached
     pub fn with_priority(prio: RequestPriority) -> Self {
         RequestStrategy {
@@ -73,22 +60,8 @@ impl RequestStrategy {
             rs_send_all_at_once: None,
             rs_priority: prio,
             rs_timeout: Timeout::Default,
-            rs_drop_on_complete: (),
         }
     }
-    /// Add an item to be dropped on completion
-    pub fn with_drop_on_completion<T>(self, drop_on_complete: T) -> RequestStrategy<T> {
-        RequestStrategy {
-            rs_quorum: self.rs_quorum,
-            rs_send_all_at_once: self.rs_send_all_at_once,
-            rs_priority: self.rs_priority,
-            rs_timeout: self.rs_timeout,
-            rs_drop_on_complete: drop_on_complete,
-        }
-    }
-}
-
-impl<T> RequestStrategy<T> {
     /// Set quorum to be reached for request
     pub fn with_quorum(mut self, quorum: usize) -> Self {
         self.rs_quorum = Some(quorum);
@@ -109,19 +82,6 @@ impl<T> RequestStrategy<T> {
         self.rs_timeout = Timeout::Custom(timeout);
         self
     }
-    /// Extract drop_on_complete item
-    fn extract_drop_on_complete(self) -> (RequestStrategy<()>, T) {
-        (
-            RequestStrategy {
-                rs_quorum: self.rs_quorum,
-                rs_send_all_at_once: self.rs_send_all_at_once,
-                rs_priority: self.rs_priority,
-                rs_timeout: self.rs_timeout,
-                rs_drop_on_complete: (),
-            },
-            self.rs_drop_on_complete,
-        )
-    }
 }

 #[derive(Clone)]

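With rs_drop_on_complete gone, RequestStrategy holds only Copy fields, which is what lets the hand-written Clone impl above be replaced by #[derive(Copy, Clone)] and strategies be passed by value (note strat.clone() disappearing from the call sites below). A small sketch of why the derive works on the non-generic struct but was awkward on the generic one, with trimmed stand-in fields:

// Deriving Copy/Clone on the non-generic struct is unconditional;
// on RequestStrategy<T> the derive would have bounded T: Copy / T: Clone.
#[derive(Copy, Clone)]
struct RequestStrategy {
    rs_quorum: Option<usize>,
    rs_send_all_at_once: Option<bool>,
}

fn main() {
    let s = RequestStrategy {
        rs_quorum: Some(2),
        rs_send_all_at_once: None,
    };
    let t = s; // a Copy: `s` stays usable afterwards
    assert_eq!(s.rs_quorum, t.rs_quorum);
}
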
@@ -162,7 +122,7 @@ impl RpcHelper {
         endpoint: &Endpoint<M, H>,
         to: Uuid,
         msg: N,
-        strat: RequestStrategy<()>,
+        strat: RequestStrategy,
     ) -> Result<S, Error>
     where
         M: Rpc<Response = Result<S, Error>>,
@@ -222,7 +182,7 @@ impl RpcHelper {
         endpoint: &Endpoint<M, H>,
         to: &[Uuid],
         msg: N,
-        strat: RequestStrategy<()>,
+        strat: RequestStrategy,
     ) -> Result<Vec<(Uuid, Result<S, Error>)>, Error>
     where
         M: Rpc<Response = Result<S, Error>>,
@@ -237,7 +197,7 @@ impl RpcHelper {

         let resps = join_all(
             to.iter()
-                .map(|to| self.call(endpoint, *to, msg.clone(), strat.clone())),
+                .map(|to| self.call(endpoint, *to, msg.clone(), strat)),
         )
         .with_context(Context::current_with_span(span))
         .await;
@@ -252,7 +212,7 @@ impl RpcHelper {
         &self,
         endpoint: &Endpoint<M, H>,
         msg: N,
-        strat: RequestStrategy<()>,
+        strat: RequestStrategy,
     ) -> Result<Vec<(Uuid, Result<S, Error>)>, Error>
     where
         M: Rpc<Response = Result<S, Error>>,
@@ -292,7 +252,7 @@ impl RpcHelper {
         endpoint: &Arc<Endpoint<M, H>>,
         to: &[Uuid],
         msg: N,
-        strategy: RequestStrategy<()>,
+        strategy: RequestStrategy,
     ) -> Result<Vec<S>, Error>
     where
         M: Rpc<Response = Result<S, Error>> + 'static,
@@ -325,7 +285,7 @@ impl RpcHelper {
         endpoint: &Arc<Endpoint<M, H>>,
         to: &[Uuid],
         msg: N,
-        strategy: RequestStrategy<()>,
+        strategy: RequestStrategy,
         quorum: usize,
     ) -> Result<Vec<S>, Error>
     where
@@ -344,8 +304,7 @@ impl RpcHelper {
         // preemptively send an additional request to any remaining nodes.

         // Reorder requests to priorize closeness / low latency
-        let request_order =
-            self.request_order(&self.0.layout.read().unwrap().current(), to.iter().copied());
+        let request_order = self.request_order(&self.0.layout.read().unwrap(), to.iter().copied());
         let send_all_at_once = strategy.rs_send_all_at_once.unwrap_or(false);

         // Build future for each request
@@ -356,7 +315,6 @@ impl RpcHelper {
             let self2 = self.clone();
             let msg = msg.clone();
             let endpoint2 = endpoint.clone();
-            let strategy = strategy.clone();
             async move { self2.call(&endpoint2, to, msg, strategy).await }
         });

@@ -429,19 +387,18 @@ impl RpcHelper {
     /// changes, where data has to be written both in the old layout and in the
     /// new one as long as all nodes have not successfully tranisitionned and
     /// moved all data to the new layout.
-    pub async fn try_write_many_sets<M, N, H, S, T>(
+    pub async fn try_write_many_sets<M, N, H, S>(
         &self,
         endpoint: &Arc<Endpoint<M, H>>,
         to_sets: &[Vec<Uuid>],
         msg: N,
-        strategy: RequestStrategy<T>,
+        strategy: RequestStrategy,
     ) -> Result<Vec<S>, Error>
     where
         M: Rpc<Response = Result<S, Error>> + 'static,
         N: IntoReq<M>,
         H: StreamingEndpointHandler<M> + 'static,
         S: Send + 'static,
-        T: Send + 'static,
     {
         let quorum = strategy
             .rs_quorum
@@ -465,12 +422,12 @@ impl RpcHelper {
         .await
     }

-    async fn try_write_many_sets_inner<M, N, H, S, T>(
+    async fn try_write_many_sets_inner<M, N, H, S>(
         &self,
         endpoint: &Arc<Endpoint<M, H>>,
         to_sets: &[Vec<Uuid>],
         msg: N,
-        strategy: RequestStrategy<T>,
+        strategy: RequestStrategy,
         quorum: usize,
     ) -> Result<Vec<S>, Error>
     where
@@ -478,14 +435,11 @@ impl RpcHelper {
         N: IntoReq<M>,
         H: StreamingEndpointHandler<M> + 'static,
         S: Send + 'static,
-        T: Send + 'static,
     {
         // Peers may appear in many quorum sets. Here, build a list of peers,
         // mapping to the index of the quorum sets in which they appear.
         let mut result_tracker = QuorumSetResultTracker::new(to_sets, quorum);

-        let (strategy, drop_on_complete) = strategy.extract_drop_on_complete();
-
         // Send one request to each peer of the quorum sets
         let msg = msg.into_req().map_err(garage_net::error::Error::from)?;
         let requests = result_tracker.nodes.keys().map(|peer| {
@@ -493,7 +447,6 @@ impl RpcHelper {
             let msg = msg.clone();
             let endpoint2 = endpoint.clone();
             let to = *peer;
-            let strategy = strategy.clone();
             async move { (to, self2.call(&endpoint2, to, msg, strategy).await) }
         });
         let mut resp_stream = requests.collect::<FuturesUnordered<_>>();
@@ -509,7 +462,6 @@ impl RpcHelper {
                     // Continue all other requets in background
                     tokio::spawn(async move {
                         resp_stream.collect::<Vec<(Uuid, Result<_, _>)>>().await;
-                        drop(drop_on_complete);
                     });

                     return Ok(result_tracker.success_values());
@@ -545,16 +497,16 @@ impl RpcHelper {

         let mut ret = Vec::with_capacity(12);
         let ver_iter = layout
-            .versions()
+            .versions
             .iter()
             .rev()
-            .chain(layout.inner().old_versions.iter().rev());
+            .chain(layout.old_versions.iter().rev());
         for ver in ver_iter {
             if ver.version > layout.sync_map_min() {
                 continue;
             }
             let nodes = ver.nodes_of(position, ver.replication_factor);
-            for node in rpc_helper.request_order(layout.current(), nodes) {
+            for node in rpc_helper.request_order(&layout, nodes) {
                 if !ret.contains(&node) {
                     ret.push(node);
                 }
@@ -565,12 +517,15 @@ impl RpcHelper {

     fn request_order(
         &self,
-        layout: &LayoutVersion,
+        layout: &LayoutHistory,
         nodes: impl Iterator<Item = Uuid>,
     ) -> Vec<Uuid> {
         // Retrieve some status variables that we will use to sort requests
         let peer_list = self.0.peering.get_peer_list();
-        let our_zone = layout.get_node_zone(&self.0.our_node_id).unwrap_or("");
+        let our_zone = layout
+            .current()
+            .get_node_zone(&self.0.our_node_id)
+            .unwrap_or("");

         // Augment requests with some information used to sort them.
         // The tuples are as follows:
@@ -580,7 +535,7 @@ impl RpcHelper {
         // and within a same zone we priorize nodes with the lowest latency.
         let mut nodes = nodes
             .map(|to| {
-                let peer_zone = layout.get_node_zone(&to).unwrap_or("");
+                let peer_zone = layout.current().get_node_zone(&to).unwrap_or("");
                 let peer_avg_ping = peer_list
                     .iter()
                     .find(|x| x.id.as_ref() == to.as_slice())

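The request_order changes above only alter where the layout comes from; the ordering idea itself (same-zone peers first, then lowest latency, per the comments in the hunk) boils down to sorting on a tuple key. A standalone sketch with made-up peer data:

fn main() {
    let our_zone = "dc1";
    // (node, zone, average ping in ms) -- hypothetical values
    let mut peers = vec![("n1", "dc2", 40u32), ("n2", "dc1", 15), ("n3", "dc1", 5)];
    // false sorts before true, so same-zone peers come first,
    // and ties are broken by ascending ping.
    peers.sort_by_key(|&(_, zone, ping)| (zone != our_zone, ping));
    assert_eq!(
        peers.iter().map(|p| p.0).collect::<Vec<_>>(),
        vec!["n3", "n2", "n1"]
    );
}
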
@@ -16,7 +16,7 @@ use tokio::sync::{watch, Notify};

 use garage_net::endpoint::{Endpoint, EndpointHandler};
 use garage_net::message::*;
-use garage_net::peering::{PeerConnState, PeeringManager};
+use garage_net::peering::PeeringManager;
 use garage_net::util::parse_and_resolve_peer_addr_async;
 use garage_net::{NetApp, NetworkKey, NodeID, NodeKey};

@@ -46,7 +46,7 @@ const STATUS_EXCHANGE_INTERVAL: Duration = Duration::from_secs(10);
 /// Version tag used for version check upon Netapp connection.
 /// Cluster nodes with different version tags are deemed
 /// incompatible and will refuse to connect.
-pub const GARAGE_VERSION_TAG: u64 = 0x6761726167650010; // garage 0x0010 (1.0)
+pub const GARAGE_VERSION_TAG: u64 = 0x676172616765000A; // garage 0x000A

 /// RPC endpoint used for calls related to membership
 pub const SYSTEM_RPC_PATH: &str = "garage_rpc/system.rs/SystemRpc";

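Both values of GARAGE_VERSION_TAG use the same packing: the ASCII bytes of "garage" in the six high bytes, then a 16-bit version number (0x0010, i.e. 1.0, on the removed side; 0x000A, i.e. 0.10, on the added side). A quick check of that encoding:

fn main() {
    let tag: u64 = 0x676172616765000A;
    let bytes = tag.to_be_bytes();
    assert_eq!(&bytes[..6], b"garage");
    let version = u16::from_be_bytes([bytes[6], bytes[7]]);
    assert_eq!(version, 0x000A); // garage 0.10
}
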
@@ -142,7 +142,7 @@ pub struct NodeStatus {
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct KnownNodeInfo {
     pub id: Uuid,
-    pub addr: Option<SocketAddr>,
+    pub addr: SocketAddr,
     pub is_up: bool,
     pub last_seen_secs_ago: Option<u64>,
     pub status: NodeStatus,
@@ -381,11 +381,7 @@ impl System {
             .iter()
             .map(|n| KnownNodeInfo {
                 id: n.id.into(),
-                addr: match n.state {
-                    PeerConnState::Ourself => self.rpc_public_addr,
-                    PeerConnState::Connected { addr } => Some(addr),
-                    _ => None,
-                },
+                addr: n.addr,
                 is_up: n.is_up(),
                 last_seen_secs_ago: n
                     .last_seen
@@ -451,7 +447,7 @@ impl System {
         // Obtain information about nodes that have a role as storage nodes
         // in one of the active layout versions
         let mut storage_nodes = HashSet::<Uuid>::with_capacity(16);
-        for ver in layout.versions().iter() {
+        for ver in layout.versions.iter() {
             storage_nodes.extend(
                 ver.roles
                     .items()
@@ -470,7 +466,7 @@ impl System {
         let mut partitions_all_ok = 0;
         for (_, hash) in partitions.iter() {
             let mut write_sets = layout
-                .versions()
+                .versions
                 .iter()
                 .map(|x| x.nodes_of(hash, x.replication_factor));
             let has_quorum = write_sets
@@ -634,7 +630,7 @@ impl System {
             .filter(|p| p.is_up())
             .count();

-        let not_configured = !self.cluster_layout().is_check_ok();
+        let not_configured = self.cluster_layout().check().is_err();
         let no_peers = n_connected < self.replication_factor.into();
         let expected_n_nodes = self.cluster_layout().all_nodes().len();
         let bad_peers = n_connected != expected_n_nodes;
@@ -726,10 +722,7 @@ impl System {
             .peering
             .get_peer_list()
             .iter()
-            .filter_map(|n| match n.state {
-                PeerConnState::Connected { addr } => Some((n.id.into(), addr)),
-                _ => None,
-            })
+            .map(|n| (n.id.into(), n.addr))
             .collect::<Vec<_>>();

         // Before doing it, we read the current peer list file (if it exists)

@@ -1,6 +1,6 @@
 [package]
 name = "garage_table"
-version = "1.0.0"
+version = "0.10.0"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"

@@ -1,6 +1,6 @@
 [package]
 name = "garage_util"
-version = "1.0.0"
+version = "0.10.0"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"

61 src/util/async_hash.rs Normal file
@@ -0,0 +1,61 @@
+use bytes::Bytes;
+use digest::Digest;
+
+use tokio::sync::mpsc;
+use tokio::task::JoinHandle;
+
+use crate::data::*;
+
+/// Compute the sha256 of a slice,
+/// spawning on a tokio thread for CPU-intensive processing
+/// The argument has to be an owned Bytes, as it is moved out to a new thread.
+pub async fn async_sha256sum(data: Bytes) -> Hash {
+    tokio::task::spawn_blocking(move || sha256sum(&data))
+        .await
+        .unwrap()
+}
+
+/// Compute the blake2sum of a slice,
+/// spawning on a tokio thread for CPU-intensive processing.
+/// The argument has to be an owned Bytes, as it is moved out to a new thread.
+pub async fn async_blake2sum(data: Bytes) -> Hash {
+    tokio::task::spawn_blocking(move || blake2sum(&data))
+        .await
+        .unwrap()
+}
+
+// ----
+
+pub struct AsyncHasher<D: Digest> {
+    sendblk: mpsc::Sender<Bytes>,
+    task: JoinHandle<digest::Output<D>>,
+}
+
+impl<D: Digest> AsyncHasher<D> {
+    pub fn new() -> Self {
+        let (sendblk, mut recvblk) = mpsc::channel::<Bytes>(1);
+        let task = tokio::task::spawn_blocking(move || {
+            let mut digest = D::new();
+            while let Some(blk) = recvblk.blocking_recv() {
+                digest.update(&blk[..]);
+            }
+            digest.finalize()
+        });
+        Self { sendblk, task }
+    }
+
+    pub async fn update(&self, b: Bytes) {
+        self.sendblk.send(b).await.unwrap();
+    }
+
+    pub async fn finalize(self) -> digest::Output<D> {
+        drop(self.sendblk);
+        self.task.await.unwrap()
+    }
+}
+
+impl<D: Digest> Default for AsyncHasher<D> {
+    fn default() -> Self {
+        Self::new()
+    }
+}

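A hypothetical usage sketch for the AsyncHasher added above (assuming it is in scope, e.g. via garage_util::async_hash::AsyncHasher; the point is that hashing runs on a blocking thread while the async side just feeds it blocks over the channel):

use bytes::Bytes;
use sha2::Sha256;

#[tokio::main]
async fn main() {
    let hasher = AsyncHasher::<Sha256>::new();
    // Each update sends one owned block to the hashing task
    hasher.update(Bytes::from_static(b"hello ")).await;
    hasher.update(Bytes::from_static(b"world")).await;
    // Dropping the sender closes the channel; the task returns the digest
    let digest = hasher.finalize().await;
    println!("sha256 = {:x}", digest);
}
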
@@ -23,14 +23,6 @@ pub struct Config {
     #[serde(default)]
     pub data_fsync: bool,

-    /// Disable automatic scrubbing of the data directory
-    #[serde(default)]
-    pub disable_scrub: bool,
-
-    /// Automatic snapshot interval for metadata
-    #[serde(default)]
-    pub metadata_auto_snapshot_interval: Option<String>,
-
     /// Size of data blocks to save to disk
     #[serde(
         deserialize_with = "deserialize_capacity",
@@ -60,14 +52,6 @@ pub struct Config {
     )]
     pub compression_level: Option<i32>,

-    /// Maximum amount of block data to buffer in RAM for sending to
-    /// remote nodes when these nodes are on slower links
-    #[serde(
-        deserialize_with = "deserialize_capacity",
-        default = "default_block_ram_buffer_max"
-    )]
-    pub block_ram_buffer_max: usize,
-
     /// Skip the permission check of secret files. Useful when
     /// POSIX ACLs (or more complex chmods) are used.
     #[serde(default)]
@@ -255,9 +239,6 @@ fn default_db_engine() -> String {
 fn default_block_size() -> usize {
     1048576
 }
-fn default_block_ram_buffer_max() -> usize {
-    256 * 1024 * 1024
-}

 fn default_consistency_mode() -> String {
     "consistent".into()

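The removed block_ram_buffer_max field illustrates the serde pattern used throughout this Config struct: a field-level default function supplies the value when the key is absent from the TOML file. A trimmed, self-contained sketch (dropping the deserialize_with capacity parser for brevity):

use serde::Deserialize;

fn default_block_ram_buffer_max() -> usize {
    256 * 1024 * 1024
}

#[derive(Deserialize, Debug)]
struct Config {
    #[serde(default = "default_block_ram_buffer_max")]
    block_ram_buffer_max: usize,
}

fn main() {
    // Key absent from the document: the default function supplies 256 MiB
    let cfg: Config = toml::from_str("").unwrap();
    assert_eq!(cfg.block_ram_buffer_max, 256 * 1024 * 1024);
}
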
@@ -83,19 +83,6 @@ impl FixedBytes32 {
         ret.copy_from_slice(by);
         Some(Self(ret))
     }
-    /// Return the next hash
-    pub fn increment(&self) -> Option<Self> {
-        let mut ret = *self;
-        for byte in ret.0.iter_mut().rev() {
-            if *byte == u8::MAX {
-                *byte = 0;
-            } else {
-                *byte = *byte + 1;
-                return Some(ret);
-            }
-        }
-        return None;
-    }
 }

 impl From<garage_net::NodeID> for FixedBytes32 {
@@ -153,25 +140,3 @@ pub fn fasthash(data: &[u8]) -> FastHash {
 pub fn gen_uuid() -> Uuid {
     rand::thread_rng().gen::<[u8; 32]>().into()
 }
-
-#[cfg(test)]
-mod test {
-    use super::*;
-
-    #[test]
-    fn test_increment() {
-        let zero: FixedBytes32 = [0u8; 32].into();
-        let mut one: FixedBytes32 = [0u8; 32].into();
-        one.0[31] = 1;
-        let max: FixedBytes32 = [0xFFu8; 32].into();
-        assert_eq!(zero.increment(), Some(one));
-        assert_eq!(max.increment(), None);
-
-        let mut test: FixedBytes32 = [0u8; 32].into();
-        let i = 0x198DF97209F8FFFFu64;
-        test.0[24..32].copy_from_slice(&u64::to_be_bytes(i));
-        let mut test2: FixedBytes32 = [0u8; 32].into();
-        test2.0[24..32].copy_from_slice(&u64::to_be_bytes(i + 1));
-        assert_eq!(test.increment(), Some(test2));
-    }
-}

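The removed increment() walks the 32 bytes from the end with carry, i.e. it treats the hash as one big-endian integer, adds 1, and returns None on overflow (exactly what the deleted test checks). The same semantics on a smaller width, expressed through checked integer arithmetic (a sketch, not part of the codebase):

// Equivalent of increment() for an 8-byte big-endian value
fn increment_be(bytes: [u8; 8]) -> Option<[u8; 8]> {
    u64::from_be_bytes(bytes).checked_add(1).map(u64::to_be_bytes)
}

fn main() {
    assert_eq!(increment_be([0u8; 8]), Some([0, 0, 0, 0, 0, 0, 0, 1]));
    assert_eq!(increment_be([0xFF; 8]), None); // overflow, like the all-0xFF hash case
}
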
@@ -70,9 +70,6 @@ pub enum Error {
     #[error(display = "Corrupt data: does not match hash {:?}", _0)]
     CorruptData(Hash),

-    #[error(display = "Missing block {:?}: no node returned a valid block", _0)]
-    MissingBlock(Hash),
-
     #[error(display = "{}", _0)]
     Message(String),
 }

@@ -3,6 +3,7 @@
 #[macro_use]
 extern crate tracing;

+pub mod async_hash;
 pub mod background;
 pub mod config;
 pub mod crdt;

@@ -1,6 +1,6 @@
 [package]
 name = "garage_web"
-version = "1.0.0"
+version = "0.10.0"
 authors = ["Alex Auvolat <alex@adnab.me>", "Quentin Dufour <quentin@dufour.io>"]
 edition = "2018"
 license = "AGPL-3.0"