Compare commits
92 commits
main
...
docs-s3-li
Author | SHA1 | Date | |
---|---|---|---|
94d723f27c | |||
be6b8f419d | |||
|
4cfb469d2b | ||
|
df1d9a9873 | ||
|
aac348fe93 | ||
d6ea0cbefa | |||
7b62fe3f0b | |||
f2106c2733 | |||
02e8eb167e | |||
329c0e64f9 | |||
29dbcb8278 | |||
f3f27293df | |||
13c5549886 | |||
936b6cb563 | |||
0650a43cf1 | |||
4eb8ca3a52 | |||
1fc220886a | |||
73ed9c7403 | |||
1d5bdc17a4 | |||
c106304b9c | |||
33f25d26c7 | |||
d6d571d512 | |||
a54b67740d | |||
8d5505514f | |||
426d8784da | |||
a81200d345 | |||
cdb2a591e9 | |||
582b076179 | |||
939a6d67e8 | |||
76230f2028 | |||
6775569525 | |||
6b857a9b8c | |||
1649002e2b | |||
822e344845 | |||
7f7d53cfa9 | |||
fd10200bec | |||
0c7ed0b0af | |||
1af4a5ed56 | |||
1fcd0b371b | |||
13c8662126 | |||
e6f14ab5cf | |||
510b620108 | |||
dfc131850a | |||
d4af27f920 | |||
0d6b05bb6c | |||
a19bfef508 | |||
d56c472712 | |||
2183518edc | |||
83c8467e23 | |||
f8e528c15d | |||
d1279e04f3 | |||
041b60ed1d | |||
f8d5409894 | |||
d6040e32a6 | |||
d7f90cabb0 | |||
687660b27f | |||
9d82196945 | |||
a51e8d94c6 | |||
de9d6cddf7 | |||
f7c65e830e | |||
0e61e3b6fb | |||
a0abf41762 | |||
2ac75018a1 | |||
980572a887 | |||
7a0014b6f7 | |||
edb0b9c1ee | |||
f58a813a36 | |||
defd7d9e63 | |||
533afcf4e1 | |||
5ea5fd2130 | |||
35f8e8e2fb | |||
d5a2502b09 | |||
d7868c48a4 | |||
280d1be7b1 | |||
2065f011ca | |||
243b7c9a1c | |||
a3afc761b6 | |||
19bdd1c799 | |||
448dcc5cf4 | |||
26121bb619 | |||
280330ac72 | |||
4d7b4d9d20 | |||
fc450ec13a | |||
379b2049f5 | |||
293139a94a | |||
54e800ef8d | |||
1e40c93fd0 | |||
0cfb56d33e | |||
c1fb65194c | |||
67941000ee | |||
60c26fbc62 | |||
e76dba9561 |
1
.envrc
Normal file
|
@ -0,0 +1 @@
|
||||||
|
use flake
|
1
.gitattributes
vendored
|
@ -1 +0,0 @@
|
||||||
*.pdf filter=lfs diff=lfs merge=lfs -text
|
|
1
.gitignore
vendored
|
@ -3,3 +3,4 @@
|
||||||
/pki
|
/pki
|
||||||
**/*.rs.bk
|
**/*.rs.bk
|
||||||
*.swp
|
*.swp
|
||||||
|
/.direnv
|
26
Cargo.lock
generated
|
@ -1048,7 +1048,7 @@ dependencies = [
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "garage"
|
name = "garage"
|
||||||
version = "0.8.0"
|
version = "0.8.1"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"assert-json-diff",
|
"assert-json-diff",
|
||||||
"async-trait",
|
"async-trait",
|
||||||
|
@ -1080,7 +1080,6 @@ dependencies = [
|
||||||
"parse_duration",
|
"parse_duration",
|
||||||
"prometheus",
|
"prometheus",
|
||||||
"rand 0.8.5",
|
"rand 0.8.5",
|
||||||
"rmp-serde",
|
|
||||||
"serde",
|
"serde",
|
||||||
"serde_bytes",
|
"serde_bytes",
|
||||||
"serde_json",
|
"serde_json",
|
||||||
|
@ -1096,7 +1095,7 @@ dependencies = [
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "garage_api"
|
name = "garage_api"
|
||||||
version = "0.8.0"
|
version = "0.8.1"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"async-trait",
|
"async-trait",
|
||||||
"base64",
|
"base64",
|
||||||
|
@ -1141,7 +1140,7 @@ dependencies = [
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "garage_block"
|
name = "garage_block"
|
||||||
version = "0.8.0"
|
version = "0.8.1"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"arc-swap",
|
"arc-swap",
|
||||||
"async-compression",
|
"async-compression",
|
||||||
|
@ -1156,7 +1155,6 @@ dependencies = [
|
||||||
"hex",
|
"hex",
|
||||||
"opentelemetry",
|
"opentelemetry",
|
||||||
"rand 0.8.5",
|
"rand 0.8.5",
|
||||||
"rmp-serde",
|
|
||||||
"serde",
|
"serde",
|
||||||
"serde_bytes",
|
"serde_bytes",
|
||||||
"tokio",
|
"tokio",
|
||||||
|
@ -1167,7 +1165,7 @@ dependencies = [
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "garage_db"
|
name = "garage_db"
|
||||||
version = "0.8.0"
|
version = "0.8.1"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"clap 3.1.18",
|
"clap 3.1.18",
|
||||||
"err-derive",
|
"err-derive",
|
||||||
|
@ -1182,7 +1180,7 @@ dependencies = [
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "garage_model"
|
name = "garage_model"
|
||||||
version = "0.8.0"
|
version = "0.8.1"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"arc-swap",
|
"arc-swap",
|
||||||
"async-trait",
|
"async-trait",
|
||||||
|
@ -1200,7 +1198,6 @@ dependencies = [
|
||||||
"netapp",
|
"netapp",
|
||||||
"opentelemetry",
|
"opentelemetry",
|
||||||
"rand 0.8.5",
|
"rand 0.8.5",
|
||||||
"rmp-serde",
|
|
||||||
"serde",
|
"serde",
|
||||||
"serde_bytes",
|
"serde_bytes",
|
||||||
"tokio",
|
"tokio",
|
||||||
|
@ -1210,7 +1207,7 @@ dependencies = [
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "garage_rpc"
|
name = "garage_rpc"
|
||||||
version = "0.8.0"
|
version = "0.8.1"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"arc-swap",
|
"arc-swap",
|
||||||
"async-trait",
|
"async-trait",
|
||||||
|
@ -1229,7 +1226,6 @@ dependencies = [
|
||||||
"pnet_datalink",
|
"pnet_datalink",
|
||||||
"rand 0.8.5",
|
"rand 0.8.5",
|
||||||
"reqwest",
|
"reqwest",
|
||||||
"rmp-serde",
|
|
||||||
"schemars",
|
"schemars",
|
||||||
"serde",
|
"serde",
|
||||||
"serde_bytes",
|
"serde_bytes",
|
||||||
|
@ -1241,8 +1237,9 @@ dependencies = [
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "garage_table"
|
name = "garage_table"
|
||||||
version = "0.8.0"
|
version = "0.8.1"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
|
"arc-swap",
|
||||||
"async-trait",
|
"async-trait",
|
||||||
"bytes",
|
"bytes",
|
||||||
"futures",
|
"futures",
|
||||||
|
@ -1254,7 +1251,6 @@ dependencies = [
|
||||||
"hexdump",
|
"hexdump",
|
||||||
"opentelemetry",
|
"opentelemetry",
|
||||||
"rand 0.8.5",
|
"rand 0.8.5",
|
||||||
"rmp-serde",
|
|
||||||
"serde",
|
"serde",
|
||||||
"serde_bytes",
|
"serde_bytes",
|
||||||
"tokio",
|
"tokio",
|
||||||
|
@ -1263,7 +1259,7 @@ dependencies = [
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "garage_util"
|
name = "garage_util"
|
||||||
version = "0.8.0"
|
version = "0.8.1"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"arc-swap",
|
"arc-swap",
|
||||||
"async-trait",
|
"async-trait",
|
||||||
|
@ -1276,9 +1272,11 @@ dependencies = [
|
||||||
"garage_db",
|
"garage_db",
|
||||||
"git-version",
|
"git-version",
|
||||||
"hex",
|
"hex",
|
||||||
|
"hexdump",
|
||||||
"http",
|
"http",
|
||||||
"hyper",
|
"hyper",
|
||||||
"lazy_static",
|
"lazy_static",
|
||||||
|
"mktemp",
|
||||||
"netapp",
|
"netapp",
|
||||||
"opentelemetry",
|
"opentelemetry",
|
||||||
"rand 0.8.5",
|
"rand 0.8.5",
|
||||||
|
@ -1294,7 +1292,7 @@ dependencies = [
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "garage_web"
|
name = "garage_web"
|
||||||
version = "0.8.0"
|
version = "0.8.1"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"err-derive",
|
"err-derive",
|
||||||
"futures",
|
"futures",
|
||||||
|
|
201
Cargo.nix
|
@ -32,7 +32,7 @@ args@{
|
||||||
ignoreLockHash,
|
ignoreLockHash,
|
||||||
}:
|
}:
|
||||||
let
|
let
|
||||||
nixifiedLockHash = "90b29705f5037c7e1b33f4650841f1266f2e86fa03d5d0c87ad80be7619985c7";
|
nixifiedLockHash = "8461dcfb984a8d042fecb5745d5da17912135dbf2a8ef7e6c3ae8e64c03d9744";
|
||||||
workspaceSrc = if args.workspaceSrc == null then ./. else args.workspaceSrc;
|
workspaceSrc = if args.workspaceSrc == null then ./. else args.workspaceSrc;
|
||||||
currentLockHash = builtins.hashFile "sha256" (workspaceSrc + /Cargo.lock);
|
currentLockHash = builtins.hashFile "sha256" (workspaceSrc + /Cargo.lock);
|
||||||
lockHashIgnored = if ignoreLockHash
|
lockHashIgnored = if ignoreLockHash
|
||||||
|
@ -56,15 +56,15 @@ in
|
||||||
{
|
{
|
||||||
cargo2nixVersion = "0.11.0";
|
cargo2nixVersion = "0.11.0";
|
||||||
workspace = {
|
workspace = {
|
||||||
garage_db = rustPackages.unknown.garage_db."0.8.0";
|
garage_db = rustPackages.unknown.garage_db."0.8.1";
|
||||||
garage_util = rustPackages.unknown.garage_util."0.8.0";
|
garage_util = rustPackages.unknown.garage_util."0.8.1";
|
||||||
garage_rpc = rustPackages.unknown.garage_rpc."0.8.0";
|
garage_rpc = rustPackages.unknown.garage_rpc."0.8.1";
|
||||||
garage_table = rustPackages.unknown.garage_table."0.8.0";
|
garage_table = rustPackages.unknown.garage_table."0.8.1";
|
||||||
garage_block = rustPackages.unknown.garage_block."0.8.0";
|
garage_block = rustPackages.unknown.garage_block."0.8.1";
|
||||||
garage_model = rustPackages.unknown.garage_model."0.8.0";
|
garage_model = rustPackages.unknown.garage_model."0.8.1";
|
||||||
garage_api = rustPackages.unknown.garage_api."0.8.0";
|
garage_api = rustPackages.unknown.garage_api."0.8.1";
|
||||||
garage_web = rustPackages.unknown.garage_web."0.8.0";
|
garage_web = rustPackages.unknown.garage_web."0.8.1";
|
||||||
garage = rustPackages.unknown.garage."0.8.0";
|
garage = rustPackages.unknown.garage."0.8.1";
|
||||||
k2v-client = rustPackages.unknown.k2v-client."0.0.1";
|
k2v-client = rustPackages.unknown.k2v-client."0.0.1";
|
||||||
};
|
};
|
||||||
"registry+https://github.com/rust-lang/crates.io-index".addr2line."0.17.0" = overridableMkRustCrate (profileName: rec {
|
"registry+https://github.com/rust-lang/crates.io-index".addr2line."0.17.0" = overridableMkRustCrate (profileName: rec {
|
||||||
|
@ -946,20 +946,20 @@ in
|
||||||
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
||||||
src = fetchCratesIo { inherit name version; sha256 = "1145cf131a2c6ba0615079ab6a638f7e1973ac9c2634fcbeaaad6114246efe8c"; };
|
src = fetchCratesIo { inherit name version; sha256 = "1145cf131a2c6ba0615079ab6a638f7e1973ac9c2634fcbeaaad6114246efe8c"; };
|
||||||
features = builtins.concatLists [
|
features = builtins.concatLists [
|
||||||
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled") "alloc")
|
[ "alloc" ]
|
||||||
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled") "default")
|
[ "default" ]
|
||||||
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled") "lazy_static")
|
[ "lazy_static" ]
|
||||||
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled") "std")
|
[ "std" ]
|
||||||
];
|
];
|
||||||
dependencies = {
|
dependencies = {
|
||||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled" then "cfg_if" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".cfg-if."1.0.0" { inherit profileName; }).out;
|
cfg_if = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".cfg-if."1.0.0" { inherit profileName; }).out;
|
||||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled" then "crossbeam_utils" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crossbeam-utils."0.8.8" { inherit profileName; }).out;
|
crossbeam_utils = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crossbeam-utils."0.8.8" { inherit profileName; }).out;
|
||||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled" then "lazy_static" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".lazy_static."1.4.0" { inherit profileName; }).out;
|
lazy_static = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".lazy_static."1.4.0" { inherit profileName; }).out;
|
||||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled" then "memoffset" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".memoffset."0.6.5" { inherit profileName; }).out;
|
memoffset = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".memoffset."0.6.5" { inherit profileName; }).out;
|
||||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled" then "scopeguard" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".scopeguard."1.1.0" { inherit profileName; }).out;
|
scopeguard = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".scopeguard."1.1.0" { inherit profileName; }).out;
|
||||||
};
|
};
|
||||||
buildDependencies = {
|
buildDependencies = {
|
||||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled" then "autocfg" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".autocfg."1.1.0" { profileName = "__noProfile"; }).out;
|
autocfg = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".autocfg."1.1.0" { profileName = "__noProfile"; }).out;
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
|
|
||||||
|
@ -995,7 +995,7 @@ in
|
||||||
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
||||||
src = fetchCratesIo { inherit name version; sha256 = "0bf124c720b7686e3c2663cf54062ab0f68a88af2fb6a030e87e30bf721fcb38"; };
|
src = fetchCratesIo { inherit name version; sha256 = "0bf124c720b7686e3c2663cf54062ab0f68a88af2fb6a030e87e30bf721fcb38"; };
|
||||||
features = builtins.concatLists [
|
features = builtins.concatLists [
|
||||||
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled") "default")
|
[ "default" ]
|
||||||
[ "lazy_static" ]
|
[ "lazy_static" ]
|
||||||
[ "std" ]
|
[ "std" ]
|
||||||
];
|
];
|
||||||
|
@ -1321,8 +1321,8 @@ in
|
||||||
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
||||||
src = fetchCratesIo { inherit name version; sha256 = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213"; };
|
src = fetchCratesIo { inherit name version; sha256 = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213"; };
|
||||||
dependencies = {
|
dependencies = {
|
||||||
${ if (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled") && hostPlatform.isUnix then "libc" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.121" { inherit profileName; }).out;
|
${ if hostPlatform.isUnix then "libc" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.121" { inherit profileName; }).out;
|
||||||
${ if (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled") && hostPlatform.isWindows then "winapi" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".winapi."0.3.9" { inherit profileName; }).out;
|
${ if hostPlatform.isWindows then "winapi" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".winapi."0.3.9" { inherit profileName; }).out;
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
|
|
||||||
|
@ -1490,20 +1490,20 @@ in
|
||||||
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
||||||
src = fetchCratesIo { inherit name version; sha256 = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c"; };
|
src = fetchCratesIo { inherit name version; sha256 = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c"; };
|
||||||
dependencies = {
|
dependencies = {
|
||||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled" then "byteorder" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".byteorder."1.4.3" { inherit profileName; }).out;
|
byteorder = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".byteorder."1.4.3" { inherit profileName; }).out;
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
|
|
||||||
"unknown".garage."0.8.0" = overridableMkRustCrate (profileName: rec {
|
"unknown".garage."0.8.1" = overridableMkRustCrate (profileName: rec {
|
||||||
name = "garage";
|
name = "garage";
|
||||||
version = "0.8.0";
|
version = "0.8.1";
|
||||||
registry = "unknown";
|
registry = "unknown";
|
||||||
src = fetchCrateLocal (workspaceSrc + "/src/garage");
|
src = fetchCrateLocal (workspaceSrc + "/src/garage");
|
||||||
features = builtins.concatLists [
|
features = builtins.concatLists [
|
||||||
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default") "bundled-libs")
|
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default") "bundled-libs")
|
||||||
(lib.optional (rootFeatures' ? "garage/consul-discovery") "consul-discovery")
|
(lib.optional (rootFeatures' ? "garage/consul-discovery") "consul-discovery")
|
||||||
(lib.optional (rootFeatures' ? "garage/default") "default")
|
(lib.optional (rootFeatures' ? "garage/default") "default")
|
||||||
(lib.optional (rootFeatures' ? "garage/k2v") "k2v")
|
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/k2v") "k2v")
|
||||||
(lib.optional (rootFeatures' ? "garage/kubernetes-discovery") "kubernetes-discovery")
|
(lib.optional (rootFeatures' ? "garage/kubernetes-discovery") "kubernetes-discovery")
|
||||||
(lib.optional (rootFeatures' ? "garage/lmdb") "lmdb")
|
(lib.optional (rootFeatures' ? "garage/lmdb") "lmdb")
|
||||||
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics") "metrics")
|
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics") "metrics")
|
||||||
|
@ -1522,14 +1522,14 @@ in
|
||||||
bytesize = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytesize."1.1.0" { inherit profileName; }).out;
|
bytesize = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytesize."1.1.0" { inherit profileName; }).out;
|
||||||
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
|
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
|
||||||
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }).out;
|
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }).out;
|
||||||
garage_api = (rustPackages."unknown".garage_api."0.8.0" { inherit profileName; }).out;
|
garage_api = (rustPackages."unknown".garage_api."0.8.1" { inherit profileName; }).out;
|
||||||
garage_block = (rustPackages."unknown".garage_block."0.8.0" { inherit profileName; }).out;
|
garage_block = (rustPackages."unknown".garage_block."0.8.1" { inherit profileName; }).out;
|
||||||
garage_db = (rustPackages."unknown".garage_db."0.8.0" { inherit profileName; }).out;
|
garage_db = (rustPackages."unknown".garage_db."0.8.1" { inherit profileName; }).out;
|
||||||
garage_model = (rustPackages."unknown".garage_model."0.8.0" { inherit profileName; }).out;
|
garage_model = (rustPackages."unknown".garage_model."0.8.1" { inherit profileName; }).out;
|
||||||
garage_rpc = (rustPackages."unknown".garage_rpc."0.8.0" { inherit profileName; }).out;
|
garage_rpc = (rustPackages."unknown".garage_rpc."0.8.1" { inherit profileName; }).out;
|
||||||
garage_table = (rustPackages."unknown".garage_table."0.8.0" { inherit profileName; }).out;
|
garage_table = (rustPackages."unknown".garage_table."0.8.1" { inherit profileName; }).out;
|
||||||
garage_util = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out;
|
garage_util = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out;
|
||||||
garage_web = (rustPackages."unknown".garage_web."0.8.0" { inherit profileName; }).out;
|
garage_web = (rustPackages."unknown".garage_web."0.8.1" { inherit profileName; }).out;
|
||||||
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
|
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
|
||||||
sodiumoxide = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".kuska-sodiumoxide."0.2.5-0" { inherit profileName; }).out;
|
sodiumoxide = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".kuska-sodiumoxide."0.2.5-0" { inherit profileName; }).out;
|
||||||
netapp = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.5.2" { inherit profileName; }).out;
|
netapp = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.5.2" { inherit profileName; }).out;
|
||||||
|
@ -1539,7 +1539,6 @@ in
|
||||||
parse_duration = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".parse_duration."2.1.1" { inherit profileName; }).out;
|
parse_duration = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".parse_duration."2.1.1" { inherit profileName; }).out;
|
||||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/prometheus" then "prometheus" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".prometheus."0.13.0" { inherit profileName; }).out;
|
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/prometheus" then "prometheus" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".prometheus."0.13.0" { inherit profileName; }).out;
|
||||||
rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
|
rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
|
||||||
rmp_serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" { inherit profileName; }).out;
|
|
||||||
serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.137" { inherit profileName; }).out;
|
serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.137" { inherit profileName; }).out;
|
||||||
serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; }).out;
|
serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; }).out;
|
||||||
structopt = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".structopt."0.3.26" { inherit profileName; }).out;
|
structopt = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".structopt."0.3.26" { inherit profileName; }).out;
|
||||||
|
@ -1563,13 +1562,13 @@ in
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
|
|
||||||
"unknown".garage_api."0.8.0" = overridableMkRustCrate (profileName: rec {
|
"unknown".garage_api."0.8.1" = overridableMkRustCrate (profileName: rec {
|
||||||
name = "garage_api";
|
name = "garage_api";
|
||||||
version = "0.8.0";
|
version = "0.8.1";
|
||||||
registry = "unknown";
|
registry = "unknown";
|
||||||
src = fetchCrateLocal (workspaceSrc + "/src/api");
|
src = fetchCrateLocal (workspaceSrc + "/src/api");
|
||||||
features = builtins.concatLists [
|
features = builtins.concatLists [
|
||||||
(lib.optional (rootFeatures' ? "garage/k2v" || rootFeatures' ? "garage_api/k2v") "k2v")
|
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/k2v" || rootFeatures' ? "garage_api/k2v") "k2v")
|
||||||
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage_api/metrics") "metrics")
|
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage_api/metrics") "metrics")
|
||||||
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus") "opentelemetry-prometheus")
|
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus") "opentelemetry-prometheus")
|
||||||
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/prometheus") "prometheus")
|
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/prometheus") "prometheus")
|
||||||
|
@ -1584,11 +1583,11 @@ in
|
||||||
form_urlencoded = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".form_urlencoded."1.0.1" { inherit profileName; }).out;
|
form_urlencoded = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".form_urlencoded."1.0.1" { inherit profileName; }).out;
|
||||||
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
|
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
|
||||||
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }).out;
|
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }).out;
|
||||||
garage_block = (rustPackages."unknown".garage_block."0.8.0" { inherit profileName; }).out;
|
garage_block = (rustPackages."unknown".garage_block."0.8.1" { inherit profileName; }).out;
|
||||||
garage_model = (rustPackages."unknown".garage_model."0.8.0" { inherit profileName; }).out;
|
garage_model = (rustPackages."unknown".garage_model."0.8.1" { inherit profileName; }).out;
|
||||||
garage_rpc = (rustPackages."unknown".garage_rpc."0.8.0" { inherit profileName; }).out;
|
garage_rpc = (rustPackages."unknown".garage_rpc."0.8.1" { inherit profileName; }).out;
|
||||||
garage_table = (rustPackages."unknown".garage_table."0.8.0" { inherit profileName; }).out;
|
garage_table = (rustPackages."unknown".garage_table."0.8.1" { inherit profileName; }).out;
|
||||||
garage_util = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out;
|
garage_util = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out;
|
||||||
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
|
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
|
||||||
hmac = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hmac."0.12.1" { inherit profileName; }).out;
|
hmac = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hmac."0.12.1" { inherit profileName; }).out;
|
||||||
http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.8" { inherit profileName; }).out;
|
http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.8" { inherit profileName; }).out;
|
||||||
|
@ -1617,9 +1616,9 @@ in
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
|
|
||||||
"unknown".garage_block."0.8.0" = overridableMkRustCrate (profileName: rec {
|
"unknown".garage_block."0.8.1" = overridableMkRustCrate (profileName: rec {
|
||||||
name = "garage_block";
|
name = "garage_block";
|
||||||
version = "0.8.0";
|
version = "0.8.1";
|
||||||
registry = "unknown";
|
registry = "unknown";
|
||||||
src = fetchCrateLocal (workspaceSrc + "/src/block");
|
src = fetchCrateLocal (workspaceSrc + "/src/block");
|
||||||
features = builtins.concatLists [
|
features = builtins.concatLists [
|
||||||
|
@ -1632,14 +1631,13 @@ in
|
||||||
bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.2.0" { inherit profileName; }).out;
|
bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.2.0" { inherit profileName; }).out;
|
||||||
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
|
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
|
||||||
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }).out;
|
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }).out;
|
||||||
garage_db = (rustPackages."unknown".garage_db."0.8.0" { inherit profileName; }).out;
|
garage_db = (rustPackages."unknown".garage_db."0.8.1" { inherit profileName; }).out;
|
||||||
garage_rpc = (rustPackages."unknown".garage_rpc."0.8.0" { inherit profileName; }).out;
|
garage_rpc = (rustPackages."unknown".garage_rpc."0.8.1" { inherit profileName; }).out;
|
||||||
garage_table = (rustPackages."unknown".garage_table."0.8.0" { inherit profileName; }).out;
|
garage_table = (rustPackages."unknown".garage_table."0.8.1" { inherit profileName; }).out;
|
||||||
garage_util = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out;
|
garage_util = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out;
|
||||||
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
|
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
|
||||||
opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
|
opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
|
||||||
rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
|
rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
|
||||||
rmp_serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" { inherit profileName; }).out;
|
|
||||||
serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.137" { inherit profileName; }).out;
|
serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.137" { inherit profileName; }).out;
|
||||||
serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; }).out;
|
serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; }).out;
|
||||||
tokio = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.17.0" { inherit profileName; }).out;
|
tokio = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.17.0" { inherit profileName; }).out;
|
||||||
|
@ -1649,20 +1647,21 @@ in
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
|
|
||||||
"unknown".garage_db."0.8.0" = overridableMkRustCrate (profileName: rec {
|
"unknown".garage_db."0.8.1" = overridableMkRustCrate (profileName: rec {
|
||||||
name = "garage_db";
|
name = "garage_db";
|
||||||
version = "0.8.0";
|
version = "0.8.1";
|
||||||
registry = "unknown";
|
registry = "unknown";
|
||||||
src = fetchCrateLocal (workspaceSrc + "/src/db");
|
src = fetchCrateLocal (workspaceSrc + "/src/db");
|
||||||
features = builtins.concatLists [
|
features = builtins.concatLists [
|
||||||
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "bundled-libs")
|
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "bundled-libs")
|
||||||
(lib.optional (rootFeatures' ? "garage_db/clap" || rootFeatures' ? "garage_db/cli") "clap")
|
(lib.optional (rootFeatures' ? "garage_db/clap" || rootFeatures' ? "garage_db/cli") "clap")
|
||||||
(lib.optional (rootFeatures' ? "garage_db/cli") "cli")
|
(lib.optional (rootFeatures' ? "garage_db/cli") "cli")
|
||||||
|
[ "default" ]
|
||||||
(lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "heed")
|
(lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "heed")
|
||||||
(lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "lmdb")
|
(lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "lmdb")
|
||||||
(lib.optional (rootFeatures' ? "garage_db/cli" || rootFeatures' ? "garage_db/pretty_env_logger") "pretty_env_logger")
|
(lib.optional (rootFeatures' ? "garage_db/cli" || rootFeatures' ? "garage_db/pretty_env_logger") "pretty_env_logger")
|
||||||
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite") "rusqlite")
|
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite") "rusqlite")
|
||||||
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled") "sled")
|
[ "sled" ]
|
||||||
(lib.optional (rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite") "sqlite")
|
(lib.optional (rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite") "sqlite")
|
||||||
];
|
];
|
||||||
dependencies = {
|
dependencies = {
|
||||||
|
@ -1672,7 +1671,7 @@ in
|
||||||
hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out;
|
hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out;
|
||||||
${ if rootFeatures' ? "garage_db/cli" || rootFeatures' ? "garage_db/pretty_env_logger" then "pretty_env_logger" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".pretty_env_logger."0.4.0" { inherit profileName; }).out;
|
${ if rootFeatures' ? "garage_db/cli" || rootFeatures' ? "garage_db/pretty_env_logger" then "pretty_env_logger" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".pretty_env_logger."0.4.0" { inherit profileName; }).out;
|
||||||
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite" then "rusqlite" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rusqlite."0.27.0" { inherit profileName; }).out;
|
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite" then "rusqlite" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rusqlite."0.27.0" { inherit profileName; }).out;
|
||||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled" then "sled" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".sled."0.34.7" { inherit profileName; }).out;
|
sled = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".sled."0.34.7" { inherit profileName; }).out;
|
||||||
tracing = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tracing."0.1.32" { inherit profileName; }).out;
|
tracing = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tracing."0.1.32" { inherit profileName; }).out;
|
||||||
};
|
};
|
||||||
devDependencies = {
|
devDependencies = {
|
||||||
|
@ -1680,15 +1679,16 @@ in
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
|
|
||||||
"unknown".garage_model."0.8.0" = overridableMkRustCrate (profileName: rec {
|
"unknown".garage_model."0.8.1" = overridableMkRustCrate (profileName: rec {
|
||||||
name = "garage_model";
|
name = "garage_model";
|
||||||
version = "0.8.0";
|
version = "0.8.1";
|
||||||
registry = "unknown";
|
registry = "unknown";
|
||||||
src = fetchCrateLocal (workspaceSrc + "/src/model");
|
src = fetchCrateLocal (workspaceSrc + "/src/model");
|
||||||
features = builtins.concatLists [
|
features = builtins.concatLists [
|
||||||
(lib.optional (rootFeatures' ? "garage/k2v" || rootFeatures' ? "garage_api/k2v" || rootFeatures' ? "garage_model/k2v") "k2v")
|
[ "default" ]
|
||||||
|
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/k2v" || rootFeatures' ? "garage_api/k2v" || rootFeatures' ? "garage_model/k2v") "k2v")
|
||||||
(lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_model/lmdb") "lmdb")
|
(lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_model/lmdb") "lmdb")
|
||||||
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_model/sled") "sled")
|
[ "sled" ]
|
||||||
(lib.optional (rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_model/sqlite") "sqlite")
|
(lib.optional (rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_model/sqlite") "sqlite")
|
||||||
];
|
];
|
||||||
dependencies = {
|
dependencies = {
|
||||||
|
@ -1699,16 +1699,15 @@ in
|
||||||
err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
|
err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
|
||||||
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
|
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
|
||||||
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }).out;
|
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }).out;
|
||||||
garage_block = (rustPackages."unknown".garage_block."0.8.0" { inherit profileName; }).out;
|
garage_block = (rustPackages."unknown".garage_block."0.8.1" { inherit profileName; }).out;
|
||||||
garage_db = (rustPackages."unknown".garage_db."0.8.0" { inherit profileName; }).out;
|
garage_db = (rustPackages."unknown".garage_db."0.8.1" { inherit profileName; }).out;
|
||||||
garage_rpc = (rustPackages."unknown".garage_rpc."0.8.0" { inherit profileName; }).out;
|
garage_rpc = (rustPackages."unknown".garage_rpc."0.8.1" { inherit profileName; }).out;
|
||||||
garage_table = (rustPackages."unknown".garage_table."0.8.0" { inherit profileName; }).out;
|
garage_table = (rustPackages."unknown".garage_table."0.8.1" { inherit profileName; }).out;
|
||||||
garage_util = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out;
|
garage_util = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out;
|
||||||
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
|
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
|
||||||
netapp = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.5.2" { inherit profileName; }).out;
|
netapp = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.5.2" { inherit profileName; }).out;
|
||||||
opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
|
opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
|
||||||
rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
|
rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
|
||||||
rmp_serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" { inherit profileName; }).out;
|
|
||||||
serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.137" { inherit profileName; }).out;
|
serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.137" { inherit profileName; }).out;
|
||||||
serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; }).out;
|
serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; }).out;
|
||||||
tokio = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.17.0" { inherit profileName; }).out;
|
tokio = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.17.0" { inherit profileName; }).out;
|
||||||
|
@ -1717,9 +1716,9 @@ in
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
|
|
||||||
"unknown".garage_rpc."0.8.0" = overridableMkRustCrate (profileName: rec {
|
"unknown".garage_rpc."0.8.1" = overridableMkRustCrate (profileName: rec {
|
||||||
name = "garage_rpc";
|
name = "garage_rpc";
|
||||||
version = "0.8.0";
|
version = "0.8.1";
|
||||||
registry = "unknown";
|
registry = "unknown";
|
||||||
src = fetchCrateLocal (workspaceSrc + "/src/rpc");
|
src = fetchCrateLocal (workspaceSrc + "/src/rpc");
|
||||||
features = builtins.concatLists [
|
features = builtins.concatLists [
|
||||||
|
@ -1739,7 +1738,7 @@ in
|
||||||
${ if rootFeatures' ? "garage/consul-discovery" || rootFeatures' ? "garage_rpc/consul-discovery" || rootFeatures' ? "garage_rpc/err-derive" then "err_derive" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
|
${ if rootFeatures' ? "garage/consul-discovery" || rootFeatures' ? "garage_rpc/consul-discovery" || rootFeatures' ? "garage_rpc/err-derive" then "err_derive" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
|
||||||
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
|
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
|
||||||
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }).out;
|
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }).out;
|
||||||
garage_util = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out;
|
garage_util = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out;
|
||||||
gethostname = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".gethostname."0.2.3" { inherit profileName; }).out;
|
gethostname = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".gethostname."0.2.3" { inherit profileName; }).out;
|
||||||
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
|
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
|
||||||
${ if rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/k8s-openapi" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "k8s_openapi" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".k8s-openapi."0.16.0" { inherit profileName; }).out;
|
${ if rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/k8s-openapi" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "k8s_openapi" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".k8s-openapi."0.16.0" { inherit profileName; }).out;
|
||||||
|
@ -1750,7 +1749,6 @@ in
|
||||||
pnet_datalink = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".pnet_datalink."0.28.0" { inherit profileName; }).out;
|
pnet_datalink = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".pnet_datalink."0.28.0" { inherit profileName; }).out;
|
||||||
rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
|
rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
|
||||||
${ if rootFeatures' ? "garage/consul-discovery" || rootFeatures' ? "garage_rpc/consul-discovery" || rootFeatures' ? "garage_rpc/reqwest" then "reqwest" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".reqwest."0.11.12" { inherit profileName; }).out;
|
${ if rootFeatures' ? "garage/consul-discovery" || rootFeatures' ? "garage_rpc/consul-discovery" || rootFeatures' ? "garage_rpc/reqwest" then "reqwest" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".reqwest."0.11.12" { inherit profileName; }).out;
|
||||||
rmp_serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" { inherit profileName; }).out;
|
|
||||||
${ if rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/kubernetes-discovery" || rootFeatures' ? "garage_rpc/schemars" then "schemars" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".schemars."0.8.8" { inherit profileName; }).out;
|
${ if rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/kubernetes-discovery" || rootFeatures' ? "garage_rpc/schemars" then "schemars" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".schemars."0.8.8" { inherit profileName; }).out;
|
||||||
serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.137" { inherit profileName; }).out;
|
serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.137" { inherit profileName; }).out;
|
||||||
serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; }).out;
|
serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; }).out;
|
||||||
|
@ -1761,24 +1759,24 @@ in
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
|
|
||||||
"unknown".garage_table."0.8.0" = overridableMkRustCrate (profileName: rec {
|
"unknown".garage_table."0.8.1" = overridableMkRustCrate (profileName: rec {
|
||||||
name = "garage_table";
|
name = "garage_table";
|
||||||
version = "0.8.0";
|
version = "0.8.1";
|
||||||
registry = "unknown";
|
registry = "unknown";
|
||||||
src = fetchCrateLocal (workspaceSrc + "/src/table");
|
src = fetchCrateLocal (workspaceSrc + "/src/table");
|
||||||
dependencies = {
|
dependencies = {
|
||||||
|
arc_swap = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".arc-swap."1.5.0" { inherit profileName; }).out;
|
||||||
async_trait = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".async-trait."0.1.52" { profileName = "__noProfile"; }).out;
|
async_trait = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".async-trait."0.1.52" { profileName = "__noProfile"; }).out;
|
||||||
bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.2.0" { inherit profileName; }).out;
|
bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.2.0" { inherit profileName; }).out;
|
||||||
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
|
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
|
||||||
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }).out;
|
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }).out;
|
||||||
garage_db = (rustPackages."unknown".garage_db."0.8.0" { inherit profileName; }).out;
|
garage_db = (rustPackages."unknown".garage_db."0.8.1" { inherit profileName; }).out;
|
||||||
garage_rpc = (rustPackages."unknown".garage_rpc."0.8.0" { inherit profileName; }).out;
|
garage_rpc = (rustPackages."unknown".garage_rpc."0.8.1" { inherit profileName; }).out;
|
||||||
garage_util = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out;
|
garage_util = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out;
|
||||||
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
|
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
|
||||||
hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out;
|
hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out;
|
||||||
opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
|
opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
|
||||||
rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
|
rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
|
||||||
rmp_serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" { inherit profileName; }).out;
|
|
||||||
serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.137" { inherit profileName; }).out;
|
serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.137" { inherit profileName; }).out;
|
||||||
serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; }).out;
|
serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; }).out;
|
||||||
tokio = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.17.0" { inherit profileName; }).out;
|
tokio = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.17.0" { inherit profileName; }).out;
|
||||||
|
@ -1786,13 +1784,13 @@ in
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
|
|
||||||
"unknown".garage_util."0.8.0" = overridableMkRustCrate (profileName: rec {
|
"unknown".garage_util."0.8.1" = overridableMkRustCrate (profileName: rec {
|
||||||
name = "garage_util";
|
name = "garage_util";
|
||||||
version = "0.8.0";
|
version = "0.8.1";
|
||||||
registry = "unknown";
|
registry = "unknown";
|
||||||
src = fetchCrateLocal (workspaceSrc + "/src/util");
|
src = fetchCrateLocal (workspaceSrc + "/src/util");
|
||||||
features = builtins.concatLists [
|
features = builtins.concatLists [
|
||||||
(lib.optional (rootFeatures' ? "garage/k2v" || rootFeatures' ? "garage_api/k2v" || rootFeatures' ? "garage_model/k2v" || rootFeatures' ? "garage_util/k2v") "k2v")
|
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/k2v" || rootFeatures' ? "garage_api/k2v" || rootFeatures' ? "garage_model/k2v" || rootFeatures' ? "garage_util/k2v") "k2v")
|
||||||
];
|
];
|
||||||
dependencies = {
|
dependencies = {
|
||||||
arc_swap = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".arc-swap."1.5.0" { inherit profileName; }).out;
|
arc_swap = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".arc-swap."1.5.0" { inherit profileName; }).out;
|
||||||
|
@ -1803,9 +1801,10 @@ in
|
||||||
digest = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".digest."0.10.3" { inherit profileName; }).out;
|
digest = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".digest."0.10.3" { inherit profileName; }).out;
|
||||||
err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
|
err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
|
||||||
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
|
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
|
||||||
garage_db = (rustPackages."unknown".garage_db."0.8.0" { inherit profileName; }).out;
|
garage_db = (rustPackages."unknown".garage_db."0.8.1" { inherit profileName; }).out;
|
||||||
git_version = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".git-version."0.3.5" { inherit profileName; }).out;
|
git_version = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".git-version."0.3.5" { inherit profileName; }).out;
|
||||||
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
|
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
|
||||||
|
hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out;
|
||||||
http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.8" { inherit profileName; }).out;
|
http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.8" { inherit profileName; }).out;
|
||||||
hyper = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."0.14.18" { inherit profileName; }).out;
|
hyper = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."0.14.18" { inherit profileName; }).out;
|
||||||
lazy_static = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".lazy_static."1.4.0" { inherit profileName; }).out;
|
lazy_static = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".lazy_static."1.4.0" { inherit profileName; }).out;
|
||||||
|
@ -1821,20 +1820,23 @@ in
|
||||||
tracing = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tracing."0.1.32" { inherit profileName; }).out;
|
tracing = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tracing."0.1.32" { inherit profileName; }).out;
|
||||||
xxhash_rust = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".xxhash-rust."0.8.4" { inherit profileName; }).out;
|
xxhash_rust = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".xxhash-rust."0.8.4" { inherit profileName; }).out;
|
||||||
};
|
};
|
||||||
|
devDependencies = {
|
||||||
|
mktemp = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".mktemp."0.4.1" { inherit profileName; }).out;
|
||||||
|
};
|
||||||
});
|
});
|
||||||
|
|
||||||
"unknown".garage_web."0.8.0" = overridableMkRustCrate (profileName: rec {
|
"unknown".garage_web."0.8.1" = overridableMkRustCrate (profileName: rec {
|
||||||
name = "garage_web";
|
name = "garage_web";
|
||||||
version = "0.8.0";
|
version = "0.8.1";
|
||||||
registry = "unknown";
|
registry = "unknown";
|
||||||
src = fetchCrateLocal (workspaceSrc + "/src/web");
|
src = fetchCrateLocal (workspaceSrc + "/src/web");
|
||||||
dependencies = {
|
dependencies = {
|
||||||
err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
|
err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
|
||||||
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
|
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
|
||||||
garage_api = (rustPackages."unknown".garage_api."0.8.0" { inherit profileName; }).out;
|
garage_api = (rustPackages."unknown".garage_api."0.8.1" { inherit profileName; }).out;
|
||||||
garage_model = (rustPackages."unknown".garage_model."0.8.0" { inherit profileName; }).out;
|
garage_model = (rustPackages."unknown".garage_model."0.8.1" { inherit profileName; }).out;
|
||||||
garage_table = (rustPackages."unknown".garage_table."0.8.0" { inherit profileName; }).out;
|
garage_table = (rustPackages."unknown".garage_table."0.8.1" { inherit profileName; }).out;
|
||||||
garage_util = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out;
|
garage_util = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out;
|
||||||
http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.8" { inherit profileName; }).out;
|
http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.8" { inherit profileName; }).out;
|
||||||
hyper = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."0.14.18" { inherit profileName; }).out;
|
hyper = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."0.14.18" { inherit profileName; }).out;
|
||||||
opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
|
opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
|
||||||
|
@ -2448,7 +2450,7 @@ in
|
||||||
dependencies = {
|
dependencies = {
|
||||||
base64 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".base64."0.13.0" { inherit profileName; }).out;
|
base64 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".base64."0.13.0" { inherit profileName; }).out;
|
||||||
${ if rootFeatures' ? "k2v-client/clap" || rootFeatures' ? "k2v-client/cli" then "clap" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".clap."3.1.18" { inherit profileName; }).out;
|
${ if rootFeatures' ? "k2v-client/clap" || rootFeatures' ? "k2v-client/cli" then "clap" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".clap."3.1.18" { inherit profileName; }).out;
|
||||||
${ if rootFeatures' ? "k2v-client/cli" || rootFeatures' ? "k2v-client/garage_util" then "garage_util" else null } = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out;
|
${ if rootFeatures' ? "k2v-client/cli" || rootFeatures' ? "k2v-client/garage_util" then "garage_util" else null } = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out;
|
||||||
http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.8" { inherit profileName; }).out;
|
http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.8" { inherit profileName; }).out;
|
||||||
log = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".log."0.4.16" { inherit profileName; }).out;
|
log = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".log."0.4.16" { inherit profileName; }).out;
|
||||||
rusoto_core = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rusoto_core."0.48.0" { inherit profileName; }).out;
|
rusoto_core = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rusoto_core."0.48.0" { inherit profileName; }).out;
|
||||||
|
@ -2846,10 +2848,10 @@ in
|
||||||
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
||||||
src = fetchCratesIo { inherit name version; sha256 = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce"; };
|
src = fetchCratesIo { inherit name version; sha256 = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce"; };
|
||||||
features = builtins.concatLists [
|
features = builtins.concatLists [
|
||||||
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled") "default")
|
[ "default" ]
|
||||||
];
|
];
|
||||||
buildDependencies = {
|
buildDependencies = {
|
||||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled" then "autocfg" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".autocfg."1.1.0" { profileName = "__noProfile"; }).out;
|
autocfg = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".autocfg."1.1.0" { profileName = "__noProfile"; }).out;
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
|
|
||||||
|
@ -4732,18 +4734,18 @@ in
|
||||||
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
||||||
src = fetchCratesIo { inherit name version; sha256 = "7f96b4737c2ce5987354855aed3797279def4ebf734436c6aa4552cf8e169935"; };
|
src = fetchCratesIo { inherit name version; sha256 = "7f96b4737c2ce5987354855aed3797279def4ebf734436c6aa4552cf8e169935"; };
|
||||||
features = builtins.concatLists [
|
features = builtins.concatLists [
|
||||||
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled") "default")
|
[ "default" ]
|
||||||
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled") "no_metrics")
|
[ "no_metrics" ]
|
||||||
];
|
];
|
||||||
dependencies = {
|
dependencies = {
|
||||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled" then "crc32fast" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crc32fast."1.3.2" { inherit profileName; }).out;
|
crc32fast = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crc32fast."1.3.2" { inherit profileName; }).out;
|
||||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled" then "crossbeam_epoch" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crossbeam-epoch."0.9.8" { inherit profileName; }).out;
|
crossbeam_epoch = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crossbeam-epoch."0.9.8" { inherit profileName; }).out;
|
||||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled" then "crossbeam_utils" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crossbeam-utils."0.8.8" { inherit profileName; }).out;
|
crossbeam_utils = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crossbeam-utils."0.8.8" { inherit profileName; }).out;
|
||||||
${ if (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled") && (hostPlatform.parsed.kernel.name == "linux" || hostPlatform.parsed.kernel.name == "darwin" || hostPlatform.parsed.kernel.name == "windows") then "fs2" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".fs2."0.4.3" { inherit profileName; }).out;
|
${ if hostPlatform.parsed.kernel.name == "linux" || hostPlatform.parsed.kernel.name == "darwin" || hostPlatform.parsed.kernel.name == "windows" then "fs2" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".fs2."0.4.3" { inherit profileName; }).out;
|
||||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled" then "fxhash" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".fxhash."0.2.1" { inherit profileName; }).out;
|
fxhash = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".fxhash."0.2.1" { inherit profileName; }).out;
|
||||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled" then "libc" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.121" { inherit profileName; }).out;
|
libc = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.121" { inherit profileName; }).out;
|
||||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled" then "log" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".log."0.4.16" { inherit profileName; }).out;
|
log = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".log."0.4.16" { inherit profileName; }).out;
|
||||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled" then "parking_lot" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".parking_lot."0.11.2" { inherit profileName; }).out;
|
parking_lot = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".parking_lot."0.11.2" { inherit profileName; }).out;
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
|
|
||||||
|
@ -5396,7 +5398,6 @@ in
|
||||||
[ "attributes" ]
|
[ "attributes" ]
|
||||||
[ "default" ]
|
[ "default" ]
|
||||||
[ "log" ]
|
[ "log" ]
|
||||||
[ "log-always" ]
|
|
||||||
[ "std" ]
|
[ "std" ]
|
||||||
[ "tracing-attributes" ]
|
[ "tracing-attributes" ]
|
||||||
];
|
];
|
||||||
|
@ -5890,7 +5891,7 @@ in
|
||||||
[ "ntstatus" ]
|
[ "ntstatus" ]
|
||||||
[ "objbase" ]
|
[ "objbase" ]
|
||||||
[ "processenv" ]
|
[ "processenv" ]
|
||||||
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled") "processthreadsapi")
|
[ "processthreadsapi" ]
|
||||||
[ "profileapi" ]
|
[ "profileapi" ]
|
||||||
[ "schannel" ]
|
[ "schannel" ]
|
||||||
[ "securitybaseapi" ]
|
[ "securitybaseapi" ]
|
||||||
|
|
|
@ -11,14 +11,14 @@ let
|
||||||
|
|
||||||
build_debug_and_release = (target: {
|
build_debug_and_release = (target: {
|
||||||
debug = (compile {
|
debug = (compile {
|
||||||
inherit target git_version;
|
inherit system target git_version pkgsSrc cargo2nixOverlay;
|
||||||
release = false;
|
release = false;
|
||||||
}).workspace.garage {
|
}).workspace.garage {
|
||||||
compileMode = "build";
|
compileMode = "build";
|
||||||
};
|
};
|
||||||
|
|
||||||
release = (compile {
|
release = (compile {
|
||||||
inherit target git_version;
|
inherit system target git_version pkgsSrc cargo2nixOverlay;
|
||||||
release = true;
|
release = true;
|
||||||
}).workspace.garage {
|
}).workspace.garage {
|
||||||
compileMode = "build";
|
compileMode = "build";
|
||||||
|
@ -39,7 +39,7 @@ in {
|
||||||
};
|
};
|
||||||
test = {
|
test = {
|
||||||
amd64 = test (compile {
|
amd64 = test (compile {
|
||||||
inherit git_version;
|
inherit system git_version pkgsSrc cargo2nixOverlay;
|
||||||
target = "x86_64-unknown-linux-musl";
|
target = "x86_64-unknown-linux-musl";
|
||||||
features = [
|
features = [
|
||||||
"garage/bundled-libs"
|
"garage/bundled-libs"
|
||||||
|
@ -52,7 +52,7 @@ in {
|
||||||
};
|
};
|
||||||
clippy = {
|
clippy = {
|
||||||
amd64 = (compile {
|
amd64 = (compile {
|
||||||
inherit git_version;
|
inherit system git_version pkgsSrc cargo2nixOverlay;
|
||||||
target = "x86_64-unknown-linux-musl";
|
target = "x86_64-unknown-linux-musl";
|
||||||
compiler = "clippy";
|
compiler = "clippy";
|
||||||
}).workspace.garage {
|
}).workspace.garage {
|
||||||
|
|
|
@ -5,13 +5,56 @@ weight = 20
|
||||||
|
|
||||||
## S3
|
## S3
|
||||||
|
|
||||||
|
### Using Minio SDK
|
||||||
|
|
||||||
|
First install the SDK:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
pip3 install minio
|
||||||
|
```
|
||||||
|
|
||||||
|
Then instantiate a client object using garage root domain, api key and secret:
|
||||||
|
|
||||||
|
```python
|
||||||
|
import minio
|
||||||
|
|
||||||
|
client = minio.Minio(
|
||||||
|
"your.domain.tld",
|
||||||
|
"GKyourapikey",
|
||||||
|
"abcd[...]1234",
|
||||||
|
# Force the region, this is specific to garage
|
||||||
|
region="region",
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
Then use all the standard S3 endpoints as implemented by the Minio SDK:
|
||||||
|
|
||||||
|
```
|
||||||
|
# List buckets
|
||||||
|
print(client.list_buckets())
|
||||||
|
|
||||||
|
# Put an object containing 'content' to /path in bucket named 'bucket':
|
||||||
|
content = b"content"
|
||||||
|
client.put_object(
|
||||||
|
"bucket",
|
||||||
|
"path",
|
||||||
|
io.BytesIO(content),
|
||||||
|
len(content),
|
||||||
|
)
|
||||||
|
|
||||||
|
# Read the object back and check contents
|
||||||
|
data = client.get_object("bucket", "path").read()
|
||||||
|
assert data == content
|
||||||
|
```
|
||||||
|
|
||||||
|
For further documentation, see the Minio SDK
|
||||||
|
[Reference](https://docs.min.io/docs/python-client-api-reference.html)
|
||||||
|
|
||||||
|
### Using Amazon boto3
|
||||||
|
|
||||||
*Coming soon*
|
*Coming soon*
|
||||||
|
|
||||||
Some refs:
|
See the official documentation:
|
||||||
- Minio SDK
|
|
||||||
- [Reference](https://docs.min.io/docs/python-client-api-reference.html)
|
|
||||||
|
|
||||||
- Amazon boto3
|
|
||||||
- [Installation](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/quickstart.html)
|
- [Installation](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/quickstart.html)
|
||||||
- [Reference](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html)
|
- [Reference](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html)
|
||||||
- [Example](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-uploading-files.html)
|
- [Example](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-uploading-files.html)
|
||||||
|
|
|
@ -8,7 +8,7 @@ In this section, we cover the following web applications:
|
||||||
| Name | Status | Note |
|
| Name | Status | Note |
|
||||||
|------|--------|------|
|
|------|--------|------|
|
||||||
| [Nextcloud](#nextcloud) | ✅ | Both Primary Storage and External Storage are supported |
|
| [Nextcloud](#nextcloud) | ✅ | Both Primary Storage and External Storage are supported |
|
||||||
| [Peertube](#peertube) | ✅ | Must be configured with the website endpoint |
|
| [Peertube](#peertube) | ✅ | Supported with the website endpoint, proxifying private videos unsupported |
|
||||||
| [Mastodon](#mastodon) | ✅ | Natively supported |
|
| [Mastodon](#mastodon) | ✅ | Natively supported |
|
||||||
| [Matrix](#matrix) | ✅ | Tested with `synapse-s3-storage-provider` |
|
| [Matrix](#matrix) | ✅ | Tested with `synapse-s3-storage-provider` |
|
||||||
| [Pixelfed](#pixelfed) | ❓ | Not yet tested |
|
| [Pixelfed](#pixelfed) | ❓ | Not yet tested |
|
||||||
|
@ -128,6 +128,10 @@ In other words, Peertube is only responsible of the "control plane" and offload
|
||||||
In return, this system is a bit harder to configure.
|
In return, this system is a bit harder to configure.
|
||||||
We show how it is still possible to configure Garage with Peertube, allowing you to spread the load and the bandwidth usage on the Garage cluster.
|
We show how it is still possible to configure Garage with Peertube, allowing you to spread the load and the bandwidth usage on the Garage cluster.
|
||||||
|
|
||||||
|
Starting from version 5.0, Peertube also supports improving the security for private videos by not exposing them directly
|
||||||
|
but relying on a single control point in the Peertube instance. This is based on S3 per-object and prefix ACL, which are not currently supported
|
||||||
|
in Garage, so this feature is unsupported. While this technically impedes security for private videos, it is not a blocking issue and could be
|
||||||
|
a reasonable trade-off for some instances.
|
||||||
|
|
||||||
### Create resources in Garage
|
### Create resources in Garage
|
||||||
|
|
||||||
|
@ -195,6 +199,11 @@ object_storage:
|
||||||
|
|
||||||
max_upload_part: 2GB
|
max_upload_part: 2GB
|
||||||
|
|
||||||
|
proxy:
|
||||||
|
# You may enable this feature, yet it will not provide any security benefit, so
|
||||||
|
# you should rather benefit from Garage public endpoint for all videos
|
||||||
|
proxify_private_files: false
|
||||||
|
|
||||||
streaming_playlists:
|
streaming_playlists:
|
||||||
bucket_name: 'peertube-playlist'
|
bucket_name: 'peertube-playlist'
|
||||||
|
|
||||||
|
|
|
@ -47,12 +47,8 @@ garage:
|
||||||
# Use only 2 replicas per object
|
# Use only 2 replicas per object
|
||||||
replicationMode: "2"
|
replicationMode: "2"
|
||||||
|
|
||||||
# Use recommended lmdb db engine
|
|
||||||
dbEngine: "lmdb"
|
|
||||||
|
|
||||||
# Start 4 instances (StatefulSets) of garage
|
# Start 4 instances (StatefulSets) of garage
|
||||||
deployment:
|
replicaCount: 4
|
||||||
replicaCount: 4
|
|
||||||
|
|
||||||
# Override default storage class and size
|
# Override default storage class and size
|
||||||
persistence:
|
persistence:
|
||||||
|
|
|
@ -109,7 +109,7 @@ especially you must consider the following folders/files:
|
||||||
this folder will be your main data storage and must be on a large storage (e.g. large HDD)
|
this folder will be your main data storage and must be on a large storage (e.g. large HDD)
|
||||||
|
|
||||||
|
|
||||||
A valid `/etc/garage/garage.toml` for our cluster would look as follows:
|
A valid `/etc/garage.toml` for our cluster would look as follows:
|
||||||
|
|
||||||
```toml
|
```toml
|
||||||
metadata_dir = "/var/lib/garage/meta"
|
metadata_dir = "/var/lib/garage/meta"
|
||||||
|
|
|
@ -12,7 +12,7 @@ as pictures, video, images, documents, etc., in a redundant multi-node
|
||||||
setting. S3 is versatile enough to also be used to publish a static
|
setting. S3 is versatile enough to also be used to publish a static
|
||||||
website.
|
website.
|
||||||
|
|
||||||
Garage is an opinionated object storage solutoin, we focus on the following **desirable properties**:
|
Garage is an opinionated object storage solution, we focus on the following **desirable properties**:
|
||||||
|
|
||||||
- **Internet enabled**: made for multi-sites (eg. datacenters, offices, households, etc.) interconnected through regular Internet connections.
|
- **Internet enabled**: made for multi-sites (eg. datacenters, offices, households, etc.) interconnected through regular Internet connections.
|
||||||
- **Self-contained & lightweight**: works everywhere and integrates well in existing environments to target [hyperconverged infrastructures](https://en.wikipedia.org/wiki/Hyper-converged_infrastructure).
|
- **Self-contained & lightweight**: works everywhere and integrates well in existing environments to target [hyperconverged infrastructures](https://en.wikipedia.org/wiki/Hyper-converged_infrastructure).
|
||||||
|
|
|
@ -39,7 +39,7 @@ Now you can enter our nix-shell, all the required packages will be downloaded bu
|
||||||
nix-shell
|
nix-shell
|
||||||
```
|
```
|
||||||
|
|
||||||
You can use the traditionnal Rust development workflow:
|
You can use the traditional Rust development workflow:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
cargo build # compile the project
|
cargo build # compile the project
|
||||||
|
|
|
@ -96,7 +96,7 @@ Performance characteristics of the different DB engines are as follows:
|
||||||
|
|
||||||
- Sled: the default database engine, which tends to produce
|
- Sled: the default database engine, which tends to produce
|
||||||
large data files and also has performance issues, especially when the metadata folder
|
large data files and also has performance issues, especially when the metadata folder
|
||||||
is on a traditionnal HDD and not on SSD.
|
is on a traditional HDD and not on SSD.
|
||||||
- LMDB: the recommended alternative on 64-bit systems,
|
- LMDB: the recommended alternative on 64-bit systems,
|
||||||
much more space-efficiant and slightly faster. Note that the data format of LMDB is not portable
|
much more space-efficiant and slightly faster. Note that the data format of LMDB is not portable
|
||||||
between architectures, so for instance the Garage database of an x86-64
|
between architectures, so for instance the Garage database of an x86-64
|
||||||
|
@ -267,6 +267,10 @@ This key should be specified here in the form of a 32-byte hex-encoded
|
||||||
random string. Such a string can be generated with a command
|
random string. Such a string can be generated with a command
|
||||||
such as `openssl rand -hex 32`.
|
such as `openssl rand -hex 32`.
|
||||||
|
|
||||||
|
### `rpc_secret_file`
|
||||||
|
|
||||||
|
Like `rpc_secret` above, just that this is the path to a file that Garage will try to read the secret from.
|
||||||
|
|
||||||
### `rpc_bind_addr`
|
### `rpc_bind_addr`
|
||||||
|
|
||||||
The address and port on which to bind for inter-cluster communcations
|
The address and port on which to bind for inter-cluster communcations
|
||||||
|
|
|
@ -83,7 +83,7 @@ This feature is totally invisible to S3 clients and does not break compatibility
|
||||||
### Cluster administration API
|
### Cluster administration API
|
||||||
|
|
||||||
Garage provides a fully-fledged REST API to administer your cluster programatically.
|
Garage provides a fully-fledged REST API to administer your cluster programatically.
|
||||||
Functionnality included in the admin API include: setting up and monitoring
|
Functionality included in the admin API include: setting up and monitoring
|
||||||
cluster nodes, managing access credentials, and managing storage buckets and bucket aliases.
|
cluster nodes, managing access credentials, and managing storage buckets and bucket aliases.
|
||||||
A full reference of the administration API is available [here](@/documentation/reference-manual/admin-api.md).
|
A full reference of the administration API is available [here](@/documentation/reference-manual/admin-api.md).
|
||||||
|
|
||||||
|
|
686
doc/drafts/admin-api.md
Normal file
|
@ -0,0 +1,686 @@
|
||||||
|
+++
|
||||||
|
title = "Administration API"
|
||||||
|
weight = 60
|
||||||
|
+++
|
||||||
|
|
||||||
|
The Garage administration API is accessible through a dedicated server whose
|
||||||
|
listen address is specified in the `[admin]` section of the configuration
|
||||||
|
file (see [configuration file
|
||||||
|
reference](@/documentation/reference-manual/configuration.md))
|
||||||
|
|
||||||
|
**WARNING.** At this point, there is no comittement to stability of the APIs described in this document.
|
||||||
|
We will bump the version numbers prefixed to each API endpoint at each time the syntax
|
||||||
|
or semantics change, meaning that code that relies on these endpoint will break
|
||||||
|
when changes are introduced.
|
||||||
|
|
||||||
|
The Garage administration API was introduced in version 0.7.2, this document
|
||||||
|
does not apply to older versions of Garage.
|
||||||
|
|
||||||
|
|
||||||
|
## Access control
|
||||||
|
|
||||||
|
The admin API uses two different tokens for acces control, that are specified in the config file's `[admin]` section:
|
||||||
|
|
||||||
|
- `metrics_token`: the token for accessing the Metrics endpoint (if this token
|
||||||
|
is not set in the config file, the Metrics endpoint can be accessed without
|
||||||
|
access control);
|
||||||
|
|
||||||
|
- `admin_token`: the token for accessing all of the other administration
|
||||||
|
endpoints (if this token is not set in the config file, access to these
|
||||||
|
endpoints is disabled entirely).
|
||||||
|
|
||||||
|
These tokens are used as simple HTTP bearer tokens. In other words, to
|
||||||
|
authenticate access to an admin API endpoint, add the following HTTP header
|
||||||
|
to your request:
|
||||||
|
|
||||||
|
```
|
||||||
|
Authorization: Bearer <token>
|
||||||
|
```
|
||||||
|
|
||||||
|
## Administration API endpoints
|
||||||
|
|
||||||
|
### Metrics-related endpoints
|
||||||
|
|
||||||
|
#### Metrics `GET /metrics`
|
||||||
|
|
||||||
|
Returns internal Garage metrics in Prometheus format.
|
||||||
|
|
||||||
|
#### Health `GET /health`
|
||||||
|
|
||||||
|
Used for simple health checks in a cluster setting with an orchestrator.
|
||||||
|
Returns an HTTP status 200 if the node is ready to answer user's requests,
|
||||||
|
and an HTTP status 503 (Service Unavailable) if there are some partitions
|
||||||
|
for which a quorum of nodes is not available.
|
||||||
|
A simple textual message is also returned in a body with content-type `text/plain`.
|
||||||
|
See `/v0/health` for an API that also returns JSON output.
|
||||||
|
|
||||||
|
### Cluster operations
|
||||||
|
|
||||||
|
#### GetClusterStatus `GET /v0/status`
|
||||||
|
|
||||||
|
Returns the cluster's current status in JSON, including:
|
||||||
|
|
||||||
|
- ID of the node being queried and its version of the Garage daemon
|
||||||
|
- Live nodes
|
||||||
|
- Currently configured cluster layout
|
||||||
|
- Staged changes to the cluster layout
|
||||||
|
|
||||||
|
Example response body:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"node": "ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f",
|
||||||
|
"garage_version": "git:v0.8.0",
|
||||||
|
"knownNodes": {
|
||||||
|
"ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f": {
|
||||||
|
"addr": "10.0.0.11:3901",
|
||||||
|
"is_up": true,
|
||||||
|
"last_seen_secs_ago": 9,
|
||||||
|
"hostname": "node1"
|
||||||
|
},
|
||||||
|
"4a6ae5a1d0d33bf895f5bb4f0a418b7dc94c47c0dd2eb108d1158f3c8f60b0ff": {
|
||||||
|
"addr": "10.0.0.12:3901",
|
||||||
|
"is_up": true,
|
||||||
|
"last_seen_secs_ago": 1,
|
||||||
|
"hostname": "node2"
|
||||||
|
},
|
||||||
|
"23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27": {
|
||||||
|
"addr": "10.0.0.21:3901",
|
||||||
|
"is_up": true,
|
||||||
|
"last_seen_secs_ago": 7,
|
||||||
|
"hostname": "node3"
|
||||||
|
},
|
||||||
|
"e2ee7984ee65b260682086ec70026165903c86e601a4a5a501c1900afe28d84b": {
|
||||||
|
"addr": "10.0.0.22:3901",
|
||||||
|
"is_up": true,
|
||||||
|
"last_seen_secs_ago": 1,
|
||||||
|
"hostname": "node4"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"layout": {
|
||||||
|
"version": 12,
|
||||||
|
"roles": {
|
||||||
|
"ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f": {
|
||||||
|
"zone": "dc1",
|
||||||
|
"capacity": 4,
|
||||||
|
"tags": [
|
||||||
|
"node1"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"4a6ae5a1d0d33bf895f5bb4f0a418b7dc94c47c0dd2eb108d1158f3c8f60b0ff": {
|
||||||
|
"zone": "dc1",
|
||||||
|
"capacity": 6,
|
||||||
|
"tags": [
|
||||||
|
"node2"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27": {
|
||||||
|
"zone": "dc2",
|
||||||
|
"capacity": 10,
|
||||||
|
"tags": [
|
||||||
|
"node3"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"stagedRoleChanges": {
|
||||||
|
"e2ee7984ee65b260682086ec70026165903c86e601a4a5a501c1900afe28d84b": {
|
||||||
|
"zone": "dc2",
|
||||||
|
"capacity": 5,
|
||||||
|
"tags": [
|
||||||
|
"node4"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### GetClusterHealth `GET /v0/health`
|
||||||
|
|
||||||
|
Returns the cluster's current health in JSON format, with the following variables:
|
||||||
|
|
||||||
|
- `status`: one of `Healthy`, `Degraded` or `Unavailable`:
|
||||||
|
- Healthy: Garage node is connected to all storage nodes
|
||||||
|
- Degraded: Garage node is not connected to all storage nodes, but a quorum of write nodes is available for all partitions
|
||||||
|
- Unavailable: a quorum of write nodes is not available for some partitions
|
||||||
|
- `known_nodes`: the number of nodes this Garage node has had a TCP connection to since the daemon started
|
||||||
|
- `connected_nodes`: the nubmer of nodes this Garage node currently has an open connection to
|
||||||
|
- `storage_nodes`: the number of storage nodes currently registered in the cluster layout
|
||||||
|
- `storage_nodes_ok`: the number of storage nodes to which a connection is currently open
|
||||||
|
- `partitions`: the total number of partitions of the data (currently always 256)
|
||||||
|
- `partitions_quorum`: the number of partitions for which a quorum of write nodes is available
|
||||||
|
- `partitions_all_ok`: the number of partitions for which we are connected to all storage nodes responsible of storing it
|
||||||
|
|
||||||
|
Contrarily to `GET /health`, this endpoint always returns a 200 OK HTTP response code.
|
||||||
|
|
||||||
|
Example response body:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"status": "Degraded",
|
||||||
|
"known_nodes": 3,
|
||||||
|
"connected_nodes": 2,
|
||||||
|
"storage_nodes": 3,
|
||||||
|
"storage_nodes_ok": 2,
|
||||||
|
"partitions": 256,
|
||||||
|
"partitions_quorum": 256,
|
||||||
|
"partitions_all_ok": 0
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### ConnectClusterNodes `POST /v0/connect`
|
||||||
|
|
||||||
|
Instructs this Garage node to connect to other Garage nodes at specified addresses.
|
||||||
|
|
||||||
|
Example request body:
|
||||||
|
|
||||||
|
```json
|
||||||
|
[
|
||||||
|
"ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f@10.0.0.11:3901",
|
||||||
|
"4a6ae5a1d0d33bf895f5bb4f0a418b7dc94c47c0dd2eb108d1158f3c8f60b0ff@10.0.0.12:3901"
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
The format of the string for a node to connect to is: `<node ID>@<ip address>:<port>`, same as in the `garage node connect` CLI call.
|
||||||
|
|
||||||
|
Example response:
|
||||||
|
|
||||||
|
```json
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"success": true,
|
||||||
|
"error": null
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"success": false,
|
||||||
|
"error": "Handshake error"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
#### GetClusterLayout `GET /v0/layout`
|
||||||
|
|
||||||
|
Returns the cluster's current layout in JSON, including:
|
||||||
|
|
||||||
|
- Currently configured cluster layout
|
||||||
|
- Staged changes to the cluster layout
|
||||||
|
|
||||||
|
(the info returned by this endpoint is a subset of the info returned by GetClusterStatus)
|
||||||
|
|
||||||
|
Example response body:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"version": 12,
|
||||||
|
"roles": {
|
||||||
|
"ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f": {
|
||||||
|
"zone": "dc1",
|
||||||
|
"capacity": 4,
|
||||||
|
"tags": [
|
||||||
|
"node1"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"4a6ae5a1d0d33bf895f5bb4f0a418b7dc94c47c0dd2eb108d1158f3c8f60b0ff": {
|
||||||
|
"zone": "dc1",
|
||||||
|
"capacity": 6,
|
||||||
|
"tags": [
|
||||||
|
"node2"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27": {
|
||||||
|
"zone": "dc2",
|
||||||
|
"capacity": 10,
|
||||||
|
"tags": [
|
||||||
|
"node3"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"stagedRoleChanges": {
|
||||||
|
"e2ee7984ee65b260682086ec70026165903c86e601a4a5a501c1900afe28d84b": {
|
||||||
|
"zone": "dc2",
|
||||||
|
"capacity": 5,
|
||||||
|
"tags": [
|
||||||
|
"node4"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### UpdateClusterLayout `POST /v0/layout`
|
||||||
|
|
||||||
|
Send modifications to the cluster layout. These modifications will
|
||||||
|
be included in the staged role changes, visible in subsequent calls
|
||||||
|
of `GetClusterLayout`. Once the set of staged changes is satisfactory,
|
||||||
|
the user may call `ApplyClusterLayout` to apply the changed changes,
|
||||||
|
or `Revert ClusterLayout` to clear all of the staged changes in
|
||||||
|
the layout.
|
||||||
|
|
||||||
|
Request body format:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
<node_id>: {
|
||||||
|
"capacity": <new_capacity>,
|
||||||
|
"zone": <new_zone>,
|
||||||
|
"tags": [
|
||||||
|
<new_tag>,
|
||||||
|
...
|
||||||
|
]
|
||||||
|
},
|
||||||
|
<node_id_to_remove>: null,
|
||||||
|
...
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Contrary to the CLI that may update only a subset of the fields
|
||||||
|
`capacity`, `zone` and `tags`, when calling this API all of these
|
||||||
|
values must be specified.
|
||||||
|
|
||||||
|
|
||||||
|
#### ApplyClusterLayout `POST /v0/layout/apply`
|
||||||
|
|
||||||
|
Applies to the cluster the layout changes currently registered as
|
||||||
|
staged layout changes.
|
||||||
|
|
||||||
|
Request body format:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"version": 13
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Similarly to the CLI, the body must include the version of the new layout
|
||||||
|
that will be created, which MUST be 1 + the value of the currently
|
||||||
|
existing layout in the cluster.
|
||||||
|
|
||||||
|
#### RevertClusterLayout `POST /v0/layout/revert`
|
||||||
|
|
||||||
|
Clears all of the staged layout changes.
|
||||||
|
|
||||||
|
Request body format:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"version": 13
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Reverting the staged changes is done by incrementing the version number
|
||||||
|
and clearing the contents of the staged change list.
|
||||||
|
Similarly to the CLI, the body must include the incremented
|
||||||
|
version number, which MUST be 1 + the value of the currently
|
||||||
|
existing layout in the cluster.
|
||||||
|
|
||||||
|
|
||||||
|
### Access key operations
|
||||||
|
|
||||||
|
#### ListKeys `GET /v0/key`
|
||||||
|
|
||||||
|
Returns all API access keys in the cluster.
|
||||||
|
|
||||||
|
Example response:
|
||||||
|
|
||||||
|
```json
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"id": "GK31c2f218a2e44f485b94239e",
|
||||||
|
"name": "test"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "GKe10061ac9c2921f09e4c5540",
|
||||||
|
"name": "test2"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
#### CreateKey `POST /v0/key`
|
||||||
|
|
||||||
|
Creates a new API access key.
|
||||||
|
|
||||||
|
Request body format:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"name": "NameOfMyKey"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### ImportKey `POST /v0/key/import`
|
||||||
|
|
||||||
|
Imports an existing API key.
|
||||||
|
|
||||||
|
Request body format:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"accessKeyId": "GK31c2f218a2e44f485b94239e",
|
||||||
|
"secretAccessKey": "b892c0665f0ada8a4755dae98baa3b133590e11dae3bcc1f9d769d67f16c3835",
|
||||||
|
"name": "NameOfMyKey"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### GetKeyInfo `GET /v0/key?id=<acces key id>`
|
||||||
|
#### GetKeyInfo `GET /v0/key?search=<pattern>`
|
||||||
|
|
||||||
|
Returns information about the requested API access key.
|
||||||
|
|
||||||
|
If `id` is set, the key is looked up using its exact identifier (faster).
|
||||||
|
If `search` is set, the key is looked up using its name or prefix
|
||||||
|
of identifier (slower, all keys are enumerated to do this).
|
||||||
|
|
||||||
|
Example response:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"name": "test",
|
||||||
|
"accessKeyId": "GK31c2f218a2e44f485b94239e",
|
||||||
|
"secretAccessKey": "b892c0665f0ada8a4755dae98baa3b133590e11dae3bcc1f9d769d67f16c3835",
|
||||||
|
"permissions": {
|
||||||
|
"createBucket": false
|
||||||
|
},
|
||||||
|
"buckets": [
|
||||||
|
{
|
||||||
|
"id": "70dc3bed7fe83a75e46b66e7ddef7d56e65f3c02f9f80b6749fb97eccb5e1033",
|
||||||
|
"globalAliases": [
|
||||||
|
"test2"
|
||||||
|
],
|
||||||
|
"localAliases": [],
|
||||||
|
"permissions": {
|
||||||
|
"read": true,
|
||||||
|
"write": true,
|
||||||
|
"owner": false
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "d7452a935e663fc1914f3a5515163a6d3724010ce8dfd9e4743ca8be5974f995",
|
||||||
|
"globalAliases": [
|
||||||
|
"test3"
|
||||||
|
],
|
||||||
|
"localAliases": [],
|
||||||
|
"permissions": {
|
||||||
|
"read": true,
|
||||||
|
"write": true,
|
||||||
|
"owner": false
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b",
|
||||||
|
"globalAliases": [],
|
||||||
|
"localAliases": [
|
||||||
|
"test"
|
||||||
|
],
|
||||||
|
"permissions": {
|
||||||
|
"read": true,
|
||||||
|
"write": true,
|
||||||
|
"owner": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "96470e0df00ec28807138daf01915cfda2bee8eccc91dea9558c0b4855b5bf95",
|
||||||
|
"globalAliases": [
|
||||||
|
"alex"
|
||||||
|
],
|
||||||
|
"localAliases": [],
|
||||||
|
"permissions": {
|
||||||
|
"read": true,
|
||||||
|
"write": true,
|
||||||
|
"owner": true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### DeleteKey `DELETE /v0/key?id=<access key id>`
|
||||||
|
|
||||||
|
Deletes an API access key.
|
||||||
|
|
||||||
|
#### UpdateKey `POST /v0/key?id=<access key id>`
|
||||||
|
|
||||||
|
Updates information about the specified API access key.
|
||||||
|
|
||||||
|
Request body format:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"name": "NameOfMyKey",
|
||||||
|
"allow": {
|
||||||
|
    "createBucket": true
|
||||||
|
},
|
||||||
|
"deny": {}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
All fields (`name`, `allow` and `deny`) are optional.
|
||||||
|
If they are present, the corresponding modifications are applied to the key, otherwise nothing is changed.
|
||||||
|
The possible flags in `allow` and `deny` are: `createBucket`.
|
||||||
|
|
||||||
|
|
||||||
|
### Bucket operations
|
||||||
|
|
||||||
|
#### ListBuckets `GET /v0/bucket`
|
||||||
|
|
||||||
|
Returns all storage buckets in the cluster.
|
||||||
|
|
||||||
|
Example response:
|
||||||
|
|
||||||
|
```json
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"id": "70dc3bed7fe83a75e46b66e7ddef7d56e65f3c02f9f80b6749fb97eccb5e1033",
|
||||||
|
"globalAliases": [
|
||||||
|
"test2"
|
||||||
|
],
|
||||||
|
"localAliases": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "96470e0df00ec28807138daf01915cfda2bee8eccc91dea9558c0b4855b5bf95",
|
||||||
|
"globalAliases": [
|
||||||
|
"alex"
|
||||||
|
],
|
||||||
|
"localAliases": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "d7452a935e663fc1914f3a5515163a6d3724010ce8dfd9e4743ca8be5974f995",
|
||||||
|
"globalAliases": [
|
||||||
|
"test3"
|
||||||
|
],
|
||||||
|
"localAliases": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b",
|
||||||
|
"globalAliases": [],
|
||||||
|
"localAliases": [
|
||||||
|
{
|
||||||
|
"accessKeyId": "GK31c2f218a2e44f485b94239e",
|
||||||
|
"alias": "test"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
#### GetBucketInfo `GET /v0/bucket?id=<bucket id>`
|
||||||
|
#### GetBucketInfo `GET /v0/bucket?globalAlias=<alias>`
|
||||||
|
|
||||||
|
Returns information about the requested storage bucket.
|
||||||
|
|
||||||
|
If `id` is set, the bucket is looked up using its exact identifier.
|
||||||
|
If `globalAlias` is set, the bucket is looked up using its global alias.
|
||||||
|
(both are fast)
|
||||||
|
|
||||||
|
Example response:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"id": "afa8f0a22b40b1247ccd0affb869b0af5cff980924a20e4b5e0720a44deb8d39",
|
||||||
|
"globalAliases": [],
|
||||||
|
"websiteAccess": false,
|
||||||
|
"websiteConfig": null,
|
||||||
|
"keys": [
|
||||||
|
{
|
||||||
|
"accessKeyId": "GK31c2f218a2e44f485b94239e",
|
||||||
|
"name": "Imported key",
|
||||||
|
"permissions": {
|
||||||
|
"read": true,
|
||||||
|
"write": true,
|
||||||
|
"owner": true
|
||||||
|
},
|
||||||
|
"bucketLocalAliases": [
|
||||||
|
"debug"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"objects": 14827,
|
||||||
|
"bytes": 13189855625,
|
||||||
|
"unfinshedUploads": 0,
|
||||||
|
"quotas": {
|
||||||
|
"maxSize": null,
|
||||||
|
"maxObjects": null
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### CreateBucket `POST /v0/bucket`
|
||||||
|
|
||||||
|
Creates a new storage bucket.
|
||||||
|
|
||||||
|
Request body format:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"globalAlias": "NameOfMyBucket"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
OR
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"localAlias": {
|
||||||
|
"accessKeyId": "GK31c2f218a2e44f485b94239e",
|
||||||
|
"alias": "NameOfMyBucket",
|
||||||
|
"allow": {
|
||||||
|
"read": true,
|
||||||
|
"write": true,
|
||||||
|
"owner": false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
OR
|
||||||
|
|
||||||
|
```json
|
||||||
|
{}
|
||||||
|
```
|
||||||
|
|
||||||
|
Creates a new bucket, either with a global alias, a local one,
|
||||||
|
or no alias at all.
|
||||||
|
|
||||||
|
Technically, you can also specify both `globalAlias` and `localAlias` and that would create
|
||||||
|
two aliases at once, although there is generally no reason to do so.
|
||||||
|
|
||||||
|
#### DeleteBucket `DELETE /v0/bucket?id=<bucket id>`
|
||||||
|
|
||||||
|
Deletes a storage bucket. A bucket cannot be deleted if it is not empty.
|
||||||
|
|
||||||
|
Warning: this will delete all aliases associated with the bucket!
|
||||||
|
|
||||||
|
#### UpdateBucket `PUT /v0/bucket?id=<bucket id>`
|
||||||
|
|
||||||
|
Updates configuration of the given bucket.
|
||||||
|
|
||||||
|
Request body format:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"websiteAccess": {
|
||||||
|
"enabled": true,
|
||||||
|
"indexDocument": "index.html",
|
||||||
|
"errorDocument": "404.html"
|
||||||
|
},
|
||||||
|
"quotas": {
|
||||||
|
"maxSize": 19029801,
|
||||||
|
    "maxObjects": null
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
All fields (`websiteAccess` and `quotas`) are optional.
|
||||||
|
If they are present, the corresponding modifications are applied to the bucket, otherwise nothing is changed.
|
||||||
|
|
||||||
|
In `websiteAccess`: if `enabled` is `true`, `indexDocument` must be specified.
|
||||||
|
The field `errorDocument` is optional, if no error document is set a generic
|
||||||
|
error message is displayed when errors happen. Conversely, if `enabled` is
|
||||||
|
`false`, neither `indexDocument` nor `errorDocument` must be specified.
|
||||||
|
|
||||||
|
In `quotas`: new values of `maxSize` and `maxObjects` must both be specified, or set to `null`
|
||||||
|
to remove the quotas. An absent value will be considered the same as a `null`. It is not possible
|
||||||
|
to change only one of the two quotas.
|
||||||
|
|
||||||
|
### Operations on permissions for keys on buckets
|
||||||
|
|
||||||
|
#### BucketAllowKey `POST /v0/bucket/allow`
|
||||||
|
|
||||||
|
Allows a key to do read/write/owner operations on a bucket.
|
||||||
|
|
||||||
|
Request body format:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"bucketId": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b",
|
||||||
|
"accessKeyId": "GK31c2f218a2e44f485b94239e",
|
||||||
|
"permissions": {
|
||||||
|
"read": true,
|
||||||
|
"write": true,
|
||||||
|
"owner": true
|
||||||
|
  }
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Flags in `permissions` which have the value `true` will be activated.
|
||||||
|
Other flags will remain unchanged.
|
||||||
|
|
||||||
|
#### BucketDenyKey `POST /v0/bucket/deny`
|
||||||
|
|
||||||
|
Denies a key from doing read/write/owner operations on a bucket.
|
||||||
|
|
||||||
|
Request body format:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"bucketId": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b",
|
||||||
|
"accessKeyId": "GK31c2f218a2e44f485b94239e",
|
||||||
|
"permissions": {
|
||||||
|
"read": false,
|
||||||
|
"write": false,
|
||||||
|
"owner": true
|
||||||
|
  }
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Flags in `permissions` which have the value `true` will be deactivated.
|
||||||
|
Other flags will remain unchanged.
|
||||||
|
|
||||||
|
|
||||||
|
### Operations on bucket aliases
|
||||||
|
|
||||||
|
#### GlobalAliasBucket `PUT /v0/bucket/alias/global?id=<bucket id>&alias=<global alias>`
|
||||||
|
|
||||||
|
Empty body. Creates a global alias for a bucket.
|
||||||
|
|
||||||
|
#### GlobalUnaliasBucket `DELETE /v0/bucket/alias/global?id=<bucket id>&alias=<global alias>`
|
||||||
|
|
||||||
|
Removes a global alias for a bucket.
|
||||||
|
|
||||||
|
#### LocalAliasBucket `PUT /v0/bucket/alias/local?id=<bucket id>&accessKeyId=<access key ID>&alias=<local alias>`
|
||||||
|
|
||||||
|
Empty body. Creates a local alias for a bucket in the namespace of a specific access key.
|
||||||
|
|
||||||
|
#### LocalUnaliasBucket `DELETE /v0/bucket/alias/local?id=<bucket id>&accessKeyId=<access key ID>&alias=<local alias>`
|
||||||
|
|
||||||
|
Removes a local alias for a bucket in the namespace of a specific access key.
|
||||||
|
|
10
doc/talks/2022-11-19-Capitole-du-Libre/.gitignore
vendored
Normal file
|
@ -0,0 +1,10 @@
|
||||||
|
*.aux
|
||||||
|
*.bbl
|
||||||
|
*.blg
|
||||||
|
*.log
|
||||||
|
*.nav
|
||||||
|
*.out
|
||||||
|
*.snm
|
||||||
|
*.synctex.gz
|
||||||
|
*.toc
|
||||||
|
*.dvi
|
8
doc/talks/2022-11-19-Capitole-du-Libre/Makefile
Normal file
|
@ -0,0 +1,8 @@
|
||||||
|
all:
|
||||||
|
pdflatex présentation.tex
|
||||||
|
|
||||||
|
clean:
|
||||||
|
rm -f *.aux *.bbl *.blg *.log *.nav *.out *.snm *.synctex.gz *.toc *.dvi présentation.pdf
|
||||||
|
|
||||||
|
clean_sauf_pdf:
|
||||||
|
rm -f *.aux *.bbl *.blg *.log *.nav *.out *.snm *.synctex.gz *.toc *.dvi
|
BIN
doc/talks/2022-11-19-Capitole-du-Libre/NGI.png
Normal file
After Width: | Height: | Size: 61 KiB |
BIN
doc/talks/2022-11-19-Capitole-du-Libre/agpl-v3-logo.png
Normal file
After Width: | Height: | Size: 196 KiB |
BIN
doc/talks/2022-11-19-Capitole-du-Libre/carte-Europe.pdf
Normal file
BIN
doc/talks/2022-11-19-Capitole-du-Libre/deuxfleurs-logo.png
Normal file
After Width: | Height: | Size: 105 KiB |
BIN
doc/talks/2022-11-19-Capitole-du-Libre/drapeau_européen.png
Normal file
After Width: | Height: | Size: 2.5 KiB |
BIN
doc/talks/2022-11-19-Capitole-du-Libre/garage-logo.png
Normal file
After Width: | Height: | Size: 73 KiB |
BIN
doc/talks/2022-11-19-Capitole-du-Libre/logo_chatons.png
Normal file
After Width: | Height: | Size: 199 KiB |
BIN
doc/talks/2022-11-19-Capitole-du-Libre/mastodon-logo.png
Normal file
After Width: | Height: | Size: 41 KiB |
BIN
doc/talks/2022-11-19-Capitole-du-Libre/matrix-logo.png
Normal file
After Width: | Height: | Size: 52 KiB |
BIN
doc/talks/2022-11-19-Capitole-du-Libre/neptune.jpg
Normal file
After Width: | Height: | Size: 174 KiB |
BIN
doc/talks/2022-11-19-Capitole-du-Libre/nextcloud-logo.png
Normal file
After Width: | Height: | Size: 126 KiB |
BIN
doc/talks/2022-11-19-Capitole-du-Libre/peertube-logo.png
Normal file
After Width: | Height: | Size: 20 KiB |
BIN
doc/talks/2022-11-19-Capitole-du-Libre/présentation.pdf
Normal file
340
doc/talks/2022-11-19-Capitole-du-Libre/présentation.tex
Normal file
|
@ -0,0 +1,340 @@
|
||||||
|
\documentclass[11pt, aspectratio=1610]{beamer}
|
||||||
|
\usetheme{Warsaw}
|
||||||
|
\usepackage[utf8]{inputenc}
|
||||||
|
\usepackage[french]{babel}
|
||||||
|
\usepackage{amsmath}
|
||||||
|
\usepackage{amsfonts}
|
||||||
|
\usepackage{amssymb}
|
||||||
|
\usepackage{tikz}
|
||||||
|
\usepackage{graphicx}
|
||||||
|
\usepackage{xcolor}
|
||||||
|
\usepackage{setspace}
|
||||||
|
\usepackage{todonotes}
|
||||||
|
\presetkeys{todonotes}{inline}{}
|
||||||
|
\renewcommand{\baselinestretch}{1.25}
|
||||||
|
|
||||||
|
\definecolor{orange_garage}{RGB}{255,147,41}
|
||||||
|
\definecolor{gris_garage}{RGB}{78,78,78}
|
||||||
|
|
||||||
|
\author[Association Deuxfleurs]{~\linebreak Vincent Giraud}
|
||||||
|
\title[De l'auto-hébergement à l'entre-hébergement avec Garage]{De l'auto-hébergement à l'entre-hébergement :\\Garage, pour conserver ses données ensemble}
|
||||||
|
%\setbeamercovered{transparent}
|
||||||
|
%\setbeamertemplate{navigation symbols}{}
|
||||||
|
\date{Capitole du Libre 2022\linebreak
|
||||||
|
|
||||||
|
\scriptsize Samedi 19 novembre 2022\linebreak
|
||||||
|
}
|
||||||
|
|
||||||
|
\setbeamercolor{palette primary}{fg=gris_garage,bg=orange_garage}
|
||||||
|
\setbeamercolor{palette secondary}{fg=gris_garage,bg=gris_garage}
|
||||||
|
\setbeamercolor{palette tiertary}{fg=white,bg=gris_garage}
|
||||||
|
\setbeamercolor{palette quaternary}{fg=white,bg=gris_garage}
|
||||||
|
\setbeamercolor{navigation symbols}{fg=black, bg=white}
|
||||||
|
\setbeamercolor{navigation symbols dimmed}{fg=darkgray, bg=white}
|
||||||
|
\setbeamercolor{itemize item}{fg=gris_garage}
|
||||||
|
\setbeamertemplate{itemize item}[circle]
|
||||||
|
|
||||||
|
\addtobeamertemplate{navigation symbols}{}{%
|
||||||
|
\usebeamerfont{footline}%
|
||||||
|
\usebeamercolor[fg]{footline}%
|
||||||
|
\hspace{1em}%
|
||||||
|
\insertframenumber/\inserttotalframenumber
|
||||||
|
}
|
||||||
|
|
||||||
|
\setbeamertemplate{headline}
|
||||||
|
{%
|
||||||
|
\leavevmode%
|
||||||
|
\begin{beamercolorbox}[wd=.5\paperwidth,ht=2.5ex,dp=1.125ex]{section in head/foot}%
|
||||||
|
\hbox to .5\paperwidth{\hfil\insertsectionhead\hfil}
|
||||||
|
\end{beamercolorbox}%
|
||||||
|
\begin{beamercolorbox}[wd=.5\paperwidth,ht=2.5ex,dp=1.125ex]{subsection in head/foot}%
|
||||||
|
\hbox to .5\paperwidth{\hfil\insertsubsectionhead\hfil}
|
||||||
|
\end{beamercolorbox}%
|
||||||
|
}
|
||||||
|
\addtobeamertemplate{footnote}{}{\vspace{2ex}}
|
||||||
|
|
||||||
|
\begin{document}
|
||||||
|
\begin{frame}
|
||||||
|
\titlepage
|
||||||
|
\end{frame}
|
||||||
|
|
||||||
|
\section{Introduction}
|
||||||
|
\subsection{Présentation}
|
||||||
|
\begin{frame}
|
||||||
|
\begin{columns}
|
||||||
|
\column{0.5 \linewidth}
|
||||||
|
\begin{center}
|
||||||
|
\includegraphics[width=3.5cm]{deuxfleurs-logo.png}\linebreak
|
||||||
|
|
||||||
|
\texttt{https://deuxfleurs.fr}
|
||||||
|
\end{center}
|
||||||
|
\column{0.4 \linewidth}
|
||||||
|
\begin{center}
|
||||||
|
Deuxfleurs est une association militant en faveur d'un internet plus convivial, avec une organisation et des rapports de force repensés.\linebreak
|
||||||
|
|
||||||
|
Nous faisons partie du CHATONS\footnote[frame]{Collectif des Hébergeurs Alternatifs, Transparents, Ouverts, Neutres et Solidaires} depuis avril 2022.
|
||||||
|
|
||||||
|
\includegraphics[width=2cm]{logo_chatons.png}
|
||||||
|
\end{center}
|
||||||
|
\end{columns}
|
||||||
|
\end{frame}
|
||||||
|
|
||||||
|
\subsection{Héberger à la maison}
|
||||||
|
\begin{frame}
|
||||||
|
\begin{columns}
|
||||||
|
\begin{column}{0.5 \linewidth}
|
||||||
|
\begin{center}
|
||||||
|
Pour échapper au contrôle et au giron des opérateurs de clouds, héberger ses données à la maison présente de nombreux avantages...
|
||||||
|
\end{center}
|
||||||
|
|
||||||
|
\vspace{0.5cm}
|
||||||
|
|
||||||
|
\begin{itemize}[<+(1)->]
|
||||||
|
\item On récupère la souveraineté sur ses données
|
||||||
|
\item On gagne en vie privée
|
||||||
|
\item On gagne en libertés
|
||||||
|
\item On est responsabilisé face à ses besoins
|
||||||
|
\end{itemize}
|
||||||
|
\end{column}
|
||||||
|
\vrule{}
|
||||||
|
\begin{column}{0.5 \linewidth}
|
||||||
|
\begin{center}
|
||||||
|
\onslide<6->{... mais aussi bien des contraintes...}
|
||||||
|
\end{center}
|
||||||
|
|
||||||
|
\vspace{0.5cm}
|
||||||
|
|
||||||
|
\begin{itemize}[<+(2)->]
|
||||||
|
\item On repose sur une connexion internet pour particulier
|
||||||
|
\item Un certain savoir-faire et moultes compétences sont requis
|
||||||
|
\item Assurer la résilience de ses services est difficile
|
||||||
|
\item Bien sauvegarder ses données, et ceci au-delà de son site géographique, n'est pas évident
|
||||||
|
\end{itemize}
|
||||||
|
\end{column}
|
||||||
|
\end{columns}
|
||||||
|
\end{frame}
|
||||||
|
|
||||||
|
\subsection{Sauvegarder pour se parer à tout imprévu}
|
||||||
|
\begin{frame}
|
||||||
|
\begin{center}
|
||||||
|
Sauvegarder pour se parer contre les pannes matérielles est une chose...
|
||||||
|
|
||||||
|
Sauvegarder pour se parer contre les cambriolages et les incendies en est une autre !\linebreak
|
||||||
|
|
||||||
|
\vspace{1cm}
|
||||||
|
\onslide<2->{Répartir géographiquement ses données devient alors nécessaire.}
|
||||||
|
\end{center}
|
||||||
|
\end{frame}
|
||||||
|
|
||||||
|
\section{Les solutions à explorer}
|
||||||
|
\subsection{L'entre-hébergement}
|
||||||
|
\begin{frame}
|
||||||
|
\begin{center}
|
||||||
|
On a vu récemment se développer au sein du CHATONS la notion d'entre-hébergement : en plus de renforcer l'intégrité des sauvegardes, on va améliorer la disponibilité pendant les coupures de liaison internet, de courant, ou pendant les déménagements d'administrateurs par exemple.\linebreak
|
||||||
|
|
||||||
|
\vspace{1cm}
|
||||||
|
\onslide<2->
|
||||||
|
{
|
||||||
|
Dans le cadre du collectif, il s'agit de partager ses volumes de données entre hébergeurs.\linebreak
|
||||||
|
|
||||||
|
Pour assurer la confidentialité, on peut chiffrer les données au niveau applicatif.
|
||||||
|
}
|
||||||
|
\end{center}
|
||||||
|
\end{frame}
|
||||||
|
|
||||||
|
\subsection{S3 contre les systèmes de fichiers}
|
||||||
|
\begin{frame}
|
||||||
|
\begin{center}
|
||||||
|
Dans le cadre de l'administration de services en ligne, les systèmes de fichiers recèlent certaines difficultés.\linebreak
|
||||||
|
|
||||||
|
\vspace{1cm}
|
||||||
|
Le standard S3 apporte des facilités; on réduit le stockage à un paradigme de clé-valeur basé essentiellement sur deux opérations seulement: lire ou écrire une clé.
|
||||||
|
\end{center}
|
||||||
|
\end{frame}
|
||||||
|
|
||||||
|
\section{Garage}
|
||||||
|
\subsection{Présentation}
|
||||||
|
\begin{frame}
|
||||||
|
\begin{columns}
|
||||||
|
\column{0.5 \linewidth}
|
||||||
|
\begin{center}
|
||||||
|
Garage essaye de répondre à l'ensemble de ces besoins.\linebreak
|
||||||
|
|
||||||
|
\vspace{0.5cm}
|
||||||
|
Il s'agit d'un logiciel libre permettant de distribuer un service S3 sur diverses machines éloignées.
|
||||||
|
\end{center}
|
||||||
|
\column{0.5 \linewidth}
|
||||||
|
\begin{center}
|
||||||
|
\includegraphics[width=4cm]{garage-logo.png}\linebreak
|
||||||
|
|
||||||
|
\texttt{https://garagehq.deuxfleurs.fr/}
|
||||||
|
\end{center}
|
||||||
|
\end{columns}
|
||||||
|
\end{frame}
|
||||||
|
|
||||||
|
\subsection{Gestion des zones}
|
||||||
|
\begin{frame}
|
||||||
|
\begin{center}
|
||||||
|
Garage va prendre en compte les zones géographiques au moment de répliquer les données.\linebreak
|
||||||
|
|
||||||
|
\vspace{1cm}
|
||||||
|
\includegraphics[width=13.25cm]{zones.png}
|
||||||
|
\end{center}
|
||||||
|
\end{frame}
|
||||||
|
|
||||||
|
\subsection{Comment ça marche ?}
|
||||||
|
\begin{frame}
|
||||||
|
\begin{columns}
|
||||||
|
\column{0.5 \linewidth}
|
||||||
|
\input{schéma europe}
|
||||||
|
\column{0.5 \linewidth}
|
||||||
|
\begin{center}
|
||||||
|
Chaque objet est dupliqué sur plusieurs zones différentes.\linebreak
|
||||||
|
|
||||||
|
\onslide<5->{Lorsqu'un nouvel hébergeur rejoint le réseau, la charge se voit équilibrée.}\linebreak
|
||||||
|
|
||||||
|
\onslide<12->{Si une zone devient indisponible, les autres continuent d'assurer le service.}\linebreak
|
||||||
|
\end{center}
|
||||||
|
\end{columns}
|
||||||
|
\end{frame}
|
||||||
|
|
||||||
|
\subsection{Financement}
|
||||||
|
\begin{frame}
|
||||||
|
\begin{center}
|
||||||
|
Dans le cadre du programme \textit{Horizon 2021} de l'Union Européenne, nous avons reçu une subvention de la part de l'initiative NGI Pointer\footnote[frame]{Next Generation Internet Program for Open Internet Renovation}.\linebreak
|
||||||
|
|
||||||
|
\includegraphics[width=3cm]{drapeau_européen.png}\hspace{1cm}
|
||||||
|
\includegraphics[width=3cm]{NGI.png}\linebreak
|
||||||
|
|
||||||
|
Nous avons ainsi pu financer le développement de Garage pendant 1 an.
|
||||||
|
\end{center}
|
||||||
|
\end{frame}
|
||||||
|
|
||||||
|
\subsection{Licence}
|
||||||
|
\begin{frame}
|
||||||
|
\begin{center}
|
||||||
|
De par nos valeurs, nous avons attribué la licence AGPL version 3 à Garage, notamment afin qu'il reste parmi les biens communs.\linebreak
|
||||||
|
|
||||||
|
\vspace{0.5cm}
|
||||||
|
\includegraphics[width=5cm]{agpl-v3-logo.png}\linebreak
|
||||||
|
\end{center}
|
||||||
|
\end{frame}
|
||||||
|
|
||||||
|
\subsection{Langage utilisé}
|
||||||
|
\begin{frame}
|
||||||
|
\begin{center}
|
||||||
|
Nous avons décidé d'écrire Garage à l'aide du langage Rust, afin d'obtenir une compilation vers des binaires natifs et efficaces.\linebreak
|
||||||
|
|
||||||
|
\includegraphics[width=3.5cm]{rust-logo.png}\linebreak
|
||||||
|
|
||||||
|
Ce choix permet également de bénéficier des avantages reconnus de Rust en termes de sécurité.
|
||||||
|
\end{center}
|
||||||
|
\end{frame}
|
||||||
|
|
||||||
|
\subsection{Matériel requis}
|
||||||
|
\begin{frame}
|
||||||
|
\begin{center}
|
||||||
|
Garage peut ainsi être performant sur des machines limitées. Les prérequis sont minimes : n'importe quelle machine avec un processeur qui a moins d'une décennie, 1~gigaoctet de mémoire vive, et 16~gigaoctets de stockage suffit.\linebreak
|
||||||
|
|
||||||
|
\vspace{1cm}
|
||||||
|
|
||||||
|
Cet aspect est déterminant : il permet en effet d'héberger sur du matériel acheté d'occasion, pour réduire l'impact écologique de nos infrastructures.
|
||||||
|
\end{center}
|
||||||
|
\end{frame}
|
||||||
|
|
||||||
|
\subsection{Performances}
|
||||||
|
\begin{frame}
|
||||||
|
\begin{center}
|
||||||
|
\includegraphics[width=13.25cm]{rpc-amplification.png}
|
||||||
|
\end{center}
|
||||||
|
\end{frame}
|
||||||
|
|
||||||
|
\begin{frame}
|
||||||
|
\begin{center}
|
||||||
|
\includegraphics[width=11cm]{rpc-complexity.png}
|
||||||
|
\end{center}
|
||||||
|
\end{frame}
|
||||||
|
|
||||||
|
\subsection{Services}
|
||||||
|
\begin{frame}
|
||||||
|
\begin{center}
|
||||||
|
Puisqu'il suit le standard S3, beaucoup de services populaires sont par conséquence compatibles avec Garage :\linebreak
|
||||||
|
|
||||||
|
\begin{columns}
|
||||||
|
\column{0.2 \linewidth}
|
||||||
|
\begin{center}
|
||||||
|
\includegraphics[width=2.5cm]{nextcloud-logo.png}
|
||||||
|
\end{center}
|
||||||
|
\column{0.2 \linewidth}
|
||||||
|
\begin{center}
|
||||||
|
\includegraphics[width=2.5cm]{peertube-logo.png}
|
||||||
|
\end{center}
|
||||||
|
\column{0.2 \linewidth}
|
||||||
|
\begin{center}
|
||||||
|
\includegraphics[width=2.5cm]{matrix-logo.png}
|
||||||
|
\end{center}
|
||||||
|
\column{0.2 \linewidth}
|
||||||
|
\begin{center}
|
||||||
|
\includegraphics[width=2.5cm]{mastodon-logo.png}
|
||||||
|
\end{center}
|
||||||
|
\end{columns}
|
||||||
|
~\linebreak
|
||||||
|
|
||||||
|
Et comme souvent avec S3, on peut assimiler un bucket à un site, et utiliser le serveur pour héberger des sites web statiques.
|
||||||
|
\end{center}
|
||||||
|
\end{frame}
|
||||||
|
|
||||||
|
\section{Intégration chez Deuxfleurs}
|
||||||
|
\subsection{Matériel}
|
||||||
|
\begin{frame}
|
||||||
|
\begin{center}
|
||||||
|
\includegraphics[width=13cm]{neptune.jpg}\linebreak
|
||||||
|
|
||||||
|
En pratique, nos serveurs ne sont effectivement que des machines achetées d'occasion (très souvent des anciens ordinateurs destinés à la bureautique en entreprise).
|
||||||
|
\end{center}
|
||||||
|
\end{frame}
|
||||||
|
|
||||||
|
\subsection{Environnement logiciel}
|
||||||
|
\begin{frame}
|
||||||
|
\begin{center}
|
||||||
|
Pour faciliter la reproduction d'un environnement connu, NixOS est installé sur nos machines.\linebreak
|
||||||
|
|
||||||
|
\vspace{1cm}
|
||||||
|
Pour s’accommoder des réseaux qu'on trouve derrière des routeurs pour particuliers, on s'aide de notre logiciel Diplonat\footnote[frame]{\texttt{https://git.deuxfleurs.fr/Deuxfleurs/diplonat}}.
|
||||||
|
\end{center}
|
||||||
|
\end{frame}
|
||||||
|
|
||||||
|
\section{Au-delà...}
|
||||||
|
\subsection{... de Deuxfleurs}
|
||||||
|
\begin{frame}
|
||||||
|
\begin{center}
|
||||||
|
\includegraphics[width=10cm]{tedomum.png}
|
||||||
|
\end{center}
|
||||||
|
\end{frame}
|
||||||
|
|
||||||
|
\subsection{... de Garage}
|
||||||
|
\begin{frame}
|
||||||
|
\begin{center}
|
||||||
|
Nous avons récemment lancé le développement d'Aérogramme\footnote[frame]{\texttt{https://git.deuxfleurs.fr/Deuxfleurs/aerogramme}}.\linebreak
|
||||||
|
|
||||||
|
\vspace{1cm}
|
||||||
|
Il s'agit d'un serveur de stockage de courriels chiffrés.\linebreak
|
||||||
|
|
||||||
|
\vspace{1cm}
|
||||||
|
Il est conçu pour pouvoir travailler avec Garage.
|
||||||
|
\end{center}
|
||||||
|
\end{frame}
|
||||||
|
|
||||||
|
\section{Fin}
|
||||||
|
\subsection{Contacts}
|
||||||
|
\begin{frame}
|
||||||
|
\begin{center}
|
||||||
|
\begin{tikzpicture}
|
||||||
|
\node (ronce) {\includegraphics[width=0.95\textwidth]{ronce.jpg}};
|
||||||
|
\node[white] at (-5.1,3.6) {Intéressé(e) ?};
|
||||||
|
\node[white, align=center] at (4.2,-2.6) {Contactez-nous !\\\texttt{coucou@deuxfleurs.fr}\\\texttt{\#forum:deuxfleurs.fr}};
|
||||||
|
\end{tikzpicture}
|
||||||
|
\end{center}
|
||||||
|
\end{frame}
|
||||||
|
\end{document}
|
||||||
|
|
BIN
doc/talks/2022-11-19-Capitole-du-Libre/ronce.jpg
Normal file
After Width: | Height: | Size: 1.4 MiB |
BIN
doc/talks/2022-11-19-Capitole-du-Libre/rpc-amplification.png
Normal file
After Width: | Height: | Size: 124 KiB |
BIN
doc/talks/2022-11-19-Capitole-du-Libre/rpc-complexity.png
Normal file
After Width: | Height: | Size: 194 KiB |
BIN
doc/talks/2022-11-19-Capitole-du-Libre/rust-logo.png
Normal file
After Width: | Height: | Size: 6.5 KiB |
52
doc/talks/2022-11-19-Capitole-du-Libre/schéma europe.tex
Normal file
|
@ -0,0 +1,52 @@
|
||||||
|
\begin{tikzpicture}
|
||||||
|
\node (carte) {\includegraphics[width=\textwidth]{carte-Europe.pdf}};
|
||||||
|
|
||||||
|
% \personnage{position X}{position Y}{facteur d'échelle}
|
||||||
|
\newcommand{\personnage}[4]
|
||||||
|
{
|
||||||
|
\fill[#4] ({#1-(0.4 * #3)},{#2-(0.9 * #3)}) .. controls ({#1-(0.4 * #3)},#2) and ({#1+(0.4 * #3)},#2) .. ({#1+(0.4 * #3)},{#2-(0.9 * #3)}) -- ({#1-(0.4 * #3)},{#2-(0.9 * #3)});
|
||||||
|
\fill[#4] (#1,#2) circle ({0.25 * #3});
|
||||||
|
}
|
||||||
|
|
||||||
|
\onslide<1-11>{\personnage{-2.25}{-0.75}{0.75}{green}}
|
||||||
|
\onslide<1-11>{\draw (-1.9,-1.6) rectangle ++(1,1.2);}
|
||||||
|
\onslide<2-11>{\draw[fill=green] (-1.8,-1.525) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 1};}
|
||||||
|
\onslide<4-5>{\draw[fill=red] (-1.8,-1.15) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 3};}
|
||||||
|
\onslide<7-11>{\draw[fill=yellow] (-1.8,-1.15) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 4};}
|
||||||
|
\onslide<9-11>{\draw[fill=red] (-1.8,-0.775) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 6};}
|
||||||
|
\onslide<3-11>{\draw[fill=blue] (-1.35,-1.525) rectangle ++(0.35,0.3) node[pos=0.5, white] {\tiny 2};}
|
||||||
|
\onslide<8-11>{\draw[fill=blue] (-1.35,-1.15) rectangle ++(0.35,0.3) node[pos=0.5, white] {\tiny 5};}
|
||||||
|
\onslide<11-11>{\draw[fill=yellow] (-1.35,-0.775) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 8};}
|
||||||
|
|
||||||
|
\personnage{1.65}{1.5}{0.75}{blue}
|
||||||
|
\draw (0.3,0.7) rectangle ++(1,1.2);
|
||||||
|
\onslide<2->{\draw[fill=green] (0.4,0.775) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 1};}
|
||||||
|
\onslide<4->{\draw[fill=red] (0.4,1.15) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 3};}
|
||||||
|
\onslide<10->{\draw[fill=green] (0.4,1.525) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 7};}
|
||||||
|
\onslide<3->{\draw[fill=blue] (0.85,0.775) rectangle ++(0.35,0.3) node[pos=0.5, white] {\tiny 2};}
|
||||||
|
\onslide<9->{\draw[fill=red] (0.85,1.15) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 6};}
|
||||||
|
\onslide<11->{\draw[fill=yellow] (0.85,1.525) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 8};}
|
||||||
|
|
||||||
|
\personnage{1.85}{-2.3}{0.75}{red}
|
||||||
|
\draw (0.5,-3.15) rectangle ++(1,1.2);
|
||||||
|
\onslide<2->{\draw[fill=green] (0.6,-3.075) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 1};}
|
||||||
|
\onslide<4-5>{\draw[fill=red] (0.6,-2.7) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 3};}
|
||||||
|
\onslide<7->{\draw[fill=yellow] (0.6,-2.7) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 4};}
|
||||||
|
\onslide<9->{\draw[fill=red] (0.6,-2.325) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 6};}
|
||||||
|
\onslide<3-5>{\draw[fill=blue] (1.05,-3.075) rectangle ++(0.35,0.3) node[pos=0.5, white] {\tiny 2};}
|
||||||
|
\onslide<6->{\draw[fill=red] (1.05,-3.075) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 3};}
|
||||||
|
\onslide<8->{\draw[fill=blue] (1.05,-2.7) rectangle ++(0.35,0.3) node[pos=0.5, white] {\tiny 5};}
|
||||||
|
\onslide<10->{\draw[fill=green] (1.05,-2.325) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 7};}
|
||||||
|
|
||||||
|
\onslide<5->{\personnage{1.05}{-0.15}{0.75}{yellow}}
|
||||||
|
\onslide<5->{\draw (-0.35,-1) rectangle ++(1,1.2);}
|
||||||
|
\onslide<6->{\draw[fill=blue] (-0.25,-0.925) rectangle ++(0.35,0.3) node[pos=0.5, white] {\tiny 2};}
|
||||||
|
\onslide<7->{\draw[fill=yellow] (-0.25,-0.55) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 4};}
|
||||||
|
\onslide<10->{\draw[fill=green] (-0.25,-0.175) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 7};}
|
||||||
|
\onslide<6->{\draw[fill=red] (0.2,-0.925) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 3};}
|
||||||
|
\onslide<8->{\draw[fill=blue] (0.2,-0.55) rectangle ++(0.35,0.3) node[pos=0.5,white] {\tiny 5};}
|
||||||
|
\onslide<11->{\draw[fill=yellow] (0.2,-0.175) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 8};}
|
||||||
|
|
||||||
|
\onslide<12->{\draw[line width=0.25cm] (-2.15,-0.5) -- ++(1,-1);}
|
||||||
|
\onslide<12->{\draw[line width=0.25cm] (-2.15,-1.5) -- ++(1,1);}
|
||||||
|
\end{tikzpicture}
|
BIN
doc/talks/2022-11-19-Capitole-du-Libre/tedomum.png
Normal file
After Width: | Height: | Size: 236 KiB |
BIN
doc/talks/2022-11-19-Capitole-du-Libre/zones.png
Normal file
After Width: | Height: | Size: 97 KiB |
124
flake.lock
Normal file
|
@ -0,0 +1,124 @@
|
||||||
|
{
|
||||||
|
"nodes": {
|
||||||
|
"cargo2nix": {
|
||||||
|
"inputs": {
|
||||||
|
"flake-compat": "flake-compat",
|
||||||
|
"flake-utils": "flake-utils",
|
||||||
|
"nixpkgs": [
|
||||||
|
"nixpkgs"
|
||||||
|
],
|
||||||
|
"rust-overlay": "rust-overlay"
|
||||||
|
},
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1666087781,
|
||||||
|
"narHash": "sha256-trKVdjMZ8mNkGfLcY5LsJJGtdV3xJDZnMVrkFjErlcs=",
|
||||||
|
"owner": "Alexis211",
|
||||||
|
"repo": "cargo2nix",
|
||||||
|
"rev": "a7a61179b66054904ef6a195d8da736eaaa06c36",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "Alexis211",
|
||||||
|
"repo": "cargo2nix",
|
||||||
|
"rev": "a7a61179b66054904ef6a195d8da736eaaa06c36",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"flake-compat": {
|
||||||
|
"flake": false,
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1650374568,
|
||||||
|
"narHash": "sha256-Z+s0J8/r907g149rllvwhb4pKi8Wam5ij0st8PwAh+E=",
|
||||||
|
"owner": "edolstra",
|
||||||
|
"repo": "flake-compat",
|
||||||
|
"rev": "b4a34015c698c7793d592d66adbab377907a2be8",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "edolstra",
|
||||||
|
"repo": "flake-compat",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"flake-utils": {
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1659877975,
|
||||||
|
"narHash": "sha256-zllb8aq3YO3h8B/U0/J1WBgAL8EX5yWf5pMj3G0NAmc=",
|
||||||
|
"owner": "numtide",
|
||||||
|
"repo": "flake-utils",
|
||||||
|
"rev": "c0e246b9b83f637f4681389ecabcb2681b4f3af0",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "numtide",
|
||||||
|
"repo": "flake-utils",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"flake-utils_2": {
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1667395993,
|
||||||
|
"narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
|
||||||
|
"owner": "numtide",
|
||||||
|
"repo": "flake-utils",
|
||||||
|
"rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "numtide",
|
||||||
|
"repo": "flake-utils",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nixpkgs": {
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1665657542,
|
||||||
|
"narHash": "sha256-mojxNyzbvmp8NtVtxqiHGhRfjCALLfk9i/Uup68Y5q8=",
|
||||||
|
"owner": "NixOS",
|
||||||
|
"repo": "nixpkgs",
|
||||||
|
"rev": "a3073c49bc0163fea6a121c276f526837672b555",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "NixOS",
|
||||||
|
"repo": "nixpkgs",
|
||||||
|
"rev": "a3073c49bc0163fea6a121c276f526837672b555",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"root": {
|
||||||
|
"inputs": {
|
||||||
|
"cargo2nix": "cargo2nix",
|
||||||
|
"flake-utils": "flake-utils_2",
|
||||||
|
"nixpkgs": "nixpkgs"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"rust-overlay": {
|
||||||
|
"inputs": {
|
||||||
|
"flake-utils": [
|
||||||
|
"cargo2nix",
|
||||||
|
"flake-utils"
|
||||||
|
],
|
||||||
|
"nixpkgs": [
|
||||||
|
"cargo2nix",
|
||||||
|
"nixpkgs"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1664247556,
|
||||||
|
"narHash": "sha256-J4vazHU3609ekn7dr+3wfqPo5WGlZVAgV7jfux352L0=",
|
||||||
|
"owner": "oxalica",
|
||||||
|
"repo": "rust-overlay",
|
||||||
|
"rev": "524db9c9ea7bc7743bb74cdd45b6d46ea3fcc2ab",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "oxalica",
|
||||||
|
"repo": "rust-overlay",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"root": "root",
|
||||||
|
"version": 7
|
||||||
|
}
|
36
flake.nix
Normal file
|
@ -0,0 +1,36 @@
|
||||||
|
{
|
||||||
|
description = "Garage, an S3-compatible distributed object store for self-hosted deployments";
|
||||||
|
|
||||||
|
inputs.nixpkgs.url = "github:NixOS/nixpkgs/a3073c49bc0163fea6a121c276f526837672b555";
|
||||||
|
inputs.cargo2nix = {
|
||||||
|
# As of 2022-10-18: two small patches over unstable branch, one for clippy and one to fix feature detection
|
||||||
|
url = "github:Alexis211/cargo2nix/a7a61179b66054904ef6a195d8da736eaaa06c36";
|
||||||
|
inputs.nixpkgs.follows = "nixpkgs";
|
||||||
|
};
|
||||||
|
inputs.flake-utils.url = "github:numtide/flake-utils";
|
||||||
|
|
||||||
|
outputs = { self, nixpkgs, cargo2nix, flake-utils }:
|
||||||
|
let
|
||||||
|
git_version = self.lastModifiedDate;
|
||||||
|
compile = import ./nix/compile.nix;
|
||||||
|
in flake-utils.lib.eachDefaultSystem (system:
|
||||||
|
let pkgs = nixpkgs.legacyPackages.${system};
|
||||||
|
in {
|
||||||
|
packages = {
|
||||||
|
default = (compile {
|
||||||
|
inherit system git_version;
|
||||||
|
pkgsSrc = nixpkgs;
|
||||||
|
cargo2nixOverlay = cargo2nix.overlays.default;
|
||||||
|
release = true;
|
||||||
|
}).workspace.garage { compileMode = "build"; };
|
||||||
|
};
|
||||||
|
devShell = ((compile {
|
||||||
|
inherit system git_version;
|
||||||
|
pkgsSrc = nixpkgs;
|
||||||
|
cargo2nixOverlay = cargo2nix.overlays.default;
|
||||||
|
release = false;
|
||||||
|
}).workspaceShell {
|
||||||
|
packages = [ pkgs.rustfmt cargo2nix.packages.${system}.default ];
|
||||||
|
});
|
||||||
|
});
|
||||||
|
}
|
|
@ -1,24 +1,31 @@
|
||||||
{
|
{
|
||||||
system ? builtins.currentSystem,
|
system,
|
||||||
target,
|
target ? null,
|
||||||
|
pkgsSrc,
|
||||||
|
cargo2nixOverlay,
|
||||||
compiler ? "rustc",
|
compiler ? "rustc",
|
||||||
release ? false,
|
release ? false,
|
||||||
git_version ? null,
|
git_version ? null,
|
||||||
features ? null,
|
features ? null,
|
||||||
}:
|
}:
|
||||||
|
|
||||||
with import ./common.nix;
|
|
||||||
|
|
||||||
let
|
let
|
||||||
log = v: builtins.trace v v;
|
log = v: builtins.trace v v;
|
||||||
|
|
||||||
pkgs = import pkgsSrc {
|
pkgs =
|
||||||
|
if target != null then
|
||||||
|
import pkgsSrc {
|
||||||
inherit system;
|
inherit system;
|
||||||
crossSystem = {
|
crossSystem = {
|
||||||
config = target;
|
config = target;
|
||||||
isStatic = true;
|
isStatic = true;
|
||||||
};
|
};
|
||||||
overlays = [ cargo2nixOverlay ];
|
overlays = [ cargo2nixOverlay ];
|
||||||
|
}
|
||||||
|
else
|
||||||
|
import pkgsSrc {
|
||||||
|
inherit system;
|
||||||
|
overlays = [ cargo2nixOverlay ];
|
||||||
};
|
};
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -34,7 +41,7 @@ let
|
||||||
NixOS ships them in separate ones. We reunite them with symlinkJoin.
|
NixOS ships them in separate ones. We reunite them with symlinkJoin.
|
||||||
*/
|
*/
|
||||||
toolchainOptions =
|
toolchainOptions =
|
||||||
if target == "x86_64-unknown-linux-musl" || target == "aarch64-unknown-linux-musl" then {
|
if target == null || target == "x86_64-unknown-linux-musl" || target == "aarch64-unknown-linux-musl" then {
|
||||||
rustVersion = "1.63.0";
|
rustVersion = "1.63.0";
|
||||||
extraRustComponents = [ "clippy" ];
|
extraRustComponents = [ "clippy" ];
|
||||||
} else {
|
} else {
|
||||||
|
|
|
@ -15,7 +15,7 @@ type: application
|
||||||
# This is the chart version. This version number should be incremented each time you make changes
|
# This is the chart version. This version number should be incremented each time you make changes
|
||||||
# to the chart and its templates, including the app version.
|
# to the chart and its templates, including the app version.
|
||||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||||
version: 0.1.3
|
version: 0.2.0
|
||||||
|
|
||||||
# This is the version number of the application being deployed. This version number should be
|
# This is the version number of the application being deployed. This version number should be
|
||||||
# incremented each time you make changes to the application. Versions are not expected to
|
# incremented each time you make changes to the application. Versions are not expected to
|
||||||
|
|
|
@ -7,7 +7,6 @@ data:
|
||||||
metadata_dir = "{{ .Values.garage.metadataDir }}"
|
metadata_dir = "{{ .Values.garage.metadataDir }}"
|
||||||
data_dir = "{{ .Values.garage.dataDir }}"
|
data_dir = "{{ .Values.garage.dataDir }}"
|
||||||
|
|
||||||
db_engine = "{{ .Values.garage.dbEngine }}"
|
|
||||||
replication_mode = "{{ .Values.garage.replicationMode }}"
|
replication_mode = "{{ .Values.garage.replicationMode }}"
|
||||||
|
|
||||||
rpc_bind_addr = "{{ .Values.garage.rpcBindAddr }}"
|
rpc_bind_addr = "{{ .Values.garage.rpcBindAddr }}"
|
||||||
|
@ -30,6 +29,3 @@ data:
|
||||||
bind_addr = "[::]:3902"
|
bind_addr = "[::]:3902"
|
||||||
root_domain = "{{ .Values.garage.s3.web.rootDomain }}"
|
root_domain = "{{ .Values.garage.s3.web.rootDomain }}"
|
||||||
index = "{{ .Values.garage.s3.web.index }}"
|
index = "{{ .Values.garage.s3.web.index }}"
|
||||||
|
|
||||||
[admin]
|
|
||||||
api_bind_addr = "[::]:3903"
|
|
||||||
|
|
|
@ -18,6 +18,9 @@ metadata:
|
||||||
name: {{ $fullName }}-s3-api
|
name: {{ $fullName }}-s3-api
|
||||||
labels:
|
labels:
|
||||||
{{- include "garage.labels" . | nindent 4 }}
|
{{- include "garage.labels" . | nindent 4 }}
|
||||||
|
{{- with .Values.ingress.s3.api.labels }}
|
||||||
|
{{- toYaml . | nindent 4 }}
|
||||||
|
{{- end }}
|
||||||
{{- with .Values.ingress.s3.api.annotations }}
|
{{- with .Values.ingress.s3.api.annotations }}
|
||||||
annotations:
|
annotations:
|
||||||
{{- toYaml . | nindent 4 }}
|
{{- toYaml . | nindent 4 }}
|
||||||
|
@ -80,6 +83,9 @@ metadata:
|
||||||
name: {{ $fullName }}-s3-web
|
name: {{ $fullName }}-s3-web
|
||||||
labels:
|
labels:
|
||||||
{{- include "garage.labels" . | nindent 4 }}
|
{{- include "garage.labels" . | nindent 4 }}
|
||||||
|
{{- with .Values.ingress.s3.web.labels }}
|
||||||
|
{{- toYaml . | nindent 4 }}
|
||||||
|
{{- end }}
|
||||||
{{- with .Values.ingress.s3.web.annotations }}
|
{{- with .Values.ingress.s3.web.annotations }}
|
||||||
annotations:
|
annotations:
|
||||||
{{- toYaml . | nindent 4 }}
|
{{- toYaml . | nindent 4 }}
|
||||||
|
|
|
@ -15,9 +15,5 @@ spec:
|
||||||
targetPort: 3902
|
targetPort: 3902
|
||||||
protocol: TCP
|
protocol: TCP
|
||||||
name: s3-web
|
name: s3-web
|
||||||
- port: 3903
|
|
||||||
targetPort: 3903
|
|
||||||
protocol: TCP
|
|
||||||
name: admin
|
|
||||||
selector:
|
selector:
|
||||||
{{- include "garage.selectorLabels" . | nindent 4 }}
|
{{- include "garage.selectorLabels" . | nindent 4 }}
|
||||||
|
|
|
@ -1,17 +1,15 @@
|
||||||
apiVersion: apps/v1
|
apiVersion: apps/v1
|
||||||
kind: {{ .Values.deployment.kind }}
|
kind: StatefulSet
|
||||||
metadata:
|
metadata:
|
||||||
name: {{ include "garage.fullname" . }}
|
name: {{ include "garage.fullname" . }}
|
||||||
labels:
|
labels:
|
||||||
{{- include "garage.labels" . | nindent 4 }}
|
{{- include "garage.labels" . | nindent 4 }}
|
||||||
spec:
|
spec:
|
||||||
|
replicas: {{ .Values.replicaCount }}
|
||||||
selector:
|
selector:
|
||||||
matchLabels:
|
matchLabels:
|
||||||
{{- include "garage.selectorLabels" . | nindent 6 }}
|
{{- include "garage.selectorLabels" . | nindent 6 }}
|
||||||
{{- if eq .Values.deployment.kind "StatefulSet" }}
|
|
||||||
replicas: {{ .Values.deployment.replicaCount }}
|
|
||||||
serviceName: {{ include "garage.fullname" . }}
|
serviceName: {{ include "garage.fullname" . }}
|
||||||
{{- end }}
|
|
||||||
template:
|
template:
|
||||||
metadata:
|
metadata:
|
||||||
{{- with .Values.podAnnotations }}
|
{{- with .Values.podAnnotations }}
|
||||||
|
@ -56,8 +54,6 @@ spec:
|
||||||
name: s3-api
|
name: s3-api
|
||||||
- containerPort: 3902
|
- containerPort: 3902
|
||||||
name: web-api
|
name: web-api
|
||||||
- containerPort: 3903
|
|
||||||
name: admin
|
|
||||||
volumeMounts:
|
volumeMounts:
|
||||||
- name: meta
|
- name: meta
|
||||||
mountPath: /mnt/meta
|
mountPath: /mnt/meta
|
||||||
|
@ -83,23 +79,6 @@ spec:
|
||||||
name: {{ include "garage.fullname" . }}-config
|
name: {{ include "garage.fullname" . }}-config
|
||||||
- name: etc
|
- name: etc
|
||||||
emptyDir: {}
|
emptyDir: {}
|
||||||
{{- if .Values.persistence.enabled }}
|
|
||||||
{{- if eq .Values.deployment.kind "DaemonSet" }}
|
|
||||||
- name: meta
|
|
||||||
hostPath:
|
|
||||||
path: {{ .Values.persistence.meta.hostPath }}
|
|
||||||
type: DirectoryOrCreate
|
|
||||||
- name: data
|
|
||||||
hostPath:
|
|
||||||
path: {{ .Values.persistence.data.hostPath }}
|
|
||||||
type: DirectoryOrCreate
|
|
||||||
{{- end }}
|
|
||||||
{{- else }}
|
|
||||||
- name: meta
|
|
||||||
emptyDir: {}
|
|
||||||
- name: data
|
|
||||||
emptyDir: {}
|
|
||||||
{{- end }}
|
|
||||||
{{- with .Values.nodeSelector }}
|
{{- with .Values.nodeSelector }}
|
||||||
nodeSelector:
|
nodeSelector:
|
||||||
{{- toYaml . | nindent 8 }}
|
{{- toYaml . | nindent 8 }}
|
||||||
|
@ -112,7 +91,7 @@ spec:
|
||||||
tolerations:
|
tolerations:
|
||||||
{{- toYaml . | nindent 8 }}
|
{{- toYaml . | nindent 8 }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
{{- if and .Values.persistence.enabled (eq .Values.deployment.kind "StatefulSet") }}
|
{{- if .Values.persistence.enabled }}
|
||||||
volumeClaimTemplates:
|
volumeClaimTemplates:
|
||||||
- metadata:
|
- metadata:
|
||||||
name: meta
|
name: meta
|
|
@ -29,20 +29,12 @@ persistence:
|
||||||
meta:
|
meta:
|
||||||
# storageClass: "fast-storage-class"
|
# storageClass: "fast-storage-class"
|
||||||
size: 100Mi
|
size: 100Mi
|
||||||
# used only for daemon sets
|
|
||||||
hostPath: /var/lib/garage/meta
|
|
||||||
data:
|
data:
|
||||||
# storageClass: "slow-storage-class"
|
# storageClass: "slow-storage-class"
|
||||||
size: 100Mi
|
size: 100Mi
|
||||||
# used only for daemon sets
|
|
||||||
hostPath: /var/lib/garage/data
|
|
||||||
|
|
||||||
# Deployment configuration
|
# Number of StatefulSet replicas/garage nodes to start
|
||||||
deployment:
|
replicaCount: 3
|
||||||
# Switchable to DaemonSet
|
|
||||||
kind: StatefulSet
|
|
||||||
# Number of StatefulSet replicas/garage nodes to start
|
|
||||||
replicaCount: 3
|
|
||||||
|
|
||||||
image:
|
image:
|
||||||
repository: dxflrs/amd64_garage
|
repository: dxflrs/amd64_garage
|
||||||
|
@ -93,14 +85,15 @@ service:
|
||||||
ingress:
|
ingress:
|
||||||
s3:
|
s3:
|
||||||
api:
|
api:
|
||||||
enabled: true
|
enabled: false
|
||||||
# Rely either on the className or the annotation below but not both
|
# Rely either on the className or the annotation below but not both
|
||||||
# replace "nginx" by an Ingress controller
|
# replace "nginx" by an Ingress controller
|
||||||
# you can find examples here https://kubernetes.io/docs/concepts/services-networking/ingress-controllers
|
# you can find examples here https://kubernetes.io/docs/concepts/services-networking/ingress-controllers
|
||||||
className: "nginx"
|
# className: "nginx"
|
||||||
annotations:
|
annotations: {}
|
||||||
# kubernetes.io/ingress.class: "nginx"
|
# kubernetes.io/ingress.class: "nginx"
|
||||||
# kubernetes.io/tls-acme: "true"
|
# kubernetes.io/tls-acme: "true"
|
||||||
|
labels: {}
|
||||||
hosts:
|
hosts:
|
||||||
- host: "s3.garage.tld" # garage S3 API endpoint
|
- host: "s3.garage.tld" # garage S3 API endpoint
|
||||||
paths:
|
paths:
|
||||||
|
@ -115,11 +108,15 @@ ingress:
|
||||||
# hosts:
|
# hosts:
|
||||||
# - kubernetes.docker.internal
|
# - kubernetes.docker.internal
|
||||||
web:
|
web:
|
||||||
enabled: true
|
enabled: false
|
||||||
className: "nginx"
|
# Rely either on the className or the annotation below but not both
|
||||||
|
# replace "nginx" by an Ingress controller
|
||||||
|
# you can find examples here https://kubernetes.io/docs/concepts/services-networking/ingress-controllers
|
||||||
|
# className: "nginx"
|
||||||
annotations: {}
|
annotations: {}
|
||||||
# kubernetes.io/ingress.class: nginx
|
# kubernetes.io/ingress.class: nginx
|
||||||
# kubernetes.io/tls-acme: "true"
|
# kubernetes.io/tls-acme: "true"
|
||||||
|
labels: {}
|
||||||
hosts:
|
hosts:
|
||||||
- host: "*.web.garage.tld" # wildcard website access with bucket name prefix
|
- host: "*.web.garage.tld" # wildcard website access with bucket name prefix
|
||||||
paths:
|
paths:
|
||||||
|
|
14
shell.nix
|
@ -71,13 +71,25 @@ function refresh_cache {
|
||||||
for attr in clippy.amd64 test.amd64 pkgs.{amd64,i386,arm,arm64}.{debug,release}; do
|
for attr in clippy.amd64 test.amd64 pkgs.{amd64,i386,arm,arm64}.{debug,release}; do
|
||||||
echo "Updating cache for ''${attr}"
|
echo "Updating cache for ''${attr}"
|
||||||
derivation=$(nix-instantiate --attr ''${attr})
|
derivation=$(nix-instantiate --attr ''${attr})
|
||||||
nix copy \
|
nix copy -j8 \
|
||||||
--to 's3://nix?endpoint=garage.deuxfleurs.fr®ion=garage&secret-key=/tmp/nix-signing-key.sec' \
|
--to 's3://nix?endpoint=garage.deuxfleurs.fr®ion=garage&secret-key=/tmp/nix-signing-key.sec' \
|
||||||
$(nix-store -qR ''${derivation%\!bin})
|
$(nix-store -qR ''${derivation%\!bin})
|
||||||
done
|
done
|
||||||
rm /tmp/nix-signing-key.sec
|
rm /tmp/nix-signing-key.sec
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function refresh_flake_cache {
|
||||||
|
pass show deuxfleurs/nix_priv_key > /tmp/nix-signing-key.sec
|
||||||
|
for attr in packages.x86_64-linux.default; do
|
||||||
|
echo "Updating cache for ''${attr}"
|
||||||
|
derivation=$(nix path-info --derivation ".#''${attr}")
|
||||||
|
nix copy -j8 \
|
||||||
|
--to 's3://nix?endpoint=garage.deuxfleurs.fr®ion=garage&secret-key=/tmp/nix-signing-key.sec' \
|
||||||
|
$(nix-store -qR ''${derivation})
|
||||||
|
done
|
||||||
|
rm /tmp/nix-signing-key.sec
|
||||||
|
}
|
||||||
|
|
||||||
function to_s3 {
|
function to_s3 {
|
||||||
aws \
|
aws \
|
||||||
--endpoint-url https://garage.deuxfleurs.fr \
|
--endpoint-url https://garage.deuxfleurs.fr \
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
[package]
|
[package]
|
||||||
name = "garage_api"
|
name = "garage_api"
|
||||||
version = "0.8.0"
|
version = "0.8.1"
|
||||||
authors = ["Alex Auvolat <alex@adnab.me>"]
|
authors = ["Alex Auvolat <alex@adnab.me>"]
|
||||||
edition = "2018"
|
edition = "2018"
|
||||||
license = "AGPL-3.0"
|
license = "AGPL-3.0"
|
||||||
|
@ -14,11 +14,11 @@ path = "lib.rs"
|
||||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
garage_model = { version = "0.8.0", path = "../model" }
|
garage_model = { version = "0.8.1", path = "../model" }
|
||||||
garage_table = { version = "0.8.0", path = "../table" }
|
garage_table = { version = "0.8.1", path = "../table" }
|
||||||
garage_block = { version = "0.8.0", path = "../block" }
|
garage_block = { version = "0.8.1", path = "../block" }
|
||||||
garage_util = { version = "0.8.0", path = "../util" }
|
garage_util = { version = "0.8.1", path = "../util" }
|
||||||
garage_rpc = { version = "0.8.0", path = "../rpc" }
|
garage_rpc = { version = "0.8.1", path = "../rpc" }
|
||||||
|
|
||||||
async-trait = "0.1.7"
|
async-trait = "0.1.7"
|
||||||
base64 = "0.13"
|
base64 = "0.13"
|
||||||
|
|
|
@ -15,6 +15,7 @@ use opentelemetry_prometheus::PrometheusExporter;
|
||||||
use prometheus::{Encoder, TextEncoder};
|
use prometheus::{Encoder, TextEncoder};
|
||||||
|
|
||||||
use garage_model::garage::Garage;
|
use garage_model::garage::Garage;
|
||||||
|
use garage_rpc::system::ClusterHealthStatus;
|
||||||
use garage_util::error::Error as GarageError;
|
use garage_util::error::Error as GarageError;
|
||||||
|
|
||||||
use crate::generic_server::*;
|
use crate::generic_server::*;
|
||||||
|
@ -76,6 +77,31 @@ impl AdminApiServer {
|
||||||
.body(Body::empty())?)
|
.body(Body::empty())?)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn handle_health(&self) -> Result<Response<Body>, Error> {
|
||||||
|
let health = self.garage.system.health();
|
||||||
|
|
||||||
|
let (status, status_str) = match health.status {
|
||||||
|
ClusterHealthStatus::Healthy => (StatusCode::OK, "Garage is fully operational"),
|
||||||
|
ClusterHealthStatus::Degraded => (
|
||||||
|
StatusCode::OK,
|
||||||
|
"Garage is operational but some storage nodes are unavailable",
|
||||||
|
),
|
||||||
|
ClusterHealthStatus::Unavailable => (
|
||||||
|
StatusCode::SERVICE_UNAVAILABLE,
|
||||||
|
"Quorum is not available for some/all partitions, reads and writes will fail",
|
||||||
|
),
|
||||||
|
};
|
||||||
|
let status_str = format!(
|
||||||
|
"{}\nConsult the full health check API endpoint at /v0/health for more details\n",
|
||||||
|
status_str
|
||||||
|
);
|
||||||
|
|
||||||
|
Ok(Response::builder()
|
||||||
|
.status(status)
|
||||||
|
.header(http::header::CONTENT_TYPE, "text/plain")
|
||||||
|
.body(Body::from(status_str))?)
|
||||||
|
}
|
||||||
|
|
||||||
fn handle_metrics(&self) -> Result<Response<Body>, Error> {
|
fn handle_metrics(&self) -> Result<Response<Body>, Error> {
|
||||||
#[cfg(feature = "metrics")]
|
#[cfg(feature = "metrics")]
|
||||||
{
|
{
|
||||||
|
@ -124,6 +150,7 @@ impl ApiHandler for AdminApiServer {
|
||||||
) -> Result<Response<Body>, Error> {
|
) -> Result<Response<Body>, Error> {
|
||||||
let expected_auth_header =
|
let expected_auth_header =
|
||||||
match endpoint.authorization_type() {
|
match endpoint.authorization_type() {
|
||||||
|
Authorization::None => None,
|
||||||
Authorization::MetricsToken => self.metrics_token.as_ref(),
|
Authorization::MetricsToken => self.metrics_token.as_ref(),
|
||||||
Authorization::AdminToken => match &self.admin_token {
|
Authorization::AdminToken => match &self.admin_token {
|
||||||
None => return Err(Error::forbidden(
|
None => return Err(Error::forbidden(
|
||||||
|
@ -147,8 +174,10 @@ impl ApiHandler for AdminApiServer {
|
||||||
|
|
||||||
match endpoint {
|
match endpoint {
|
||||||
Endpoint::Options => self.handle_options(&req),
|
Endpoint::Options => self.handle_options(&req),
|
||||||
|
Endpoint::Health => self.handle_health(),
|
||||||
Endpoint::Metrics => self.handle_metrics(),
|
Endpoint::Metrics => self.handle_metrics(),
|
||||||
Endpoint::GetClusterStatus => handle_get_cluster_status(&self.garage).await,
|
Endpoint::GetClusterStatus => handle_get_cluster_status(&self.garage).await,
|
||||||
|
Endpoint::GetClusterHealth => handle_get_cluster_health(&self.garage).await,
|
||||||
Endpoint::ConnectClusterNodes => handle_connect_cluster_nodes(&self.garage, req).await,
|
Endpoint::ConnectClusterNodes => handle_connect_cluster_nodes(&self.garage, req).await,
|
||||||
// Layout
|
// Layout
|
||||||
Endpoint::GetClusterLayout => handle_get_cluster_layout(&self.garage).await,
|
Endpoint::GetClusterLayout => handle_get_cluster_layout(&self.garage).await,
|
||||||
|
|
|
@ -43,6 +43,11 @@ pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<
|
||||||
Ok(json_ok_response(&res)?)
|
Ok(json_ok_response(&res)?)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub async fn handle_get_cluster_health(garage: &Arc<Garage>) -> Result<Response<Body>, Error> {
|
||||||
|
let health = garage.system.health();
|
||||||
|
Ok(json_ok_response(&health)?)
|
||||||
|
}
|
||||||
|
|
||||||
pub async fn handle_connect_cluster_nodes(
|
pub async fn handle_connect_cluster_nodes(
|
||||||
garage: &Arc<Garage>,
|
garage: &Arc<Garage>,
|
||||||
req: Request<Body>,
|
req: Request<Body>,
|
||||||
|
|
|
@ -6,6 +6,7 @@ use crate::admin::error::*;
|
||||||
use crate::router_macros::*;
|
use crate::router_macros::*;
|
||||||
|
|
||||||
pub enum Authorization {
|
pub enum Authorization {
|
||||||
|
None,
|
||||||
MetricsToken,
|
MetricsToken,
|
||||||
AdminToken,
|
AdminToken,
|
||||||
}
|
}
|
||||||
|
@ -16,8 +17,10 @@ router_match! {@func
|
||||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||||
pub enum Endpoint {
|
pub enum Endpoint {
|
||||||
Options,
|
Options,
|
||||||
|
Health,
|
||||||
Metrics,
|
Metrics,
|
||||||
GetClusterStatus,
|
GetClusterStatus,
|
||||||
|
GetClusterHealth,
|
||||||
ConnectClusterNodes,
|
ConnectClusterNodes,
|
||||||
// Layout
|
// Layout
|
||||||
GetClusterLayout,
|
GetClusterLayout,
|
||||||
|
@ -88,8 +91,10 @@ impl Endpoint {
|
||||||
|
|
||||||
let res = router_match!(@gen_path_parser (req.method(), path, query) [
|
let res = router_match!(@gen_path_parser (req.method(), path, query) [
|
||||||
OPTIONS _ => Options,
|
OPTIONS _ => Options,
|
||||||
|
GET "/health" => Health,
|
||||||
GET "/metrics" => Metrics,
|
GET "/metrics" => Metrics,
|
||||||
GET "/v0/status" => GetClusterStatus,
|
GET "/v0/status" => GetClusterStatus,
|
||||||
|
GET "/v0/health" => GetClusterHealth,
|
||||||
POST "/v0/connect" => ConnectClusterNodes,
|
POST "/v0/connect" => ConnectClusterNodes,
|
||||||
// Layout endpoints
|
// Layout endpoints
|
||||||
GET "/v0/layout" => GetClusterLayout,
|
GET "/v0/layout" => GetClusterLayout,
|
||||||
|
@ -130,6 +135,7 @@ impl Endpoint {
|
||||||
/// Get the kind of authorization which is required to perform the operation.
|
/// Get the kind of authorization which is required to perform the operation.
|
||||||
pub fn authorization_type(&self) -> Authorization {
|
pub fn authorization_type(&self) -> Authorization {
|
||||||
match self {
|
match self {
|
||||||
|
Self::Health => Authorization::None,
|
||||||
Self::Metrics => Authorization::MetricsToken,
|
Self::Metrics => Authorization::MetricsToken,
|
||||||
_ => Authorization::AdminToken,
|
_ => Authorization::AdminToken,
|
||||||
}
|
}
|
||||||
|
@ -137,9 +143,13 @@ impl Endpoint {
|
||||||
}
|
}
|
||||||
|
|
||||||
generateQueryParameters! {
|
generateQueryParameters! {
|
||||||
|
keywords: [],
|
||||||
|
fields: [
|
||||||
|
"format" => format,
|
||||||
"id" => id,
|
"id" => id,
|
||||||
"search" => search,
|
"search" => search,
|
||||||
"globalAlias" => global_alias,
|
"globalAlias" => global_alias,
|
||||||
"alias" => alias,
|
"alias" => alias,
|
||||||
"accessKeyId" => access_key_id
|
"accessKeyId" => access_key_id
|
||||||
|
]
|
||||||
}
|
}
|
||||||
|
|
|
@ -96,7 +96,7 @@ impl Endpoint {
|
||||||
fn from_get(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
fn from_get(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
||||||
router_match! {
|
router_match! {
|
||||||
@gen_parser
|
@gen_parser
|
||||||
(query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None),
|
(query.keyword.take().unwrap_or_default(), partition_key, query, None),
|
||||||
key: [
|
key: [
|
||||||
EMPTY if causality_token => PollItem (query::sort_key, query::causality_token, opt_parse::timeout),
|
EMPTY if causality_token => PollItem (query::sort_key, query::causality_token, opt_parse::timeout),
|
||||||
EMPTY => ReadItem (query::sort_key),
|
EMPTY => ReadItem (query::sort_key),
|
||||||
|
@ -111,7 +111,7 @@ impl Endpoint {
|
||||||
fn from_search(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
fn from_search(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
||||||
router_match! {
|
router_match! {
|
||||||
@gen_parser
|
@gen_parser
|
||||||
(query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None),
|
(query.keyword.take().unwrap_or_default(), partition_key, query, None),
|
||||||
key: [
|
key: [
|
||||||
],
|
],
|
||||||
no_key: [
|
no_key: [
|
||||||
|
@ -125,7 +125,7 @@ impl Endpoint {
|
||||||
fn from_head(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
fn from_head(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
||||||
router_match! {
|
router_match! {
|
||||||
@gen_parser
|
@gen_parser
|
||||||
(query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None),
|
(query.keyword.take().unwrap_or_default(), partition_key, query, None),
|
||||||
key: [
|
key: [
|
||||||
EMPTY => HeadObject(opt_parse::part_number, query_opt::version_id),
|
EMPTY => HeadObject(opt_parse::part_number, query_opt::version_id),
|
||||||
],
|
],
|
||||||
|
@ -140,7 +140,7 @@ impl Endpoint {
|
||||||
fn from_post(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
fn from_post(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
||||||
router_match! {
|
router_match! {
|
||||||
@gen_parser
|
@gen_parser
|
||||||
(query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None),
|
(query.keyword.take().unwrap_or_default(), partition_key, query, None),
|
||||||
key: [
|
key: [
|
||||||
],
|
],
|
||||||
no_key: [
|
no_key: [
|
||||||
|
@ -155,7 +155,7 @@ impl Endpoint {
|
||||||
fn from_put(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
fn from_put(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
||||||
router_match! {
|
router_match! {
|
||||||
@gen_parser
|
@gen_parser
|
||||||
(query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None),
|
(query.keyword.take().unwrap_or_default(), partition_key, query, None),
|
||||||
key: [
|
key: [
|
||||||
EMPTY => InsertItem (query::sort_key),
|
EMPTY => InsertItem (query::sort_key),
|
||||||
|
|
||||||
|
@ -169,7 +169,7 @@ impl Endpoint {
|
||||||
fn from_delete(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
fn from_delete(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
||||||
router_match! {
|
router_match! {
|
||||||
@gen_parser
|
@gen_parser
|
||||||
(query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None),
|
(query.keyword.take().unwrap_or_default(), partition_key, query, None),
|
||||||
key: [
|
key: [
|
||||||
EMPTY => DeleteItem (query::sort_key),
|
EMPTY => DeleteItem (query::sort_key),
|
||||||
],
|
],
|
||||||
|
@ -232,6 +232,11 @@ impl Endpoint {
|
||||||
|
|
||||||
// parameter name => struct field
|
// parameter name => struct field
|
||||||
generateQueryParameters! {
|
generateQueryParameters! {
|
||||||
|
keywords: [
|
||||||
|
"delete" => DELETE,
|
||||||
|
"search" => SEARCH
|
||||||
|
],
|
||||||
|
fields: [
|
||||||
"prefix" => prefix,
|
"prefix" => prefix,
|
||||||
"start" => start,
|
"start" => start,
|
||||||
"causality_token" => causality_token,
|
"causality_token" => causality_token,
|
||||||
|
@ -240,13 +245,5 @@ generateQueryParameters! {
|
||||||
"reverse" => reverse,
|
"reverse" => reverse,
|
||||||
"sort_key" => sort_key,
|
"sort_key" => sort_key,
|
||||||
"timeout" => timeout
|
"timeout" => timeout
|
||||||
}
|
]
|
||||||
|
|
||||||
mod keywords {
|
|
||||||
//! This module contain all query parameters with no associated value
|
|
||||||
//! used to differentiate endpoints.
|
|
||||||
pub const EMPTY: &str = "";
|
|
||||||
|
|
||||||
pub const DELETE: &str = "delete";
|
|
||||||
pub const SEARCH: &str = "search";
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -4,10 +4,9 @@ macro_rules! router_match {
|
||||||
(@match $enum:expr , [ $($endpoint:ident,)* ]) => {{
|
(@match $enum:expr , [ $($endpoint:ident,)* ]) => {{
|
||||||
// usage: router_match {@match my_enum, [ VariantWithField1, VariantWithField2 ..] }
|
// usage: router_match {@match my_enum, [ VariantWithField1, VariantWithField2 ..] }
|
||||||
// returns true if the variant was one of the listed variants, false otherwise.
|
// returns true if the variant was one of the listed variants, false otherwise.
|
||||||
use Endpoint::*;
|
|
||||||
match $enum {
|
match $enum {
|
||||||
$(
|
$(
|
||||||
$endpoint { .. } => true,
|
Endpoint::$endpoint { .. } => true,
|
||||||
)*
|
)*
|
||||||
_ => false
|
_ => false
|
||||||
}
|
}
|
||||||
|
@ -15,10 +14,9 @@ macro_rules! router_match {
|
||||||
(@extract $enum:expr , $param:ident, [ $($endpoint:ident,)* ]) => {{
|
(@extract $enum:expr , $param:ident, [ $($endpoint:ident,)* ]) => {{
|
||||||
// usage: router_match {@extract my_enum, field_name, [ VariantWithField1, VariantWithField2 ..] }
|
// usage: router_match {@extract my_enum, field_name, [ VariantWithField1, VariantWithField2 ..] }
|
||||||
// returns Some(field_value), or None if the variant was not one of the listed variants.
|
// returns Some(field_value), or None if the variant was not one of the listed variants.
|
||||||
use Endpoint::*;
|
|
||||||
match $enum {
|
match $enum {
|
||||||
$(
|
$(
|
||||||
$endpoint {$param, ..} => Some($param),
|
Endpoint::$endpoint {$param, ..} => Some($param),
|
||||||
)*
|
)*
|
||||||
_ => None
|
_ => None
|
||||||
}
|
}
|
||||||
|
@ -28,10 +26,9 @@ macro_rules! router_match {
|
||||||
$($meth:ident $path:pat $(if $required:ident)? => $api:ident $(($($conv:ident :: $param:ident),*))?,)*
|
$($meth:ident $path:pat $(if $required:ident)? => $api:ident $(($($conv:ident :: $param:ident),*))?,)*
|
||||||
]) => {{
|
]) => {{
|
||||||
{
|
{
|
||||||
use Endpoint::*;
|
|
||||||
match ($method, $reqpath) {
|
match ($method, $reqpath) {
|
||||||
$(
|
$(
|
||||||
(&Method::$meth, $path) if true $(&& $query.$required.is_some())? => $api {
|
(&Method::$meth, $path) if true $(&& $query.$required.is_some())? => Endpoint::$api {
|
||||||
$($(
|
$($(
|
||||||
$param: router_match!(@@parse_param $query, $conv, $param),
|
$param: router_match!(@@parse_param $query, $conv, $param),
|
||||||
)*)?
|
)*)?
|
||||||
|
@ -60,11 +57,9 @@ macro_rules! router_match {
|
||||||
// ]
|
// ]
|
||||||
// }
|
// }
|
||||||
// See in from_{method} for more detailed usage.
|
// See in from_{method} for more detailed usage.
|
||||||
use Endpoint::*;
|
|
||||||
use keywords::*;
|
|
||||||
match ($keyword, !$key.is_empty()){
|
match ($keyword, !$key.is_empty()){
|
||||||
$(
|
$(
|
||||||
($kw_k, true) if true $(&& $query.$required_k.is_some())? $(&& $header.contains_key($header_k))? => Ok($api_k {
|
(Keyword::$kw_k, true) if true $(&& $query.$required_k.is_some())? $(&& $header.contains_key($header_k))? => Ok(Endpoint::$api_k {
|
||||||
$key,
|
$key,
|
||||||
$($(
|
$($(
|
||||||
$param_k: router_match!(@@parse_param $query, $conv_k, $param_k),
|
$param_k: router_match!(@@parse_param $query, $conv_k, $param_k),
|
||||||
|
@ -72,7 +67,7 @@ macro_rules! router_match {
|
||||||
}),
|
}),
|
||||||
)*
|
)*
|
||||||
$(
|
$(
|
||||||
($kw_nk, false) $(if $query.$required_nk.is_some())? $(if $header.contains($header_nk))? => Ok($api_nk {
|
(Keyword::$kw_nk, false) $(if $query.$required_nk.is_some())? $(if $header.contains($header_nk))? => Ok(Endpoint::$api_nk {
|
||||||
$($(
|
$($(
|
||||||
$param_nk: router_match!(@@parse_param $query, $conv_nk, $param_nk),
|
$param_nk: router_match!(@@parse_param $query, $conv_nk, $param_nk),
|
||||||
)*)?
|
)*)?
|
||||||
|
@ -144,14 +139,40 @@ macro_rules! router_match {
|
||||||
/// This macro is used to generate part of the code in this module. It must be called only one, and
|
/// This macro is used to generate part of the code in this module. It must be called only one, and
|
||||||
/// is useless outside of this module.
|
/// is useless outside of this module.
|
||||||
macro_rules! generateQueryParameters {
|
macro_rules! generateQueryParameters {
|
||||||
( $($rest:expr => $name:ident),* ) => {
|
(
|
||||||
|
keywords: [ $($kw_param:expr => $kw_name: ident),* ],
|
||||||
|
fields: [ $($f_param:expr => $f_name:ident),* ]
|
||||||
|
) => {
|
||||||
|
#[derive(Debug)]
|
||||||
|
#[allow(non_camel_case_types)]
|
||||||
|
#[allow(clippy::upper_case_acronyms)]
|
||||||
|
enum Keyword {
|
||||||
|
EMPTY,
|
||||||
|
$( $kw_name, )*
|
||||||
|
}
|
||||||
|
|
||||||
|
impl std::fmt::Display for Keyword {
|
||||||
|
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
|
||||||
|
match self {
|
||||||
|
Keyword::EMPTY => write!(f, "``"),
|
||||||
|
$( Keyword::$kw_name => write!(f, "`{}`", $kw_param), )*
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for Keyword {
|
||||||
|
fn default() -> Self {
|
||||||
|
Keyword::EMPTY
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// Struct containing all query parameters used in endpoints. Think of it as an HashMap,
|
/// Struct containing all query parameters used in endpoints. Think of it as an HashMap,
|
||||||
/// but with keys statically known.
|
/// but with keys statically known.
|
||||||
#[derive(Debug, Default)]
|
#[derive(Debug, Default)]
|
||||||
struct QueryParameters<'a> {
|
struct QueryParameters<'a> {
|
||||||
keyword: Option<Cow<'a, str>>,
|
keyword: Option<Keyword>,
|
||||||
$(
|
$(
|
||||||
$name: Option<Cow<'a, str>>,
|
$f_name: Option<Cow<'a, str>>,
|
||||||
)*
|
)*
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -160,34 +181,29 @@ macro_rules! generateQueryParameters {
|
||||||
fn from_query(query: &'a str) -> Result<Self, Error> {
|
fn from_query(query: &'a str) -> Result<Self, Error> {
|
||||||
let mut res: Self = Default::default();
|
let mut res: Self = Default::default();
|
||||||
for (k, v) in url::form_urlencoded::parse(query.as_bytes()) {
|
for (k, v) in url::form_urlencoded::parse(query.as_bytes()) {
|
||||||
let repeated = match k.as_ref() {
|
match k.as_ref() {
|
||||||
$(
|
$(
|
||||||
$rest => if !v.is_empty() {
|
$kw_param => if let Some(prev_kw) = res.keyword.replace(Keyword::$kw_name) {
|
||||||
res.$name.replace(v).is_some()
|
return Err(Error::bad_request(format!(
|
||||||
} else {
|
"Multiple keywords: '{}' and '{}'", prev_kw, $kw_param
|
||||||
false
|
)));
|
||||||
|
},
|
||||||
|
)*
|
||||||
|
$(
|
||||||
|
$f_param => if !v.is_empty() {
|
||||||
|
if res.$f_name.replace(v).is_some() {
|
||||||
|
return Err(Error::bad_request(format!(
|
||||||
|
"Query parameter repeated: '{}'", k
|
||||||
|
)));
|
||||||
|
}
|
||||||
},
|
},
|
||||||
)*
|
)*
|
||||||
_ => {
|
_ => {
|
||||||
if k.starts_with("response-") || k.starts_with("X-Amz-") {
|
if !(k.starts_with("response-") || k.starts_with("X-Amz-")) {
|
||||||
false
|
|
||||||
} else if v.as_ref().is_empty() {
|
|
||||||
if res.keyword.replace(k).is_some() {
|
|
||||||
return Err(Error::bad_request("Multiple keywords"));
|
|
||||||
}
|
|
||||||
continue;
|
|
||||||
} else {
|
|
||||||
debug!("Received an unknown query parameter: '{}'", k);
|
debug!("Received an unknown query parameter: '{}'", k);
|
||||||
false
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
if repeated {
|
|
||||||
return Err(Error::bad_request(format!(
|
|
||||||
"Query parameter repeated: '{}'",
|
|
||||||
k
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
Ok(res)
|
Ok(res)
|
||||||
}
|
}
|
||||||
|
@ -198,8 +214,8 @@ macro_rules! generateQueryParameters {
|
||||||
if self.keyword.is_some() {
|
if self.keyword.is_some() {
|
||||||
Some("Keyword not used")
|
Some("Keyword not used")
|
||||||
} $(
|
} $(
|
||||||
else if self.$name.is_some() {
|
else if self.$f_name.is_some() {
|
||||||
Some(concat!("'", $rest, "'"))
|
Some(concat!("'", $f_param, "'"))
|
||||||
}
|
}
|
||||||
)* else {
|
)* else {
|
||||||
None
|
None
|
||||||
|
|
|
@ -161,6 +161,15 @@ pub async fn handle_create_bucket(
|
||||||
return Err(CommonError::BucketAlreadyExists.into());
|
return Err(CommonError::BucketAlreadyExists.into());
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
// Check user is allowed to create bucket
|
||||||
|
if !key_params.allow_create_bucket.get() {
|
||||||
|
return Err(CommonError::Forbidden(format!(
|
||||||
|
"Access key {} is not allowed to create buckets",
|
||||||
|
api_key.key_id
|
||||||
|
))
|
||||||
|
.into());
|
||||||
|
}
|
||||||
|
|
||||||
// Create the bucket!
|
// Create the bucket!
|
||||||
if !is_valid_bucket_name(&bucket_name) {
|
if !is_valid_bucket_name(&bucket_name) {
|
||||||
return Err(Error::bad_request(format!(
|
return Err(Error::bad_request(format!(
|
||||||
|
|
|
@ -119,6 +119,17 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
|
||||||
return Ok((version_uuid, data_md5sum_hex));
|
return Ok((version_uuid, data_md5sum_hex));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// The following consists in many steps that can each fail.
|
||||||
|
// Keep track that some cleanup will be needed if things fail
|
||||||
|
// before everything is finished (cleanup is done using the Drop trait).
|
||||||
|
let mut interrupted_cleanup = InterruptedCleanup(Some((
|
||||||
|
garage.clone(),
|
||||||
|
bucket.id,
|
||||||
|
key.into(),
|
||||||
|
version_uuid,
|
||||||
|
version_timestamp,
|
||||||
|
)));
|
||||||
|
|
||||||
// Write version identifier in object table so that we have a trace
|
// Write version identifier in object table so that we have a trace
|
||||||
// that we are uploading something
|
// that we are uploading something
|
||||||
let mut object_version = ObjectVersion {
|
let mut object_version = ObjectVersion {
|
||||||
|
@ -139,7 +150,6 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
|
||||||
// Transfer data and verify checksum
|
// Transfer data and verify checksum
|
||||||
let first_block_hash = async_blake2sum(first_block.clone()).await;
|
let first_block_hash = async_blake2sum(first_block.clone()).await;
|
||||||
|
|
||||||
let tx_result = (|| async {
|
|
||||||
let (total_size, data_md5sum, data_sha256sum) = read_and_put_blocks(
|
let (total_size, data_md5sum, data_sha256sum) = read_and_put_blocks(
|
||||||
&garage,
|
&garage,
|
||||||
&version,
|
&version,
|
||||||
|
@ -159,24 +169,8 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
|
||||||
|
|
||||||
check_quotas(&garage, bucket, key, total_size).await?;
|
check_quotas(&garage, bucket, key, total_size).await?;
|
||||||
|
|
||||||
Ok((total_size, data_md5sum))
|
|
||||||
})()
|
|
||||||
.await;
|
|
||||||
|
|
||||||
// If something went wrong, clean up
|
|
||||||
let (total_size, md5sum_arr) = match tx_result {
|
|
||||||
Ok(rv) => rv,
|
|
||||||
Err(e) => {
|
|
||||||
// Mark object as aborted, this will free the blocks further down
|
|
||||||
object_version.state = ObjectVersionState::Aborted;
|
|
||||||
let object = Object::new(bucket.id, key.into(), vec![object_version.clone()]);
|
|
||||||
garage.object_table.insert(&object).await?;
|
|
||||||
return Err(e);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// Save final object state, marked as Complete
|
// Save final object state, marked as Complete
|
||||||
let md5sum_hex = hex::encode(md5sum_arr);
|
let md5sum_hex = hex::encode(data_md5sum);
|
||||||
object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock(
|
object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock(
|
||||||
ObjectVersionMeta {
|
ObjectVersionMeta {
|
||||||
headers,
|
headers,
|
||||||
|
@ -188,6 +182,10 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
|
||||||
let object = Object::new(bucket.id, key.into(), vec![object_version]);
|
let object = Object::new(bucket.id, key.into(), vec![object_version]);
|
||||||
garage.object_table.insert(&object).await?;
|
garage.object_table.insert(&object).await?;
|
||||||
|
|
||||||
|
// We were not interrupted, everything went fine.
|
||||||
|
// We won't have to clean up on drop.
|
||||||
|
interrupted_cleanup.cancel();
|
||||||
|
|
||||||
Ok((version_uuid, md5sum_hex))
|
Ok((version_uuid, md5sum_hex))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -426,6 +424,33 @@ pub fn put_response(version_uuid: Uuid, md5sum_hex: String) -> Response<Body> {
|
||||||
.unwrap()
|
.unwrap()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
struct InterruptedCleanup(Option<(Arc<Garage>, Uuid, String, Uuid, u64)>);
|
||||||
|
|
||||||
|
impl InterruptedCleanup {
|
||||||
|
fn cancel(&mut self) {
|
||||||
|
drop(self.0.take());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
impl Drop for InterruptedCleanup {
|
||||||
|
fn drop(&mut self) {
|
||||||
|
if let Some((garage, bucket_id, key, version_uuid, version_ts)) = self.0.take() {
|
||||||
|
tokio::spawn(async move {
|
||||||
|
let object_version = ObjectVersion {
|
||||||
|
uuid: version_uuid,
|
||||||
|
timestamp: version_ts,
|
||||||
|
state: ObjectVersionState::Aborted,
|
||||||
|
};
|
||||||
|
let object = Object::new(bucket_id, key, vec![object_version]);
|
||||||
|
if let Err(e) = garage.object_table.insert(&object).await {
|
||||||
|
warn!("Cannot cleanup after aborted PutObject: {}", e);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ----
|
||||||
|
|
||||||
pub async fn handle_create_multipart_upload(
|
pub async fn handle_create_multipart_upload(
|
||||||
garage: Arc<Garage>,
|
garage: Arc<Garage>,
|
||||||
req: &Request<Body>,
|
req: &Request<Body>,
|
||||||
|
|
|
@ -355,7 +355,7 @@ impl Endpoint {
|
||||||
fn from_get(key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
fn from_get(key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
||||||
router_match! {
|
router_match! {
|
||||||
@gen_parser
|
@gen_parser
|
||||||
(query.keyword.take().unwrap_or_default().as_ref(), key, query, None),
|
(query.keyword.take().unwrap_or_default(), key, query, None),
|
||||||
key: [
|
key: [
|
||||||
EMPTY if upload_id => ListParts (query::upload_id, opt_parse::max_parts, opt_parse::part_number_marker),
|
EMPTY if upload_id => ListParts (query::upload_id, opt_parse::max_parts, opt_parse::part_number_marker),
|
||||||
EMPTY => GetObject (query_opt::version_id, opt_parse::part_number),
|
EMPTY => GetObject (query_opt::version_id, opt_parse::part_number),
|
||||||
|
@ -412,7 +412,7 @@ impl Endpoint {
|
||||||
fn from_head(key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
fn from_head(key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
||||||
router_match! {
|
router_match! {
|
||||||
@gen_parser
|
@gen_parser
|
||||||
(query.keyword.take().unwrap_or_default().as_ref(), key, query, None),
|
(query.keyword.take().unwrap_or_default(), key, query, None),
|
||||||
key: [
|
key: [
|
||||||
EMPTY => HeadObject(opt_parse::part_number, query_opt::version_id),
|
EMPTY => HeadObject(opt_parse::part_number, query_opt::version_id),
|
||||||
],
|
],
|
||||||
|
@ -426,7 +426,7 @@ impl Endpoint {
|
||||||
fn from_post(key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
fn from_post(key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
||||||
router_match! {
|
router_match! {
|
||||||
@gen_parser
|
@gen_parser
|
||||||
(query.keyword.take().unwrap_or_default().as_ref(), key, query, None),
|
(query.keyword.take().unwrap_or_default(), key, query, None),
|
||||||
key: [
|
key: [
|
||||||
EMPTY if upload_id => CompleteMultipartUpload (query::upload_id),
|
EMPTY if upload_id => CompleteMultipartUpload (query::upload_id),
|
||||||
RESTORE => RestoreObject (query_opt::version_id),
|
RESTORE => RestoreObject (query_opt::version_id),
|
||||||
|
@ -448,7 +448,7 @@ impl Endpoint {
|
||||||
) -> Result<Self, Error> {
|
) -> Result<Self, Error> {
|
||||||
router_match! {
|
router_match! {
|
||||||
@gen_parser
|
@gen_parser
|
||||||
(query.keyword.take().unwrap_or_default().as_ref(), key, query, headers),
|
(query.keyword.take().unwrap_or_default(), key, query, headers),
|
||||||
key: [
|
key: [
|
||||||
EMPTY if part_number header "x-amz-copy-source" => UploadPartCopy (parse::part_number, query::upload_id),
|
EMPTY if part_number header "x-amz-copy-source" => UploadPartCopy (parse::part_number, query::upload_id),
|
||||||
EMPTY header "x-amz-copy-source" => CopyObject,
|
EMPTY header "x-amz-copy-source" => CopyObject,
|
||||||
|
@ -490,7 +490,7 @@ impl Endpoint {
|
||||||
fn from_delete(key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
fn from_delete(key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
||||||
router_match! {
|
router_match! {
|
||||||
@gen_parser
|
@gen_parser
|
||||||
(query.keyword.take().unwrap_or_default().as_ref(), key, query, None),
|
(query.keyword.take().unwrap_or_default(), key, query, None),
|
||||||
key: [
|
key: [
|
||||||
EMPTY if upload_id => AbortMultipartUpload (query::upload_id),
|
EMPTY if upload_id => AbortMultipartUpload (query::upload_id),
|
||||||
EMPTY => DeleteObject (query_opt::version_id),
|
EMPTY => DeleteObject (query_opt::version_id),
|
||||||
|
@ -624,6 +624,39 @@ impl Endpoint {
|
||||||
|
|
||||||
// parameter name => struct field
|
// parameter name => struct field
|
||||||
generateQueryParameters! {
|
generateQueryParameters! {
|
||||||
|
keywords: [
|
||||||
|
"accelerate" => ACCELERATE,
|
||||||
|
"acl" => ACL,
|
||||||
|
"analytics" => ANALYTICS,
|
||||||
|
"cors" => CORS,
|
||||||
|
"delete" => DELETE,
|
||||||
|
"encryption" => ENCRYPTION,
|
||||||
|
"intelligent-tiering" => INTELLIGENT_TIERING,
|
||||||
|
"inventory" => INVENTORY,
|
||||||
|
"legal-hold" => LEGAL_HOLD,
|
||||||
|
"lifecycle" => LIFECYCLE,
|
||||||
|
"location" => LOCATION,
|
||||||
|
"logging" => LOGGING,
|
||||||
|
"metrics" => METRICS,
|
||||||
|
"notification" => NOTIFICATION,
|
||||||
|
"object-lock" => OBJECT_LOCK,
|
||||||
|
"ownershipControls" => OWNERSHIP_CONTROLS,
|
||||||
|
"policy" => POLICY,
|
||||||
|
"policyStatus" => POLICY_STATUS,
|
||||||
|
"publicAccessBlock" => PUBLIC_ACCESS_BLOCK,
|
||||||
|
"replication" => REPLICATION,
|
||||||
|
"requestPayment" => REQUEST_PAYMENT,
|
||||||
|
"restore" => RESTORE,
|
||||||
|
"retention" => RETENTION,
|
||||||
|
"select" => SELECT,
|
||||||
|
"tagging" => TAGGING,
|
||||||
|
"torrent" => TORRENT,
|
||||||
|
"uploads" => UPLOADS,
|
||||||
|
"versioning" => VERSIONING,
|
||||||
|
"versions" => VERSIONS,
|
||||||
|
"website" => WEBSITE
|
||||||
|
],
|
||||||
|
fields: [
|
||||||
"continuation-token" => continuation_token,
|
"continuation-token" => continuation_token,
|
||||||
"delimiter" => delimiter,
|
"delimiter" => delimiter,
|
||||||
"encoding-type" => encoding_type,
|
"encoding-type" => encoding_type,
|
||||||
|
@ -644,43 +677,7 @@ generateQueryParameters! {
|
||||||
"upload-id-marker" => upload_id_marker,
|
"upload-id-marker" => upload_id_marker,
|
||||||
"versionId" => version_id,
|
"versionId" => version_id,
|
||||||
"version-id-marker" => version_id_marker
|
"version-id-marker" => version_id_marker
|
||||||
}
|
]
|
||||||
|
|
||||||
mod keywords {
|
|
||||||
//! This module contain all query parameters with no associated value S3 uses to differentiate
|
|
||||||
//! endpoints.
|
|
||||||
pub const EMPTY: &str = "";
|
|
||||||
|
|
||||||
pub const ACCELERATE: &str = "accelerate";
|
|
||||||
pub const ACL: &str = "acl";
|
|
||||||
pub const ANALYTICS: &str = "analytics";
|
|
||||||
pub const CORS: &str = "cors";
|
|
||||||
pub const DELETE: &str = "delete";
|
|
||||||
pub const ENCRYPTION: &str = "encryption";
|
|
||||||
pub const INTELLIGENT_TIERING: &str = "intelligent-tiering";
|
|
||||||
pub const INVENTORY: &str = "inventory";
|
|
||||||
pub const LEGAL_HOLD: &str = "legal-hold";
|
|
||||||
pub const LIFECYCLE: &str = "lifecycle";
|
|
||||||
pub const LOCATION: &str = "location";
|
|
||||||
pub const LOGGING: &str = "logging";
|
|
||||||
pub const METRICS: &str = "metrics";
|
|
||||||
pub const NOTIFICATION: &str = "notification";
|
|
||||||
pub const OBJECT_LOCK: &str = "object-lock";
|
|
||||||
pub const OWNERSHIP_CONTROLS: &str = "ownershipControls";
|
|
||||||
pub const POLICY: &str = "policy";
|
|
||||||
pub const POLICY_STATUS: &str = "policyStatus";
|
|
||||||
pub const PUBLIC_ACCESS_BLOCK: &str = "publicAccessBlock";
|
|
||||||
pub const REPLICATION: &str = "replication";
|
|
||||||
pub const REQUEST_PAYMENT: &str = "requestPayment";
|
|
||||||
pub const RESTORE: &str = "restore";
|
|
||||||
pub const RETENTION: &str = "retention";
|
|
||||||
pub const SELECT: &str = "select";
|
|
||||||
pub const TAGGING: &str = "tagging";
|
|
||||||
pub const TORRENT: &str = "torrent";
|
|
||||||
pub const UPLOADS: &str = "uploads";
|
|
||||||
pub const VERSIONING: &str = "versioning";
|
|
||||||
pub const VERSIONS: &str = "versions";
|
|
||||||
pub const WEBSITE: &str = "website";
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
[package]
|
[package]
|
||||||
name = "garage_block"
|
name = "garage_block"
|
||||||
version = "0.8.0"
|
version = "0.8.1"
|
||||||
authors = ["Alex Auvolat <alex@adnab.me>"]
|
authors = ["Alex Auvolat <alex@adnab.me>"]
|
||||||
edition = "2018"
|
edition = "2018"
|
||||||
license = "AGPL-3.0"
|
license = "AGPL-3.0"
|
||||||
|
@ -14,10 +14,10 @@ path = "lib.rs"
|
||||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
garage_db = { version = "0.8.0", path = "../db" }
|
garage_db = { version = "0.8.1", path = "../db" }
|
||||||
garage_rpc = { version = "0.8.0", path = "../rpc" }
|
garage_rpc = { version = "0.8.1", path = "../rpc" }
|
||||||
garage_util = { version = "0.8.0", path = "../util" }
|
garage_util = { version = "0.8.1", path = "../util" }
|
||||||
garage_table = { version = "0.8.0", path = "../table" }
|
garage_table = { version = "0.8.1", path = "../table" }
|
||||||
|
|
||||||
opentelemetry = "0.17"
|
opentelemetry = "0.17"
|
||||||
|
|
||||||
|
@ -31,7 +31,6 @@ rand = "0.8"
|
||||||
async-compression = { version = "0.3", features = ["tokio", "zstd"] }
|
async-compression = { version = "0.3", features = ["tokio", "zstd"] }
|
||||||
zstd = { version = "0.9", default-features = false }
|
zstd = { version = "0.9", default-features = false }
|
||||||
|
|
||||||
rmp-serde = "0.15"
|
|
||||||
serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
|
serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
|
||||||
serde_bytes = "0.11"
|
serde_bytes = "0.11"
|
||||||
|
|
||||||
|
|
|
@ -3,8 +3,10 @@ use std::pin::Pin;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use arc_swap::ArcSwapOption;
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use bytes::Bytes;
|
use bytes::Bytes;
|
||||||
|
use rand::prelude::*;
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use futures::Stream;
|
use futures::Stream;
|
||||||
|
@ -22,9 +24,12 @@ use garage_rpc::rpc_helper::netapp::stream::{stream_asyncread, ByteStream};
|
||||||
|
|
||||||
use garage_db as db;
|
use garage_db as db;
|
||||||
|
|
||||||
|
use garage_util::background::{vars, BackgroundRunner};
|
||||||
use garage_util::data::*;
|
use garage_util::data::*;
|
||||||
use garage_util::error::*;
|
use garage_util::error::*;
|
||||||
use garage_util::metrics::RecordDuration;
|
use garage_util::metrics::RecordDuration;
|
||||||
|
use garage_util::persister::PersisterShared;
|
||||||
|
use garage_util::time::msec_to_rfc3339;
|
||||||
|
|
||||||
use garage_rpc::rpc_helper::OrderTag;
|
use garage_rpc::rpc_helper::OrderTag;
|
||||||
use garage_rpc::system::System;
|
use garage_rpc::system::System;
|
||||||
|
@ -87,7 +92,17 @@ pub struct BlockManager {
|
||||||
|
|
||||||
pub(crate) metrics: BlockManagerMetrics,
|
pub(crate) metrics: BlockManagerMetrics,
|
||||||
|
|
||||||
tx_scrub_command: mpsc::Sender<ScrubWorkerCommand>,
|
pub scrub_persister: PersisterShared<ScrubWorkerPersisted>,
|
||||||
|
tx_scrub_command: ArcSwapOption<mpsc::Sender<ScrubWorkerCommand>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||||
|
pub struct BlockResyncErrorInfo {
|
||||||
|
pub hash: Hash,
|
||||||
|
pub refcount: u64,
|
||||||
|
pub error_count: u64,
|
||||||
|
pub last_try: u64,
|
||||||
|
pub next_try: u64,
|
||||||
}
|
}
|
||||||
|
|
||||||
// This custom struct contains functions that must only be ran
|
// This custom struct contains functions that must only be ran
|
||||||
|
@ -114,9 +129,14 @@ impl BlockManager {
|
||||||
.netapp
|
.netapp
|
||||||
.endpoint("garage_block/manager.rs/Rpc".to_string());
|
.endpoint("garage_block/manager.rs/Rpc".to_string());
|
||||||
|
|
||||||
let metrics = BlockManagerMetrics::new(resync.queue.clone(), resync.errors.clone());
|
let metrics = BlockManagerMetrics::new(
|
||||||
|
compression_level,
|
||||||
|
rc.rc.clone(),
|
||||||
|
resync.queue.clone(),
|
||||||
|
resync.errors.clone(),
|
||||||
|
);
|
||||||
|
|
||||||
let (scrub_tx, scrub_rx) = mpsc::channel(1);
|
let scrub_persister = PersisterShared::new(&system.metadata_dir, "scrub_info");
|
||||||
|
|
||||||
let block_manager = Arc::new(Self {
|
let block_manager = Arc::new(Self {
|
||||||
replication,
|
replication,
|
||||||
|
@ -128,21 +148,46 @@ impl BlockManager {
|
||||||
system,
|
system,
|
||||||
endpoint,
|
endpoint,
|
||||||
metrics,
|
metrics,
|
||||||
tx_scrub_command: scrub_tx,
|
scrub_persister,
|
||||||
|
tx_scrub_command: ArcSwapOption::new(None),
|
||||||
});
|
});
|
||||||
block_manager.endpoint.set_handler(block_manager.clone());
|
block_manager.endpoint.set_handler(block_manager.clone());
|
||||||
|
|
||||||
|
block_manager
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn spawn_workers(self: &Arc<Self>, bg: &BackgroundRunner) {
|
||||||
// Spawn a bunch of resync workers
|
// Spawn a bunch of resync workers
|
||||||
for index in 0..MAX_RESYNC_WORKERS {
|
for index in 0..MAX_RESYNC_WORKERS {
|
||||||
let worker = ResyncWorker::new(index, block_manager.clone());
|
let worker = ResyncWorker::new(index, self.clone());
|
||||||
block_manager.system.background.spawn_worker(worker);
|
bg.spawn_worker(worker);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Spawn scrub worker
|
// Spawn scrub worker
|
||||||
let scrub_worker = ScrubWorker::new(block_manager.clone(), scrub_rx);
|
let (scrub_tx, scrub_rx) = mpsc::channel(1);
|
||||||
block_manager.system.background.spawn_worker(scrub_worker);
|
self.tx_scrub_command.store(Some(Arc::new(scrub_tx)));
|
||||||
|
bg.spawn_worker(ScrubWorker::new(
|
||||||
|
self.clone(),
|
||||||
|
scrub_rx,
|
||||||
|
self.scrub_persister.clone(),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
block_manager
|
pub fn register_bg_vars(&self, vars: &mut vars::BgVars) {
|
||||||
|
self.resync.register_bg_vars(vars);
|
||||||
|
|
||||||
|
vars.register_rw(
|
||||||
|
&self.scrub_persister,
|
||||||
|
"scrub-tranquility",
|
||||||
|
|p| p.get_with(|x| x.tranquility),
|
||||||
|
|p, tranquility| p.set_with(|x| x.tranquility = tranquility),
|
||||||
|
);
|
||||||
|
vars.register_ro(&self.scrub_persister, "scrub-last-completed", |p| {
|
||||||
|
p.get_with(|x| msec_to_rfc3339(x.time_last_complete_scrub))
|
||||||
|
});
|
||||||
|
vars.register_ro(&self.scrub_persister, "scrub-corruptions_detected", |p| {
|
||||||
|
p.get_with(|x| x.corruptions_detected)
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Ask nodes that might have a (possibly compressed) block for it
|
/// Ask nodes that might have a (possibly compressed) block for it
|
||||||
|
@ -309,9 +354,42 @@ impl BlockManager {
|
||||||
Ok(self.rc.rc.len()?)
|
Ok(self.rc.rc.len()?)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Get number of items in the refcount table
|
||||||
|
pub fn rc_fast_len(&self) -> Result<Option<usize>, Error> {
|
||||||
|
Ok(self.rc.rc.fast_len()?)
|
||||||
|
}
|
||||||
|
|
||||||
/// Send command to start/stop/manager scrub worker
|
/// Send command to start/stop/manager scrub worker
|
||||||
pub async fn send_scrub_command(&self, cmd: ScrubWorkerCommand) {
|
pub async fn send_scrub_command(&self, cmd: ScrubWorkerCommand) -> Result<(), Error> {
|
||||||
let _ = self.tx_scrub_command.send(cmd).await;
|
let tx = self.tx_scrub_command.load();
|
||||||
|
let tx = tx.as_ref().ok_or_message("scrub worker is not running")?;
|
||||||
|
tx.send(cmd).await.ok_or_message("send error")?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the reference count of a block
|
||||||
|
pub fn get_block_rc(&self, hash: &Hash) -> Result<u64, Error> {
|
||||||
|
Ok(self.rc.get_block_rc(hash)?.as_u64())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// List all resync errors
|
||||||
|
pub fn list_resync_errors(&self) -> Result<Vec<BlockResyncErrorInfo>, Error> {
|
||||||
|
let mut blocks = Vec::with_capacity(self.resync.errors.len());
|
||||||
|
for ent in self.resync.errors.iter()? {
|
||||||
|
let (hash, cnt) = ent?;
|
||||||
|
let cnt = ErrorCounter::decode(&cnt);
|
||||||
|
blocks.push(BlockResyncErrorInfo {
|
||||||
|
hash: Hash::try_from(&hash).unwrap(),
|
||||||
|
refcount: 0,
|
||||||
|
error_count: cnt.errors,
|
||||||
|
last_try: cnt.last_try,
|
||||||
|
next_try: cnt.next_try(),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
for block in blocks.iter_mut() {
|
||||||
|
block.refcount = self.get_block_rc(&block.hash)?;
|
||||||
|
}
|
||||||
|
Ok(blocks)
|
||||||
}
|
}
|
||||||
|
|
||||||
//// ----- Managing the reference counter ----
|
//// ----- Managing the reference counter ----
|
||||||
|
@ -603,14 +681,21 @@ impl BlockManagerLocked {
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
let mut path2 = path.clone();
|
let mut path_tmp = path.clone();
|
||||||
path2.set_extension("tmp");
|
let tmp_extension = format!("tmp{}", hex::encode(thread_rng().gen::<[u8; 4]>()));
|
||||||
let mut f = fs::File::create(&path2).await?;
|
path_tmp.set_extension(tmp_extension);
|
||||||
|
|
||||||
|
let mut delete_on_drop = DeleteOnDrop(Some(path_tmp.clone()));
|
||||||
|
|
||||||
|
let mut f = fs::File::create(&path_tmp).await?;
|
||||||
f.write_all(data).await?;
|
f.write_all(data).await?;
|
||||||
f.sync_all().await?;
|
f.sync_all().await?;
|
||||||
drop(f);
|
drop(f);
|
||||||
|
|
||||||
fs::rename(path2, path).await?;
|
fs::rename(path_tmp, path).await?;
|
||||||
|
|
||||||
|
delete_on_drop.cancel();
|
||||||
|
|
||||||
if let Some(to_delete) = to_delete {
|
if let Some(to_delete) = to_delete {
|
||||||
fs::remove_file(to_delete).await?;
|
fs::remove_file(to_delete).await?;
|
||||||
}
|
}
|
||||||
|
@ -676,3 +761,23 @@ async fn read_stream_to_end(mut stream: ByteStream) -> Result<Bytes, Error> {
|
||||||
.concat()
|
.concat()
|
||||||
.into())
|
.into())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
struct DeleteOnDrop(Option<PathBuf>);
|
||||||
|
|
||||||
|
impl DeleteOnDrop {
|
||||||
|
fn cancel(&mut self) {
|
||||||
|
drop(self.0.take());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Drop for DeleteOnDrop {
|
||||||
|
fn drop(&mut self) {
|
||||||
|
if let Some(path) = self.0.take() {
|
||||||
|
tokio::spawn(async move {
|
||||||
|
if let Err(e) = fs::remove_file(&path).await {
|
||||||
|
debug!("DeleteOnDrop failed for {}: {}", path.display(), e);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
@ -1,9 +1,12 @@
|
||||||
use opentelemetry::{global, metrics::*};
|
use opentelemetry::{global, metrics::*};
|
||||||
|
|
||||||
|
use garage_db as db;
|
||||||
use garage_db::counted_tree_hack::CountedTree;
|
use garage_db::counted_tree_hack::CountedTree;
|
||||||
|
|
||||||
/// TableMetrics reference all counter used for metrics
|
/// TableMetrics reference all counter used for metrics
|
||||||
pub struct BlockManagerMetrics {
|
pub struct BlockManagerMetrics {
|
||||||
|
pub(crate) _compression_level: ValueObserver<u64>,
|
||||||
|
pub(crate) _rc_size: ValueObserver<u64>,
|
||||||
pub(crate) _resync_queue_len: ValueObserver<u64>,
|
pub(crate) _resync_queue_len: ValueObserver<u64>,
|
||||||
pub(crate) _resync_errored_blocks: ValueObserver<u64>,
|
pub(crate) _resync_errored_blocks: ValueObserver<u64>,
|
||||||
|
|
||||||
|
@ -23,9 +26,31 @@ pub struct BlockManagerMetrics {
|
||||||
}
|
}
|
||||||
|
|
||||||
impl BlockManagerMetrics {
|
impl BlockManagerMetrics {
|
||||||
pub fn new(resync_queue: CountedTree, resync_errors: CountedTree) -> Self {
|
pub fn new(
|
||||||
|
compression_level: Option<i32>,
|
||||||
|
rc_tree: db::Tree,
|
||||||
|
resync_queue: CountedTree,
|
||||||
|
resync_errors: CountedTree,
|
||||||
|
) -> Self {
|
||||||
let meter = global::meter("garage_model/block");
|
let meter = global::meter("garage_model/block");
|
||||||
Self {
|
Self {
|
||||||
|
_compression_level: meter
|
||||||
|
.u64_value_observer("block.compression_level", move |observer| {
|
||||||
|
match compression_level {
|
||||||
|
Some(v) => observer.observe(v as u64, &[]),
|
||||||
|
None => observer.observe(0 as u64, &[]),
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.with_description("Garage compression level for node")
|
||||||
|
.init(),
|
||||||
|
_rc_size: meter
|
||||||
|
.u64_value_observer("block.rc_size", move |observer| {
|
||||||
|
if let Ok(Some(v)) = rc_tree.fast_len() {
|
||||||
|
observer.observe(v as u64, &[])
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.with_description("Number of blocks known to the reference counter")
|
||||||
|
.init(),
|
||||||
_resync_queue_len: meter
|
_resync_queue_len: meter
|
||||||
.u64_value_observer("block.resync_queue_length", move |observer| {
|
.u64_value_observer("block.resync_queue_length", move |observer| {
|
||||||
observer.observe(resync_queue.len() as u64, &[])
|
observer.observe(resync_queue.len() as u64, &[])
|
||||||
|
|
|
@ -169,4 +169,11 @@ impl RcEntry {
|
||||||
pub(crate) fn is_needed(&self) -> bool {
|
pub(crate) fn is_needed(&self) -> bool {
|
||||||
!self.is_deletable()
|
!self.is_deletable()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub(crate) fn as_u64(&self) -> u64 {
|
||||||
|
match self {
|
||||||
|
RcEntry::Present { count } => *count,
|
||||||
|
_ => 0,
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -13,7 +13,7 @@ use tokio::sync::watch;
|
||||||
use garage_util::background::*;
|
use garage_util::background::*;
|
||||||
use garage_util::data::*;
|
use garage_util::data::*;
|
||||||
use garage_util::error::*;
|
use garage_util::error::*;
|
||||||
use garage_util::persister::Persister;
|
use garage_util::persister::PersisterShared;
|
||||||
use garage_util::time::*;
|
use garage_util::time::*;
|
||||||
use garage_util::tranquilizer::Tranquilizer;
|
use garage_util::tranquilizer::Tranquilizer;
|
||||||
|
|
||||||
|
@ -53,7 +53,7 @@ impl Worker for RepairWorker {
|
||||||
"Block repair worker".into()
|
"Block repair worker".into()
|
||||||
}
|
}
|
||||||
|
|
||||||
fn info(&self) -> Option<String> {
|
fn status(&self) -> WorkerStatus {
|
||||||
match self.block_iter.as_ref() {
|
match self.block_iter.as_ref() {
|
||||||
None => {
|
None => {
|
||||||
let idx_bytes = self
|
let idx_bytes = self
|
||||||
|
@ -66,9 +66,20 @@ impl Worker for RepairWorker {
|
||||||
} else {
|
} else {
|
||||||
idx_bytes
|
idx_bytes
|
||||||
};
|
};
|
||||||
Some(format!("Phase 1: {}", hex::encode(idx_bytes)))
|
WorkerStatus {
|
||||||
|
progress: Some("0.00%".into()),
|
||||||
|
freeform: vec![format!(
|
||||||
|
"Currently in phase 1, iterator position: {}",
|
||||||
|
hex::encode(idx_bytes)
|
||||||
|
)],
|
||||||
|
..Default::default()
|
||||||
}
|
}
|
||||||
Some(bi) => Some(format!("Phase 2: {:.2}% done", bi.progress() * 100.)),
|
}
|
||||||
|
Some(bi) => WorkerStatus {
|
||||||
|
progress: Some(format!("{:.2}%", bi.progress() * 100.)),
|
||||||
|
freeform: vec!["Currently in phase 2".into()],
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -137,7 +148,7 @@ impl Worker for RepairWorker {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn wait_for_work(&mut self, _must_exit: &watch::Receiver<bool>) -> WorkerState {
|
async fn wait_for_work(&mut self) -> WorkerState {
|
||||||
unreachable!()
|
unreachable!()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -157,15 +168,24 @@ pub struct ScrubWorker {
|
||||||
work: ScrubWorkerState,
|
work: ScrubWorkerState,
|
||||||
tranquilizer: Tranquilizer,
|
tranquilizer: Tranquilizer,
|
||||||
|
|
||||||
persister: Persister<ScrubWorkerPersisted>,
|
persister: PersisterShared<ScrubWorkerPersisted>,
|
||||||
persisted: ScrubWorkerPersisted,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize)]
|
#[derive(Serialize, Deserialize)]
|
||||||
struct ScrubWorkerPersisted {
|
pub struct ScrubWorkerPersisted {
|
||||||
tranquility: u32,
|
pub tranquility: u32,
|
||||||
time_last_complete_scrub: u64,
|
pub(crate) time_last_complete_scrub: u64,
|
||||||
corruptions_detected: u64,
|
pub(crate) corruptions_detected: u64,
|
||||||
|
}
|
||||||
|
impl garage_util::migrate::InitialFormat for ScrubWorkerPersisted {}
|
||||||
|
impl Default for ScrubWorkerPersisted {
|
||||||
|
fn default() -> Self {
|
||||||
|
ScrubWorkerPersisted {
|
||||||
|
time_last_complete_scrub: 0,
|
||||||
|
tranquility: INITIAL_SCRUB_TRANQUILITY,
|
||||||
|
corruptions_detected: 0,
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
enum ScrubWorkerState {
|
enum ScrubWorkerState {
|
||||||
|
@ -186,27 +206,20 @@ pub enum ScrubWorkerCommand {
|
||||||
Pause(Duration),
|
Pause(Duration),
|
||||||
Resume,
|
Resume,
|
||||||
Cancel,
|
Cancel,
|
||||||
SetTranquility(u32),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ScrubWorker {
|
impl ScrubWorker {
|
||||||
pub fn new(manager: Arc<BlockManager>, rx_cmd: mpsc::Receiver<ScrubWorkerCommand>) -> Self {
|
pub(crate) fn new(
|
||||||
let persister = Persister::new(&manager.system.metadata_dir, "scrub_info");
|
manager: Arc<BlockManager>,
|
||||||
let persisted = match persister.load() {
|
rx_cmd: mpsc::Receiver<ScrubWorkerCommand>,
|
||||||
Ok(v) => v,
|
persister: PersisterShared<ScrubWorkerPersisted>,
|
||||||
Err(_) => ScrubWorkerPersisted {
|
) -> Self {
|
||||||
time_last_complete_scrub: 0,
|
|
||||||
tranquility: INITIAL_SCRUB_TRANQUILITY,
|
|
||||||
corruptions_detected: 0,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
Self {
|
Self {
|
||||||
manager,
|
manager,
|
||||||
rx_cmd,
|
rx_cmd,
|
||||||
work: ScrubWorkerState::Finished,
|
work: ScrubWorkerState::Finished,
|
||||||
tranquilizer: Tranquilizer::new(30),
|
tranquilizer: Tranquilizer::new(30),
|
||||||
persister,
|
persister,
|
||||||
persisted,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -255,12 +268,6 @@ impl ScrubWorker {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
ScrubWorkerCommand::SetTranquility(t) => {
|
|
||||||
self.persisted.tranquility = t;
|
|
||||||
if let Err(e) = self.persister.save_async(&self.persisted).await {
|
|
||||||
error!("Could not save new tranquilitiy value: {}", e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -271,29 +278,37 @@ impl Worker for ScrubWorker {
|
||||||
"Block scrub worker".into()
|
"Block scrub worker".into()
|
||||||
}
|
}
|
||||||
|
|
||||||
fn info(&self) -> Option<String> {
|
fn status(&self) -> WorkerStatus {
|
||||||
let s = match &self.work {
|
let (corruptions_detected, tranquility, time_last_complete_scrub) =
|
||||||
ScrubWorkerState::Running(bsi) => format!(
|
self.persister.get_with(|p| {
|
||||||
"{:.2}% done (tranquility = {})",
|
(
|
||||||
bsi.progress() * 100.,
|
p.corruptions_detected,
|
||||||
self.persisted.tranquility
|
p.tranquility,
|
||||||
),
|
p.time_last_complete_scrub,
|
||||||
ScrubWorkerState::Paused(bsi, rt) => {
|
|
||||||
format!(
|
|
||||||
"Paused, {:.2}% done, resumes at {}",
|
|
||||||
bsi.progress() * 100.,
|
|
||||||
msec_to_rfc3339(*rt)
|
|
||||||
)
|
)
|
||||||
}
|
});
|
||||||
ScrubWorkerState::Finished => format!(
|
|
||||||
"Last completed scrub: {}",
|
let mut s = WorkerStatus {
|
||||||
msec_to_rfc3339(self.persisted.time_last_complete_scrub)
|
persistent_errors: Some(corruptions_detected),
|
||||||
),
|
tranquility: Some(tranquility),
|
||||||
|
..Default::default()
|
||||||
};
|
};
|
||||||
Some(format!(
|
match &self.work {
|
||||||
"{} ; corruptions detected: {}",
|
ScrubWorkerState::Running(bsi) => {
|
||||||
s, self.persisted.corruptions_detected
|
s.progress = Some(format!("{:.2}%", bsi.progress() * 100.));
|
||||||
))
|
}
|
||||||
|
ScrubWorkerState::Paused(bsi, rt) => {
|
||||||
|
s.progress = Some(format!("{:.2}%", bsi.progress() * 100.));
|
||||||
|
s.freeform = vec![format!("Scrub paused, resumes at {}", msec_to_rfc3339(*rt))];
|
||||||
|
}
|
||||||
|
ScrubWorkerState::Finished => {
|
||||||
|
s.freeform = vec![format!(
|
||||||
|
"Last scrub completed at {}",
|
||||||
|
msec_to_rfc3339(time_last_complete_scrub)
|
||||||
|
)];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
s
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
|
async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
|
||||||
|
@ -310,18 +325,17 @@ impl Worker for ScrubWorker {
|
||||||
match self.manager.read_block(&hash).await {
|
match self.manager.read_block(&hash).await {
|
||||||
Err(Error::CorruptData(_)) => {
|
Err(Error::CorruptData(_)) => {
|
||||||
error!("Found corrupt data block during scrub: {:?}", hash);
|
error!("Found corrupt data block during scrub: {:?}", hash);
|
||||||
self.persisted.corruptions_detected += 1;
|
self.persister.set_with(|p| p.corruptions_detected += 1)?;
|
||||||
self.persister.save_async(&self.persisted).await?;
|
|
||||||
}
|
}
|
||||||
Err(e) => return Err(e),
|
Err(e) => return Err(e),
|
||||||
_ => (),
|
_ => (),
|
||||||
};
|
};
|
||||||
Ok(self
|
Ok(self
|
||||||
.tranquilizer
|
.tranquilizer
|
||||||
.tranquilize_worker(self.persisted.tranquility))
|
.tranquilize_worker(self.persister.get_with(|p| p.tranquility)))
|
||||||
} else {
|
} else {
|
||||||
self.persisted.time_last_complete_scrub = now_msec();
|
self.persister
|
||||||
self.persister.save_async(&self.persisted).await?;
|
.set_with(|p| p.time_last_complete_scrub = now_msec())?;
|
||||||
self.work = ScrubWorkerState::Finished;
|
self.work = ScrubWorkerState::Finished;
|
||||||
self.tranquilizer.clear();
|
self.tranquilizer.clear();
|
||||||
Ok(WorkerState::Idle)
|
Ok(WorkerState::Idle)
|
||||||
|
@ -331,12 +345,13 @@ impl Worker for ScrubWorker {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn wait_for_work(&mut self, _must_exit: &watch::Receiver<bool>) -> WorkerState {
|
async fn wait_for_work(&mut self) -> WorkerState {
|
||||||
let (wait_until, command) = match &self.work {
|
let (wait_until, command) = match &self.work {
|
||||||
ScrubWorkerState::Running(_) => return WorkerState::Busy,
|
ScrubWorkerState::Running(_) => return WorkerState::Busy,
|
||||||
ScrubWorkerState::Paused(_, resume_time) => (*resume_time, ScrubWorkerCommand::Resume),
|
ScrubWorkerState::Paused(_, resume_time) => (*resume_time, ScrubWorkerCommand::Resume),
|
||||||
ScrubWorkerState::Finished => (
|
ScrubWorkerState::Finished => (
|
||||||
self.persisted.time_last_complete_scrub + SCRUB_INTERVAL.as_millis() as u64,
|
self.persister.get_with(|p| p.time_last_complete_scrub)
|
||||||
|
+ SCRUB_INTERVAL.as_millis() as u64,
|
||||||
ScrubWorkerCommand::Start,
|
ScrubWorkerCommand::Start,
|
||||||
),
|
),
|
||||||
};
|
};
|
||||||
|
|
|
@ -3,7 +3,6 @@ use std::convert::TryInto;
|
||||||
use std::sync::{Arc, Mutex};
|
use std::sync::{Arc, Mutex};
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
|
|
||||||
use arc_swap::ArcSwap;
|
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
@ -22,7 +21,7 @@ use garage_util::background::*;
|
||||||
use garage_util::data::*;
|
use garage_util::data::*;
|
||||||
use garage_util::error::*;
|
use garage_util::error::*;
|
||||||
use garage_util::metrics::RecordDuration;
|
use garage_util::metrics::RecordDuration;
|
||||||
use garage_util::persister::Persister;
|
use garage_util::persister::PersisterShared;
|
||||||
use garage_util::time::*;
|
use garage_util::time::*;
|
||||||
use garage_util::tranquilizer::Tranquilizer;
|
use garage_util::tranquilizer::Tranquilizer;
|
||||||
|
|
||||||
|
@ -49,13 +48,12 @@ const INITIAL_RESYNC_TRANQUILITY: u32 = 2;
|
||||||
|
|
||||||
pub struct BlockResyncManager {
|
pub struct BlockResyncManager {
|
||||||
pub(crate) queue: CountedTree,
|
pub(crate) queue: CountedTree,
|
||||||
pub(crate) notify: Notify,
|
pub(crate) notify: Arc<Notify>,
|
||||||
pub(crate) errors: CountedTree,
|
pub(crate) errors: CountedTree,
|
||||||
|
|
||||||
busy_set: BusySet,
|
busy_set: BusySet,
|
||||||
|
|
||||||
persister: Persister<ResyncPersistedConfig>,
|
persister: PersisterShared<ResyncPersistedConfig>,
|
||||||
persisted: ArcSwap<ResyncPersistedConfig>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, Clone, Copy)]
|
#[derive(Serialize, Deserialize, Clone, Copy)]
|
||||||
|
@ -63,6 +61,15 @@ struct ResyncPersistedConfig {
|
||||||
n_workers: usize,
|
n_workers: usize,
|
||||||
tranquility: u32,
|
tranquility: u32,
|
||||||
}
|
}
|
||||||
|
impl garage_util::migrate::InitialFormat for ResyncPersistedConfig {}
|
||||||
|
impl Default for ResyncPersistedConfig {
|
||||||
|
fn default() -> Self {
|
||||||
|
ResyncPersistedConfig {
|
||||||
|
n_workers: 1,
|
||||||
|
tranquility: INITIAL_RESYNC_TRANQUILITY,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
enum ResyncIterResult {
|
enum ResyncIterResult {
|
||||||
BusyDidSomething,
|
BusyDidSomething,
|
||||||
|
@ -90,22 +97,14 @@ impl BlockResyncManager {
|
||||||
.expect("Unable to open block_local_resync_errors tree");
|
.expect("Unable to open block_local_resync_errors tree");
|
||||||
let errors = CountedTree::new(errors).expect("Could not count block_local_resync_errors");
|
let errors = CountedTree::new(errors).expect("Could not count block_local_resync_errors");
|
||||||
|
|
||||||
let persister = Persister::new(&system.metadata_dir, "resync_cfg");
|
let persister = PersisterShared::new(&system.metadata_dir, "resync_cfg");
|
||||||
let persisted = match persister.load() {
|
|
||||||
Ok(v) => v,
|
|
||||||
Err(_) => ResyncPersistedConfig {
|
|
||||||
n_workers: 1,
|
|
||||||
tranquility: INITIAL_RESYNC_TRANQUILITY,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
Self {
|
Self {
|
||||||
queue,
|
queue,
|
||||||
notify: Notify::new(),
|
notify: Arc::new(Notify::new()),
|
||||||
errors,
|
errors,
|
||||||
busy_set: Arc::new(Mutex::new(HashSet::new())),
|
busy_set: Arc::new(Mutex::new(HashSet::new())),
|
||||||
persister,
|
persister,
|
||||||
persisted: ArcSwap::new(Arc::new(persisted)),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -123,6 +122,56 @@ impl BlockResyncManager {
|
||||||
Ok(self.errors.len())
|
Ok(self.errors.len())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Clear the error counter for a block and put it in queue immediately
|
||||||
|
pub fn clear_backoff(&self, hash: &Hash) -> Result<(), Error> {
|
||||||
|
let now = now_msec();
|
||||||
|
if let Some(ec) = self.errors.get(hash)? {
|
||||||
|
let mut ec = ErrorCounter::decode(&ec);
|
||||||
|
if ec.errors > 0 {
|
||||||
|
ec.last_try = now - ec.delay_msec();
|
||||||
|
self.errors.insert(hash, ec.encode())?;
|
||||||
|
self.put_to_resync_at(hash, now)?;
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(Error::Message(format!(
|
||||||
|
"Block {:?} was not in an errored state",
|
||||||
|
hash
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn register_bg_vars(&self, vars: &mut vars::BgVars) {
|
||||||
|
let notify = self.notify.clone();
|
||||||
|
vars.register_rw(
|
||||||
|
&self.persister,
|
||||||
|
"resync-worker-count",
|
||||||
|
|p| p.get_with(|x| x.n_workers),
|
||||||
|
move |p, n_workers| {
|
||||||
|
if !(1..=MAX_RESYNC_WORKERS).contains(&n_workers) {
|
||||||
|
return Err(Error::Message(format!(
|
||||||
|
"Invalid number of resync workers, must be between 1 and {}",
|
||||||
|
MAX_RESYNC_WORKERS
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
p.set_with(|x| x.n_workers = n_workers)?;
|
||||||
|
notify.notify_waiters();
|
||||||
|
Ok(())
|
||||||
|
},
|
||||||
|
);
|
||||||
|
|
||||||
|
let notify = self.notify.clone();
|
||||||
|
vars.register_rw(
|
||||||
|
&self.persister,
|
||||||
|
"resync-tranquility",
|
||||||
|
|p| p.get_with(|x| x.tranquility),
|
||||||
|
move |p, tranquility| {
|
||||||
|
p.set_with(|x| x.tranquility = tranquility)?;
|
||||||
|
notify.notify_waiters();
|
||||||
|
Ok(())
|
||||||
|
},
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
// ---- Resync loop ----
|
// ---- Resync loop ----
|
||||||
|
|
||||||
// This part manages a queue of blocks that need to be
|
// This part manages a queue of blocks that need to be
|
||||||
|
@ -257,7 +306,7 @@ impl BlockResyncManager {
|
||||||
|
|
||||||
if let Err(e) = &res {
|
if let Err(e) = &res {
|
||||||
manager.metrics.resync_error_counter.add(1);
|
manager.metrics.resync_error_counter.add(1);
|
||||||
warn!("Error when resyncing {:?}: {}", hash, e);
|
error!("Error when resyncing {:?}: {}", hash, e);
|
||||||
|
|
||||||
let err_counter = match self.errors.get(hash.as_slice())? {
|
let err_counter = match self.errors.get(hash.as_slice())? {
|
||||||
Some(ec) => ErrorCounter::decode(&ec).add1(now + 1),
|
Some(ec) => ErrorCounter::decode(&ec).add1(now + 1),
|
||||||
|
@ -417,33 +466,6 @@ impl BlockResyncManager {
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn update_persisted(
|
|
||||||
&self,
|
|
||||||
update: impl Fn(&mut ResyncPersistedConfig),
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
let mut cfg: ResyncPersistedConfig = *self.persisted.load().as_ref();
|
|
||||||
update(&mut cfg);
|
|
||||||
self.persister.save_async(&cfg).await?;
|
|
||||||
self.persisted.store(Arc::new(cfg));
|
|
||||||
self.notify.notify_waiters();
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn set_n_workers(&self, n_workers: usize) -> Result<(), Error> {
|
|
||||||
if !(1..=MAX_RESYNC_WORKERS).contains(&n_workers) {
|
|
||||||
return Err(Error::Message(format!(
|
|
||||||
"Invalid number of resync workers, must be between 1 and {}",
|
|
||||||
MAX_RESYNC_WORKERS
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
self.update_persisted(|cfg| cfg.n_workers = n_workers).await
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn set_tranquility(&self, tranquility: u32) -> Result<(), Error> {
|
|
||||||
self.update_persisted(|cfg| cfg.tranquility = tranquility)
|
|
||||||
.await
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Drop for BusyBlock {
|
impl Drop for BusyBlock {
|
||||||
|
@ -458,15 +480,18 @@ pub(crate) struct ResyncWorker {
|
||||||
manager: Arc<BlockManager>,
|
manager: Arc<BlockManager>,
|
||||||
tranquilizer: Tranquilizer,
|
tranquilizer: Tranquilizer,
|
||||||
next_delay: Duration,
|
next_delay: Duration,
|
||||||
|
persister: PersisterShared<ResyncPersistedConfig>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ResyncWorker {
|
impl ResyncWorker {
|
||||||
pub(crate) fn new(index: usize, manager: Arc<BlockManager>) -> Self {
|
pub(crate) fn new(index: usize, manager: Arc<BlockManager>) -> Self {
|
||||||
|
let persister = manager.resync.persister.clone();
|
||||||
Self {
|
Self {
|
||||||
index,
|
index,
|
||||||
manager,
|
manager,
|
||||||
tranquilizer: Tranquilizer::new(30),
|
tranquilizer: Tranquilizer::new(30),
|
||||||
next_delay: Duration::from_secs(10),
|
next_delay: Duration::from_secs(10),
|
||||||
|
persister,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -477,39 +502,36 @@ impl Worker for ResyncWorker {
|
||||||
format!("Block resync worker #{}", self.index + 1)
|
format!("Block resync worker #{}", self.index + 1)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn info(&self) -> Option<String> {
|
fn status(&self) -> WorkerStatus {
|
||||||
let persisted = self.manager.resync.persisted.load();
|
let (n_workers, tranquility) = self.persister.get_with(|x| (x.n_workers, x.tranquility));
|
||||||
|
|
||||||
if self.index >= persisted.n_workers {
|
if self.index >= n_workers {
|
||||||
return Some("(unused)".into());
|
return WorkerStatus {
|
||||||
|
freeform: vec!["This worker is currently disabled".into()],
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut ret = vec![];
|
WorkerStatus {
|
||||||
ret.push(format!("tranquility = {}", persisted.tranquility));
|
queue_length: Some(self.manager.resync.queue_len().unwrap_or(0) as u64),
|
||||||
|
tranquility: Some(tranquility),
|
||||||
let qlen = self.manager.resync.queue_len().unwrap_or(0);
|
persistent_errors: Some(self.manager.resync.errors_len().unwrap_or(0) as u64),
|
||||||
if qlen > 0 {
|
..Default::default()
|
||||||
ret.push(format!("{} blocks in queue", qlen));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
let elen = self.manager.resync.errors_len().unwrap_or(0);
|
|
||||||
if elen > 0 {
|
|
||||||
ret.push(format!("{} blocks in error state", elen));
|
|
||||||
}
|
|
||||||
|
|
||||||
Some(ret.join(", "))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
|
async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
|
||||||
if self.index >= self.manager.resync.persisted.load().n_workers {
|
let (n_workers, tranquility) = self.persister.get_with(|x| (x.n_workers, x.tranquility));
|
||||||
|
|
||||||
|
if self.index >= n_workers {
|
||||||
return Ok(WorkerState::Idle);
|
return Ok(WorkerState::Idle);
|
||||||
}
|
}
|
||||||
|
|
||||||
self.tranquilizer.reset();
|
self.tranquilizer.reset();
|
||||||
match self.manager.resync.resync_iter(&self.manager).await {
|
match self.manager.resync.resync_iter(&self.manager).await {
|
||||||
Ok(ResyncIterResult::BusyDidSomething) => Ok(self
|
Ok(ResyncIterResult::BusyDidSomething) => {
|
||||||
.tranquilizer
|
Ok(self.tranquilizer.tranquilize_worker(tranquility))
|
||||||
.tranquilize_worker(self.manager.resync.persisted.load().tranquility)),
|
}
|
||||||
Ok(ResyncIterResult::BusyDidNothing) => Ok(WorkerState::Busy),
|
Ok(ResyncIterResult::BusyDidNothing) => Ok(WorkerState::Busy),
|
||||||
Ok(ResyncIterResult::IdleFor(delay)) => {
|
Ok(ResyncIterResult::IdleFor(delay)) => {
|
||||||
self.next_delay = delay;
|
self.next_delay = delay;
|
||||||
|
@ -527,8 +549,8 @@ impl Worker for ResyncWorker {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn wait_for_work(&mut self, _must_exit: &watch::Receiver<bool>) -> WorkerState {
|
async fn wait_for_work(&mut self) -> WorkerState {
|
||||||
while self.index >= self.manager.resync.persisted.load().n_workers {
|
while self.index >= self.persister.get_with(|x| x.n_workers) {
|
||||||
self.manager.resync.notify.notified().await
|
self.manager.resync.notify.notified().await
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -545,9 +567,9 @@ impl Worker for ResyncWorker {
|
||||||
/// and the time of the last try.
|
/// and the time of the last try.
|
||||||
/// Used to implement exponential backoff.
|
/// Used to implement exponential backoff.
|
||||||
#[derive(Clone, Copy, Debug)]
|
#[derive(Clone, Copy, Debug)]
|
||||||
struct ErrorCounter {
|
pub(crate) struct ErrorCounter {
|
||||||
errors: u64,
|
pub(crate) errors: u64,
|
||||||
last_try: u64,
|
pub(crate) last_try: u64,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ErrorCounter {
|
impl ErrorCounter {
|
||||||
|
@ -558,12 +580,13 @@ impl ErrorCounter {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn decode(data: &[u8]) -> Self {
|
pub(crate) fn decode(data: &[u8]) -> Self {
|
||||||
Self {
|
Self {
|
||||||
errors: u64::from_be_bytes(data[0..8].try_into().unwrap()),
|
errors: u64::from_be_bytes(data[0..8].try_into().unwrap()),
|
||||||
last_try: u64::from_be_bytes(data[8..16].try_into().unwrap()),
|
last_try: u64::from_be_bytes(data[8..16].try_into().unwrap()),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn encode(&self) -> Vec<u8> {
|
fn encode(&self) -> Vec<u8> {
|
||||||
[
|
[
|
||||||
u64::to_be_bytes(self.errors),
|
u64::to_be_bytes(self.errors),
|
||||||
|
@ -583,7 +606,8 @@ impl ErrorCounter {
|
||||||
(RESYNC_RETRY_DELAY.as_millis() as u64)
|
(RESYNC_RETRY_DELAY.as_millis() as u64)
|
||||||
<< std::cmp::min(self.errors - 1, RESYNC_RETRY_DELAY_MAX_BACKOFF_POWER)
|
<< std::cmp::min(self.errors - 1, RESYNC_RETRY_DELAY_MAX_BACKOFF_POWER)
|
||||||
}
|
}
|
||||||
fn next_try(&self) -> u64 {
|
|
||||||
|
pub(crate) fn next_try(&self) -> u64 {
|
||||||
self.last_try + self.delay_msec()
|
self.last_try + self.delay_msec()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
[package]
|
[package]
|
||||||
name = "garage_db"
|
name = "garage_db"
|
||||||
version = "0.8.0"
|
version = "0.8.1"
|
||||||
authors = ["Alex Auvolat <alex@adnab.me>"]
|
authors = ["Alex Auvolat <alex@adnab.me>"]
|
||||||
edition = "2018"
|
edition = "2018"
|
||||||
license = "AGPL-3.0"
|
license = "AGPL-3.0"
|
||||||
|
@ -33,6 +33,7 @@ pretty_env_logger = { version = "0.4", optional = true }
|
||||||
mktemp = "0.4"
|
mktemp = "0.4"
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
|
default = [ "sled" ]
|
||||||
bundled-libs = [ "rusqlite/bundled" ]
|
bundled-libs = [ "rusqlite/bundled" ]
|
||||||
cli = ["clap", "pretty_env_logger"]
|
cli = ["clap", "pretty_env_logger"]
|
||||||
lmdb = [ "heed" ]
|
lmdb = [ "heed" ]
|
||||||
|
|
|
@ -181,6 +181,10 @@ impl Tree {
|
||||||
pub fn len(&self) -> Result<usize> {
|
pub fn len(&self) -> Result<usize> {
|
||||||
self.0.len(self.1)
|
self.0.len(self.1)
|
||||||
}
|
}
|
||||||
|
#[inline]
|
||||||
|
pub fn fast_len(&self) -> Result<Option<usize>> {
|
||||||
|
self.0.fast_len(self.1)
|
||||||
|
}
|
||||||
|
|
||||||
#[inline]
|
#[inline]
|
||||||
pub fn first(&self) -> Result<Option<(Value, Value)>> {
|
pub fn first(&self) -> Result<Option<(Value, Value)>> {
|
||||||
|
@ -323,6 +327,9 @@ pub(crate) trait IDb: Send + Sync {
|
||||||
|
|
||||||
fn get(&self, tree: usize, key: &[u8]) -> Result<Option<Value>>;
|
fn get(&self, tree: usize, key: &[u8]) -> Result<Option<Value>>;
|
||||||
fn len(&self, tree: usize) -> Result<usize>;
|
fn len(&self, tree: usize) -> Result<usize>;
|
||||||
|
fn fast_len(&self, _tree: usize) -> Result<Option<usize>> {
|
||||||
|
Ok(None)
|
||||||
|
}
|
||||||
|
|
||||||
fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<Option<Value>>;
|
fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<Option<Value>>;
|
||||||
fn remove(&self, tree: usize, key: &[u8]) -> Result<Option<Value>>;
|
fn remove(&self, tree: usize, key: &[u8]) -> Result<Option<Value>>;
|
||||||
|
|
|
@ -121,6 +121,10 @@ impl IDb for LmdbDb {
|
||||||
Ok(tree.len(&tx)?.try_into().unwrap())
|
Ok(tree.len(&tx)?.try_into().unwrap())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn fast_len(&self, tree: usize) -> Result<Option<usize>> {
|
||||||
|
Ok(Some(self.len(tree)?))
|
||||||
|
}
|
||||||
|
|
||||||
fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<Option<Value>> {
|
fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<Option<Value>> {
|
||||||
let tree = self.get_tree(tree)?;
|
let tree = self.get_tree(tree)?;
|
||||||
let mut tx = self.db.write_txn()?;
|
let mut tx = self.db.write_txn()?;
|
||||||
|
|
|
@ -144,6 +144,10 @@ impl IDb for SqliteDb {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn fast_len(&self, tree: usize) -> Result<Option<usize>> {
|
||||||
|
Ok(Some(self.len(tree)?))
|
||||||
|
}
|
||||||
|
|
||||||
fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<Option<Value>> {
|
fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<Option<Value>> {
|
||||||
trace!("insert {}: lock db", tree);
|
trace!("insert {}: lock db", tree);
|
||||||
let this = self.0.lock().unwrap();
|
let this = self.0.lock().unwrap();
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
[package]
|
[package]
|
||||||
name = "garage"
|
name = "garage"
|
||||||
version = "0.8.0"
|
version = "0.8.1"
|
||||||
authors = ["Alex Auvolat <alex@adnab.me>"]
|
authors = ["Alex Auvolat <alex@adnab.me>"]
|
||||||
edition = "2018"
|
edition = "2018"
|
||||||
license = "AGPL-3.0"
|
license = "AGPL-3.0"
|
||||||
|
@ -21,14 +21,14 @@ path = "tests/lib.rs"
|
||||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
garage_db = { version = "0.8.0", path = "../db" }
|
garage_db = { version = "0.8.1", path = "../db" }
|
||||||
garage_api = { version = "0.8.0", path = "../api" }
|
garage_api = { version = "0.8.1", path = "../api" }
|
||||||
garage_block = { version = "0.8.0", path = "../block" }
|
garage_block = { version = "0.8.1", path = "../block" }
|
||||||
garage_model = { version = "0.8.0", path = "../model" }
|
garage_model = { version = "0.8.1", path = "../model" }
|
||||||
garage_rpc = { version = "0.8.0", path = "../rpc" }
|
garage_rpc = { version = "0.8.1", path = "../rpc" }
|
||||||
garage_table = { version = "0.8.0", path = "../table" }
|
garage_table = { version = "0.8.1", path = "../table" }
|
||||||
garage_util = { version = "0.8.0", path = "../util" }
|
garage_util = { version = "0.8.1", path = "../util" }
|
||||||
garage_web = { version = "0.8.0", path = "../web" }
|
garage_web = { version = "0.8.1", path = "../web" }
|
||||||
|
|
||||||
backtrace = "0.3"
|
backtrace = "0.3"
|
||||||
bytes = "1.0"
|
bytes = "1.0"
|
||||||
|
@ -36,13 +36,12 @@ bytesize = "1.1"
|
||||||
timeago = "0.3"
|
timeago = "0.3"
|
||||||
parse_duration = "2.1"
|
parse_duration = "2.1"
|
||||||
hex = "0.4"
|
hex = "0.4"
|
||||||
tracing = { version = "0.1.30", features = ["log-always"] }
|
tracing = { version = "0.1.30" }
|
||||||
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||||
rand = "0.8"
|
rand = "0.8"
|
||||||
async-trait = "0.1.7"
|
async-trait = "0.1.7"
|
||||||
sodiumoxide = { version = "0.2.5-0", package = "kuska-sodiumoxide" }
|
sodiumoxide = { version = "0.2.5-0", package = "kuska-sodiumoxide" }
|
||||||
|
|
||||||
rmp-serde = "0.15"
|
|
||||||
serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
|
serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
|
||||||
serde_bytes = "0.11"
|
serde_bytes = "0.11"
|
||||||
structopt = { version = "0.3", default-features = false }
|
structopt = { version = "0.3", default-features = false }
|
||||||
|
@ -74,7 +73,7 @@ base64 = "0.13"
|
||||||
|
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
default = [ "bundled-libs", "metrics", "sled" ]
|
default = [ "bundled-libs", "metrics", "sled", "k2v" ]
|
||||||
|
|
||||||
k2v = [ "garage_util/k2v", "garage_api/k2v" ]
|
k2v = [ "garage_util/k2v", "garage_api/k2v" ]
|
||||||
|
|
||||||
|
|
|
@ -5,9 +5,11 @@ use std::sync::Arc;
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use garage_util::background::BackgroundRunner;
|
||||||
use garage_util::crdt::*;
|
use garage_util::crdt::*;
|
||||||
use garage_util::data::*;
|
use garage_util::data::*;
|
||||||
use garage_util::error::Error as GarageError;
|
use garage_util::error::Error as GarageError;
|
||||||
|
use garage_util::formater::format_table_to_string;
|
||||||
use garage_util::time::*;
|
use garage_util::time::*;
|
||||||
|
|
||||||
use garage_table::replication::*;
|
use garage_table::replication::*;
|
||||||
|
@ -15,7 +17,7 @@ use garage_table::*;
|
||||||
|
|
||||||
use garage_rpc::*;
|
use garage_rpc::*;
|
||||||
|
|
||||||
use garage_block::repair::ScrubWorkerCommand;
|
use garage_block::manager::BlockResyncErrorInfo;
|
||||||
|
|
||||||
use garage_model::bucket_alias_table::*;
|
use garage_model::bucket_alias_table::*;
|
||||||
use garage_model::bucket_table::*;
|
use garage_model::bucket_table::*;
|
||||||
|
@ -24,6 +26,8 @@ use garage_model::helper::error::{Error, OkOrBadRequest};
|
||||||
use garage_model::key_table::*;
|
use garage_model::key_table::*;
|
||||||
use garage_model::migrate::Migrate;
|
use garage_model::migrate::Migrate;
|
||||||
use garage_model::permission::*;
|
use garage_model::permission::*;
|
||||||
|
use garage_model::s3::object_table::*;
|
||||||
|
use garage_model::s3::version_table::Version;
|
||||||
|
|
||||||
use crate::cli::*;
|
use crate::cli::*;
|
||||||
use crate::repair::online::launch_online_repair;
|
use crate::repair::online::launch_online_repair;
|
||||||
|
@ -38,7 +42,8 @@ pub enum AdminRpc {
|
||||||
LaunchRepair(RepairOpt),
|
LaunchRepair(RepairOpt),
|
||||||
Migrate(MigrateOpt),
|
Migrate(MigrateOpt),
|
||||||
Stats(StatsOpt),
|
Stats(StatsOpt),
|
||||||
Worker(WorkerOpt),
|
Worker(WorkerOperation),
|
||||||
|
BlockOperation(BlockOperation),
|
||||||
|
|
||||||
// Replies
|
// Replies
|
||||||
Ok(String),
|
Ok(String),
|
||||||
|
@ -54,6 +59,14 @@ pub enum AdminRpc {
|
||||||
HashMap<usize, garage_util::background::WorkerInfo>,
|
HashMap<usize, garage_util::background::WorkerInfo>,
|
||||||
WorkerListOpt,
|
WorkerListOpt,
|
||||||
),
|
),
|
||||||
|
WorkerVars(Vec<(Uuid, String, String)>),
|
||||||
|
WorkerInfo(usize, garage_util::background::WorkerInfo),
|
||||||
|
BlockErrorList(Vec<BlockResyncErrorInfo>),
|
||||||
|
BlockInfo {
|
||||||
|
hash: Hash,
|
||||||
|
refcount: u64,
|
||||||
|
versions: Vec<Result<Version, Uuid>>,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Rpc for AdminRpc {
|
impl Rpc for AdminRpc {
|
||||||
|
@ -62,17 +75,24 @@ impl Rpc for AdminRpc {
|
||||||
|
|
||||||
pub struct AdminRpcHandler {
|
pub struct AdminRpcHandler {
|
||||||
garage: Arc<Garage>,
|
garage: Arc<Garage>,
|
||||||
|
background: Arc<BackgroundRunner>,
|
||||||
endpoint: Arc<Endpoint<AdminRpc, Self>>,
|
endpoint: Arc<Endpoint<AdminRpc, Self>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl AdminRpcHandler {
|
impl AdminRpcHandler {
|
||||||
pub fn new(garage: Arc<Garage>) -> Arc<Self> {
|
pub fn new(garage: Arc<Garage>, background: Arc<BackgroundRunner>) -> Arc<Self> {
|
||||||
let endpoint = garage.system.netapp.endpoint(ADMIN_RPC_PATH.into());
|
let endpoint = garage.system.netapp.endpoint(ADMIN_RPC_PATH.into());
|
||||||
let admin = Arc::new(Self { garage, endpoint });
|
let admin = Arc::new(Self {
|
||||||
|
garage,
|
||||||
|
background,
|
||||||
|
endpoint,
|
||||||
|
});
|
||||||
admin.endpoint.set_handler(admin.clone());
|
admin.endpoint.set_handler(admin.clone());
|
||||||
admin
|
admin
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ================ BUCKET COMMANDS ====================
|
||||||
|
|
||||||
async fn handle_bucket_cmd(&self, cmd: &BucketOperation) -> Result<AdminRpc, Error> {
|
async fn handle_bucket_cmd(&self, cmd: &BucketOperation) -> Result<AdminRpc, Error> {
|
||||||
match cmd {
|
match cmd {
|
||||||
BucketOperation::List => self.handle_list_buckets().await,
|
BucketOperation::List => self.handle_list_buckets().await,
|
||||||
|
@ -551,6 +571,8 @@ impl AdminRpcHandler {
|
||||||
Ok(AdminRpc::Ok(ret))
|
Ok(AdminRpc::Ok(ret))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ================ KEY COMMANDS ====================
|
||||||
|
|
||||||
async fn handle_key_cmd(&self, cmd: &KeyOperation) -> Result<AdminRpc, Error> {
|
async fn handle_key_cmd(&self, cmd: &KeyOperation) -> Result<AdminRpc, Error> {
|
||||||
match cmd {
|
match cmd {
|
||||||
KeyOperation::List => self.handle_list_keys().await,
|
KeyOperation::List => self.handle_list_keys().await,
|
||||||
|
@ -688,6 +710,8 @@ impl AdminRpcHandler {
|
||||||
Ok(AdminRpc::KeyInfo(key, relevant_buckets))
|
Ok(AdminRpc::KeyInfo(key, relevant_buckets))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ================ MIGRATION COMMANDS ====================
|
||||||
|
|
||||||
async fn handle_migrate(self: &Arc<Self>, opt: MigrateOpt) -> Result<AdminRpc, Error> {
|
async fn handle_migrate(self: &Arc<Self>, opt: MigrateOpt) -> Result<AdminRpc, Error> {
|
||||||
if !opt.yes {
|
if !opt.yes {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
|
@ -704,6 +728,8 @@ impl AdminRpcHandler {
|
||||||
Ok(AdminRpc::Ok("Migration successfull.".into()))
|
Ok(AdminRpc::Ok("Migration successfull.".into()))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ================ REPAIR COMMANDS ====================
|
||||||
|
|
||||||
async fn handle_launch_repair(self: &Arc<Self>, opt: RepairOpt) -> Result<AdminRpc, Error> {
|
async fn handle_launch_repair(self: &Arc<Self>, opt: RepairOpt) -> Result<AdminRpc, Error> {
|
||||||
if !opt.yes {
|
if !opt.yes {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
|
@ -739,7 +765,7 @@ impl AdminRpcHandler {
|
||||||
)))
|
)))
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
launch_online_repair(self.garage.clone(), opt).await;
|
launch_online_repair(&self.garage, &self.background, opt).await?;
|
||||||
Ok(AdminRpc::Ok(format!(
|
Ok(AdminRpc::Ok(format!(
|
||||||
"Repair launched on {:?}",
|
"Repair launched on {:?}",
|
||||||
self.garage.system.id
|
self.garage.system.id
|
||||||
|
@ -747,6 +773,8 @@ impl AdminRpcHandler {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ================ STATS COMMANDS ====================
|
||||||
|
|
||||||
async fn handle_stats(&self, opt: StatsOpt) -> Result<AdminRpc, Error> {
|
async fn handle_stats(&self, opt: StatsOpt) -> Result<AdminRpc, Error> {
|
||||||
if opt.all_nodes {
|
if opt.all_nodes {
|
||||||
let mut ret = String::new();
|
let mut ret = String::new();
|
||||||
|
@ -763,11 +791,12 @@ impl AdminRpcHandler {
|
||||||
match self
|
match self
|
||||||
.endpoint
|
.endpoint
|
||||||
.call(&node_id, AdminRpc::Stats(opt), PRIO_NORMAL)
|
.call(&node_id, AdminRpc::Stats(opt), PRIO_NORMAL)
|
||||||
.await?
|
.await
|
||||||
{
|
{
|
||||||
Ok(AdminRpc::Ok(s)) => writeln!(&mut ret, "{}", s).unwrap(),
|
Ok(Ok(AdminRpc::Ok(s))) => writeln!(&mut ret, "{}", s).unwrap(),
|
||||||
Ok(x) => writeln!(&mut ret, "Bad answer: {:?}", x).unwrap(),
|
Ok(Ok(x)) => writeln!(&mut ret, "Bad answer: {:?}", x).unwrap(),
|
||||||
Err(e) => writeln!(&mut ret, "Error: {}", e).unwrap(),
|
Ok(Err(e)) => writeln!(&mut ret, "Remote error: {}", e).unwrap(),
|
||||||
|
Err(e) => writeln!(&mut ret, "Network error: {}", e).unwrap(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Ok(AdminRpc::Ok(ret))
|
Ok(AdminRpc::Ok(ret))
|
||||||
|
@ -787,6 +816,7 @@ impl AdminRpcHandler {
|
||||||
.unwrap_or_else(|| "(unknown)".into()),
|
.unwrap_or_else(|| "(unknown)".into()),
|
||||||
)
|
)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
writeln!(&mut ret, "\nDatabase engine: {}", self.garage.db.engine()).unwrap();
|
writeln!(&mut ret, "\nDatabase engine: {}", self.garage.db.engine()).unwrap();
|
||||||
|
|
||||||
// Gather ring statistics
|
// Gather ring statistics
|
||||||
|
@ -805,21 +835,38 @@ impl AdminRpcHandler {
|
||||||
writeln!(&mut ret, " {:?} {}", n, c).unwrap();
|
writeln!(&mut ret, " {:?} {}", n, c).unwrap();
|
||||||
}
|
}
|
||||||
|
|
||||||
self.gather_table_stats(&mut ret, &self.garage.bucket_table, &opt)?;
|
// Gather table statistics
|
||||||
self.gather_table_stats(&mut ret, &self.garage.key_table, &opt)?;
|
let mut table = vec![" Table\tItems\tMklItems\tMklTodo\tGcTodo".into()];
|
||||||
self.gather_table_stats(&mut ret, &self.garage.object_table, &opt)?;
|
table.push(self.gather_table_stats(&self.garage.bucket_table, opt.detailed)?);
|
||||||
self.gather_table_stats(&mut ret, &self.garage.version_table, &opt)?;
|
table.push(self.gather_table_stats(&self.garage.key_table, opt.detailed)?);
|
||||||
self.gather_table_stats(&mut ret, &self.garage.block_ref_table, &opt)?;
|
table.push(self.gather_table_stats(&self.garage.object_table, opt.detailed)?);
|
||||||
|
table.push(self.gather_table_stats(&self.garage.version_table, opt.detailed)?);
|
||||||
|
table.push(self.gather_table_stats(&self.garage.block_ref_table, opt.detailed)?);
|
||||||
|
write!(
|
||||||
|
&mut ret,
|
||||||
|
"\nTable stats:\n{}",
|
||||||
|
format_table_to_string(table)
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
// Gather block manager statistics
|
||||||
writeln!(&mut ret, "\nBlock manager stats:").unwrap();
|
writeln!(&mut ret, "\nBlock manager stats:").unwrap();
|
||||||
if opt.detailed {
|
let rc_len = if opt.detailed {
|
||||||
|
self.garage.block_manager.rc_len()?.to_string()
|
||||||
|
} else {
|
||||||
|
self.garage
|
||||||
|
.block_manager
|
||||||
|
.rc_fast_len()?
|
||||||
|
.map(|x| x.to_string())
|
||||||
|
.unwrap_or_else(|| "NC".into())
|
||||||
|
};
|
||||||
|
|
||||||
writeln!(
|
writeln!(
|
||||||
&mut ret,
|
&mut ret,
|
||||||
" number of RC entries (~= number of blocks): {}",
|
" number of RC entries (~= number of blocks): {}",
|
||||||
self.garage.block_manager.rc_len()?
|
rc_len
|
||||||
)
|
)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
}
|
|
||||||
writeln!(
|
writeln!(
|
||||||
&mut ret,
|
&mut ret,
|
||||||
" resync queue length: {}",
|
" resync queue length: {}",
|
||||||
|
@ -833,79 +880,305 @@ impl AdminRpcHandler {
|
||||||
)
|
)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
|
if !opt.detailed {
|
||||||
|
writeln!(&mut ret, "\nIf values are missing (marked as NC), consider adding the --detailed flag - this will be slow.").unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
Ok(ret)
|
Ok(ret)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn gather_table_stats<F, R>(
|
fn gather_table_stats<F, R>(
|
||||||
&self,
|
&self,
|
||||||
to: &mut String,
|
|
||||||
t: &Arc<Table<F, R>>,
|
t: &Arc<Table<F, R>>,
|
||||||
opt: &StatsOpt,
|
detailed: bool,
|
||||||
) -> Result<(), Error>
|
) -> Result<String, Error>
|
||||||
where
|
where
|
||||||
F: TableSchema + 'static,
|
F: TableSchema + 'static,
|
||||||
R: TableReplication + 'static,
|
R: TableReplication + 'static,
|
||||||
{
|
{
|
||||||
writeln!(to, "\nTable stats for {}", F::TABLE_NAME).unwrap();
|
let (data_len, mkl_len) = if detailed {
|
||||||
if opt.detailed {
|
(
|
||||||
writeln!(
|
t.data.store.len().map_err(GarageError::from)?.to_string(),
|
||||||
to,
|
t.merkle_updater.merkle_tree_len()?.to_string(),
|
||||||
" number of items: {}",
|
|
||||||
t.data.store.len().map_err(GarageError::from)?
|
|
||||||
)
|
)
|
||||||
.unwrap();
|
} else {
|
||||||
writeln!(
|
(
|
||||||
to,
|
t.data
|
||||||
" Merkle tree size: {}",
|
.store
|
||||||
t.merkle_updater.merkle_tree_len()?
|
.fast_len()
|
||||||
|
.map_err(GarageError::from)?
|
||||||
|
.map(|x| x.to_string())
|
||||||
|
.unwrap_or_else(|| "NC".into()),
|
||||||
|
t.merkle_updater
|
||||||
|
.merkle_tree_fast_len()?
|
||||||
|
.map(|x| x.to_string())
|
||||||
|
.unwrap_or_else(|| "NC".into()),
|
||||||
)
|
)
|
||||||
.unwrap();
|
};
|
||||||
}
|
|
||||||
writeln!(
|
|
||||||
to,
|
|
||||||
" Merkle updater todo queue length: {}",
|
|
||||||
t.merkle_updater.todo_len()?
|
|
||||||
)
|
|
||||||
.unwrap();
|
|
||||||
writeln!(to, " GC todo queue length: {}", t.data.gc_todo_len()?).unwrap();
|
|
||||||
|
|
||||||
Ok(())
|
Ok(format!(
|
||||||
|
" {}\t{}\t{}\t{}\t{}",
|
||||||
|
F::TABLE_NAME,
|
||||||
|
data_len,
|
||||||
|
mkl_len,
|
||||||
|
t.merkle_updater.todo_len()?,
|
||||||
|
t.data.gc_todo_len()?
|
||||||
|
))
|
||||||
}
|
}
|
||||||
|
|
||||||
// ----
|
// ================ WORKER COMMANDS ====================
|
||||||
|
|
||||||
async fn handle_worker_cmd(&self, opt: WorkerOpt) -> Result<AdminRpc, Error> {
|
async fn handle_worker_cmd(&self, cmd: &WorkerOperation) -> Result<AdminRpc, Error> {
|
||||||
match opt.cmd {
|
match cmd {
|
||||||
WorkerCmd::List { opt } => {
|
WorkerOperation::List { opt } => {
|
||||||
let workers = self.garage.background.get_worker_info();
|
let workers = self.background.get_worker_info();
|
||||||
Ok(AdminRpc::WorkerList(workers, opt))
|
Ok(AdminRpc::WorkerList(workers, *opt))
|
||||||
}
|
}
|
||||||
WorkerCmd::Set { opt } => match opt {
|
WorkerOperation::Info { tid } => {
|
||||||
WorkerSetCmd::ScrubTranquility { tranquility } => {
|
let info = self
|
||||||
let scrub_command = ScrubWorkerCommand::SetTranquility(tranquility);
|
.background
|
||||||
self.garage
|
.get_worker_info()
|
||||||
.block_manager
|
.get(tid)
|
||||||
.send_scrub_command(scrub_command)
|
.ok_or_bad_request(format!("No worker with TID {}", tid))?
|
||||||
.await;
|
.clone();
|
||||||
Ok(AdminRpc::Ok("Scrub tranquility updated".into()))
|
Ok(AdminRpc::WorkerInfo(*tid, info))
|
||||||
}
|
}
|
||||||
WorkerSetCmd::ResyncNWorkers { n_workers } => {
|
WorkerOperation::Get {
|
||||||
self.garage
|
all_nodes,
|
||||||
.block_manager
|
variable,
|
||||||
.resync
|
} => self.handle_get_var(*all_nodes, variable).await,
|
||||||
.set_n_workers(n_workers)
|
WorkerOperation::Set {
|
||||||
|
all_nodes,
|
||||||
|
variable,
|
||||||
|
value,
|
||||||
|
} => self.handle_set_var(*all_nodes, variable, value).await,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handle_get_var(
|
||||||
|
&self,
|
||||||
|
all_nodes: bool,
|
||||||
|
variable: &Option<String>,
|
||||||
|
) -> Result<AdminRpc, Error> {
|
||||||
|
if all_nodes {
|
||||||
|
let mut ret = vec![];
|
||||||
|
let ring = self.garage.system.ring.borrow().clone();
|
||||||
|
for node in ring.layout.node_ids().iter() {
|
||||||
|
let node = (*node).into();
|
||||||
|
match self
|
||||||
|
.endpoint
|
||||||
|
.call(
|
||||||
|
&node,
|
||||||
|
AdminRpc::Worker(WorkerOperation::Get {
|
||||||
|
all_nodes: false,
|
||||||
|
variable: variable.clone(),
|
||||||
|
}),
|
||||||
|
PRIO_NORMAL,
|
||||||
|
)
|
||||||
|
.await??
|
||||||
|
{
|
||||||
|
AdminRpc::WorkerVars(v) => ret.extend(v),
|
||||||
|
m => return Err(GarageError::unexpected_rpc_message(m).into()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(AdminRpc::WorkerVars(ret))
|
||||||
|
} else {
|
||||||
|
#[allow(clippy::collapsible_else_if)]
|
||||||
|
if let Some(v) = variable {
|
||||||
|
Ok(AdminRpc::WorkerVars(vec![(
|
||||||
|
self.garage.system.id,
|
||||||
|
v.clone(),
|
||||||
|
self.garage.bg_vars.get(v)?,
|
||||||
|
)]))
|
||||||
|
} else {
|
||||||
|
let mut vars = self.garage.bg_vars.get_all();
|
||||||
|
vars.sort();
|
||||||
|
Ok(AdminRpc::WorkerVars(
|
||||||
|
vars.into_iter()
|
||||||
|
.map(|(k, v)| (self.garage.system.id, k.to_string(), v))
|
||||||
|
.collect(),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handle_set_var(
|
||||||
|
&self,
|
||||||
|
all_nodes: bool,
|
||||||
|
variable: &str,
|
||||||
|
value: &str,
|
||||||
|
) -> Result<AdminRpc, Error> {
|
||||||
|
if all_nodes {
|
||||||
|
let mut ret = vec![];
|
||||||
|
let ring = self.garage.system.ring.borrow().clone();
|
||||||
|
for node in ring.layout.node_ids().iter() {
|
||||||
|
let node = (*node).into();
|
||||||
|
match self
|
||||||
|
.endpoint
|
||||||
|
.call(
|
||||||
|
&node,
|
||||||
|
AdminRpc::Worker(WorkerOperation::Set {
|
||||||
|
all_nodes: false,
|
||||||
|
variable: variable.to_string(),
|
||||||
|
value: value.to_string(),
|
||||||
|
}),
|
||||||
|
PRIO_NORMAL,
|
||||||
|
)
|
||||||
|
.await??
|
||||||
|
{
|
||||||
|
AdminRpc::WorkerVars(v) => ret.extend(v),
|
||||||
|
m => return Err(GarageError::unexpected_rpc_message(m).into()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(AdminRpc::WorkerVars(ret))
|
||||||
|
} else {
|
||||||
|
self.garage.bg_vars.set(variable, value)?;
|
||||||
|
Ok(AdminRpc::WorkerVars(vec![(
|
||||||
|
self.garage.system.id,
|
||||||
|
variable.to_string(),
|
||||||
|
value.to_string(),
|
||||||
|
)]))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ================ BLOCK COMMANDS ====================
|
||||||
|
|
||||||
|
async fn handle_block_cmd(&self, cmd: &BlockOperation) -> Result<AdminRpc, Error> {
|
||||||
|
match cmd {
|
||||||
|
BlockOperation::ListErrors => Ok(AdminRpc::BlockErrorList(
|
||||||
|
self.garage.block_manager.list_resync_errors()?,
|
||||||
|
)),
|
||||||
|
BlockOperation::Info { hash } => {
|
||||||
|
let hash = hex::decode(hash).ok_or_bad_request("invalid hash")?;
|
||||||
|
let hash = Hash::try_from(&hash).ok_or_bad_request("invalid hash")?;
|
||||||
|
let refcount = self.garage.block_manager.get_block_rc(&hash)?;
|
||||||
|
let block_refs = self
|
||||||
|
.garage
|
||||||
|
.block_ref_table
|
||||||
|
.get_range(&hash, None, None, 10000, Default::default())
|
||||||
.await?;
|
.await?;
|
||||||
Ok(AdminRpc::Ok("Number of resync workers updated".into()))
|
let mut versions = vec![];
|
||||||
|
for br in block_refs {
|
||||||
|
if let Some(v) = self
|
||||||
|
.garage
|
||||||
|
.version_table
|
||||||
|
.get(&br.version, &EmptyKey)
|
||||||
|
.await?
|
||||||
|
{
|
||||||
|
versions.push(Ok(v));
|
||||||
|
} else {
|
||||||
|
versions.push(Err(br.version));
|
||||||
}
|
}
|
||||||
WorkerSetCmd::ResyncTranquility { tranquility } => {
|
}
|
||||||
self.garage
|
Ok(AdminRpc::BlockInfo {
|
||||||
.block_manager
|
hash,
|
||||||
.resync
|
refcount,
|
||||||
.set_tranquility(tranquility)
|
versions,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
BlockOperation::RetryNow { all, blocks } => {
|
||||||
|
if *all {
|
||||||
|
if !blocks.is_empty() {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
"--all was specified, cannot also specify blocks".into(),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
let blocks = self.garage.block_manager.list_resync_errors()?;
|
||||||
|
for b in blocks.iter() {
|
||||||
|
self.garage.block_manager.resync.clear_backoff(&b.hash)?;
|
||||||
|
}
|
||||||
|
Ok(AdminRpc::Ok(format!(
|
||||||
|
"{} blocks returned in queue for a retry now (check logs to see results)",
|
||||||
|
blocks.len()
|
||||||
|
)))
|
||||||
|
} else {
|
||||||
|
for hash in blocks {
|
||||||
|
let hash = hex::decode(hash).ok_or_bad_request("invalid hash")?;
|
||||||
|
let hash = Hash::try_from(&hash).ok_or_bad_request("invalid hash")?;
|
||||||
|
self.garage.block_manager.resync.clear_backoff(&hash)?;
|
||||||
|
}
|
||||||
|
Ok(AdminRpc::Ok(format!(
|
||||||
|
"{} blocks returned in queue for a retry now (check logs to see results)",
|
||||||
|
blocks.len()
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
BlockOperation::Purge { yes, blocks } => {
|
||||||
|
if !yes {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
"Pass the --yes flag to confirm block purge operation.".into(),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut obj_dels = 0;
|
||||||
|
let mut ver_dels = 0;
|
||||||
|
|
||||||
|
for hash in blocks {
|
||||||
|
let hash = hex::decode(hash).ok_or_bad_request("invalid hash")?;
|
||||||
|
let hash = Hash::try_from(&hash).ok_or_bad_request("invalid hash")?;
|
||||||
|
let block_refs = self
|
||||||
|
.garage
|
||||||
|
.block_ref_table
|
||||||
|
.get_range(&hash, None, None, 10000, Default::default())
|
||||||
.await?;
|
.await?;
|
||||||
Ok(AdminRpc::Ok("Resync tranquility updated".into()))
|
|
||||||
|
for br in block_refs {
|
||||||
|
let version = match self
|
||||||
|
.garage
|
||||||
|
.version_table
|
||||||
|
.get(&br.version, &EmptyKey)
|
||||||
|
.await?
|
||||||
|
{
|
||||||
|
Some(v) => v,
|
||||||
|
None => continue,
|
||||||
|
};
|
||||||
|
|
||||||
|
if let Some(object) = self
|
||||||
|
.garage
|
||||||
|
.object_table
|
||||||
|
.get(&version.bucket_id, &version.key)
|
||||||
|
.await?
|
||||||
|
{
|
||||||
|
let ov = object.versions().iter().rev().find(|v| v.is_complete());
|
||||||
|
if let Some(ov) = ov {
|
||||||
|
if ov.uuid == br.version {
|
||||||
|
let del_uuid = gen_uuid();
|
||||||
|
let deleted_object = Object::new(
|
||||||
|
version.bucket_id,
|
||||||
|
version.key.clone(),
|
||||||
|
vec![ObjectVersion {
|
||||||
|
uuid: del_uuid,
|
||||||
|
timestamp: ov.timestamp + 1,
|
||||||
|
state: ObjectVersionState::Complete(
|
||||||
|
ObjectVersionData::DeleteMarker,
|
||||||
|
),
|
||||||
|
}],
|
||||||
|
);
|
||||||
|
self.garage.object_table.insert(&deleted_object).await?;
|
||||||
|
obj_dels += 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !version.deleted.get() {
|
||||||
|
let deleted_version = Version::new(
|
||||||
|
version.uuid,
|
||||||
|
version.bucket_id,
|
||||||
|
version.key.clone(),
|
||||||
|
true,
|
||||||
|
);
|
||||||
|
self.garage.version_table.insert(&deleted_version).await?;
|
||||||
|
ver_dels += 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(AdminRpc::Ok(format!(
|
||||||
|
"{} blocks were purged: {} object deletion markers added, {} versions marked deleted",
|
||||||
|
blocks.len(),
|
||||||
|
obj_dels,
|
||||||
|
ver_dels
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
},
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -923,7 +1196,8 @@ impl EndpointHandler<AdminRpc> for AdminRpcHandler {
|
||||||
AdminRpc::Migrate(opt) => self.handle_migrate(opt.clone()).await,
|
AdminRpc::Migrate(opt) => self.handle_migrate(opt.clone()).await,
|
||||||
AdminRpc::LaunchRepair(opt) => self.handle_launch_repair(opt.clone()).await,
|
AdminRpc::LaunchRepair(opt) => self.handle_launch_repair(opt.clone()).await,
|
||||||
AdminRpc::Stats(opt) => self.handle_stats(opt.clone()).await,
|
AdminRpc::Stats(opt) => self.handle_stats(opt.clone()).await,
|
||||||
AdminRpc::Worker(opt) => self.handle_worker_cmd(opt.clone()).await,
|
AdminRpc::Worker(wo) => self.handle_worker_cmd(wo).await,
|
||||||
|
AdminRpc::BlockOperation(bo) => self.handle_block_cmd(bo).await,
|
||||||
m => Err(GarageError::unexpected_rpc_message(m).into()),
|
m => Err(GarageError::unexpected_rpc_message(m).into()),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -41,6 +41,9 @@ pub async fn cli_command_dispatch(
|
||||||
}
|
}
|
||||||
Command::Stats(so) => cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::Stats(so)).await,
|
Command::Stats(so) => cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::Stats(so)).await,
|
||||||
Command::Worker(wo) => cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::Worker(wo)).await,
|
Command::Worker(wo) => cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::Worker(wo)).await,
|
||||||
|
Command::Block(bo) => {
|
||||||
|
cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::BlockOperation(bo)).await
|
||||||
|
}
|
||||||
_ => unreachable!(),
|
_ => unreachable!(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -186,7 +189,23 @@ pub async fn cmd_admin(
|
||||||
print_key_info(&key, &rb);
|
print_key_info(&key, &rb);
|
||||||
}
|
}
|
||||||
AdminRpc::WorkerList(wi, wlo) => {
|
AdminRpc::WorkerList(wi, wlo) => {
|
||||||
print_worker_info(wi, wlo);
|
print_worker_list(wi, wlo);
|
||||||
|
}
|
||||||
|
AdminRpc::WorkerVars(wv) => {
|
||||||
|
print_worker_vars(wv);
|
||||||
|
}
|
||||||
|
AdminRpc::WorkerInfo(tid, wi) => {
|
||||||
|
print_worker_info(tid, wi);
|
||||||
|
}
|
||||||
|
AdminRpc::BlockErrorList(el) => {
|
||||||
|
print_block_error_list(el);
|
||||||
|
}
|
||||||
|
AdminRpc::BlockInfo {
|
||||||
|
hash,
|
||||||
|
refcount,
|
||||||
|
versions,
|
||||||
|
} => {
|
||||||
|
print_block_info(hash, refcount, versions);
|
||||||
}
|
}
|
||||||
r => {
|
r => {
|
||||||
error!("Unexpected response: {:?}", r);
|
error!("Unexpected response: {:?}", r);
|
||||||
|
|
|
@ -49,7 +49,11 @@ pub enum Command {
|
||||||
|
|
||||||
/// Manage background workers
|
/// Manage background workers
|
||||||
#[structopt(name = "worker", version = garage_version())]
|
#[structopt(name = "worker", version = garage_version())]
|
||||||
Worker(WorkerOpt),
|
Worker(WorkerOperation),
|
||||||
|
|
||||||
|
/// Low-level debug operations on data blocks
|
||||||
|
#[structopt(name = "block", version = garage_version())]
|
||||||
|
Block(BlockOperation),
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(StructOpt, Debug)]
|
#[derive(StructOpt, Debug)]
|
||||||
|
@ -502,25 +506,36 @@ pub struct StatsOpt {
|
||||||
pub detailed: bool,
|
pub detailed: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, StructOpt, Debug, Clone)]
|
|
||||||
pub struct WorkerOpt {
|
|
||||||
#[structopt(subcommand)]
|
|
||||||
pub cmd: WorkerCmd,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)]
|
#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)]
|
||||||
pub enum WorkerCmd {
|
pub enum WorkerOperation {
|
||||||
/// List all workers on Garage node
|
/// List all workers on Garage node
|
||||||
#[structopt(name = "list", version = garage_version())]
|
#[structopt(name = "list", version = garage_version())]
|
||||||
List {
|
List {
|
||||||
#[structopt(flatten)]
|
#[structopt(flatten)]
|
||||||
opt: WorkerListOpt,
|
opt: WorkerListOpt,
|
||||||
},
|
},
|
||||||
|
/// Get detailed information about a worker
|
||||||
|
#[structopt(name = "info", version = garage_version())]
|
||||||
|
Info { tid: usize },
|
||||||
|
/// Get worker parameter
|
||||||
|
#[structopt(name = "get", version = garage_version())]
|
||||||
|
Get {
|
||||||
|
/// Gather variable values from all nodes
|
||||||
|
#[structopt(short = "a", long = "all-nodes")]
|
||||||
|
all_nodes: bool,
|
||||||
|
/// Variable name to get, or none to get all variables
|
||||||
|
variable: Option<String>,
|
||||||
|
},
|
||||||
/// Set worker parameter
|
/// Set worker parameter
|
||||||
#[structopt(name = "set", version = garage_version())]
|
#[structopt(name = "set", version = garage_version())]
|
||||||
Set {
|
Set {
|
||||||
#[structopt(subcommand)]
|
/// Set variable values on all nodes
|
||||||
opt: WorkerSetCmd,
|
#[structopt(short = "a", long = "all-nodes")]
|
||||||
|
all_nodes: bool,
|
||||||
|
/// Variable node to set
|
||||||
|
variable: String,
|
||||||
|
/// Value to set the variable to
|
||||||
|
value: String,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -535,14 +550,33 @@ pub struct WorkerListOpt {
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)]
|
#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)]
|
||||||
pub enum WorkerSetCmd {
|
pub enum BlockOperation {
|
||||||
/// Set tranquility of scrub operations
|
/// List all blocks that currently have a resync error
|
||||||
#[structopt(name = "scrub-tranquility", version = garage_version())]
|
#[structopt(name = "list-errors", version = garage_version())]
|
||||||
ScrubTranquility { tranquility: u32 },
|
ListErrors,
|
||||||
/// Set number of concurrent block resync workers
|
/// Get detailed information about a single block
|
||||||
#[structopt(name = "resync-n-workers", version = garage_version())]
|
#[structopt(name = "info", version = garage_version())]
|
||||||
ResyncNWorkers { n_workers: usize },
|
Info {
|
||||||
/// Set tranquility of block resync operations
|
/// Hash of the block for which to retrieve information
|
||||||
#[structopt(name = "resync-tranquility", version = garage_version())]
|
hash: String,
|
||||||
ResyncTranquility { tranquility: u32 },
|
},
|
||||||
|
/// Retry now the resync of one or many blocks
|
||||||
|
#[structopt(name = "retry-now", version = garage_version())]
|
||||||
|
RetryNow {
|
||||||
|
/// Retry all blocks that have a resync error
|
||||||
|
#[structopt(long = "all")]
|
||||||
|
all: bool,
|
||||||
|
/// Hashes of the block to retry to resync now
|
||||||
|
blocks: Vec<String>,
|
||||||
|
},
|
||||||
|
/// Delete all objects referencing a missing block
|
||||||
|
#[structopt(name = "purge", version = garage_version())]
|
||||||
|
Purge {
|
||||||
|
/// Mandatory to confirm this operation
|
||||||
|
#[structopt(long = "yes")]
|
||||||
|
yes: bool,
|
||||||
|
/// Hashes of the block to purge
|
||||||
|
#[structopt(required = true)]
|
||||||
|
blocks: Vec<String>,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
|
@ -3,14 +3,17 @@ use std::time::Duration;
|
||||||
|
|
||||||
use garage_util::background::*;
|
use garage_util::background::*;
|
||||||
use garage_util::crdt::*;
|
use garage_util::crdt::*;
|
||||||
use garage_util::data::Uuid;
|
use garage_util::data::*;
|
||||||
use garage_util::error::*;
|
use garage_util::error::*;
|
||||||
use garage_util::formater::format_table;
|
use garage_util::formater::format_table;
|
||||||
use garage_util::time::*;
|
use garage_util::time::*;
|
||||||
|
|
||||||
|
use garage_block::manager::BlockResyncErrorInfo;
|
||||||
|
|
||||||
use garage_model::bucket_table::*;
|
use garage_model::bucket_table::*;
|
||||||
use garage_model::key_table::*;
|
use garage_model::key_table::*;
|
||||||
use garage_model::s3::object_table::{BYTES, OBJECTS, UNFINISHED_UPLOADS};
|
use garage_model::s3::object_table::{BYTES, OBJECTS, UNFINISHED_UPLOADS};
|
||||||
|
use garage_model::s3::version_table::Version;
|
||||||
|
|
||||||
use crate::cli::structs::WorkerListOpt;
|
use crate::cli::structs::WorkerListOpt;
|
||||||
|
|
||||||
|
@ -241,7 +244,7 @@ pub fn find_matching_node(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn print_worker_info(wi: HashMap<usize, WorkerInfo>, wlo: WorkerListOpt) {
|
pub fn print_worker_list(wi: HashMap<usize, WorkerInfo>, wlo: WorkerListOpt) {
|
||||||
let mut wi = wi.into_iter().collect::<Vec<_>>();
|
let mut wi = wi.into_iter().collect::<Vec<_>>();
|
||||||
wi.sort_by_key(|(tid, info)| {
|
wi.sort_by_key(|(tid, info)| {
|
||||||
(
|
(
|
||||||
|
@ -254,7 +257,7 @@ pub fn print_worker_info(wi: HashMap<usize, WorkerInfo>, wlo: WorkerListOpt) {
|
||||||
)
|
)
|
||||||
});
|
});
|
||||||
|
|
||||||
let mut table = vec![];
|
let mut table = vec!["TID\tState\tName\tTranq\tDone\tQueue\tErrors\tConsec\tLast".to_string()];
|
||||||
for (tid, info) in wi.iter() {
|
for (tid, info) in wi.iter() {
|
||||||
if wlo.busy && !matches!(info.state, WorkerState::Busy | WorkerState::Throttled(_)) {
|
if wlo.busy && !matches!(info.state, WorkerState::Busy | WorkerState::Throttled(_)) {
|
||||||
continue;
|
continue;
|
||||||
|
@ -263,33 +266,155 @@ pub fn print_worker_info(wi: HashMap<usize, WorkerInfo>, wlo: WorkerListOpt) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
table.push(format!("{}\t{}\t{}", tid, info.state, info.name));
|
|
||||||
if let Some(i) = &info.info {
|
|
||||||
table.push(format!("\t\t {}", i));
|
|
||||||
}
|
|
||||||
let tf = timeago::Formatter::new();
|
let tf = timeago::Formatter::new();
|
||||||
let (err_ago, err_msg) = info
|
let err_ago = info
|
||||||
.last_error
|
.last_error
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.map(|(m, t)| {
|
.map(|(_, t)| tf.convert(Duration::from_millis(now_msec() - t)))
|
||||||
(
|
.unwrap_or_default();
|
||||||
tf.convert(Duration::from_millis(now_msec() - t)),
|
let (total_err, consec_err) = if info.errors > 0 {
|
||||||
m.as_str(),
|
(info.errors.to_string(), info.consecutive_errors.to_string())
|
||||||
)
|
} else {
|
||||||
})
|
("-".into(), "-".into())
|
||||||
.unwrap_or(("(?) ago".into(), "(?)"));
|
};
|
||||||
if info.consecutive_errors > 0 {
|
|
||||||
table.push(format!(
|
table.push(format!(
|
||||||
"\t\t {} consecutive errors ({} total), last {}",
|
"{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}",
|
||||||
info.consecutive_errors, info.errors, err_ago,
|
tid,
|
||||||
|
info.state,
|
||||||
|
info.name,
|
||||||
|
info.status
|
||||||
|
.tranquility
|
||||||
|
.as_ref()
|
||||||
|
.map(ToString::to_string)
|
||||||
|
.unwrap_or_else(|| "-".into()),
|
||||||
|
info.status.progress.as_deref().unwrap_or("-"),
|
||||||
|
info.status
|
||||||
|
.queue_length
|
||||||
|
.as_ref()
|
||||||
|
.map(ToString::to_string)
|
||||||
|
.unwrap_or_else(|| "-".into()),
|
||||||
|
total_err,
|
||||||
|
consec_err,
|
||||||
|
err_ago,
|
||||||
));
|
));
|
||||||
table.push(format!("\t\t {}", err_msg));
|
|
||||||
} else if info.errors > 0 {
|
|
||||||
table.push(format!("\t\t ({} errors, last {})", info.errors, err_ago,));
|
|
||||||
if wlo.errors {
|
|
||||||
table.push(format!("\t\t {}", err_msg));
|
|
||||||
}
|
}
|
||||||
|
format_table(table);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn print_worker_info(tid: usize, info: WorkerInfo) {
|
||||||
|
let mut table = vec![];
|
||||||
|
table.push(format!("Task id:\t{}", tid));
|
||||||
|
table.push(format!("Worker name:\t{}", info.name));
|
||||||
|
match info.state {
|
||||||
|
WorkerState::Throttled(t) => {
|
||||||
|
table.push(format!(
|
||||||
|
"Worker state:\tBusy (throttled, paused for {:.3}s)",
|
||||||
|
t
|
||||||
|
));
|
||||||
|
}
|
||||||
|
s => {
|
||||||
|
table.push(format!("Worker state:\t{}", s));
|
||||||
|
}
|
||||||
|
};
|
||||||
|
if let Some(tql) = info.status.tranquility {
|
||||||
|
table.push(format!("Tranquility:\t{}", tql));
|
||||||
|
}
|
||||||
|
|
||||||
|
table.push("".into());
|
||||||
|
table.push(format!("Total errors:\t{}", info.errors));
|
||||||
|
table.push(format!("Consecutive errs:\t{}", info.consecutive_errors));
|
||||||
|
if let Some((s, t)) = info.last_error {
|
||||||
|
table.push(format!("Last error:\t{}", s));
|
||||||
|
let tf = timeago::Formatter::new();
|
||||||
|
table.push(format!(
|
||||||
|
"Last error time:\t{}",
|
||||||
|
tf.convert(Duration::from_millis(now_msec() - t))
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
table.push("".into());
|
||||||
|
if let Some(p) = info.status.progress {
|
||||||
|
table.push(format!("Progress:\t{}", p));
|
||||||
|
}
|
||||||
|
if let Some(ql) = info.status.queue_length {
|
||||||
|
table.push(format!("Queue length:\t{}", ql));
|
||||||
|
}
|
||||||
|
if let Some(pe) = info.status.persistent_errors {
|
||||||
|
table.push(format!("Persistent errors:\t{}", pe));
|
||||||
|
}
|
||||||
|
|
||||||
|
for (i, s) in info.status.freeform.iter().enumerate() {
|
||||||
|
if i == 0 {
|
||||||
|
if table.last() != Some(&"".into()) {
|
||||||
|
table.push("".into());
|
||||||
|
}
|
||||||
|
table.push(format!("Message:\t{}", s));
|
||||||
|
} else {
|
||||||
|
table.push(format!("\t{}", s));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
format_table(table);
|
format_table(table);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn print_worker_vars(wv: Vec<(Uuid, String, String)>) {
|
||||||
|
let table = wv
|
||||||
|
.into_iter()
|
||||||
|
.map(|(n, k, v)| format!("{:?}\t{}\t{}", n, k, v))
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
format_table(table);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn print_block_error_list(el: Vec<BlockResyncErrorInfo>) {
|
||||||
|
let now = now_msec();
|
||||||
|
let tf = timeago::Formatter::new();
|
||||||
|
let mut tf2 = timeago::Formatter::new();
|
||||||
|
tf2.ago("");
|
||||||
|
|
||||||
|
let mut table = vec!["Hash\tRC\tErrors\tLast error\tNext try".into()];
|
||||||
|
for e in el {
|
||||||
|
table.push(format!(
|
||||||
|
"{}\t{}\t{}\t{}\tin {}",
|
||||||
|
hex::encode(e.hash.as_slice()),
|
||||||
|
e.refcount,
|
||||||
|
e.error_count,
|
||||||
|
tf.convert(Duration::from_millis(now - e.last_try)),
|
||||||
|
tf2.convert(Duration::from_millis(e.next_try - now))
|
||||||
|
));
|
||||||
|
}
|
||||||
|
format_table(table);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn print_block_info(hash: Hash, refcount: u64, versions: Vec<Result<Version, Uuid>>) {
|
||||||
|
println!("Block hash: {}", hex::encode(hash.as_slice()));
|
||||||
|
println!("Refcount: {}", refcount);
|
||||||
|
println!();
|
||||||
|
|
||||||
|
let mut table = vec!["Version\tBucket\tKey\tDeleted".into()];
|
||||||
|
let mut nondeleted_count = 0;
|
||||||
|
for v in versions.iter() {
|
||||||
|
match v {
|
||||||
|
Ok(ver) => {
|
||||||
|
table.push(format!(
|
||||||
|
"{:?}\t{:?}\t{}\t{:?}",
|
||||||
|
ver.uuid,
|
||||||
|
ver.bucket_id,
|
||||||
|
ver.key,
|
||||||
|
ver.deleted.get()
|
||||||
|
));
|
||||||
|
if !ver.deleted.get() {
|
||||||
|
nondeleted_count += 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(vh) => {
|
||||||
|
table.push(format!("{:?}\t\t\tyes", vh));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
format_table(table);
|
||||||
|
|
||||||
|
if refcount != nondeleted_count {
|
||||||
|
println!();
|
||||||
|
println!("Warning: refcount does not match number of non-deleted versions");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
@ -127,9 +127,16 @@ async fn main() {
|
||||||
std::process::abort();
|
std::process::abort();
|
||||||
}));
|
}));
|
||||||
|
|
||||||
|
// Parse arguments and dispatch command line
|
||||||
|
let opt = Opt::from_clap(&Opt::clap().version(version.as_str()).get_matches());
|
||||||
|
|
||||||
// Initialize logging as well as other libraries used in Garage
|
// Initialize logging as well as other libraries used in Garage
|
||||||
if std::env::var("RUST_LOG").is_err() {
|
if std::env::var("RUST_LOG").is_err() {
|
||||||
std::env::set_var("RUST_LOG", "netapp=info,garage=info")
|
let default_log = match &opt.cmd {
|
||||||
|
Command::Server => "netapp=info,garage=info",
|
||||||
|
_ => "netapp=warn,garage=warn",
|
||||||
|
};
|
||||||
|
std::env::set_var("RUST_LOG", default_log)
|
||||||
}
|
}
|
||||||
tracing_subscriber::fmt()
|
tracing_subscriber::fmt()
|
||||||
.with_writer(std::io::stderr)
|
.with_writer(std::io::stderr)
|
||||||
|
@ -137,9 +144,6 @@ async fn main() {
|
||||||
.init();
|
.init();
|
||||||
sodiumoxide::init().expect("Unable to init sodiumoxide");
|
sodiumoxide::init().expect("Unable to init sodiumoxide");
|
||||||
|
|
||||||
// Parse arguments and dispatch command line
|
|
||||||
let opt = Opt::from_clap(&Opt::clap().version(version.as_str()).get_matches());
|
|
||||||
|
|
||||||
let res = match opt.cmd {
|
let res = match opt.cmd {
|
||||||
Command::Server => server::run_server(opt.config_file).await,
|
Command::Server => server::run_server(opt.config_file).await,
|
||||||
Command::OfflineRepair(repair_opt) => {
|
Command::OfflineRepair(repair_opt) => {
|
||||||
|
@ -169,7 +173,7 @@ async fn cli_command(opt: Opt) -> Result<(), Error> {
|
||||||
let net_key_hex_str = opt
|
let net_key_hex_str = opt
|
||||||
.rpc_secret
|
.rpc_secret
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.or_else(|| config.as_ref().map(|c| &c.rpc_secret))
|
.or_else(|| config.as_ref().and_then(|c| c.rpc_secret.as_ref()))
|
||||||
.ok_or("No RPC secret provided")?;
|
.ok_or("No RPC secret provided")?;
|
||||||
let network_key = NetworkKey::from_slice(
|
let network_key = NetworkKey::from_slice(
|
||||||
&hex::decode(net_key_hex_str).err_context("Invalid RPC secret key (bad hex)")?[..],
|
&hex::decode(net_key_hex_str).err_context("Invalid RPC secret key (bad hex)")?[..],
|
||||||
|
@ -182,9 +186,9 @@ async fn cli_command(opt: Opt) -> Result<(), Error> {
|
||||||
let netapp = NetApp::new(GARAGE_VERSION_TAG, network_key, sk);
|
let netapp = NetApp::new(GARAGE_VERSION_TAG, network_key, sk);
|
||||||
|
|
||||||
// Find and parse the address of the target host
|
// Find and parse the address of the target host
|
||||||
let (id, addr) = if let Some(h) = opt.rpc_host {
|
let (id, addr, is_default_addr) = if let Some(h) = opt.rpc_host {
|
||||||
let (id, addrs) = parse_and_resolve_peer_addr(&h).ok_or_else(|| format!("Invalid RPC remote node identifier: {}. Expected format is <pubkey>@<IP or hostname>:<port>.", h))?;
|
let (id, addrs) = parse_and_resolve_peer_addr(&h).ok_or_else(|| format!("Invalid RPC remote node identifier: {}. Expected format is <pubkey>@<IP or hostname>:<port>.", h))?;
|
||||||
(id, addrs[0])
|
(id, addrs[0], false)
|
||||||
} else {
|
} else {
|
||||||
let node_id = garage_rpc::system::read_node_id(&config.as_ref().unwrap().metadata_dir)
|
let node_id = garage_rpc::system::read_node_id(&config.as_ref().unwrap().metadata_dir)
|
||||||
.err_context(READ_KEY_ERROR)?;
|
.err_context(READ_KEY_ERROR)?;
|
||||||
|
@ -195,24 +199,26 @@ async fn cli_command(opt: Opt) -> Result<(), Error> {
|
||||||
.ok_or_message("unable to resolve rpc_public_addr specified in config file")?
|
.ok_or_message("unable to resolve rpc_public_addr specified in config file")?
|
||||||
.next()
|
.next()
|
||||||
.ok_or_message("unable to resolve rpc_public_addr specified in config file")?;
|
.ok_or_message("unable to resolve rpc_public_addr specified in config file")?;
|
||||||
(node_id, a)
|
(node_id, a, false)
|
||||||
} else {
|
} else {
|
||||||
let default_addr = SocketAddr::new(
|
let default_addr = SocketAddr::new(
|
||||||
"127.0.0.1".parse().unwrap(),
|
"127.0.0.1".parse().unwrap(),
|
||||||
config.as_ref().unwrap().rpc_bind_addr.port(),
|
config.as_ref().unwrap().rpc_bind_addr.port(),
|
||||||
);
|
);
|
||||||
warn!(
|
(node_id, default_addr, true)
|
||||||
"Trying to contact Garage node at default address {}",
|
|
||||||
default_addr
|
|
||||||
);
|
|
||||||
warn!("If this doesn't work, consider adding rpc_public_addr in your config file or specifying the -h command line parameter.");
|
|
||||||
(node_id, default_addr)
|
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
// Connect to target host
|
// Connect to target host
|
||||||
netapp.clone().try_connect(addr, id).await
|
if let Err(e) = netapp.clone().try_connect(addr, id).await {
|
||||||
.err_context("Unable to connect to destination RPC host. Check that you are using the same value of rpc_secret as them, and that you have their correct public key.")?;
|
if is_default_addr {
|
||||||
|
warn!(
|
||||||
|
"Tried to contact Garage node at default address {}, which didn't work. If that address is wrong, consider setting rpc_public_addr in your config file.",
|
||||||
|
addr
|
||||||
|
);
|
||||||
|
}
|
||||||
|
Err(e).err_context("Unable to connect to destination RPC host. Check that you are using the same value of rpc_secret as them, and that you have their correct public key.")?;
|
||||||
|
}
|
||||||
|
|
||||||
let system_rpc_endpoint = netapp.endpoint::<SystemRpc, ()>(SYSTEM_RPC_PATH.into());
|
let system_rpc_endpoint = netapp.endpoint::<SystemRpc, ()>(SYSTEM_RPC_PATH.into());
|
||||||
let admin_rpc_endpoint = netapp.endpoint::<AdminRpc, ()>(ADMIN_RPC_PATH.into());
|
let admin_rpc_endpoint = netapp.endpoint::<AdminRpc, ()>(ADMIN_RPC_PATH.into());
|
||||||
|
|
|
@ -1,8 +1,5 @@
|
||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
|
|
||||||
use tokio::sync::watch;
|
|
||||||
|
|
||||||
use garage_util::background::*;
|
|
||||||
use garage_util::config::*;
|
use garage_util::config::*;
|
||||||
use garage_util::error::*;
|
use garage_util::error::*;
|
||||||
|
|
||||||
|
@ -20,12 +17,8 @@ pub async fn offline_repair(config_file: PathBuf, opt: OfflineRepairOpt) -> Resu
|
||||||
info!("Loading configuration...");
|
info!("Loading configuration...");
|
||||||
let config = read_config(config_file)?;
|
let config = read_config(config_file)?;
|
||||||
|
|
||||||
info!("Initializing background runner...");
|
|
||||||
let (done_tx, done_rx) = watch::channel(false);
|
|
||||||
let (background, await_background_done) = BackgroundRunner::new(16, done_rx);
|
|
||||||
|
|
||||||
info!("Initializing Garage main data store...");
|
info!("Initializing Garage main data store...");
|
||||||
let garage = Garage::new(config.clone(), background)?;
|
let garage = Garage::new(config)?;
|
||||||
|
|
||||||
info!("Launching repair operation...");
|
info!("Launching repair operation...");
|
||||||
match opt.what {
|
match opt.what {
|
||||||
|
@ -43,13 +36,7 @@ pub async fn offline_repair(config_file: PathBuf, opt: OfflineRepairOpt) -> Resu
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
info!("Repair operation finished, shutting down Garage internals...");
|
info!("Repair operation finished, shutting down...");
|
||||||
done_tx.send(true).unwrap();
|
|
||||||
drop(garage);
|
|
||||||
|
|
||||||
await_background_done.await?;
|
|
||||||
|
|
||||||
info!("Cleaning up...");
|
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
|
@ -12,36 +12,35 @@ use garage_model::s3::version_table::*;
|
||||||
use garage_table::*;
|
use garage_table::*;
|
||||||
use garage_util::background::*;
|
use garage_util::background::*;
|
||||||
use garage_util::error::Error;
|
use garage_util::error::Error;
|
||||||
|
use garage_util::migrate::Migrate;
|
||||||
|
|
||||||
use crate::*;
|
use crate::*;
|
||||||
|
|
||||||
pub async fn launch_online_repair(garage: Arc<Garage>, opt: RepairOpt) {
|
pub async fn launch_online_repair(
|
||||||
|
garage: &Arc<Garage>,
|
||||||
|
bg: &BackgroundRunner,
|
||||||
|
opt: RepairOpt,
|
||||||
|
) -> Result<(), Error> {
|
||||||
match opt.what {
|
match opt.what {
|
||||||
RepairWhat::Tables => {
|
RepairWhat::Tables => {
|
||||||
info!("Launching a full sync of tables");
|
info!("Launching a full sync of tables");
|
||||||
garage.bucket_table.syncer.add_full_sync();
|
garage.bucket_table.syncer.add_full_sync()?;
|
||||||
garage.object_table.syncer.add_full_sync();
|
garage.object_table.syncer.add_full_sync()?;
|
||||||
garage.version_table.syncer.add_full_sync();
|
garage.version_table.syncer.add_full_sync()?;
|
||||||
garage.block_ref_table.syncer.add_full_sync();
|
garage.block_ref_table.syncer.add_full_sync()?;
|
||||||
garage.key_table.syncer.add_full_sync();
|
garage.key_table.syncer.add_full_sync()?;
|
||||||
}
|
}
|
||||||
RepairWhat::Versions => {
|
RepairWhat::Versions => {
|
||||||
info!("Repairing the versions table");
|
info!("Repairing the versions table");
|
||||||
garage
|
bg.spawn_worker(RepairVersionsWorker::new(garage.clone()));
|
||||||
.background
|
|
||||||
.spawn_worker(RepairVersionsWorker::new(garage.clone()));
|
|
||||||
}
|
}
|
||||||
RepairWhat::BlockRefs => {
|
RepairWhat::BlockRefs => {
|
||||||
info!("Repairing the block refs table");
|
info!("Repairing the block refs table");
|
||||||
garage
|
bg.spawn_worker(RepairBlockrefsWorker::new(garage.clone()));
|
||||||
.background
|
|
||||||
.spawn_worker(RepairBlockrefsWorker::new(garage.clone()));
|
|
||||||
}
|
}
|
||||||
RepairWhat::Blocks => {
|
RepairWhat::Blocks => {
|
||||||
info!("Repairing the stored blocks");
|
info!("Repairing the stored blocks");
|
||||||
garage
|
bg.spawn_worker(garage_block::repair::RepairWorker::new(
|
||||||
.background
|
|
||||||
.spawn_worker(garage_block::repair::RepairWorker::new(
|
|
||||||
garage.block_manager.clone(),
|
garage.block_manager.clone(),
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
@ -52,13 +51,18 @@ pub async fn launch_online_repair(garage: Arc<Garage>, opt: RepairOpt) {
|
||||||
ScrubCmd::Resume => ScrubWorkerCommand::Resume,
|
ScrubCmd::Resume => ScrubWorkerCommand::Resume,
|
||||||
ScrubCmd::Cancel => ScrubWorkerCommand::Cancel,
|
ScrubCmd::Cancel => ScrubWorkerCommand::Cancel,
|
||||||
ScrubCmd::SetTranquility { tranquility } => {
|
ScrubCmd::SetTranquility { tranquility } => {
|
||||||
ScrubWorkerCommand::SetTranquility(tranquility)
|
garage
|
||||||
|
.block_manager
|
||||||
|
.scrub_persister
|
||||||
|
.set_with(|x| x.tranquility = tranquility)?;
|
||||||
|
return Ok(());
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
info!("Sending command to scrub worker: {:?}", cmd);
|
info!("Sending command to scrub worker: {:?}", cmd);
|
||||||
garage.block_manager.send_scrub_command(cmd).await;
|
garage.block_manager.send_scrub_command(cmd).await?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
// ----
|
// ----
|
||||||
|
@ -85,25 +89,23 @@ impl Worker for RepairVersionsWorker {
|
||||||
"Version repair worker".into()
|
"Version repair worker".into()
|
||||||
}
|
}
|
||||||
|
|
||||||
fn info(&self) -> Option<String> {
|
fn status(&self) -> WorkerStatus {
|
||||||
Some(format!("{} items done", self.counter))
|
WorkerStatus {
|
||||||
|
progress: Some(self.counter.to_string()),
|
||||||
|
..Default::default()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
|
async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
|
||||||
let item_bytes = match self.garage.version_table.data.store.get_gt(&self.pos)? {
|
let (item_bytes, next_pos) = match self.garage.version_table.data.store.get_gt(&self.pos)? {
|
||||||
Some((k, v)) => {
|
Some((k, v)) => (v, k),
|
||||||
self.pos = k;
|
|
||||||
v
|
|
||||||
}
|
|
||||||
None => {
|
None => {
|
||||||
info!("repair_versions: finished, done {}", self.counter);
|
info!("repair_versions: finished, done {}", self.counter);
|
||||||
return Ok(WorkerState::Done);
|
return Ok(WorkerState::Done);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
self.counter += 1;
|
let version = Version::decode(&item_bytes).ok_or_message("Cannot decode Version")?;
|
||||||
|
|
||||||
let version = rmp_serde::decode::from_read_ref::<_, Version>(&item_bytes)?;
|
|
||||||
if !version.deleted.get() {
|
if !version.deleted.get() {
|
||||||
let object = self
|
let object = self
|
||||||
.garage
|
.garage
|
||||||
|
@ -131,10 +133,13 @@ impl Worker for RepairVersionsWorker {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
self.counter += 1;
|
||||||
|
self.pos = next_pos;
|
||||||
|
|
||||||
Ok(WorkerState::Busy)
|
Ok(WorkerState::Busy)
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn wait_for_work(&mut self, _must_exit: &watch::Receiver<bool>) -> WorkerState {
|
async fn wait_for_work(&mut self) -> WorkerState {
|
||||||
unreachable!()
|
unreachable!()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -163,25 +168,24 @@ impl Worker for RepairBlockrefsWorker {
|
||||||
"Block refs repair worker".into()
|
"Block refs repair worker".into()
|
||||||
}
|
}
|
||||||
|
|
||||||
fn info(&self) -> Option<String> {
|
fn status(&self) -> WorkerStatus {
|
||||||
Some(format!("{} items done", self.counter))
|
WorkerStatus {
|
||||||
|
progress: Some(self.counter.to_string()),
|
||||||
|
..Default::default()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
|
async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
|
||||||
let item_bytes = match self.garage.block_ref_table.data.store.get_gt(&self.pos)? {
|
let (item_bytes, next_pos) =
|
||||||
Some((k, v)) => {
|
match self.garage.block_ref_table.data.store.get_gt(&self.pos)? {
|
||||||
self.pos = k;
|
Some((k, v)) => (v, k),
|
||||||
v
|
|
||||||
}
|
|
||||||
None => {
|
None => {
|
||||||
info!("repair_block_ref: finished, done {}", self.counter);
|
info!("repair_block_ref: finished, done {}", self.counter);
|
||||||
return Ok(WorkerState::Done);
|
return Ok(WorkerState::Done);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
self.counter += 1;
|
let block_ref = BlockRef::decode(&item_bytes).ok_or_message("Cannot decode BlockRef")?;
|
||||||
|
|
||||||
let block_ref = rmp_serde::decode::from_read_ref::<_, BlockRef>(&item_bytes)?;
|
|
||||||
if !block_ref.deleted.get() {
|
if !block_ref.deleted.get() {
|
||||||
let version = self
|
let version = self
|
||||||
.garage
|
.garage
|
||||||
|
@ -206,10 +210,13 @@ impl Worker for RepairBlockrefsWorker {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
self.counter += 1;
|
||||||
|
self.pos = next_pos;
|
||||||
|
|
||||||
Ok(WorkerState::Busy)
|
Ok(WorkerState::Busy)
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn wait_for_work(&mut self, _must_exit: &watch::Receiver<bool>) -> WorkerState {
|
async fn wait_for_work(&mut self) -> WorkerState {
|
||||||
unreachable!()
|
unreachable!()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -35,12 +35,15 @@ pub async fn run_server(config_file: PathBuf) -> Result<(), Error> {
|
||||||
#[cfg(feature = "metrics")]
|
#[cfg(feature = "metrics")]
|
||||||
let metrics_exporter = opentelemetry_prometheus::exporter().init();
|
let metrics_exporter = opentelemetry_prometheus::exporter().init();
|
||||||
|
|
||||||
|
info!("Initializing Garage main data store...");
|
||||||
|
let garage = Garage::new(config.clone())?;
|
||||||
|
|
||||||
info!("Initializing background runner...");
|
info!("Initializing background runner...");
|
||||||
let watch_cancel = watch_shutdown_signal();
|
let watch_cancel = watch_shutdown_signal();
|
||||||
let (background, await_background_done) = BackgroundRunner::new(16, watch_cancel.clone());
|
let (background, await_background_done) = BackgroundRunner::new(watch_cancel.clone());
|
||||||
|
|
||||||
info!("Initializing Garage main data store...");
|
info!("Spawning Garage workers...");
|
||||||
let garage = Garage::new(config.clone(), background)?;
|
garage.spawn_workers(&background);
|
||||||
|
|
||||||
if config.admin.trace_sink.is_some() {
|
if config.admin.trace_sink.is_some() {
|
||||||
info!("Initialize tracing...");
|
info!("Initialize tracing...");
|
||||||
|
@ -63,7 +66,7 @@ pub async fn run_server(config_file: PathBuf) -> Result<(), Error> {
|
||||||
let run_system = tokio::spawn(garage.system.clone().run(watch_cancel.clone()));
|
let run_system = tokio::spawn(garage.system.clone().run(watch_cancel.clone()));
|
||||||
|
|
||||||
info!("Create admin RPC handler...");
|
info!("Create admin RPC handler...");
|
||||||
AdminRpcHandler::new(garage.clone());
|
AdminRpcHandler::new(garage.clone(), background.clone());
|
||||||
|
|
||||||
// ---- Launch public-facing API servers ----
|
// ---- Launch public-facing API servers ----
|
||||||
|
|
||||||
|
|
|
@ -1,4 +1,5 @@
|
||||||
use crate::common;
|
use crate::common;
|
||||||
|
use crate::common::ext::CommandExt;
|
||||||
use aws_sdk_s3::model::BucketLocationConstraint;
|
use aws_sdk_s3::model::BucketLocationConstraint;
|
||||||
use aws_sdk_s3::output::DeleteBucketOutput;
|
use aws_sdk_s3::output::DeleteBucketOutput;
|
||||||
|
|
||||||
|
@ -8,6 +9,27 @@ async fn test_bucket_all() {
|
||||||
let bucket_name = "hello";
|
let bucket_name = "hello";
|
||||||
|
|
||||||
{
|
{
|
||||||
|
// Check bucket cannot be created if not authorized
|
||||||
|
ctx.garage
|
||||||
|
.command()
|
||||||
|
.args(["key", "deny"])
|
||||||
|
.args(["--create-bucket", &ctx.garage.key.id])
|
||||||
|
.quiet()
|
||||||
|
.expect_success_output("Could not deny key to create buckets");
|
||||||
|
|
||||||
|
// Try create bucket, should fail
|
||||||
|
let r = ctx.client.create_bucket().bucket(bucket_name).send().await;
|
||||||
|
assert!(r.is_err());
|
||||||
|
}
|
||||||
|
{
|
||||||
|
// Now allow key to create bucket
|
||||||
|
ctx.garage
|
||||||
|
.command()
|
||||||
|
.args(["key", "allow"])
|
||||||
|
.args(["--create-bucket", &ctx.garage.key.id])
|
||||||
|
.quiet()
|
||||||
|
.expect_success_output("Could not deny key to create buckets");
|
||||||
|
|
||||||
// Create bucket
|
// Create bucket
|
||||||
//@TODO check with an invalid bucket name + with an already existing bucket
|
//@TODO check with an invalid bucket name + with an already existing bucket
|
||||||
let r = ctx
|
let r = ctx
|
||||||
|
|
|
@ -1,6 +1,7 @@
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
|
|
||||||
use crate::common;
|
use crate::common;
|
||||||
|
use crate::common::ext::CommandExt;
|
||||||
use common::custom_requester::BodySignature;
|
use common::custom_requester::BodySignature;
|
||||||
use hyper::Method;
|
use hyper::Method;
|
||||||
|
|
||||||
|
@ -105,6 +106,13 @@ async fn test_create_bucket_streaming() {
|
||||||
let ctx = common::context();
|
let ctx = common::context();
|
||||||
let bucket = "createbucket-streaming";
|
let bucket = "createbucket-streaming";
|
||||||
|
|
||||||
|
ctx.garage
|
||||||
|
.command()
|
||||||
|
.args(["key", "allow"])
|
||||||
|
.args(["--create-bucket", &ctx.garage.key.id])
|
||||||
|
.quiet()
|
||||||
|
.expect_success_output("Could not allow key to create buckets");
|
||||||
|
|
||||||
{
|
{
|
||||||
// create bucket
|
// create bucket
|
||||||
let _ = ctx
|
let _ = ctx
|
||||||
|
|
|
@ -22,7 +22,7 @@ tokio = "1.17.0"
|
||||||
|
|
||||||
# cli deps
|
# cli deps
|
||||||
clap = { version = "3.1.18", optional = true, features = ["derive", "env"] }
|
clap = { version = "3.1.18", optional = true, features = ["derive", "env"] }
|
||||||
garage_util = { version = "0.8.0", path = "../util", optional = true }
|
garage_util = { version = "0.8.1", path = "../util", optional = true }
|
||||||
|
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
[package]
|
[package]
|
||||||
name = "garage_model"
|
name = "garage_model"
|
||||||
version = "0.8.0"
|
version = "0.8.1"
|
||||||
authors = ["Alex Auvolat <alex@adnab.me>"]
|
authors = ["Alex Auvolat <alex@adnab.me>"]
|
||||||
edition = "2018"
|
edition = "2018"
|
||||||
license = "AGPL-3.0"
|
license = "AGPL-3.0"
|
||||||
|
@ -14,11 +14,11 @@ path = "lib.rs"
|
||||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
garage_db = { version = "0.8.0", path = "../db" }
|
garage_db = { version = "0.8.1", default-features = false, path = "../db" }
|
||||||
garage_rpc = { version = "0.8.0", path = "../rpc" }
|
garage_rpc = { version = "0.8.1", path = "../rpc" }
|
||||||
garage_table = { version = "0.8.0", path = "../table" }
|
garage_table = { version = "0.8.1", path = "../table" }
|
||||||
garage_block = { version = "0.8.0", path = "../block" }
|
garage_block = { version = "0.8.1", path = "../block" }
|
||||||
garage_util = { version = "0.8.0", path = "../util" }
|
garage_util = { version = "0.8.1", path = "../util" }
|
||||||
|
|
||||||
async-trait = "0.1.7"
|
async-trait = "0.1.7"
|
||||||
arc-swap = "1.0"
|
arc-swap = "1.0"
|
||||||
|
@ -30,7 +30,6 @@ tracing = "0.1.30"
|
||||||
rand = "0.8"
|
rand = "0.8"
|
||||||
zstd = { version = "0.9", default-features = false }
|
zstd = { version = "0.9", default-features = false }
|
||||||
|
|
||||||
rmp-serde = "0.15"
|
|
||||||
serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
|
serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
|
||||||
serde_bytes = "0.11"
|
serde_bytes = "0.11"
|
||||||
|
|
||||||
|
@ -42,6 +41,7 @@ opentelemetry = "0.17"
|
||||||
netapp = "0.5"
|
netapp = "0.5"
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
|
default = [ "sled" ]
|
||||||
k2v = [ "garage_util/k2v" ]
|
k2v = [ "garage_util/k2v" ]
|
||||||
lmdb = [ "garage_db/lmdb" ]
|
lmdb = [ "garage_db/lmdb" ]
|
||||||
sled = [ "garage_db/sled" ]
|
sled = [ "garage_db/sled" ]
|
||||||
|
|
|
@ -1,18 +1,26 @@
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
|
|
||||||
use garage_util::data::*;
|
use garage_util::data::*;
|
||||||
|
|
||||||
use garage_table::crdt::*;
|
use garage_table::crdt::*;
|
||||||
use garage_table::*;
|
use garage_table::*;
|
||||||
|
|
||||||
/// The bucket alias table holds the names given to buckets
|
mod v08 {
|
||||||
/// in the global namespace.
|
use garage_util::crdt;
|
||||||
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
use garage_util::data::Uuid;
|
||||||
pub struct BucketAlias {
|
use serde::{Deserialize, Serialize};
|
||||||
name: String,
|
|
||||||
|
/// The bucket alias table holds the names given to buckets
|
||||||
|
/// in the global namespace.
|
||||||
|
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
||||||
|
pub struct BucketAlias {
|
||||||
|
pub(super) name: String,
|
||||||
pub state: crdt::Lww<Option<Uuid>>,
|
pub state: crdt::Lww<Option<Uuid>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl garage_util::migrate::InitialFormat for BucketAlias {}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub use v08::*;
|
||||||
|
|
||||||
impl BucketAlias {
|
impl BucketAlias {
|
||||||
pub fn new(name: String, ts: u64, bucket_id: Option<Uuid>) -> Option<Self> {
|
pub fn new(name: String, ts: u64, bucket_id: Option<Uuid>) -> Option<Self> {
|
||||||
if !is_valid_bucket_name(&name) {
|
if !is_valid_bucket_name(&name) {
|
||||||
|
|
|
@ -1,5 +1,3 @@
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
|
|
||||||
use garage_table::crdt::*;
|
use garage_table::crdt::*;
|
||||||
use garage_table::*;
|
use garage_table::*;
|
||||||
use garage_util::data::*;
|
use garage_util::data::*;
|
||||||
|
@ -7,22 +5,28 @@ use garage_util::time::*;
|
||||||
|
|
||||||
use crate::permission::BucketKeyPerm;
|
use crate::permission::BucketKeyPerm;
|
||||||
|
|
||||||
/// A bucket is a collection of objects
|
mod v08 {
|
||||||
///
|
use crate::permission::BucketKeyPerm;
|
||||||
/// Its parameters are not directly accessible as:
|
use garage_util::crdt;
|
||||||
/// - It must be possible to merge paramaters, hence the use of a LWW CRDT.
|
use garage_util::data::Uuid;
|
||||||
/// - A bucket has 2 states, Present or Deleted and parameters make sense only if present.
|
use serde::{Deserialize, Serialize};
|
||||||
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
|
||||||
pub struct Bucket {
|
/// A bucket is a collection of objects
|
||||||
|
///
|
||||||
|
/// Its parameters are not directly accessible as:
|
||||||
|
/// - It must be possible to merge paramaters, hence the use of a LWW CRDT.
|
||||||
|
/// - A bucket has 2 states, Present or Deleted and parameters make sense only if present.
|
||||||
|
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
||||||
|
pub struct Bucket {
|
||||||
/// ID of the bucket
|
/// ID of the bucket
|
||||||
pub id: Uuid,
|
pub id: Uuid,
|
||||||
/// State, and configuration if not deleted, of the bucket
|
/// State, and configuration if not deleted, of the bucket
|
||||||
pub state: crdt::Deletable<BucketParams>,
|
pub state: crdt::Deletable<BucketParams>,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Configuration for a bucket
|
/// Configuration for a bucket
|
||||||
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
||||||
pub struct BucketParams {
|
pub struct BucketParams {
|
||||||
/// Bucket's creation date
|
/// Bucket's creation date
|
||||||
pub creation_date: u64,
|
pub creation_date: u64,
|
||||||
/// Map of key with access to the bucket, and what kind of access they give
|
/// Map of key with access to the bucket, and what kind of access they give
|
||||||
|
@ -47,32 +51,37 @@ pub struct BucketParams {
|
||||||
/// Bucket quotas
|
/// Bucket quotas
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
pub quotas: crdt::Lww<BucketQuotas>,
|
pub quotas: crdt::Lww<BucketQuotas>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
||||||
pub struct WebsiteConfig {
|
pub struct WebsiteConfig {
|
||||||
pub index_document: String,
|
pub index_document: String,
|
||||||
pub error_document: Option<String>,
|
pub error_document: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
||||||
pub struct CorsRule {
|
pub struct CorsRule {
|
||||||
pub id: Option<String>,
|
pub id: Option<String>,
|
||||||
pub max_age_seconds: Option<u64>,
|
pub max_age_seconds: Option<u64>,
|
||||||
pub allow_origins: Vec<String>,
|
pub allow_origins: Vec<String>,
|
||||||
pub allow_methods: Vec<String>,
|
pub allow_methods: Vec<String>,
|
||||||
pub allow_headers: Vec<String>,
|
pub allow_headers: Vec<String>,
|
||||||
pub expose_headers: Vec<String>,
|
pub expose_headers: Vec<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Default, PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
|
#[derive(Default, PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
|
||||||
pub struct BucketQuotas {
|
pub struct BucketQuotas {
|
||||||
/// Maximum size in bytes (bucket size = sum of sizes of objects in the bucket)
|
/// Maximum size in bytes (bucket size = sum of sizes of objects in the bucket)
|
||||||
pub max_size: Option<u64>,
|
pub max_size: Option<u64>,
|
||||||
/// Maximum number of non-deleted objects in the bucket
|
/// Maximum number of non-deleted objects in the bucket
|
||||||
pub max_objects: Option<u64>,
|
pub max_objects: Option<u64>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl garage_util::migrate::InitialFormat for Bucket {}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub use v08::*;
|
||||||
|
|
||||||
impl AutoCrdt for BucketQuotas {
|
impl AutoCrdt for BucketQuotas {
|
||||||
const WARN_IF_DIFFERENT: bool = true;
|
const WARN_IF_DIFFERENT: bool = true;
|
||||||
}
|
}
|
||||||
|
|
|
@ -8,10 +8,10 @@ use garage_util::background::*;
|
||||||
use garage_util::config::*;
|
use garage_util::config::*;
|
||||||
use garage_util::error::*;
|
use garage_util::error::*;
|
||||||
|
|
||||||
|
use garage_rpc::replication_mode::ReplicationMode;
|
||||||
use garage_rpc::system::System;
|
use garage_rpc::system::System;
|
||||||
|
|
||||||
use garage_block::manager::*;
|
use garage_block::manager::*;
|
||||||
use garage_table::replication::ReplicationMode;
|
|
||||||
use garage_table::replication::TableFullReplication;
|
use garage_table::replication::TableFullReplication;
|
||||||
use garage_table::replication::TableShardedReplication;
|
use garage_table::replication::TableShardedReplication;
|
||||||
use garage_table::*;
|
use garage_table::*;
|
||||||
|
@ -33,11 +33,14 @@ use crate::k2v::{item_table::*, poll::*, rpc::*};
|
||||||
pub struct Garage {
|
pub struct Garage {
|
||||||
/// The parsed configuration Garage is running
|
/// The parsed configuration Garage is running
|
||||||
pub config: Config,
|
pub config: Config,
|
||||||
|
/// The set of background variables that can be viewed/modified at runtime
|
||||||
|
pub bg_vars: vars::BgVars,
|
||||||
|
|
||||||
|
/// The replication mode of this cluster
|
||||||
|
pub replication_mode: ReplicationMode,
|
||||||
|
|
||||||
/// The local database
|
/// The local database
|
||||||
pub db: db::Db,
|
pub db: db::Db,
|
||||||
/// A background job runner
|
|
||||||
pub background: Arc<BackgroundRunner>,
|
|
||||||
/// The membership manager
|
/// The membership manager
|
||||||
pub system: Arc<System>,
|
pub system: Arc<System>,
|
||||||
/// The block manager
|
/// The block manager
|
||||||
|
@ -75,7 +78,7 @@ pub struct GarageK2V {
|
||||||
|
|
||||||
impl Garage {
|
impl Garage {
|
||||||
/// Create and run garage
|
/// Create and run garage
|
||||||
pub fn new(config: Config, background: Arc<BackgroundRunner>) -> Result<Arc<Self>, Error> {
|
pub fn new(config: Config) -> Result<Arc<Self>, Error> {
|
||||||
// Create meta dir and data dir if they don't exist already
|
// Create meta dir and data dir if they don't exist already
|
||||||
std::fs::create_dir_all(&config.metadata_dir)
|
std::fs::create_dir_all(&config.metadata_dir)
|
||||||
.ok_or_message("Unable to create Garage metadata directory")?;
|
.ok_or_message("Unable to create Garage metadata directory")?;
|
||||||
|
@ -156,7 +159,7 @@ impl Garage {
|
||||||
};
|
};
|
||||||
|
|
||||||
let network_key = NetworkKey::from_slice(
|
let network_key = NetworkKey::from_slice(
|
||||||
&hex::decode(&config.rpc_secret).expect("Invalid RPC secret key")[..],
|
&hex::decode(&config.rpc_secret.as_ref().unwrap()).expect("Invalid RPC secret key")[..],
|
||||||
)
|
)
|
||||||
.expect("Invalid RPC secret key");
|
.expect("Invalid RPC secret key");
|
||||||
|
|
||||||
|
@ -164,12 +167,7 @@ impl Garage {
|
||||||
.expect("Invalid replication_mode in config file.");
|
.expect("Invalid replication_mode in config file.");
|
||||||
|
|
||||||
info!("Initialize membership management system...");
|
info!("Initialize membership management system...");
|
||||||
let system = System::new(
|
let system = System::new(network_key, replication_mode, &config)?;
|
||||||
network_key,
|
|
||||||
background.clone(),
|
|
||||||
replication_mode.replication_factor(),
|
|
||||||
&config,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
let data_rep_param = TableShardedReplication {
|
let data_rep_param = TableShardedReplication {
|
||||||
system: system.clone(),
|
system: system.clone(),
|
||||||
|
@ -227,7 +225,6 @@ impl Garage {
|
||||||
info!("Initialize version_table...");
|
info!("Initialize version_table...");
|
||||||
let version_table = Table::new(
|
let version_table = Table::new(
|
||||||
VersionTable {
|
VersionTable {
|
||||||
background: background.clone(),
|
|
||||||
block_ref_table: block_ref_table.clone(),
|
block_ref_table: block_ref_table.clone(),
|
||||||
},
|
},
|
||||||
meta_rep_param.clone(),
|
meta_rep_param.clone(),
|
||||||
|
@ -242,7 +239,6 @@ impl Garage {
|
||||||
#[allow(clippy::redundant_clone)]
|
#[allow(clippy::redundant_clone)]
|
||||||
let object_table = Table::new(
|
let object_table = Table::new(
|
||||||
ObjectTable {
|
ObjectTable {
|
||||||
background: background.clone(),
|
|
||||||
version_table: version_table.clone(),
|
version_table: version_table.clone(),
|
||||||
object_counter_table: object_counter_table.clone(),
|
object_counter_table: object_counter_table.clone(),
|
||||||
},
|
},
|
||||||
|
@ -255,11 +251,16 @@ impl Garage {
|
||||||
#[cfg(feature = "k2v")]
|
#[cfg(feature = "k2v")]
|
||||||
let k2v = GarageK2V::new(system.clone(), &db, meta_rep_param);
|
let k2v = GarageK2V::new(system.clone(), &db, meta_rep_param);
|
||||||
|
|
||||||
|
// Initialize bg vars
|
||||||
|
let mut bg_vars = vars::BgVars::new();
|
||||||
|
block_manager.register_bg_vars(&mut bg_vars);
|
||||||
|
|
||||||
// -- done --
|
// -- done --
|
||||||
Ok(Arc::new(Self {
|
Ok(Arc::new(Self {
|
||||||
config,
|
config,
|
||||||
|
bg_vars,
|
||||||
|
replication_mode,
|
||||||
db,
|
db,
|
||||||
background,
|
|
||||||
system,
|
system,
|
||||||
block_manager,
|
block_manager,
|
||||||
bucket_table,
|
bucket_table,
|
||||||
|
@ -274,6 +275,22 @@ impl Garage {
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn spawn_workers(&self, bg: &BackgroundRunner) {
|
||||||
|
self.block_manager.spawn_workers(bg);
|
||||||
|
|
||||||
|
self.bucket_table.spawn_workers(bg);
|
||||||
|
self.bucket_alias_table.spawn_workers(bg);
|
||||||
|
self.key_table.spawn_workers(bg);
|
||||||
|
|
||||||
|
self.object_table.spawn_workers(bg);
|
||||||
|
self.object_counter_table.spawn_workers(bg);
|
||||||
|
self.version_table.spawn_workers(bg);
|
||||||
|
self.block_ref_table.spawn_workers(bg);
|
||||||
|
|
||||||
|
#[cfg(feature = "k2v")]
|
||||||
|
self.k2v.spawn_workers(bg);
|
||||||
|
}
|
||||||
|
|
||||||
pub fn bucket_helper(&self) -> helper::bucket::BucketHelper {
|
pub fn bucket_helper(&self) -> helper::bucket::BucketHelper {
|
||||||
helper::bucket::BucketHelper(self)
|
helper::bucket::BucketHelper(self)
|
||||||
}
|
}
|
||||||
|
@ -308,4 +325,9 @@ impl GarageK2V {
|
||||||
rpc,
|
rpc,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn spawn_workers(&self, bg: &BackgroundRunner) {
|
||||||
|
self.item_table.spawn_workers(bg);
|
||||||
|
self.counter_table.spawn_workers(bg);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,19 +1,18 @@
|
||||||
use core::ops::Bound;
|
use core::ops::Bound;
|
||||||
use std::collections::{hash_map, BTreeMap, HashMap};
|
use std::collections::{BTreeMap, HashMap};
|
||||||
use std::marker::PhantomData;
|
use std::marker::PhantomData;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use async_trait::async_trait;
|
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
use tokio::sync::{mpsc, watch};
|
|
||||||
|
|
||||||
use garage_db as db;
|
use garage_db as db;
|
||||||
|
|
||||||
use garage_rpc::ring::Ring;
|
use garage_rpc::ring::Ring;
|
||||||
use garage_rpc::system::System;
|
use garage_rpc::system::System;
|
||||||
use garage_util::background::*;
|
use garage_util::background::BackgroundRunner;
|
||||||
use garage_util::data::*;
|
use garage_util::data::*;
|
||||||
use garage_util::error::*;
|
use garage_util::error::*;
|
||||||
|
use garage_util::migrate::Migrate;
|
||||||
use garage_util::time::*;
|
use garage_util::time::*;
|
||||||
|
|
||||||
use garage_table::crdt::*;
|
use garage_table::crdt::*;
|
||||||
|
@ -31,14 +30,44 @@ pub trait CountedItem: Clone + PartialEq + Send + Sync + 'static {
|
||||||
fn counts(&self) -> Vec<(&'static str, i64)>;
|
fn counts(&self) -> Vec<(&'static str, i64)>;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// A counter entry in the global table
|
mod v08 {
|
||||||
#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)]
|
use super::CountedItem;
|
||||||
pub struct CounterEntry<T: CountedItem> {
|
use garage_util::data::Uuid;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use std::collections::BTreeMap;
|
||||||
|
|
||||||
|
// ---- Global part (the table everyone queries) ----
|
||||||
|
|
||||||
|
/// A counter entry in the global table
|
||||||
|
#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)]
|
||||||
|
pub struct CounterEntry<T: CountedItem> {
|
||||||
pub pk: T::CP,
|
pub pk: T::CP,
|
||||||
pub sk: T::CS,
|
pub sk: T::CS,
|
||||||
pub values: BTreeMap<String, CounterValue>,
|
pub values: BTreeMap<String, CounterValue>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A counter entry in the global table
|
||||||
|
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
||||||
|
pub struct CounterValue {
|
||||||
|
pub node_values: BTreeMap<Uuid, (u64, i64)>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: CountedItem> garage_util::migrate::InitialFormat for CounterEntry<T> {}
|
||||||
|
|
||||||
|
// ---- Local part (the counter we maintain transactionnaly on each node) ----
|
||||||
|
|
||||||
|
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
|
||||||
|
pub(super) struct LocalCounterEntry<T: CountedItem> {
|
||||||
|
pub(super) pk: T::CP,
|
||||||
|
pub(super) sk: T::CS,
|
||||||
|
pub(super) values: BTreeMap<String, (u64, i64)>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: CountedItem> garage_util::migrate::InitialFormat for LocalCounterEntry<T> {}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub use v08::*;
|
||||||
|
|
||||||
impl<T: CountedItem> Entry<T::CP, T::CS> for CounterEntry<T> {
|
impl<T: CountedItem> Entry<T::CP, T::CS> for CounterEntry<T> {
|
||||||
fn partition_key(&self) -> &T::CP {
|
fn partition_key(&self) -> &T::CP {
|
||||||
&self.pk
|
&self.pk
|
||||||
|
@ -80,12 +109,6 @@ impl<T: CountedItem> CounterEntry<T> {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// A counter entry in the global table
|
|
||||||
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
|
||||||
pub struct CounterValue {
|
|
||||||
pub node_values: BTreeMap<Uuid, (u64, i64)>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: CountedItem> Crdt for CounterEntry<T> {
|
impl<T: CountedItem> Crdt for CounterEntry<T> {
|
||||||
fn merge(&mut self, other: &Self) {
|
fn merge(&mut self, other: &Self) {
|
||||||
for (name, e2) in other.values.iter() {
|
for (name, e2) in other.values.iter() {
|
||||||
|
@ -142,7 +165,6 @@ impl<T: CountedItem> TableSchema for CounterTable<T> {
|
||||||
pub struct IndexCounter<T: CountedItem> {
|
pub struct IndexCounter<T: CountedItem> {
|
||||||
this_node: Uuid,
|
this_node: Uuid,
|
||||||
local_counter: db::Tree,
|
local_counter: db::Tree,
|
||||||
propagate_tx: mpsc::UnboundedSender<(T::CP, T::CS, LocalCounterEntry<T>)>,
|
|
||||||
pub table: Arc<Table<CounterTable<T>, TableShardedReplication>>,
|
pub table: Arc<Table<CounterTable<T>, TableShardedReplication>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -152,16 +174,11 @@ impl<T: CountedItem> IndexCounter<T> {
|
||||||
replication: TableShardedReplication,
|
replication: TableShardedReplication,
|
||||||
db: &db::Db,
|
db: &db::Db,
|
||||||
) -> Arc<Self> {
|
) -> Arc<Self> {
|
||||||
let background = system.background.clone();
|
Arc::new(Self {
|
||||||
|
|
||||||
let (propagate_tx, propagate_rx) = mpsc::unbounded_channel();
|
|
||||||
|
|
||||||
let this = Arc::new(Self {
|
|
||||||
this_node: system.id,
|
this_node: system.id,
|
||||||
local_counter: db
|
local_counter: db
|
||||||
.open_tree(format!("local_counter_v2:{}", T::COUNTER_TABLE_NAME))
|
.open_tree(format!("local_counter_v2:{}", T::COUNTER_TABLE_NAME))
|
||||||
.expect("Unable to open local counter tree"),
|
.expect("Unable to open local counter tree"),
|
||||||
propagate_tx,
|
|
||||||
table: Table::new(
|
table: Table::new(
|
||||||
CounterTable {
|
CounterTable {
|
||||||
_phantom_t: Default::default(),
|
_phantom_t: Default::default(),
|
||||||
|
@ -170,16 +187,11 @@ impl<T: CountedItem> IndexCounter<T> {
|
||||||
system,
|
system,
|
||||||
db,
|
db,
|
||||||
),
|
),
|
||||||
});
|
})
|
||||||
|
}
|
||||||
|
|
||||||
background.spawn_worker(IndexPropagatorWorker {
|
pub fn spawn_workers(&self, bg: &BackgroundRunner) {
|
||||||
index_counter: this.clone(),
|
self.table.spawn_workers(bg);
|
||||||
propagate_rx,
|
|
||||||
buf: HashMap::new(),
|
|
||||||
errors: 0,
|
|
||||||
});
|
|
||||||
|
|
||||||
this
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn count(
|
pub fn count(
|
||||||
|
@ -208,11 +220,9 @@ impl<T: CountedItem> IndexCounter<T> {
|
||||||
let tree_key = self.table.data.tree_key(pk, sk);
|
let tree_key = self.table.data.tree_key(pk, sk);
|
||||||
|
|
||||||
let mut entry = match tx.get(&self.local_counter, &tree_key[..])? {
|
let mut entry = match tx.get(&self.local_counter, &tree_key[..])? {
|
||||||
Some(old_bytes) => {
|
Some(old_bytes) => LocalCounterEntry::<T>::decode(&old_bytes)
|
||||||
rmp_serde::decode::from_read_ref::<_, LocalCounterEntry<T>>(&old_bytes)
|
.ok_or_message("Cannot decode local counter entry")
|
||||||
.map_err(Error::RmpDecode)
|
.map_err(db::TxError::Abort)?,
|
||||||
.map_err(db::TxError::Abort)?
|
|
||||||
}
|
|
||||||
None => LocalCounterEntry {
|
None => LocalCounterEntry {
|
||||||
pk: pk.clone(),
|
pk: pk.clone(),
|
||||||
sk: sk.clone(),
|
sk: sk.clone(),
|
||||||
|
@ -227,17 +237,14 @@ impl<T: CountedItem> IndexCounter<T> {
|
||||||
ent.1 += *inc;
|
ent.1 += *inc;
|
||||||
}
|
}
|
||||||
|
|
||||||
let new_entry_bytes = rmp_to_vec_all_named(&entry)
|
let new_entry_bytes = entry
|
||||||
|
.encode()
|
||||||
.map_err(Error::RmpEncode)
|
.map_err(Error::RmpEncode)
|
||||||
.map_err(db::TxError::Abort)?;
|
.map_err(db::TxError::Abort)?;
|
||||||
tx.insert(&self.local_counter, &tree_key[..], new_entry_bytes)?;
|
tx.insert(&self.local_counter, &tree_key[..], new_entry_bytes)?;
|
||||||
|
|
||||||
if let Err(e) = self.propagate_tx.send((pk.clone(), sk.clone(), entry)) {
|
let dist_entry = entry.into_counter_entry(self.this_node);
|
||||||
error!(
|
self.table.queue_insert(tx, &dist_entry)?;
|
||||||
"Could not propagate updated counter values, failed to send to channel: {}",
|
|
||||||
e
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
@ -250,23 +257,6 @@ impl<T: CountedItem> IndexCounter<T> {
|
||||||
TS: TableSchema<E = T>,
|
TS: TableSchema<E = T>,
|
||||||
TR: TableReplication,
|
TR: TableReplication,
|
||||||
{
|
{
|
||||||
let save_counter_entry = |entry: CounterEntry<T>| -> Result<(), Error> {
|
|
||||||
let entry_k = self
|
|
||||||
.table
|
|
||||||
.data
|
|
||||||
.tree_key(entry.partition_key(), entry.sort_key());
|
|
||||||
self.table
|
|
||||||
.data
|
|
||||||
.update_entry_with(&entry_k, |ent| match ent {
|
|
||||||
Some(mut ent) => {
|
|
||||||
ent.merge(&entry);
|
|
||||||
ent
|
|
||||||
}
|
|
||||||
None => entry.clone(),
|
|
||||||
})?;
|
|
||||||
Ok(())
|
|
||||||
};
|
|
||||||
|
|
||||||
// 1. Set all old local counters to zero
|
// 1. Set all old local counters to zero
|
||||||
let now = now_msec();
|
let now = now_msec();
|
||||||
let mut next_start: Option<Vec<u8>> = None;
|
let mut next_start: Option<Vec<u8>> = None;
|
||||||
|
@ -289,20 +279,22 @@ impl<T: CountedItem> IndexCounter<T> {
|
||||||
|
|
||||||
info!("zeroing old counters... ({})", hex::encode(&batch[0].0));
|
info!("zeroing old counters... ({})", hex::encode(&batch[0].0));
|
||||||
for (local_counter_k, local_counter) in batch {
|
for (local_counter_k, local_counter) in batch {
|
||||||
let mut local_counter =
|
let mut local_counter = LocalCounterEntry::<T>::decode(&local_counter)
|
||||||
rmp_serde::decode::from_read_ref::<_, LocalCounterEntry<T>>(&local_counter)?;
|
.ok_or_message("Cannot decode local counter entry")?;
|
||||||
|
|
||||||
for (_, tv) in local_counter.values.iter_mut() {
|
for (_, tv) in local_counter.values.iter_mut() {
|
||||||
tv.0 = std::cmp::max(tv.0 + 1, now);
|
tv.0 = std::cmp::max(tv.0 + 1, now);
|
||||||
tv.1 = 0;
|
tv.1 = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
let local_counter_bytes = rmp_to_vec_all_named(&local_counter)?;
|
let local_counter_bytes = local_counter.encode()?;
|
||||||
self.local_counter
|
self.local_counter
|
||||||
.insert(&local_counter_k, &local_counter_bytes)?;
|
.insert(&local_counter_k, &local_counter_bytes)?;
|
||||||
|
|
||||||
let counter_entry = local_counter.into_counter_entry(self.this_node);
|
let counter_entry = local_counter.into_counter_entry(self.this_node);
|
||||||
save_counter_entry(counter_entry)?;
|
self.local_counter
|
||||||
|
.db()
|
||||||
|
.transaction(|mut tx| self.table.queue_insert(&mut tx, &counter_entry))?;
|
||||||
|
|
||||||
next_start = Some(local_counter_k);
|
next_start = Some(local_counter_k);
|
||||||
}
|
}
|
||||||
|
@ -343,9 +335,8 @@ impl<T: CountedItem> IndexCounter<T> {
|
||||||
let local_counter_key = self.table.data.tree_key(pk, sk);
|
let local_counter_key = self.table.data.tree_key(pk, sk);
|
||||||
let mut local_counter = match self.local_counter.get(&local_counter_key)? {
|
let mut local_counter = match self.local_counter.get(&local_counter_key)? {
|
||||||
Some(old_bytes) => {
|
Some(old_bytes) => {
|
||||||
let ent = rmp_serde::decode::from_read_ref::<_, LocalCounterEntry<T>>(
|
let ent = LocalCounterEntry::<T>::decode(&old_bytes)
|
||||||
&old_bytes,
|
.ok_or_message("Cannot decode local counter entry")?;
|
||||||
)?;
|
|
||||||
assert!(ent.pk == *pk);
|
assert!(ent.pk == *pk);
|
||||||
assert!(ent.sk == *sk);
|
assert!(ent.sk == *sk);
|
||||||
ent
|
ent
|
||||||
|
@ -362,12 +353,14 @@ impl<T: CountedItem> IndexCounter<T> {
|
||||||
tv.1 += v;
|
tv.1 += v;
|
||||||
}
|
}
|
||||||
|
|
||||||
let local_counter_bytes = rmp_to_vec_all_named(&local_counter)?;
|
let local_counter_bytes = local_counter.encode()?;
|
||||||
self.local_counter
|
self.local_counter
|
||||||
.insert(&local_counter_key, local_counter_bytes)?;
|
.insert(&local_counter_key, local_counter_bytes)?;
|
||||||
|
|
||||||
let counter_entry = local_counter.into_counter_entry(self.this_node);
|
let counter_entry = local_counter.into_counter_entry(self.this_node);
|
||||||
save_counter_entry(counter_entry)?;
|
self.local_counter
|
||||||
|
.db()
|
||||||
|
.transaction(|mut tx| self.table.queue_insert(&mut tx, &counter_entry))?;
|
||||||
|
|
||||||
next_start = Some(counted_entry_k);
|
next_start = Some(counted_entry_k);
|
||||||
}
|
}
|
||||||
|
@ -378,104 +371,7 @@ impl<T: CountedItem> IndexCounter<T> {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct IndexPropagatorWorker<T: CountedItem> {
|
// ----
|
||||||
index_counter: Arc<IndexCounter<T>>,
|
|
||||||
propagate_rx: mpsc::UnboundedReceiver<(T::CP, T::CS, LocalCounterEntry<T>)>,
|
|
||||||
|
|
||||||
buf: HashMap<Vec<u8>, CounterEntry<T>>,
|
|
||||||
errors: usize,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: CountedItem> IndexPropagatorWorker<T> {
|
|
||||||
fn add_ent(&mut self, pk: T::CP, sk: T::CS, counters: LocalCounterEntry<T>) {
|
|
||||||
let tree_key = self.index_counter.table.data.tree_key(&pk, &sk);
|
|
||||||
let dist_entry = counters.into_counter_entry(self.index_counter.this_node);
|
|
||||||
match self.buf.entry(tree_key) {
|
|
||||||
hash_map::Entry::Vacant(e) => {
|
|
||||||
e.insert(dist_entry);
|
|
||||||
}
|
|
||||||
hash_map::Entry::Occupied(mut e) => {
|
|
||||||
e.get_mut().merge(&dist_entry);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
impl<T: CountedItem> Worker for IndexPropagatorWorker<T> {
|
|
||||||
fn name(&self) -> String {
|
|
||||||
format!("{} index counter propagator", T::COUNTER_TABLE_NAME)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn info(&self) -> Option<String> {
|
|
||||||
if !self.buf.is_empty() {
|
|
||||||
Some(format!("{} items in queue", self.buf.len()))
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn work(&mut self, must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
|
|
||||||
// This loop batches updates to counters to be sent all at once.
|
|
||||||
// They are sent once the propagate_rx channel has been emptied (or is closed).
|
|
||||||
let closed = loop {
|
|
||||||
match self.propagate_rx.try_recv() {
|
|
||||||
Ok((pk, sk, counters)) => {
|
|
||||||
self.add_ent(pk, sk, counters);
|
|
||||||
}
|
|
||||||
Err(mpsc::error::TryRecvError::Empty) => break false,
|
|
||||||
Err(mpsc::error::TryRecvError::Disconnected) => break true,
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
if !self.buf.is_empty() {
|
|
||||||
let entries_k = self.buf.keys().take(100).cloned().collect::<Vec<_>>();
|
|
||||||
let entries = entries_k.iter().map(|k| self.buf.get(k).unwrap());
|
|
||||||
if let Err(e) = self.index_counter.table.insert_many(entries).await {
|
|
||||||
self.errors += 1;
|
|
||||||
if self.errors >= 2 && *must_exit.borrow() {
|
|
||||||
error!("({}) Could not propagate {} counter values: {}, these counters will not be updated correctly.", T::COUNTER_TABLE_NAME, self.buf.len(), e);
|
|
||||||
return Ok(WorkerState::Done);
|
|
||||||
}
|
|
||||||
// Propagate error up to worker manager, it will log it, increment a counter,
|
|
||||||
// and sleep for a certain delay (with exponential backoff), waiting for
|
|
||||||
// things to go back to normal
|
|
||||||
return Err(e);
|
|
||||||
} else {
|
|
||||||
for k in entries_k {
|
|
||||||
self.buf.remove(&k);
|
|
||||||
}
|
|
||||||
self.errors = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
return Ok(WorkerState::Busy);
|
|
||||||
} else if closed {
|
|
||||||
return Ok(WorkerState::Done);
|
|
||||||
} else {
|
|
||||||
return Ok(WorkerState::Idle);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn wait_for_work(&mut self, _must_exit: &watch::Receiver<bool>) -> WorkerState {
|
|
||||||
match self.propagate_rx.recv().await {
|
|
||||||
Some((pk, sk, counters)) => {
|
|
||||||
self.add_ent(pk, sk, counters);
|
|
||||||
WorkerState::Busy
|
|
||||||
}
|
|
||||||
None => match self.buf.is_empty() {
|
|
||||||
false => WorkerState::Busy,
|
|
||||||
true => WorkerState::Done,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
|
|
||||||
struct LocalCounterEntry<T: CountedItem> {
|
|
||||||
pk: T::CP,
|
|
||||||
sk: T::CS,
|
|
||||||
values: BTreeMap<String, (u64, i64)>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: CountedItem> LocalCounterEntry<T> {
|
impl<T: CountedItem> LocalCounterEntry<T> {
|
||||||
fn into_counter_entry(self, this_node: Uuid) -> CounterEntry<T> {
|
fn into_counter_entry(self, this_node: Uuid) -> CounterEntry<T> {
|
||||||
|
|
|
@ -1,7 +1,8 @@
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use std::collections::BTreeMap;
|
use std::collections::BTreeMap;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use garage_db as db;
|
use garage_db as db;
|
||||||
use garage_util::data::*;
|
use garage_util::data::*;
|
||||||
|
|
||||||
|
@ -17,32 +18,43 @@ pub const CONFLICTS: &str = "conflicts";
|
||||||
pub const VALUES: &str = "values";
|
pub const VALUES: &str = "values";
|
||||||
pub const BYTES: &str = "bytes";
|
pub const BYTES: &str = "bytes";
|
||||||
|
|
||||||
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
mod v08 {
|
||||||
pub struct K2VItem {
|
use crate::k2v::causality::K2VNodeId;
|
||||||
|
use garage_util::data::Uuid;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use std::collections::BTreeMap;
|
||||||
|
|
||||||
|
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
||||||
|
pub struct K2VItem {
|
||||||
pub partition: K2VItemPartition,
|
pub partition: K2VItemPartition,
|
||||||
pub sort_key: String,
|
pub sort_key: String,
|
||||||
|
|
||||||
items: BTreeMap<K2VNodeId, DvvsEntry>,
|
pub(super) items: BTreeMap<K2VNodeId, DvvsEntry>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize, Hash)]
|
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize, Hash)]
|
||||||
pub struct K2VItemPartition {
|
pub struct K2VItemPartition {
|
||||||
pub bucket_id: Uuid,
|
pub bucket_id: Uuid,
|
||||||
pub partition_key: String,
|
pub partition_key: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
||||||
struct DvvsEntry {
|
pub struct DvvsEntry {
|
||||||
t_discard: u64,
|
pub(super) t_discard: u64,
|
||||||
values: Vec<(u64, DvvsValue)>,
|
pub(super) values: Vec<(u64, DvvsValue)>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
||||||
pub enum DvvsValue {
|
pub enum DvvsValue {
|
||||||
Value(#[serde(with = "serde_bytes")] Vec<u8>),
|
Value(#[serde(with = "serde_bytes")] Vec<u8>),
|
||||||
Deleted,
|
Deleted,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl garage_util::migrate::InitialFormat for K2VItem {}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub use v08::*;
|
||||||
|
|
||||||
impl K2VItem {
|
impl K2VItem {
|
||||||
/// Creates a new K2VItem when no previous entry existed in the db
|
/// Creates a new K2VItem when no previous entry existed in the db
|
||||||
pub fn new(bucket_id: Uuid, partition_key: String, sort_key: String) -> Self {
|
pub fn new(bucket_id: Uuid, partition_key: String, sort_key: String) -> Self {
|
||||||
|
|
|
@ -273,14 +273,9 @@ impl K2VRpcHandler {
|
||||||
}
|
}
|
||||||
|
|
||||||
fn local_insert(&self, item: &InsertedItem) -> Result<Option<K2VItem>, Error> {
|
fn local_insert(&self, item: &InsertedItem) -> Result<Option<K2VItem>, Error> {
|
||||||
let tree_key = self
|
|
||||||
.item_table
|
|
||||||
.data
|
|
||||||
.tree_key(&item.partition, &item.sort_key);
|
|
||||||
|
|
||||||
self.item_table
|
self.item_table
|
||||||
.data
|
.data
|
||||||
.update_entry_with(&tree_key[..], |ent| {
|
.update_entry_with(&item.partition, &item.sort_key, |ent| {
|
||||||
let mut ent = ent.unwrap_or_else(|| {
|
let mut ent = ent.unwrap_or_else(|| {
|
||||||
K2VItem::new(
|
K2VItem::new(
|
||||||
item.partition.bucket_id,
|
item.partition.bucket_id,
|
||||||
|
|
|
@ -1,26 +1,72 @@
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use garage_table::crdt::*;
|
use garage_util::crdt::{self, Crdt};
|
||||||
use garage_table::*;
|
|
||||||
use garage_util::data::*;
|
use garage_util::data::*;
|
||||||
|
|
||||||
|
use garage_table::{DeletedFilter, EmptyKey, Entry, TableSchema};
|
||||||
|
|
||||||
use crate::permission::BucketKeyPerm;
|
use crate::permission::BucketKeyPerm;
|
||||||
|
|
||||||
use crate::prev::v051::key_table as old;
|
pub(crate) mod v05 {
|
||||||
|
use garage_util::crdt;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
/// An api key
|
/// An api key
|
||||||
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
||||||
pub struct Key {
|
pub struct Key {
|
||||||
|
/// The id of the key (immutable), used as partition key
|
||||||
|
pub key_id: String,
|
||||||
|
|
||||||
|
/// The secret_key associated
|
||||||
|
pub secret_key: String,
|
||||||
|
|
||||||
|
/// Name for the key
|
||||||
|
pub name: crdt::Lww<String>,
|
||||||
|
|
||||||
|
/// Is the key deleted
|
||||||
|
pub deleted: crdt::Bool,
|
||||||
|
|
||||||
|
/// Buckets in which the key is authorized. Empty if `Key` is deleted
|
||||||
|
// CRDT interaction: deleted implies authorized_buckets is empty
|
||||||
|
pub authorized_buckets: crdt::LwwMap<String, PermissionSet>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Permission given to a key in a bucket
|
||||||
|
#[derive(PartialOrd, Ord, PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
||||||
|
pub struct PermissionSet {
|
||||||
|
/// The key can be used to read the bucket
|
||||||
|
pub allow_read: bool,
|
||||||
|
/// The key can be used to write in the bucket
|
||||||
|
pub allow_write: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl crdt::AutoCrdt for PermissionSet {
|
||||||
|
const WARN_IF_DIFFERENT: bool = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
impl garage_util::migrate::InitialFormat for Key {}
|
||||||
|
}
|
||||||
|
|
||||||
|
mod v08 {
|
||||||
|
use super::v05;
|
||||||
|
use crate::permission::BucketKeyPerm;
|
||||||
|
use garage_util::crdt;
|
||||||
|
use garage_util::data::Uuid;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
/// An api key
|
||||||
|
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
||||||
|
pub struct Key {
|
||||||
/// The id of the key (immutable), used as partition key
|
/// The id of the key (immutable), used as partition key
|
||||||
pub key_id: String,
|
pub key_id: String,
|
||||||
|
|
||||||
/// Internal state of the key
|
/// Internal state of the key
|
||||||
pub state: crdt::Deletable<KeyParams>,
|
pub state: crdt::Deletable<KeyParams>,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Configuration for a key
|
/// Configuration for a key
|
||||||
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
||||||
pub struct KeyParams {
|
pub struct KeyParams {
|
||||||
/// The secret_key associated (immutable)
|
/// The secret_key associated (immutable)
|
||||||
pub secret_key: String,
|
pub secret_key: String,
|
||||||
|
|
||||||
|
@ -38,8 +84,38 @@ pub struct KeyParams {
|
||||||
/// A key can have a local view of buckets names it is
|
/// A key can have a local view of buckets names it is
|
||||||
/// the only one to see, this is the namespace for these aliases
|
/// the only one to see, this is the namespace for these aliases
|
||||||
pub local_aliases: crdt::LwwMap<String, Option<Uuid>>,
|
pub local_aliases: crdt::LwwMap<String, Option<Uuid>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl garage_util::migrate::Migrate for Key {
|
||||||
|
type Previous = v05::Key;
|
||||||
|
|
||||||
|
fn migrate(old_k: v05::Key) -> Key {
|
||||||
|
let name = crdt::Lww::raw(old_k.name.timestamp(), old_k.name.get().clone());
|
||||||
|
|
||||||
|
let state = if old_k.deleted.get() {
|
||||||
|
crdt::Deletable::Deleted
|
||||||
|
} else {
|
||||||
|
// Authorized buckets is ignored here,
|
||||||
|
// migration is performed in specific migration code in
|
||||||
|
// garage/migrate.rs
|
||||||
|
crdt::Deletable::Present(KeyParams {
|
||||||
|
secret_key: old_k.secret_key,
|
||||||
|
name,
|
||||||
|
allow_create_bucket: crdt::Lww::new(false),
|
||||||
|
authorized_buckets: crdt::Map::new(),
|
||||||
|
local_aliases: crdt::LwwMap::new(),
|
||||||
|
})
|
||||||
|
};
|
||||||
|
Key {
|
||||||
|
key_id: old_k.key_id,
|
||||||
|
state,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub use v08::*;
|
||||||
|
|
||||||
impl KeyParams {
|
impl KeyParams {
|
||||||
fn new(secret_key: &str, name: &str) -> Self {
|
fn new(secret_key: &str, name: &str) -> Self {
|
||||||
KeyParams {
|
KeyParams {
|
||||||
|
@ -173,28 +249,4 @@ impl TableSchema for KeyTable {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn try_migrate(bytes: &[u8]) -> Option<Self::E> {
|
|
||||||
let old_k = rmp_serde::decode::from_read_ref::<_, old::Key>(bytes).ok()?;
|
|
||||||
let name = crdt::Lww::raw(old_k.name.timestamp(), old_k.name.get().clone());
|
|
||||||
|
|
||||||
let state = if old_k.deleted.get() {
|
|
||||||
crdt::Deletable::Deleted
|
|
||||||
} else {
|
|
||||||
// Authorized buckets is ignored here,
|
|
||||||
// migration is performed in specific migration code in
|
|
||||||
// garage/migrate.rs
|
|
||||||
crdt::Deletable::Present(KeyParams {
|
|
||||||
secret_key: old_k.secret_key,
|
|
||||||
name,
|
|
||||||
allow_create_bucket: crdt::Lww::new(false),
|
|
||||||
authorized_buckets: crdt::Map::new(),
|
|
||||||
local_aliases: crdt::LwwMap::new(),
|
|
||||||
})
|
|
||||||
};
|
|
||||||
Some(Key {
|
|
||||||
key_id: old_k.key_id,
|
|
||||||
state,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -2,6 +2,7 @@ use std::sync::Arc;
|
||||||
|
|
||||||
use garage_util::crdt::*;
|
use garage_util::crdt::*;
|
||||||
use garage_util::data::*;
|
use garage_util::data::*;
|
||||||
|
use garage_util::encode::nonversioned_decode;
|
||||||
use garage_util::error::Error as GarageError;
|
use garage_util::error::Error as GarageError;
|
||||||
use garage_util::time::*;
|
use garage_util::time::*;
|
||||||
|
|
||||||
|
@ -28,8 +29,8 @@ impl Migrate {
|
||||||
let mut old_buckets = vec![];
|
let mut old_buckets = vec![];
|
||||||
for res in tree.iter().map_err(GarageError::from)? {
|
for res in tree.iter().map_err(GarageError::from)? {
|
||||||
let (_k, v) = res.map_err(GarageError::from)?;
|
let (_k, v) = res.map_err(GarageError::from)?;
|
||||||
let bucket = rmp_serde::decode::from_read_ref::<_, old_bucket::Bucket>(&v[..])
|
let bucket =
|
||||||
.map_err(GarageError::from)?;
|
nonversioned_decode::<old_bucket::Bucket>(&v[..]).map_err(GarageError::from)?;
|
||||||
old_buckets.push(bucket);
|
old_buckets.push(bucket);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize};
|
||||||
use garage_table::crdt::Crdt;
|
use garage_table::crdt::Crdt;
|
||||||
use garage_table::*;
|
use garage_table::*;
|
||||||
|
|
||||||
use super::key_table::PermissionSet;
|
use crate::key_table::v05::PermissionSet;
|
||||||
|
|
||||||
/// A bucket is a collection of objects
|
/// A bucket is a collection of objects
|
||||||
///
|
///
|
||||||
|
|
|
@ -1,50 +0,0 @@
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
|
|
||||||
use garage_table::crdt::*;
|
|
||||||
use garage_table::*;
|
|
||||||
|
|
||||||
/// An api key
|
|
||||||
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
|
||||||
pub struct Key {
|
|
||||||
/// The id of the key (immutable), used as partition key
|
|
||||||
pub key_id: String,
|
|
||||||
|
|
||||||
/// The secret_key associated
|
|
||||||
pub secret_key: String,
|
|
||||||
|
|
||||||
/// Name for the key
|
|
||||||
pub name: crdt::Lww<String>,
|
|
||||||
|
|
||||||
/// Is the key deleted
|
|
||||||
pub deleted: crdt::Bool,
|
|
||||||
|
|
||||||
/// Buckets in which the key is authorized. Empty if `Key` is deleted
|
|
||||||
// CRDT interaction: deleted implies authorized_buckets is empty
|
|
||||||
pub authorized_buckets: crdt::LwwMap<String, PermissionSet>,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Permission given to a key in a bucket
|
|
||||||
#[derive(PartialOrd, Ord, PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
|
||||||
pub struct PermissionSet {
|
|
||||||
/// The key can be used to read the bucket
|
|
||||||
pub allow_read: bool,
|
|
||||||
/// The key can be used to write in the bucket
|
|
||||||
pub allow_write: bool,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl AutoCrdt for PermissionSet {
|
|
||||||
const WARN_IF_DIFFERENT: bool = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Crdt for Key {
|
|
||||||
fn merge(&mut self, other: &Self) {
|
|
||||||
self.name.merge(&other.name);
|
|
||||||
self.deleted.merge(&other.deleted);
|
|
||||||
|
|
||||||
if self.deleted.get() {
|
|
||||||
self.authorized_buckets.clear();
|
|
||||||
} else {
|
|
||||||
self.authorized_buckets.merge(&other.authorized_buckets);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,4 +1 @@
|
||||||
pub(crate) mod bucket_table;
|
pub(crate) mod bucket_table;
|
||||||
pub(crate) mod key_table;
|
|
||||||
pub(crate) mod object_table;
|
|
||||||
pub(crate) mod version_table;
|
|
||||||
|
|
|
@ -1,149 +0,0 @@
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use std::collections::BTreeMap;
|
|
||||||
|
|
||||||
use garage_util::data::*;
|
|
||||||
|
|
||||||
use garage_table::crdt::*;
|
|
||||||
|
|
||||||
/// An object
|
|
||||||
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
|
||||||
pub struct Object {
|
|
||||||
/// The bucket in which the object is stored, used as partition key
|
|
||||||
pub bucket: String,
|
|
||||||
|
|
||||||
/// The key at which the object is stored in its bucket, used as sorting key
|
|
||||||
pub key: String,
|
|
||||||
|
|
||||||
/// The list of currenty stored versions of the object
|
|
||||||
versions: Vec<ObjectVersion>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Object {
|
|
||||||
/// Get a list of currently stored versions of `Object`
|
|
||||||
pub fn versions(&self) -> &[ObjectVersion] {
|
|
||||||
&self.versions[..]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Informations about a version of an object
|
|
||||||
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
|
||||||
pub struct ObjectVersion {
|
|
||||||
/// Id of the version
|
|
||||||
pub uuid: Uuid,
|
|
||||||
/// Timestamp of when the object was created
|
|
||||||
pub timestamp: u64,
|
|
||||||
/// State of the version
|
|
||||||
pub state: ObjectVersionState,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// State of an object version
|
|
||||||
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
|
||||||
pub enum ObjectVersionState {
|
|
||||||
/// The version is being received
|
|
||||||
Uploading(ObjectVersionHeaders),
|
|
||||||
/// The version is fully received
|
|
||||||
Complete(ObjectVersionData),
|
|
||||||
/// The version uploaded containded errors or the upload was explicitly aborted
|
|
||||||
Aborted,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Crdt for ObjectVersionState {
|
|
||||||
fn merge(&mut self, other: &Self) {
|
|
||||||
use ObjectVersionState::*;
|
|
||||||
match other {
|
|
||||||
Aborted => {
|
|
||||||
*self = Aborted;
|
|
||||||
}
|
|
||||||
Complete(b) => match self {
|
|
||||||
Aborted => {}
|
|
||||||
Complete(a) => {
|
|
||||||
a.merge(b);
|
|
||||||
}
|
|
||||||
Uploading(_) => {
|
|
||||||
*self = Complete(b.clone());
|
|
||||||
}
|
|
||||||
},
|
|
||||||
Uploading(_) => {}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Data stored in object version
|
|
||||||
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
|
|
||||||
pub enum ObjectVersionData {
|
|
||||||
/// The object was deleted, this Version is a tombstone to mark it as such
|
|
||||||
DeleteMarker,
|
|
||||||
/// The object is short, it's stored inlined
|
|
||||||
Inline(ObjectVersionMeta, #[serde(with = "serde_bytes")] Vec<u8>),
|
|
||||||
/// The object is not short, Hash of first block is stored here, next segments hashes are
|
|
||||||
/// stored in the version table
|
|
||||||
FirstBlock(ObjectVersionMeta, Hash),
|
|
||||||
}
|
|
||||||
|
|
||||||
impl AutoCrdt for ObjectVersionData {
|
|
||||||
const WARN_IF_DIFFERENT: bool = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Metadata about the object version
|
|
||||||
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
|
|
||||||
pub struct ObjectVersionMeta {
|
|
||||||
/// Headers to send to the client
|
|
||||||
pub headers: ObjectVersionHeaders,
|
|
||||||
/// Size of the object
|
|
||||||
pub size: u64,
|
|
||||||
/// etag of the object
|
|
||||||
pub etag: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Additional headers for an object
|
|
||||||
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
|
|
||||||
pub struct ObjectVersionHeaders {
|
|
||||||
/// Content type of the object
|
|
||||||
pub content_type: String,
|
|
||||||
/// Any other http headers to send
|
|
||||||
pub other: BTreeMap<String, String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ObjectVersion {
|
|
||||||
fn cmp_key(&self) -> (u64, Uuid) {
|
|
||||||
(self.timestamp, self.uuid)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Is the object version completely received
|
|
||||||
pub fn is_complete(&self) -> bool {
|
|
||||||
matches!(self.state, ObjectVersionState::Complete(_))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Crdt for Object {
|
|
||||||
fn merge(&mut self, other: &Self) {
|
|
||||||
// Merge versions from other into here
|
|
||||||
for other_v in other.versions.iter() {
|
|
||||||
match self
|
|
||||||
.versions
|
|
||||||
.binary_search_by(|v| v.cmp_key().cmp(&other_v.cmp_key()))
|
|
||||||
{
|
|
||||||
Ok(i) => {
|
|
||||||
self.versions[i].state.merge(&other_v.state);
|
|
||||||
}
|
|
||||||
Err(i) => {
|
|
||||||
self.versions.insert(i, other_v.clone());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove versions which are obsolete, i.e. those that come
|
|
||||||
// before the last version which .is_complete().
|
|
||||||
let last_complete = self
|
|
||||||
.versions
|
|
||||||
.iter()
|
|
||||||
.enumerate()
|
|
||||||
.rev()
|
|
||||||
.find(|(_, v)| v.is_complete())
|
|
||||||
.map(|(vi, _)| vi);
|
|
||||||
|
|
||||||
if let Some(last_vi) = last_complete {
|
|
||||||
self.versions = self.versions.drain(last_vi..).collect::<Vec<_>>();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,79 +0,0 @@
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
|
|
||||||
use garage_util::data::*;
|
|
||||||
|
|
||||||
use garage_table::crdt::*;
|
|
||||||
use garage_table::*;
|
|
||||||
|
|
||||||
/// A version of an object
|
|
||||||
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
|
||||||
pub struct Version {
|
|
||||||
/// UUID of the version, used as partition key
|
|
||||||
pub uuid: Uuid,
|
|
||||||
|
|
||||||
// Actual data: the blocks for this version
|
|
||||||
// In the case of a multipart upload, also store the etags
|
|
||||||
// of individual parts and check them when doing CompleteMultipartUpload
|
|
||||||
/// Is this version deleted
|
|
||||||
pub deleted: crdt::Bool,
|
|
||||||
/// list of blocks of data composing the version
|
|
||||||
pub blocks: crdt::Map<VersionBlockKey, VersionBlock>,
|
|
||||||
/// Etag of each part in case of a multipart upload, empty otherwise
|
|
||||||
pub parts_etags: crdt::Map<u64, String>,
|
|
||||||
|
|
||||||
// Back link to bucket+key so that we can figure if
|
|
||||||
// this was deleted later on
|
|
||||||
/// Bucket in which the related object is stored
|
|
||||||
pub bucket: String,
|
|
||||||
/// Key in which the related object is stored
|
|
||||||
pub key: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(PartialEq, Eq, Clone, Copy, Debug, Serialize, Deserialize)]
|
|
||||||
pub struct VersionBlockKey {
|
|
||||||
/// Number of the part
|
|
||||||
pub part_number: u64,
|
|
||||||
/// Offset of this sub-segment in its part
|
|
||||||
pub offset: u64,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Ord for VersionBlockKey {
|
|
||||||
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
|
|
||||||
self.part_number
|
|
||||||
.cmp(&other.part_number)
|
|
||||||
.then(self.offset.cmp(&other.offset))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl PartialOrd for VersionBlockKey {
|
|
||||||
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
|
|
||||||
Some(self.cmp(other))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Informations about a single block
|
|
||||||
#[derive(PartialEq, Eq, Ord, PartialOrd, Clone, Copy, Debug, Serialize, Deserialize)]
|
|
||||||
pub struct VersionBlock {
|
|
||||||
/// Blake2 sum of the block
|
|
||||||
pub hash: Hash,
|
|
||||||
/// Size of the block
|
|
||||||
pub size: u64,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl AutoCrdt for VersionBlock {
|
|
||||||
const WARN_IF_DIFFERENT: bool = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Crdt for Version {
|
|
||||||
fn merge(&mut self, other: &Self) {
|
|
||||||
self.deleted.merge(&other.deleted);
|
|
||||||
|
|
||||||
if self.deleted.get() {
|
|
||||||
self.blocks.clear();
|
|
||||||
self.parts_etags.clear();
|
|
||||||
} else {
|
|
||||||
self.blocks.merge(&other.blocks);
|
|
||||||
self.parts_etags.merge(&other.parts_etags);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,4 +1,3 @@
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use garage_db as db;
|
use garage_db as db;
|
||||||
|
@ -10,8 +9,13 @@ use garage_table::*;
|
||||||
|
|
||||||
use garage_block::manager::*;
|
use garage_block::manager::*;
|
||||||
|
|
||||||
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
mod v08 {
|
||||||
pub struct BlockRef {
|
use garage_util::crdt;
|
||||||
|
use garage_util::data::{Hash, Uuid};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
||||||
|
pub struct BlockRef {
|
||||||
/// Hash (blake2 sum) of the block, used as partition key
|
/// Hash (blake2 sum) of the block, used as partition key
|
||||||
pub block: Hash,
|
pub block: Hash,
|
||||||
|
|
||||||
|
@ -21,8 +25,13 @@ pub struct BlockRef {
|
||||||
// Keep track of deleted status
|
// Keep track of deleted status
|
||||||
/// Is the Version that contains this block deleted
|
/// Is the Version that contains this block deleted
|
||||||
pub deleted: crdt::Bool,
|
pub deleted: crdt::Bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl garage_util::migrate::InitialFormat for BlockRef {}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub use v08::*;
|
||||||
|
|
||||||
impl Entry<Hash, Uuid> for BlockRef {
|
impl Entry<Hash, Uuid> for BlockRef {
|
||||||
fn partition_key(&self) -> &Hash {
|
fn partition_key(&self) -> &Hash {
|
||||||
&self.block
|
&self.block
|
||||||
|
|
|
@ -1,10 +1,8 @@
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
use std::collections::BTreeMap;
|
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use garage_db as db;
|
use garage_db as db;
|
||||||
|
|
||||||
use garage_util::background::BackgroundRunner;
|
|
||||||
use garage_util::data::*;
|
use garage_util::data::*;
|
||||||
|
|
||||||
use garage_table::crdt::*;
|
use garage_table::crdt::*;
|
||||||
|
@ -14,15 +12,99 @@ use garage_table::*;
|
||||||
use crate::index_counter::*;
|
use crate::index_counter::*;
|
||||||
use crate::s3::version_table::*;
|
use crate::s3::version_table::*;
|
||||||
|
|
||||||
use crate::prev::v051::object_table as old;
|
|
||||||
|
|
||||||
pub const OBJECTS: &str = "objects";
|
pub const OBJECTS: &str = "objects";
|
||||||
pub const UNFINISHED_UPLOADS: &str = "unfinished_uploads";
|
pub const UNFINISHED_UPLOADS: &str = "unfinished_uploads";
|
||||||
pub const BYTES: &str = "bytes";
|
pub const BYTES: &str = "bytes";
|
||||||
|
|
||||||
/// An object
|
mod v05 {
|
||||||
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
use garage_util::data::{Hash, Uuid};
|
||||||
pub struct Object {
|
use serde::{Deserialize, Serialize};
|
||||||
|
use std::collections::BTreeMap;
|
||||||
|
|
||||||
|
/// An object
|
||||||
|
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
||||||
|
pub struct Object {
|
||||||
|
/// The bucket in which the object is stored, used as partition key
|
||||||
|
pub bucket: String,
|
||||||
|
|
||||||
|
/// The key at which the object is stored in its bucket, used as sorting key
|
||||||
|
pub key: String,
|
||||||
|
|
||||||
|
/// The list of currenty stored versions of the object
|
||||||
|
pub(super) versions: Vec<ObjectVersion>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Informations about a version of an object
|
||||||
|
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
||||||
|
pub struct ObjectVersion {
|
||||||
|
/// Id of the version
|
||||||
|
pub uuid: Uuid,
|
||||||
|
/// Timestamp of when the object was created
|
||||||
|
pub timestamp: u64,
|
||||||
|
/// State of the version
|
||||||
|
pub state: ObjectVersionState,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// State of an object version
|
||||||
|
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
||||||
|
pub enum ObjectVersionState {
|
||||||
|
/// The version is being received
|
||||||
|
Uploading(ObjectVersionHeaders),
|
||||||
|
/// The version is fully received
|
||||||
|
Complete(ObjectVersionData),
|
||||||
|
/// The version uploaded containded errors or the upload was explicitly aborted
|
||||||
|
Aborted,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Data stored in object version
|
||||||
|
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
|
||||||
|
pub enum ObjectVersionData {
|
||||||
|
/// The object was deleted, this Version is a tombstone to mark it as such
|
||||||
|
DeleteMarker,
|
||||||
|
/// The object is short, it's stored inlined
|
||||||
|
Inline(ObjectVersionMeta, #[serde(with = "serde_bytes")] Vec<u8>),
|
||||||
|
/// The object is not short, Hash of first block is stored here, next segments hashes are
|
||||||
|
/// stored in the version table
|
||||||
|
FirstBlock(ObjectVersionMeta, Hash),
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Metadata about the object version
|
||||||
|
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
|
||||||
|
pub struct ObjectVersionMeta {
|
||||||
|
/// Headers to send to the client
|
||||||
|
pub headers: ObjectVersionHeaders,
|
||||||
|
/// Size of the object
|
||||||
|
pub size: u64,
|
||||||
|
/// etag of the object
|
||||||
|
pub etag: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Additional headers for an object
|
||||||
|
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
|
||||||
|
pub struct ObjectVersionHeaders {
|
||||||
|
/// Content type of the object
|
||||||
|
pub content_type: String,
|
||||||
|
/// Any other http headers to send
|
||||||
|
pub other: BTreeMap<String, String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl garage_util::migrate::InitialFormat for Object {}
|
||||||
|
}
|
||||||
|
|
||||||
|
mod v08 {
|
||||||
|
use garage_util::data::Uuid;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use super::v05;
|
||||||
|
|
||||||
|
pub use v05::{
|
||||||
|
ObjectVersion, ObjectVersionData, ObjectVersionHeaders, ObjectVersionMeta,
|
||||||
|
ObjectVersionState,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// An object
|
||||||
|
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
||||||
|
pub struct Object {
|
||||||
/// The bucket in which the object is stored, used as partition key
|
/// The bucket in which the object is stored, used as partition key
|
||||||
pub bucket_id: Uuid,
|
pub bucket_id: Uuid,
|
||||||
|
|
||||||
|
@ -30,9 +112,26 @@ pub struct Object {
|
||||||
pub key: String,
|
pub key: String,
|
||||||
|
|
||||||
/// The list of currenty stored versions of the object
|
/// The list of currenty stored versions of the object
|
||||||
versions: Vec<ObjectVersion>,
|
pub(super) versions: Vec<ObjectVersion>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl garage_util::migrate::Migrate for Object {
|
||||||
|
type Previous = v05::Object;
|
||||||
|
|
||||||
|
fn migrate(old: v05::Object) -> Object {
|
||||||
|
use garage_util::data::blake2sum;
|
||||||
|
|
||||||
|
Object {
|
||||||
|
bucket_id: blake2sum(old.bucket.as_bytes()),
|
||||||
|
key: old.key,
|
||||||
|
versions: old.versions,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub use v08::*;
|
||||||
|
|
||||||
impl Object {
|
impl Object {
|
||||||
/// Initialize an Object struct from parts
|
/// Initialize an Object struct from parts
|
||||||
pub fn new(bucket_id: Uuid, key: String, versions: Vec<ObjectVersion>) -> Self {
|
pub fn new(bucket_id: Uuid, key: String, versions: Vec<ObjectVersion>) -> Self {
|
||||||
|
@ -69,28 +168,6 @@ impl Object {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Informations about a version of an object
|
|
||||||
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
|
||||||
pub struct ObjectVersion {
|
|
||||||
/// Id of the version
|
|
||||||
pub uuid: Uuid,
|
|
||||||
/// Timestamp of when the object was created
|
|
||||||
pub timestamp: u64,
|
|
||||||
/// State of the version
|
|
||||||
pub state: ObjectVersionState,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// State of an object version
|
|
||||||
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
|
||||||
pub enum ObjectVersionState {
|
|
||||||
/// The version is being received
|
|
||||||
Uploading(ObjectVersionHeaders),
|
|
||||||
/// The version is fully received
|
|
||||||
Complete(ObjectVersionData),
|
|
||||||
/// The version uploaded containded errors or the upload was explicitly aborted
|
|
||||||
Aborted,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Crdt for ObjectVersionState {
|
impl Crdt for ObjectVersionState {
|
||||||
fn merge(&mut self, other: &Self) {
|
fn merge(&mut self, other: &Self) {
|
||||||
use ObjectVersionState::*;
|
use ObjectVersionState::*;
|
||||||
|
@ -112,42 +189,10 @@ impl Crdt for ObjectVersionState {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Data stored in object version
|
|
||||||
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
|
|
||||||
pub enum ObjectVersionData {
|
|
||||||
/// The object was deleted, this Version is a tombstone to mark it as such
|
|
||||||
DeleteMarker,
|
|
||||||
/// The object is short, it's stored inlined
|
|
||||||
Inline(ObjectVersionMeta, #[serde(with = "serde_bytes")] Vec<u8>),
|
|
||||||
/// The object is not short, Hash of first block is stored here, next segments hashes are
|
|
||||||
/// stored in the version table
|
|
||||||
FirstBlock(ObjectVersionMeta, Hash),
|
|
||||||
}
|
|
||||||
|
|
||||||
impl AutoCrdt for ObjectVersionData {
|
impl AutoCrdt for ObjectVersionData {
|
||||||
const WARN_IF_DIFFERENT: bool = true;
|
const WARN_IF_DIFFERENT: bool = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Metadata about the object version
|
|
||||||
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
|
|
||||||
pub struct ObjectVersionMeta {
|
|
||||||
/// Headers to send to the client
|
|
||||||
pub headers: ObjectVersionHeaders,
|
|
||||||
/// Size of the object
|
|
||||||
pub size: u64,
|
|
||||||
/// etag of the object
|
|
||||||
pub etag: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Additional headers for an object
|
|
||||||
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
|
|
||||||
pub struct ObjectVersionHeaders {
|
|
||||||
/// Content type of the object
|
|
||||||
pub content_type: String,
|
|
||||||
/// Any other http headers to send
|
|
||||||
pub other: BTreeMap<String, String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ObjectVersion {
|
impl ObjectVersion {
|
||||||
fn cmp_key(&self) -> (u64, Uuid) {
|
fn cmp_key(&self) -> (u64, Uuid) {
|
||||||
(self.timestamp, self.uuid)
|
(self.timestamp, self.uuid)
|
||||||
|
@ -221,7 +266,6 @@ impl Crdt for Object {
|
||||||
}
|
}
|
||||||
|
|
||||||
pub struct ObjectTable {
|
pub struct ObjectTable {
|
||||||
pub background: Arc<BackgroundRunner>,
|
|
||||||
pub version_table: Arc<Table<VersionTable, TableShardedReplication>>,
|
pub version_table: Arc<Table<VersionTable, TableShardedReplication>>,
|
||||||
pub object_counter_table: Arc<IndexCounter<Object>>,
|
pub object_counter_table: Arc<IndexCounter<Object>>,
|
||||||
}
|
}
|
||||||
|
@ -255,12 +299,7 @@ impl TableSchema for ObjectTable {
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
// 2. Spawn threads that propagates deletions to version table
|
// 2. Enqueue propagation deletions to version table
|
||||||
let version_table = self.version_table.clone();
|
|
||||||
let old = old.cloned();
|
|
||||||
let new = new.cloned();
|
|
||||||
|
|
||||||
self.background.spawn(async move {
|
|
||||||
if let (Some(old_v), Some(new_v)) = (old, new) {
|
if let (Some(old_v), Some(new_v)) = (old, new) {
|
||||||
// Propagate deletion of old versions
|
// Propagate deletion of old versions
|
||||||
for v in old_v.versions.iter() {
|
for v in old_v.versions.iter() {
|
||||||
|
@ -277,12 +316,17 @@ impl TableSchema for ObjectTable {
|
||||||
if newly_deleted {
|
if newly_deleted {
|
||||||
let deleted_version =
|
let deleted_version =
|
||||||
Version::new(v.uuid, old_v.bucket_id, old_v.key.clone(), true);
|
Version::new(v.uuid, old_v.bucket_id, old_v.key.clone(), true);
|
||||||
version_table.insert(&deleted_version).await?;
|
let res = self.version_table.queue_insert(tx, &deleted_version);
|
||||||
|
if let Err(e) = db::unabort(res)? {
|
||||||
|
error!(
|
||||||
|
"Unable to enqueue version deletion propagation: {}. A repair will be needed.",
|
||||||
|
e
|
||||||
|
);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Ok(())
|
}
|
||||||
});
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -292,11 +336,6 @@ impl TableSchema for ObjectTable {
|
||||||
ObjectFilter::IsUploading => entry.versions.iter().any(|v| v.is_uploading()),
|
ObjectFilter::IsUploading => entry.versions.iter().any(|v| v.is_uploading()),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn try_migrate(bytes: &[u8]) -> Option<Self::E> {
|
|
||||||
let old_obj = rmp_serde::decode::from_read_ref::<_, old::Object>(bytes).ok()?;
|
|
||||||
Some(migrate_object(old_obj))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl CountedItem for Object {
|
impl CountedItem for Object {
|
||||||
|
@ -341,64 +380,3 @@ impl CountedItem for Object {
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// vvvvvvvv migration code, stupid stuff vvvvvvvvvvvv
|
|
||||||
// (we just want to change bucket into bucket_id by hashing it)
|
|
||||||
|
|
||||||
fn migrate_object(o: old::Object) -> Object {
|
|
||||||
let versions = o
|
|
||||||
.versions()
|
|
||||||
.iter()
|
|
||||||
.cloned()
|
|
||||||
.map(migrate_object_version)
|
|
||||||
.collect();
|
|
||||||
Object {
|
|
||||||
bucket_id: blake2sum(o.bucket.as_bytes()),
|
|
||||||
key: o.key,
|
|
||||||
versions,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn migrate_object_version(v: old::ObjectVersion) -> ObjectVersion {
|
|
||||||
ObjectVersion {
|
|
||||||
uuid: Uuid::try_from(v.uuid.as_slice()).unwrap(),
|
|
||||||
timestamp: v.timestamp,
|
|
||||||
state: match v.state {
|
|
||||||
old::ObjectVersionState::Uploading(h) => {
|
|
||||||
ObjectVersionState::Uploading(migrate_object_version_headers(h))
|
|
||||||
}
|
|
||||||
old::ObjectVersionState::Complete(d) => {
|
|
||||||
ObjectVersionState::Complete(migrate_object_version_data(d))
|
|
||||||
}
|
|
||||||
old::ObjectVersionState::Aborted => ObjectVersionState::Aborted,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn migrate_object_version_headers(h: old::ObjectVersionHeaders) -> ObjectVersionHeaders {
|
|
||||||
ObjectVersionHeaders {
|
|
||||||
content_type: h.content_type,
|
|
||||||
other: h.other,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn migrate_object_version_data(d: old::ObjectVersionData) -> ObjectVersionData {
|
|
||||||
match d {
|
|
||||||
old::ObjectVersionData::DeleteMarker => ObjectVersionData::DeleteMarker,
|
|
||||||
old::ObjectVersionData::Inline(m, b) => {
|
|
||||||
ObjectVersionData::Inline(migrate_object_version_meta(m), b)
|
|
||||||
}
|
|
||||||
old::ObjectVersionData::FirstBlock(m, h) => ObjectVersionData::FirstBlock(
|
|
||||||
migrate_object_version_meta(m),
|
|
||||||
Hash::try_from(h.as_slice()).unwrap(),
|
|
||||||
),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn migrate_object_version_meta(m: old::ObjectVersionMeta) -> ObjectVersionMeta {
|
|
||||||
ObjectVersionMeta {
|
|
||||||
headers: migrate_object_version_headers(m.headers),
|
|
||||||
size: m.size,
|
|
||||||
etag: m.etag,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
|
@ -1,9 +1,7 @@
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use garage_db as db;
|
use garage_db as db;
|
||||||
|
|
||||||
use garage_util::background::BackgroundRunner;
|
|
||||||
use garage_util::data::*;
|
use garage_util::data::*;
|
||||||
|
|
||||||
use garage_table::crdt::*;
|
use garage_table::crdt::*;
|
||||||
|
@ -12,11 +10,65 @@ use garage_table::*;
|
||||||
|
|
||||||
use crate::s3::block_ref_table::*;
|
use crate::s3::block_ref_table::*;
|
||||||
|
|
||||||
use crate::prev::v051::version_table as old;
|
mod v05 {
|
||||||
|
use garage_util::crdt;
|
||||||
|
use garage_util::data::{Hash, Uuid};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
/// A version of an object
|
/// A version of an object
|
||||||
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
||||||
pub struct Version {
|
pub struct Version {
|
||||||
|
/// UUID of the version, used as partition key
|
||||||
|
pub uuid: Uuid,
|
||||||
|
|
||||||
|
// Actual data: the blocks for this version
|
||||||
|
// In the case of a multipart upload, also store the etags
|
||||||
|
// of individual parts and check them when doing CompleteMultipartUpload
|
||||||
|
/// Is this version deleted
|
||||||
|
pub deleted: crdt::Bool,
|
||||||
|
/// list of blocks of data composing the version
|
||||||
|
pub blocks: crdt::Map<VersionBlockKey, VersionBlock>,
|
||||||
|
/// Etag of each part in case of a multipart upload, empty otherwise
|
||||||
|
pub parts_etags: crdt::Map<u64, String>,
|
||||||
|
|
||||||
|
// Back link to bucket+key so that we can figure if
|
||||||
|
// this was deleted later on
|
||||||
|
/// Bucket in which the related object is stored
|
||||||
|
pub bucket: String,
|
||||||
|
/// Key in which the related object is stored
|
||||||
|
pub key: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(PartialEq, Eq, Clone, Copy, Debug, Serialize, Deserialize)]
|
||||||
|
pub struct VersionBlockKey {
|
||||||
|
/// Number of the part
|
||||||
|
pub part_number: u64,
|
||||||
|
/// Offset of this sub-segment in its part
|
||||||
|
pub offset: u64,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Informations about a single block
|
||||||
|
#[derive(PartialEq, Eq, Ord, PartialOrd, Clone, Copy, Debug, Serialize, Deserialize)]
|
||||||
|
pub struct VersionBlock {
|
||||||
|
/// Blake2 sum of the block
|
||||||
|
pub hash: Hash,
|
||||||
|
/// Size of the block
|
||||||
|
pub size: u64,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl garage_util::migrate::InitialFormat for Version {}
|
||||||
|
}
|
||||||
|
|
||||||
|
mod v08 {
|
||||||
|
use garage_util::crdt;
|
||||||
|
use garage_util::data::Uuid;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use super::v05;
|
||||||
|
|
||||||
|
/// A version of an object
|
||||||
|
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
||||||
|
pub struct Version {
|
||||||
/// UUID of the version, used as partition key
|
/// UUID of the version, used as partition key
|
||||||
pub uuid: Uuid,
|
pub uuid: Uuid,
|
||||||
|
|
||||||
|
@ -36,8 +88,30 @@ pub struct Version {
|
||||||
pub bucket_id: Uuid,
|
pub bucket_id: Uuid,
|
||||||
/// Key in which the related object is stored
|
/// Key in which the related object is stored
|
||||||
pub key: String,
|
pub key: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub use v05::{VersionBlock, VersionBlockKey};
|
||||||
|
|
||||||
|
impl garage_util::migrate::Migrate for Version {
|
||||||
|
type Previous = v05::Version;
|
||||||
|
|
||||||
|
fn migrate(old: v05::Version) -> Version {
|
||||||
|
use garage_util::data::blake2sum;
|
||||||
|
|
||||||
|
Version {
|
||||||
|
uuid: old.uuid,
|
||||||
|
deleted: old.deleted,
|
||||||
|
blocks: old.blocks,
|
||||||
|
parts_etags: old.parts_etags,
|
||||||
|
bucket_id: blake2sum(old.bucket.as_bytes()),
|
||||||
|
key: old.key,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub use v08::*;
|
||||||
|
|
||||||
impl Version {
|
impl Version {
|
||||||
pub fn new(uuid: Uuid, bucket_id: Uuid, key: String, deleted: bool) -> Self {
|
pub fn new(uuid: Uuid, bucket_id: Uuid, key: String, deleted: bool) -> Self {
|
||||||
Self {
|
Self {
|
||||||
|
@ -65,14 +139,6 @@ impl Version {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(PartialEq, Eq, Clone, Copy, Debug, Serialize, Deserialize)]
|
|
||||||
pub struct VersionBlockKey {
|
|
||||||
/// Number of the part
|
|
||||||
pub part_number: u64,
|
|
||||||
/// Offset of this sub-segment in its part
|
|
||||||
pub offset: u64,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Ord for VersionBlockKey {
|
impl Ord for VersionBlockKey {
|
||||||
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
|
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
|
||||||
self.part_number
|
self.part_number
|
||||||
|
@ -87,15 +153,6 @@ impl PartialOrd for VersionBlockKey {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Informations about a single block
|
|
||||||
#[derive(PartialEq, Eq, Ord, PartialOrd, Clone, Copy, Debug, Serialize, Deserialize)]
|
|
||||||
pub struct VersionBlock {
|
|
||||||
/// Blake2 sum of the block
|
|
||||||
pub hash: Hash,
|
|
||||||
/// Size of the block
|
|
||||||
pub size: u64,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl AutoCrdt for VersionBlock {
|
impl AutoCrdt for VersionBlock {
|
||||||
const WARN_IF_DIFFERENT: bool = true;
|
const WARN_IF_DIFFERENT: bool = true;
|
||||||
}
|
}
|
||||||
|
@ -127,7 +184,6 @@ impl Crdt for Version {
|
||||||
}
|
}
|
||||||
|
|
||||||
pub struct VersionTable {
|
pub struct VersionTable {
|
||||||
pub background: Arc<BackgroundRunner>,
|
|
||||||
pub block_ref_table: Arc<Table<BlockRefTable, TableShardedReplication>>,
|
pub block_ref_table: Arc<Table<BlockRefTable, TableShardedReplication>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -141,33 +197,26 @@ impl TableSchema for VersionTable {
|
||||||
|
|
||||||
fn updated(
|
fn updated(
|
||||||
&self,
|
&self,
|
||||||
_tx: &mut db::Transaction,
|
tx: &mut db::Transaction,
|
||||||
old: Option<&Self::E>,
|
old: Option<&Self::E>,
|
||||||
new: Option<&Self::E>,
|
new: Option<&Self::E>,
|
||||||
) -> db::TxOpResult<()> {
|
) -> db::TxOpResult<()> {
|
||||||
let block_ref_table = self.block_ref_table.clone();
|
|
||||||
let old = old.cloned();
|
|
||||||
let new = new.cloned();
|
|
||||||
|
|
||||||
self.background.spawn(async move {
|
|
||||||
if let (Some(old_v), Some(new_v)) = (old, new) {
|
if let (Some(old_v), Some(new_v)) = (old, new) {
|
||||||
// Propagate deletion of version blocks
|
// Propagate deletion of version blocks
|
||||||
if new_v.deleted.get() && !old_v.deleted.get() {
|
if new_v.deleted.get() && !old_v.deleted.get() {
|
||||||
let deleted_block_refs = old_v
|
let deleted_block_refs = old_v.blocks.items().iter().map(|(_k, vb)| BlockRef {
|
||||||
.blocks
|
|
||||||
.items()
|
|
||||||
.iter()
|
|
||||||
.map(|(_k, vb)| BlockRef {
|
|
||||||
block: vb.hash,
|
block: vb.hash,
|
||||||
version: old_v.uuid,
|
version: old_v.uuid,
|
||||||
deleted: true.into(),
|
deleted: true.into(),
|
||||||
})
|
|
||||||
.collect::<Vec<_>>();
|
|
||||||
block_ref_table.insert_many(&deleted_block_refs[..]).await?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
});
|
});
|
||||||
|
for block_ref in deleted_block_refs {
|
||||||
|
let res = self.block_ref_table.queue_insert(tx, &block_ref);
|
||||||
|
if let Err(e) = db::unabort(res)? {
|
||||||
|
error!("Unable to enqueue block ref deletion propagation: {}. A repair will be needed.", e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
@ -175,42 +224,4 @@ impl TableSchema for VersionTable {
|
||||||
fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool {
|
fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool {
|
||||||
filter.apply(entry.deleted.get())
|
filter.apply(entry.deleted.get())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn try_migrate(bytes: &[u8]) -> Option<Self::E> {
|
|
||||||
let old = rmp_serde::decode::from_read_ref::<_, old::Version>(bytes).ok()?;
|
|
||||||
|
|
||||||
let blocks = old
|
|
||||||
.blocks
|
|
||||||
.items()
|
|
||||||
.iter()
|
|
||||||
.map(|(k, v)| {
|
|
||||||
(
|
|
||||||
VersionBlockKey {
|
|
||||||
part_number: k.part_number,
|
|
||||||
offset: k.offset,
|
|
||||||
},
|
|
||||||
VersionBlock {
|
|
||||||
hash: Hash::try_from(v.hash.as_slice()).unwrap(),
|
|
||||||
size: v.size,
|
|
||||||
},
|
|
||||||
)
|
|
||||||
})
|
|
||||||
.collect::<crdt::Map<_, _>>();
|
|
||||||
|
|
||||||
let parts_etags = old
|
|
||||||
.parts_etags
|
|
||||||
.items()
|
|
||||||
.iter()
|
|
||||||
.map(|(k, v)| (*k, v.clone()))
|
|
||||||
.collect::<crdt::Map<_, _>>();
|
|
||||||
|
|
||||||
Some(Version {
|
|
||||||
uuid: Hash::try_from(old.uuid.as_slice()).unwrap(),
|
|
||||||
deleted: crdt::Bool::new(old.deleted.get()),
|
|
||||||
blocks,
|
|
||||||
parts_etags,
|
|
||||||
bucket_id: blake2sum(old.bucket.as_bytes()),
|
|
||||||
key: old.key,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
[package]
|
[package]
|
||||||
name = "garage_rpc"
|
name = "garage_rpc"
|
||||||
version = "0.8.0"
|
version = "0.8.1"
|
||||||
authors = ["Alex Auvolat <alex@adnab.me>"]
|
authors = ["Alex Auvolat <alex@adnab.me>"]
|
||||||
edition = "2018"
|
edition = "2018"
|
||||||
license = "AGPL-3.0"
|
license = "AGPL-3.0"
|
||||||
|
@ -14,7 +14,7 @@ path = "lib.rs"
|
||||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
garage_util = { version = "0.8.0", path = "../util" }
|
garage_util = { version = "0.8.1", path = "../util" }
|
||||||
|
|
||||||
arc-swap = "1.0"
|
arc-swap = "1.0"
|
||||||
bytes = "1.0"
|
bytes = "1.0"
|
||||||
|
@ -25,7 +25,6 @@ rand = "0.8"
|
||||||
sodiumoxide = { version = "0.2.5-0", package = "kuska-sodiumoxide" }
|
sodiumoxide = { version = "0.2.5-0", package = "kuska-sodiumoxide" }
|
||||||
|
|
||||||
async-trait = "0.1.7"
|
async-trait = "0.1.7"
|
||||||
rmp-serde = "0.15"
|
|
||||||
serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
|
serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
|
||||||
serde_bytes = "0.11"
|
serde_bytes = "0.11"
|
||||||
serde_json = "1.0"
|
serde_json = "1.0"
|
||||||
|
|
|
@ -5,6 +5,7 @@ use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use garage_util::crdt::{AutoCrdt, Crdt, LwwMap};
|
use garage_util::crdt::{AutoCrdt, Crdt, LwwMap};
|
||||||
use garage_util::data::*;
|
use garage_util::data::*;
|
||||||
|
use garage_util::encode::nonversioned_encode;
|
||||||
use garage_util::error::*;
|
use garage_util::error::*;
|
||||||
|
|
||||||
use crate::ring::*;
|
use crate::ring::*;
|
||||||
|
@ -35,6 +36,8 @@ pub struct ClusterLayout {
|
||||||
pub staging_hash: Hash,
|
pub staging_hash: Hash,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl garage_util::migrate::InitialFormat for ClusterLayout {}
|
||||||
|
|
||||||
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
|
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
|
||||||
pub struct NodeRoleV(pub Option<NodeRole>);
|
pub struct NodeRoleV(pub Option<NodeRole>);
|
||||||
|
|
||||||
|
@ -68,7 +71,7 @@ impl NodeRole {
|
||||||
impl ClusterLayout {
|
impl ClusterLayout {
|
||||||
pub fn new(replication_factor: usize) -> Self {
|
pub fn new(replication_factor: usize) -> Self {
|
||||||
let empty_lwwmap = LwwMap::new();
|
let empty_lwwmap = LwwMap::new();
|
||||||
let empty_lwwmap_hash = blake2sum(&rmp_to_vec_all_named(&empty_lwwmap).unwrap()[..]);
|
let empty_lwwmap_hash = blake2sum(&nonversioned_encode(&empty_lwwmap).unwrap()[..]);
|
||||||
|
|
||||||
ClusterLayout {
|
ClusterLayout {
|
||||||
version: 0,
|
version: 0,
|
||||||
|
@ -90,7 +93,7 @@ impl ClusterLayout {
|
||||||
Ordering::Equal => {
|
Ordering::Equal => {
|
||||||
self.staging.merge(&other.staging);
|
self.staging.merge(&other.staging);
|
||||||
|
|
||||||
let new_staging_hash = blake2sum(&rmp_to_vec_all_named(&self.staging).unwrap()[..]);
|
let new_staging_hash = blake2sum(&nonversioned_encode(&self.staging).unwrap()[..]);
|
||||||
let changed = new_staging_hash != self.staging_hash;
|
let changed = new_staging_hash != self.staging_hash;
|
||||||
|
|
||||||
self.staging_hash = new_staging_hash;
|
self.staging_hash = new_staging_hash;
|
||||||
|
@ -125,7 +128,7 @@ To know the correct value of the new layout version, invoke `garage layout show`
|
||||||
}
|
}
|
||||||
|
|
||||||
self.staging.clear();
|
self.staging.clear();
|
||||||
self.staging_hash = blake2sum(&rmp_to_vec_all_named(&self.staging).unwrap()[..]);
|
self.staging_hash = blake2sum(&nonversioned_encode(&self.staging).unwrap()[..]);
|
||||||
|
|
||||||
self.version += 1;
|
self.version += 1;
|
||||||
|
|
||||||
|
@ -149,7 +152,7 @@ To know the correct value of the new layout version, invoke `garage layout show`
|
||||||
}
|
}
|
||||||
|
|
||||||
self.staging.clear();
|
self.staging.clear();
|
||||||
self.staging_hash = blake2sum(&rmp_to_vec_all_named(&self.staging).unwrap()[..]);
|
self.staging_hash = blake2sum(&nonversioned_encode(&self.staging).unwrap()[..]);
|
||||||
|
|
||||||
self.version += 1;
|
self.version += 1;
|
||||||
|
|
||||||
|
@ -178,7 +181,7 @@ To know the correct value of the new layout version, invoke `garage layout show`
|
||||||
/// returns true if consistent, false if error
|
/// returns true if consistent, false if error
|
||||||
pub fn check(&self) -> bool {
|
pub fn check(&self) -> bool {
|
||||||
// Check that the hash of the staging data is correct
|
// Check that the hash of the staging data is correct
|
||||||
let staging_hash = blake2sum(&rmp_to_vec_all_named(&self.staging).unwrap()[..]);
|
let staging_hash = blake2sum(&nonversioned_encode(&self.staging).unwrap()[..]);
|
||||||
if staging_hash != self.staging_hash {
|
if staging_hash != self.staging_hash {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
@ -254,11 +257,13 @@ To know the correct value of the new layout version, invoke `garage layout show`
|
||||||
match self.initial_partition_assignation() {
|
match self.initial_partition_assignation() {
|
||||||
Some(initial_partitions) => {
|
Some(initial_partitions) => {
|
||||||
for (part, ipart) in partitions.iter_mut().zip(initial_partitions.iter()) {
|
for (part, ipart) in partitions.iter_mut().zip(initial_partitions.iter()) {
|
||||||
|
for _ in 0..2 {
|
||||||
for (id, info) in ipart.nodes.iter() {
|
for (id, info) in ipart.nodes.iter() {
|
||||||
if part.nodes.len() < self.replication_factor {
|
if part.nodes.len() < self.replication_factor {
|
||||||
part.add(None, n_zones, id, info.unwrap());
|
part.add(None, n_zones, id, info.unwrap());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
assert!(part.nodes.len() == self.replication_factor);
|
assert!(part.nodes.len() == self.replication_factor);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -9,6 +9,7 @@ mod consul;
|
||||||
mod kubernetes;
|
mod kubernetes;
|
||||||
|
|
||||||
pub mod layout;
|
pub mod layout;
|
||||||
|
pub mod replication_mode;
|
||||||
pub mod ring;
|
pub mod ring;
|
||||||
pub mod system;
|
pub mod system;
|
||||||
|
|
||||||
|
@ -16,3 +17,5 @@ mod metrics;
|
||||||
pub mod rpc_helper;
|
pub mod rpc_helper;
|
||||||
|
|
||||||
pub use rpc_helper::*;
|
pub use rpc_helper::*;
|
||||||
|
|
||||||
|
pub mod system_metrics;
|
||||||
|
|
|
@ -1,3 +1,4 @@
|
||||||
|
#[derive(Clone, Copy)]
|
||||||
pub enum ReplicationMode {
|
pub enum ReplicationMode {
|
||||||
None,
|
None,
|
||||||
TwoWay,
|
TwoWay,
|