forked from Deuxfleurs/garage
Compare commits
No commits in common. "94d723f27cea7aa58d11af3e18a165283b40f19a" and "0c7ed0b0af40c3521b9dc259a98f8aad05999b4f" have entirely different histories.
94d723f27c...0c7ed0b0af
82 changed files with 1936 additions and 2869 deletions
.envrc (1 line changed)

@@ -1 +0,0 @@
-use flake
.gitignore (vendored, 1 line changed)

@@ -3,4 +3,3 @@
 /pki
 **/*.rs.bk
 *.swp
-/.direnv
Cargo.lock (generated, 26 lines changed)

@@ -1048,7 +1048,7 @@ dependencies = [
 
 [[package]]
 name = "garage"
-version = "0.8.1"
+version = "0.8.0"
 dependencies = [
  "assert-json-diff",
  "async-trait",
@@ -1080,6 +1080,7 @@ dependencies = [
  "parse_duration",
  "prometheus",
  "rand 0.8.5",
+ "rmp-serde",
  "serde",
  "serde_bytes",
  "serde_json",
@@ -1095,7 +1096,7 @@ dependencies = [
 
 [[package]]
 name = "garage_api"
-version = "0.8.1"
+version = "0.8.0"
 dependencies = [
  "async-trait",
  "base64",
@@ -1140,7 +1141,7 @@ dependencies = [
 
 [[package]]
 name = "garage_block"
-version = "0.8.1"
+version = "0.8.0"
 dependencies = [
  "arc-swap",
  "async-compression",
@@ -1155,6 +1156,7 @@ dependencies = [
  "hex",
  "opentelemetry",
  "rand 0.8.5",
+ "rmp-serde",
  "serde",
  "serde_bytes",
  "tokio",
@@ -1165,7 +1167,7 @@ dependencies = [
 
 [[package]]
 name = "garage_db"
-version = "0.8.1"
+version = "0.8.0"
 dependencies = [
  "clap 3.1.18",
  "err-derive",
@@ -1180,7 +1182,7 @@ dependencies = [
 
 [[package]]
 name = "garage_model"
-version = "0.8.1"
+version = "0.8.0"
 dependencies = [
  "arc-swap",
  "async-trait",
@@ -1198,6 +1200,7 @@ dependencies = [
  "netapp",
  "opentelemetry",
  "rand 0.8.5",
+ "rmp-serde",
  "serde",
  "serde_bytes",
  "tokio",
@@ -1207,7 +1210,7 @@ dependencies = [
 
 [[package]]
 name = "garage_rpc"
-version = "0.8.1"
+version = "0.8.0"
 dependencies = [
  "arc-swap",
  "async-trait",
@@ -1226,6 +1229,7 @@ dependencies = [
  "pnet_datalink",
  "rand 0.8.5",
  "reqwest",
+ "rmp-serde",
  "schemars",
  "serde",
  "serde_bytes",
@@ -1237,9 +1241,8 @@ dependencies = [
 
 [[package]]
 name = "garage_table"
-version = "0.8.1"
+version = "0.8.0"
 dependencies = [
- "arc-swap",
  "async-trait",
  "bytes",
  "futures",
@@ -1251,6 +1254,7 @@ dependencies = [
  "hexdump",
  "opentelemetry",
  "rand 0.8.5",
+ "rmp-serde",
  "serde",
  "serde_bytes",
  "tokio",
@@ -1259,7 +1263,7 @@ dependencies = [
 
 [[package]]
 name = "garage_util"
-version = "0.8.1"
+version = "0.8.0"
 dependencies = [
  "arc-swap",
  "async-trait",
@@ -1272,11 +1276,9 @@ dependencies = [
  "garage_db",
  "git-version",
  "hex",
- "hexdump",
  "http",
  "hyper",
  "lazy_static",
- "mktemp",
  "netapp",
  "opentelemetry",
  "rand 0.8.5",
@@ -1292,7 +1294,7 @@ dependencies = [
 
 [[package]]
 name = "garage_web"
-version = "0.8.1"
+version = "0.8.0"
 dependencies = [
  "err-derive",
  "futures",
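A note on the recurring `+ "rmp-serde"` lines above: going from 0.8.1 back to 0.8.0, every workspace crate drops one patch version and regains a direct dependency on `rmp-serde`, the MessagePack implementation of the serde traits (the 0.8.1 side of this comparison no longer lists it directly). As a rough illustration of what such a dependency provides — this is a generic sketch assuming `serde` (with the `derive` feature) and `rmp-serde` in `Cargo.toml`, not code from the Garage tree:

```rust
use serde::{Deserialize, Serialize};

// Hypothetical message type, for illustration only.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct PingMessage {
    node_id: Vec<u8>,
    uptime_secs: u64,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let msg = PingMessage { node_id: vec![0xde, 0xad], uptime_secs: 42 };

    // Encode to the compact MessagePack wire format...
    let bytes = rmp_serde::to_vec(&msg)?;
    // ...and decode it back.
    let decoded: PingMessage = rmp_serde::from_read(&bytes[..])?;

    assert_eq!(msg, decoded);
    Ok(())
}
```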
Cargo.nix (138 lines changed)

@@ -32,7 +32,7 @@ args@{
   ignoreLockHash,
 }:
 let
-  nixifiedLockHash = "8461dcfb984a8d042fecb5745d5da17912135dbf2a8ef7e6c3ae8e64c03d9744";
+  nixifiedLockHash = "90b29705f5037c7e1b33f4650841f1266f2e86fa03d5d0c87ad80be7619985c7";
   workspaceSrc = if args.workspaceSrc == null then ./. else args.workspaceSrc;
   currentLockHash = builtins.hashFile "sha256" (workspaceSrc + /Cargo.lock);
   lockHashIgnored = if ignoreLockHash
@@ -56,15 +56,15 @@ in
 {
   cargo2nixVersion = "0.11.0";
   workspace = {
-    garage_db = rustPackages.unknown.garage_db."0.8.1";
-    garage_util = rustPackages.unknown.garage_util."0.8.1";
-    garage_rpc = rustPackages.unknown.garage_rpc."0.8.1";
-    garage_table = rustPackages.unknown.garage_table."0.8.1";
-    garage_block = rustPackages.unknown.garage_block."0.8.1";
-    garage_model = rustPackages.unknown.garage_model."0.8.1";
-    garage_api = rustPackages.unknown.garage_api."0.8.1";
-    garage_web = rustPackages.unknown.garage_web."0.8.1";
-    garage = rustPackages.unknown.garage."0.8.1";
+    garage_db = rustPackages.unknown.garage_db."0.8.0";
+    garage_util = rustPackages.unknown.garage_util."0.8.0";
+    garage_rpc = rustPackages.unknown.garage_rpc."0.8.0";
+    garage_table = rustPackages.unknown.garage_table."0.8.0";
+    garage_block = rustPackages.unknown.garage_block."0.8.0";
+    garage_model = rustPackages.unknown.garage_model."0.8.0";
+    garage_api = rustPackages.unknown.garage_api."0.8.0";
+    garage_web = rustPackages.unknown.garage_web."0.8.0";
+    garage = rustPackages.unknown.garage."0.8.0";
     k2v-client = rustPackages.unknown.k2v-client."0.0.1";
   };
   "registry+https://github.com/rust-lang/crates.io-index".addr2line."0.17.0" = overridableMkRustCrate (profileName: rec {
@@ -1494,16 +1494,16 @@ in
     };
   });
 
-  "unknown".garage."0.8.1" = overridableMkRustCrate (profileName: rec {
+  "unknown".garage."0.8.0" = overridableMkRustCrate (profileName: rec {
     name = "garage";
-    version = "0.8.1";
+    version = "0.8.0";
     registry = "unknown";
     src = fetchCrateLocal (workspaceSrc + "/src/garage");
     features = builtins.concatLists [
       (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default") "bundled-libs")
       (lib.optional (rootFeatures' ? "garage/consul-discovery") "consul-discovery")
       (lib.optional (rootFeatures' ? "garage/default") "default")
-      (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/k2v") "k2v")
+      (lib.optional (rootFeatures' ? "garage/k2v") "k2v")
       (lib.optional (rootFeatures' ? "garage/kubernetes-discovery") "kubernetes-discovery")
       (lib.optional (rootFeatures' ? "garage/lmdb") "lmdb")
       (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics") "metrics")
@@ -1522,14 +1522,14 @@ in
       bytesize = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytesize."1.1.0" { inherit profileName; }).out;
       futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
       futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }).out;
-      garage_api = (rustPackages."unknown".garage_api."0.8.1" { inherit profileName; }).out;
-      garage_block = (rustPackages."unknown".garage_block."0.8.1" { inherit profileName; }).out;
-      garage_db = (rustPackages."unknown".garage_db."0.8.1" { inherit profileName; }).out;
-      garage_model = (rustPackages."unknown".garage_model."0.8.1" { inherit profileName; }).out;
-      garage_rpc = (rustPackages."unknown".garage_rpc."0.8.1" { inherit profileName; }).out;
-      garage_table = (rustPackages."unknown".garage_table."0.8.1" { inherit profileName; }).out;
-      garage_util = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out;
-      garage_web = (rustPackages."unknown".garage_web."0.8.1" { inherit profileName; }).out;
+      garage_api = (rustPackages."unknown".garage_api."0.8.0" { inherit profileName; }).out;
+      garage_block = (rustPackages."unknown".garage_block."0.8.0" { inherit profileName; }).out;
+      garage_db = (rustPackages."unknown".garage_db."0.8.0" { inherit profileName; }).out;
+      garage_model = (rustPackages."unknown".garage_model."0.8.0" { inherit profileName; }).out;
+      garage_rpc = (rustPackages."unknown".garage_rpc."0.8.0" { inherit profileName; }).out;
+      garage_table = (rustPackages."unknown".garage_table."0.8.0" { inherit profileName; }).out;
+      garage_util = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out;
+      garage_web = (rustPackages."unknown".garage_web."0.8.0" { inherit profileName; }).out;
       hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
       sodiumoxide = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".kuska-sodiumoxide."0.2.5-0" { inherit profileName; }).out;
       netapp = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.5.2" { inherit profileName; }).out;
@@ -1539,6 +1539,7 @@ in
       parse_duration = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".parse_duration."2.1.1" { inherit profileName; }).out;
       ${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/prometheus" then "prometheus" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".prometheus."0.13.0" { inherit profileName; }).out;
       rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
+      rmp_serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" { inherit profileName; }).out;
       serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.137" { inherit profileName; }).out;
       serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; }).out;
       structopt = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".structopt."0.3.26" { inherit profileName; }).out;
@@ -1562,13 +1563,13 @@ in
     };
   });
 
-  "unknown".garage_api."0.8.1" = overridableMkRustCrate (profileName: rec {
+  "unknown".garage_api."0.8.0" = overridableMkRustCrate (profileName: rec {
     name = "garage_api";
-    version = "0.8.1";
+    version = "0.8.0";
     registry = "unknown";
     src = fetchCrateLocal (workspaceSrc + "/src/api");
     features = builtins.concatLists [
-      (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/k2v" || rootFeatures' ? "garage_api/k2v") "k2v")
+      (lib.optional (rootFeatures' ? "garage/k2v" || rootFeatures' ? "garage_api/k2v") "k2v")
       (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage_api/metrics") "metrics")
       (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus") "opentelemetry-prometheus")
       (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/prometheus") "prometheus")
@@ -1583,11 +1584,11 @@ in
       form_urlencoded = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".form_urlencoded."1.0.1" { inherit profileName; }).out;
       futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
       futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }).out;
-      garage_block = (rustPackages."unknown".garage_block."0.8.1" { inherit profileName; }).out;
-      garage_model = (rustPackages."unknown".garage_model."0.8.1" { inherit profileName; }).out;
-      garage_rpc = (rustPackages."unknown".garage_rpc."0.8.1" { inherit profileName; }).out;
-      garage_table = (rustPackages."unknown".garage_table."0.8.1" { inherit profileName; }).out;
-      garage_util = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out;
+      garage_block = (rustPackages."unknown".garage_block."0.8.0" { inherit profileName; }).out;
+      garage_model = (rustPackages."unknown".garage_model."0.8.0" { inherit profileName; }).out;
+      garage_rpc = (rustPackages."unknown".garage_rpc."0.8.0" { inherit profileName; }).out;
+      garage_table = (rustPackages."unknown".garage_table."0.8.0" { inherit profileName; }).out;
+      garage_util = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out;
       hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
       hmac = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hmac."0.12.1" { inherit profileName; }).out;
       http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.8" { inherit profileName; }).out;
@@ -1616,9 +1617,9 @@ in
     };
   });
 
-  "unknown".garage_block."0.8.1" = overridableMkRustCrate (profileName: rec {
+  "unknown".garage_block."0.8.0" = overridableMkRustCrate (profileName: rec {
     name = "garage_block";
-    version = "0.8.1";
+    version = "0.8.0";
     registry = "unknown";
     src = fetchCrateLocal (workspaceSrc + "/src/block");
     features = builtins.concatLists [
@@ -1631,13 +1632,14 @@ in
       bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.2.0" { inherit profileName; }).out;
       futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
       futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }).out;
-      garage_db = (rustPackages."unknown".garage_db."0.8.1" { inherit profileName; }).out;
-      garage_rpc = (rustPackages."unknown".garage_rpc."0.8.1" { inherit profileName; }).out;
-      garage_table = (rustPackages."unknown".garage_table."0.8.1" { inherit profileName; }).out;
-      garage_util = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out;
+      garage_db = (rustPackages."unknown".garage_db."0.8.0" { inherit profileName; }).out;
+      garage_rpc = (rustPackages."unknown".garage_rpc."0.8.0" { inherit profileName; }).out;
+      garage_table = (rustPackages."unknown".garage_table."0.8.0" { inherit profileName; }).out;
+      garage_util = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out;
       hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
       opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
       rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
+      rmp_serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" { inherit profileName; }).out;
       serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.137" { inherit profileName; }).out;
       serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; }).out;
       tokio = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.17.0" { inherit profileName; }).out;
@@ -1647,9 +1649,9 @@ in
     };
   });
 
-  "unknown".garage_db."0.8.1" = overridableMkRustCrate (profileName: rec {
+  "unknown".garage_db."0.8.0" = overridableMkRustCrate (profileName: rec {
     name = "garage_db";
-    version = "0.8.1";
+    version = "0.8.0";
     registry = "unknown";
     src = fetchCrateLocal (workspaceSrc + "/src/db");
     features = builtins.concatLists [
@@ -1679,14 +1681,14 @@ in
     };
   });
 
-  "unknown".garage_model."0.8.1" = overridableMkRustCrate (profileName: rec {
+  "unknown".garage_model."0.8.0" = overridableMkRustCrate (profileName: rec {
     name = "garage_model";
-    version = "0.8.1";
+    version = "0.8.0";
     registry = "unknown";
     src = fetchCrateLocal (workspaceSrc + "/src/model");
     features = builtins.concatLists [
       [ "default" ]
-      (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/k2v" || rootFeatures' ? "garage_api/k2v" || rootFeatures' ? "garage_model/k2v") "k2v")
+      (lib.optional (rootFeatures' ? "garage/k2v" || rootFeatures' ? "garage_api/k2v" || rootFeatures' ? "garage_model/k2v") "k2v")
       (lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_model/lmdb") "lmdb")
       [ "sled" ]
       (lib.optional (rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_model/sqlite") "sqlite")
@@ -1699,15 +1701,16 @@ in
       err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
       futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
       futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }).out;
-      garage_block = (rustPackages."unknown".garage_block."0.8.1" { inherit profileName; }).out;
-      garage_db = (rustPackages."unknown".garage_db."0.8.1" { inherit profileName; }).out;
-      garage_rpc = (rustPackages."unknown".garage_rpc."0.8.1" { inherit profileName; }).out;
-      garage_table = (rustPackages."unknown".garage_table."0.8.1" { inherit profileName; }).out;
-      garage_util = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out;
+      garage_block = (rustPackages."unknown".garage_block."0.8.0" { inherit profileName; }).out;
+      garage_db = (rustPackages."unknown".garage_db."0.8.0" { inherit profileName; }).out;
+      garage_rpc = (rustPackages."unknown".garage_rpc."0.8.0" { inherit profileName; }).out;
+      garage_table = (rustPackages."unknown".garage_table."0.8.0" { inherit profileName; }).out;
+      garage_util = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out;
       hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
       netapp = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.5.2" { inherit profileName; }).out;
       opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
       rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
+      rmp_serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" { inherit profileName; }).out;
       serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.137" { inherit profileName; }).out;
       serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; }).out;
       tokio = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.17.0" { inherit profileName; }).out;
@@ -1716,9 +1719,9 @@ in
     };
   });
 
-  "unknown".garage_rpc."0.8.1" = overridableMkRustCrate (profileName: rec {
+  "unknown".garage_rpc."0.8.0" = overridableMkRustCrate (profileName: rec {
     name = "garage_rpc";
-    version = "0.8.1";
+    version = "0.8.0";
     registry = "unknown";
     src = fetchCrateLocal (workspaceSrc + "/src/rpc");
     features = builtins.concatLists [
@@ -1738,7 +1741,7 @@ in
       ${ if rootFeatures' ? "garage/consul-discovery" || rootFeatures' ? "garage_rpc/consul-discovery" || rootFeatures' ? "garage_rpc/err-derive" then "err_derive" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
       futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
       futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }).out;
-      garage_util = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out;
+      garage_util = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out;
       gethostname = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".gethostname."0.2.3" { inherit profileName; }).out;
       hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
       ${ if rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/k8s-openapi" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "k8s_openapi" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".k8s-openapi."0.16.0" { inherit profileName; }).out;
@@ -1749,6 +1752,7 @@ in
       pnet_datalink = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".pnet_datalink."0.28.0" { inherit profileName; }).out;
       rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
       ${ if rootFeatures' ? "garage/consul-discovery" || rootFeatures' ? "garage_rpc/consul-discovery" || rootFeatures' ? "garage_rpc/reqwest" then "reqwest" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".reqwest."0.11.12" { inherit profileName; }).out;
+      rmp_serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" { inherit profileName; }).out;
       ${ if rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/kubernetes-discovery" || rootFeatures' ? "garage_rpc/schemars" then "schemars" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".schemars."0.8.8" { inherit profileName; }).out;
       serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.137" { inherit profileName; }).out;
       serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; }).out;
@@ -1759,24 +1763,24 @@ in
     };
   });
 
-  "unknown".garage_table."0.8.1" = overridableMkRustCrate (profileName: rec {
+  "unknown".garage_table."0.8.0" = overridableMkRustCrate (profileName: rec {
     name = "garage_table";
-    version = "0.8.1";
+    version = "0.8.0";
     registry = "unknown";
     src = fetchCrateLocal (workspaceSrc + "/src/table");
     dependencies = {
-      arc_swap = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".arc-swap."1.5.0" { inherit profileName; }).out;
       async_trait = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".async-trait."0.1.52" { profileName = "__noProfile"; }).out;
       bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.2.0" { inherit profileName; }).out;
       futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
       futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }).out;
-      garage_db = (rustPackages."unknown".garage_db."0.8.1" { inherit profileName; }).out;
-      garage_rpc = (rustPackages."unknown".garage_rpc."0.8.1" { inherit profileName; }).out;
-      garage_util = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out;
+      garage_db = (rustPackages."unknown".garage_db."0.8.0" { inherit profileName; }).out;
+      garage_rpc = (rustPackages."unknown".garage_rpc."0.8.0" { inherit profileName; }).out;
+      garage_util = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out;
       hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
       hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out;
       opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
       rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
+      rmp_serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" { inherit profileName; }).out;
       serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.137" { inherit profileName; }).out;
       serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; }).out;
       tokio = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.17.0" { inherit profileName; }).out;
@@ -1784,13 +1788,13 @@ in
     };
   });
 
-  "unknown".garage_util."0.8.1" = overridableMkRustCrate (profileName: rec {
+  "unknown".garage_util."0.8.0" = overridableMkRustCrate (profileName: rec {
     name = "garage_util";
-    version = "0.8.1";
+    version = "0.8.0";
     registry = "unknown";
     src = fetchCrateLocal (workspaceSrc + "/src/util");
     features = builtins.concatLists [
-      (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/k2v" || rootFeatures' ? "garage_api/k2v" || rootFeatures' ? "garage_model/k2v" || rootFeatures' ? "garage_util/k2v") "k2v")
+      (lib.optional (rootFeatures' ? "garage/k2v" || rootFeatures' ? "garage_api/k2v" || rootFeatures' ? "garage_model/k2v" || rootFeatures' ? "garage_util/k2v") "k2v")
     ];
     dependencies = {
       arc_swap = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".arc-swap."1.5.0" { inherit profileName; }).out;
@@ -1801,10 +1805,9 @@ in
       digest = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".digest."0.10.3" { inherit profileName; }).out;
       err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
       futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
-      garage_db = (rustPackages."unknown".garage_db."0.8.1" { inherit profileName; }).out;
+      garage_db = (rustPackages."unknown".garage_db."0.8.0" { inherit profileName; }).out;
       git_version = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".git-version."0.3.5" { inherit profileName; }).out;
       hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
-      hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out;
       http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.8" { inherit profileName; }).out;
       hyper = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."0.14.18" { inherit profileName; }).out;
       lazy_static = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".lazy_static."1.4.0" { inherit profileName; }).out;
@@ -1820,23 +1823,20 @@ in
       tracing = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tracing."0.1.32" { inherit profileName; }).out;
       xxhash_rust = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".xxhash-rust."0.8.4" { inherit profileName; }).out;
     };
-    devDependencies = {
-      mktemp = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".mktemp."0.4.1" { inherit profileName; }).out;
-    };
   });
 
-  "unknown".garage_web."0.8.1" = overridableMkRustCrate (profileName: rec {
+  "unknown".garage_web."0.8.0" = overridableMkRustCrate (profileName: rec {
     name = "garage_web";
-    version = "0.8.1";
+    version = "0.8.0";
     registry = "unknown";
     src = fetchCrateLocal (workspaceSrc + "/src/web");
     dependencies = {
       err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
       futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
-      garage_api = (rustPackages."unknown".garage_api."0.8.1" { inherit profileName; }).out;
-      garage_model = (rustPackages."unknown".garage_model."0.8.1" { inherit profileName; }).out;
-      garage_table = (rustPackages."unknown".garage_table."0.8.1" { inherit profileName; }).out;
-      garage_util = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out;
+      garage_api = (rustPackages."unknown".garage_api."0.8.0" { inherit profileName; }).out;
+      garage_model = (rustPackages."unknown".garage_model."0.8.0" { inherit profileName; }).out;
+      garage_table = (rustPackages."unknown".garage_table."0.8.0" { inherit profileName; }).out;
+      garage_util = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out;
       http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.8" { inherit profileName; }).out;
       hyper = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."0.14.18" { inherit profileName; }).out;
       opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
@@ -2450,7 +2450,7 @@ in
     dependencies = {
       base64 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".base64."0.13.0" { inherit profileName; }).out;
       ${ if rootFeatures' ? "k2v-client/clap" || rootFeatures' ? "k2v-client/cli" then "clap" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".clap."3.1.18" { inherit profileName; }).out;
-      ${ if rootFeatures' ? "k2v-client/cli" || rootFeatures' ? "k2v-client/garage_util" then "garage_util" else null } = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out;
+      ${ if rootFeatures' ? "k2v-client/cli" || rootFeatures' ? "k2v-client/garage_util" then "garage_util" else null } = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out;
       http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.8" { inherit profileName; }).out;
       log = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".log."0.4.16" { inherit profileName; }).out;
       rusoto_core = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rusoto_core."0.48.0" { inherit profileName; }).out;
@@ -8,7 +8,7 @@ In this section, we cover the following web applications:
 | Name | Status | Note |
 |------|--------|------|
 | [Nextcloud](#nextcloud) | ✅ | Both Primary Storage and External Storage are supported |
-| [Peertube](#peertube) | ✅ | Supported with the website endpoint, proxifying private videos unsupported |
+| [Peertube](#peertube) | ✅ | Must be configured with the website endpoint |
 | [Mastodon](#mastodon) | ✅ | Natively supported |
 | [Matrix](#matrix) | ✅ | Tested with `synapse-s3-storage-provider` |
 | [Pixelfed](#pixelfed) | ❓ | Not yet tested |
@@ -128,10 +128,6 @@ In other words, Peertube is only responsible of the "control plane" and offload
 In return, this system is a bit harder to configure.
 We show how it is still possible to configure Garage with Peertube, allowing you to spread the load and the bandwidth usage on the Garage cluster.
 
-Starting from version 5.0, Peertube also supports improving the security for private videos by not exposing them directly
-but relying on a single control point in the Peertube instance. This is based on S3 per-object and prefix ACL, which are not currently supported
-in Garage, so this feature is unsupported. While this technically impedes security for private videos, it is not a blocking issue and could be
-a reasonable trade-off for some instances.
 
 ### Create resources in Garage
 
@@ -199,11 +195,6 @@ object_storage:
 
   max_upload_part: 2GB
 
-  proxy:
-    # You may enable this feature, yet it will not provide any security benefit, so
-    # you should rather benefit from Garage public endpoint for all videos
-    proxify_private_files: false
-
   streaming_playlists:
     bucket_name: 'peertube-playlist'
 
@@ -39,7 +39,7 @@ Now you can enter our nix-shell, all the required packages will be downloaded bu
 nix-shell
 ```
 
-You can use the traditional Rust development workflow:
+You can use the traditionnal Rust development workflow:
 
 ```bash
 cargo build # compile the project
@@ -96,7 +96,7 @@ Performance characteristics of the different DB engines are as follows:
 
 - Sled: the default database engine, which tends to produce
   large data files and also has performance issues, especially when the metadata folder
-  is on a traditional HDD and not on SSD.
+  is on a traditionnal HDD and not on SSD.
 - LMDB: the recommended alternative on 64-bit systems,
   much more space-efficiant and slightly faster. Note that the data format of LMDB is not portable
   between architectures, so for instance the Garage database of an x86-64
@@ -267,10 +267,6 @@ This key should be specified here in the form of a 32-byte hex-encoded
 random string. Such a string can be generated with a command
 such as `openssl rand -hex 32`.
 
-### `rpc_secret_file`
-
-Like `rpc_secret` above, just that this is the path to a file that Garage will try to read the secret from.
-
 ### `rpc_bind_addr`
 
 The address and port on which to bind for inter-cluster communcations
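The removed `rpc_secret_file` subsection (present only on the 0.8.1 side of this comparison) documents a secret that can be given either inline or via a file path. A minimal sketch of that inline-or-file fallback logic — the field names and error handling here are invented for illustration, not Garage's actual implementation:

```rust
use std::fs;

// Hypothetical config fields mirroring the documented options.
struct Config {
    rpc_secret: Option<String>,
    rpc_secret_file: Option<String>,
}

fn rpc_secret(config: &Config) -> Result<String, String> {
    match (&config.rpc_secret, &config.rpc_secret_file) {
        // Inline secret wins when it is the only one given.
        (Some(s), None) => Ok(s.clone()),
        // Otherwise read the hex-encoded secret from the file,
        // trimming the trailing newline.
        (None, Some(path)) => fs::read_to_string(path)
            .map(|s| s.trim().to_string())
            .map_err(|e| format!("cannot read {path}: {e}")),
        (Some(_), Some(_)) => {
            Err("rpc_secret and rpc_secret_file are mutually exclusive".into())
        }
        (None, None) => Err("missing rpc_secret".into()),
    }
}
```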
flake.lock (16 lines changed)

@@ -55,21 +55,6 @@
         "type": "github"
       }
     },
-    "flake-utils_2": {
-      "locked": {
-        "lastModified": 1667395993,
-        "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
-        "owner": "numtide",
-        "repo": "flake-utils",
-        "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
-        "type": "github"
-      },
-      "original": {
-        "owner": "numtide",
-        "repo": "flake-utils",
-        "type": "github"
-      }
-    },
     "nixpkgs": {
       "locked": {
         "lastModified": 1665657542,
@@ -89,7 +74,6 @@
     "root": {
       "inputs": {
         "cargo2nix": "cargo2nix",
-        "flake-utils": "flake-utils_2",
         "nixpkgs": "nixpkgs"
       }
     },
flake.nix (42 lines changed)

@@ -7,30 +7,22 @@
     url = "github:Alexis211/cargo2nix/a7a61179b66054904ef6a195d8da736eaaa06c36";
     inputs.nixpkgs.follows = "nixpkgs";
   };
-  inputs.flake-utils.url = "github:numtide/flake-utils";
 
-  outputs = { self, nixpkgs, cargo2nix, flake-utils }:
-    let
-      git_version = self.lastModifiedDate;
-      compile = import ./nix/compile.nix;
-    in flake-utils.lib.eachDefaultSystem (system:
-      let pkgs = nixpkgs.legacyPackages.${system};
-      in {
-        packages = {
-          default = (compile {
-            inherit system git_version;
-            pkgsSrc = nixpkgs;
-            cargo2nixOverlay = cargo2nix.overlays.default;
-            release = true;
-          }).workspace.garage { compileMode = "build"; };
-        };
-        devShell = ((compile {
-          inherit system git_version;
-          pkgsSrc = nixpkgs;
-          cargo2nixOverlay = cargo2nix.overlays.default;
-          release = false;
-        }).workspaceShell {
-          packages = [ pkgs.rustfmt cargo2nix.packages.${system}.default ];
-        });
-      });
+  outputs = { self, nixpkgs, cargo2nix }: let
+    git_version = self.lastModifiedDate;
+    compile = import ./nix/compile.nix;
+    forAllSystems = nixpkgs.lib.genAttrs nixpkgs.lib.systems.flakeExposed;
+  in
+  {
+    packages = forAllSystems (system: {
+      default = (compile {
+        inherit system git_version;
+        pkgsSrc = nixpkgs;
+        cargo2nixOverlay = cargo2nix.overlays.default;
+        release = true;
+      }).workspace.garage {
+        compileMode = "build";
+      };
+    });
+  };
 }
@@ -1,6 +1,6 @@
 [package]
 name = "garage_api"
-version = "0.8.1"
+version = "0.8.0"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -14,11 +14,11 @@ path = "lib.rs"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-garage_model = { version = "0.8.1", path = "../model" }
-garage_table = { version = "0.8.1", path = "../table" }
-garage_block = { version = "0.8.1", path = "../block" }
-garage_util = { version = "0.8.1", path = "../util" }
-garage_rpc = { version = "0.8.1", path = "../rpc" }
+garage_model = { version = "0.8.0", path = "../model" }
+garage_table = { version = "0.8.0", path = "../table" }
+garage_block = { version = "0.8.0", path = "../block" }
+garage_util = { version = "0.8.0", path = "../util" }
+garage_rpc = { version = "0.8.0", path = "../rpc" }
 
 async-trait = "0.1.7"
 base64 = "0.13"
(file header lost in extraction; this hunk is in garage_api's generateQueryParameters! macro)

@@ -145,7 +145,6 @@ macro_rules! generateQueryParameters {
 	) => {
 		#[derive(Debug)]
 		#[allow(non_camel_case_types)]
-		#[allow(clippy::upper_case_acronyms)]
 		enum Keyword {
 			EMPTY,
 			$( $kw_name, )*
src/api/s3/put.rs

@@ -119,17 +119,6 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
 		return Ok((version_uuid, data_md5sum_hex));
 	}
 
-	// The following consists in many steps that can each fail.
-	// Keep track that some cleanup will be needed if things fail
-	// before everything is finished (cleanup is done using the Drop trait).
-	let mut interrupted_cleanup = InterruptedCleanup(Some((
-		garage.clone(),
-		bucket.id,
-		key.into(),
-		version_uuid,
-		version_timestamp,
-	)));
-
 	// Write version identifier in object table so that we have a trace
 	// that we are uploading something
 	let mut object_version = ObjectVersion {
@@ -150,27 +139,44 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
 	// Transfer data and verify checksum
 	let first_block_hash = async_blake2sum(first_block.clone()).await;
 
-	let (total_size, data_md5sum, data_sha256sum) = read_and_put_blocks(
-		&garage,
-		&version,
-		1,
-		first_block,
-		first_block_hash,
-		&mut chunker,
-	)
-	.await?;
-
-	ensure_checksum_matches(
-		data_md5sum.as_slice(),
-		data_sha256sum,
-		content_md5.as_deref(),
-		content_sha256,
-	)?;
-
-	check_quotas(&garage, bucket, key, total_size).await?;
+	let tx_result = (|| async {
+		let (total_size, data_md5sum, data_sha256sum) = read_and_put_blocks(
+			&garage,
+			&version,
+			1,
+			first_block,
+			first_block_hash,
+			&mut chunker,
+		)
+		.await?;
+
+		ensure_checksum_matches(
+			data_md5sum.as_slice(),
+			data_sha256sum,
+			content_md5.as_deref(),
+			content_sha256,
+		)?;
+
+		check_quotas(&garage, bucket, key, total_size).await?;
+
+		Ok((total_size, data_md5sum))
+	})()
+	.await;
+
+	// If something went wrong, clean up
+	let (total_size, md5sum_arr) = match tx_result {
+		Ok(rv) => rv,
+		Err(e) => {
+			// Mark object as aborted, this will free the blocks further down
+			object_version.state = ObjectVersionState::Aborted;
+			let object = Object::new(bucket.id, key.into(), vec![object_version.clone()]);
+			garage.object_table.insert(&object).await?;
+			return Err(e);
+		}
+	};
 
 	// Save final object state, marked as Complete
-	let md5sum_hex = hex::encode(data_md5sum);
+	let md5sum_hex = hex::encode(md5sum_arr);
 	object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock(
 		ObjectVersionMeta {
 			headers,
@@ -182,10 +188,6 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
 	let object = Object::new(bucket.id, key.into(), vec![object_version]);
 	garage.object_table.insert(&object).await?;
 
-	// We were not interrupted, everything went fine.
-	// We won't have to clean up on drop.
-	interrupted_cleanup.cancel();
-
 	Ok((version_uuid, md5sum_hex))
 }
 
@@ -424,33 +426,6 @@ pub fn put_response(version_uuid: Uuid, md5sum_hex: String) -> Response<Body> {
 		.unwrap()
 }
 
-struct InterruptedCleanup(Option<(Arc<Garage>, Uuid, String, Uuid, u64)>);
-
-impl InterruptedCleanup {
-	fn cancel(&mut self) {
-		drop(self.0.take());
-	}
-}
-
-impl Drop for InterruptedCleanup {
-	fn drop(&mut self) {
-		if let Some((garage, bucket_id, key, version_uuid, version_ts)) = self.0.take() {
-			tokio::spawn(async move {
-				let object_version = ObjectVersion {
-					uuid: version_uuid,
-					timestamp: version_ts,
-					state: ObjectVersionState::Aborted,
-				};
-				let object = Object::new(bucket_id, key, vec![object_version]);
-				if let Err(e) = garage.object_table.insert(&object).await {
-					warn!("Cannot cleanup after aborted PutObject: {}", e);
-				}
-			});
-		}
-	}
-}
-
-// ----
-
 pub async fn handle_create_multipart_upload(
 	garage: Arc<Garage>,
 	req: &Request<Body>,
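Note on the hunks above: the removed side implements cleanup-on-failure with a guard whose Drop impl runs on any early exit, instead of matching on a closure's result. A minimal standalone sketch of that RAII-guard pattern; all names here are illustrative, not Garage's actual types:

	// RAII cleanup guard, same shape as InterruptedCleanup above.
	struct CleanupGuard(Option<String>);

	impl CleanupGuard {
		// Success path: disarm the guard so Drop does nothing.
		fn cancel(&mut self) {
			self.0.take();
		}
	}

	impl Drop for CleanupGuard {
		fn drop(&mut self) {
			// Runs on every exit path (early return, `?`, panic unwind),
			// unless cancel() disarmed the guard first.
			if let Some(upload_id) = self.0.take() {
				eprintln!("cleaning up interrupted upload {}", upload_id);
			}
		}
	}

	fn upload(fail: bool) -> Result<(), &'static str> {
		let mut guard = CleanupGuard(Some("version-1234".into()));
		if fail {
			return Err("network error"); // guard's Drop fires here
		}
		guard.cancel(); // everything went fine: no cleanup on drop
		Ok(())
	}

	fn main() {
		assert!(upload(true).is_err()); // prints the cleanup message
		assert!(upload(false).is_ok()); // silent
	}

The advantage over the match-based version is that the cleanup also runs if a later step (for example the final object-table insert) fails or panics, not just if the guarded block returns an error.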
src/block/Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "garage_block"
-version = "0.8.1"
+version = "0.8.0"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -14,10 +14,10 @@ path = "lib.rs"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-garage_db = { version = "0.8.1", path = "../db" }
-garage_rpc = { version = "0.8.1", path = "../rpc" }
-garage_util = { version = "0.8.1", path = "../util" }
-garage_table = { version = "0.8.1", path = "../table" }
+garage_db = { version = "0.8.0", path = "../db" }
+garage_rpc = { version = "0.8.0", path = "../rpc" }
+garage_util = { version = "0.8.0", path = "../util" }
+garage_table = { version = "0.8.0", path = "../table" }
 
 opentelemetry = "0.17"
 
@@ -31,6 +31,7 @@ rand = "0.8"
 async-compression = { version = "0.3", features = ["tokio", "zstd"] }
 zstd = { version = "0.9", default-features = false }
 
+rmp-serde = "0.15"
 serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
 serde_bytes = "0.11"
src/block/manager.rs

@@ -3,10 +3,8 @@ use std::pin::Pin;
 use std::sync::Arc;
 use std::time::Duration;
 
-use arc_swap::ArcSwapOption;
 use async_trait::async_trait;
 use bytes::Bytes;
-use rand::prelude::*;
 use serde::{Deserialize, Serialize};
 
 use futures::Stream;
@@ -24,12 +22,9 @@ use garage_rpc::rpc_helper::netapp::stream::{stream_asyncread, ByteStream};
 
 use garage_db as db;
 
-use garage_util::background::{vars, BackgroundRunner};
 use garage_util::data::*;
 use garage_util::error::*;
 use garage_util::metrics::RecordDuration;
-use garage_util::persister::PersisterShared;
-use garage_util::time::msec_to_rfc3339;
 
 use garage_rpc::rpc_helper::OrderTag;
 use garage_rpc::system::System;
@@ -92,17 +87,7 @@ pub struct BlockManager {
 
 	pub(crate) metrics: BlockManagerMetrics,
 
-	pub scrub_persister: PersisterShared<ScrubWorkerPersisted>,
-	tx_scrub_command: ArcSwapOption<mpsc::Sender<ScrubWorkerCommand>>,
-}
-
-#[derive(Serialize, Deserialize, Clone, Debug)]
-pub struct BlockResyncErrorInfo {
-	pub hash: Hash,
-	pub refcount: u64,
-	pub error_count: u64,
-	pub last_try: u64,
-	pub next_try: u64,
+	tx_scrub_command: mpsc::Sender<ScrubWorkerCommand>,
 }
 
 // This custom struct contains functions that must only be ran
@@ -129,14 +114,9 @@ impl BlockManager {
 			.netapp
 			.endpoint("garage_block/manager.rs/Rpc".to_string());
 
-		let metrics = BlockManagerMetrics::new(
-			compression_level,
-			rc.rc.clone(),
-			resync.queue.clone(),
-			resync.errors.clone(),
-		);
+		let metrics = BlockManagerMetrics::new(resync.queue.clone(), resync.errors.clone());
 
-		let scrub_persister = PersisterShared::new(&system.metadata_dir, "scrub_info");
+		let (scrub_tx, scrub_rx) = mpsc::channel(1);
 
 		let block_manager = Arc::new(Self {
 			replication,
@@ -148,46 +128,21 @@ impl BlockManager {
 			system,
 			endpoint,
 			metrics,
-			scrub_persister,
-			tx_scrub_command: ArcSwapOption::new(None),
+			tx_scrub_command: scrub_tx,
 		});
 		block_manager.endpoint.set_handler(block_manager.clone());
 
-		block_manager
-	}
-
-	pub fn spawn_workers(self: &Arc<Self>, bg: &BackgroundRunner) {
 		// Spawn a bunch of resync workers
 		for index in 0..MAX_RESYNC_WORKERS {
-			let worker = ResyncWorker::new(index, self.clone());
-			bg.spawn_worker(worker);
+			let worker = ResyncWorker::new(index, block_manager.clone());
+			block_manager.system.background.spawn_worker(worker);
 		}
 
 		// Spawn scrub worker
-		let (scrub_tx, scrub_rx) = mpsc::channel(1);
-		self.tx_scrub_command.store(Some(Arc::new(scrub_tx)));
-		bg.spawn_worker(ScrubWorker::new(
-			self.clone(),
-			scrub_rx,
-			self.scrub_persister.clone(),
-		));
-	}
-
-	pub fn register_bg_vars(&self, vars: &mut vars::BgVars) {
-		self.resync.register_bg_vars(vars);
-
-		vars.register_rw(
-			&self.scrub_persister,
-			"scrub-tranquility",
-			|p| p.get_with(|x| x.tranquility),
-			|p, tranquility| p.set_with(|x| x.tranquility = tranquility),
-		);
-		vars.register_ro(&self.scrub_persister, "scrub-last-completed", |p| {
-			p.get_with(|x| msec_to_rfc3339(x.time_last_complete_scrub))
-		});
-		vars.register_ro(&self.scrub_persister, "scrub-corruptions_detected", |p| {
-			p.get_with(|x| x.corruptions_detected)
-		});
+		let scrub_worker = ScrubWorker::new(block_manager.clone(), scrub_rx);
+		block_manager.system.background.spawn_worker(scrub_worker);
+
+		block_manager
 	}
 
 	/// Ask nodes that might have a (possibly compressed) block for it
@@ -354,42 +309,9 @@ impl BlockManager {
 		Ok(self.rc.rc.len()?)
 	}
 
-	/// Get number of items in the refcount table
-	pub fn rc_fast_len(&self) -> Result<Option<usize>, Error> {
-		Ok(self.rc.rc.fast_len()?)
-	}
-
 	/// Send command to start/stop/manager scrub worker
-	pub async fn send_scrub_command(&self, cmd: ScrubWorkerCommand) -> Result<(), Error> {
-		let tx = self.tx_scrub_command.load();
-		let tx = tx.as_ref().ok_or_message("scrub worker is not running")?;
-		tx.send(cmd).await.ok_or_message("send error")?;
-		Ok(())
-	}
-
-	/// Get the reference count of a block
-	pub fn get_block_rc(&self, hash: &Hash) -> Result<u64, Error> {
-		Ok(self.rc.get_block_rc(hash)?.as_u64())
-	}
-
-	/// List all resync errors
-	pub fn list_resync_errors(&self) -> Result<Vec<BlockResyncErrorInfo>, Error> {
-		let mut blocks = Vec::with_capacity(self.resync.errors.len());
-		for ent in self.resync.errors.iter()? {
-			let (hash, cnt) = ent?;
-			let cnt = ErrorCounter::decode(&cnt);
-			blocks.push(BlockResyncErrorInfo {
-				hash: Hash::try_from(&hash).unwrap(),
-				refcount: 0,
-				error_count: cnt.errors,
-				last_try: cnt.last_try,
-				next_try: cnt.next_try(),
-			});
-		}
-		for block in blocks.iter_mut() {
-			block.refcount = self.get_block_rc(&block.hash)?;
-		}
-		Ok(blocks)
+	pub async fn send_scrub_command(&self, cmd: ScrubWorkerCommand) {
+		let _ = self.tx_scrub_command.send(cmd).await;
 	}
 
 	//// ----- Managing the reference counter ----
@@ -681,21 +603,14 @@ impl BlockManagerLocked {
 		}
 	};
 
-	let mut path_tmp = path.clone();
-	let tmp_extension = format!("tmp{}", hex::encode(thread_rng().gen::<[u8; 4]>()));
-	path_tmp.set_extension(tmp_extension);
-
-	let mut delete_on_drop = DeleteOnDrop(Some(path_tmp.clone()));
-
-	let mut f = fs::File::create(&path_tmp).await?;
+	let mut path2 = path.clone();
+	path2.set_extension("tmp");
+	let mut f = fs::File::create(&path2).await?;
 	f.write_all(data).await?;
 	f.sync_all().await?;
 	drop(f);
 
-	fs::rename(path_tmp, path).await?;
-
-	delete_on_drop.cancel();
-
+	fs::rename(path2, path).await?;
 	if let Some(to_delete) = to_delete {
 		fs::remove_file(to_delete).await?;
 	}
@@ -761,23 +676,3 @@ async fn read_stream_to_end(mut stream: ByteStream) -> Result<Bytes, Error> {
 		.concat()
 		.into())
 }
-
-struct DeleteOnDrop(Option<PathBuf>);
-
-impl DeleteOnDrop {
-	fn cancel(&mut self) {
-		drop(self.0.take());
-	}
-}
-
-impl Drop for DeleteOnDrop {
-	fn drop(&mut self) {
-		if let Some(path) = self.0.take() {
-			tokio::spawn(async move {
-				if let Err(e) = fs::remove_file(&path).await {
-					debug!("DeleteOnDrop failed for {}: {}", path.display(), e);
-				}
-			});
-		}
-	}
-}
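Both sides of the write path above use the write-to-temporary-then-rename idiom; the removed side additionally randomizes the temporary extension and arms a DeleteOnDrop guard so a failed write does not leave a stray .tmp file behind. A minimal synchronous sketch of the core idiom (std::fs for brevity, where the code above uses async tokio::fs):

	use std::fs;
	use std::io::Write;
	use std::path::Path;

	// Write `data` to `path` atomically: a reader never observes a
	// half-written file, only the old content or the new content.
	fn write_atomically(path: &Path, data: &[u8]) -> std::io::Result<()> {
		let tmp = path.with_extension("tmp");
		let mut f = fs::File::create(&tmp)?;
		f.write_all(data)?;
		f.sync_all()?; // flush to disk before the rename publishes the file
		drop(f);
		fs::rename(&tmp, path)?; // atomic replacement on POSIX filesystems
		Ok(())
	}

	fn main() -> std::io::Result<()> {
		// Illustrative path only.
		write_atomically(Path::new("/tmp/example.blk"), b"hello")
	}

Randomizing the temporary extension, as the removed side does, additionally protects against two concurrent writers picking the same .tmp name.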
src/block/metrics.rs

@@ -1,12 +1,9 @@
 use opentelemetry::{global, metrics::*};
 
-use garage_db as db;
 use garage_db::counted_tree_hack::CountedTree;
 
 /// TableMetrics reference all counter used for metrics
 pub struct BlockManagerMetrics {
-	pub(crate) _compression_level: ValueObserver<u64>,
-	pub(crate) _rc_size: ValueObserver<u64>,
 	pub(crate) _resync_queue_len: ValueObserver<u64>,
 	pub(crate) _resync_errored_blocks: ValueObserver<u64>,
 
@@ -26,31 +23,9 @@ pub struct BlockManagerMetrics {
 }
 
 impl BlockManagerMetrics {
-	pub fn new(
-		compression_level: Option<i32>,
-		rc_tree: db::Tree,
-		resync_queue: CountedTree,
-		resync_errors: CountedTree,
-	) -> Self {
+	pub fn new(resync_queue: CountedTree, resync_errors: CountedTree) -> Self {
 		let meter = global::meter("garage_model/block");
 		Self {
-			_compression_level: meter
-				.u64_value_observer("block.compression_level", move |observer| {
-					match compression_level {
-						Some(v) => observer.observe(v as u64, &[]),
-						None => observer.observe(0 as u64, &[]),
-					}
-				})
-				.with_description("Garage compression level for node")
-				.init(),
-			_rc_size: meter
-				.u64_value_observer("block.rc_size", move |observer| {
-					if let Ok(Some(v)) = rc_tree.fast_len() {
-						observer.observe(v as u64, &[])
-					}
-				})
-				.with_description("Number of blocks known to the reference counter")
-				.init(),
 			_resync_queue_len: meter
 				.u64_value_observer("block.resync_queue_length", move |observer| {
 					observer.observe(resync_queue.len() as u64, &[])
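The observers above are gauge-style callbacks: the closure captures its data source and is polled by the metrics SDK at collection time. A sketch following the same opentelemetry 0.17 calls that appear in the hunk, with an illustrative metric name and an AtomicU64 standing in for the real counted tree:

	use std::sync::atomic::{AtomicU64, Ordering};
	use std::sync::Arc;

	use opentelemetry::global;
	use opentelemetry::metrics::ValueObserver;

	// Register a gauge the way the diff above does; keep the returned
	// ValueObserver alive (the real code stores it in a struct field).
	fn register_queue_gauge(queue_len: Arc<AtomicU64>) -> ValueObserver<u64> {
		let meter = global::meter("example/block");
		meter
			.u64_value_observer("example.queue_length", move |observer| {
				// The SDK invokes this callback on each collection cycle.
				observer.observe(queue_len.load(Ordering::Relaxed), &[])
			})
			.with_description("Current length of the example queue")
			.init()
	}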
src/block/rc.rs

@@ -169,11 +169,4 @@ impl RcEntry {
 	pub(crate) fn is_needed(&self) -> bool {
 		!self.is_deletable()
 	}
-
-	pub(crate) fn as_u64(&self) -> u64 {
-		match self {
-			RcEntry::Present { count } => *count,
-			_ => 0,
-		}
-	}
 }
src/block/repair.rs

@@ -13,7 +13,7 @@ use tokio::sync::watch;
 use garage_util::background::*;
 use garage_util::data::*;
 use garage_util::error::*;
-use garage_util::persister::PersisterShared;
+use garage_util::persister::Persister;
 use garage_util::time::*;
 use garage_util::tranquilizer::Tranquilizer;
 
@@ -53,7 +53,7 @@ impl Worker for RepairWorker {
 		"Block repair worker".into()
 	}
 
-	fn status(&self) -> WorkerStatus {
+	fn info(&self) -> Option<String> {
 		match self.block_iter.as_ref() {
 			None => {
 				let idx_bytes = self
@@ -66,20 +66,9 @@ impl Worker for RepairWorker {
 				} else {
 					idx_bytes
 				};
-				WorkerStatus {
-					progress: Some("0.00%".into()),
-					freeform: vec![format!(
-						"Currently in phase 1, iterator position: {}",
-						hex::encode(idx_bytes)
-					)],
-					..Default::default()
-				}
+				Some(format!("Phase 1: {}", hex::encode(idx_bytes)))
 			}
-			Some(bi) => WorkerStatus {
-				progress: Some(format!("{:.2}%", bi.progress() * 100.)),
-				freeform: vec!["Currently in phase 2".into()],
-				..Default::default()
-			},
+			Some(bi) => Some(format!("Phase 2: {:.2}% done", bi.progress() * 100.)),
 		}
 	}
 
@@ -148,7 +137,7 @@ impl Worker for RepairWorker {
 		}
 	}
 
-	async fn wait_for_work(&mut self) -> WorkerState {
+	async fn wait_for_work(&mut self, _must_exit: &watch::Receiver<bool>) -> WorkerState {
 		unreachable!()
 	}
 }
@@ -168,24 +157,15 @@ pub struct ScrubWorker {
 	work: ScrubWorkerState,
 	tranquilizer: Tranquilizer,
 
-	persister: PersisterShared<ScrubWorkerPersisted>,
+	persister: Persister<ScrubWorkerPersisted>,
+	persisted: ScrubWorkerPersisted,
 }
 
 #[derive(Serialize, Deserialize)]
-pub struct ScrubWorkerPersisted {
-	pub tranquility: u32,
-	pub(crate) time_last_complete_scrub: u64,
-	pub(crate) corruptions_detected: u64,
-}
-impl garage_util::migrate::InitialFormat for ScrubWorkerPersisted {}
-impl Default for ScrubWorkerPersisted {
-	fn default() -> Self {
-		ScrubWorkerPersisted {
-			time_last_complete_scrub: 0,
-			tranquility: INITIAL_SCRUB_TRANQUILITY,
-			corruptions_detected: 0,
-		}
-	}
+struct ScrubWorkerPersisted {
+	tranquility: u32,
+	time_last_complete_scrub: u64,
+	corruptions_detected: u64,
 }
 
 enum ScrubWorkerState {
@@ -206,20 +186,27 @@ pub enum ScrubWorkerCommand {
 	Pause(Duration),
 	Resume,
 	Cancel,
+	SetTranquility(u32),
 }
 
 impl ScrubWorker {
-	pub(crate) fn new(
-		manager: Arc<BlockManager>,
-		rx_cmd: mpsc::Receiver<ScrubWorkerCommand>,
-		persister: PersisterShared<ScrubWorkerPersisted>,
-	) -> Self {
+	pub fn new(manager: Arc<BlockManager>, rx_cmd: mpsc::Receiver<ScrubWorkerCommand>) -> Self {
+		let persister = Persister::new(&manager.system.metadata_dir, "scrub_info");
+		let persisted = match persister.load() {
+			Ok(v) => v,
+			Err(_) => ScrubWorkerPersisted {
+				time_last_complete_scrub: 0,
+				tranquility: INITIAL_SCRUB_TRANQUILITY,
+				corruptions_detected: 0,
+			},
+		};
 		Self {
 			manager,
 			rx_cmd,
 			work: ScrubWorkerState::Finished,
 			tranquilizer: Tranquilizer::new(30),
 			persister,
+			persisted,
 		}
 	}
 
@@ -268,6 +255,12 @@ impl ScrubWorker {
 					}
 				}
 			}
+			ScrubWorkerCommand::SetTranquility(t) => {
+				self.persisted.tranquility = t;
+				if let Err(e) = self.persister.save_async(&self.persisted).await {
+					error!("Could not save new tranquilitiy value: {}", e);
+				}
+			}
 		}
 	}
 }
@@ -278,37 +271,29 @@ impl Worker for ScrubWorker {
 		"Block scrub worker".into()
 	}
 
-	fn status(&self) -> WorkerStatus {
-		let (corruptions_detected, tranquility, time_last_complete_scrub) =
-			self.persister.get_with(|p| {
-				(
-					p.corruptions_detected,
-					p.tranquility,
-					p.time_last_complete_scrub,
-				)
-			});
-
-		let mut s = WorkerStatus {
-			persistent_errors: Some(corruptions_detected),
-			tranquility: Some(tranquility),
-			..Default::default()
-		};
-		match &self.work {
-			ScrubWorkerState::Running(bsi) => {
-				s.progress = Some(format!("{:.2}%", bsi.progress() * 100.));
-			}
+	fn info(&self) -> Option<String> {
+		let s = match &self.work {
+			ScrubWorkerState::Running(bsi) => format!(
+				"{:.2}% done (tranquility = {})",
+				bsi.progress() * 100.,
+				self.persisted.tranquility
+			),
 			ScrubWorkerState::Paused(bsi, rt) => {
-				s.progress = Some(format!("{:.2}%", bsi.progress() * 100.));
-				s.freeform = vec![format!("Scrub paused, resumes at {}", msec_to_rfc3339(*rt))];
+				format!(
+					"Paused, {:.2}% done, resumes at {}",
+					bsi.progress() * 100.,
+					msec_to_rfc3339(*rt)
+				)
 			}
-			ScrubWorkerState::Finished => {
-				s.freeform = vec![format!(
-					"Last scrub completed at {}",
-					msec_to_rfc3339(time_last_complete_scrub)
-				)];
-			}
-		}
-		s
+			ScrubWorkerState::Finished => format!(
+				"Last completed scrub: {}",
+				msec_to_rfc3339(self.persisted.time_last_complete_scrub)
+			),
+		};
+		Some(format!(
+			"{} ; corruptions detected: {}",
+			s, self.persisted.corruptions_detected
+		))
 	}
 
 	async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
@@ -325,17 +310,18 @@ impl Worker for ScrubWorker {
 			match self.manager.read_block(&hash).await {
 				Err(Error::CorruptData(_)) => {
 					error!("Found corrupt data block during scrub: {:?}", hash);
-					self.persister.set_with(|p| p.corruptions_detected += 1)?;
+					self.persisted.corruptions_detected += 1;
+					self.persister.save_async(&self.persisted).await?;
 				}
 				Err(e) => return Err(e),
 				_ => (),
 			};
 			Ok(self
 				.tranquilizer
-				.tranquilize_worker(self.persister.get_with(|p| p.tranquility)))
+				.tranquilize_worker(self.persisted.tranquility))
 		} else {
-			self.persister
-				.set_with(|p| p.time_last_complete_scrub = now_msec())?;
+			self.persisted.time_last_complete_scrub = now_msec();
+			self.persister.save_async(&self.persisted).await?;
 			self.work = ScrubWorkerState::Finished;
 			self.tranquilizer.clear();
 			Ok(WorkerState::Idle)
@@ -345,13 +331,12 @@ impl Worker for ScrubWorker {
 		}
 	}
 
-	async fn wait_for_work(&mut self) -> WorkerState {
+	async fn wait_for_work(&mut self, _must_exit: &watch::Receiver<bool>) -> WorkerState {
 		let (wait_until, command) = match &self.work {
 			ScrubWorkerState::Running(_) => return WorkerState::Busy,
 			ScrubWorkerState::Paused(_, resume_time) => (*resume_time, ScrubWorkerCommand::Resume),
 			ScrubWorkerState::Finished => (
-				self.persister.get_with(|p| p.time_last_complete_scrub)
-					+ SCRUB_INTERVAL.as_millis() as u64,
+				self.persisted.time_last_complete_scrub + SCRUB_INTERVAL.as_millis() as u64,
 				ScrubWorkerCommand::Start,
 			),
 		};
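The scrub worker above is driven over a bounded tokio mpsc channel (capacity 1, created in BlockManager::new). A self-contained sketch of this command-channel pattern; the Command enum is illustrative, standing in for ScrubWorkerCommand:

	use tokio::sync::mpsc;

	#[derive(Debug)]
	enum Command {
		Pause,
		Resume,
		SetTranquility(u32),
	}

	#[tokio::main]
	async fn main() {
		// Capacity 1, as above: admin commands are rare, so a sender may
		// briefly wait if one command is already queued.
		let (tx, mut rx) = mpsc::channel::<Command>(1);

		let worker = tokio::spawn(async move {
			// A real worker would select! between commands and its work
			// loop; here we just drain commands until all senders drop.
			while let Some(cmd) = rx.recv().await {
				println!("worker received {:?}", cmd);
			}
		});

		tx.send(Command::SetTranquility(4)).await.unwrap();
		tx.send(Command::Pause).await.unwrap();
		drop(tx); // closing the channel ends the worker loop
		worker.await.unwrap();
	}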
src/block/resync.rs

@@ -3,6 +3,7 @@ use std::convert::TryInto;
 use std::sync::{Arc, Mutex};
 use std::time::Duration;
 
+use arc_swap::ArcSwap;
 use async_trait::async_trait;
 use serde::{Deserialize, Serialize};
 
@@ -21,7 +22,7 @@ use garage_util::background::*;
 use garage_util::data::*;
 use garage_util::error::*;
 use garage_util::metrics::RecordDuration;
-use garage_util::persister::PersisterShared;
+use garage_util::persister::Persister;
 use garage_util::time::*;
 use garage_util::tranquilizer::Tranquilizer;
 
@@ -48,12 +49,13 @@ const INITIAL_RESYNC_TRANQUILITY: u32 = 2;
 
 pub struct BlockResyncManager {
 	pub(crate) queue: CountedTree,
-	pub(crate) notify: Arc<Notify>,
+	pub(crate) notify: Notify,
 	pub(crate) errors: CountedTree,
 
 	busy_set: BusySet,
 
-	persister: PersisterShared<ResyncPersistedConfig>,
+	persister: Persister<ResyncPersistedConfig>,
+	persisted: ArcSwap<ResyncPersistedConfig>,
 }
 
 #[derive(Serialize, Deserialize, Clone, Copy)]
@@ -61,15 +63,6 @@ struct ResyncPersistedConfig {
 	n_workers: usize,
 	tranquility: u32,
 }
-impl garage_util::migrate::InitialFormat for ResyncPersistedConfig {}
-impl Default for ResyncPersistedConfig {
-	fn default() -> Self {
-		ResyncPersistedConfig {
-			n_workers: 1,
-			tranquility: INITIAL_RESYNC_TRANQUILITY,
-		}
-	}
-}
 
 enum ResyncIterResult {
 	BusyDidSomething,
@@ -97,14 +90,22 @@ impl BlockResyncManager {
 			.expect("Unable to open block_local_resync_errors tree");
 		let errors = CountedTree::new(errors).expect("Could not count block_local_resync_errors");
 
-		let persister = PersisterShared::new(&system.metadata_dir, "resync_cfg");
+		let persister = Persister::new(&system.metadata_dir, "resync_cfg");
+		let persisted = match persister.load() {
+			Ok(v) => v,
+			Err(_) => ResyncPersistedConfig {
+				n_workers: 1,
+				tranquility: INITIAL_RESYNC_TRANQUILITY,
+			},
+		};
 
 		Self {
 			queue,
-			notify: Arc::new(Notify::new()),
+			notify: Notify::new(),
 			errors,
 			busy_set: Arc::new(Mutex::new(HashSet::new())),
 			persister,
+			persisted: ArcSwap::new(Arc::new(persisted)),
 		}
 	}
 
@@ -122,56 +123,6 @@ impl BlockResyncManager {
 		Ok(self.errors.len())
 	}
 
-	/// Clear the error counter for a block and put it in queue immediately
-	pub fn clear_backoff(&self, hash: &Hash) -> Result<(), Error> {
-		let now = now_msec();
-		if let Some(ec) = self.errors.get(hash)? {
-			let mut ec = ErrorCounter::decode(&ec);
-			if ec.errors > 0 {
-				ec.last_try = now - ec.delay_msec();
-				self.errors.insert(hash, ec.encode())?;
-				self.put_to_resync_at(hash, now)?;
-				return Ok(());
-			}
-		}
-		Err(Error::Message(format!(
-			"Block {:?} was not in an errored state",
-			hash
-		)))
-	}
-
-	pub fn register_bg_vars(&self, vars: &mut vars::BgVars) {
-		let notify = self.notify.clone();
-		vars.register_rw(
-			&self.persister,
-			"resync-worker-count",
-			|p| p.get_with(|x| x.n_workers),
-			move |p, n_workers| {
-				if !(1..=MAX_RESYNC_WORKERS).contains(&n_workers) {
-					return Err(Error::Message(format!(
-						"Invalid number of resync workers, must be between 1 and {}",
-						MAX_RESYNC_WORKERS
-					)));
-				}
-				p.set_with(|x| x.n_workers = n_workers)?;
-				notify.notify_waiters();
-				Ok(())
-			},
-		);
-
-		let notify = self.notify.clone();
-		vars.register_rw(
-			&self.persister,
-			"resync-tranquility",
-			|p| p.get_with(|x| x.tranquility),
-			move |p, tranquility| {
-				p.set_with(|x| x.tranquility = tranquility)?;
-				notify.notify_waiters();
-				Ok(())
-			},
-		);
-	}
-
 	// ---- Resync loop ----
 
 	// This part manages a queue of blocks that need to be
@@ -306,7 +257,7 @@ impl BlockResyncManager {
 
 		if let Err(e) = &res {
 			manager.metrics.resync_error_counter.add(1);
-			error!("Error when resyncing {:?}: {}", hash, e);
+			warn!("Error when resyncing {:?}: {}", hash, e);
 
 			let err_counter = match self.errors.get(hash.as_slice())? {
 				Some(ec) => ErrorCounter::decode(&ec).add1(now + 1),
@@ -466,6 +417,33 @@ impl BlockResyncManager {
 
 		Ok(())
 	}
+
+	async fn update_persisted(
+		&self,
+		update: impl Fn(&mut ResyncPersistedConfig),
+	) -> Result<(), Error> {
+		let mut cfg: ResyncPersistedConfig = *self.persisted.load().as_ref();
+		update(&mut cfg);
+		self.persister.save_async(&cfg).await?;
+		self.persisted.store(Arc::new(cfg));
+		self.notify.notify_waiters();
+		Ok(())
+	}
+
+	pub async fn set_n_workers(&self, n_workers: usize) -> Result<(), Error> {
+		if !(1..=MAX_RESYNC_WORKERS).contains(&n_workers) {
+			return Err(Error::Message(format!(
+				"Invalid number of resync workers, must be between 1 and {}",
+				MAX_RESYNC_WORKERS
+			)));
+		}
+		self.update_persisted(|cfg| cfg.n_workers = n_workers).await
+	}
+
+	pub async fn set_tranquility(&self, tranquility: u32) -> Result<(), Error> {
+		self.update_persisted(|cfg| cfg.tranquility = tranquility)
+			.await
+	}
 }
 
 impl Drop for BusyBlock {
@@ -480,18 +458,15 @@ pub(crate) struct ResyncWorker {
 	manager: Arc<BlockManager>,
 	tranquilizer: Tranquilizer,
 	next_delay: Duration,
-	persister: PersisterShared<ResyncPersistedConfig>,
 }
 
 impl ResyncWorker {
 	pub(crate) fn new(index: usize, manager: Arc<BlockManager>) -> Self {
-		let persister = manager.resync.persister.clone();
 		Self {
 			index,
 			manager,
 			tranquilizer: Tranquilizer::new(30),
 			next_delay: Duration::from_secs(10),
-			persister,
 		}
 	}
 }
@@ -502,36 +477,39 @@ impl Worker for ResyncWorker {
 		format!("Block resync worker #{}", self.index + 1)
 	}
 
-	fn status(&self) -> WorkerStatus {
-		let (n_workers, tranquility) = self.persister.get_with(|x| (x.n_workers, x.tranquility));
+	fn info(&self) -> Option<String> {
+		let persisted = self.manager.resync.persisted.load();
 
-		if self.index >= n_workers {
-			return WorkerStatus {
-				freeform: vec!["This worker is currently disabled".into()],
-				..Default::default()
-			};
+		if self.index >= persisted.n_workers {
+			return Some("(unused)".into());
 		}
 
-		WorkerStatus {
-			queue_length: Some(self.manager.resync.queue_len().unwrap_or(0) as u64),
-			tranquility: Some(tranquility),
-			persistent_errors: Some(self.manager.resync.errors_len().unwrap_or(0) as u64),
-			..Default::default()
+		let mut ret = vec![];
+		ret.push(format!("tranquility = {}", persisted.tranquility));
+
+		let qlen = self.manager.resync.queue_len().unwrap_or(0);
+		if qlen > 0 {
+			ret.push(format!("{} blocks in queue", qlen));
 		}
+
+		let elen = self.manager.resync.errors_len().unwrap_or(0);
+		if elen > 0 {
+			ret.push(format!("{} blocks in error state", elen));
+		}
+
+		Some(ret.join(", "))
 	}
 
 	async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
-		let (n_workers, tranquility) = self.persister.get_with(|x| (x.n_workers, x.tranquility));
-
-		if self.index >= n_workers {
+		if self.index >= self.manager.resync.persisted.load().n_workers {
			return Ok(WorkerState::Idle);
 		}
 
 		self.tranquilizer.reset();
 		match self.manager.resync.resync_iter(&self.manager).await {
-			Ok(ResyncIterResult::BusyDidSomething) => {
-				Ok(self.tranquilizer.tranquilize_worker(tranquility))
-			}
+			Ok(ResyncIterResult::BusyDidSomething) => Ok(self
+				.tranquilizer
+				.tranquilize_worker(self.manager.resync.persisted.load().tranquility)),
 			Ok(ResyncIterResult::BusyDidNothing) => Ok(WorkerState::Busy),
 			Ok(ResyncIterResult::IdleFor(delay)) => {
 				self.next_delay = delay;
@@ -549,8 +527,8 @@ impl Worker for ResyncWorker {
 		}
 	}
 
-	async fn wait_for_work(&mut self) -> WorkerState {
-		while self.index >= self.persister.get_with(|x| x.n_workers) {
+	async fn wait_for_work(&mut self, _must_exit: &watch::Receiver<bool>) -> WorkerState {
+		while self.index >= self.manager.resync.persisted.load().n_workers {
 			self.manager.resync.notify.notified().await
 		}
 
@@ -567,9 +545,9 @@ impl Worker for ResyncWorker {
 /// and the time of the last try.
 /// Used to implement exponential backoff.
 #[derive(Clone, Copy, Debug)]
-pub(crate) struct ErrorCounter {
-	pub(crate) errors: u64,
-	pub(crate) last_try: u64,
+struct ErrorCounter {
+	errors: u64,
+	last_try: u64,
 }
 
 impl ErrorCounter {
@@ -580,13 +558,12 @@ impl ErrorCounter {
 		}
 	}
 
-	pub(crate) fn decode(data: &[u8]) -> Self {
+	fn decode(data: &[u8]) -> Self {
 		Self {
 			errors: u64::from_be_bytes(data[0..8].try_into().unwrap()),
 			last_try: u64::from_be_bytes(data[8..16].try_into().unwrap()),
 		}
 	}
 
 	fn encode(&self) -> Vec<u8> {
 		[
 			u64::to_be_bytes(self.errors),
@@ -606,8 +583,7 @@ impl ErrorCounter {
 		(RESYNC_RETRY_DELAY.as_millis() as u64)
 			<< std::cmp::min(self.errors - 1, RESYNC_RETRY_DELAY_MAX_BACKOFF_POWER)
 	}
-
-	pub(crate) fn next_try(&self) -> u64 {
+	fn next_try(&self) -> u64 {
 		self.last_try + self.delay_msec()
 	}
 }
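ErrorCounter above derives the retry delay by left-shifting a base delay, that is, one doubling per recorded error up to a capped power, with next_try = last_try + delay. A standalone sketch of that computation; the constants are illustrative, Garage defines its own RESYNC_RETRY_DELAY and backoff cap:

	// Capped exponential backoff, as computed by ErrorCounter above.
	const RETRY_DELAY_MSEC: u64 = 60_000; // base delay after the first error
	const MAX_BACKOFF_POWER: u64 = 6; // stop doubling after 2^6

	fn delay_msec(errors: u64) -> u64 {
		// 1 error -> base delay, then one doubling per further error, capped.
		RETRY_DELAY_MSEC << errors.saturating_sub(1).min(MAX_BACKOFF_POWER)
	}

	fn next_try_msec(last_try_msec: u64, errors: u64) -> u64 {
		last_try_msec + delay_msec(errors)
	}

	fn main() {
		for errors in 1..=8 {
			println!("{} error(s) -> retry after {} ms", errors, delay_msec(errors));
		}
		assert_eq!(next_try_msec(1_000, 1), 1_000 + 60_000);
	}

The cap matters: without it, a block that fails for days would end up with an astronomically long delay and effectively never be retried.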
src/db/Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "garage_db"
-version = "0.8.1"
+version = "0.8.0"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
src/db/lib.rs

@@ -181,10 +181,6 @@ impl Tree {
 	pub fn len(&self) -> Result<usize> {
 		self.0.len(self.1)
 	}
-	#[inline]
-	pub fn fast_len(&self) -> Result<Option<usize>> {
-		self.0.fast_len(self.1)
-	}
 
 	#[inline]
 	pub fn first(&self) -> Result<Option<(Value, Value)>> {
@@ -327,9 +323,6 @@ pub(crate) trait IDb: Send + Sync {
 
 	fn get(&self, tree: usize, key: &[u8]) -> Result<Option<Value>>;
 	fn len(&self, tree: usize) -> Result<usize>;
-	fn fast_len(&self, _tree: usize) -> Result<Option<usize>> {
-		Ok(None)
-	}
 
 	fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<Option<Value>>;
 	fn remove(&self, tree: usize, key: &[u8]) -> Result<Option<Value>>;
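The removed fast_len above is a trait method with a default body: backends that can count entries cheaply override it to return Some, everyone else silently inherits Ok(None). A minimal sketch of that pattern; the backend types here are illustrative stand-ins:

	// Default trait method, as in the removed IDb::fast_len above.
	trait Backend {
		fn len(&self) -> usize;

		// Cheap count if the engine tracks one; None means "not cheap here".
		fn fast_len(&self) -> Option<usize> {
			None
		}
	}

	struct Lmdb(Vec<u8>);
	struct Sled(Vec<u8>);

	impl Backend for Lmdb {
		fn len(&self) -> usize {
			self.0.len()
		}
		// LMDB keeps an entry count in its metadata, so this is O(1).
		fn fast_len(&self) -> Option<usize> {
			Some(self.len())
		}
	}

	impl Backend for Sled {
		// Counting would require a full tree walk, so keep the default.
		fn len(&self) -> usize {
			self.0.len()
		}
	}

	fn main() {
		assert_eq!(Lmdb(vec![1, 2]).fast_len(), Some(2));
		assert_eq!(Sled(vec![3]).fast_len(), None);
	}

Callers can then print an exact count when it is free and fall back to a placeholder otherwise, which is exactly what the stats code later in this diff does.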
src/db/lmdb_adapter.rs

@@ -121,10 +121,6 @@ impl IDb for LmdbDb {
 		Ok(tree.len(&tx)?.try_into().unwrap())
 	}
 
-	fn fast_len(&self, tree: usize) -> Result<Option<usize>> {
-		Ok(Some(self.len(tree)?))
-	}
-
 	fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<Option<Value>> {
 		let tree = self.get_tree(tree)?;
 		let mut tx = self.db.write_txn()?;
src/db/sqlite_adapter.rs

@@ -144,10 +144,6 @@ impl IDb for SqliteDb {
 		}
 	}
 
-	fn fast_len(&self, tree: usize) -> Result<Option<usize>> {
-		Ok(Some(self.len(tree)?))
-	}
-
 	fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<Option<Value>> {
 		trace!("insert {}: lock db", tree);
 		let this = self.0.lock().unwrap();
src/garage/Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "garage"
-version = "0.8.1"
+version = "0.8.0"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -21,14 +21,14 @@ path = "tests/lib.rs"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-garage_db = { version = "0.8.1", path = "../db" }
-garage_api = { version = "0.8.1", path = "../api" }
-garage_block = { version = "0.8.1", path = "../block" }
-garage_model = { version = "0.8.1", path = "../model" }
-garage_rpc = { version = "0.8.1", path = "../rpc" }
-garage_table = { version = "0.8.1", path = "../table" }
-garage_util = { version = "0.8.1", path = "../util" }
-garage_web = { version = "0.8.1", path = "../web" }
+garage_db = { version = "0.8.0", path = "../db" }
+garage_api = { version = "0.8.0", path = "../api" }
+garage_block = { version = "0.8.0", path = "../block" }
+garage_model = { version = "0.8.0", path = "../model" }
+garage_rpc = { version = "0.8.0", path = "../rpc" }
+garage_table = { version = "0.8.0", path = "../table" }
+garage_util = { version = "0.8.0", path = "../util" }
+garage_web = { version = "0.8.0", path = "../web" }
 
 backtrace = "0.3"
 bytes = "1.0"
@@ -42,6 +42,7 @@ rand = "0.8"
 async-trait = "0.1.7"
 sodiumoxide = { version = "0.2.5-0", package = "kuska-sodiumoxide" }
 
+rmp-serde = "0.15"
 serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
 serde_bytes = "0.11"
 structopt = { version = "0.3", default-features = false }
@@ -73,7 +74,7 @@ base64 = "0.13"
 
 [features]
-default = [ "bundled-libs", "metrics", "sled", "k2v" ]
+default = [ "bundled-libs", "metrics", "sled" ]
 
 k2v = [ "garage_util/k2v", "garage_api/k2v" ]
src/garage/admin.rs

@@ -5,11 +5,9 @@ use std::sync::Arc;
 use async_trait::async_trait;
 use serde::{Deserialize, Serialize};
 
-use garage_util::background::BackgroundRunner;
 use garage_util::crdt::*;
 use garage_util::data::*;
 use garage_util::error::Error as GarageError;
-use garage_util::formater::format_table_to_string;
 use garage_util::time::*;
 
 use garage_table::replication::*;
@@ -17,7 +15,7 @@ use garage_table::*;
 
 use garage_rpc::*;
 
-use garage_block::manager::BlockResyncErrorInfo;
+use garage_block::repair::ScrubWorkerCommand;
 
 use garage_model::bucket_alias_table::*;
 use garage_model::bucket_table::*;
@@ -26,8 +24,6 @@ use garage_model::helper::error::{Error, OkOrBadRequest};
 use garage_model::key_table::*;
 use garage_model::migrate::Migrate;
 use garage_model::permission::*;
-use garage_model::s3::object_table::*;
-use garage_model::s3::version_table::Version;
 
 use crate::cli::*;
 use crate::repair::online::launch_online_repair;
@@ -42,8 +38,7 @@ pub enum AdminRpc {
 	LaunchRepair(RepairOpt),
 	Migrate(MigrateOpt),
 	Stats(StatsOpt),
-	Worker(WorkerOperation),
-	BlockOperation(BlockOperation),
+	Worker(WorkerOpt),
 
 	// Replies
 	Ok(String),
@@ -59,14 +54,6 @@ pub enum AdminRpc {
 		HashMap<usize, garage_util::background::WorkerInfo>,
 		WorkerListOpt,
 	),
-	WorkerVars(Vec<(Uuid, String, String)>),
-	WorkerInfo(usize, garage_util::background::WorkerInfo),
-	BlockErrorList(Vec<BlockResyncErrorInfo>),
-	BlockInfo {
-		hash: Hash,
-		refcount: u64,
-		versions: Vec<Result<Version, Uuid>>,
-	},
 }
 
 impl Rpc for AdminRpc {
@@ -75,24 +62,17 @@ impl Rpc for AdminRpc {
 
 pub struct AdminRpcHandler {
 	garage: Arc<Garage>,
-	background: Arc<BackgroundRunner>,
 	endpoint: Arc<Endpoint<AdminRpc, Self>>,
 }
 
 impl AdminRpcHandler {
-	pub fn new(garage: Arc<Garage>, background: Arc<BackgroundRunner>) -> Arc<Self> {
+	pub fn new(garage: Arc<Garage>) -> Arc<Self> {
 		let endpoint = garage.system.netapp.endpoint(ADMIN_RPC_PATH.into());
-		let admin = Arc::new(Self {
-			garage,
-			background,
-			endpoint,
-		});
+		let admin = Arc::new(Self { garage, endpoint });
 		admin.endpoint.set_handler(admin.clone());
 		admin
 	}
 
-	// ================ BUCKET COMMANDS ====================
-
 	async fn handle_bucket_cmd(&self, cmd: &BucketOperation) -> Result<AdminRpc, Error> {
 		match cmd {
 			BucketOperation::List => self.handle_list_buckets().await,
@@ -571,8 +551,6 @@ impl AdminRpcHandler {
 		Ok(AdminRpc::Ok(ret))
 	}
 
-	// ================ KEY COMMANDS ====================
-
 	async fn handle_key_cmd(&self, cmd: &KeyOperation) -> Result<AdminRpc, Error> {
 		match cmd {
 			KeyOperation::List => self.handle_list_keys().await,
@@ -710,8 +688,6 @@ impl AdminRpcHandler {
 		Ok(AdminRpc::KeyInfo(key, relevant_buckets))
 	}
 
-	// ================ MIGRATION COMMANDS ====================
-
 	async fn handle_migrate(self: &Arc<Self>, opt: MigrateOpt) -> Result<AdminRpc, Error> {
 		if !opt.yes {
 			return Err(Error::BadRequest(
@@ -728,8 +704,6 @@ impl AdminRpcHandler {
 		Ok(AdminRpc::Ok("Migration successfull.".into()))
 	}
 
-	// ================ REPAIR COMMANDS ====================
-
 	async fn handle_launch_repair(self: &Arc<Self>, opt: RepairOpt) -> Result<AdminRpc, Error> {
 		if !opt.yes {
 			return Err(Error::BadRequest(
@@ -765,7 +739,7 @@ impl AdminRpcHandler {
 				)))
 			}
 		} else {
-			launch_online_repair(&self.garage, &self.background, opt).await?;
+			launch_online_repair(self.garage.clone(), opt).await;
 			Ok(AdminRpc::Ok(format!(
 				"Repair launched on {:?}",
 				self.garage.system.id
@@ -773,8 +747,6 @@ impl AdminRpcHandler {
 		}
 	}
 
-	// ================ STATS COMMANDS ====================
-
 	async fn handle_stats(&self, opt: StatsOpt) -> Result<AdminRpc, Error> {
 		if opt.all_nodes {
 			let mut ret = String::new();
@@ -791,12 +763,11 @@ impl AdminRpcHandler {
 				match self
 					.endpoint
 					.call(&node_id, AdminRpc::Stats(opt), PRIO_NORMAL)
-					.await
+					.await?
 				{
-					Ok(Ok(AdminRpc::Ok(s))) => writeln!(&mut ret, "{}", s).unwrap(),
-					Ok(Ok(x)) => writeln!(&mut ret, "Bad answer: {:?}", x).unwrap(),
-					Ok(Err(e)) => writeln!(&mut ret, "Remote error: {}", e).unwrap(),
-					Err(e) => writeln!(&mut ret, "Network error: {}", e).unwrap(),
+					Ok(AdminRpc::Ok(s)) => writeln!(&mut ret, "{}", s).unwrap(),
+					Ok(x) => writeln!(&mut ret, "Bad answer: {:?}", x).unwrap(),
+					Err(e) => writeln!(&mut ret, "Error: {}", e).unwrap(),
 				}
 			}
 			Ok(AdminRpc::Ok(ret))
@@ -816,7 +787,6 @@ impl AdminRpcHandler {
 				.unwrap_or_else(|| "(unknown)".into()),
 		)
 		.unwrap();
-
 		writeln!(&mut ret, "\nDatabase engine: {}", self.garage.db.engine()).unwrap();
 
 		// Gather ring statistics
@@ -835,38 +805,21 @@ impl AdminRpcHandler {
 			writeln!(&mut ret, "  {:?} {}", n, c).unwrap();
 		}
 
-		// Gather table statistics
-		let mut table = vec!["  Table\tItems\tMklItems\tMklTodo\tGcTodo".into()];
-		table.push(self.gather_table_stats(&self.garage.bucket_table, opt.detailed)?);
-		table.push(self.gather_table_stats(&self.garage.key_table, opt.detailed)?);
-		table.push(self.gather_table_stats(&self.garage.object_table, opt.detailed)?);
-		table.push(self.gather_table_stats(&self.garage.version_table, opt.detailed)?);
-		table.push(self.gather_table_stats(&self.garage.block_ref_table, opt.detailed)?);
-		write!(
-			&mut ret,
-			"\nTable stats:\n{}",
-			format_table_to_string(table)
-		)
-		.unwrap();
+		self.gather_table_stats(&mut ret, &self.garage.bucket_table, &opt)?;
+		self.gather_table_stats(&mut ret, &self.garage.key_table, &opt)?;
+		self.gather_table_stats(&mut ret, &self.garage.object_table, &opt)?;
+		self.gather_table_stats(&mut ret, &self.garage.version_table, &opt)?;
+		self.gather_table_stats(&mut ret, &self.garage.block_ref_table, &opt)?;
 
-		// Gather block manager statistics
 		writeln!(&mut ret, "\nBlock manager stats:").unwrap();
-		let rc_len = if opt.detailed {
-			self.garage.block_manager.rc_len()?.to_string()
-		} else {
-			self.garage
-				.block_manager
-				.rc_fast_len()?
-				.map(|x| x.to_string())
-				.unwrap_or_else(|| "NC".into())
-		};
-
-		writeln!(
-			&mut ret,
-			"  number of RC entries (~= number of blocks): {}",
-			rc_len
-		)
-		.unwrap();
+		if opt.detailed {
+			writeln!(
+				&mut ret,
+				"  number of RC entries (~= number of blocks): {}",
+				self.garage.block_manager.rc_len()?
+			)
+			.unwrap();
+		}
 		writeln!(
 			&mut ret,
 			"  resync queue length: {}",
@@ -880,305 +833,79 @@ impl AdminRpcHandler {
 		)
 		.unwrap();
 
-		if !opt.detailed {
-			writeln!(&mut ret, "\nIf values are missing (marked as NC), consider adding the --detailed flag - this will be slow.").unwrap();
-		}
-
 		Ok(ret)
 	}
 
 	fn gather_table_stats<F, R>(
 		&self,
+		to: &mut String,
 		t: &Arc<Table<F, R>>,
-		detailed: bool,
-	) -> Result<String, Error>
+		opt: &StatsOpt,
+	) -> Result<(), Error>
 	where
 		F: TableSchema + 'static,
 		R: TableReplication + 'static,
 	{
-		let (data_len, mkl_len) = if detailed {
-			(
-				t.data.store.len().map_err(GarageError::from)?.to_string(),
-				t.merkle_updater.merkle_tree_len()?.to_string(),
-			)
-		} else {
-			(
-				t.data
-					.store
-					.fast_len()
-					.map_err(GarageError::from)?
-					.map(|x| x.to_string())
-					.unwrap_or_else(|| "NC".into()),
-				t.merkle_updater
-					.merkle_tree_fast_len()?
-					.map(|x| x.to_string())
-					.unwrap_or_else(|| "NC".into()),
-			)
-		};
-
-		Ok(format!(
-			"  {}\t{}\t{}\t{}\t{}",
-			F::TABLE_NAME,
-			data_len,
-			mkl_len,
-			t.merkle_updater.todo_len()?,
-			t.data.gc_todo_len()?
-		))
-	}
-
-	// ================ WORKER COMMANDS ====================
-
-	async fn handle_worker_cmd(&self, cmd: &WorkerOperation) -> Result<AdminRpc, Error> {
-		match cmd {
-			WorkerOperation::List { opt } => {
-				let workers = self.background.get_worker_info();
-				Ok(AdminRpc::WorkerList(workers, *opt))
-			}
-			WorkerOperation::Info { tid } => {
-				let info = self
-					.background
-					.get_worker_info()
-					.get(tid)
-					.ok_or_bad_request(format!("No worker with TID {}", tid))?
-					.clone();
-				Ok(AdminRpc::WorkerInfo(*tid, info))
-			}
-			WorkerOperation::Get {
-				all_nodes,
-				variable,
-			} => self.handle_get_var(*all_nodes, variable).await,
-			WorkerOperation::Set {
-				all_nodes,
-				variable,
-				value,
-			} => self.handle_set_var(*all_nodes, variable, value).await,
-		}
+		writeln!(to, "\nTable stats for {}", F::TABLE_NAME).unwrap();
+		if opt.detailed {
+			writeln!(
+				to,
+				"  number of items: {}",
+				t.data.store.len().map_err(GarageError::from)?
+			)
+			.unwrap();
+			writeln!(
+				to,
+				"  Merkle tree size: {}",
+				t.merkle_updater.merkle_tree_len()?
+			)
+			.unwrap();
+		}
+		writeln!(
+			to,
+			"  Merkle updater todo queue length: {}",
+			t.merkle_updater.todo_len()?
+		)
+		.unwrap();
+		writeln!(to, "  GC todo queue length: {}", t.data.gc_todo_len()?).unwrap();
|
||||||
|
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn handle_get_var(
|
// ----
|
||||||
&self,
|
|
||||||
all_nodes: bool,
|
|
||||||
variable: &Option<String>,
|
|
||||||
) -> Result<AdminRpc, Error> {
|
|
||||||
if all_nodes {
|
|
||||||
let mut ret = vec![];
|
|
||||||
let ring = self.garage.system.ring.borrow().clone();
|
|
||||||
for node in ring.layout.node_ids().iter() {
|
|
||||||
let node = (*node).into();
|
|
||||||
match self
|
|
||||||
.endpoint
|
|
||||||
.call(
|
|
||||||
&node,
|
|
||||||
AdminRpc::Worker(WorkerOperation::Get {
|
|
||||||
all_nodes: false,
|
|
||||||
variable: variable.clone(),
|
|
||||||
}),
|
|
||||||
PRIO_NORMAL,
|
|
||||||
)
|
|
||||||
.await??
|
|
||||||
{
|
|
||||||
AdminRpc::WorkerVars(v) => ret.extend(v),
|
|
||||||
m => return Err(GarageError::unexpected_rpc_message(m).into()),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(AdminRpc::WorkerVars(ret))
|
|
||||||
} else {
|
|
||||||
#[allow(clippy::collapsible_else_if)]
|
|
||||||
if let Some(v) = variable {
|
|
||||||
Ok(AdminRpc::WorkerVars(vec![(
|
|
||||||
self.garage.system.id,
|
|
||||||
v.clone(),
|
|
||||||
self.garage.bg_vars.get(v)?,
|
|
||||||
)]))
|
|
||||||
} else {
|
|
||||||
let mut vars = self.garage.bg_vars.get_all();
|
|
||||||
vars.sort();
|
|
||||||
Ok(AdminRpc::WorkerVars(
|
|
||||||
vars.into_iter()
|
|
||||||
.map(|(k, v)| (self.garage.system.id, k.to_string(), v))
|
|
||||||
.collect(),
|
|
||||||
))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn handle_set_var(
|
async fn handle_worker_cmd(&self, opt: WorkerOpt) -> Result<AdminRpc, Error> {
|
||||||
&self,
|
match opt.cmd {
|
||||||
all_nodes: bool,
|
WorkerCmd::List { opt } => {
|
||||||
variable: &str,
|
let workers = self.garage.background.get_worker_info();
|
||||||
value: &str,
|
Ok(AdminRpc::WorkerList(workers, opt))
|
||||||
) -> Result<AdminRpc, Error> {
|
|
||||||
if all_nodes {
|
|
||||||
let mut ret = vec![];
|
|
||||||
let ring = self.garage.system.ring.borrow().clone();
|
|
||||||
for node in ring.layout.node_ids().iter() {
|
|
||||||
let node = (*node).into();
|
|
||||||
match self
|
|
||||||
.endpoint
|
|
||||||
.call(
|
|
||||||
&node,
|
|
||||||
AdminRpc::Worker(WorkerOperation::Set {
|
|
||||||
all_nodes: false,
|
|
||||||
variable: variable.to_string(),
|
|
||||||
value: value.to_string(),
|
|
||||||
}),
|
|
||||||
PRIO_NORMAL,
|
|
||||||
)
|
|
||||||
.await??
|
|
||||||
{
|
|
||||||
AdminRpc::WorkerVars(v) => ret.extend(v),
|
|
||||||
m => return Err(GarageError::unexpected_rpc_message(m).into()),
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
Ok(AdminRpc::WorkerVars(ret))
|
WorkerCmd::Set { opt } => match opt {
|
||||||
} else {
|
WorkerSetCmd::ScrubTranquility { tranquility } => {
|
||||||
self.garage.bg_vars.set(variable, value)?;
|
let scrub_command = ScrubWorkerCommand::SetTranquility(tranquility);
|
||||||
Ok(AdminRpc::WorkerVars(vec![(
|
self.garage
|
||||||
self.garage.system.id,
|
.block_manager
|
||||||
variable.to_string(),
|
.send_scrub_command(scrub_command)
|
||||||
value.to_string(),
|
.await;
|
||||||
)]))
|
Ok(AdminRpc::Ok("Scrub tranquility updated".into()))
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ================ BLOCK COMMANDS ====================
|
|
||||||
|
|
||||||
async fn handle_block_cmd(&self, cmd: &BlockOperation) -> Result<AdminRpc, Error> {
|
|
||||||
match cmd {
|
|
||||||
BlockOperation::ListErrors => Ok(AdminRpc::BlockErrorList(
|
|
||||||
self.garage.block_manager.list_resync_errors()?,
|
|
||||||
)),
|
|
||||||
BlockOperation::Info { hash } => {
|
|
||||||
let hash = hex::decode(hash).ok_or_bad_request("invalid hash")?;
|
|
||||||
let hash = Hash::try_from(&hash).ok_or_bad_request("invalid hash")?;
|
|
||||||
let refcount = self.garage.block_manager.get_block_rc(&hash)?;
|
|
||||||
let block_refs = self
|
|
||||||
.garage
|
|
||||||
.block_ref_table
|
|
||||||
.get_range(&hash, None, None, 10000, Default::default())
|
|
||||||
.await?;
|
|
||||||
let mut versions = vec![];
|
|
||||||
for br in block_refs {
|
|
||||||
if let Some(v) = self
|
|
||||||
.garage
|
|
||||||
.version_table
|
|
||||||
.get(&br.version, &EmptyKey)
|
|
||||||
.await?
|
|
||||||
{
|
|
||||||
versions.push(Ok(v));
|
|
||||||
} else {
|
|
||||||
versions.push(Err(br.version));
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
Ok(AdminRpc::BlockInfo {
|
WorkerSetCmd::ResyncNWorkers { n_workers } => {
|
||||||
hash,
|
self.garage
|
||||||
refcount,
|
.block_manager
|
||||||
versions,
|
.resync
|
||||||
})
|
.set_n_workers(n_workers)
|
||||||
}
|
|
||||||
BlockOperation::RetryNow { all, blocks } => {
|
|
||||||
if *all {
|
|
||||||
if !blocks.is_empty() {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
"--all was specified, cannot also specify blocks".into(),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
let blocks = self.garage.block_manager.list_resync_errors()?;
|
|
||||||
for b in blocks.iter() {
|
|
||||||
self.garage.block_manager.resync.clear_backoff(&b.hash)?;
|
|
||||||
}
|
|
||||||
Ok(AdminRpc::Ok(format!(
|
|
||||||
"{} blocks returned in queue for a retry now (check logs to see results)",
|
|
||||||
blocks.len()
|
|
||||||
)))
|
|
||||||
} else {
|
|
||||||
for hash in blocks {
|
|
||||||
let hash = hex::decode(hash).ok_or_bad_request("invalid hash")?;
|
|
||||||
let hash = Hash::try_from(&hash).ok_or_bad_request("invalid hash")?;
|
|
||||||
self.garage.block_manager.resync.clear_backoff(&hash)?;
|
|
||||||
}
|
|
||||||
Ok(AdminRpc::Ok(format!(
|
|
||||||
"{} blocks returned in queue for a retry now (check logs to see results)",
|
|
||||||
blocks.len()
|
|
||||||
)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
BlockOperation::Purge { yes, blocks } => {
|
|
||||||
if !yes {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
"Pass the --yes flag to confirm block purge operation.".into(),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut obj_dels = 0;
|
|
||||||
let mut ver_dels = 0;
|
|
||||||
|
|
||||||
for hash in blocks {
|
|
||||||
let hash = hex::decode(hash).ok_or_bad_request("invalid hash")?;
|
|
||||||
let hash = Hash::try_from(&hash).ok_or_bad_request("invalid hash")?;
|
|
||||||
let block_refs = self
|
|
||||||
.garage
|
|
||||||
.block_ref_table
|
|
||||||
.get_range(&hash, None, None, 10000, Default::default())
|
|
||||||
.await?;
|
.await?;
|
||||||
|
Ok(AdminRpc::Ok("Number of resync workers updated".into()))
|
||||||
for br in block_refs {
|
|
||||||
let version = match self
|
|
||||||
.garage
|
|
||||||
.version_table
|
|
||||||
.get(&br.version, &EmptyKey)
|
|
||||||
.await?
|
|
||||||
{
|
|
||||||
Some(v) => v,
|
|
||||||
None => continue,
|
|
||||||
};
|
|
||||||
|
|
||||||
if let Some(object) = self
|
|
||||||
.garage
|
|
||||||
.object_table
|
|
||||||
.get(&version.bucket_id, &version.key)
|
|
||||||
.await?
|
|
||||||
{
|
|
||||||
let ov = object.versions().iter().rev().find(|v| v.is_complete());
|
|
||||||
if let Some(ov) = ov {
|
|
||||||
if ov.uuid == br.version {
|
|
||||||
let del_uuid = gen_uuid();
|
|
||||||
let deleted_object = Object::new(
|
|
||||||
version.bucket_id,
|
|
||||||
version.key.clone(),
|
|
||||||
vec![ObjectVersion {
|
|
||||||
uuid: del_uuid,
|
|
||||||
timestamp: ov.timestamp + 1,
|
|
||||||
state: ObjectVersionState::Complete(
|
|
||||||
ObjectVersionData::DeleteMarker,
|
|
||||||
),
|
|
||||||
}],
|
|
||||||
);
|
|
||||||
self.garage.object_table.insert(&deleted_object).await?;
|
|
||||||
obj_dels += 1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !version.deleted.get() {
|
|
||||||
let deleted_version = Version::new(
|
|
||||||
version.uuid,
|
|
||||||
version.bucket_id,
|
|
||||||
version.key.clone(),
|
|
||||||
true,
|
|
||||||
);
|
|
||||||
self.garage.version_table.insert(&deleted_version).await?;
|
|
||||||
ver_dels += 1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
Ok(AdminRpc::Ok(format!(
|
WorkerSetCmd::ResyncTranquility { tranquility } => {
|
||||||
"{} blocks were purged: {} object deletion markers added, {} versions marked deleted",
|
self.garage
|
||||||
blocks.len(),
|
.block_manager
|
||||||
obj_dels,
|
.resync
|
||||||
ver_dels
|
.set_tranquility(tranquility)
|
||||||
)))
|
.await?;
|
||||||
}
|
Ok(AdminRpc::Ok("Resync tranquility updated".into()))
|
||||||
|
}
|
||||||
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1196,8 +923,7 @@ impl EndpointHandler<AdminRpc> for AdminRpcHandler {
|
||||||
AdminRpc::Migrate(opt) => self.handle_migrate(opt.clone()).await,
|
AdminRpc::Migrate(opt) => self.handle_migrate(opt.clone()).await,
|
||||||
AdminRpc::LaunchRepair(opt) => self.handle_launch_repair(opt.clone()).await,
|
AdminRpc::LaunchRepair(opt) => self.handle_launch_repair(opt.clone()).await,
|
||||||
AdminRpc::Stats(opt) => self.handle_stats(opt.clone()).await,
|
AdminRpc::Stats(opt) => self.handle_stats(opt.clone()).await,
|
||||||
AdminRpc::Worker(wo) => self.handle_worker_cmd(wo).await,
|
AdminRpc::Worker(opt) => self.handle_worker_cmd(opt.clone()).await,
|
||||||
AdminRpc::BlockOperation(bo) => self.handle_block_cmd(bo).await,
|
|
||||||
m => Err(GarageError::unexpected_rpc_message(m).into()),
|
m => Err(GarageError::unexpected_rpc_message(m).into()),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
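
For context on the `.await` / `.await?` difference in `handle_stats` above: on the newer side the RPC call yields a nested result, with the transport error on the outside and the remote handler's result on the inside, and all three cases are matched explicitly; the older side flattens the outer layer with `?`, so a single unreachable node aborts the whole loop. A minimal, self-contained sketch of the nested-result pattern, using placeholder error types rather than the real netapp/Garage ones:

use std::io;

// Hypothetical reply type: outer Result = did the request get through the
// network, inner Result = did the remote handler succeed.
type NetResult<T> = Result<Result<T, String>, io::Error>;

fn report(reply: NetResult<u32>) -> String {
    match reply {
        Ok(Ok(v)) => format!("value: {}", v),         // remote succeeded
        Ok(Err(e)) => format!("remote error: {}", e), // remote ran, but failed
        Err(e) => format!("network error: {}", e),    // request never completed
    }
}

fn main() {
    let unreachable: NetResult<u32> = Err(io::Error::new(io::ErrorKind::TimedOut, "no route"));
    println!("{}", report(Ok(Ok(3))));
    println!("{}", report(unreachable));
}

Keeping the two layers separate is what lets the per-node loop print "Remote error" or "Network error" for one peer and still continue with the others.
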
@@ -41,9 +41,6 @@ pub async fn cli_command_dispatch(
 }
 Command::Stats(so) => cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::Stats(so)).await,
 Command::Worker(wo) => cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::Worker(wo)).await,
-Command::Block(bo) => {
-cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::BlockOperation(bo)).await
-}
 _ => unreachable!(),
 }
 }
@@ -189,23 +186,7 @@ pub async fn cmd_admin(
 print_key_info(&key, &rb);
 }
 AdminRpc::WorkerList(wi, wlo) => {
-print_worker_list(wi, wlo);
-}
-AdminRpc::WorkerVars(wv) => {
-print_worker_vars(wv);
-}
-AdminRpc::WorkerInfo(tid, wi) => {
-print_worker_info(tid, wi);
-}
-AdminRpc::BlockErrorList(el) => {
-print_block_error_list(el);
-}
-AdminRpc::BlockInfo {
-hash,
-refcount,
-versions,
-} => {
-print_block_info(hash, refcount, versions);
+print_worker_info(wi, wlo);
 }
 r => {
 error!("Unexpected response: {:?}", r);

@@ -49,11 +49,7 @@ pub enum Command {

 /// Manage background workers
 #[structopt(name = "worker", version = garage_version())]
-Worker(WorkerOperation),
-
-/// Low-level debug operations on data blocks
-#[structopt(name = "block", version = garage_version())]
-Block(BlockOperation),
+Worker(WorkerOpt),
 }

 #[derive(StructOpt, Debug)]
@@ -506,36 +502,25 @@ pub struct StatsOpt {
 pub detailed: bool,
 }

+#[derive(Serialize, Deserialize, StructOpt, Debug, Clone)]
+pub struct WorkerOpt {
+#[structopt(subcommand)]
+pub cmd: WorkerCmd,
+}
+
 #[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)]
-pub enum WorkerOperation {
+pub enum WorkerCmd {
 /// List all workers on Garage node
 #[structopt(name = "list", version = garage_version())]
 List {
 #[structopt(flatten)]
 opt: WorkerListOpt,
 },
-/// Get detailed information about a worker
-#[structopt(name = "info", version = garage_version())]
-Info { tid: usize },
-/// Get worker parameter
-#[structopt(name = "get", version = garage_version())]
-Get {
-/// Gather variable values from all nodes
-#[structopt(short = "a", long = "all-nodes")]
-all_nodes: bool,
-/// Variable name to get, or none to get all variables
-variable: Option<String>,
-},
 /// Set worker parameter
 #[structopt(name = "set", version = garage_version())]
 Set {
-/// Set variable values on all nodes
-#[structopt(short = "a", long = "all-nodes")]
-all_nodes: bool,
-/// Variable node to set
-variable: String,
-/// Value to set the variable to
-value: String,
+#[structopt(subcommand)]
+opt: WorkerSetCmd,
 },
 }
@@ -550,33 +535,14 @@ pub struct WorkerListOpt {
 }

 #[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)]
-pub enum BlockOperation {
-/// List all blocks that currently have a resync error
-#[structopt(name = "list-errors", version = garage_version())]
-ListErrors,
-/// Get detailed information about a single block
-#[structopt(name = "info", version = garage_version())]
-Info {
-/// Hash of the block for which to retrieve information
-hash: String,
-},
-/// Retry now the resync of one or many blocks
-#[structopt(name = "retry-now", version = garage_version())]
-RetryNow {
-/// Retry all blocks that have a resync error
-#[structopt(long = "all")]
-all: bool,
-/// Hashes of the block to retry to resync now
-blocks: Vec<String>,
-},
-/// Delete all objects referencing a missing block
-#[structopt(name = "purge", version = garage_version())]
-Purge {
-/// Mandatory to confirm this operation
-#[structopt(long = "yes")]
-yes: bool,
-/// Hashes of the block to purge
-#[structopt(required = true)]
-blocks: Vec<String>,
-},
+pub enum WorkerSetCmd {
+/// Set tranquility of scrub operations
+#[structopt(name = "scrub-tranquility", version = garage_version())]
+ScrubTranquility { tranquility: u32 },
+/// Set number of concurrent block resync workers
+#[structopt(name = "resync-n-workers", version = garage_version())]
+ResyncNWorkers { n_workers: usize },
+/// Set tranquility of block resync operations
+#[structopt(name = "resync-tranquility", version = garage_version())]
+ResyncTranquility { tranquility: u32 },
 }
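
The `WorkerOpt` / `WorkerCmd` / `WorkerSetCmd` types above are an ordinary structopt subcommand tree. A condensed, hypothetical miniature (structopt 0.3 API, attributes trimmed) showing how an invocation like `garage worker set scrub-tranquility 2` parses:

use structopt::StructOpt;

#[derive(StructOpt, Debug)]
enum WorkerCmd {
    /// List all workers
    #[structopt(name = "list")]
    List,
    /// Set a worker parameter
    #[structopt(name = "set")]
    Set {
        #[structopt(subcommand)]
        opt: WorkerSetCmd,
    },
}

#[derive(StructOpt, Debug)]
enum WorkerSetCmd {
    /// Set tranquility of scrub operations
    #[structopt(name = "scrub-tranquility")]
    ScrubTranquility { tranquility: u32 },
}

fn main() {
    // "worker set scrub-tranquility 2" -> Set { opt: ScrubTranquility { tranquility: 2 } }
    let cmd = WorkerCmd::from_iter(&["worker", "set", "scrub-tranquility", "2"]);
    println!("{:?}", cmd);
}

Nesting `#[structopt(subcommand)]` inside the `Set` variant is what turns `scrub-tranquility`, `resync-n-workers` and `resync-tranquility` into third-level subcommands.
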
@@ -3,17 +3,14 @@ use std::time::Duration;

 use garage_util::background::*;
 use garage_util::crdt::*;
-use garage_util::data::*;
+use garage_util::data::Uuid;
 use garage_util::error::*;
 use garage_util::formater::format_table;
 use garage_util::time::*;

-use garage_block::manager::BlockResyncErrorInfo;
-
 use garage_model::bucket_table::*;
 use garage_model::key_table::*;
 use garage_model::s3::object_table::{BYTES, OBJECTS, UNFINISHED_UPLOADS};
-use garage_model::s3::version_table::Version;

 use crate::cli::structs::WorkerListOpt;

@@ -244,7 +241,7 @@ pub fn find_matching_node(
 }
 }

-pub fn print_worker_list(wi: HashMap<usize, WorkerInfo>, wlo: WorkerListOpt) {
+pub fn print_worker_info(wi: HashMap<usize, WorkerInfo>, wlo: WorkerListOpt) {
 let mut wi = wi.into_iter().collect::<Vec<_>>();
 wi.sort_by_key(|(tid, info)| {
 (
@@ -257,7 +254,7 @@ pub fn print_worker_list(wi: HashMap<usize, WorkerInfo>, wlo: WorkerListOpt) {
 )
 });

-let mut table = vec!["TID\tState\tName\tTranq\tDone\tQueue\tErrors\tConsec\tLast".to_string()];
+let mut table = vec![];
 for (tid, info) in wi.iter() {
 if wlo.busy && !matches!(info.state, WorkerState::Busy | WorkerState::Throttled(_)) {
 continue;
@@ -266,155 +263,33 @@ pub fn print_worker_list(wi: HashMap<usize, WorkerInfo>, wlo: WorkerListOpt) {
 continue;
 }

+table.push(format!("{}\t{}\t{}", tid, info.state, info.name));
+if let Some(i) = &info.info {
+table.push(format!("\t\t {}", i));
+}
 let tf = timeago::Formatter::new();
-let err_ago = info
+let (err_ago, err_msg) = info
 .last_error
 .as_ref()
-.map(|(_, t)| tf.convert(Duration::from_millis(now_msec() - t)))
-.unwrap_or_default();
-let (total_err, consec_err) = if info.errors > 0 {
-(info.errors.to_string(), info.consecutive_errors.to_string())
-} else {
-("-".into(), "-".into())
-};
+.map(|(m, t)| {
+(
+tf.convert(Duration::from_millis(now_msec() - t)),
+m.as_str(),
+)
+})
+.unwrap_or(("(?) ago".into(), "(?)"));
+if info.consecutive_errors > 0 {

-table.push(format!(
-"{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}",
-tid,
-info.state,
-info.name,
-info.status
-.tranquility
-.as_ref()
-.map(ToString::to_string)
-.unwrap_or_else(|| "-".into()),
-info.status.progress.as_deref().unwrap_or("-"),
-info.status
-.queue_length
-.as_ref()
-.map(ToString::to_string)
-.unwrap_or_else(|| "-".into()),
-total_err,
-consec_err,
-err_ago,
-));
-}
-format_table(table);
-}
-
-pub fn print_worker_info(tid: usize, info: WorkerInfo) {
-let mut table = vec![];
-table.push(format!("Task id:\t{}", tid));
-table.push(format!("Worker name:\t{}", info.name));
-match info.state {
-WorkerState::Throttled(t) => {
 table.push(format!(
-"Worker state:\tBusy (throttled, paused for {:.3}s)",
-t
+"\t\t {} consecutive errors ({} total), last {}",
+info.consecutive_errors, info.errors, err_ago,
 ));
-}
-s => {
-table.push(format!("Worker state:\t{}", s));
-}
-};
-if let Some(tql) = info.status.tranquility {
-table.push(format!("Tranquility:\t{}", tql));
-}
-
-table.push("".into());
-table.push(format!("Total errors:\t{}", info.errors));
-table.push(format!("Consecutive errs:\t{}", info.consecutive_errors));
-if let Some((s, t)) = info.last_error {
-table.push(format!("Last error:\t{}", s));
-let tf = timeago::Formatter::new();
-table.push(format!(
-"Last error time:\t{}",
-tf.convert(Duration::from_millis(now_msec() - t))
-));
-}
-
-table.push("".into());
-if let Some(p) = info.status.progress {
-table.push(format!("Progress:\t{}", p));
-}
-if let Some(ql) = info.status.queue_length {
-table.push(format!("Queue length:\t{}", ql));
-}
-if let Some(pe) = info.status.persistent_errors {
-table.push(format!("Persistent errors:\t{}", pe));
-}
-
-for (i, s) in info.status.freeform.iter().enumerate() {
-if i == 0 {
-if table.last() != Some(&"".into()) {
-table.push("".into());
-}
-table.push(format!("Message:\t{}", s));
-} else {
-table.push(format!("\t{}", s));
-}
-}
-format_table(table);
-}
-
-pub fn print_worker_vars(wv: Vec<(Uuid, String, String)>) {
-let table = wv
-.into_iter()
-.map(|(n, k, v)| format!("{:?}\t{}\t{}", n, k, v))
-.collect::<Vec<_>>();
-format_table(table);
-}
-
-pub fn print_block_error_list(el: Vec<BlockResyncErrorInfo>) {
-let now = now_msec();
-let tf = timeago::Formatter::new();
-let mut tf2 = timeago::Formatter::new();
-tf2.ago("");
-
-let mut table = vec!["Hash\tRC\tErrors\tLast error\tNext try".into()];
-for e in el {
-table.push(format!(
-"{}\t{}\t{}\t{}\tin {}",
-hex::encode(e.hash.as_slice()),
-e.refcount,
-e.error_count,
-tf.convert(Duration::from_millis(now - e.last_try)),
-tf2.convert(Duration::from_millis(e.next_try - now))
-));
-}
-format_table(table);
-}
-
-pub fn print_block_info(hash: Hash, refcount: u64, versions: Vec<Result<Version, Uuid>>) {
-println!("Block hash: {}", hex::encode(hash.as_slice()));
-println!("Refcount: {}", refcount);
-println!();
-
-let mut table = vec!["Version\tBucket\tKey\tDeleted".into()];
-let mut nondeleted_count = 0;
-for v in versions.iter() {
-match v {
-Ok(ver) => {
-table.push(format!(
-"{:?}\t{:?}\t{}\t{:?}",
-ver.uuid,
-ver.bucket_id,
-ver.key,
-ver.deleted.get()
-));
-if !ver.deleted.get() {
-nondeleted_count += 1;
-}
-}
-Err(vh) => {
-table.push(format!("{:?}\t\t\tyes", vh));
+table.push(format!("\t\t {}", err_msg));
+} else if info.errors > 0 {
+table.push(format!("\t\t ({} errors, last {})", info.errors, err_ago,));
+if wlo.errors {
+table.push(format!("\t\t {}", err_msg));
 }
 }
 }
 format_table(table);
-
-if refcount != nondeleted_count {
-println!();
-println!("Warning: refcount does not match number of non-deleted versions");
-}
 }
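
Both sides of the `print_worker_list` / `print_worker_info` diff above build display rows as tab-separated strings and hand them to `format_table`. That helper lives in `garage_util::formater`; a rough stand-in illustrating the idea (column widths derived from the widest cell — details assumed, not Garage's actual implementation):

fn format_table(rows: Vec<String>) {
    // Split every row on tabs.
    let cells: Vec<Vec<&str>> = rows.iter().map(|r| r.split('\t').collect()).collect();
    let ncols = cells.iter().map(|r| r.len()).max().unwrap_or(0);
    // Each column is as wide as its widest cell.
    let widths: Vec<usize> = (0..ncols)
        .map(|c| cells.iter().map(|r| r.get(c).map_or(0, |s| s.len())).max().unwrap_or(0))
        .collect();
    for row in &cells {
        let line: Vec<String> = row
            .iter()
            .enumerate()
            .map(|(c, s)| format!("{:<w$}", s, w = widths[c]))
            .collect();
        println!("{}", line.join("  ").trim_end());
    }
}

fn main() {
    format_table(vec![
        "TID\tState\tName".to_string(),
        "1\tBusy\tblock resync worker".to_string(),
        "12\tIdle\tobject GC".to_string(),
    ]);
}
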
@@ -127,16 +127,9 @@ async fn main() {
 std::process::abort();
 }));

-// Parse arguments and dispatch command line
-let opt = Opt::from_clap(&Opt::clap().version(version.as_str()).get_matches());
-
 // Initialize logging as well as other libraries used in Garage
 if std::env::var("RUST_LOG").is_err() {
-let default_log = match &opt.cmd {
-Command::Server => "netapp=info,garage=info",
-_ => "netapp=warn,garage=warn",
-};
-std::env::set_var("RUST_LOG", default_log)
+std::env::set_var("RUST_LOG", "netapp=info,garage=info")
 }
 tracing_subscriber::fmt()
 .with_writer(std::io::stderr)
@@ -144,6 +137,9 @@ async fn main() {
 .init();
 sodiumoxide::init().expect("Unable to init sodiumoxide");

+// Parse arguments and dispatch command line
+let opt = Opt::from_clap(&Opt::clap().version(version.as_str()).get_matches());
+
 let res = match opt.cmd {
 Command::Server => server::run_server(opt.config_file).await,
 Command::OfflineRepair(repair_opt) => {
@@ -173,7 +169,7 @@ async fn cli_command(opt: Opt) -> Result<(), Error> {
 let net_key_hex_str = opt
 .rpc_secret
 .as_ref()
-.or_else(|| config.as_ref().and_then(|c| c.rpc_secret.as_ref()))
+.or_else(|| config.as_ref().map(|c| &c.rpc_secret))
 .ok_or("No RPC secret provided")?;
 let network_key = NetworkKey::from_slice(
 &hex::decode(net_key_hex_str).err_context("Invalid RPC secret key (bad hex)")?[..],
@@ -186,9 +182,9 @@ async fn cli_command(opt: Opt) -> Result<(), Error> {
 let netapp = NetApp::new(GARAGE_VERSION_TAG, network_key, sk);

 // Find and parse the address of the target host
-let (id, addr, is_default_addr) = if let Some(h) = opt.rpc_host {
+let (id, addr) = if let Some(h) = opt.rpc_host {
 let (id, addrs) = parse_and_resolve_peer_addr(&h).ok_or_else(|| format!("Invalid RPC remote node identifier: {}. Expected format is <pubkey>@<IP or hostname>:<port>.", h))?;
-(id, addrs[0], false)
+(id, addrs[0])
 } else {
 let node_id = garage_rpc::system::read_node_id(&config.as_ref().unwrap().metadata_dir)
 .err_context(READ_KEY_ERROR)?;
@@ -199,26 +195,24 @@ async fn cli_command(opt: Opt) -> Result<(), Error> {
 .ok_or_message("unable to resolve rpc_public_addr specified in config file")?
 .next()
 .ok_or_message("unable to resolve rpc_public_addr specified in config file")?;
-(node_id, a, false)
+(node_id, a)
 } else {
 let default_addr = SocketAddr::new(
 "127.0.0.1".parse().unwrap(),
 config.as_ref().unwrap().rpc_bind_addr.port(),
 );
-(node_id, default_addr, true)
+warn!(
+"Trying to contact Garage node at default address {}",
+default_addr
+);
+warn!("If this doesn't work, consider adding rpc_public_addr in your config file or specifying the -h command line parameter.");
+(node_id, default_addr)
 }
 };

 // Connect to target host
-if let Err(e) = netapp.clone().try_connect(addr, id).await {
-if is_default_addr {
-warn!(
-"Tried to contact Garage node at default address {}, which didn't work. If that address is wrong, consider setting rpc_public_addr in your config file.",
-addr
-);
-}
-Err(e).err_context("Unable to connect to destination RPC host. Check that you are using the same value of rpc_secret as them, and that you have their correct public key.")?;
-}
+netapp.clone().try_connect(addr, id).await
+.err_context("Unable to connect to destination RPC host. Check that you are using the same value of rpc_secret as them, and that you have their correct public key.")?;

 let system_rpc_endpoint = netapp.endpoint::<SystemRpc, ()>(SYSTEM_RPC_PATH.into());
 let admin_rpc_endpoint = netapp.endpoint::<AdminRpc, ()>(ADMIN_RPC_PATH.into());
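
The `RUST_LOG` hunk above only changes how the default filter is chosen: the newer side picks it per command, so that one-shot CLI invocations log warnings only while `garage server` keeps info-level logs. A condensed, standalone sketch of that selection, with a boolean standing in for the parsed command:

fn default_log_filter(is_server: bool) -> &'static str {
    if is_server {
        "netapp=info,garage=info" // chatty: long-running daemon
    } else {
        "netapp=warn,garage=warn" // quiet: one-shot CLI call
    }
}

fn main() {
    // Only applied when the user did not set RUST_LOG themselves.
    if std::env::var("RUST_LOG").is_err() {
        std::env::set_var("RUST_LOG", default_log_filter(false));
    }
    println!("RUST_LOG = {}", std::env::var("RUST_LOG").unwrap());
}

Note this is also why argument parsing moves before logger initialization on the newer side: the filter now depends on the parsed command.
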
@@ -1,5 +1,8 @@
 use std::path::PathBuf;

+use tokio::sync::watch;
+
+use garage_util::background::*;
 use garage_util::config::*;
 use garage_util::error::*;

@@ -17,8 +20,12 @@ pub async fn offline_repair(config_file: PathBuf, opt: OfflineRepairOpt) -> Resu
 info!("Loading configuration...");
 let config = read_config(config_file)?;

+info!("Initializing background runner...");
+let (done_tx, done_rx) = watch::channel(false);
+let (background, await_background_done) = BackgroundRunner::new(16, done_rx);
+
 info!("Initializing Garage main data store...");
-let garage = Garage::new(config)?;
+let garage = Garage::new(config.clone(), background)?;

 info!("Launching repair operation...");
 match opt.what {
@@ -36,7 +43,13 @@ pub async fn offline_repair(config_file: PathBuf, opt: OfflineRepairOpt) -> Resu
 }
 }

-info!("Repair operation finished, shutting down...");
+info!("Repair operation finished, shutting down Garage internals...");
+done_tx.send(true).unwrap();
+drop(garage);
+
+await_background_done.await?;
+
+info!("Cleaning up...");

 Ok(())
 }

@@ -12,37 +12,38 @@ use garage_model::s3::version_table::*;
 use garage_table::*;
 use garage_util::background::*;
 use garage_util::error::Error;
-use garage_util::migrate::Migrate;

 use crate::*;

-pub async fn launch_online_repair(
-garage: &Arc<Garage>,
-bg: &BackgroundRunner,
-opt: RepairOpt,
-) -> Result<(), Error> {
+pub async fn launch_online_repair(garage: Arc<Garage>, opt: RepairOpt) {
 match opt.what {
 RepairWhat::Tables => {
 info!("Launching a full sync of tables");
-garage.bucket_table.syncer.add_full_sync()?;
-garage.object_table.syncer.add_full_sync()?;
-garage.version_table.syncer.add_full_sync()?;
-garage.block_ref_table.syncer.add_full_sync()?;
-garage.key_table.syncer.add_full_sync()?;
+garage.bucket_table.syncer.add_full_sync();
+garage.object_table.syncer.add_full_sync();
+garage.version_table.syncer.add_full_sync();
+garage.block_ref_table.syncer.add_full_sync();
+garage.key_table.syncer.add_full_sync();
 }
 RepairWhat::Versions => {
 info!("Repairing the versions table");
-bg.spawn_worker(RepairVersionsWorker::new(garage.clone()));
+garage
+.background
+.spawn_worker(RepairVersionsWorker::new(garage.clone()));
 }
 RepairWhat::BlockRefs => {
 info!("Repairing the block refs table");
-bg.spawn_worker(RepairBlockrefsWorker::new(garage.clone()));
+garage
+.background
+.spawn_worker(RepairBlockrefsWorker::new(garage.clone()));
 }
 RepairWhat::Blocks => {
 info!("Repairing the stored blocks");
-bg.spawn_worker(garage_block::repair::RepairWorker::new(
-garage.block_manager.clone(),
-));
+garage
+.background
+.spawn_worker(garage_block::repair::RepairWorker::new(
+garage.block_manager.clone(),
+));
 }
 RepairWhat::Scrub { cmd } => {
 let cmd = match cmd {
@@ -51,18 +52,13 @@ pub async fn launch_online_repair(
 ScrubCmd::Resume => ScrubWorkerCommand::Resume,
 ScrubCmd::Cancel => ScrubWorkerCommand::Cancel,
 ScrubCmd::SetTranquility { tranquility } => {
-garage
-.block_manager
-.scrub_persister
-.set_with(|x| x.tranquility = tranquility)?;
-return Ok(());
+ScrubWorkerCommand::SetTranquility(tranquility)
 }
 };
 info!("Sending command to scrub worker: {:?}", cmd);
-garage.block_manager.send_scrub_command(cmd).await?;
+garage.block_manager.send_scrub_command(cmd).await;
 }
 }
-Ok(())
 }

 // ----
@@ -89,23 +85,25 @@ impl Worker for RepairVersionsWorker {
 "Version repair worker".into()
 }

-fn status(&self) -> WorkerStatus {
-WorkerStatus {
-progress: Some(self.counter.to_string()),
-..Default::default()
-}
+fn info(&self) -> Option<String> {
+Some(format!("{} items done", self.counter))
 }

 async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
-let (item_bytes, next_pos) = match self.garage.version_table.data.store.get_gt(&self.pos)? {
-Some((k, v)) => (v, k),
+let item_bytes = match self.garage.version_table.data.store.get_gt(&self.pos)? {
+Some((k, v)) => {
+self.pos = k;
+v
+}
 None => {
 info!("repair_versions: finished, done {}", self.counter);
 return Ok(WorkerState::Done);
 }
 };

-let version = Version::decode(&item_bytes).ok_or_message("Cannot decode Version")?;
+self.counter += 1;
+
+let version = rmp_serde::decode::from_read_ref::<_, Version>(&item_bytes)?;
 if !version.deleted.get() {
 let object = self
 .garage
@@ -133,13 +131,10 @@ impl Worker for RepairVersionsWorker {
 }
 }

-self.counter += 1;
-self.pos = next_pos;
-
 Ok(WorkerState::Busy)
 }

-async fn wait_for_work(&mut self) -> WorkerState {
+async fn wait_for_work(&mut self, _must_exit: &watch::Receiver<bool>) -> WorkerState {
 unreachable!()
 }
 }
@@ -168,24 +163,25 @@ impl Worker for RepairBlockrefsWorker {
 "Block refs repair worker".into()
 }

-fn status(&self) -> WorkerStatus {
-WorkerStatus {
-progress: Some(self.counter.to_string()),
-..Default::default()
-}
+fn info(&self) -> Option<String> {
+Some(format!("{} items done", self.counter))
 }

 async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
-let (item_bytes, next_pos) =
-match self.garage.block_ref_table.data.store.get_gt(&self.pos)? {
-Some((k, v)) => (v, k),
-None => {
-info!("repair_block_ref: finished, done {}", self.counter);
-return Ok(WorkerState::Done);
-}
-};
+let item_bytes = match self.garage.block_ref_table.data.store.get_gt(&self.pos)? {
+Some((k, v)) => {
+self.pos = k;
+v
+}
+None => {
+info!("repair_block_ref: finished, done {}", self.counter);
+return Ok(WorkerState::Done);
+}
+};

-let block_ref = BlockRef::decode(&item_bytes).ok_or_message("Cannot decode BlockRef")?;
+self.counter += 1;
+
+let block_ref = rmp_serde::decode::from_read_ref::<_, BlockRef>(&item_bytes)?;
 if !block_ref.deleted.get() {
 let version = self
 .garage
@@ -210,13 +206,10 @@ impl Worker for RepairBlockrefsWorker {
 }
 }

-self.counter += 1;
-self.pos = next_pos;
-
 Ok(WorkerState::Busy)
 }

-async fn wait_for_work(&mut self) -> WorkerState {
+async fn wait_for_work(&mut self, _must_exit: &watch::Receiver<bool>) -> WorkerState {
 unreachable!()
 }
 }
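
In the repair workers above, the 0.8.0 side deserializes table entries straight from their MessagePack encoding with rmp-serde 0.15 (`rmp_serde::decode::from_read_ref`), while the 0.8.1 side goes through the `Migrate` trait's `decode`, which can also fall back to older on-disk formats. A minimal round trip through the direct rmp-serde calls, with a placeholder struct (the real `Version` and `BlockRef` carry more fields, and the exact on-disk encoding is Garage's own concern):

use serde::{Deserialize, Serialize};

// Placeholder stand-in for an on-disk table entry.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct Entry {
    uuid: u64,
    deleted: bool,
}

fn main() {
    let v = Entry { uuid: 42, deleted: false };
    // Encode with named fields, one plausible choice for stored entries.
    let bytes = rmp_serde::to_vec_named(&v).expect("encode");
    // Decode with the rmp-serde 0.15 entry point seen in the diff.
    let back: Entry = rmp_serde::decode::from_read_ref(&bytes).expect("decode");
    assert_eq!(v, back);
}
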
@@ -35,15 +35,12 @@ pub async fn run_server(config_file: PathBuf) -> Result<(), Error> {
 #[cfg(feature = "metrics")]
 let metrics_exporter = opentelemetry_prometheus::exporter().init();

-info!("Initializing Garage main data store...");
-let garage = Garage::new(config.clone())?;
-
 info!("Initializing background runner...");
 let watch_cancel = watch_shutdown_signal();
-let (background, await_background_done) = BackgroundRunner::new(watch_cancel.clone());
+let (background, await_background_done) = BackgroundRunner::new(16, watch_cancel.clone());

-info!("Spawning Garage workers...");
-garage.spawn_workers(&background);
+info!("Initializing Garage main data store...");
+let garage = Garage::new(config.clone(), background)?;

 if config.admin.trace_sink.is_some() {
 info!("Initialize tracing...");
@@ -66,7 +63,7 @@ pub async fn run_server(config_file: PathBuf) -> Result<(), Error> {
 let run_system = tokio::spawn(garage.system.clone().run(watch_cancel.clone()));

 info!("Create admin RPC handler...");
-AdminRpcHandler::new(garage.clone(), background.clone());
+AdminRpcHandler::new(garage.clone());

 // ---- Launch public-facing API servers ----

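
`BackgroundRunner::new(16, watch_cancel.clone())` above takes a `watch::Receiver<bool>` as its stop signal (the `16` being the worker count on the 0.8.0 side), and `offline_repair` builds the same signal by hand with `watch::channel(false)` and `done_tx.send(true)`. A minimal sketch of that cancellation pattern, independent of Garage's own types:

use tokio::sync::watch;

#[tokio::main]
async fn main() {
    let (done_tx, done_rx) = watch::channel(false);
    let worker = tokio::spawn(async move {
        let mut rx = done_rx;
        while !*rx.borrow() {
            // ... one unit of background work would go here ...
            if rx.changed().await.is_err() {
                break; // sender dropped: shut down as well
            }
        }
        println!("worker: exiting cleanly");
    });
    done_tx.send(true).unwrap(); // flip the signal, as offline_repair does
    worker.await.unwrap();
}
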
|
@ -22,7 +22,7 @@ tokio = "1.17.0"
|
||||||
|
|
||||||
# cli deps
|
# cli deps
|
||||||
clap = { version = "3.1.18", optional = true, features = ["derive", "env"] }
|
clap = { version = "3.1.18", optional = true, features = ["derive", "env"] }
|
||||||
garage_util = { version = "0.8.1", path = "../util", optional = true }
|
garage_util = { version = "0.8.0", path = "../util", optional = true }
|
||||||
|
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
[package]
|
[package]
|
||||||
name = "garage_model"
|
name = "garage_model"
|
||||||
version = "0.8.1"
|
version = "0.8.0"
|
||||||
authors = ["Alex Auvolat <alex@adnab.me>"]
|
authors = ["Alex Auvolat <alex@adnab.me>"]
|
||||||
edition = "2018"
|
edition = "2018"
|
||||||
license = "AGPL-3.0"
|
license = "AGPL-3.0"
|
||||||
|
@ -14,11 +14,11 @@ path = "lib.rs"
|
||||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
garage_db = { version = "0.8.1", default-features = false, path = "../db" }
|
garage_db = { version = "0.8.0", default-features = false, path = "../db" }
|
||||||
garage_rpc = { version = "0.8.1", path = "../rpc" }
|
garage_rpc = { version = "0.8.0", path = "../rpc" }
|
||||||
garage_table = { version = "0.8.1", path = "../table" }
|
garage_table = { version = "0.8.0", path = "../table" }
|
||||||
garage_block = { version = "0.8.1", path = "../block" }
|
garage_block = { version = "0.8.0", path = "../block" }
|
||||||
garage_util = { version = "0.8.1", path = "../util" }
|
garage_util = { version = "0.8.0", path = "../util" }
|
||||||
|
|
||||||
async-trait = "0.1.7"
|
async-trait = "0.1.7"
|
||||||
arc-swap = "1.0"
|
arc-swap = "1.0"
|
||||||
|
@ -30,6 +30,7 @@ tracing = "0.1.30"
|
||||||
rand = "0.8"
|
rand = "0.8"
|
||||||
zstd = { version = "0.9", default-features = false }
|
zstd = { version = "0.9", default-features = false }
|
||||||
|
|
||||||
|
rmp-serde = "0.15"
|
||||||
serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
|
serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
|
||||||
serde_bytes = "0.11"
|
serde_bytes = "0.11"
|
||||||
|
|
||||||
|
|
|
@@ -1,26 +1,18 @@
+use serde::{Deserialize, Serialize};
+
 use garage_util::data::*;

 use garage_table::crdt::*;
 use garage_table::*;

-mod v08 {
-use garage_util::crdt;
-use garage_util::data::Uuid;
-use serde::{Deserialize, Serialize};
-
-/// The bucket alias table holds the names given to buckets
-/// in the global namespace.
-#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
-pub struct BucketAlias {
-pub(super) name: String,
-pub state: crdt::Lww<Option<Uuid>>,
-}
-
-impl garage_util::migrate::InitialFormat for BucketAlias {}
+/// The bucket alias table holds the names given to buckets
+/// in the global namespace.
+#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
+pub struct BucketAlias {
+name: String,
+pub state: crdt::Lww<Option<Uuid>>,
 }

-pub use v08::*;
-
 impl BucketAlias {
 pub fn new(name: String, ts: u64, bucket_id: Option<Uuid>) -> Option<Self> {
 if !is_valid_bucket_name(&name) {

@@ -1,3 +1,5 @@
+use serde::{Deserialize, Serialize};
+
 use garage_table::crdt::*;
 use garage_table::*;
 use garage_util::data::*;
@@ -5,82 +7,71 @@ use garage_util::time::*;

 use crate::permission::BucketKeyPerm;

-mod v08 {
-use crate::permission::BucketKeyPerm;
-use garage_util::crdt;
-use garage_util::data::Uuid;
-use serde::{Deserialize, Serialize};
-
-/// A bucket is a collection of objects
-///
-/// Its parameters are not directly accessible as:
-/// - It must be possible to merge paramaters, hence the use of a LWW CRDT.
-/// - A bucket has 2 states, Present or Deleted and parameters make sense only if present.
-#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
-pub struct Bucket {
-/// ID of the bucket
-pub id: Uuid,
-/// State, and configuration if not deleted, of the bucket
-pub state: crdt::Deletable<BucketParams>,
-}
-
-/// Configuration for a bucket
-#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
-pub struct BucketParams {
-/// Bucket's creation date
-pub creation_date: u64,
-/// Map of key with access to the bucket, and what kind of access they give
-pub authorized_keys: crdt::Map<String, BucketKeyPerm>,
-
-/// Map of aliases that are or have been given to this bucket
-/// in the global namespace
-/// (not authoritative: this is just used as an indication to
-/// map back to aliases when doing ListBuckets)
-pub aliases: crdt::LwwMap<String, bool>,
-/// Map of aliases that are or have been given to this bucket
-/// in namespaces local to keys
-/// key = (access key id, alias name)
-pub local_aliases: crdt::LwwMap<(String, String), bool>,
-
-/// Whether this bucket is allowed for website access
-/// (under all of its global alias names),
-/// and if so, the website configuration XML document
-pub website_config: crdt::Lww<Option<WebsiteConfig>>,
-/// CORS rules
-pub cors_config: crdt::Lww<Option<Vec<CorsRule>>>,
-/// Bucket quotas
-#[serde(default)]
-pub quotas: crdt::Lww<BucketQuotas>,
-}
-
-#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
-pub struct WebsiteConfig {
-pub index_document: String,
-pub error_document: Option<String>,
-}
-
-#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
-pub struct CorsRule {
-pub id: Option<String>,
-pub max_age_seconds: Option<u64>,
-pub allow_origins: Vec<String>,
-pub allow_methods: Vec<String>,
-pub allow_headers: Vec<String>,
-pub expose_headers: Vec<String>,
-}
-
-#[derive(Default, PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
-pub struct BucketQuotas {
-/// Maximum size in bytes (bucket size = sum of sizes of objects in the bucket)
-pub max_size: Option<u64>,
-/// Maximum number of non-deleted objects in the bucket
-pub max_objects: Option<u64>,
-}
-
-impl garage_util::migrate::InitialFormat for Bucket {}
+/// A bucket is a collection of objects
+///
+/// Its parameters are not directly accessible as:
+/// - It must be possible to merge paramaters, hence the use of a LWW CRDT.
+/// - A bucket has 2 states, Present or Deleted and parameters make sense only if present.
+#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
+pub struct Bucket {
+/// ID of the bucket
+pub id: Uuid,
+/// State, and configuration if not deleted, of the bucket
+pub state: crdt::Deletable<BucketParams>,
 }

-pub use v08::*;
+/// Configuration for a bucket
+#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
+pub struct BucketParams {
+/// Bucket's creation date
+pub creation_date: u64,
+/// Map of key with access to the bucket, and what kind of access they give
+pub authorized_keys: crdt::Map<String, BucketKeyPerm>,
+
+/// Map of aliases that are or have been given to this bucket
+/// in the global namespace
+/// (not authoritative: this is just used as an indication to
+/// map back to aliases when doing ListBuckets)
+pub aliases: crdt::LwwMap<String, bool>,
+/// Map of aliases that are or have been given to this bucket
+/// in namespaces local to keys
+/// key = (access key id, alias name)
+pub local_aliases: crdt::LwwMap<(String, String), bool>,
+
+/// Whether this bucket is allowed for website access
+/// (under all of its global alias names),
+/// and if so, the website configuration XML document
+pub website_config: crdt::Lww<Option<WebsiteConfig>>,
+/// CORS rules
+pub cors_config: crdt::Lww<Option<Vec<CorsRule>>>,
+/// Bucket quotas
+#[serde(default)]
+pub quotas: crdt::Lww<BucketQuotas>,
+}
+
+#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
+pub struct WebsiteConfig {
+pub index_document: String,
+pub error_document: Option<String>,
+}
+
+#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
+pub struct CorsRule {
+pub id: Option<String>,
+pub max_age_seconds: Option<u64>,
+pub allow_origins: Vec<String>,
+pub allow_methods: Vec<String>,
+pub allow_headers: Vec<String>,
+pub expose_headers: Vec<String>,
+}
+
+#[derive(Default, PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
+pub struct BucketQuotas {
+/// Maximum size in bytes (bucket size = sum of sizes of objects in the bucket)
+pub max_size: Option<u64>,
+/// Maximum number of non-deleted objects in the bucket
+pub max_objects: Option<u64>,
+}

 impl AutoCrdt for BucketQuotas {
 const WARN_IF_DIFFERENT: bool = true;
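
The `mod v08` blocks removed above are the 0.8.1 side's schema-versioning convention: each on-disk format lives in a module named after the version that introduced it, the current version is re-exported at the top level, and `garage_util::migrate::InitialFormat` marks the first format in the chain. A toy sketch of the shape of this pattern, with a local marker trait standing in for the real one:

mod v08 {
    /// First on-disk format of the alias table (toy fields only).
    #[derive(Debug, Clone, PartialEq, Eq)]
    pub struct BucketAlias {
        pub(super) name: String, // writable only from sibling version modules
        pub state: Option<u64>,  // stand-in for the Lww CRDT state
    }

    // Stand-in for garage_util::migrate::InitialFormat.
    pub trait InitialFormat {}
    impl InitialFormat for BucketAlias {}
}

// Call sites always see the current schema under its unversioned name;
// a later schema bump would presumably add a v09 module with a migration
// from v08 and move this re-export.
pub use v08::*;

fn main() {
    println!("current schema: {}", std::any::type_name::<BucketAlias>());
}
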
src/model/garage.rs:

@@ -33,14 +33,14 @@ use crate::k2v::{item_table::*, poll::*, rpc::*};
 pub struct Garage {
     /// The parsed configuration Garage is running
     pub config: Config,
-    /// The set of background variables that can be viewed/modified at runtime
-    pub bg_vars: vars::BgVars,
 
     /// The replication mode of this cluster
     pub replication_mode: ReplicationMode,
 
     /// The local database
     pub db: db::Db,
+    /// A background job runner
+    pub background: Arc<BackgroundRunner>,
     /// The membership manager
     pub system: Arc<System>,
     /// The block manager
@@ -78,7 +78,7 @@ pub struct GarageK2V {
 
 impl Garage {
     /// Create and run garage
-    pub fn new(config: Config) -> Result<Arc<Self>, Error> {
+    pub fn new(config: Config, background: Arc<BackgroundRunner>) -> Result<Arc<Self>, Error> {
         // Create meta dir and data dir if they don't exist already
         std::fs::create_dir_all(&config.metadata_dir)
             .ok_or_message("Unable to create Garage metadata directory")?;
@@ -159,7 +159,7 @@ impl Garage {
         };
 
         let network_key = NetworkKey::from_slice(
-            &hex::decode(&config.rpc_secret.as_ref().unwrap()).expect("Invalid RPC secret key")[..],
+            &hex::decode(&config.rpc_secret).expect("Invalid RPC secret key")[..],
         )
         .expect("Invalid RPC secret key");
 
@@ -167,7 +167,7 @@ impl Garage {
             .expect("Invalid replication_mode in config file.");
 
         info!("Initialize membership management system...");
-        let system = System::new(network_key, replication_mode, &config)?;
+        let system = System::new(network_key, background.clone(), replication_mode, &config)?;
 
         let data_rep_param = TableShardedReplication {
             system: system.clone(),
@@ -225,6 +225,7 @@ impl Garage {
         info!("Initialize version_table...");
         let version_table = Table::new(
             VersionTable {
+                background: background.clone(),
                 block_ref_table: block_ref_table.clone(),
             },
             meta_rep_param.clone(),
@@ -239,6 +240,7 @@ impl Garage {
         #[allow(clippy::redundant_clone)]
         let object_table = Table::new(
             ObjectTable {
+                background: background.clone(),
                 version_table: version_table.clone(),
                 object_counter_table: object_counter_table.clone(),
             },
@@ -251,16 +253,12 @@ impl Garage {
         #[cfg(feature = "k2v")]
         let k2v = GarageK2V::new(system.clone(), &db, meta_rep_param);
 
-        // Initialize bg vars
-        let mut bg_vars = vars::BgVars::new();
-        block_manager.register_bg_vars(&mut bg_vars);
-
         // -- done --
         Ok(Arc::new(Self {
             config,
-            bg_vars,
             replication_mode,
             db,
+            background,
             system,
             block_manager,
             bucket_table,
@@ -275,22 +273,6 @@ impl Garage {
         }))
     }
 
-    pub fn spawn_workers(&self, bg: &BackgroundRunner) {
-        self.block_manager.spawn_workers(bg);
-
-        self.bucket_table.spawn_workers(bg);
-        self.bucket_alias_table.spawn_workers(bg);
-        self.key_table.spawn_workers(bg);
-
-        self.object_table.spawn_workers(bg);
-        self.object_counter_table.spawn_workers(bg);
-        self.version_table.spawn_workers(bg);
-        self.block_ref_table.spawn_workers(bg);
-
-        #[cfg(feature = "k2v")]
-        self.k2v.spawn_workers(bg);
-    }
-
     pub fn bucket_helper(&self) -> helper::bucket::BucketHelper {
         helper::bucket::BucketHelper(self)
     }
@@ -325,9 +307,4 @@ impl GarageK2V {
             rpc,
         }
     }
-
-    pub fn spawn_workers(&self, bg: &BackgroundRunner) {
-        self.item_table.spawn_workers(bg);
-        self.counter_table.spawn_workers(bg);
-    }
 }
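The recurring pattern in this file's hunks: on the left-hand side, `Garage::new` only builds the data structures and a later `spawn_workers(bg)` call starts the background tasks; on the right-hand side, the `BackgroundRunner` is injected into `Garage::new` (and into `System::new`) and stored in the struct, so tasks can be started during construction. A toy sketch of the two initialization styles; every type and worker name here is a stand-in, not the Garage API:

```rust
use std::sync::Arc;

struct BackgroundRunner;
impl BackgroundRunner {
    fn spawn_worker(&self, name: &str) {
        println!("spawned worker: {name}");
    }
}

struct Garage;

impl Garage {
    // Style A (left side of the diff): construct first, spawn workers later,
    // so construction has no side effects and tests can skip the workers.
    fn new() -> Arc<Self> {
        Arc::new(Garage)
    }
    fn spawn_workers(&self, bg: &BackgroundRunner) {
        bg.spawn_worker("block_manager");
        bg.spawn_worker("object_table");
    }

    // Style B (right side): the runner is injected and workers start inside new().
    fn new_with_runner(bg: Arc<BackgroundRunner>) -> Arc<Self> {
        bg.spawn_worker("block_manager");
        bg.spawn_worker("object_table");
        Arc::new(Garage)
    }
}

fn main() {
    let bg = Arc::new(BackgroundRunner);
    let g = Garage::new();
    g.spawn_workers(&bg);                   // style A
    let _g2 = Garage::new_with_runner(bg);  // style B
}
```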
src/model/index_counter.rs:

@@ -1,18 +1,19 @@
 use core::ops::Bound;
-use std::collections::{BTreeMap, HashMap};
+use std::collections::{hash_map, BTreeMap, HashMap};
 use std::marker::PhantomData;
 use std::sync::Arc;
 
+use async_trait::async_trait;
 use serde::{Deserialize, Serialize};
+use tokio::sync::{mpsc, watch};
 
 use garage_db as db;
 
 use garage_rpc::ring::Ring;
 use garage_rpc::system::System;
-use garage_util::background::BackgroundRunner;
+use garage_util::background::*;
 use garage_util::data::*;
 use garage_util::error::*;
-use garage_util::migrate::Migrate;
 use garage_util::time::*;
 
 use garage_table::crdt::*;
@@ -30,44 +31,14 @@ pub trait CountedItem: Clone + PartialEq + Send + Sync + 'static {
     fn counts(&self) -> Vec<(&'static str, i64)>;
 }
 
-mod v08 {
-    use super::CountedItem;
-    use garage_util::data::Uuid;
-    use serde::{Deserialize, Serialize};
-    use std::collections::BTreeMap;
-
-    // ---- Global part (the table everyone queries) ----
-
-    /// A counter entry in the global table
-    #[derive(Clone, PartialEq, Debug, Serialize, Deserialize)]
-    pub struct CounterEntry<T: CountedItem> {
-        pub pk: T::CP,
-        pub sk: T::CS,
-        pub values: BTreeMap<String, CounterValue>,
-    }
-
-    /// A counter entry in the global table
-    #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
-    pub struct CounterValue {
-        pub node_values: BTreeMap<Uuid, (u64, i64)>,
-    }
-
-    impl<T: CountedItem> garage_util::migrate::InitialFormat for CounterEntry<T> {}
-
-    // ---- Local part (the counter we maintain transactionnaly on each node) ----
-
-    #[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
-    pub(super) struct LocalCounterEntry<T: CountedItem> {
-        pub(super) pk: T::CP,
-        pub(super) sk: T::CS,
-        pub(super) values: BTreeMap<String, (u64, i64)>,
-    }
-
-    impl<T: CountedItem> garage_util::migrate::InitialFormat for LocalCounterEntry<T> {}
-}
-
-pub use v08::*;
+/// A counter entry in the global table
+#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)]
+pub struct CounterEntry<T: CountedItem> {
+    pub pk: T::CP,
+    pub sk: T::CS,
+    pub values: BTreeMap<String, CounterValue>,
+}
 
 impl<T: CountedItem> Entry<T::CP, T::CS> for CounterEntry<T> {
     fn partition_key(&self) -> &T::CP {
         &self.pk
@@ -109,6 +80,12 @@ impl<T: CountedItem> CounterEntry<T> {
     }
 }
 
+/// A counter entry in the global table
+#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
+pub struct CounterValue {
+    pub node_values: BTreeMap<Uuid, (u64, i64)>,
+}
+
 impl<T: CountedItem> Crdt for CounterEntry<T> {
     fn merge(&mut self, other: &Self) {
         for (name, e2) in other.values.iter() {
@@ -165,6 +142,7 @@ impl<T: CountedItem> TableSchema for CounterTable<T> {
 pub struct IndexCounter<T: CountedItem> {
     this_node: Uuid,
     local_counter: db::Tree,
+    propagate_tx: mpsc::UnboundedSender<(T::CP, T::CS, LocalCounterEntry<T>)>,
     pub table: Arc<Table<CounterTable<T>, TableShardedReplication>>,
 }
 
@@ -174,11 +152,16 @@ impl<T: CountedItem> IndexCounter<T> {
         replication: TableShardedReplication,
         db: &db::Db,
     ) -> Arc<Self> {
-        Arc::new(Self {
+        let background = system.background.clone();
+
+        let (propagate_tx, propagate_rx) = mpsc::unbounded_channel();
+
+        let this = Arc::new(Self {
             this_node: system.id,
             local_counter: db
                 .open_tree(format!("local_counter_v2:{}", T::COUNTER_TABLE_NAME))
                 .expect("Unable to open local counter tree"),
+            propagate_tx,
             table: Table::new(
                 CounterTable {
                     _phantom_t: Default::default(),
@@ -187,11 +170,16 @@ impl<T: CountedItem> IndexCounter<T> {
                 system,
                 db,
             ),
-        })
-    }
-
-    pub fn spawn_workers(&self, bg: &BackgroundRunner) {
-        self.table.spawn_workers(bg);
+        });
+
+        background.spawn_worker(IndexPropagatorWorker {
+            index_counter: this.clone(),
+            propagate_rx,
+            buf: HashMap::new(),
+            errors: 0,
+        });
+
+        this
     }
 
     pub fn count(
@@ -220,9 +208,11 @@ impl<T: CountedItem> IndexCounter<T> {
         let tree_key = self.table.data.tree_key(pk, sk);
 
         let mut entry = match tx.get(&self.local_counter, &tree_key[..])? {
-            Some(old_bytes) => LocalCounterEntry::<T>::decode(&old_bytes)
-                .ok_or_message("Cannot decode local counter entry")
-                .map_err(db::TxError::Abort)?,
+            Some(old_bytes) => {
+                rmp_serde::decode::from_read_ref::<_, LocalCounterEntry<T>>(&old_bytes)
+                    .map_err(Error::RmpDecode)
+                    .map_err(db::TxError::Abort)?
+            }
             None => LocalCounterEntry {
                 pk: pk.clone(),
                 sk: sk.clone(),
@@ -237,14 +227,17 @@ impl<T: CountedItem> IndexCounter<T> {
             ent.1 += *inc;
         }
 
-        let new_entry_bytes = entry
-            .encode()
+        let new_entry_bytes = rmp_to_vec_all_named(&entry)
             .map_err(Error::RmpEncode)
            .map_err(db::TxError::Abort)?;
         tx.insert(&self.local_counter, &tree_key[..], new_entry_bytes)?;
 
-        let dist_entry = entry.into_counter_entry(self.this_node);
-        self.table.queue_insert(tx, &dist_entry)?;
+        if let Err(e) = self.propagate_tx.send((pk.clone(), sk.clone(), entry)) {
+            error!(
+                "Could not propagate updated counter values, failed to send to channel: {}",
+                e
+            );
+        }
 
         Ok(())
     }
@@ -257,6 +250,23 @@ impl<T: CountedItem> IndexCounter<T> {
         TS: TableSchema<E = T>,
         TR: TableReplication,
     {
+        let save_counter_entry = |entry: CounterEntry<T>| -> Result<(), Error> {
+            let entry_k = self
+                .table
+                .data
+                .tree_key(entry.partition_key(), entry.sort_key());
+            self.table
+                .data
+                .update_entry_with(&entry_k, |ent| match ent {
+                    Some(mut ent) => {
+                        ent.merge(&entry);
+                        ent
+                    }
+                    None => entry.clone(),
+                })?;
+            Ok(())
+        };
+
         // 1. Set all old local counters to zero
         let now = now_msec();
         let mut next_start: Option<Vec<u8>> = None;
@@ -279,22 +289,20 @@ impl<T: CountedItem> IndexCounter<T> {
 
             info!("zeroing old counters... ({})", hex::encode(&batch[0].0));
             for (local_counter_k, local_counter) in batch {
-                let mut local_counter = LocalCounterEntry::<T>::decode(&local_counter)
-                    .ok_or_message("Cannot decode local counter entry")?;
+                let mut local_counter =
+                    rmp_serde::decode::from_read_ref::<_, LocalCounterEntry<T>>(&local_counter)?;
 
                 for (_, tv) in local_counter.values.iter_mut() {
                     tv.0 = std::cmp::max(tv.0 + 1, now);
                     tv.1 = 0;
                 }
 
-                let local_counter_bytes = local_counter.encode()?;
+                let local_counter_bytes = rmp_to_vec_all_named(&local_counter)?;
                 self.local_counter
                     .insert(&local_counter_k, &local_counter_bytes)?;
 
                 let counter_entry = local_counter.into_counter_entry(self.this_node);
-                self.local_counter
-                    .db()
-                    .transaction(|mut tx| self.table.queue_insert(&mut tx, &counter_entry))?;
+                save_counter_entry(counter_entry)?;
 
                 next_start = Some(local_counter_k);
             }
@@ -335,8 +343,9 @@ impl<T: CountedItem> IndexCounter<T> {
         let local_counter_key = self.table.data.tree_key(pk, sk);
         let mut local_counter = match self.local_counter.get(&local_counter_key)? {
             Some(old_bytes) => {
-                let ent = LocalCounterEntry::<T>::decode(&old_bytes)
-                    .ok_or_message("Cannot decode local counter entry")?;
+                let ent = rmp_serde::decode::from_read_ref::<_, LocalCounterEntry<T>>(
+                    &old_bytes,
+                )?;
                 assert!(ent.pk == *pk);
                 assert!(ent.sk == *sk);
                 ent
@@ -353,14 +362,12 @@ impl<T: CountedItem> IndexCounter<T> {
             tv.1 += v;
         }
 
-        let local_counter_bytes = local_counter.encode()?;
+        let local_counter_bytes = rmp_to_vec_all_named(&local_counter)?;
         self.local_counter
             .insert(&local_counter_key, local_counter_bytes)?;
 
        let counter_entry = local_counter.into_counter_entry(self.this_node);
-        self.local_counter
-            .db()
-            .transaction(|mut tx| self.table.queue_insert(&mut tx, &counter_entry))?;
+        save_counter_entry(counter_entry)?;
 
         next_start = Some(counted_entry_k);
     }
@@ -371,7 +378,104 @@ impl<T: CountedItem> IndexCounter<T> {
     }
 }
 
-// ----
+struct IndexPropagatorWorker<T: CountedItem> {
+    index_counter: Arc<IndexCounter<T>>,
+    propagate_rx: mpsc::UnboundedReceiver<(T::CP, T::CS, LocalCounterEntry<T>)>,
+
+    buf: HashMap<Vec<u8>, CounterEntry<T>>,
+    errors: usize,
+}
+
+impl<T: CountedItem> IndexPropagatorWorker<T> {
+    fn add_ent(&mut self, pk: T::CP, sk: T::CS, counters: LocalCounterEntry<T>) {
+        let tree_key = self.index_counter.table.data.tree_key(&pk, &sk);
+        let dist_entry = counters.into_counter_entry(self.index_counter.this_node);
+        match self.buf.entry(tree_key) {
+            hash_map::Entry::Vacant(e) => {
+                e.insert(dist_entry);
+            }
+            hash_map::Entry::Occupied(mut e) => {
+                e.get_mut().merge(&dist_entry);
+            }
+        }
+    }
+}
+
+#[async_trait]
+impl<T: CountedItem> Worker for IndexPropagatorWorker<T> {
+    fn name(&self) -> String {
+        format!("{} index counter propagator", T::COUNTER_TABLE_NAME)
+    }
+
+    fn info(&self) -> Option<String> {
+        if !self.buf.is_empty() {
+            Some(format!("{} items in queue", self.buf.len()))
+        } else {
+            None
+        }
+    }
+
+    async fn work(&mut self, must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
+        // This loop batches updates to counters to be sent all at once.
+        // They are sent once the propagate_rx channel has been emptied (or is closed).
+        let closed = loop {
+            match self.propagate_rx.try_recv() {
+                Ok((pk, sk, counters)) => {
+                    self.add_ent(pk, sk, counters);
+                }
+                Err(mpsc::error::TryRecvError::Empty) => break false,
+                Err(mpsc::error::TryRecvError::Disconnected) => break true,
+            }
+        };
 
+        if !self.buf.is_empty() {
+            let entries_k = self.buf.keys().take(100).cloned().collect::<Vec<_>>();
+            let entries = entries_k.iter().map(|k| self.buf.get(k).unwrap());
+            if let Err(e) = self.index_counter.table.insert_many(entries).await {
+                self.errors += 1;
+                if self.errors >= 2 && *must_exit.borrow() {
+                    error!("({}) Could not propagate {} counter values: {}, these counters will not be updated correctly.", T::COUNTER_TABLE_NAME, self.buf.len(), e);
+                    return Ok(WorkerState::Done);
+                }
+                // Propagate error up to worker manager, it will log it, increment a counter,
+                // and sleep for a certain delay (with exponential backoff), waiting for
+                // things to go back to normal
+                return Err(e);
+            } else {
+                for k in entries_k {
+                    self.buf.remove(&k);
+                }
+                self.errors = 0;
+            }
+
+            return Ok(WorkerState::Busy);
+        } else if closed {
+            return Ok(WorkerState::Done);
+        } else {
+            return Ok(WorkerState::Idle);
+        }
+    }
+
+    async fn wait_for_work(&mut self, _must_exit: &watch::Receiver<bool>) -> WorkerState {
+        match self.propagate_rx.recv().await {
+            Some((pk, sk, counters)) => {
+                self.add_ent(pk, sk, counters);
+                WorkerState::Busy
+            }
+            None => match self.buf.is_empty() {
+                false => WorkerState::Busy,
+                true => WorkerState::Done,
+            },
+        }
+    }
+}
+
+#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
+struct LocalCounterEntry<T: CountedItem> {
+    pk: T::CP,
+    sk: T::CS,
+    values: BTreeMap<String, (u64, i64)>,
+}
 
 impl<T: CountedItem> LocalCounterEntry<T> {
     fn into_counter_entry(self, this_node: Uuid) -> CounterEntry<T> {
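The index-counter hunks above swap two propagation strategies. On the left, the merged `CounterEntry` is enqueued with `queue_insert` inside the same database transaction that updates the local counter. On the right, the `LocalCounterEntry` is pushed onto an unbounded `mpsc` channel, and `IndexPropagatorWorker` drains the channel, coalesces updates per tree key in a `HashMap`, and flushes them in batches. A self-contained sketch of that drain-then-flush loop, with simplified types and a `println!` standing in for `table.insert_many` (assumes tokio with the `macros` and `rt` features):

```rust
use std::collections::HashMap;
use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::unbounded_channel::<(String, i64)>();

    // Producers: every counter update is just sent on the channel,
    // so the database transaction never waits on the network.
    for i in 0..5 {
        tx.send((format!("bucket{}", i % 2), 1)).unwrap();
    }
    drop(tx); // close the channel so the consumer can observe Disconnected

    // Consumer: coalesce all pending updates keyed by entry, then flush once.
    let mut buf: HashMap<String, i64> = HashMap::new();
    let closed = loop {
        match rx.try_recv() {
            Ok((key, inc)) => *buf.entry(key).or_default() += inc,
            Err(mpsc::error::TryRecvError::Empty) => break false,
            Err(mpsc::error::TryRecvError::Disconnected) => break true,
        }
    };

    // In the real worker this is one insert_many() call per batch; here we print.
    println!("flushing {} merged entries (closed={})", buf.len(), closed);
    for (k, v) in buf {
        println!("  {k} += {v}");
    }
}
```

The trade-off is visible in both hunks that touch `save_counter_entry`: the channel variant batches aggressively but can lose updates if the process dies before the flush, while the transactional variant on the left keeps the propagation crash-safe at the cost of more work inside the transaction.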
src/model/k2v/item_table.rs:

@@ -1,8 +1,7 @@
+use serde::{Deserialize, Serialize};
 use std::collections::BTreeMap;
 use std::sync::Arc;
 
-use serde::{Deserialize, Serialize};
-
 use garage_db as db;
 use garage_util::data::*;
 
@@ -18,42 +17,31 @@ pub const CONFLICTS: &str = "conflicts";
 pub const VALUES: &str = "values";
 pub const BYTES: &str = "bytes";
 
-mod v08 {
-    use crate::k2v::causality::K2VNodeId;
-    use garage_util::data::Uuid;
-    use serde::{Deserialize, Serialize};
-    use std::collections::BTreeMap;
-
-    #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
-    pub struct K2VItem {
-        pub partition: K2VItemPartition,
-        pub sort_key: String,
-
-        pub(super) items: BTreeMap<K2VNodeId, DvvsEntry>,
-    }
-
-    #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize, Hash)]
-    pub struct K2VItemPartition {
-        pub bucket_id: Uuid,
-        pub partition_key: String,
-    }
-
-    #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
-    pub struct DvvsEntry {
-        pub(super) t_discard: u64,
-        pub(super) values: Vec<(u64, DvvsValue)>,
-    }
-
-    #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
-    pub enum DvvsValue {
-        Value(#[serde(with = "serde_bytes")] Vec<u8>),
-        Deleted,
-    }
-
-    impl garage_util::migrate::InitialFormat for K2VItem {}
+#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
+pub struct K2VItem {
+    pub partition: K2VItemPartition,
+    pub sort_key: String,
+
+    items: BTreeMap<K2VNodeId, DvvsEntry>,
 }
 
-pub use v08::*;
+#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize, Hash)]
+pub struct K2VItemPartition {
+    pub bucket_id: Uuid,
+    pub partition_key: String,
+}
+
+#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
+struct DvvsEntry {
+    t_discard: u64,
+    values: Vec<(u64, DvvsValue)>,
+}
+
+#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
+pub enum DvvsValue {
+    Value(#[serde(with = "serde_bytes")] Vec<u8>),
+    Deleted,
+}
 
 impl K2VItem {
     /// Creates a new K2VItem when no previous entry existed in the db

src/model/k2v/rpc.rs:

@@ -273,9 +273,14 @@ impl K2VRpcHandler {
     }
 
     fn local_insert(&self, item: &InsertedItem) -> Result<Option<K2VItem>, Error> {
+        let tree_key = self
+            .item_table
+            .data
+            .tree_key(&item.partition, &item.sort_key);
+
         self.item_table
             .data
-            .update_entry_with(&item.partition, &item.sort_key, |ent| {
+            .update_entry_with(&tree_key[..], |ent| {
                 let mut ent = ent.unwrap_or_else(|| {
                     K2VItem::new(
                         item.partition.bucket_id,
src/model/key_table.rs:

@@ -1,121 +1,45 @@
 use serde::{Deserialize, Serialize};
 
-use garage_util::crdt::{self, Crdt};
+use garage_table::crdt::*;
+use garage_table::*;
 use garage_util::data::*;
 
-use garage_table::{DeletedFilter, EmptyKey, Entry, TableSchema};
-
 use crate::permission::BucketKeyPerm;
 
-pub(crate) mod v05 {
-    use garage_util::crdt;
-    use serde::{Deserialize, Serialize};
-
-    /// An api key
-    #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
-    pub struct Key {
-        /// The id of the key (immutable), used as partition key
-        pub key_id: String,
-
-        /// The secret_key associated
-        pub secret_key: String,
-
-        /// Name for the key
-        pub name: crdt::Lww<String>,
-
-        /// Is the key deleted
-        pub deleted: crdt::Bool,
-
-        /// Buckets in which the key is authorized. Empty if `Key` is deleted
-        // CRDT interaction: deleted implies authorized_buckets is empty
-        pub authorized_buckets: crdt::LwwMap<String, PermissionSet>,
-    }
-
-    /// Permission given to a key in a bucket
-    #[derive(PartialOrd, Ord, PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
-    pub struct PermissionSet {
-        /// The key can be used to read the bucket
-        pub allow_read: bool,
-        /// The key can be used to write in the bucket
-        pub allow_write: bool,
-    }
-
-    impl crdt::AutoCrdt for PermissionSet {
-        const WARN_IF_DIFFERENT: bool = true;
-    }
-
-    impl garage_util::migrate::InitialFormat for Key {}
-}
-
-mod v08 {
-    use super::v05;
-    use crate::permission::BucketKeyPerm;
-    use garage_util::crdt;
-    use garage_util::data::Uuid;
-    use serde::{Deserialize, Serialize};
-
-    /// An api key
-    #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
-    pub struct Key {
-        /// The id of the key (immutable), used as partition key
-        pub key_id: String,
-
-        /// Internal state of the key
-        pub state: crdt::Deletable<KeyParams>,
-    }
-
-    /// Configuration for a key
-    #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
-    pub struct KeyParams {
-        /// The secret_key associated (immutable)
-        pub secret_key: String,
-
-        /// Name for the key
-        pub name: crdt::Lww<String>,
-
-        /// Flag to allow users having this key to create buckets
-        pub allow_create_bucket: crdt::Lww<bool>,
-
-        /// If the key is present: it gives some permissions,
-        /// a map of bucket IDs (uuids) to permissions.
-        /// Otherwise no permissions are granted to key
-        pub authorized_buckets: crdt::Map<Uuid, BucketKeyPerm>,
-
-        /// A key can have a local view of buckets names it is
-        /// the only one to see, this is the namespace for these aliases
-        pub local_aliases: crdt::LwwMap<String, Option<Uuid>>,
-    }
-
-    impl garage_util::migrate::Migrate for Key {
-        type Previous = v05::Key;
-
-        fn migrate(old_k: v05::Key) -> Key {
-            let name = crdt::Lww::raw(old_k.name.timestamp(), old_k.name.get().clone());
-
-            let state = if old_k.deleted.get() {
-                crdt::Deletable::Deleted
-            } else {
-                // Authorized buckets is ignored here,
-                // migration is performed in specific migration code in
-                // garage/migrate.rs
-                crdt::Deletable::Present(KeyParams {
-                    secret_key: old_k.secret_key,
-                    name,
-                    allow_create_bucket: crdt::Lww::new(false),
-                    authorized_buckets: crdt::Map::new(),
-                    local_aliases: crdt::LwwMap::new(),
-                })
-            };
-            Key {
-                key_id: old_k.key_id,
-                state,
-            }
-        }
-    }
-}
-
-pub use v08::*;
+use crate::prev::v051::key_table as old;
+
+/// An api key
+#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
+pub struct Key {
+    /// The id of the key (immutable), used as partition key
+    pub key_id: String,
+
+    /// Internal state of the key
+    pub state: crdt::Deletable<KeyParams>,
+}
+
+/// Configuration for a key
+#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
+pub struct KeyParams {
+    /// The secret_key associated (immutable)
+    pub secret_key: String,
+
+    /// Name for the key
+    pub name: crdt::Lww<String>,
+
+    /// Flag to allow users having this key to create buckets
+    pub allow_create_bucket: crdt::Lww<bool>,
+
+    /// If the key is present: it gives some permissions,
+    /// a map of bucket IDs (uuids) to permissions.
+    /// Otherwise no permissions are granted to key
+    pub authorized_buckets: crdt::Map<Uuid, BucketKeyPerm>,
+
+    /// A key can have a local view of buckets names it is
+    /// the only one to see, this is the namespace for these aliases
+    pub local_aliases: crdt::LwwMap<String, Option<Uuid>>,
+}
 
 impl KeyParams {
     fn new(secret_key: &str, name: &str) -> Self {
         KeyParams {
@@ -249,4 +173,28 @@ impl TableSchema for KeyTable {
             }
         }
     }
+
+    fn try_migrate(bytes: &[u8]) -> Option<Self::E> {
+        let old_k = rmp_serde::decode::from_read_ref::<_, old::Key>(bytes).ok()?;
+        let name = crdt::Lww::raw(old_k.name.timestamp(), old_k.name.get().clone());
+
+        let state = if old_k.deleted.get() {
+            crdt::Deletable::Deleted
+        } else {
+            // Authorized buckets is ignored here,
+            // migration is performed in specific migration code in
+            // garage/migrate.rs
+            crdt::Deletable::Present(KeyParams {
+                secret_key: old_k.secret_key,
+                name,
+                allow_create_bucket: crdt::Lww::new(false),
+                authorized_buckets: crdt::Map::new(),
+                local_aliases: crdt::LwwMap::new(),
+            })
+        };
+        Some(Key {
+            key_id: old_k.key_id,
+            state,
+        })
+    }
 }
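Both sides of the key-table diff solve the same problem, reading entries written by older Garage versions, but differently: the left keeps every schema version as a module (`v05`, `v08`) plus a typed `Migrate` impl with `type Previous`, while the right decodes raw MessagePack bytes against the vendored old struct inside `TableSchema::try_migrate`. A compact sketch of the try-current-then-migrate fallback; it uses JSON instead of MessagePack and hypothetical key structs, purely for illustration (assumes `serde` with the `derive` feature and `serde_json`):

```rust
use serde::{Deserialize, Serialize};

// Hypothetical old on-disk schema.
#[derive(Serialize, Deserialize)]
struct OldKey {
    key_id: String,
    deleted: bool,
}

// Hypothetical current schema; `state` is required, so old bytes fail to
// decode as NewKey and fall through to the migration path.
#[derive(Serialize, Deserialize, Debug)]
struct NewKey {
    key_id: String,
    state: String, // "active" or "deleted"
}

fn try_migrate(bytes: &[u8]) -> Option<NewKey> {
    let old: OldKey = serde_json::from_slice(bytes).ok()?;
    Some(NewKey {
        key_id: old.key_id,
        state: if old.deleted { "deleted".into() } else { "active".into() },
    })
}

fn decode_key(bytes: &[u8]) -> Option<NewKey> {
    // Try the current schema first; fall back to migrating the old one.
    serde_json::from_slice::<NewKey>(bytes)
        .ok()
        .or_else(|| try_migrate(bytes))
}

fn main() {
    let old = OldKey { key_id: "GK31bc".into(), deleted: false };
    let bytes = serde_json::to_vec(&old).unwrap();
    println!("{:?}", decode_key(&bytes));
}
```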
src/model/migrate.rs:

@@ -2,7 +2,6 @@ use std::sync::Arc;
 
 use garage_util::crdt::*;
 use garage_util::data::*;
-use garage_util::encode::nonversioned_decode;
 use garage_util::error::Error as GarageError;
 use garage_util::time::*;
 
@@ -29,8 +28,8 @@ impl Migrate {
         let mut old_buckets = vec![];
         for res in tree.iter().map_err(GarageError::from)? {
             let (_k, v) = res.map_err(GarageError::from)?;
-            let bucket =
-                nonversioned_decode::<old_bucket::Bucket>(&v[..]).map_err(GarageError::from)?;
+            let bucket = rmp_serde::decode::from_read_ref::<_, old_bucket::Bucket>(&v[..])
+                .map_err(GarageError::from)?;
             old_buckets.push(bucket);
         }
 
src/model/prev/v051/bucket_table.rs:

@@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize};
 use garage_table::crdt::Crdt;
 use garage_table::*;
 
-use crate::key_table::v05::PermissionSet;
+use super::key_table::PermissionSet;
 
 /// A bucket is a collection of objects
 ///
src/model/prev/v051/key_table.rs (new file):

@@ -0,0 +1,50 @@
+use serde::{Deserialize, Serialize};
+
+use garage_table::crdt::*;
+use garage_table::*;
+
+/// An api key
+#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
+pub struct Key {
+    /// The id of the key (immutable), used as partition key
+    pub key_id: String,
+
+    /// The secret_key associated
+    pub secret_key: String,
+
+    /// Name for the key
+    pub name: crdt::Lww<String>,
+
+    /// Is the key deleted
+    pub deleted: crdt::Bool,
+
+    /// Buckets in which the key is authorized. Empty if `Key` is deleted
+    // CRDT interaction: deleted implies authorized_buckets is empty
+    pub authorized_buckets: crdt::LwwMap<String, PermissionSet>,
+}
+
+/// Permission given to a key in a bucket
+#[derive(PartialOrd, Ord, PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
+pub struct PermissionSet {
+    /// The key can be used to read the bucket
+    pub allow_read: bool,
+    /// The key can be used to write in the bucket
+    pub allow_write: bool,
+}
+
+impl AutoCrdt for PermissionSet {
+    const WARN_IF_DIFFERENT: bool = true;
+}
+
+impl Crdt for Key {
+    fn merge(&mut self, other: &Self) {
+        self.name.merge(&other.name);
+        self.deleted.merge(&other.deleted);
+
+        if self.deleted.get() {
+            self.authorized_buckets.clear();
+        } else {
+            self.authorized_buckets.merge(&other.authorized_buckets);
+        }
+    }
+}
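The `Crdt::merge` at the end of this vendored schema encodes a delete-dominates rule: `deleted` is an OR-like boolean CRDT, and once it is set, the `authorized_buckets` map is cleared rather than merged. A minimal sketch of that rule with plain std types (the real `crdt::Bool` and `crdt::LwwMap` are Garage's own types; the `bool` and `BTreeMap` here are simplifications):

```rust
use std::collections::BTreeMap;

#[derive(Clone, Debug)]
struct Key {
    deleted: bool, // stand-in for crdt::Bool: true once any replica deletes
    authorized_buckets: BTreeMap<String, bool>,
}

impl Key {
    fn merge(&mut self, other: &Key) {
        // crdt::Bool merges as a logical OR: once deleted, always deleted.
        self.deleted |= other.deleted;

        if self.deleted {
            // CRDT interaction: deleted implies authorized_buckets is empty.
            self.authorized_buckets.clear();
        } else {
            for (k, v) in &other.authorized_buckets {
                self.authorized_buckets.insert(k.clone(), *v);
            }
        }
    }
}

fn main() {
    let mut a = Key {
        deleted: false,
        authorized_buckets: BTreeMap::from([("site".into(), true)]),
    };
    let b = Key { deleted: true, authorized_buckets: BTreeMap::new() };
    a.merge(&b);
    // No matter the merge order, all replicas converge on the tombstone.
    assert!(a.deleted && a.authorized_buckets.is_empty());
    println!("{a:?}");
}
```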
src/model/prev/v051/mod.rs:

@@ -1 +1,4 @@
 pub(crate) mod bucket_table;
+pub(crate) mod key_table;
+pub(crate) mod object_table;
+pub(crate) mod version_table;
src/model/prev/v051/object_table.rs (new file):

@@ -0,0 +1,149 @@
+use serde::{Deserialize, Serialize};
+use std::collections::BTreeMap;
+
+use garage_util::data::*;
+
+use garage_table::crdt::*;
+
+/// An object
+#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
+pub struct Object {
+    /// The bucket in which the object is stored, used as partition key
+    pub bucket: String,
+
+    /// The key at which the object is stored in its bucket, used as sorting key
+    pub key: String,
+
+    /// The list of currenty stored versions of the object
+    versions: Vec<ObjectVersion>,
+}
+
+impl Object {
+    /// Get a list of currently stored versions of `Object`
+    pub fn versions(&self) -> &[ObjectVersion] {
+        &self.versions[..]
+    }
+}
+
+/// Informations about a version of an object
+#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
+pub struct ObjectVersion {
+    /// Id of the version
+    pub uuid: Uuid,
+    /// Timestamp of when the object was created
+    pub timestamp: u64,
+    /// State of the version
+    pub state: ObjectVersionState,
+}
+
+/// State of an object version
+#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
+pub enum ObjectVersionState {
+    /// The version is being received
+    Uploading(ObjectVersionHeaders),
+    /// The version is fully received
+    Complete(ObjectVersionData),
+    /// The version uploaded containded errors or the upload was explicitly aborted
+    Aborted,
+}
+
+impl Crdt for ObjectVersionState {
+    fn merge(&mut self, other: &Self) {
+        use ObjectVersionState::*;
+        match other {
+            Aborted => {
+                *self = Aborted;
+            }
+            Complete(b) => match self {
+                Aborted => {}
+                Complete(a) => {
+                    a.merge(b);
+                }
+                Uploading(_) => {
+                    *self = Complete(b.clone());
+                }
+            },
+            Uploading(_) => {}
+        }
+    }
+}
+
+/// Data stored in object version
+#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
+pub enum ObjectVersionData {
+    /// The object was deleted, this Version is a tombstone to mark it as such
+    DeleteMarker,
+    /// The object is short, it's stored inlined
+    Inline(ObjectVersionMeta, #[serde(with = "serde_bytes")] Vec<u8>),
+    /// The object is not short, Hash of first block is stored here, next segments hashes are
+    /// stored in the version table
+    FirstBlock(ObjectVersionMeta, Hash),
+}
+
+impl AutoCrdt for ObjectVersionData {
+    const WARN_IF_DIFFERENT: bool = true;
+}
+
+/// Metadata about the object version
+#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
+pub struct ObjectVersionMeta {
+    /// Headers to send to the client
+    pub headers: ObjectVersionHeaders,
+    /// Size of the object
+    pub size: u64,
+    /// etag of the object
+    pub etag: String,
+}
+
+/// Additional headers for an object
+#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
+pub struct ObjectVersionHeaders {
+    /// Content type of the object
+    pub content_type: String,
+    /// Any other http headers to send
+    pub other: BTreeMap<String, String>,
+}
+
+impl ObjectVersion {
+    fn cmp_key(&self) -> (u64, Uuid) {
+        (self.timestamp, self.uuid)
+    }
+
+    /// Is the object version completely received
+    pub fn is_complete(&self) -> bool {
+        matches!(self.state, ObjectVersionState::Complete(_))
+    }
+}
+
+impl Crdt for Object {
+    fn merge(&mut self, other: &Self) {
+        // Merge versions from other into here
+        for other_v in other.versions.iter() {
+            match self
+                .versions
+                .binary_search_by(|v| v.cmp_key().cmp(&other_v.cmp_key()))
+            {
+                Ok(i) => {
+                    self.versions[i].state.merge(&other_v.state);
+                }
+                Err(i) => {
+                    self.versions.insert(i, other_v.clone());
+                }
+            }
+        }
+
+        // Remove versions which are obsolete, i.e. those that come
+        // before the last version which .is_complete().
+        let last_complete = self
+            .versions
+            .iter()
+            .enumerate()
+            .rev()
+            .find(|(_, v)| v.is_complete())
+            .map(|(vi, _)| vi);
+
+        if let Some(last_vi) = last_complete {
+            self.versions = self.versions.drain(last_vi..).collect::<Vec<_>>();
+        }
+    }
+}
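The `Crdt for Object` merge at the end of this file does two things: it unions the version lists by their `(timestamp, uuid)` sort key, then prunes every version older than the most recent one that `is_complete()`. A small sketch of just the pruning step, with a plain `u64` standing in for the sort key and a `bool` for "complete":

```rust
// Keep only the last complete version and everything newer than it.
fn prune(versions: &mut Vec<(u64, bool)>) {
    // `versions` is assumed sorted by key, as in the real merge.
    if let Some(last_complete) = versions
        .iter()
        .enumerate()
        .rev()
        .find(|(_, v)| v.1)
        .map(|(i, _)| i)
    {
        versions.drain(..last_complete); // drop the obsolete prefix
    }
}

fn main() {
    let mut vs = vec![(1, true), (2, false), (3, true), (4, false)];
    prune(&mut vs);
    // Version 1 is superseded by complete version 3; version 4 is still pending.
    assert_eq!(vs, vec![(3, true), (4, false)]);
    println!("{vs:?}");
}
```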
src/model/prev/v051/version_table.rs (new file):

@@ -0,0 +1,79 @@
+use serde::{Deserialize, Serialize};
+
+use garage_util::data::*;
+
+use garage_table::crdt::*;
+use garage_table::*;
+
+/// A version of an object
+#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
+pub struct Version {
+    /// UUID of the version, used as partition key
+    pub uuid: Uuid,
+
+    // Actual data: the blocks for this version
+    // In the case of a multipart upload, also store the etags
+    // of individual parts and check them when doing CompleteMultipartUpload
+    /// Is this version deleted
+    pub deleted: crdt::Bool,
+    /// list of blocks of data composing the version
+    pub blocks: crdt::Map<VersionBlockKey, VersionBlock>,
+    /// Etag of each part in case of a multipart upload, empty otherwise
+    pub parts_etags: crdt::Map<u64, String>,
+
+    // Back link to bucket+key so that we can figure if
+    // this was deleted later on
+    /// Bucket in which the related object is stored
+    pub bucket: String,
+    /// Key in which the related object is stored
+    pub key: String,
+}
+
+#[derive(PartialEq, Eq, Clone, Copy, Debug, Serialize, Deserialize)]
+pub struct VersionBlockKey {
+    /// Number of the part
+    pub part_number: u64,
+    /// Offset of this sub-segment in its part
+    pub offset: u64,
+}
+
+impl Ord for VersionBlockKey {
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        self.part_number
+            .cmp(&other.part_number)
+            .then(self.offset.cmp(&other.offset))
+    }
+}
+
+impl PartialOrd for VersionBlockKey {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+/// Informations about a single block
+#[derive(PartialEq, Eq, Ord, PartialOrd, Clone, Copy, Debug, Serialize, Deserialize)]
+pub struct VersionBlock {
+    /// Blake2 sum of the block
+    pub hash: Hash,
+    /// Size of the block
+    pub size: u64,
+}
+
+impl AutoCrdt for VersionBlock {
+    const WARN_IF_DIFFERENT: bool = true;
+}
+
+impl Crdt for Version {
+    fn merge(&mut self, other: &Self) {
+        self.deleted.merge(&other.deleted);
+
+        if self.deleted.get() {
+            self.blocks.clear();
+            self.parts_etags.clear();
+        } else {
+            self.blocks.merge(&other.blocks);
+            self.parts_etags.merge(&other.parts_etags);
+        }
+    }
+}
src/model/s3/block_ref_table.rs:

@@ -1,3 +1,4 @@
+use serde::{Deserialize, Serialize};
 use std::sync::Arc;
 
 use garage_db as db;
@@ -9,29 +10,19 @@ use garage_table::*;
 
 use garage_block::manager::*;
 
-mod v08 {
-    use garage_util::crdt;
-    use garage_util::data::{Hash, Uuid};
-    use serde::{Deserialize, Serialize};
-
-    #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
-    pub struct BlockRef {
-        /// Hash (blake2 sum) of the block, used as partition key
-        pub block: Hash,
-
-        /// Id of the Version for the object containing this block, used as sorting key
-        pub version: Uuid,
-
-        // Keep track of deleted status
-        /// Is the Version that contains this block deleted
-        pub deleted: crdt::Bool,
-    }
-
-    impl garage_util::migrate::InitialFormat for BlockRef {}
+#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
+pub struct BlockRef {
+    /// Hash (blake2 sum) of the block, used as partition key
+    pub block: Hash,
+
+    /// Id of the Version for the object containing this block, used as sorting key
+    pub version: Uuid,
+
+    // Keep track of deleted status
+    /// Is the Version that contains this block deleted
+    pub deleted: crdt::Bool,
 }
 
-pub use v08::*;
-
 impl Entry<Hash, Uuid> for BlockRef {
     fn partition_key(&self) -> &Hash {
         &self.block
@ -1,8 +1,10 @@
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
use std::collections::BTreeMap;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use garage_db as db;
|
use garage_db as db;
|
||||||
|
|
||||||
|
use garage_util::background::BackgroundRunner;
|
||||||
use garage_util::data::*;
|
use garage_util::data::*;
|
||||||
|
|
||||||
use garage_table::crdt::*;
|
use garage_table::crdt::*;
|
||||||
|
@ -12,126 +14,25 @@ use garage_table::*;
|
||||||
use crate::index_counter::*;
|
use crate::index_counter::*;
|
||||||
use crate::s3::version_table::*;
|
use crate::s3::version_table::*;
|
||||||
|
|
||||||
|
use crate::prev::v051::object_table as old;
|
||||||
|
|
||||||
pub const OBJECTS: &str = "objects";
|
pub const OBJECTS: &str = "objects";
|
||||||
pub const UNFINISHED_UPLOADS: &str = "unfinished_uploads";
|
pub const UNFINISHED_UPLOADS: &str = "unfinished_uploads";
|
||||||
pub const BYTES: &str = "bytes";
|
pub const BYTES: &str = "bytes";
|
||||||
|
|
||||||
mod v05 {
|
/// An object
|
||||||
use garage_util::data::{Hash, Uuid};
|
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
||||||
use serde::{Deserialize, Serialize};
|
pub struct Object {
|
||||||
use std::collections::BTreeMap;
|
/// The bucket in which the object is stored, used as partition key
|
||||||
|
pub bucket_id: Uuid,
|
||||||
|
|
||||||
/// An object
|
/// The key at which the object is stored in its bucket, used as sorting key
|
||||||
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
pub key: String,
|
||||||
pub struct Object {
|
|
||||||
/// The bucket in which the object is stored, used as partition key
|
|
||||||
pub bucket: String,
|
|
||||||
|
|
||||||
/// The key at which the object is stored in its bucket, used as sorting key
|
/// The list of currenty stored versions of the object
|
||||||
pub key: String,
|
versions: Vec<ObjectVersion>,
|
||||||
|
|
||||||
/// The list of currenty stored versions of the object
|
|
||||||
pub(super) versions: Vec<ObjectVersion>,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Informations about a version of an object
|
|
||||||
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
|
||||||
pub struct ObjectVersion {
|
|
||||||
/// Id of the version
|
|
||||||
pub uuid: Uuid,
|
|
||||||
/// Timestamp of when the object was created
|
|
||||||
pub timestamp: u64,
|
|
||||||
/// State of the version
|
|
||||||
pub state: ObjectVersionState,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// State of an object version
|
|
||||||
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
|
||||||
pub enum ObjectVersionState {
|
|
||||||
/// The version is being received
|
|
||||||
Uploading(ObjectVersionHeaders),
|
|
||||||
/// The version is fully received
|
|
||||||
Complete(ObjectVersionData),
|
|
||||||
/// The version uploaded containded errors or the upload was explicitly aborted
|
|
||||||
Aborted,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Data stored in object version
|
|
||||||
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
|
|
||||||
pub enum ObjectVersionData {
|
|
||||||
/// The object was deleted, this Version is a tombstone to mark it as such
|
|
||||||
DeleteMarker,
|
|
||||||
/// The object is short, it's stored inlined
|
|
||||||
Inline(ObjectVersionMeta, #[serde(with = "serde_bytes")] Vec<u8>),
|
|
||||||
/// The object is not short, Hash of first block is stored here, next segments hashes are
|
|
||||||
/// stored in the version table
|
|
||||||
FirstBlock(ObjectVersionMeta, Hash),
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Metadata about the object version
|
|
||||||
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
|
|
||||||
pub struct ObjectVersionMeta {
|
|
||||||
/// Headers to send to the client
|
|
||||||
pub headers: ObjectVersionHeaders,
|
|
||||||
/// Size of the object
|
|
||||||
pub size: u64,
|
|
||||||
/// etag of the object
|
|
||||||
pub etag: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Additional headers for an object
|
|
||||||
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
|
|
||||||
pub struct ObjectVersionHeaders {
|
|
||||||
/// Content type of the object
|
|
||||||
pub content_type: String,
|
|
||||||
/// Any other http headers to send
|
|
||||||
pub other: BTreeMap<String, String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl garage_util::migrate::InitialFormat for Object {}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
mod v08 {
|
|
||||||
use garage_util::data::Uuid;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
|
|
||||||
use super::v05;
|
|
||||||
|
|
||||||
pub use v05::{
|
|
||||||
ObjectVersion, ObjectVersionData, ObjectVersionHeaders, ObjectVersionMeta,
|
|
||||||
ObjectVersionState,
|
|
||||||
};
|
|
||||||
|
|
||||||
/// An object
|
|
||||||
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
|
||||||
pub struct Object {
|
|
||||||
/// The bucket in which the object is stored, used as partition key
|
|
||||||
pub bucket_id: Uuid,
|
|
||||||
|
|
||||||
/// The key at which the object is stored in its bucket, used as sorting key
|
|
||||||
pub key: String,
|
|
||||||
|
|
||||||
/// The list of currenty stored versions of the object
|
|
||||||
pub(super) versions: Vec<ObjectVersion>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl garage_util::migrate::Migrate for Object {
|
|
||||||
type Previous = v05::Object;
|
|
||||||
|
|
||||||
fn migrate(old: v05::Object) -> Object {
|
|
||||||
use garage_util::data::blake2sum;
|
|
||||||
|
|
||||||
Object {
|
|
||||||
bucket_id: blake2sum(old.bucket.as_bytes()),
|
|
||||||
key: old.key,
|
|
||||||
versions: old.versions,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub use v08::*;
|
|
||||||
|
|
||||||
impl Object {
|
impl Object {
|
||||||
/// Initialize an Object struct from parts
|
/// Initialize an Object struct from parts
|
||||||
pub fn new(bucket_id: Uuid, key: String, versions: Vec<ObjectVersion>) -> Self {
|
pub fn new(bucket_id: Uuid, key: String, versions: Vec<ObjectVersion>) -> Self {
|
||||||
|
@ -168,6 +69,28 @@ impl Object {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Informations about a version of an object
|
||||||
|
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
||||||
|
pub struct ObjectVersion {
|
||||||
|
/// Id of the version
|
||||||
|
pub uuid: Uuid,
|
||||||
|
/// Timestamp of when the object was created
|
||||||
|
pub timestamp: u64,
|
||||||
|
/// State of the version
|
||||||
|
pub state: ObjectVersionState,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// State of an object version
|
||||||
|
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
||||||
|
pub enum ObjectVersionState {
|
||||||
|
/// The version is being received
|
||||||
|
Uploading(ObjectVersionHeaders),
|
||||||
|
/// The version is fully received
|
||||||
|
Complete(ObjectVersionData),
|
||||||
|
/// The version uploaded containded errors or the upload was explicitly aborted
|
||||||
|
Aborted,
|
||||||
|
}
|
||||||
|
|
||||||
impl Crdt for ObjectVersionState {
|
impl Crdt for ObjectVersionState {
|
||||||
fn merge(&mut self, other: &Self) {
|
fn merge(&mut self, other: &Self) {
|
||||||
use ObjectVersionState::*;
|
use ObjectVersionState::*;
|
||||||
|
@ -189,10 +112,42 @@ impl Crdt for ObjectVersionState {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Data stored in object version
|
||||||
|
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
|
||||||
|
pub enum ObjectVersionData {
|
||||||
|
/// The object was deleted, this Version is a tombstone to mark it as such
|
||||||
|
DeleteMarker,
|
||||||
|
/// The object is short, it's stored inlined
|
||||||
|
Inline(ObjectVersionMeta, #[serde(with = "serde_bytes")] Vec<u8>),
|
||||||
|
/// The object is not short, Hash of first block is stored here, next segments hashes are
|
||||||
|
/// stored in the version table
|
||||||
|
FirstBlock(ObjectVersionMeta, Hash),
|
||||||
|
}
|
||||||
|
|
||||||
impl AutoCrdt for ObjectVersionData {
|
impl AutoCrdt for ObjectVersionData {
|
||||||
const WARN_IF_DIFFERENT: bool = true;
|
const WARN_IF_DIFFERENT: bool = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Metadata about the object version
|
||||||
|
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
|
||||||
|
pub struct ObjectVersionMeta {
|
||||||
|
/// Headers to send to the client
|
||||||
|
pub headers: ObjectVersionHeaders,
|
||||||
|
/// Size of the object
|
||||||
|
pub size: u64,
|
||||||
|
/// etag of the object
|
||||||
|
pub etag: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Additional headers for an object
|
||||||
|
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
|
||||||
|
pub struct ObjectVersionHeaders {
|
||||||
|
/// Content type of the object
|
||||||
|
pub content_type: String,
|
||||||
|
/// Any other http headers to send
|
||||||
|
pub other: BTreeMap<String, String>,
|
||||||
|
}
|
||||||
|
|
||||||
impl ObjectVersion {
|
impl ObjectVersion {
|
||||||
fn cmp_key(&self) -> (u64, Uuid) {
|
fn cmp_key(&self) -> (u64, Uuid) {
|
||||||
(self.timestamp, self.uuid)
|
(self.timestamp, self.uuid)
|
||||||
|
@ -266,6 +221,7 @@ impl Crdt for Object {
|
||||||
}
|
}
|
||||||
|
|
||||||
pub struct ObjectTable {
|
pub struct ObjectTable {
|
||||||
|
pub background: Arc<BackgroundRunner>,
|
||||||
pub version_table: Arc<Table<VersionTable, TableShardedReplication>>,
|
pub version_table: Arc<Table<VersionTable, TableShardedReplication>>,
|
||||||
pub object_counter_table: Arc<IndexCounter<Object>>,
|
pub object_counter_table: Arc<IndexCounter<Object>>,
|
||||||
}
|
}
|
||||||
|
@ -299,34 +255,34 @@ impl TableSchema for ObjectTable {
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
// 2. Enqueue propagation deletions to version table
|
// 2. Spawn threads that propagates deletions to version table
|
||||||
if let (Some(old_v), Some(new_v)) = (old, new) {
|
let version_table = self.version_table.clone();
|
||||||
// Propagate deletion of old versions
|
let old = old.cloned();
|
||||||
for v in old_v.versions.iter() {
|
let new = new.cloned();
|
||||||
let newly_deleted = match new_v
|
|
||||||
.versions
|
self.background.spawn(async move {
|
||||||
.binary_search_by(|nv| nv.cmp_key().cmp(&v.cmp_key()))
|
if let (Some(old_v), Some(new_v)) = (old, new) {
|
||||||
{
|
// Propagate deletion of old versions
|
||||||
Err(_) => true,
|
for v in old_v.versions.iter() {
|
||||||
Ok(i) => {
|
let newly_deleted = match new_v
|
||||||
new_v.versions[i].state == ObjectVersionState::Aborted
|
.versions
|
||||||
&& v.state != ObjectVersionState::Aborted
|
.binary_search_by(|nv| nv.cmp_key().cmp(&v.cmp_key()))
|
||||||
}
|
{
|
||||||
};
|
Err(_) => true,
|
||||||
if newly_deleted {
|
Ok(i) => {
|
||||||
let deleted_version =
|
new_v.versions[i].state == ObjectVersionState::Aborted
|
||||||
Version::new(v.uuid, old_v.bucket_id, old_v.key.clone(), true);
|
&& v.state != ObjectVersionState::Aborted
|
||||||
let res = self.version_table.queue_insert(tx, &deleted_version);
|
}
|
||||||
if let Err(e) = db::unabort(res)? {
|
};
|
||||||
error!(
|
if newly_deleted {
|
||||||
"Unable to enqueue version deletion propagation: {}. A repair will be needed.",
|
let deleted_version =
|
||||||
e
|
Version::new(v.uuid, old_v.bucket_id, old_v.key.clone(), true);
|
||||||
);
|
version_table.insert(&deleted_version).await?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
Ok(())
|
||||||
|
});
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -336,6 +292,11 @@ impl TableSchema for ObjectTable {
|
||||||
ObjectFilter::IsUploading => entry.versions.iter().any(|v| v.is_uploading()),
|
ObjectFilter::IsUploading => entry.versions.iter().any(|v| v.is_uploading()),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn try_migrate(bytes: &[u8]) -> Option<Self::E> {
|
||||||
|
let old_obj = rmp_serde::decode::from_read_ref::<_, old::Object>(bytes).ok()?;
|
||||||
|
Some(migrate_object(old_obj))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl CountedItem for Object {
|
impl CountedItem for Object {
|
||||||
@@ -380,3 +341,64 @@ impl CountedItem for Object {
         ]
     }
 }
+
+// vvvvvvvv migration code, stupid stuff vvvvvvvvvvvv
+// (we just want to change bucket into bucket_id by hashing it)
+
+fn migrate_object(o: old::Object) -> Object {
+    let versions = o
+        .versions()
+        .iter()
+        .cloned()
+        .map(migrate_object_version)
+        .collect();
+    Object {
+        bucket_id: blake2sum(o.bucket.as_bytes()),
+        key: o.key,
+        versions,
+    }
+}
+
+fn migrate_object_version(v: old::ObjectVersion) -> ObjectVersion {
+    ObjectVersion {
+        uuid: Uuid::try_from(v.uuid.as_slice()).unwrap(),
+        timestamp: v.timestamp,
+        state: match v.state {
+            old::ObjectVersionState::Uploading(h) => {
+                ObjectVersionState::Uploading(migrate_object_version_headers(h))
+            }
+            old::ObjectVersionState::Complete(d) => {
+                ObjectVersionState::Complete(migrate_object_version_data(d))
+            }
+            old::ObjectVersionState::Aborted => ObjectVersionState::Aborted,
+        },
+    }
+}
+
+fn migrate_object_version_headers(h: old::ObjectVersionHeaders) -> ObjectVersionHeaders {
+    ObjectVersionHeaders {
+        content_type: h.content_type,
+        other: h.other,
+    }
+}
+
+fn migrate_object_version_data(d: old::ObjectVersionData) -> ObjectVersionData {
+    match d {
+        old::ObjectVersionData::DeleteMarker => ObjectVersionData::DeleteMarker,
+        old::ObjectVersionData::Inline(m, b) => {
+            ObjectVersionData::Inline(migrate_object_version_meta(m), b)
+        }
+        old::ObjectVersionData::FirstBlock(m, h) => ObjectVersionData::FirstBlock(
+            migrate_object_version_meta(m),
+            Hash::try_from(h.as_slice()).unwrap(),
+        ),
+    }
+}
+
+fn migrate_object_version_meta(m: old::ObjectVersionMeta) -> ObjectVersionMeta {
+    ObjectVersionMeta {
+        headers: migrate_object_version_headers(m.headers),
+        size: m.size,
+        etag: m.etag,
+    }
+}
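The migration above turns a bucket name into a bucket_id by hashing it, which makes the mapping deterministic across all nodes. A stand-alone sketch of the same idea, using the blake2 and hex crates in place of Garage's garage_util::data helpers (the crate choice is an assumption; the digest is the same 32-byte Blake2b):

    use blake2::digest::consts::U32;
    use blake2::{Blake2b, Digest};

    // Deterministically derive a 32-byte bucket id from a bucket name,
    // mirroring `bucket_id: blake2sum(o.bucket.as_bytes())` above.
    fn bucket_id_from_name(name: &str) -> [u8; 32] {
        let mut hasher = Blake2b::<U32>::new();
        hasher.update(name.as_bytes());
        hasher.finalize().into()
    }

    fn main() {
        // Every node computes the same id, so the migration needs no coordination.
        println!("{}", hex::encode(bucket_id_from_name("my-bucket")));
    }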
@@ -1,7 +1,9 @@
+use serde::{Deserialize, Serialize};
 use std::sync::Arc;

 use garage_db as db;

+use garage_util::background::BackgroundRunner;
 use garage_util::data::*;

 use garage_table::crdt::*;
@@ -10,108 +12,32 @@ use garage_table::*;

 use crate::s3::block_ref_table::*;

-mod v05 {
-    use garage_util::crdt;
-    use garage_util::data::{Hash, Uuid};
-    use serde::{Deserialize, Serialize};
-
-    /// A version of an object
-    #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
-    pub struct Version {
-        /// UUID of the version, used as partition key
-        pub uuid: Uuid,
-
-        // Actual data: the blocks for this version
-        // In the case of a multipart upload, also store the etags
-        // of individual parts and check them when doing CompleteMultipartUpload
-        /// Is this version deleted
-        pub deleted: crdt::Bool,
-        /// list of blocks of data composing the version
-        pub blocks: crdt::Map<VersionBlockKey, VersionBlock>,
-        /// Etag of each part in case of a multipart upload, empty otherwise
-        pub parts_etags: crdt::Map<u64, String>,
-
-        // Back link to bucket+key so that we can figure if
-        // this was deleted later on
-        /// Bucket in which the related object is stored
-        pub bucket: String,
-        /// Key in which the related object is stored
-        pub key: String,
-    }
-
-    #[derive(PartialEq, Eq, Clone, Copy, Debug, Serialize, Deserialize)]
-    pub struct VersionBlockKey {
-        /// Number of the part
-        pub part_number: u64,
-        /// Offset of this sub-segment in its part
-        pub offset: u64,
-    }
-
-    /// Informations about a single block
-    #[derive(PartialEq, Eq, Ord, PartialOrd, Clone, Copy, Debug, Serialize, Deserialize)]
-    pub struct VersionBlock {
-        /// Blake2 sum of the block
-        pub hash: Hash,
-        /// Size of the block
-        pub size: u64,
-    }
-
-    impl garage_util::migrate::InitialFormat for Version {}
-}
-
-mod v08 {
-    use garage_util::crdt;
-    use garage_util::data::Uuid;
-    use serde::{Deserialize, Serialize};
-
-    use super::v05;
-
-    /// A version of an object
-    #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
-    pub struct Version {
-        /// UUID of the version, used as partition key
-        pub uuid: Uuid,
-
-        // Actual data: the blocks for this version
-        // In the case of a multipart upload, also store the etags
-        // of individual parts and check them when doing CompleteMultipartUpload
-        /// Is this version deleted
-        pub deleted: crdt::Bool,
-        /// list of blocks of data composing the version
-        pub blocks: crdt::Map<VersionBlockKey, VersionBlock>,
-        /// Etag of each part in case of a multipart upload, empty otherwise
-        pub parts_etags: crdt::Map<u64, String>,
-
-        // Back link to bucket+key so that we can figure if
-        // this was deleted later on
-        /// Bucket in which the related object is stored
-        pub bucket_id: Uuid,
-        /// Key in which the related object is stored
-        pub key: String,
-    }
-
-    pub use v05::{VersionBlock, VersionBlockKey};
-
-    impl garage_util::migrate::Migrate for Version {
-        type Previous = v05::Version;
-
-        fn migrate(old: v05::Version) -> Version {
-            use garage_util::data::blake2sum;
-
-            Version {
-                uuid: old.uuid,
-                deleted: old.deleted,
-                blocks: old.blocks,
-                parts_etags: old.parts_etags,
-                bucket_id: blake2sum(old.bucket.as_bytes()),
-                key: old.key,
-            }
-        }
-    }
-}
-
-pub use v08::*;
+use crate::prev::v051::version_table as old;
+
+/// A version of an object
+#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
+pub struct Version {
+    /// UUID of the version, used as partition key
+    pub uuid: Uuid,
+
+    // Actual data: the blocks for this version
+    // In the case of a multipart upload, also store the etags
+    // of individual parts and check them when doing CompleteMultipartUpload
+    /// Is this version deleted
+    pub deleted: crdt::Bool,
+    /// list of blocks of data composing the version
+    pub blocks: crdt::Map<VersionBlockKey, VersionBlock>,
+    /// Etag of each part in case of a multipart upload, empty otherwise
+    pub parts_etags: crdt::Map<u64, String>,

+    // Back link to bucket+key so that we can figure if
+    // this was deleted later on
+    /// Bucket in which the related object is stored
+    pub bucket_id: Uuid,
+    /// Key in which the related object is stored
+    pub key: String,
+}

 impl Version {
     pub fn new(uuid: Uuid, bucket_id: Uuid, key: String, deleted: bool) -> Self {
         Self {
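The left (0.8.1) side of this hunk shows the versioned-schema pattern that replaced the hand-written try_migrate: each on-disk format lives in its own module (v05, v08), the oldest is marked InitialFormat, and each later one implements Migrate with type Previous pointing at its predecessor. A toy sketch of the shape of that pattern (illustrative names and a fake hash, not garage_util::migrate itself):

    mod v1 {
        pub struct Record {
            pub bucket: String,
        }
    }

    mod v2 {
        pub struct Record {
            pub bucket_id: u64,
        }

        impl Record {
            // Each version only knows how to migrate from the previous one,
            // so decoding old data walks the chain v1 -> v2 -> ...
            pub fn migrate(old: super::v1::Record) -> Self {
                let bucket_id: u64 = old.bucket.bytes().map(u64::from).sum(); // stand-in for blake2sum
                Record { bucket_id }
            }
        }
    }

    // The current schema is whatever the latest module exports.
    use v2::Record;

    fn main() {
        let old = v1::Record { bucket: "my-bucket".into() };
        let new = Record::migrate(old);
        println!("bucket_id = {}", new.bucket_id);
    }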
@@ -139,6 +65,14 @@ impl Version {
     }
 }

+#[derive(PartialEq, Eq, Clone, Copy, Debug, Serialize, Deserialize)]
+pub struct VersionBlockKey {
+    /// Number of the part
+    pub part_number: u64,
+    /// Offset of this sub-segment in its part
+    pub offset: u64,
+}
+
 impl Ord for VersionBlockKey {
     fn cmp(&self, other: &Self) -> std::cmp::Ordering {
         self.part_number
@@ -153,6 +87,15 @@ impl PartialOrd for VersionBlockKey {
     }
 }

+/// Informations about a single block
+#[derive(PartialEq, Eq, Ord, PartialOrd, Clone, Copy, Debug, Serialize, Deserialize)]
+pub struct VersionBlock {
+    /// Blake2 sum of the block
+    pub hash: Hash,
+    /// Size of the block
+    pub size: u64,
+}
+
 impl AutoCrdt for VersionBlock {
     const WARN_IF_DIFFERENT: bool = true;
 }
@@ -184,6 +127,7 @@ impl Crdt for Version {
 }

 pub struct VersionTable {
+    pub background: Arc<BackgroundRunner>,
     pub block_ref_table: Arc<Table<BlockRefTable, TableShardedReplication>>,
 }

@@ -197,26 +141,33 @@ impl TableSchema for VersionTable {

     fn updated(
         &self,
-        tx: &mut db::Transaction,
+        _tx: &mut db::Transaction,
         old: Option<&Self::E>,
         new: Option<&Self::E>,
     ) -> db::TxOpResult<()> {
-        if let (Some(old_v), Some(new_v)) = (old, new) {
-            // Propagate deletion of version blocks
-            if new_v.deleted.get() && !old_v.deleted.get() {
-                let deleted_block_refs = old_v.blocks.items().iter().map(|(_k, vb)| BlockRef {
-                    block: vb.hash,
-                    version: old_v.uuid,
-                    deleted: true.into(),
-                });
-                for block_ref in deleted_block_refs {
-                    let res = self.block_ref_table.queue_insert(tx, &block_ref);
-                    if let Err(e) = db::unabort(res)? {
-                        error!("Unable to enqueue block ref deletion propagation: {}. A repair will be needed.", e);
-                    }
+        let block_ref_table = self.block_ref_table.clone();
+        let old = old.cloned();
+        let new = new.cloned();
+
+        self.background.spawn(async move {
+            if let (Some(old_v), Some(new_v)) = (old, new) {
+                // Propagate deletion of version blocks
+                if new_v.deleted.get() && !old_v.deleted.get() {
+                    let deleted_block_refs = old_v
+                        .blocks
+                        .items()
+                        .iter()
+                        .map(|(_k, vb)| BlockRef {
+                            block: vb.hash,
+                            version: old_v.uuid,
+                            deleted: true.into(),
+                        })
+                        .collect::<Vec<_>>();
+                    block_ref_table.insert_many(&deleted_block_refs[..]).await?;
                 }
             }
-        }
+            Ok(())
+        });

         Ok(())
     }
@@ -224,4 +175,42 @@ impl TableSchema for VersionTable {
     fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool {
         filter.apply(entry.deleted.get())
     }
+
+    fn try_migrate(bytes: &[u8]) -> Option<Self::E> {
+        let old = rmp_serde::decode::from_read_ref::<_, old::Version>(bytes).ok()?;
+
+        let blocks = old
+            .blocks
+            .items()
+            .iter()
+            .map(|(k, v)| {
+                (
+                    VersionBlockKey {
+                        part_number: k.part_number,
+                        offset: k.offset,
+                    },
+                    VersionBlock {
+                        hash: Hash::try_from(v.hash.as_slice()).unwrap(),
+                        size: v.size,
+                    },
+                )
+            })
+            .collect::<crdt::Map<_, _>>();
+
+        let parts_etags = old
+            .parts_etags
+            .items()
+            .iter()
+            .map(|(k, v)| (*k, v.clone()))
+            .collect::<crdt::Map<_, _>>();
+
+        Some(Version {
+            uuid: Hash::try_from(old.uuid.as_slice()).unwrap(),
+            deleted: crdt::Bool::new(old.deleted.get()),
+            blocks,
+            parts_etags,
+            bucket_id: blake2sum(old.bucket.as_bytes()),
+            key: old.key,
+        })
+    }
 }

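try_migrate above returns Option, which lets the caller fall back cleanly: decoding first tries the current encoding and only then the previous one, and a None from both becomes a decode error. A self-contained toy with two fake formats (a big-endian u32 as the current format, ASCII decimal as the old one):

    fn decode_current(bytes: &[u8]) -> Option<u32> {
        <[u8; 4]>::try_from(bytes).ok().map(u32::from_be_bytes)
    }

    fn try_migrate(bytes: &[u8]) -> Option<u32> {
        // "old on-disk format": a decimal string
        std::str::from_utf8(bytes).ok()?.parse().ok()
    }

    fn decode_entry(bytes: &[u8]) -> Result<u32, String> {
        decode_current(bytes)
            .or_else(|| try_migrate(bytes))
            .ok_or_else(|| "unable to decode entry".to_string())
    }

    fn main() {
        assert_eq!(decode_entry(&[0, 0, 0, 7]), Ok(7)); // current format
        assert_eq!(decode_entry(b"7"), Ok(7));          // migrated on the fly
    }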
@@ -1,6 +1,6 @@
 [package]
 name = "garage_rpc"
-version = "0.8.1"
+version = "0.8.0"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -14,7 +14,7 @@ path = "lib.rs"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

 [dependencies]
-garage_util = { version = "0.8.1", path = "../util" }
+garage_util = { version = "0.8.0", path = "../util" }

 arc-swap = "1.0"
 bytes = "1.0"
@@ -25,6 +25,7 @@ rand = "0.8"
 sodiumoxide = { version = "0.2.5-0", package = "kuska-sodiumoxide" }

 async-trait = "0.1.7"
+rmp-serde = "0.15"
 serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
 serde_bytes = "0.11"
 serde_json = "1.0"

@@ -5,7 +5,6 @@ use serde::{Deserialize, Serialize};

 use garage_util::crdt::{AutoCrdt, Crdt, LwwMap};
 use garage_util::data::*;
-use garage_util::encode::nonversioned_encode;
 use garage_util::error::*;

 use crate::ring::*;
@@ -36,8 +35,6 @@ pub struct ClusterLayout {
     pub staging_hash: Hash,
 }

-impl garage_util::migrate::InitialFormat for ClusterLayout {}
-
 #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
 pub struct NodeRoleV(pub Option<NodeRole>);

@@ -71,7 +68,7 @@ impl NodeRole {
 impl ClusterLayout {
     pub fn new(replication_factor: usize) -> Self {
         let empty_lwwmap = LwwMap::new();
-        let empty_lwwmap_hash = blake2sum(&nonversioned_encode(&empty_lwwmap).unwrap()[..]);
+        let empty_lwwmap_hash = blake2sum(&rmp_to_vec_all_named(&empty_lwwmap).unwrap()[..]);

         ClusterLayout {
             version: 0,
@@ -93,7 +90,7 @@ impl ClusterLayout {
             Ordering::Equal => {
                 self.staging.merge(&other.staging);

-                let new_staging_hash = blake2sum(&nonversioned_encode(&self.staging).unwrap()[..]);
+                let new_staging_hash = blake2sum(&rmp_to_vec_all_named(&self.staging).unwrap()[..]);
                 let changed = new_staging_hash != self.staging_hash;

                 self.staging_hash = new_staging_hash;
@@ -128,7 +125,7 @@ To know the correct value of the new layout version, invoke `garage layout show`
         }

         self.staging.clear();
-        self.staging_hash = blake2sum(&nonversioned_encode(&self.staging).unwrap()[..]);
+        self.staging_hash = blake2sum(&rmp_to_vec_all_named(&self.staging).unwrap()[..]);

         self.version += 1;

@@ -152,7 +149,7 @@ To know the correct value of the new layout version, invoke `garage layout show`
         }

         self.staging.clear();
-        self.staging_hash = blake2sum(&nonversioned_encode(&self.staging).unwrap()[..]);
+        self.staging_hash = blake2sum(&rmp_to_vec_all_named(&self.staging).unwrap()[..]);

         self.version += 1;

@@ -181,7 +178,7 @@ To know the correct value of the new layout version, invoke `garage layout show`
     /// returns true if consistent, false if error
     pub fn check(&self) -> bool {
         // Check that the hash of the staging data is correct
-        let staging_hash = blake2sum(&nonversioned_encode(&self.staging).unwrap()[..]);
+        let staging_hash = blake2sum(&rmp_to_vec_all_named(&self.staging).unwrap()[..]);
         if staging_hash != self.staging_hash {
             return false;
         }

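Throughout this hunk, staged layout changes are detected by comparing hashes of the serialized staging map rather than the maps themselves; only the encoding helper differs between the two histories (nonversioned_encode vs rmp_to_vec_all_named), both producing MessagePack. A stand-alone sketch of that idiom, assuming the rmp-serde and blake2 crates as stand-ins for Garage's helpers:

    use blake2::digest::consts::U32;
    use blake2::{Blake2b, Digest};
    use serde::Serialize;

    fn staging_hash<T: Serialize>(staging: &T) -> [u8; 32] {
        let bytes = rmp_serde::to_vec_named(staging).expect("serialization cannot fail");
        let mut h = Blake2b::<U32>::new();
        h.update(&bytes);
        h.finalize().into()
    }

    #[derive(Serialize)]
    struct Staging {
        roles: Vec<(String, u32)>,
    }

    fn main() {
        let mut s = Staging { roles: vec![("node1".into(), 100)] };
        let before = staging_hash(&s);
        s.roles.push(("node2".into(), 200));
        // A differing hash is what merge() uses to decide that staging changed.
        assert_ne!(before, staging_hash(&s));
    }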
@@ -17,5 +17,3 @@ mod metrics;
 pub mod rpc_helper;

 pub use rpc_helper::*;
-
-pub mod system_metrics;

@@ -5,6 +5,7 @@ use std::time::Duration;
 use futures::future::join_all;
 use futures::stream::futures_unordered::FuturesUnordered;
 use futures::stream::StreamExt;
+use futures_util::future::FutureExt;
 use tokio::select;
 use tokio::sync::watch;

@@ -23,6 +24,7 @@ pub use netapp::message::{
 use netapp::peering::fullmesh::FullMeshPeeringStrategy;
 pub use netapp::{self, NetApp, NodeID};

+use garage_util::background::BackgroundRunner;
 use garage_util::data::*;
 use garage_util::error::Error;
 use garage_util::metrics::RecordDuration;
@@ -92,6 +94,7 @@ pub struct RpcHelper(Arc<RpcHelperInner>);
 struct RpcHelperInner {
     our_node_id: Uuid,
     fullmesh: Arc<FullMeshPeeringStrategy>,
+    background: Arc<BackgroundRunner>,
     ring: watch::Receiver<Arc<Ring>>,
     metrics: RpcMetrics,
     rpc_timeout: Duration,
@@ -101,6 +104,7 @@ impl RpcHelper {
     pub(crate) fn new(
         our_node_id: Uuid,
         fullmesh: Arc<FullMeshPeeringStrategy>,
+        background: Arc<BackgroundRunner>,
         ring: watch::Receiver<Arc<Ring>>,
         rpc_timeout: Option<Duration>,
     ) -> Self {
@@ -109,6 +113,7 @@ impl RpcHelper {
         Self(Arc::new(RpcHelperInner {
             our_node_id,
             fullmesh,
+            background,
             ring,
             metrics,
             rpc_timeout: rpc_timeout.unwrap_or(DEFAULT_TIMEOUT),
@@ -372,13 +377,16 @@ impl RpcHelper {

         if !resp_stream.is_empty() {
             // Continue remaining requests in background.
-            // Note: these requests can get interrupted on process shutdown,
-            // we must not count on them being executed for certain.
-            // For all background things that have to happen with certainty,
-            // they have to be put in a proper queue that is persisted to disk.
-            tokio::spawn(async move {
+            // Continue the remaining requests immediately using tokio::spawn
+            // but enqueue a task in the background runner
+            // to ensure that the process won't exit until the requests are done
+            // (if we had just enqueued the resp_stream.collect directly in the background runner,
+            // the requests might have been put on hold in the background runner's queue,
+            // in which case they might timeout or otherwise fail)
+            let wait_finished_fut = tokio::spawn(async move {
                 resp_stream.collect::<Vec<Result<_, _>>>().await;
             });
+            self.0.background.spawn(wait_finished_fut.map(|_| Ok(())));
         }
     }

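The hunk above tweaks a quorum-RPC detail: the caller returns as soon as enough responses have arrived, and the remaining in-flight requests are left to complete in a detached task (0.8.1 accepts that they may be cut short on shutdown; 0.8.0 additionally hands the JoinHandle to the BackgroundRunner so shutdown waits for them). A toy reproduction of the quorum-then-detach pattern, assuming the tokio (with default/full features) and futures crates:

    use futures::stream::{FuturesUnordered, StreamExt};

    #[tokio::main]
    async fn main() {
        let mut resp_stream: FuturesUnordered<_> = (0u64..5)
            .map(|i| async move {
                tokio::time::sleep(std::time::Duration::from_millis(10 * i)).await;
                i
            })
            .collect();

        let quorum = 3;
        let mut results = Vec::new();
        while let Some(r) = resp_stream.next().await {
            results.push(r);
            if results.len() >= quorum {
                break; // answer the caller now
            }
        }

        // Stragglers finish in a detached task; 0.8.0 also parks this handle
        // in the BackgroundRunner so the process does not exit under it.
        tokio::spawn(async move {
            resp_stream.collect::<Vec<_>>().await;
        });

        println!("{:?}", results);
    }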
@@ -21,6 +21,7 @@ use netapp::peering::fullmesh::FullMeshPeeringStrategy;
 use netapp::util::parse_and_resolve_peer_addr_async;
 use netapp::{NetApp, NetworkKey, NodeID, NodeKey};

+use garage_util::background::BackgroundRunner;
 use garage_util::config::Config;
 #[cfg(feature = "kubernetes-discovery")]
 use garage_util::config::KubernetesDiscoveryConfig;
@@ -38,9 +39,6 @@ use crate::replication_mode::*;
 use crate::ring::*;
 use crate::rpc_helper::*;

-#[cfg(feature = "metrics")]
-use crate::system_metrics::*;
-
 const DISCOVERY_INTERVAL: Duration = Duration::from_secs(60);
 const STATUS_EXCHANGE_INTERVAL: Duration = Duration::from_secs(10);

@@ -52,6 +50,8 @@ pub const GARAGE_VERSION_TAG: u64 = 0x6761726167650008; // garage 0x0008
 /// RPC endpoint used for calls related to membership
 pub const SYSTEM_RPC_PATH: &str = "garage_rpc/membership.rs/SystemRpc";

+pub const CONNECT_ERROR_MESSAGE: &str = "Error establishing RPC connection to remote node. This can happen if the remote node is not reachable on the network, but also if the two nodes are not configured with the same rpc_secret";
+
 /// RPC messages related to membership
 #[derive(Debug, Serialize, Deserialize, Clone)]
 pub enum SystemRpc {
@@ -76,17 +76,13 @@ impl Rpc for SystemRpc {
     type Response = Result<SystemRpc, Error>;
 }

-#[derive(Serialize, Deserialize)]
-pub struct PeerList(Vec<(Uuid, SocketAddr)>);
-impl garage_util::migrate::InitialFormat for PeerList {}
-
 /// This node's membership manager
 pub struct System {
     /// The id of this node
     pub id: Uuid,

     persist_cluster_layout: Persister<ClusterLayout>,
-    persist_peer_list: Persister<PeerList>,
+    persist_peer_list: Persister<Vec<(Uuid, SocketAddr)>>,

     local_status: ArcSwap<NodeStatus>,
     node_status: RwLock<HashMap<Uuid, (u64, NodeStatus)>>,
@@ -106,8 +102,6 @@ pub struct System {
     consul_discovery: Option<ConsulDiscovery>,
     #[cfg(feature = "kubernetes-discovery")]
     kubernetes_discovery: Option<KubernetesDiscoveryConfig>,
-    #[cfg(feature = "metrics")]
-    metrics: SystemMetrics,

     replication_mode: ReplicationMode,
     replication_factor: usize,
@@ -116,6 +110,9 @@ pub struct System {
     pub ring: watch::Receiver<Arc<Ring>>,
     update_ring: Mutex<watch::Sender<Arc<Ring>>>,

+    /// The job runner of this node
+    pub background: Arc<BackgroundRunner>,
+
     /// Path to metadata directory
     pub metadata_dir: PathBuf,
 }
@@ -235,6 +232,7 @@ impl System {
     /// Create this node's membership manager
     pub fn new(
         network_key: NetworkKey,
+        background: Arc<BackgroundRunner>,
         replication_mode: ReplicationMode,
         config: &Config,
     ) -> Result<Arc<Self>, Error> {
@@ -280,9 +278,6 @@ impl System {
             cluster_layout_staging_hash: cluster_layout.staging_hash,
         };

-        #[cfg(feature = "metrics")]
-        let metrics = SystemMetrics::new(replication_factor);
-
         let ring = Ring::new(cluster_layout, replication_factor);
         let (update_ring, ring) = watch::channel(Arc::new(ring));

@@ -359,6 +354,7 @@ impl System {
             rpc: RpcHelper::new(
                 netapp.id.into(),
                 fullmesh,
+                background.clone(),
                 ring.clone(),
                 config.rpc_timeout_msec.map(Duration::from_millis),
             ),
@@ -373,11 +369,10 @@ impl System {
             consul_discovery,
             #[cfg(feature = "kubernetes-discovery")]
             kubernetes_discovery: config.kubernetes_discovery.clone(),
-            #[cfg(feature = "metrics")]
-            metrics,

             ring,
             update_ring: Mutex::new(update_ring),
+            background,
             metadata_dir: config.metadata_dir.clone(),
         });
         sys.system_endpoint.set_handler(sys.clone());
@@ -449,14 +444,17 @@ impl System {
             ))
         })?;
         let mut errors = vec![];
-        for addr in addrs.iter() {
-            match self.netapp.clone().try_connect(*addr, pubkey).await {
+        for ip in addrs.iter() {
+            match self
+                .netapp
+                .clone()
+                .try_connect(*ip, pubkey)
+                .await
+                .err_context(CONNECT_ERROR_MESSAGE)
+            {
                 Ok(()) => return Ok(()),
                 Err(e) => {
-                    errors.push((
-                        *addr,
-                        Error::Message(connect_error_message(*addr, pubkey, e)),
-                    ));
+                    errors.push((*ip, e));
                 }
             }
         }
@@ -531,61 +529,56 @@ impl System {
     // ---- INTERNALS ----

     #[cfg(feature = "consul-discovery")]
-    async fn advertise_to_consul(self: Arc<Self>) {
+    async fn advertise_to_consul(self: Arc<Self>) -> Result<(), Error> {
         let c = match &self.consul_discovery {
             Some(c) => c,
-            _ => return,
+            _ => return Ok(()),
         };

         let rpc_public_addr = match self.rpc_public_addr {
             Some(addr) => addr,
             None => {
                 warn!("Not advertising to Consul because rpc_public_addr is not defined in config file and could not be autodetected.");
-                return;
+                return Ok(());
             }
         };

-        if let Err(e) = c
-            .publish_consul_service(
-                self.netapp.id,
-                &self.local_status.load_full().hostname,
-                rpc_public_addr,
-            )
-            .await
-        {
-            error!("Error while publishing Consul service: {}", e);
-        }
+        c.publish_consul_service(
+            self.netapp.id,
+            &self.local_status.load_full().hostname,
+            rpc_public_addr,
+        )
+        .await
+        .err_context("Error while publishing Consul service")
     }

     #[cfg(feature = "kubernetes-discovery")]
-    async fn advertise_to_kubernetes(self: Arc<Self>) {
+    async fn advertise_to_kubernetes(self: Arc<Self>) -> Result<(), Error> {
         let k = match &self.kubernetes_discovery {
             Some(k) => k,
-            _ => return,
+            _ => return Ok(()),
         };

         let rpc_public_addr = match self.rpc_public_addr {
             Some(addr) => addr,
             None => {
                 warn!("Not advertising to Kubernetes because rpc_public_addr is not defined in config file and could not be autodetected.");
-                return;
+                return Ok(());
             }
         };

-        if let Err(e) = publish_kubernetes_node(
+        publish_kubernetes_node(
             k,
             self.netapp.id,
             &self.local_status.load_full().hostname,
             rpc_public_addr,
         )
         .await
-        {
-            error!("Error while publishing node to Kubernetes: {}", e);
-        }
+        .err_context("Error while publishing node to kubernetes")
     }

     /// Save network configuration to disc
-    async fn save_cluster_layout(&self) -> Result<(), Error> {
+    async fn save_cluster_layout(self: Arc<Self>) -> Result<(), Error> {
         let ring: Arc<Ring> = self.ring.borrow().clone();
         self.persist_cluster_layout
             .save_async(&ring.layout)
@@ -637,7 +630,11 @@ impl System {
         if info.cluster_layout_version > local_info.cluster_layout_version
             || info.cluster_layout_staging_hash != local_info.cluster_layout_staging_hash
         {
-            tokio::spawn(self.clone().pull_cluster_layout(from));
+            let self2 = self.clone();
+            self.background.spawn_cancellable(async move {
+                self2.pull_cluster_layout(from).await;
+                Ok(())
+            });
         }

         self.node_status
@@ -679,21 +676,18 @@ impl System {
             drop(update_ring);

             let self2 = self.clone();
-            tokio::spawn(async move {
-                if let Err(e) = self2
+            self.background.spawn_cancellable(async move {
+                self2
                     .rpc
                     .broadcast(
                         &self2.system_endpoint,
                         SystemRpc::AdvertiseClusterLayout(layout),
                         RequestStrategy::with_priority(PRIO_HIGH),
                     )
-                    .await
-                {
-                    warn!("Error while broadcasting new cluster layout: {}", e);
-                }
+                    .await?;
+                Ok(())
             });
-
-            self.save_cluster_layout().await?;
+            self.background.spawn(self.clone().save_cluster_layout());
         }

         Ok(SystemRpc::Ok)
@@ -740,7 +734,7 @@ impl System {

         // Add peer list from list stored on disk
         if let Ok(peers) = self.persist_peer_list.load_async().await {
-            ping_list.extend(peers.0.iter().map(|(id, addr)| ((*id).into(), *addr)))
+            ping_list.extend(peers.iter().map(|(id, addr)| ((*id).into(), *addr)))
         }

         // Fetch peer list from Consul
@@ -779,12 +773,12 @@ impl System {
         }

         for (node_id, node_addr) in ping_list {
-            let self2 = self.clone();
-            tokio::spawn(async move {
-                if let Err(e) = self2.netapp.clone().try_connect(node_addr, node_id).await {
-                    error!("{}", connect_error_message(node_addr, node_id, e));
-                }
-            });
+            tokio::spawn(
+                self.netapp
+                    .clone()
+                    .try_connect(node_addr, node_id)
+                    .map(|r| r.err_context(CONNECT_ERROR_MESSAGE)),
+            );
         }
     }

@@ -793,10 +787,11 @@ impl System {
         }

         #[cfg(feature = "consul-discovery")]
-        tokio::spawn(self.clone().advertise_to_consul());
+        self.background.spawn(self.clone().advertise_to_consul());

         #[cfg(feature = "kubernetes-discovery")]
-        tokio::spawn(self.clone().advertise_to_kubernetes());
+        self.background
+            .spawn(self.clone().advertise_to_kubernetes());

         let restart_at = tokio::time::sleep(DISCOVERY_INTERVAL);
         select! {
@@ -820,16 +815,12 @@ impl System {
         // and append it to the list we are about to save,
         // so that no peer ID gets lost in the process.
         if let Ok(mut prev_peer_list) = self.persist_peer_list.load_async().await {
-            prev_peer_list
-                .0
-                .retain(|(id, _ip)| peer_list.iter().all(|(id2, _ip2)| id2 != id));
-            peer_list.extend(prev_peer_list.0);
+            prev_peer_list.retain(|(id, _ip)| peer_list.iter().all(|(id2, _ip2)| id2 != id));
+            peer_list.extend(prev_peer_list);
         }

         // Save new peer list to file
-        self.persist_peer_list
-            .save_async(&PeerList(peer_list))
-            .await
+        self.persist_peer_list.save_async(&peer_list).await
     }

     async fn pull_cluster_layout(self: Arc<Self>, peer: Uuid) {
@@ -890,11 +881,3 @@ async fn resolve_peers(peers: &[String]) -> Vec<(NodeID, SocketAddr)> {

     ret
 }
-
-fn connect_error_message(
-    addr: SocketAddr,
-    pubkey: ed25519::PublicKey,
-    e: netapp::error::Error,
-) -> String {
-    format!("Error establishing RPC connection to remote node: {}@{}.\nThis can happen if the remote node is not reachable on the network, but also if the two nodes are not configured with the same rpc_secret.\n{}", hex::encode(pubkey), addr, e)
-}

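One small but telling change in this file: 0.8.1 wraps the persisted peer list in a PeerList newtype marked InitialFormat, where 0.8.0 persisted a bare Vec<(Uuid, SocketAddr)>. A named wrapper gives the on-disk format an identity that future migrations can reference. A sketch with illustrative stand-ins (Uuid simplified to a byte array, marker trait defined locally rather than garage_util::migrate's):

    use serde::{Deserialize, Serialize};
    use std::net::SocketAddr;

    type Uuid = [u8; 32];

    #[derive(Serialize, Deserialize)]
    pub struct PeerList(pub Vec<(Uuid, SocketAddr)>);

    // Local stand-in; in Garage this is
    // `impl garage_util::migrate::InitialFormat for PeerList {}`, and a later
    // format can then declare `type Previous = PeerList;`.
    trait InitialFormat: Serialize + for<'de> Deserialize<'de> {}
    impl InitialFormat for PeerList {}

    fn main() {
        let list = PeerList(vec![([0u8; 32], "127.0.0.1:3901".parse().unwrap())]);
        println!("{} peer(s) to persist", list.0.len());
    }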
@@ -1,33 +0,0 @@
-use opentelemetry::{global, metrics::*, KeyValue};
-
-/// TableMetrics reference all counter used for metrics
-pub struct SystemMetrics {
-    pub(crate) _garage_build_info: ValueObserver<u64>,
-    pub(crate) _replication_factor: ValueObserver<u64>,
-}
-
-impl SystemMetrics {
-    pub fn new(replication_factor: usize) -> Self {
-        let meter = global::meter("garage_system");
-        Self {
-            _garage_build_info: meter
-                .u64_value_observer("garage_build_info", move |observer| {
-                    observer.observe(
-                        1,
-                        &[KeyValue::new(
-                            "version",
-                            garage_util::version::garage_version(),
-                        )],
-                    )
-                })
-                .with_description("Garage build info")
-                .init(),
-            _replication_factor: meter
-                .u64_value_observer("garage_replication_factor", move |observer| {
-                    observer.observe(replication_factor as u64, &[])
-                })
-                .with_description("Garage replication factor setting")
-                .init(),
-        }
-    }
-}

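A note on the deleted file above: the two observers are held in fields named with a leading underscore and never read. With OpenTelemetry's callback-based observers, the metric keeps reporting only while its handle is alive, so SystemMetrics exists purely to own the handles. A minimal sketch of that ownership pattern (local stand-in type, not the opentelemetry 0.17 API):

    // Stand-in for opentelemetry::metrics::ValueObserver<u64>.
    struct GaugeHandle(&'static str);

    impl Drop for GaugeHandle {
        fn drop(&mut self) {
            println!("gauge {} unregistered", self.0); // reporting stops here
        }
    }

    struct SystemMetrics {
        _replication_factor: GaugeHandle, // kept only to pin the gauge's lifetime
    }

    fn main() {
        let metrics = SystemMetrics {
            _replication_factor: GaugeHandle("garage_replication_factor"),
        };
        // ... metrics are exported while `metrics` is alive ...
        drop(metrics); // dropping the struct is what stops the reporting
    }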
@@ -1,6 +1,6 @@
 [package]
 name = "garage_table"
-version = "0.8.1"
+version = "0.8.0"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -14,20 +14,20 @@ path = "lib.rs"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

 [dependencies]
-garage_db = { version = "0.8.1", path = "../db" }
-garage_rpc = { version = "0.8.1", path = "../rpc" }
-garage_util = { version = "0.8.1", path = "../util" }
+garage_db = { version = "0.8.0", path = "../db" }
+garage_rpc = { version = "0.8.0", path = "../rpc" }
+garage_util = { version = "0.8.0", path = "../util" }

 opentelemetry = "0.17"

 async-trait = "0.1.7"
-arc-swap = "1.0"
 bytes = "1.0"
 hex = "0.4"
 hexdump = "0.1"
 tracing = "0.1.30"
 rand = "0.8"

+rmp-serde = "0.15"
 serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
 serde_bytes = "0.11"

@@ -10,7 +10,6 @@ use garage_db::counted_tree_hack::CountedTree;

 use garage_util::data::*;
 use garage_util::error::*;
-use garage_util::migrate::Migrate;

 use garage_rpc::system::System;

@@ -32,16 +31,16 @@ pub struct TableData<F: TableSchema, R: TableReplication> {
     pub(crate) merkle_tree: db::Tree,
     pub(crate) merkle_todo: db::Tree,
     pub(crate) merkle_todo_notify: Notify,

-    pub(crate) insert_queue: db::Tree,
-    pub(crate) insert_queue_notify: Notify,
-
     pub(crate) gc_todo: CountedTree,

     pub(crate) metrics: TableMetrics,
 }

-impl<F: TableSchema, R: TableReplication> TableData<F, R> {
+impl<F, R> TableData<F, R>
+where
+    F: TableSchema,
+    R: TableReplication,
+{
     pub fn new(system: Arc<System>, instance: F, replication: R, db: &db::Db) -> Arc<Self> {
         let store = db
             .open_tree(&format!("{}:table", F::TABLE_NAME))
@@ -54,22 +53,12 @@ impl<F: TableSchema, R: TableReplication> TableData<F, R> {
             .open_tree(&format!("{}:merkle_todo", F::TABLE_NAME))
             .expect("Unable to open DB Merkle TODO tree");

-        let insert_queue = db
-            .open_tree(&format!("{}:insert_queue", F::TABLE_NAME))
-            .expect("Unable to open insert queue DB tree");
-
         let gc_todo = db
             .open_tree(&format!("{}:gc_todo_v2", F::TABLE_NAME))
-            .expect("Unable to open GC DB tree");
+            .expect("Unable to open DB tree");
         let gc_todo = CountedTree::new(gc_todo).expect("Cannot count gc_todo_v2");

-        let metrics = TableMetrics::new(
-            F::TABLE_NAME,
-            store.clone(),
-            merkle_tree.clone(),
-            merkle_todo.clone(),
-            gc_todo.clone(),
-        );
+        let metrics = TableMetrics::new(F::TABLE_NAME, merkle_todo.clone(), gc_todo.clone());

         Arc::new(Self {
             system,
@@ -79,8 +68,6 @@ impl<F: TableSchema, R: TableReplication> TableData<F, R> {
             merkle_tree,
             merkle_todo,
             merkle_todo_notify: Notify::new(),
-            insert_queue,
-            insert_queue_notify: Notify::new(),
             gc_todo,
             metrics,
         })
@@ -180,8 +167,9 @@ impl<F: TableSchema, R: TableReplication> TableData<F, R> {

     pub(crate) fn update_entry(&self, update_bytes: &[u8]) -> Result<(), Error> {
         let update = self.decode_entry(update_bytes)?;
+        let tree_key = self.tree_key(update.partition_key(), update.sort_key());

-        self.update_entry_with(update.partition_key(), update.sort_key(), |ent| match ent {
+        self.update_entry_with(&tree_key[..], |ent| match ent {
             Some(mut ent) => {
                 ent.merge(&update);
                 ent
@@ -193,14 +181,11 @@ impl<F: TableSchema, R: TableReplication> TableData<F, R> {

     pub fn update_entry_with(
         &self,
-        partition_key: &F::P,
-        sort_key: &F::S,
+        tree_key: &[u8],
         f: impl Fn(Option<F::E>) -> F::E,
     ) -> Result<Option<F::E>, Error> {
-        let tree_key = self.tree_key(partition_key, sort_key);
-
         let changed = self.store.db().transaction(|mut tx| {
-            let (old_entry, old_bytes, new_entry) = match tx.get(&self.store, &tree_key)? {
+            let (old_entry, old_bytes, new_entry) = match tx.get(&self.store, tree_key)? {
                 Some(old_bytes) => {
                     let old_entry = self.decode_entry(&old_bytes).map_err(db::TxError::Abort)?;
                     let new_entry = f(Some(old_entry.clone()));
@@ -209,24 +194,23 @@ impl<F: TableSchema, R: TableReplication> TableData<F, R> {
                 None => (None, None, f(None)),
             };

-            // Changed can be true in two scenarios
-            // Scenario 1: the actual represented value changed,
-            // so of course the messagepack encoding changed as well
+            // Scenario 1: the value changed, so of course there is a change
+            let value_changed = Some(&new_entry) != old_entry.as_ref();
+
             // Scenario 2: the value didn't change but due to a migration in the
-            // data format, the messagepack encoding changed. In this case,
-            // we also have to write the migrated value in the table and update
+            // data format, the messagepack encoding changed. In this case
+            // we have to write the migrated value in the table and update
             // the associated Merkle tree entry.
-            let new_bytes = new_entry
-                .encode()
+            let new_bytes = rmp_to_vec_all_named(&new_entry)
                 .map_err(Error::RmpEncode)
                 .map_err(db::TxError::Abort)?;
-            let changed = Some(&new_bytes[..]) != old_bytes.as_deref();
+            let encoding_changed = Some(&new_bytes[..]) != old_bytes.as_ref().map(|x| &x[..]);
             drop(old_bytes);

-            if changed {
-                let new_bytes_hash = blake2sum(&new_bytes);
-                tx.insert(&self.merkle_todo, &tree_key, new_bytes_hash.as_slice())?;
-                tx.insert(&self.store, &tree_key, new_bytes)?;
+            if value_changed || encoding_changed {
+                let new_bytes_hash = blake2sum(&new_bytes[..]);
+                tx.insert(&self.merkle_todo, tree_key, new_bytes_hash.as_slice())?;
+                tx.insert(&self.store, tree_key, new_bytes)?;

                 self.instance
                     .updated(&mut tx, old_entry.as_ref(), Some(&new_entry))?;
@@ -252,7 +236,7 @@ impl<F: TableSchema, R: TableReplication> TableData<F, R> {
             let pk_hash = Hash::try_from(&tree_key[..32]).unwrap();
             let nodes = self.replication.write_nodes(&pk_hash);
             if nodes.first() == Some(&self.system.id) {
-                GcTodoEntry::new(tree_key, new_bytes_hash).save(&self.gc_todo)?;
+                GcTodoEntry::new(tree_key.to_vec(), new_bytes_hash).save(&self.gc_todo)?;
             }
         }

@@ -268,11 +252,10 @@ impl<F: TableSchema, R: TableReplication> TableData<F, R> {
             .db()
             .transaction(|mut tx| match tx.get(&self.store, k)? {
                 Some(cur_v) if cur_v == v => {
-                    let old_entry = self.decode_entry(v).map_err(db::TxError::Abort)?;
-
                     tx.remove(&self.store, k)?;
                     tx.insert(&self.merkle_todo, k, vec![])?;

+                    let old_entry = self.decode_entry(v).map_err(db::TxError::Abort)?;
                     self.instance.updated(&mut tx, Some(&old_entry), None)?;
                     Ok(true)
                 }
@@ -296,11 +279,10 @@ impl<F: TableSchema, R: TableReplication> TableData<F, R> {
             .db()
             .transaction(|mut tx| match tx.get(&self.store, k)? {
                 Some(cur_v) if blake2sum(&cur_v[..]) == vhash => {
-                    let old_entry = self.decode_entry(&cur_v[..]).map_err(db::TxError::Abort)?;
-
                     tx.remove(&self.store, k)?;
                     tx.insert(&self.merkle_todo, k, vec![])?;

+                    let old_entry = self.decode_entry(&cur_v[..]).map_err(db::TxError::Abort)?;
                     self.instance.updated(&mut tx, Some(&old_entry), None)?;
                     Ok(true)
                 }
@@ -314,32 +296,6 @@ impl<F: TableSchema, R: TableReplication> TableData<F, R> {
         Ok(removed)
     }

-    // ---- Insert queue functions ----
-
-    pub(crate) fn queue_insert(
-        &self,
-        tx: &mut db::Transaction,
-        ins: &F::E,
-    ) -> db::TxResult<(), Error> {
-        let tree_key = self.tree_key(ins.partition_key(), ins.sort_key());
-
-        let new_entry = match tx.get(&self.insert_queue, &tree_key)? {
-            Some(old_v) => {
-                let mut entry = self.decode_entry(&old_v).map_err(db::TxError::Abort)?;
-                entry.merge(ins);
-                entry.encode()
-            }
-            None => ins.encode(),
-        };
-        let new_entry = new_entry
-            .map_err(Error::RmpEncode)
-            .map_err(db::TxError::Abort)?;
-        tx.insert(&self.insert_queue, &tree_key, new_entry)?;
-        self.insert_queue_notify.notify_one();
-
-        Ok(())
-    }
-
     // ---- Utility functions ----

     pub fn tree_key(&self, p: &F::P, s: &F::S) -> Vec<u8> {
@@ -349,18 +305,18 @@ impl<F: TableSchema, R: TableReplication> TableData<F, R> {
     }

     pub fn decode_entry(&self, bytes: &[u8]) -> Result<F::E, Error> {
-        match F::E::decode(bytes) {
-            Some(x) => Ok(x),
-            None => {
-                error!("Unable to decode entry of {}", F::TABLE_NAME);
-                for line in hexdump::hexdump_iter(bytes) {
-                    debug!("{}", line);
+        match rmp_serde::decode::from_read_ref::<_, F::E>(bytes) {
+            Ok(x) => Ok(x),
+            Err(e) => match F::try_migrate(bytes) {
+                Some(x) => Ok(x),
+                None => {
+                    warn!("Unable to decode entry of {}: {}", F::TABLE_NAME, e);
+                    for line in hexdump::hexdump_iter(bytes) {
+                        debug!("{}", line);
+                    }
+                    Err(e.into())
                 }
-                Err(Error::Message(format!(
-                    "Unable to decode entry of {}",
-                    F::TABLE_NAME
-                )))
-            }
+            },
         }
     }
 }

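The update_entry_with hunk above splits the old single changed flag into value_changed and encoding_changed: after a format migration, the logical value may be identical while its serialized bytes differ, and the row must still be rewritten so the store and the Merkle tree converge on the new encoding. A toy illustration with fake formats:

    #[derive(Clone, PartialEq)]
    struct Entry {
        n: u32,
    }

    fn encode_current(e: &Entry) -> Vec<u8> {
        e.n.to_be_bytes().to_vec() // new format: big-endian u32
    }

    fn main() {
        let old_bytes = vec![b'7'];    // old on-disk format: ASCII decimal
        let entry = Entry { n: 7 };    // decoded via the migration path
        let new_entry = entry.clone(); // the update function was a no-op

        let value_changed = new_entry != entry;
        let new_bytes = encode_current(&new_entry);
        let encoding_changed = new_bytes != old_bytes;

        // Value unchanged, encoding changed: still write back and enqueue
        // the Merkle tree update, exactly the case the comment describes.
        assert!(!value_changed && encoding_changed);
    }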
@@ -31,7 +31,7 @@ const TABLE_GC_BATCH_SIZE: usize = 1024;
 // and the moment the garbage collection actually happens)
 const TABLE_GC_DELAY: Duration = Duration::from_secs(24 * 3600);

-pub(crate) struct TableGc<F: TableSchema, R: TableReplication> {
+pub(crate) struct TableGc<F: TableSchema + 'static, R: TableReplication + 'static> {
     system: Arc<System>,
     data: Arc<TableData<F, R>>,

@@ -49,24 +49,27 @@ impl Rpc for GcRpc {
     type Response = Result<GcRpc, Error>;
 }

-impl<F: TableSchema, R: TableReplication> TableGc<F, R> {
-    pub(crate) fn new(system: Arc<System>, data: Arc<TableData<F, R>>) -> Arc<Self> {
+impl<F, R> TableGc<F, R>
+where
+    F: TableSchema + 'static,
+    R: TableReplication + 'static,
+{
+    pub(crate) fn launch(system: Arc<System>, data: Arc<TableData<F, R>>) -> Arc<Self> {
         let endpoint = system
             .netapp
             .endpoint(format!("garage_table/gc.rs/Rpc:{}", F::TABLE_NAME));

         let gc = Arc::new(Self {
-            system,
+            system: system.clone(),
             data,
             endpoint,
         });

         gc.endpoint.set_handler(gc.clone());

-        gc
-    }
+        system.background.spawn_worker(GcWorker::new(gc.clone()));

-    pub(crate) fn spawn_workers(self: &Arc<Self>, bg: &BackgroundRunner) {
-        bg.spawn_worker(GcWorker::new(self.clone()));
+        gc
     }

     async fn gc_loop_iter(&self) -> Result<Option<Duration>, Error> {
@@ -273,7 +276,11 @@ impl<F: TableSchema, R: TableReplication> TableGc<F, R> {
 }

 #[async_trait]
-impl<F: TableSchema, R: TableReplication> EndpointHandler<GcRpc> for TableGc<F, R> {
+impl<F, R> EndpointHandler<GcRpc> for TableGc<F, R>
+where
+    F: TableSchema + 'static,
+    R: TableReplication + 'static,
+{
     async fn handle(self: &Arc<Self>, message: &GcRpc, _from: NodeID) -> Result<GcRpc, Error> {
         match message {
             GcRpc::Update(items) => {
@@ -291,12 +298,20 @@ impl<F: TableSchema, R: TableReplication> EndpointHandler<GcRpc> for TableGc<F,
     }
 }

-struct GcWorker<F: TableSchema, R: TableReplication> {
+struct GcWorker<F, R>
+where
+    F: TableSchema + 'static,
+    R: TableReplication + 'static,
+{
     gc: Arc<TableGc<F, R>>,
     wait_delay: Duration,
 }

-impl<F: TableSchema, R: TableReplication> GcWorker<F, R> {
+impl<F, R> GcWorker<F, R>
+where
+    F: TableSchema + 'static,
+    R: TableReplication + 'static,
+{
     fn new(gc: Arc<TableGc<F, R>>) -> Self {
         Self {
             gc,
@@ -306,15 +321,21 @@ impl<F: TableSchema, R: TableReplication> GcWorker<F, R> {
 }

 #[async_trait]
-impl<F: TableSchema, R: TableReplication> Worker for GcWorker<F, R> {
+impl<F, R> Worker for GcWorker<F, R>
+where
+    F: TableSchema + 'static,
+    R: TableReplication + 'static,
+{
     fn name(&self) -> String {
         format!("{} GC", F::TABLE_NAME)
     }

-    fn status(&self) -> WorkerStatus {
-        WorkerStatus {
-            queue_length: Some(self.gc.data.gc_todo_len().unwrap_or(0) as u64),
-            ..Default::default()
+    fn info(&self) -> Option<String> {
+        let l = self.gc.data.gc_todo_len().unwrap_or(0);
+        if l > 0 {
+            Some(format!("{} items in queue", l))
+        } else {
+            None
         }
     }

@@ -328,7 +349,10 @@ impl<F: TableSchema, R: TableReplication> Worker for GcWorker<F, R> {
         }
     }

-    async fn wait_for_work(&mut self) -> WorkerState {
+    async fn wait_for_work(&mut self, must_exit: &watch::Receiver<bool>) -> WorkerState {
+        if *must_exit.borrow() {
+            return WorkerState::Done;
+        }
         tokio::time::sleep(self.wait_delay).await;
         WorkerState::Busy
     }

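The Worker hunks above also show the reporting API difference between the two histories: 0.8.1 returns a structured WorkerStatus (with a queue_length field), 0.8.0 a free-form Option<String>. A sketch with local stand-in types, not garage_util::background's real definitions:

    #[derive(Default, Debug)]
    struct WorkerStatus {
        queue_length: Option<u64>,
        tranquility: Option<u32>,
    }

    // 0.8.1-style: structured fields that the CLI can render uniformly.
    fn status_structured(queue_len: u64) -> WorkerStatus {
        WorkerStatus {
            queue_length: Some(queue_len),
            ..Default::default()
        }
    }

    // 0.8.0-style: each worker formats its own human-readable string.
    fn info_freeform(queue_len: u64) -> Option<String> {
        if queue_len > 0 {
            Some(format!("{} items in queue", queue_len))
        } else {
            None
        }
    }

    fn main() {
        println!("{:?} / {:?}", status_structured(12), info_freeform(12));
    }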
@@ -4,18 +4,16 @@
 #[macro_use]
 extern crate tracing;

+mod metrics;
 pub mod schema;
 pub mod util;

 pub mod data;
-pub mod replication;
-pub mod table;
-
 mod gc;
 mod merkle;
-mod metrics;
-mod queue;
+pub mod replication;
 mod sync;
+pub mod table;

 pub use schema::*;
 pub use table::*;

@@ -3,14 +3,12 @@ use std::time::Duration;

 use async_trait::async_trait;
 use serde::{Deserialize, Serialize};
-use tokio::select;
 use tokio::sync::watch;

 use garage_db as db;

 use garage_util::background::*;
 use garage_util::data::*;
-use garage_util::encode::{nonversioned_decode, nonversioned_encode};
 use garage_util::error::Error;

 use garage_rpc::ring::*;
@@ -66,18 +64,22 @@ pub enum MerkleNode {
     Leaf(Vec<u8>, Hash),
 }

-impl<F: TableSchema, R: TableReplication> MerkleUpdater<F, R> {
-    pub(crate) fn new(data: Arc<TableData<F, R>>) -> Arc<Self> {
-        let empty_node_hash = blake2sum(&nonversioned_encode(&MerkleNode::Empty).unwrap()[..]);
+impl<F, R> MerkleUpdater<F, R>
+where
+    F: TableSchema + 'static,
+    R: TableReplication + 'static,
+{
+    pub(crate) fn launch(background: &BackgroundRunner, data: Arc<TableData<F, R>>) -> Arc<Self> {
+        let empty_node_hash = blake2sum(&rmp_to_vec_all_named(&MerkleNode::Empty).unwrap()[..]);

-        Arc::new(Self {
+        let ret = Arc::new(Self {
             data,
             empty_node_hash,
-        })
-    }
+        });

-    pub(crate) fn spawn_workers(self: &Arc<Self>, background: &BackgroundRunner) {
-        background.spawn_worker(MerkleWorker(self.clone()));
+        background.spawn_worker(MerkleWorker(ret.clone()));
+
+        ret
     }

     fn updater_loop_iter(&self) -> Result<WorkerState, Error> {
@@ -274,7 +276,7 @@ impl<F: TableSchema, R: TableReplication> MerkleUpdater<F, R> {
             tx.remove(&self.data.merkle_tree, k.encode())?;
             Ok(self.empty_node_hash)
         } else {
-            let vby = nonversioned_encode(v).map_err(|e| db::TxError::Abort(e.into()))?;
+            let vby = rmp_to_vec_all_named(v).map_err(|e| db::TxError::Abort(e.into()))?;
             let rethash = blake2sum(&vby[..]);
             tx.insert(&self.data.merkle_tree, k.encode(), vby)?;
             Ok(rethash)
@@ -291,27 +293,32 @@ impl<F: TableSchema, R: TableReplication> MerkleUpdater<F, R> {
         Ok(self.data.merkle_tree.len()?)
     }

-    pub fn merkle_tree_fast_len(&self) -> Result<Option<usize>, Error> {
-        Ok(self.data.merkle_tree.fast_len()?)
-    }
-
     pub fn todo_len(&self) -> Result<usize, Error> {
         Ok(self.data.merkle_todo.len()?)
     }
 }

-struct MerkleWorker<F: TableSchema, R: TableReplication>(Arc<MerkleUpdater<F, R>>);
+struct MerkleWorker<F, R>(Arc<MerkleUpdater<F, R>>)
+where
+    F: TableSchema + 'static,
+    R: TableReplication + 'static;

 #[async_trait]
-impl<F: TableSchema, R: TableReplication> Worker for MerkleWorker<F, R> {
+impl<F, R> Worker for MerkleWorker<F, R>
+where
+    F: TableSchema + 'static,
+    R: TableReplication + 'static,
+{
     fn name(&self) -> String {
||||||
format!("{} Merkle", F::TABLE_NAME)
|
format!("{} Merkle tree updater", F::TABLE_NAME)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn status(&self) -> WorkerStatus {
|
fn info(&self) -> Option<String> {
|
||||||
WorkerStatus {
|
let l = self.0.todo_len().unwrap_or(0);
|
||||||
queue_length: Some(self.0.todo_len().unwrap_or(0) as u64),
|
if l > 0 {
|
||||||
..Default::default()
|
Some(format!("{} items in queue", l))
|
||||||
|
} else {
|
||||||
|
None
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -330,11 +337,11 @@ impl<F: TableSchema, R: TableReplication> Worker for MerkleWorker<F, R> {
|
||||||
.unwrap()
|
.unwrap()
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn wait_for_work(&mut self) -> WorkerState {
|
async fn wait_for_work(&mut self, must_exit: &watch::Receiver<bool>) -> WorkerState {
|
||||||
select! {
|
if *must_exit.borrow() {
|
||||||
_ = tokio::time::sleep(Duration::from_secs(60)) => (),
|
return WorkerState::Done;
|
||||||
_ = self.0.data.merkle_todo_notify.notified() => (),
|
|
||||||
}
|
}
|
||||||
|
tokio::time::sleep(Duration::from_secs(10)).await;
|
||||||
WorkerState::Busy
|
WorkerState::Busy
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -365,7 +372,7 @@ impl MerkleNode {
|
||||||
fn decode_opt(ent: &Option<db::Value>) -> Result<Self, Error> {
|
fn decode_opt(ent: &Option<db::Value>) -> Result<Self, Error> {
|
||||||
match ent {
|
match ent {
|
||||||
None => Ok(MerkleNode::Empty),
|
None => Ok(MerkleNode::Empty),
|
||||||
Some(v) => Ok(nonversioned_decode::<MerkleNode>(&v[..])?),
|
Some(v) => Ok(rmp_serde::decode::from_read_ref::<_, MerkleNode>(&v[..])?),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
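The move from nonversioned_encode back to rmp_to_vec_all_named changes the on-disk encoding of Merkle nodes to self-describing MessagePack, where struct fields are written out by name. A small standalone illustration of the difference, assuming only the rmp-serde crate (Node is invented for the example; to_vec writes compact positional arrays, to_vec_named writes maps keyed by field name):

    use serde::Serialize;

    #[derive(Serialize)]
    struct Node {
        children: u8,
        hash: [u8; 4],
    }

    fn main() {
        let n = Node { children: 2, hash: [1, 2, 3, 4] };
        // Positional encoding: an array, smaller but tied to field order.
        let compact = rmp_serde::to_vec(&n).unwrap();
        // Named encoding: a map with "children"/"hash" keys, order-independent,
        // which is the behavior rmp_to_vec_all_named provides.
        let named = rmp_serde::to_vec_named(&n).unwrap();
        assert!(named.len() > compact.len());
    }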
--- a/src/table/metrics.rs
+++ b/src/table/metrics.rs
@@ -5,8 +5,6 @@ use garage_db::counted_tree_hack::CountedTree;
 
 /// TableMetrics reference all counter used for metrics
 pub struct TableMetrics {
-	pub(crate) _table_size: ValueObserver<u64>,
-	pub(crate) _merkle_tree_size: ValueObserver<u64>,
 	pub(crate) _merkle_todo_len: ValueObserver<u64>,
 	pub(crate) _gc_todo_len: ValueObserver<u64>,
 
@@ -22,43 +20,9 @@ pub struct TableMetrics {
 	pub(crate) sync_items_received: Counter<u64>,
 }
 impl TableMetrics {
-	pub fn new(
-		table_name: &'static str,
-		store: db::Tree,
-		merkle_tree: db::Tree,
-		merkle_todo: db::Tree,
-		gc_todo: CountedTree,
-	) -> Self {
+	pub fn new(table_name: &'static str, merkle_todo: db::Tree, gc_todo: CountedTree) -> Self {
 		let meter = global::meter(table_name);
 		TableMetrics {
-			_table_size: meter
-				.u64_value_observer(
-					"table.size",
-					move |observer| {
-						if let Ok(Some(v)) = store.fast_len() {
-							observer.observe(
-								v as u64,
-								&[KeyValue::new("table_name", table_name)],
-							);
-						}
-					},
-				)
-				.with_description("Number of items in table")
-				.init(),
-			_merkle_tree_size: meter
-				.u64_value_observer(
-					"table.merkle_tree_size",
-					move |observer| {
-						if let Ok(Some(v)) = merkle_tree.fast_len() {
-							observer.observe(
-								v as u64,
-								&[KeyValue::new("table_name", table_name)],
-							);
-						}
-					},
-				)
-				.with_description("Number of nodes in table's Merkle tree")
-				.init(),
 			_merkle_todo_len: meter
 				.u64_value_observer(
 					"table.merkle_updater_todo_queue_length",
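The gauges dropped here follow the usual opentelemetry 0.17 value-observer pattern: a callback that samples a length lazily on every metrics pull. A condensed sketch of one such observer, reusing the call shape from the deleted lines; table_len is an invented stand-in for store.fast_len():

    use opentelemetry::{global, metrics::ValueObserver, KeyValue};

    // Sketch of the deleted _table_size gauge. Keep the returned observer
    // alive (TableMetrics stored it in a struct field) or it stops reporting.
    fn table_size_gauge(
        table_name: &'static str,
        table_len: impl Fn() -> Option<usize> + Send + Sync + 'static,
    ) -> ValueObserver<u64> {
        global::meter(table_name)
            .u64_value_observer("table.size", move |observer| {
                // Called at each collection; samples the current length.
                if let Some(v) = table_len() {
                    observer.observe(v as u64, &[KeyValue::new("table_name", table_name)]);
                }
            })
            .with_description("Number of items in table")
            .init()
    }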
--- a/src/table/queue.rs
+++ /dev/null
@@ -1,77 +0,0 @@
-use std::sync::Arc;
-use std::time::Duration;
-
-use async_trait::async_trait;
-use tokio::select;
-use tokio::sync::watch;
-
-use garage_util::background::*;
-use garage_util::error::Error;
-
-use crate::replication::*;
-use crate::schema::*;
-use crate::table::*;
-
-const BATCH_SIZE: usize = 100;
-
-pub(crate) struct InsertQueueWorker<F, R>(pub(crate) Arc<Table<F, R>>)
-where
-	F: TableSchema,
-	R: TableReplication;
-
-#[async_trait]
-impl<F: TableSchema, R: TableReplication> Worker for InsertQueueWorker<F, R> {
-	fn name(&self) -> String {
-		format!("{} queue", F::TABLE_NAME)
-	}
-
-	fn status(&self) -> WorkerStatus {
-		WorkerStatus {
-			queue_length: Some(self.0.data.insert_queue.len().unwrap_or(0) as u64),
-			..Default::default()
-		}
-	}
-
-	async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
-		let mut kv_pairs = vec![];
-		let mut values = vec![];
-
-		for entry_kv in self.0.data.insert_queue.iter()? {
-			let (k, v) = entry_kv?;
-
-			values.push(self.0.data.decode_entry(&v)?);
-			kv_pairs.push((k, v));
-
-			if kv_pairs.len() > BATCH_SIZE {
-				break;
-			}
-		}
-
-		if kv_pairs.is_empty() {
-			return Ok(WorkerState::Idle);
-		}
-
-		self.0.insert_many(values).await?;
-
-		self.0.data.insert_queue.db().transaction(|mut tx| {
-			for (k, v) in kv_pairs.iter() {
-				if let Some(v2) = tx.get(&self.0.data.insert_queue, k)? {
-					if &v2 == v {
-						tx.remove(&self.0.data.insert_queue, k)?;
-					}
-				}
-			}
-			Ok(())
-		})?;
-
-		Ok(WorkerState::Busy)
-	}
-
-	async fn wait_for_work(&mut self) -> WorkerState {
-		select! {
-			_ = tokio::time::sleep(Duration::from_secs(600)) => (),
-			_ = self.0.data.insert_queue_notify.notified() => (),
-		}
-		WorkerState::Busy
-	}
-}
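The deleted queue.rs implemented deferred writes: queue_insert (removed from table.rs further down) stores encoded entries in an insert_queue tree inside the caller's transaction, and this worker later batches them into insert_many. The check-before-delete step is what makes draining safe, since a newer queued value for the same key must survive. A runnable toy reduction of that logic over a plain BTreeMap, purely illustrative:

    use std::collections::BTreeMap;

    const BATCH_SIZE: usize = 100;

    // Drain up to BATCH_SIZE queued values, "send" them, then delete only
    // entries whose bytes are unchanged since they were read.
    fn drain_batch(queue: &mut BTreeMap<Vec<u8>, Vec<u8>>) -> Vec<Vec<u8>> {
        let batch: Vec<(Vec<u8>, Vec<u8>)> = queue
            .iter()
            .take(BATCH_SIZE)
            .map(|(k, v)| (k.clone(), v.clone()))
            .collect();
        let values: Vec<Vec<u8>> = batch.iter().map(|(_, v)| v.clone()).collect();
        // ... insert_many(values) would go here ...
        for (k, v) in batch {
            if queue.get(&k) == Some(&v) {
                queue.remove(&k); // unchanged: safe to drop from the queue
            }
        }
        values
    }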
--- a/src/table/replication/parameters.rs
+++ b/src/table/replication/parameters.rs
@@ -2,7 +2,7 @@ use garage_rpc::ring::*;
 use garage_util::data::*;
 
 /// Trait to describe how a table shall be replicated
-pub trait TableReplication: Send + Sync + 'static {
+pub trait TableReplication: Send + Sync {
 	// See examples in table_sharded.rs and table_fullcopy.rs
 	// To understand various replication methods
 
--- a/src/table/schema.rs
+++ b/src/table/schema.rs
@@ -2,14 +2,11 @@ use serde::{Deserialize, Serialize};
 
 use garage_db as db;
 use garage_util::data::*;
-use garage_util::migrate::Migrate;
 
 use crate::crdt::Crdt;
 
 /// Trait for field used to partition data
-pub trait PartitionKey:
-	Clone + PartialEq + Serialize + for<'de> Deserialize<'de> + Send + Sync + 'static
-{
+pub trait PartitionKey {
 	/// Get the key used to partition
 	fn hash(&self) -> Hash;
 }
@@ -30,7 +27,7 @@ impl PartitionKey for FixedBytes32 {
 }
 
 /// Trait for field used to sort data
-pub trait SortKey: Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync + 'static {
+pub trait SortKey {
 	/// Get the key used to sort
 	fn sort_key(&self) -> &[u8];
 }
@@ -49,7 +46,7 @@ impl SortKey for FixedBytes32 {
 
 /// Trait for an entry in a table. It must be sortable and partitionnable.
 pub trait Entry<P: PartitionKey, S: SortKey>:
-	Crdt + PartialEq + Clone + Migrate + Send + Sync + 'static
+	Crdt + PartialEq + Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync
 {
 	/// Get the key used to partition
 	fn partition_key(&self) -> &P;
@@ -68,16 +65,23 @@ pub trait TableSchema: Send + Sync + 'static {
 	const TABLE_NAME: &'static str;
 
 	/// The partition key used in that table
-	type P: PartitionKey;
+	type P: PartitionKey + Clone + PartialEq + Serialize + for<'de> Deserialize<'de> + Send + Sync;
 	/// The sort key used int that table
-	type S: SortKey;
+	type S: SortKey + Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync;
 
 	/// They type for an entry in that table
 	type E: Entry<Self::P, Self::S>;
 
 	/// The type for a filter that can be applied to select entries
 	/// (e.g. filter out deleted entries)
-	type Filter: Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync + 'static;
+	type Filter: Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync;
 
+	// Action to take if not able to decode current version:
+	// try loading from an older version
+	/// Try migrating an entry from an older version
+	fn try_migrate(_bytes: &[u8]) -> Option<Self::E> {
+		None
+	}
+
 	/// Actions triggered by data changing in a table. If such actions
 	/// include updates to the local database that should be applied
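On the 0.8.0 side, schema migration is a per-table hook rather than the Migrate trait: when an entry fails to decode with the current format, the table calls try_migrate on the raw bytes. A hypothetical illustration of how an implementor might fill in that hook, using the rmp-serde 0.15 API seen elsewhere in this diff (OldV0 and V1 are invented types; in a real impl this function sits inside an impl TableSchema block and returns Option<Self::E>):

    use serde::{Deserialize, Serialize};

    #[derive(Clone, PartialEq, Serialize, Deserialize)]
    struct OldV0 { name: String }

    #[derive(Clone, PartialEq, Serialize, Deserialize)]
    struct V1 { name: String, flags: u32 }

    fn try_migrate(bytes: &[u8]) -> Option<V1> {
        // Fall back to the previous on-disk format and upgrade it in memory.
        let old: OldV0 = rmp_serde::decode::from_read_ref(bytes).ok()?;
        Some(V1 { name: old.name, flags: 0 })
    }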
--- a/src/table/sync.rs
+++ b/src/table/sync.rs
@@ -2,7 +2,6 @@ use std::collections::VecDeque;
 use std::sync::Arc;
 use std::time::{Duration, Instant};
 
-use arc_swap::ArcSwapOption;
 use async_trait::async_trait;
 use futures_util::stream::*;
 use opentelemetry::KeyValue;
@@ -14,8 +13,7 @@ use tokio::sync::{mpsc, watch};
 
 use garage_util::background::*;
 use garage_util::data::*;
-use garage_util::encode::{debug_serialize, nonversioned_encode};
-use garage_util::error::{Error, OkOrMessage};
+use garage_util::error::Error;
 
 use garage_rpc::ring::*;
 use garage_rpc::system::System;
@@ -29,12 +27,12 @@ use crate::*;
 // Do anti-entropy every 10 minutes
 const ANTI_ENTROPY_INTERVAL: Duration = Duration::from_secs(10 * 60);
 
-pub struct TableSyncer<F: TableSchema, R: TableReplication> {
+pub struct TableSyncer<F: TableSchema + 'static, R: TableReplication + 'static> {
 	system: Arc<System>,
 	data: Arc<TableData<F, R>>,
 	merkle: Arc<MerkleUpdater<F, R>>,
 
-	add_full_sync_tx: ArcSwapOption<mpsc::UnboundedSender<()>>,
+	add_full_sync_tx: mpsc::UnboundedSender<()>,
 	endpoint: Arc<Endpoint<SyncRpc, Self>>,
 }
 
@@ -62,8 +60,12 @@ struct TodoPartition {
 	retain: bool,
 }
 
-impl<F: TableSchema, R: TableReplication> TableSyncer<F, R> {
-	pub(crate) fn new(
+impl<F, R> TableSyncer<F, R>
+where
+	F: TableSchema + 'static,
+	R: TableReplication + 'static,
+{
+	pub(crate) fn launch(
 		system: Arc<System>,
 		data: Arc<TableData<F, R>>,
 		merkle: Arc<MerkleUpdater<F, R>>,
@@ -72,40 +74,34 @@ impl<F: TableSchema, R: TableReplication> TableSyncer<F, R> {
 			.netapp
 			.endpoint(format!("garage_table/sync.rs/Rpc:{}", F::TABLE_NAME));
 
+		let (add_full_sync_tx, add_full_sync_rx) = mpsc::unbounded_channel();
+
 		let syncer = Arc::new(Self {
-			system,
+			system: system.clone(),
 			data,
 			merkle,
-			add_full_sync_tx: ArcSwapOption::new(None),
+			add_full_sync_tx,
 			endpoint,
 		});
 
 		syncer.endpoint.set_handler(syncer.clone());
 
-		syncer
-	}
-
-	pub(crate) fn spawn_workers(self: &Arc<Self>, bg: &BackgroundRunner) {
-		let (add_full_sync_tx, add_full_sync_rx) = mpsc::unbounded_channel();
-		self.add_full_sync_tx
-			.store(Some(Arc::new(add_full_sync_tx)));
-
-		bg.spawn_worker(SyncWorker {
-			syncer: self.clone(),
-			ring_recv: self.system.ring.clone(),
-			ring: self.system.ring.borrow().clone(),
+		system.background.spawn_worker(SyncWorker {
+			syncer: syncer.clone(),
+			ring_recv: system.ring.clone(),
+			ring: system.ring.borrow().clone(),
 			add_full_sync_rx,
 			todo: vec![],
 			next_full_sync: Instant::now() + Duration::from_secs(20),
 		});
 
+		syncer
 	}
 
-	pub fn add_full_sync(&self) -> Result<(), Error> {
-		let tx = self.add_full_sync_tx.load();
-		let tx = tx
-			.as_ref()
-			.ok_or_message("table sync worker is not running")?;
-		tx.send(()).ok_or_message("send error")?;
-		Ok(())
+	pub fn add_full_sync(&self) {
+		if self.add_full_sync_tx.send(()).is_err() {
+			error!("({}) Could not add full sync", F::TABLE_NAME);
+		}
 	}
 
 	// ----
@@ -299,7 +295,7 @@ impl<F: TableSchema, R: TableReplication> TableSyncer<F, R> {
 			);
 			return Ok(());
 		}
-		let root_ck_hash = hash_of_merkle_node(&root_ck)?;
+		let root_ck_hash = hash_of::<MerkleNode>(&root_ck)?;
 
 		// Check if they have the same root checksum
 		// If so, do nothing.
@@ -456,12 +452,16 @@ impl<F: TableSchema, R: TableReplication> TableSyncer<F, R> {
 // ======= SYNCHRONIZATION PROCEDURE -- RECEIVER SIDE ======
 
 #[async_trait]
-impl<F: TableSchema, R: TableReplication> EndpointHandler<SyncRpc> for TableSyncer<F, R> {
+impl<F, R> EndpointHandler<SyncRpc> for TableSyncer<F, R>
+where
+	F: TableSchema + 'static,
+	R: TableReplication + 'static,
+{
 	async fn handle(self: &Arc<Self>, message: &SyncRpc, from: NodeID) -> Result<SyncRpc, Error> {
 		match message {
 			SyncRpc::RootCkHash(range, h) => {
 				let (_root_ck_key, root_ck) = self.get_root_ck(*range)?;
-				let hash = hash_of_merkle_node(&root_ck)?;
+				let hash = hash_of::<MerkleNode>(&root_ck)?;
 				Ok(SyncRpc::RootCkDifferent(hash != *h))
 			}
 			SyncRpc::GetNode(k) => {
@@ -490,7 +490,7 @@ impl<F: TableSchema, R: TableReplication> EndpointHandler<SyncRpc> for TableSync
 
 // -------- Sync Worker ---------
 
-struct SyncWorker<F: TableSchema, R: TableReplication> {
+struct SyncWorker<F: TableSchema + 'static, R: TableReplication + 'static> {
 	syncer: Arc<TableSyncer<F, R>>,
 	ring_recv: watch::Receiver<Arc<Ring>>,
 	ring: Arc<Ring>,
@@ -499,7 +499,7 @@ struct SyncWorker<F: TableSchema, R: TableReplication> {
 	next_full_sync: Instant,
 }
 
-impl<F: TableSchema, R: TableReplication> SyncWorker<F, R> {
+impl<F: TableSchema + 'static, R: TableReplication + 'static> SyncWorker<F, R> {
 	fn add_full_sync(&mut self) {
 		let system = &self.syncer.system;
 		let data = &self.syncer.data;
@@ -565,15 +565,17 @@ impl<F: TableSchema, R: TableReplication> SyncWorker<F, R> {
 }
 
 #[async_trait]
-impl<F: TableSchema, R: TableReplication> Worker for SyncWorker<F, R> {
+impl<F: TableSchema + 'static, R: TableReplication + 'static> Worker for SyncWorker<F, R> {
 	fn name(&self) -> String {
 		format!("{} sync", F::TABLE_NAME)
 	}
 
-	fn status(&self) -> WorkerStatus {
-		WorkerStatus {
-			queue_length: Some(self.todo.len() as u64),
-			..Default::default()
+	fn info(&self) -> Option<String> {
+		let l = self.todo.len();
+		if l > 0 {
+			Some(format!("{} partitions remaining", l))
+		} else {
+			None
 		}
 	}
 
@@ -586,7 +588,10 @@ impl<F: TableSchema, R: TableReplication> Worker for SyncWorker<F, R> {
 		}
 	}
 
-	async fn wait_for_work(&mut self) -> WorkerState {
+	async fn wait_for_work(&mut self, must_exit: &watch::Receiver<bool>) -> WorkerState {
+		if *must_exit.borrow() {
+			return WorkerState::Done;
+		}
 		select! {
 			s = self.add_full_sync_rx.recv() => {
 				if let Some(()) = s {
@@ -615,8 +620,8 @@ impl<F: TableSchema, R: TableReplication> Worker for SyncWorker<F, R> {
 
 // ---- UTIL ----
 
-fn hash_of_merkle_node(x: &MerkleNode) -> Result<Hash, Error> {
-	Ok(blake2sum(&nonversioned_encode(x)?[..]))
+fn hash_of<T: Serialize>(x: &T) -> Result<Hash, Error> {
+	Ok(blake2sum(&rmp_to_vec_all_named(x)?[..]))
}
 
 fn join_ordered<'a, K: Ord + Eq, V1, V2>(
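The syncer's full-sync trigger reverts from a late-bound ArcSwapOption sender back to a plain unbounded channel created at launch, so add_full_sync becomes an infallible fire-and-forget send. The pattern is just tokio's unbounded mpsc; a runnable standalone reduction:

    use tokio::sync::mpsc;

    #[tokio::main]
    async fn main() {
        let (tx, mut rx) = mpsc::unbounded_channel::<()>();

        // Worker side: wake up whenever someone requests a full sync.
        let worker = tokio::spawn(async move {
            while rx.recv().await.is_some() {
                // ... enqueue all partitions for sync ...
            }
        });

        // API side: add_full_sync() reduces to a non-blocking send; an Err
        // only means the worker is gone, which the diff logs and ignores.
        if tx.send(()).is_err() {
            eprintln!("Could not add full sync: worker is gone");
        }

        drop(tx); // closing the channel lets the worker loop end
        worker.await.unwrap();
    }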
--- a/src/table/table.rs
+++ b/src/table/table.rs
@@ -14,11 +14,9 @@ use opentelemetry::{
 
 use garage_db as db;
 
-use garage_util::background::BackgroundRunner;
 use garage_util::data::*;
 use garage_util::error::Error;
 use garage_util::metrics::RecordDuration;
-use garage_util::migrate::Migrate;
 
 use garage_rpc::system::System;
 use garage_rpc::*;
@@ -27,18 +25,16 @@ use crate::crdt::Crdt;
 use crate::data::*;
 use crate::gc::*;
 use crate::merkle::*;
-use crate::queue::InsertQueueWorker;
 use crate::replication::*;
 use crate::schema::*;
 use crate::sync::*;
 use crate::util::*;
 
-pub struct Table<F: TableSchema, R: TableReplication> {
+pub struct Table<F: TableSchema + 'static, R: TableReplication + 'static> {
 	pub system: Arc<System>,
 	pub data: Arc<TableData<F, R>>,
 	pub merkle_updater: Arc<MerkleUpdater<F, R>>,
 	pub syncer: Arc<TableSyncer<F, R>>,
-	gc: Arc<TableGc<F, R>>,
 	endpoint: Arc<Endpoint<TableRpc<F>, Self>>,
 }
 
@@ -65,7 +61,11 @@ impl<F: TableSchema> Rpc for TableRpc<F> {
 	type Response = Result<TableRpc<F>, Error>;
 }
 
-impl<F: TableSchema, R: TableReplication> Table<F, R> {
+impl<F, R> Table<F, R>
+where
+	F: TableSchema + 'static,
+	R: TableReplication + 'static,
+{
 	// =============== PUBLIC INTERFACE FUNCTIONS (new, insert, get, etc) ===============
 
 	pub fn new(instance: F, replication: R, system: Arc<System>, db: &db::Db) -> Arc<Self> {
@@ -75,16 +75,15 @@ impl<F: TableSchema, R: TableReplication> Table<F, R> {
 
 		let data = TableData::new(system.clone(), instance, replication, db);
 
-		let merkle_updater = MerkleUpdater::new(data.clone());
+		let merkle_updater = MerkleUpdater::launch(&system.background, data.clone());
 
-		let syncer = TableSyncer::new(system.clone(), data.clone(), merkle_updater.clone());
-		let gc = TableGc::new(system.clone(), data.clone());
+		let syncer = TableSyncer::launch(system.clone(), data.clone(), merkle_updater.clone());
+		TableGc::launch(system.clone(), data.clone());
 
 		let table = Arc::new(Self {
 			system,
 			data,
 			merkle_updater,
-			gc,
 			syncer,
 			endpoint,
 		});
@@ -94,13 +93,6 @@ impl<F: TableSchema, R: TableReplication> Table<F, R> {
 		table
 	}
 
-	pub fn spawn_workers(self: &Arc<Self>, bg: &BackgroundRunner) {
-		self.merkle_updater.spawn_workers(bg);
-		self.syncer.spawn_workers(bg);
-		self.gc.spawn_workers(bg);
-		bg.spawn_worker(InsertQueueWorker(self.clone()));
-	}
-
 	pub async fn insert(&self, e: &F::E) -> Result<(), Error> {
 		let tracer = opentelemetry::global::tracer("garage_table");
 		let span = tracer.start(format!("{} insert", F::TABLE_NAME));
@@ -119,7 +111,7 @@ impl<F: TableSchema, R: TableReplication> Table<F, R> {
 		let hash = e.partition_key().hash();
 		let who = self.data.replication.write_nodes(&hash);
 
-		let e_enc = Arc::new(ByteBuf::from(e.encode()?));
+		let e_enc = Arc::new(ByteBuf::from(rmp_to_vec_all_named(e)?));
 		let rpc = TableRpc::<F>::Update(vec![e_enc]);
 
 		self.system
@@ -136,11 +128,6 @@ impl<F: TableSchema, R: TableReplication> Table<F, R> {
 		Ok(())
 	}
 
-	/// Insert item locally
-	pub fn queue_insert(&self, tx: &mut db::Transaction, e: &F::E) -> db::TxResult<(), Error> {
-		self.data.queue_insert(tx, e)
-	}
-
 	pub async fn insert_many<I, IE>(&self, entries: I) -> Result<(), Error>
 	where
 		I: IntoIterator<Item = IE> + Send + Sync,
@@ -170,7 +157,7 @@ impl<F: TableSchema, R: TableReplication> Table<F, R> {
 			let entry = entry.borrow();
 			let hash = entry.partition_key().hash();
 			let who = self.data.replication.write_nodes(&hash);
-			let e_enc = Arc::new(ByteBuf::from(entry.encode()?));
+			let e_enc = Arc::new(ByteBuf::from(rmp_to_vec_all_named(entry)?));
 			for node in who {
 				call_list.entry(node).or_default().push(e_enc.clone());
 			}
@@ -272,11 +259,9 @@ impl<F: TableSchema, R: TableReplication> Table<F, R> {
 			if not_all_same {
 				let self2 = self.clone();
 				let ent2 = ret_entry.clone();
-				tokio::spawn(async move {
-					if let Err(e) = self2.repair_on_read(&who[..], ent2).await {
-						warn!("Error doing repair on read: {}", e);
-					}
-				});
+				self.system
+					.background
+					.spawn_cancellable(async move { self2.repair_on_read(&who[..], ent2).await });
 			}
 		}
 
@@ -373,12 +358,11 @@ impl<F: TableSchema, R: TableReplication> Table<F, R> {
 			.into_iter()
 			.map(|k| ret.get(&k).unwrap().clone())
 			.collect::<Vec<_>>();
-		tokio::spawn(async move {
+		self.system.background.spawn_cancellable(async move {
 			for v in to_repair {
-				if let Err(e) = self2.repair_on_read(&who[..], v).await {
-					warn!("Error doing repair on read: {}", e);
-				}
+				self2.repair_on_read(&who[..], v).await?;
 			}
+			Ok(())
 		});
 	}
 
@@ -409,7 +393,7 @@ impl<F: TableSchema, R: TableReplication> Table<F, R> {
 	// =============== UTILITY FUNCTION FOR CLIENT OPERATIONS ===============
 
 	async fn repair_on_read(&self, who: &[Uuid], what: F::E) -> Result<(), Error> {
-		let what_enc = Arc::new(ByteBuf::from(what.encode()?));
+		let what_enc = Arc::new(ByteBuf::from(rmp_to_vec_all_named(&what)?));
 		self.system
 			.rpc
 			.try_call_many(
@@ -424,7 +408,11 @@ impl<F: TableSchema, R: TableReplication> Table<F, R> {
 	}
 
 #[async_trait]
-impl<F: TableSchema, R: TableReplication> EndpointHandler<TableRpc<F>> for Table<F, R> {
+impl<F, R> EndpointHandler<TableRpc<F>> for Table<F, R>
+where
+	F: TableSchema + 'static,
+	R: TableReplication + 'static,
+{
 	async fn handle(
 		self: &Arc<Self>,
 		msg: &TableRpc<F>,
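Repair-on-read moves from bare tokio::spawn with inline error logging back to BackgroundRunner::spawn_cancellable, which logs the job's Result centrally and may drop unstarted repairs at shutdown; that is why the second hunk adds an explicit Ok(()) at the end of the async block. A runnable toy mirror of the call contract, with String standing in for garage's Error type:

    use std::future::Future;
    use std::pin::Pin;
    use tokio::sync::mpsc;

    type JobOutput = Result<(), String>;
    type Job = Pin<Box<dyn Future<Output = JobOutput> + Send>>;

    // Mirror of spawn_cancellable: box the future and queue it with a
    // `cancellable = true` flag; a worker that sees the flag while the
    // process is stopping simply drops the job instead of running it.
    fn spawn_cancellable<T>(send_job: &mpsc::UnboundedSender<(Job, bool)>, job: T)
    where
        T: Future<Output = JobOutput> + Send + 'static,
    {
        send_job.send((Box::pin(job), true)).expect("runner is gone");
    }

    #[tokio::main]
    async fn main() {
        let (tx, mut rx) = mpsc::unbounded_channel::<(Job, bool)>();
        spawn_cancellable(&tx, async {
            // stand-in for self2.repair_on_read(&who[..], ent2).await
            Ok(()) // the job must evaluate to Result<(), _>
        });
        let (job, _cancellable) = rx.recv().await.unwrap();
        job.await.unwrap();
    }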
--- a/src/table/util.rs
+++ b/src/table/util.rs
@@ -49,9 +49,3 @@ impl EnumerationOrder {
 		}
 	}
 }
-
-impl Default for EnumerationOrder {
-	fn default() -> Self {
-		EnumerationOrder::Forward
-	}
-}
--- a/src/util/Cargo.toml
+++ b/src/util/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "garage_util"
-version = "0.8.1"
+version = "0.8.0"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -14,7 +14,7 @@ path = "lib.rs"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-garage_db = { version = "0.8.1", path = "../db" }
+garage_db = { version = "0.8.0", path = "../db" }
 
 arc-swap = "1.0"
 async-trait = "0.1"
@@ -23,7 +23,6 @@ bytes = "1.0"
 digest = "0.10"
 err-derive = "0.3"
 git-version = "0.3.4"
-hexdump = "0.1"
 xxhash-rust = { version = "0.8", default-features = false, features = ["xxh3"] }
 hex = "0.4"
 lazy_static = "1.4"
@@ -47,8 +46,6 @@ hyper = "0.14"
 
 opentelemetry = { version = "0.17", features = [ "rt-tokio", "metrics", "trace" ] }
 
-[dev-dependencies]
-mktemp = "0.4"
-
 [features]
 k2v = []
48 src/util/background/job_worker.rs (new file)
--- /dev/null
+++ b/src/util/background/job_worker.rs
@@ -0,0 +1,48 @@
+//! Job worker: a generic worker that just processes incoming
+//! jobs one by one
+
+use std::sync::Arc;
+
+use async_trait::async_trait;
+use tokio::sync::{mpsc, Mutex};
+
+use crate::background::worker::*;
+use crate::background::*;
+
+pub(crate) struct JobWorker {
+	pub(crate) index: usize,
+	pub(crate) job_chan: Arc<Mutex<mpsc::UnboundedReceiver<(Job, bool)>>>,
+	pub(crate) next_job: Option<Job>,
+}
+
+#[async_trait]
+impl Worker for JobWorker {
+	fn name(&self) -> String {
+		format!("Job worker #{}", self.index)
+	}
+
+	async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
+		match self.next_job.take() {
+			None => return Ok(WorkerState::Idle),
+			Some(job) => {
+				job.await?;
+				Ok(WorkerState::Busy)
+			}
+		}
+	}
+
+	async fn wait_for_work(&mut self, must_exit: &watch::Receiver<bool>) -> WorkerState {
+		loop {
+			match self.job_chan.lock().await.recv().await {
+				Some((job, cancellable)) => {
+					if cancellable && *must_exit.borrow() {
+						continue;
+					}
+					self.next_job = Some(job);
+					return WorkerState::Busy;
+				}
+				None => return WorkerState::Done,
+			}
+		}
+	}
+}
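Several JobWorkers share one receiver behind Arc<Mutex<...>>, so queued jobs are distributed across the n_runners workers; the lock is held only across a single recv. A runnable miniature of that fan-out, with strings standing in for boxed job futures:

    use std::sync::Arc;
    use tokio::sync::{mpsc, Mutex};

    #[tokio::main]
    async fn main() {
        let (tx, rx) = mpsc::unbounded_channel::<String>();
        let rx = Arc::new(Mutex::new(rx));

        // n_runners consumers over one queue, as BackgroundRunner::new sets up.
        let workers: Vec<_> = (0..4)
            .map(|i| {
                let rx = rx.clone();
                tokio::spawn(async move {
                    loop {
                        // Lock only for the duration of one recv, like JobWorker.
                        let msg = rx.lock().await.recv().await;
                        match msg {
                            Some(job) => println!("worker #{i} ran {job}"),
                            None => break, // channel closed: WorkerState::Done
                        }
                    }
                })
            })
            .collect();

        for n in 0..8 {
            tx.send(format!("job {n}")).unwrap();
        }
        drop(tx);
        for w in workers {
            w.await.unwrap();
        }
    }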
--- a/src/util/background/mod.rs
+++ b/src/util/background/mod.rs
@@ -1,19 +1,27 @@
 //! Job runner for futures and async functions
 
-pub mod vars;
+pub mod job_worker;
 pub mod worker;
 
+use core::future::Future;
+
 use std::collections::HashMap;
+use std::pin::Pin;
 use std::sync::Arc;
 
 use serde::{Deserialize, Serialize};
-use tokio::sync::{mpsc, watch};
+use tokio::sync::{mpsc, watch, Mutex};
 
+use crate::error::Error;
 use worker::WorkerProcessor;
 pub use worker::{Worker, WorkerState};
 
+pub(crate) type JobOutput = Result<(), Error>;
+pub(crate) type Job = Pin<Box<dyn Future<Output = JobOutput> + Send>>;
+
 /// Job runner for futures and async functions
 pub struct BackgroundRunner {
+	send_job: mpsc::UnboundedSender<(Job, bool)>,
 	send_worker: mpsc::UnboundedSender<Box<dyn Worker>>,
 	worker_info: Arc<std::sync::Mutex<HashMap<usize, WorkerInfo>>>,
 }
@@ -21,27 +29,19 @@ pub struct BackgroundRunner {
 #[derive(Clone, Serialize, Deserialize, Debug)]
 pub struct WorkerInfo {
 	pub name: String,
-	pub status: WorkerStatus,
+	pub info: Option<String>,
 	pub state: WorkerState,
 	pub errors: usize,
 	pub consecutive_errors: usize,
 	pub last_error: Option<(String, u64)>,
 }
 
-/// WorkerStatus is a struct returned by the worker with a bunch of canonical
-/// fields to indicate their status to CLI users. All fields are optional.
-#[derive(Clone, Serialize, Deserialize, Debug, Default)]
-pub struct WorkerStatus {
-	pub tranquility: Option<u32>,
-	pub progress: Option<String>,
-	pub queue_length: Option<u64>,
-	pub persistent_errors: Option<u64>,
-	pub freeform: Vec<String>,
-}
-
 impl BackgroundRunner {
 	/// Create a new BackgroundRunner
-	pub fn new(stop_signal: watch::Receiver<bool>) -> (Arc<Self>, tokio::task::JoinHandle<()>) {
+	pub fn new(
+		n_runners: usize,
+		stop_signal: watch::Receiver<bool>,
+	) -> (Arc<Self>, tokio::task::JoinHandle<()>) {
 		let (send_worker, worker_out) = mpsc::unbounded_channel::<Box<dyn Worker>>();
 
 		let worker_info = Arc::new(std::sync::Mutex::new(HashMap::new()));
@@ -52,7 +52,24 @@ impl BackgroundRunner {
 			worker_processor.run().await;
 		});
 
+		let (send_job, queue_out) = mpsc::unbounded_channel();
+		let queue_out = Arc::new(Mutex::new(queue_out));
+
+		for i in 0..n_runners {
+			let queue_out = queue_out.clone();
+
+			send_worker
+				.send(Box::new(job_worker::JobWorker {
+					index: i,
+					job_chan: queue_out.clone(),
+					next_job: None,
+				}))
+				.ok()
+				.unwrap();
+		}
+
 		let bgrunner = Arc::new(Self {
+			send_job,
 			send_worker,
 			worker_info,
 		});
@@ -63,6 +80,31 @@ impl BackgroundRunner {
 		self.worker_info.lock().unwrap().clone()
 	}
 
+	/// Spawn a task to be run in background
+	pub fn spawn<T>(&self, job: T)
+	where
+		T: Future<Output = JobOutput> + Send + 'static,
+	{
+		let boxed: Job = Box::pin(job);
+		self.send_job
+			.send((boxed, false))
+			.ok()
+			.expect("Could not put job in queue");
+	}
+
+	/// Spawn a task to be run in background. It may get discarded before running if spawned while
+	/// the runner is stopping
+	pub fn spawn_cancellable<T>(&self, job: T)
+	where
+		T: Future<Output = JobOutput> + Send + 'static,
+	{
+		let boxed: Job = Box::pin(job);
+		self.send_job
+			.send((boxed, true))
+			.ok()
+			.expect("Could not put job in queue");
+	}
+
 	pub fn spawn_worker<W>(&self, worker: W)
 	where
 		W: Worker + 'static,
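With WorkerStatus gone, worker reporting collapses back to a free-form Option<String>: workers with nothing to say return None, which the CLI can skip. A runnable sketch of the reverted convention used by the GC, Merkle and sync workers above:

    // Reverted convention: a human-readable one-liner, or None when idle.
    fn info_line(queue_len: usize) -> Option<String> {
        if queue_len > 0 {
            Some(format!("{} items in queue", queue_len))
        } else {
            None
        }
    }

    fn main() {
        assert_eq!(info_line(0), None);
        assert_eq!(info_line(3).as_deref(), Some("3 items in queue"));
    }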
--- a/src/util/background/vars.rs
+++ /dev/null
@@ -1,113 +0,0 @@
-use std::collections::HashMap;
-use std::str::FromStr;
-
-use crate::error::{Error, OkOrMessage};
-use crate::migrate::Migrate;
-use crate::persister::PersisterShared;
-
-pub struct BgVars {
-	vars: HashMap<&'static str, Box<dyn BgVarTrait>>,
-}
-
-impl BgVars {
-	pub fn new() -> Self {
-		Self {
-			vars: HashMap::new(),
-		}
-	}
-
-	pub fn register_rw<V, T, GF, SF>(
-		&mut self,
-		p: &PersisterShared<V>,
-		name: &'static str,
-		get_fn: GF,
-		set_fn: SF,
-	) where
-		V: Migrate + Default + Send + Sync,
-		T: FromStr + ToString + Send + Sync + 'static,
-		GF: Fn(&PersisterShared<V>) -> T + Send + Sync + 'static,
-		SF: Fn(&PersisterShared<V>, T) -> Result<(), Error> + Send + Sync + 'static,
-	{
-		let p1 = p.clone();
-		let get_fn = move || get_fn(&p1);
-
-		let p2 = p.clone();
-		let set_fn = move |v| set_fn(&p2, v);
-
-		self.vars.insert(name, Box::new(BgVar { get_fn, set_fn }));
-	}
-
-	pub fn register_ro<V, T, GF>(&mut self, p: &PersisterShared<V>, name: &'static str, get_fn: GF)
-	where
-		V: Migrate + Default + Send + Sync,
-		T: FromStr + ToString + Send + Sync + 'static,
-		GF: Fn(&PersisterShared<V>) -> T + Send + Sync + 'static,
-	{
-		let p1 = p.clone();
-		let get_fn = move || get_fn(&p1);
-
-		let set_fn = move |_| Err(Error::Message(format!("Cannot set value of {}", name)));
-
-		self.vars.insert(name, Box::new(BgVar { get_fn, set_fn }));
-	}
-
-	pub fn get(&self, var: &str) -> Result<String, Error> {
-		Ok(self
-			.vars
-			.get(var)
-			.ok_or_message("variable does not exist")?
-			.get())
-	}
-
-	pub fn get_all(&self) -> Vec<(&'static str, String)> {
-		self.vars.iter().map(|(k, v)| (*k, v.get())).collect()
-	}
-
-	pub fn set(&self, var: &str, val: &str) -> Result<(), Error> {
-		self.vars
-			.get(var)
-			.ok_or_message("variable does not exist")?
-			.set(val)
-	}
-}
-
-impl Default for BgVars {
-	fn default() -> Self {
-		Self::new()
-	}
-}
-
-// ----
-
-trait BgVarTrait: Send + Sync + 'static {
-	fn get(&self) -> String;
-	fn set(&self, v: &str) -> Result<(), Error>;
-}
-
-struct BgVar<T, GF, SF>
-where
-	T: FromStr + ToString + Send + Sync + 'static,
-	GF: Fn() -> T + Send + Sync + 'static,
-	SF: Fn(T) -> Result<(), Error> + Sync + Send + 'static,
-{
-	get_fn: GF,
-	set_fn: SF,
-}
-
-impl<T, GF, SF> BgVarTrait for BgVar<T, GF, SF>
-where
-	T: FromStr + ToString + Sync + Send + 'static,
-	GF: Fn() -> T + Sync + Send + 'static,
-	SF: Fn(T) -> Result<(), Error> + Sync + Send + 'static,
-{
-	fn get(&self) -> String {
-		(self.get_fn)().to_string()
-	}
-
-	fn set(&self, vstr: &str) -> Result<(), Error> {
-		let value = vstr
-			.parse()
-			.map_err(|_| Error::Message(format!("invalid value: {}", vstr)))?;
-		(self.set_fn)(value)
-	}
-}
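vars.rs, which only exists on the 0.8.1 side and is removed here, type-erases typed get/set closures behind a trait object so the CLI can read and write named runtime variables as strings. The core trick survives a stripped-down, runnable reproduction with the persister layer left out and String replacing garage's Error type:

    use std::collections::HashMap;

    // Each variable is a pair of type-erased closures; parsing and printing
    // happen at the boundary so the registry itself only deals in strings.
    struct Var {
        get_fn: Box<dyn Fn() -> String>,
        set_fn: Box<dyn Fn(&str) -> Result<(), String>>,
    }

    fn main() {
        use std::sync::atomic::{AtomicU32, Ordering};
        use std::sync::Arc;

        let tranquility = Arc::new(AtomicU32::new(2));
        let (t_get, t_set) = (tranquility.clone(), tranquility.clone());

        let mut vars: HashMap<&'static str, Var> = HashMap::new();
        vars.insert(
            "resync-tranquility", // invented variable name for the example
            Var {
                get_fn: Box::new(move || t_get.load(Ordering::Relaxed).to_string()),
                set_fn: Box::new(move |s| {
                    let v: u32 = s.parse().map_err(|_| format!("invalid value: {}", s))?;
                    t_set.store(v, Ordering::Relaxed);
                    Ok(())
                }),
            },
        );

        (vars["resync-tranquility"].set_fn)("5").unwrap();
        assert_eq!((vars["resync-tranquility"].get_fn)(), "5");
    }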
--- a/src/util/background/worker.rs
+++ b/src/util/background/worker.rs
@@ -1,6 +1,6 @@
 use std::collections::HashMap;
 use std::sync::Arc;
-use std::time::Duration;
+use std::time::{Duration, Instant};
 
 use async_trait::async_trait;
 use futures::future::*;
@@ -10,14 +10,10 @@ use serde::{Deserialize, Serialize};
 use tokio::select;
 use tokio::sync::{mpsc, watch};
 
-use crate::background::{WorkerInfo, WorkerStatus};
+use crate::background::WorkerInfo;
 use crate::error::Error;
 use crate::time::now_msec;
 
-// All workers that haven't exited for this time after an exit signal was recieved
-// will be interrupted in the middle of whatever they are doing.
-const EXIT_DEADLINE: Duration = Duration::from_secs(8);
-
 #[derive(PartialEq, Copy, Clone, Serialize, Deserialize, Debug)]
 pub enum WorkerState {
 	Busy,
@@ -30,7 +26,7 @@ impl std::fmt::Display for WorkerState {
 	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
 		match self {
 			WorkerState::Busy => write!(f, "Busy"),
-			WorkerState::Throttled(_) => write!(f, "Busy*"),
+			WorkerState::Throttled(t) => write!(f, "Thr:{:.3}", t),
 			WorkerState::Idle => write!(f, "Idle"),
 			WorkerState::Done => write!(f, "Done"),
 		}
@@ -41,8 +37,8 @@ impl std::fmt::Display for WorkerState {
 pub trait Worker: Send {
 	fn name(&self) -> String;
 
-	fn status(&self) -> WorkerStatus {
-		Default::default()
+	fn info(&self) -> Option<String> {
+		None
 	}
 
 	/// Work: do a basic unit of work, if one is available (otherwise, should return
@@ -54,8 +50,10 @@ pub trait Worker: Send {
 	async fn work(&mut self, must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error>;
 
 	/// Wait for work: await for some task to become available. This future can be interrupted in
-	/// the middle for any reason, for example if an interrupt signal was recieved.
-	async fn wait_for_work(&mut self) -> WorkerState;
+	/// the middle for any reason. This future doesn't have to await on must_exit.changed(), we
+	/// are doing it for you. Therefore it only receives a read refernce to must_exit which allows
+	/// it to check if we are exiting.
+	async fn wait_for_work(&mut self, must_exit: &watch::Receiver<bool>) -> WorkerState;
 }
 
 pub(crate) struct WorkerProcessor {
@@ -95,9 +93,11 @@ impl WorkerProcessor {
 				let task_id = next_task_id;
 				next_task_id += 1;
 				let stop_signal = self.stop_signal.clone();
+				let stop_signal_worker = self.stop_signal.clone();
 				let mut worker = WorkerHandler {
 					task_id,
 					stop_signal,
+					stop_signal_worker,
 					worker: new_worker,
 					state: WorkerState::Busy,
 					errors: 0,
@@ -119,7 +119,7 @@ impl WorkerProcessor {
 						match wi.get_mut(&worker.task_id) {
 							Some(i) => {
 								i.state = worker.state;
-								i.status = worker.worker.status();
+								i.info = worker.worker.info();
 								i.errors = worker.errors;
 								i.consecutive_errors = worker.consecutive_errors;
 								if worker.last_error.is_some() {
@@ -130,7 +130,7 @@ impl WorkerProcessor {
 								wi.insert(worker.task_id, WorkerInfo {
 									name: worker.worker.name(),
 									state: worker.state,
-									status: worker.worker.status(),
+									info: worker.worker.info(),
 									errors: worker.errors,
 									consecutive_errors: worker.consecutive_errors,
 									last_error: worker.last_error.take(),
@@ -153,14 +153,26 @@ impl WorkerProcessor {
 		}
 
 		// We are exiting, drain everything
+		let drain_half_time = Instant::now() + Duration::from_secs(5);
 		let drain_everything = async move {
-			while let Some(worker) = workers.next().await {
-				info!(
-					"Worker {} (TID {}) exited (last state: {:?})",
-					worker.worker.name(),
-					worker.task_id,
-					worker.state
-				);
+			while let Some(mut worker) = workers.next().await {
+				if worker.state == WorkerState::Done {
+					info!(
+						"Worker {} (TID {}) exited",
+						worker.worker.name(),
+						worker.task_id
+					);
+				} else if Instant::now() > drain_half_time {
+					warn!("Worker {} (TID {}) interrupted between two iterations in state {:?} (this should be fine)", worker.worker.name(), worker.task_id, worker.state);
+				} else {
+					workers.push(
+						async move {
+							worker.step().await;
+							worker
+						}
+						.boxed(),
+					);
+				}
 			}
 		};
 
@@ -168,7 +180,7 @@ impl WorkerProcessor {
 			_ = drain_everything => {
 				info!("All workers exited peacefully \\o/");
 			}
-			_ = tokio::time::sleep(EXIT_DEADLINE) => {
+			_ = tokio::time::sleep(Duration::from_secs(9)) => {
 				error!("Some workers could not exit in time, we are cancelling some things in the middle");
 			}
 		}
@@ -178,6 +190,7 @@ impl WorkerProcessor {
 struct WorkerHandler {
 	task_id: usize,
 	stop_signal: watch::Receiver<bool>,
+	stop_signal_worker: watch::Receiver<bool>,
 	worker: Box<dyn Worker>,
 	state: WorkerState,
 	errors: usize,
@@ -212,19 +225,33 @@ impl WorkerHandler {
 			},
 			WorkerState::Throttled(delay) => {
 				// Sleep for given delay and go back to busy state
-				select! {
-					_ = tokio::time::sleep(Duration::from_secs_f32(delay)) => {
-						self.state = WorkerState::Busy;
+				if !*self.stop_signal.borrow() {
+					select! {
+						_ = tokio::time::sleep(Duration::from_secs_f32(delay)) => (),
+						_ = self.stop_signal.changed() => (),
 					}
-					_ = self.stop_signal.changed() => (),
 				}
+				self.state = WorkerState::Busy;
 			}
 			WorkerState::Idle => {
-				select! {
-					new_st = self.worker.wait_for_work() => {
-						self.state = new_st;
+				if *self.stop_signal.borrow() {
+					select! {
+						new_st = self.worker.wait_for_work(&self.stop_signal_worker) => {
+							self.state = new_st;
+						}
+						_ = tokio::time::sleep(Duration::from_secs(1)) => {
+							// stay in Idle state
+						}
+					}
+				} else {
+					select! {
+						new_st = self.worker.wait_for_work(&self.stop_signal_worker) => {
+							self.state = new_st;
+						}
+						_ = self.stop_signal.changed() => {
+							// stay in Idle state
+						}
 					}
-					_ = self.stop_signal.changed() => (),
 				}
 			}
 			WorkerState::Done => unreachable!(),
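The shutdown sequence above is a two-stage drain: finished workers are logged, unfinished ones are re-polled until a soft deadline, and a hard select! deadline cuts everything off. The select-with-timeout skeleton is reusable on its own; a runnable reduction with a sleep standing in for the real drain loop:

    use std::time::Duration;
    use tokio::select;

    #[tokio::main]
    async fn main() {
        let drain_everything = async {
            // stand-in for stepping remaining workers until they report Done
            tokio::time::sleep(Duration::from_secs(1)).await;
        };

        select! {
            _ = drain_everything => {
                println!("All workers exited peacefully \\o/");
            }
            _ = tokio::time::sleep(Duration::from_secs(9)) => {
                eprintln!("Some workers could not exit in time, cancelling them mid-flight");
            }
        }
    }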
--- a/src/util/config.rs
+++ b/src/util/config.rs
@@ -34,11 +34,7 @@ pub struct Config {
 	pub compression_level: Option<i32>,
 
 	/// RPC secret key: 32 bytes hex encoded
-	/// Note: When using `read_config` this should never be `None`
-	pub rpc_secret: Option<String>,
-
-	/// Optional file where RPC secret key is read from
-	pub rpc_secret_file: Option<String>,
+	pub rpc_secret: String,
 
 	/// Address to bind for RPC
 	pub rpc_bind_addr: SocketAddr,
@@ -181,31 +177,7 @@ pub fn read_config(config_file: PathBuf) -> Result<Config, Error> {
 	let mut config = String::new();
 	file.read_to_string(&mut config)?;
 
-	let mut parsed_config: Config = toml::from_str(&config)?;
-
-	match (&parsed_config.rpc_secret, &parsed_config.rpc_secret_file) {
-		(Some(_), None) => {
-			// no-op
-		}
-		(Some(_), Some(_)) => {
-			return Err("only one of `rpc_secret` and `rpc_secret_file` can be set".into())
-		}
-		(None, Some(rpc_secret_file_path_string)) => {
-			let mut rpc_secret_file = std::fs::OpenOptions::new()
-				.read(true)
-				.open(rpc_secret_file_path_string)?;
-			let mut rpc_secret_from_file = String::new();
-			rpc_secret_file.read_to_string(&mut rpc_secret_from_file)?;
-			// trim_end: allows for use case such as `echo "$(openssl rand -hex 32)" > somefile`.
-			// also editors sometimes add a trailing newline
-			parsed_config.rpc_secret = Some(String::from(rpc_secret_from_file.trim_end()));
-		}
-		(None, None) => {
-			return Err("either `rpc_secret` or `rpc_secret_file` needs to be set".into())
-		}
-	};
-
-	Ok(parsed_config)
+	Ok(toml::from_str(&config)?)
 }
 
 fn default_compression() -> Option<i32> {
@@ -261,123 +233,3 @@ where
 
 	deserializer.deserialize_any(OptionVisitor)
 }
-
-#[cfg(test)]
-mod tests {
-	use crate::error::Error;
-	use std::fs::File;
-	use std::io::Write;
-
-	#[test]
-	fn test_rpc_secret_is_required() -> Result<(), Error> {
-		let path1 = mktemp::Temp::new_file()?;
-		let mut file1 = File::create(path1.as_path())?;
-		writeln!(
-			file1,
-			r#"
-			metadata_dir = "/tmp/garage/meta"
-			data_dir = "/tmp/garage/data"
-			replication_mode = "3"
-			rpc_bind_addr = "[::]:3901"
-
-			[s3_api]
-			s3_region = "garage"
-			api_bind_addr = "[::]:3900"
-			"#
-		)?;
-		assert_eq!(
-			"either `rpc_secret` or `rpc_secret_file` needs to be set",
-			super::read_config(path1.to_path_buf())
-				.unwrap_err()
-				.to_string()
-		);
-		drop(path1);
-		drop(file1);
-
-		let path2 = mktemp::Temp::new_file()?;
-		let mut file2 = File::create(path2.as_path())?;
-		writeln!(
-			file2,
-			r#"
-			metadata_dir = "/tmp/garage/meta"
-			data_dir = "/tmp/garage/data"
-			replication_mode = "3"
-			rpc_bind_addr = "[::]:3901"
-			rpc_secret = "foo"
-
-			[s3_api]
-			s3_region = "garage"
-			api_bind_addr = "[::]:3900"
-			"#
-		)?;
-
-		let config = super::read_config(path2.to_path_buf())?;
-		assert_eq!("foo", config.rpc_secret.unwrap());
-		drop(path2);
-		drop(file2);
-
-		Ok(())
-	}
-
-	#[test]
-	fn test_rpc_secret_file_works() -> Result<(), Error> {
-		let path_secret = mktemp::Temp::new_file()?;
-		let mut file_secret = File::create(path_secret.as_path())?;
-		writeln!(file_secret, "foo")?;
-		drop(file_secret);
-
-		let path_config = mktemp::Temp::new_file()?;
-		let mut file_config = File::create(path_config.as_path())?;
-		let path_secret_path = path_secret.as_path().display();
-		writeln!(
-			file_config,
-			r#"
-			metadata_dir = "/tmp/garage/meta"
-			data_dir = "/tmp/garage/data"
-			replication_mode = "3"
-			rpc_bind_addr = "[::]:3901"
-			rpc_secret_file = "{path_secret_path}"
-
-			[s3_api]
-			s3_region = "garage"
-			api_bind_addr = "[::]:3900"
-			"#
-		)?;
-		let config = super::read_config(path_config.to_path_buf())?;
-		assert_eq!("foo", config.rpc_secret.unwrap());
-		drop(path_config);
-		drop(path_secret);
-		drop(file_config);
-		Ok(())
-	}
-
-	#[test]
-	fn test_rcp_secret_and_rpc_secret_file_cannot_be_set_both() -> Result<(), Error> {
-		let path_config = mktemp::Temp::new_file()?;
-		let mut file_config = File::create(path_config.as_path())?;
-		writeln!(
-			file_config,
-			r#"
-			metadata_dir = "/tmp/garage/meta"
-			data_dir = "/tmp/garage/data"
-			replication_mode = "3"
-			rpc_bind_addr = "[::]:3901"
-			rpc_secret= "dummy"
-			rpc_secret_file = "dummy"
-
-			[s3_api]
-			s3_region = "garage"
-			api_bind_addr = "[::]:3900"
-			"#
-		)?;
-		assert_eq!(
-			"only one of `rpc_secret` and `rpc_secret_file` can be set",
-			super::read_config(path_config.to_path_buf())
-				.unwrap_err()
-				.to_string()
-		);
-		drop(path_config);
-		drop(file_config);
-		Ok(())
-	}
-}
src/util/data.rs
@@ -140,3 +140,34 @@ pub fn fasthash(data: &[u8]) -> FastHash {
 pub fn gen_uuid() -> Uuid {
 	rand::thread_rng().gen::<[u8; 32]>().into()
 }
+
+// RMP serialization with names of fields and variants
+
+/// Serialize to MessagePack
+pub fn rmp_to_vec_all_named<T>(val: &T) -> Result<Vec<u8>, rmp_serde::encode::Error>
+where
+	T: Serialize + ?Sized,
+{
+	let mut wr = Vec::with_capacity(128);
+	let mut se = rmp_serde::Serializer::new(&mut wr)
+		.with_struct_map()
+		.with_string_variants();
+	val.serialize(&mut se)?;
+	Ok(wr)
+}
+
+/// Serialize to JSON, truncating long result
+pub fn debug_serialize<T: Serialize>(x: T) -> String {
+	match serde_json::to_string(&x) {
+		Ok(ss) => {
+			if ss.len() > 100 {
+				// TODO this can panic if 100 is not a codepoint boundary, but inside a 2 Bytes
+				// (or more) codepoint
+				ss[..100].to_string()
+			} else {
+				ss
+			}
+		}
+		Err(e) => format!("<JSON serialization error: {}>", e),
+	}
+}
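What `.with_struct_map()` buys: field names are embedded in the MessagePack output, so decoding does not depend on field order and tolerates schema evolution. A quick round-trip sketch (the `Example` type is made up; `from_read_ref` matches the rmp-serde API this code already uses):

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug)]
struct Example {
    id: u64,
    name: String,
}

fn roundtrip() -> Result<(), Box<dyn std::error::Error>> {
    let val = Example { id: 7, name: "hello".into() };

    // Same encoding setup as rmp_to_vec_all_named above
    let mut buf = Vec::with_capacity(128);
    let mut se = rmp_serde::Serializer::new(&mut buf)
        .with_struct_map()
        .with_string_variants();
    val.serialize(&mut se)?;

    // Field names are in the payload, so this decode is position-independent
    let back: Example = rmp_serde::decode::from_read_ref(&buf)?;
    println!("{:?}", back);
    Ok(())
}
```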
src/util/encode.rs
@@ -1,42 +0,0 @@
-use serde::{Deserialize, Serialize};
-
-/// Serialize to MessagePack, without versioning
-/// (see garage_util::migrate for functions that manage versioned
-/// data formats)
-pub fn nonversioned_encode<T>(val: &T) -> Result<Vec<u8>, rmp_serde::encode::Error>
-where
-	T: Serialize + ?Sized,
-{
-	let mut wr = Vec::with_capacity(128);
-	let mut se = rmp_serde::Serializer::new(&mut wr)
-		.with_struct_map()
-		.with_string_variants();
-	val.serialize(&mut se)?;
-	Ok(wr)
-}
-
-/// Deserialize from MessagePack, without versioning
-/// (see garage_util::migrate for functions that manage versioned
-/// data formats)
-pub fn nonversioned_decode<T>(bytes: &[u8]) -> Result<T, rmp_serde::decode::Error>
-where
-	T: for<'de> Deserialize<'de> + ?Sized,
-{
-	rmp_serde::decode::from_read_ref::<_, T>(bytes)
-}
-
-/// Serialize to JSON, truncating long result
-pub fn debug_serialize<T: Serialize>(x: T) -> String {
-	match serde_json::to_string(&x) {
-		Ok(ss) => {
-			if ss.len() > 100 {
-				// TODO this can panic if 100 is not a codepoint boundary, but inside a 2 Bytes
-				// (or more) codepoint
-				ss[..100].to_string()
-			} else {
-				ss
-			}
-		}
-		Err(e) => format!("<JSON serialization error: {}>", e),
-	}
-}
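The `TODO` in `debug_serialize` flags a real pitfall: `ss[..100]` panics if byte 100 falls inside a multi-byte UTF-8 codepoint. A boundary-safe truncation looks like this (a sketch, not what either commit ships):

```rust
/// Truncate a string to at most `max` bytes without splitting a codepoint.
fn truncate_at_boundary(s: &str, max: usize) -> &str {
    if s.len() <= max {
        return s;
    }
    // Walk back from `max` to the nearest char boundary (at most 3 steps)
    let mut end = max;
    while !s.is_char_boundary(end) {
        end -= 1;
    }
    &s[..end]
}

fn main() {
    // 'é' is 2 bytes in UTF-8; a naive `&s[..3]` would panic here
    let s = "ééé";
    assert_eq!(truncate_at_boundary(s, 3), "é");
}
```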
src/util/error.rs
@@ -7,7 +7,6 @@ use err_derive::Error;
 use serde::{de::Visitor, Deserialize, Deserializer, Serialize, Serializer};
 
 use crate::data::*;
-use crate::encode::debug_serialize;
 
 /// Regroup all Garage errors
 #[derive(Debug, Error)]
src/util/formater.rs
@@ -1,4 +1,4 @@
-pub fn format_table_to_string(data: Vec<String>) -> String {
+pub fn format_table(data: Vec<String>) {
 	let data = data
 		.iter()
 		.map(|s| s.split('\t').collect::<Vec<_>>())
@@ -24,9 +24,5 @@ pub fn format_table_to_string(data: Vec<String>) -> String {
 		out.push('\n');
 	}
 
-	out
-}
-
-pub fn format_table(data: Vec<String>) {
-	print!("{}", format_table_to_string(data));
+	print!("{}", out);
 }
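`format_table` (and, on the other side, `format_table_to_string`) expects one string per row, with `'\t'` separating the columns that get padded to equal width. An illustrative call (the row contents are made up):

```rust
let rows = vec![
    "NAME\tZONE\tCAPACITY".to_string(),
    "node1\tparis\t100".to_string(),
    "node2\tberlin\t150".to_string(),
];
format_table(rows); // prints the three rows with aligned columns
```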
src/util/lib.rs
@@ -8,12 +8,11 @@ pub mod background;
 pub mod config;
 pub mod crdt;
 pub mod data;
-pub mod encode;
 pub mod error;
 pub mod formater;
 pub mod metrics;
-pub mod migrate;
 pub mod persister;
 pub mod time;
+pub mod token_bucket;
 pub mod tranquilizer;
 pub mod version;
src/util/migrate.rs
@@ -1,159 +0,0 @@
-use serde::{Deserialize, Serialize};
-
-/// Indicates that this type has an encoding that can be migrated from
-/// a previous version upon upgrades of Garage.
-pub trait Migrate: Serialize + for<'de> Deserialize<'de> + 'static {
-	/// A sequence of bytes to add at the beginning of the serialized
-	/// string, to identify that the data is of this version.
-	const VERSION_MARKER: &'static [u8] = b"";
-
-	/// The previous version of this data type, from which items of this version
-	/// can be migrated.
-	type Previous: Migrate;
-
-	/// The migration function that transforms a value decoded in the old format
-	/// to an up-to-date value.
-	fn migrate(previous: Self::Previous) -> Self;
-
-	/// Decode an encoded version of this type, going through a migration if necessary.
-	fn decode(bytes: &[u8]) -> Option<Self> {
-		let marker_len = Self::VERSION_MARKER.len();
-		if bytes.get(..marker_len) == Some(Self::VERSION_MARKER) {
-			if let Ok(value) = rmp_serde::decode::from_read_ref::<_, Self>(&bytes[marker_len..]) {
-				return Some(value);
-			}
-		}
-
-		Self::Previous::decode(bytes).map(Self::migrate)
-	}
-
-	/// Encode this type with optional version marker
-	fn encode(&self) -> Result<Vec<u8>, rmp_serde::encode::Error> {
-		let mut wr = Vec::with_capacity(128);
-		wr.extend_from_slice(Self::VERSION_MARKER);
-		let mut se = rmp_serde::Serializer::new(&mut wr)
-			.with_struct_map()
-			.with_string_variants();
-		self.serialize(&mut se)?;
-		Ok(wr)
-	}
-}
-
-/// Indicates that this type has no previous encoding version to be migrated from.
-pub trait InitialFormat: Serialize + for<'de> Deserialize<'de> + 'static {
-	/// A sequence of bytes to add at the beginning of the serialized
-	/// string, to identify that the data is of this version.
-	const VERSION_MARKER: &'static [u8] = b"";
-}
-
-impl<T: InitialFormat> Migrate for T {
-	const VERSION_MARKER: &'static [u8] = <T as InitialFormat>::VERSION_MARKER;
-
-	type Previous = NoPrevious;
-
-	fn migrate(_previous: Self::Previous) -> Self {
-		unreachable!();
-	}
-}
-
-/// Internal type used by InitialFormat, not meant for general use.
-#[derive(Serialize, Deserialize)]
-pub enum NoPrevious {}
-
-impl Migrate for NoPrevious {
-	type Previous = NoPrevious;
-
-	fn migrate(_previous: Self::Previous) -> Self {
-		unreachable!();
-	}
-
-	fn decode(_bytes: &[u8]) -> Option<Self> {
-		None
-	}
-
-	fn encode(&self) -> Result<Vec<u8>, rmp_serde::encode::Error> {
-		unreachable!()
-	}
-}
-
-#[cfg(test)]
-mod test {
-	use super::*;
-
-	#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)]
-	struct V1 {
-		a: usize,
-		b: String,
-	}
-	impl InitialFormat for V1 {}
-
-	#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)]
-	struct V2 {
-		a: usize,
-		b: Vec<String>,
-		c: String,
-	}
-	impl Migrate for V2 {
-		const VERSION_MARKER: &'static [u8] = b"GtestV2";
-		type Previous = V1;
-		fn migrate(prev: V1) -> V2 {
-			V2 {
-				a: prev.a,
-				b: vec![prev.b],
-				c: String::new(),
-			}
-		}
-	}
-
-	#[test]
-	fn test_v1() {
-		let x = V1 {
-			a: 12,
-			b: "hello".into(),
-		};
-		let x_enc = x.encode().unwrap();
-		let y = V1::decode(&x_enc).unwrap();
-		assert_eq!(x, y);
-	}
-
-	#[test]
-	fn test_v2() {
-		let x = V2 {
-			a: 12,
-			b: vec!["hello".into(), "world".into()],
-			c: "plop".into(),
-		};
-		let x_enc = x.encode().unwrap();
-		assert_eq!(&x_enc[..V2::VERSION_MARKER.len()], V2::VERSION_MARKER);
-		let y = V2::decode(&x_enc).unwrap();
-		assert_eq!(x, y);
-	}
-
-	#[test]
-	fn test_migrate() {
-		let x = V1 {
-			a: 12,
-			b: "hello".into(),
-		};
-		let x_enc = x.encode().unwrap();
-
-		let xx = V1::decode(&x_enc).unwrap();
-		assert_eq!(x, xx);
-
-		let y = V2::decode(&x_enc).unwrap();
-		assert_eq!(
-			y,
-			V2 {
-				a: 12,
-				b: vec!["hello".into()],
-				c: "".into(),
-			}
-		);
-
-		let y_enc = y.encode().unwrap();
-		assert_eq!(&y_enc[..V2::VERSION_MARKER.len()], V2::VERSION_MARKER);
-
-		let z = V2::decode(&y_enc).unwrap();
-		assert_eq!(y, z);
-	}
-}
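The `decode` fallback above is recursive: when the version marker does not match, decoding is retried with `Self::Previous`, and the result is mapped through `migrate`. Chains of versions therefore compose automatically; a hypothetical `V3` extending the test types above (not in the source) would decode `V1`-era bytes through two migration steps:

```rust
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)]
struct V3 {
    a: usize,
    b: Vec<String>,
    c: String,
    d: bool,
}

impl Migrate for V3 {
    const VERSION_MARKER: &'static [u8] = b"GtestV3";
    type Previous = V2;
    fn migrate(prev: V2) -> V3 {
        V3 {
            a: prev.a,
            b: prev.b,
            c: prev.c,
            d: false, // a new field gets a default on migration
        }
    }
}

// V3::decode(&v1_bytes) tries V3 (marker mismatch), then V2 (mismatch),
// then V1 (plain decode), and applies V2::migrate followed by V3::migrate.
```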
src/util/persister.rs
@@ -1,19 +1,23 @@
 use std::io::{Read, Write};
 use std::path::{Path, PathBuf};
-use std::sync::{Arc, RwLock};
 
 use tokio::io::{AsyncReadExt, AsyncWriteExt};
 
-use crate::error::Error;
-use crate::migrate::Migrate;
+use serde::{Deserialize, Serialize};
 
-pub struct Persister<T: Migrate> {
+use crate::data::*;
+use crate::error::Error;
+
+pub struct Persister<T: Serialize + for<'de> Deserialize<'de>> {
 	path: PathBuf,
 
 	_marker: std::marker::PhantomData<T>,
 }
 
-impl<T: Migrate> Persister<T> {
+impl<T> Persister<T>
+where
+	T: Serialize + for<'de> Deserialize<'de>,
+{
 	pub fn new(base_dir: &Path, file_name: &str) -> Self {
 		let mut path = base_dir.to_path_buf();
 		path.push(file_name);
@@ -23,37 +27,18 @@ impl<T: Migrate> Persister<T> {
 		}
 	}
 
-	fn decode(&self, bytes: &[u8]) -> Result<T, Error> {
-		match T::decode(bytes) {
-			Some(v) => Ok(v),
-			None => {
-				error!(
-					"Unable to decode persisted data file {}",
-					self.path.display()
-				);
-				for line in hexdump::hexdump_iter(bytes) {
-					debug!("{}", line);
-				}
-				Err(Error::Message(format!(
-					"Unable to decode persisted data file {}",
-					self.path.display()
-				)))
-			}
-		}
-	}
-
 	pub fn load(&self) -> Result<T, Error> {
 		let mut file = std::fs::OpenOptions::new().read(true).open(&self.path)?;
 
 		let mut bytes = vec![];
 		file.read_to_end(&mut bytes)?;
 
-		let value = self.decode(&bytes[..])?;
+		let value = rmp_serde::decode::from_read_ref(&bytes[..])?;
 		Ok(value)
 	}
 
 	pub fn save(&self, t: &T) -> Result<(), Error> {
-		let bytes = t.encode()?;
+		let bytes = rmp_to_vec_all_named(t)?;
 
 		let mut file = std::fs::OpenOptions::new()
 			.write(true)
@@ -72,12 +57,12 @@ impl<T: Migrate> Persister<T> {
 		let mut bytes = vec![];
 		file.read_to_end(&mut bytes).await?;
 
-		let value = self.decode(&bytes[..])?;
+		let value = rmp_serde::decode::from_read_ref(&bytes[..])?;
 		Ok(value)
 	}
 
 	pub async fn save_async(&self, t: &T) -> Result<(), Error> {
-		let bytes = t.encode()?;
+		let bytes = rmp_to_vec_all_named(t)?;
 
 		let mut file = tokio::fs::File::create(&self.path).await?;
 		file.write_all(&bytes[..]).await?;
@@ -85,36 +70,3 @@ impl<T: Migrate> Persister<T> {
 		Ok(())
 	}
 }
-
-pub struct PersisterShared<V: Migrate + Default>(Arc<(Persister<V>, RwLock<V>)>);
-
-impl<V: Migrate + Default> Clone for PersisterShared<V> {
-	fn clone(&self) -> PersisterShared<V> {
-		PersisterShared(self.0.clone())
-	}
-}
-
-impl<V: Migrate + Default> PersisterShared<V> {
-	pub fn new(base_dir: &Path, file_name: &str) -> Self {
-		let persister = Persister::new(base_dir, file_name);
-		let value = persister.load().unwrap_or_default();
-		Self(Arc::new((persister, RwLock::new(value))))
-	}
-
-	pub fn get_with<F, R>(&self, f: F) -> R
-	where
-		F: FnOnce(&V) -> R,
-	{
-		let value = self.0 .1.read().unwrap();
-		f(&value)
-	}
-
-	pub fn set_with<F>(&self, f: F) -> Result<(), Error>
-	where
-		F: FnOnce(&mut V),
-	{
-		let mut value = self.0 .1.write().unwrap();
-		f(&mut value);
-		self.0 .0.save(&value)
-	}
-}
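A sketch of how the removed `PersisterShared` is meant to be used by callers: load-or-default at startup, then read and mutate under its internal lock (the `BgState` type and its field are illustrative, not from the codebase):

```rust
use std::path::Path;

use serde::{Deserialize, Serialize};

use garage_util::error::Error;
use garage_util::migrate::InitialFormat;
use garage_util::persister::PersisterShared;

#[derive(Serialize, Deserialize, Default)]
struct BgState {
    scrub_tranquility: u32,
}
impl InitialFormat for BgState {}

fn example(base_dir: &Path) -> Result<(), Error> {
    // Loads the persisted value, or falls back to Default on first run
    let state: PersisterShared<BgState> = PersisterShared::new(base_dir, "bg_state");

    // Read under the shared RwLock
    let t = state.get_with(|s| s.scrub_tranquility);

    // Mutate, then persist to disk before releasing the write lock
    state.set_with(|s| s.scrub_tranquility = t + 1)?;
    Ok(())
}
```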
40 src/util/token_bucket.rs Normal file
@@ -0,0 +1,40 @@
+use std::time::{Duration, Instant};
+
+use tokio::time::sleep;
+
+pub struct TokenBucket {
+	// Replenish rate: number of tokens per second
+	replenish_rate: u64,
+	// Current number of tokens
+	tokens: u64,
+	// Last replenish time
+	last_replenish: Instant,
+}
+
+impl TokenBucket {
+	pub fn new(replenish_rate: u64) -> Self {
+		Self {
+			replenish_rate,
+			tokens: 0,
+			last_replenish: Instant::now(),
+		}
+	}
+
+	pub async fn take(&mut self, tokens: u64) {
+		while self.tokens < tokens {
+			let needed = tokens - self.tokens;
+			let delay = (needed as f64) / (self.replenish_rate as f64);
+			sleep(Duration::from_secs_f64(delay)).await;
+			self.replenish();
+		}
+		self.tokens -= tokens;
+	}
+
+	pub fn replenish(&mut self) {
+		let now = Instant::now();
+		let new_tokens =
+			((now - self.last_replenish).as_secs_f64() * (self.replenish_rate as f64)) as u64;
+		self.tokens += new_tokens;
+		self.last_replenish = now;
+	}
+}
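Typical use of the bucket: size tokens in bytes and `take()` before each send, so sustained throughput converges to `replenish_rate` bytes per second (the surrounding function is illustrative; a tokio runtime is assumed):

```rust
use garage_util::token_bucket::TokenBucket;

async fn send_throttled(chunks: Vec<Vec<u8>>) {
    // One token per byte: roughly 1 MiB/s sustained
    let mut bucket = TokenBucket::new(1024 * 1024);

    for chunk in chunks {
        // Sleeps until enough tokens have accumulated, then spends them
        bucket.take(chunk.len() as u64).await;
        // ... actually send `chunk` here ...
    }
}
```

Note that `tokens` starts at 0, so the very first `take()` always sleeps for roughly the time needed to earn the requested amount before proceeding.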
src/web/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "garage_web"
-version = "0.8.1"
+version = "0.8.0"
 authors = ["Alex Auvolat <alex@adnab.me>", "Quentin Dufour <quentin@dufour.io>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -14,10 +14,10 @@ path = "lib.rs"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-garage_api = { version = "0.8.1", path = "../api" }
-garage_model = { version = "0.8.1", path = "../model" }
-garage_util = { version = "0.8.1", path = "../util" }
-garage_table = { version = "0.8.1", path = "../table" }
+garage_api = { version = "0.8.0", path = "../api" }
+garage_model = { version = "0.8.0", path = "../model" }
+garage_util = { version = "0.8.0", path = "../util" }
+garage_table = { version = "0.8.0", path = "../table" }
 
 err-derive = "0.3"
 tracing = "0.1.30"