forked from Deuxfleurs/garage

Compare commits: internals-... → main (9 commits)

Commits:
76230f2028
6775569525
1649002e2b
822e344845
7f7d53cfa9
fd10200bec
0c7ed0b0af
1af4a5ed56
a0abf41762

42 changed files with 887 additions and 706 deletions
Cargo.lock (generated) | 19 changes

@@ -1048,7 +1048,7 @@ dependencies = [
 
 [[package]]
 name = "garage"
-version = "0.8.0"
+version = "0.8.1"
 dependencies = [
 "assert-json-diff",
 "async-trait",
@@ -1096,7 +1096,7 @@ dependencies = [
 
 [[package]]
 name = "garage_api"
-version = "0.8.0"
+version = "0.8.1"
 dependencies = [
 "async-trait",
 "base64",
@@ -1141,7 +1141,7 @@ dependencies = [
 
 [[package]]
 name = "garage_block"
-version = "0.8.0"
+version = "0.8.1"
 dependencies = [
 "arc-swap",
 "async-compression",
@@ -1167,7 +1167,7 @@ dependencies = [
 
 [[package]]
 name = "garage_db"
-version = "0.8.0"
+version = "0.8.1"
 dependencies = [
 "clap 3.1.18",
 "err-derive",
@@ -1182,7 +1182,7 @@ dependencies = [
 
 [[package]]
 name = "garage_model"
-version = "0.8.0"
+version = "0.8.1"
 dependencies = [
 "arc-swap",
 "async-trait",
@@ -1210,7 +1210,7 @@ dependencies = [
 
 [[package]]
 name = "garage_rpc"
-version = "0.8.0"
+version = "0.8.1"
 dependencies = [
 "arc-swap",
 "async-trait",
@@ -1241,9 +1241,8 @@ dependencies = [
 
 [[package]]
 name = "garage_table"
-version = "0.8.0"
+version = "0.8.1"
 dependencies = [
-"arc-swap",
 "async-trait",
 "bytes",
 "futures",
@@ -1264,7 +1263,7 @@ dependencies = [
 
 [[package]]
 name = "garage_util"
-version = "0.8.0"
+version = "0.8.1"
 dependencies = [
 "arc-swap",
 "async-trait",
@@ -1295,7 +1294,7 @@ dependencies = [
 
 [[package]]
 name = "garage_web"
-version = "0.8.0"
+version = "0.8.1"
 dependencies = [
 "err-derive",
 "futures",
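Every workspace crate above moves from 0.8.0 to 0.8.1 in lockstep. A throwaway check (our own sketch, not project tooling) to confirm no member was missed:

```python
# Throwaway helper (our own sketch, not project tooling): confirm every
# garage* package recorded in Cargo.lock carries the same, expected version.
import re
import sys

def check_workspace_versions(lock_text: str, expected: str = "0.8.1") -> bool:
    ok = True
    # In Cargo.lock, the `version` line directly follows the `name` line.
    for name, version in re.findall(
        r'name = "(garage[\w-]*)"\nversion = "([^"]+)"', lock_text
    ):
        if version != expected:
            print(f"{name} is at {version}, expected {expected}")
            ok = False
    return ok

if __name__ == "__main__":
    path = sys.argv[1] if len(sys.argv) > 1 else "Cargo.lock"
    with open(path) as f:
        sys.exit(0 if check_workspace_versions(f.read()) else 1)
```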
Cargo.nix | 121 changes

@@ -32,7 +32,7 @@ args@{
 ignoreLockHash,
 }:
 let
-nixifiedLockHash = "a1d84930f23d3d8abc8abbed59b8ce3c9adf9f25d06bc1f39cbdf5bd90aceead";
+nixifiedLockHash = "463114c4544bfa9b442a43afc6b39eb588f5720825c7a246ba9188c4bdb52944";
 workspaceSrc = if args.workspaceSrc == null then ./. else args.workspaceSrc;
 currentLockHash = builtins.hashFile "sha256" (workspaceSrc + /Cargo.lock);
 lockHashIgnored = if ignoreLockHash
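The hash swap above is the cargo2nix staleness guard: `nixifiedLockHash` is pinned when Cargo.nix is generated, then compared at evaluation time against `currentLockHash` (the SHA-256 of the current Cargo.lock) unless `ignoreLockHash` is set, which is why this line changes whenever Cargo.lock does. A rough Python equivalent of the check (our own sketch, not part of the changeset):

```python
# Rough stand-in for the Cargo.nix guard shown above: the hash pinned at
# generation time must match the current Cargo.lock, unless the caller
# explicitly opts out.
import hashlib

NIXIFIED_LOCK_HASH = "463114c4544bfa9b442a43afc6b39eb588f5720825c7a246ba9188c4bdb52944"

def check_lock_hash(lock_path: str, ignore_lock_hash: bool = False) -> None:
    with open(lock_path, "rb") as f:
        current = hashlib.sha256(f.read()).hexdigest()
    if not ignore_lock_hash and current != NIXIFIED_LOCK_HASH:
        raise RuntimeError("Cargo.nix is out of date; regenerate it from Cargo.lock")
```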
@@ -56,15 +56,15 @@ in
 {
 cargo2nixVersion = "0.11.0";
 workspace = {
-garage_db = rustPackages.unknown.garage_db."0.8.0";
-garage_util = rustPackages.unknown.garage_util."0.8.0";
-garage_rpc = rustPackages.unknown.garage_rpc."0.8.0";
-garage_table = rustPackages.unknown.garage_table."0.8.0";
-garage_block = rustPackages.unknown.garage_block."0.8.0";
-garage_model = rustPackages.unknown.garage_model."0.8.0";
-garage_api = rustPackages.unknown.garage_api."0.8.0";
-garage_web = rustPackages.unknown.garage_web."0.8.0";
-garage = rustPackages.unknown.garage."0.8.0";
+garage_db = rustPackages.unknown.garage_db."0.8.1";
+garage_util = rustPackages.unknown.garage_util."0.8.1";
+garage_rpc = rustPackages.unknown.garage_rpc."0.8.1";
+garage_table = rustPackages.unknown.garage_table."0.8.1";
+garage_block = rustPackages.unknown.garage_block."0.8.1";
+garage_model = rustPackages.unknown.garage_model."0.8.1";
+garage_api = rustPackages.unknown.garage_api."0.8.1";
+garage_web = rustPackages.unknown.garage_web."0.8.1";
+garage = rustPackages.unknown.garage."0.8.1";
 k2v-client = rustPackages.unknown.k2v-client."0.0.1";
 };
 "registry+https://github.com/rust-lang/crates.io-index".addr2line."0.17.0" = overridableMkRustCrate (profileName: rec {
@@ -1494,9 +1494,9 @@ in
 };
 });
 
-"unknown".garage."0.8.0" = overridableMkRustCrate (profileName: rec {
+"unknown".garage."0.8.1" = overridableMkRustCrate (profileName: rec {
 name = "garage";
-version = "0.8.0";
+version = "0.8.1";
 registry = "unknown";
 src = fetchCrateLocal (workspaceSrc + "/src/garage");
 features = builtins.concatLists [
@@ -1522,14 +1522,14 @@ in
 bytesize = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytesize."1.1.0" { inherit profileName; }).out;
 futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
 futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }).out;
-garage_api = (rustPackages."unknown".garage_api."0.8.0" { inherit profileName; }).out;
-garage_block = (rustPackages."unknown".garage_block."0.8.0" { inherit profileName; }).out;
-garage_db = (rustPackages."unknown".garage_db."0.8.0" { inherit profileName; }).out;
-garage_model = (rustPackages."unknown".garage_model."0.8.0" { inherit profileName; }).out;
-garage_rpc = (rustPackages."unknown".garage_rpc."0.8.0" { inherit profileName; }).out;
-garage_table = (rustPackages."unknown".garage_table."0.8.0" { inherit profileName; }).out;
-garage_util = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out;
-garage_web = (rustPackages."unknown".garage_web."0.8.0" { inherit profileName; }).out;
+garage_api = (rustPackages."unknown".garage_api."0.8.1" { inherit profileName; }).out;
+garage_block = (rustPackages."unknown".garage_block."0.8.1" { inherit profileName; }).out;
+garage_db = (rustPackages."unknown".garage_db."0.8.1" { inherit profileName; }).out;
+garage_model = (rustPackages."unknown".garage_model."0.8.1" { inherit profileName; }).out;
+garage_rpc = (rustPackages."unknown".garage_rpc."0.8.1" { inherit profileName; }).out;
+garage_table = (rustPackages."unknown".garage_table."0.8.1" { inherit profileName; }).out;
+garage_util = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out;
+garage_web = (rustPackages."unknown".garage_web."0.8.1" { inherit profileName; }).out;
 hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
 sodiumoxide = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".kuska-sodiumoxide."0.2.5-0" { inherit profileName; }).out;
 netapp = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.5.2" { inherit profileName; }).out;
@@ -1563,9 +1563,9 @@ in
 };
 });
 
-"unknown".garage_api."0.8.0" = overridableMkRustCrate (profileName: rec {
+"unknown".garage_api."0.8.1" = overridableMkRustCrate (profileName: rec {
 name = "garage_api";
-version = "0.8.0";
+version = "0.8.1";
 registry = "unknown";
 src = fetchCrateLocal (workspaceSrc + "/src/api");
 features = builtins.concatLists [
@@ -1584,11 +1584,11 @@ in
 form_urlencoded = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".form_urlencoded."1.0.1" { inherit profileName; }).out;
 futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
 futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }).out;
-garage_block = (rustPackages."unknown".garage_block."0.8.0" { inherit profileName; }).out;
-garage_model = (rustPackages."unknown".garage_model."0.8.0" { inherit profileName; }).out;
-garage_rpc = (rustPackages."unknown".garage_rpc."0.8.0" { inherit profileName; }).out;
-garage_table = (rustPackages."unknown".garage_table."0.8.0" { inherit profileName; }).out;
-garage_util = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out;
+garage_block = (rustPackages."unknown".garage_block."0.8.1" { inherit profileName; }).out;
+garage_model = (rustPackages."unknown".garage_model."0.8.1" { inherit profileName; }).out;
+garage_rpc = (rustPackages."unknown".garage_rpc."0.8.1" { inherit profileName; }).out;
+garage_table = (rustPackages."unknown".garage_table."0.8.1" { inherit profileName; }).out;
+garage_util = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out;
 hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
 hmac = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hmac."0.12.1" { inherit profileName; }).out;
 http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.8" { inherit profileName; }).out;
@@ -1617,9 +1617,9 @@ in
 };
 });
 
-"unknown".garage_block."0.8.0" = overridableMkRustCrate (profileName: rec {
+"unknown".garage_block."0.8.1" = overridableMkRustCrate (profileName: rec {
 name = "garage_block";
-version = "0.8.0";
+version = "0.8.1";
 registry = "unknown";
 src = fetchCrateLocal (workspaceSrc + "/src/block");
 features = builtins.concatLists [
@@ -1632,10 +1632,10 @@ in
 bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.2.0" { inherit profileName; }).out;
 futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
 futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }).out;
-garage_db = (rustPackages."unknown".garage_db."0.8.0" { inherit profileName; }).out;
-garage_rpc = (rustPackages."unknown".garage_rpc."0.8.0" { inherit profileName; }).out;
-garage_table = (rustPackages."unknown".garage_table."0.8.0" { inherit profileName; }).out;
-garage_util = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out;
+garage_db = (rustPackages."unknown".garage_db."0.8.1" { inherit profileName; }).out;
+garage_rpc = (rustPackages."unknown".garage_rpc."0.8.1" { inherit profileName; }).out;
+garage_table = (rustPackages."unknown".garage_table."0.8.1" { inherit profileName; }).out;
+garage_util = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out;
 hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
 opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
 rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
@@ -1649,9 +1649,9 @@ in
 };
 });
 
-"unknown".garage_db."0.8.0" = overridableMkRustCrate (profileName: rec {
+"unknown".garage_db."0.8.1" = overridableMkRustCrate (profileName: rec {
 name = "garage_db";
-version = "0.8.0";
+version = "0.8.1";
 registry = "unknown";
 src = fetchCrateLocal (workspaceSrc + "/src/db");
 features = builtins.concatLists [
@@ -1681,9 +1681,9 @@ in
 };
 });
 
-"unknown".garage_model."0.8.0" = overridableMkRustCrate (profileName: rec {
+"unknown".garage_model."0.8.1" = overridableMkRustCrate (profileName: rec {
 name = "garage_model";
-version = "0.8.0";
+version = "0.8.1";
 registry = "unknown";
 src = fetchCrateLocal (workspaceSrc + "/src/model");
 features = builtins.concatLists [
@@ -1701,11 +1701,11 @@ in
 err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
 futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
 futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }).out;
-garage_block = (rustPackages."unknown".garage_block."0.8.0" { inherit profileName; }).out;
-garage_db = (rustPackages."unknown".garage_db."0.8.0" { inherit profileName; }).out;
-garage_rpc = (rustPackages."unknown".garage_rpc."0.8.0" { inherit profileName; }).out;
-garage_table = (rustPackages."unknown".garage_table."0.8.0" { inherit profileName; }).out;
-garage_util = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out;
+garage_block = (rustPackages."unknown".garage_block."0.8.1" { inherit profileName; }).out;
+garage_db = (rustPackages."unknown".garage_db."0.8.1" { inherit profileName; }).out;
+garage_rpc = (rustPackages."unknown".garage_rpc."0.8.1" { inherit profileName; }).out;
+garage_table = (rustPackages."unknown".garage_table."0.8.1" { inherit profileName; }).out;
+garage_util = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out;
 hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
 netapp = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.5.2" { inherit profileName; }).out;
 opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
@@ -1719,9 +1719,9 @@ in
 };
 });
 
-"unknown".garage_rpc."0.8.0" = overridableMkRustCrate (profileName: rec {
+"unknown".garage_rpc."0.8.1" = overridableMkRustCrate (profileName: rec {
 name = "garage_rpc";
-version = "0.8.0";
+version = "0.8.1";
 registry = "unknown";
 src = fetchCrateLocal (workspaceSrc + "/src/rpc");
 features = builtins.concatLists [
@@ -1741,7 +1741,7 @@ in
 ${ if rootFeatures' ? "garage/consul-discovery" || rootFeatures' ? "garage_rpc/consul-discovery" || rootFeatures' ? "garage_rpc/err-derive" then "err_derive" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
 futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
 futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }).out;
-garage_util = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out;
+garage_util = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out;
 gethostname = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".gethostname."0.2.3" { inherit profileName; }).out;
 hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
 ${ if rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/k8s-openapi" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "k8s_openapi" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".k8s-openapi."0.16.0" { inherit profileName; }).out;
@@ -1763,20 +1763,19 @@ in
 };
 });
 
-"unknown".garage_table."0.8.0" = overridableMkRustCrate (profileName: rec {
+"unknown".garage_table."0.8.1" = overridableMkRustCrate (profileName: rec {
 name = "garage_table";
-version = "0.8.0";
+version = "0.8.1";
 registry = "unknown";
 src = fetchCrateLocal (workspaceSrc + "/src/table");
 dependencies = {
-arc_swap = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".arc-swap."1.5.0" { inherit profileName; }).out;
 async_trait = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".async-trait."0.1.52" { profileName = "__noProfile"; }).out;
 bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.2.0" { inherit profileName; }).out;
 futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
 futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }).out;
-garage_db = (rustPackages."unknown".garage_db."0.8.0" { inherit profileName; }).out;
-garage_rpc = (rustPackages."unknown".garage_rpc."0.8.0" { inherit profileName; }).out;
-garage_util = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out;
+garage_db = (rustPackages."unknown".garage_db."0.8.1" { inherit profileName; }).out;
+garage_rpc = (rustPackages."unknown".garage_rpc."0.8.1" { inherit profileName; }).out;
+garage_util = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out;
 hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
 hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out;
 opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
@@ -1789,9 +1788,9 @@ in
 };
 });
 
-"unknown".garage_util."0.8.0" = overridableMkRustCrate (profileName: rec {
+"unknown".garage_util."0.8.1" = overridableMkRustCrate (profileName: rec {
 name = "garage_util";
-version = "0.8.0";
+version = "0.8.1";
 registry = "unknown";
 src = fetchCrateLocal (workspaceSrc + "/src/util");
 features = builtins.concatLists [
@@ -1806,7 +1805,7 @@ in
 digest = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".digest."0.10.3" { inherit profileName; }).out;
 err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
 futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
-garage_db = (rustPackages."unknown".garage_db."0.8.0" { inherit profileName; }).out;
+garage_db = (rustPackages."unknown".garage_db."0.8.1" { inherit profileName; }).out;
 git_version = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".git-version."0.3.5" { inherit profileName; }).out;
 hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
 http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.8" { inherit profileName; }).out;
@@ -1826,18 +1825,18 @@ in
 };
 });
 
-"unknown".garage_web."0.8.0" = overridableMkRustCrate (profileName: rec {
+"unknown".garage_web."0.8.1" = overridableMkRustCrate (profileName: rec {
 name = "garage_web";
-version = "0.8.0";
+version = "0.8.1";
 registry = "unknown";
 src = fetchCrateLocal (workspaceSrc + "/src/web");
 dependencies = {
 err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
 futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
-garage_api = (rustPackages."unknown".garage_api."0.8.0" { inherit profileName; }).out;
-garage_model = (rustPackages."unknown".garage_model."0.8.0" { inherit profileName; }).out;
-garage_table = (rustPackages."unknown".garage_table."0.8.0" { inherit profileName; }).out;
-garage_util = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out;
+garage_api = (rustPackages."unknown".garage_api."0.8.1" { inherit profileName; }).out;
+garage_model = (rustPackages."unknown".garage_model."0.8.1" { inherit profileName; }).out;
+garage_table = (rustPackages."unknown".garage_table."0.8.1" { inherit profileName; }).out;
+garage_util = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out;
 http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.8" { inherit profileName; }).out;
 hyper = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."0.14.18" { inherit profileName; }).out;
 opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
@@ -2451,7 +2450,7 @@ in
 dependencies = {
 base64 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".base64."0.13.0" { inherit profileName; }).out;
 ${ if rootFeatures' ? "k2v-client/clap" || rootFeatures' ? "k2v-client/cli" then "clap" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".clap."3.1.18" { inherit profileName; }).out;
-${ if rootFeatures' ? "k2v-client/cli" || rootFeatures' ? "k2v-client/garage_util" then "garage_util" else null } = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out;
+${ if rootFeatures' ? "k2v-client/cli" || rootFeatures' ? "k2v-client/garage_util" then "garage_util" else null } = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out;
 http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.8" { inherit profileName; }).out;
 log = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".log."0.4.16" { inherit profileName; }).out;
 rusoto_core = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rusoto_core."0.48.0" { inherit profileName; }).out;
@@ -5,13 +5,56 @@ weight = 20
 
 ## S3
 
+### Using Minio SDK
+
+First install the SDK:
+
+```bash
+pip3 install minio
+```
+
+Then instantiate a client object using your Garage root domain, API key and secret:
+
+```python
+import io
+import minio
+
+client = minio.Minio(
+  "your.domain.tld",
+  "GKyourapikey",
+  "abcd[...]1234",
+  # Force the region; this is specific to Garage
+  region="region",
+)
+```
+
+Then use all the standard S3 endpoints as implemented by the Minio SDK:
+
+```python
+# List buckets
+print(client.list_buckets())
+
+# Put an object containing 'content' to /path in bucket named 'bucket':
+content = b"content"
+client.put_object(
+  "bucket",
+  "path",
+  io.BytesIO(content),
+  len(content),
+)
+
+# Read the object back and check its contents
+data = client.get_object("bucket", "path").read()
+assert data == content
+```
+
+For further documentation, see the Minio SDK
+[Reference](https://docs.min.io/docs/python-client-api-reference.html)
+
+### Using Amazon boto3
+
 *Coming soon*
 
-Some refs:
-- Minio SDK
-  - [Reference](https://docs.min.io/docs/python-client-api-reference.html)
-
-- Amazon boto3
+See the official documentation:
 - [Installation](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/quickstart.html)
 - [Reference](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html)
 - [Example](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-uploading-files.html)
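The boto3 section added above is still marked *Coming soon*. A minimal sketch of the equivalent setup (our own, not part of this changeset; the endpoint, key and region values are placeholders mirroring the Minio example) could look like:

```python
import boto3

# Placeholder values: endpoint, keys and region must match your Garage setup.
s3 = boto3.client(
    "s3",
    endpoint_url="https://your.domain.tld",
    aws_access_key_id="GKyourapikey",
    aws_secret_access_key="abcd[...]1234",
    region_name="region",  # Garage checks the region in the request signature
)

# List buckets
print(s3.list_buckets())

# Put an object and read it back
s3.put_object(Bucket="bucket", Key="path", Body=b"content")
data = s3.get_object(Bucket="bucket", Key="path")["Body"].read()
assert data == b"content"
```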
@@ -8,7 +8,7 @@ In this section, we cover the following web applications:
 | Name | Status | Note |
 |------|--------|------|
 | [Nextcloud](#nextcloud) | ✅ | Both Primary Storage and External Storage are supported |
-| [Peertube](#peertube) | ✅ | Must be configured with the website endpoint |
+| [Peertube](#peertube) | ✅ | Supported with the website endpoint; proxying private videos is unsupported |
 | [Mastodon](#mastodon) | ✅ | Natively supported |
 | [Matrix](#matrix) | ✅ | Tested with `synapse-s3-storage-provider` |
 | [Pixelfed](#pixelfed) | ❓ | Not yet tested |
@@ -128,6 +128,10 @@ In other words, Peertube is only responsible of the "control plane" and offload
 In return, this system is a bit harder to configure.
 We show how it is still possible to configure Garage with Peertube, allowing you to spread the load and the bandwidth usage on the Garage cluster.
 
+Starting from version 5.0, Peertube also supports improving the security of private videos by not exposing them directly
+but relying on a single control point in the Peertube instance. This is based on per-object and prefix S3 ACLs, which are not currently supported
+in Garage, so this feature is unsupported. While this technically weakens security for private videos, it is not a blocking issue and could be
+a reasonable trade-off for some instances.
 
 ### Create resources in Garage
 
@@ -195,6 +199,11 @@ object_storage:
 max_upload_part: 2GB
 
+proxy:
+  # You may enable this feature, yet it will not provide any security benefit, so
+  # you should rather rely on the Garage public endpoint for all videos
+  proxify_private_files: false
+
 streaming_playlists:
 bucket_name: 'peertube-playlist'
src/api/Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "garage_api"
-version = "0.8.0"
+version = "0.8.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -14,11 +14,11 @@ path = "lib.rs"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-garage_model = { version = "0.8.0", path = "../model" }
-garage_table = { version = "0.8.0", path = "../table" }
-garage_block = { version = "0.8.0", path = "../block" }
-garage_util = { version = "0.8.0", path = "../util" }
-garage_rpc = { version = "0.8.0", path = "../rpc" }
+garage_model = { version = "0.8.1", path = "../model" }
+garage_table = { version = "0.8.1", path = "../table" }
+garage_block = { version = "0.8.1", path = "../block" }
+garage_util = { version = "0.8.1", path = "../util" }
+garage_rpc = { version = "0.8.1", path = "../rpc" }
 
 async-trait = "0.1.7"
 base64 = "0.13"
@@ -143,10 +143,13 @@ impl Endpoint {
 }
 
 generateQueryParameters! {
+keywords: [],
+fields: [
 "format" => format,
 "id" => id,
 "search" => search,
 "globalAlias" => global_alias,
 "alias" => alias,
 "accessKeyId" => access_key_id
+]
 }
@@ -96,7 +96,7 @@ impl Endpoint {
 fn from_get(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
 router_match! {
 @gen_parser
-(query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None),
+(query.keyword.take().unwrap_or_default(), partition_key, query, None),
 key: [
 EMPTY if causality_token => PollItem (query::sort_key, query::causality_token, opt_parse::timeout),
 EMPTY => ReadItem (query::sort_key),
@@ -111,7 +111,7 @@ impl Endpoint {
 fn from_search(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
 router_match! {
 @gen_parser
-(query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None),
+(query.keyword.take().unwrap_or_default(), partition_key, query, None),
 key: [
 ],
 no_key: [
@@ -125,7 +125,7 @@ impl Endpoint {
 fn from_head(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
 router_match! {
 @gen_parser
-(query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None),
+(query.keyword.take().unwrap_or_default(), partition_key, query, None),
 key: [
 EMPTY => HeadObject(opt_parse::part_number, query_opt::version_id),
 ],
@@ -140,7 +140,7 @@ impl Endpoint {
 fn from_post(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
 router_match! {
 @gen_parser
-(query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None),
+(query.keyword.take().unwrap_or_default(), partition_key, query, None),
 key: [
 ],
 no_key: [
@@ -155,7 +155,7 @@ impl Endpoint {
 fn from_put(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
 router_match! {
 @gen_parser
-(query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None),
+(query.keyword.take().unwrap_or_default(), partition_key, query, None),
 key: [
 EMPTY => InsertItem (query::sort_key),
 
@@ -169,7 +169,7 @@ impl Endpoint {
 fn from_delete(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
 router_match! {
 @gen_parser
-(query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None),
+(query.keyword.take().unwrap_or_default(), partition_key, query, None),
 key: [
 EMPTY => DeleteItem (query::sort_key),
 ],
@@ -232,6 +232,11 @@ impl Endpoint {
 
 // parameter name => struct field
 generateQueryParameters! {
+keywords: [
+"delete" => DELETE,
+"search" => SEARCH
+],
+fields: [
 "prefix" => prefix,
 "start" => start,
 "causality_token" => causality_token,
@@ -240,13 +245,5 @@ generateQueryParameters! {
 "reverse" => reverse,
 "sort_key" => sort_key,
 "timeout" => timeout
-}
-
-mod keywords {
-//! This module contain all query parameters with no associated value
-//! used to differentiate endpoints.
-pub const EMPTY: &str = "";
-
-pub const DELETE: &str = "delete";
-pub const SEARCH: &str = "search";
+]
 }
@@ -4,10 +4,9 @@ macro_rules! router_match {
 (@match $enum:expr , [ $($endpoint:ident,)* ]) => {{
 // usage: router_match {@match my_enum, [ VariantWithField1, VariantWithField2 ..] }
 // returns true if the variant was one of the listed variants, false otherwise.
-use Endpoint::*;
 match $enum {
 $(
-$endpoint { .. } => true,
+Endpoint::$endpoint { .. } => true,
 )*
 _ => false
 }
@@ -15,10 +14,9 @@ macro_rules! router_match {
 (@extract $enum:expr , $param:ident, [ $($endpoint:ident,)* ]) => {{
 // usage: router_match {@extract my_enum, field_name, [ VariantWithField1, VariantWithField2 ..] }
 // returns Some(field_value), or None if the variant was not one of the listed variants.
-use Endpoint::*;
 match $enum {
 $(
-$endpoint {$param, ..} => Some($param),
+Endpoint::$endpoint {$param, ..} => Some($param),
 )*
 _ => None
 }
@@ -28,10 +26,9 @@ macro_rules! router_match {
 $($meth:ident $path:pat $(if $required:ident)? => $api:ident $(($($conv:ident :: $param:ident),*))?,)*
 ]) => {{
 {
-use Endpoint::*;
 match ($method, $reqpath) {
 $(
-(&Method::$meth, $path) if true $(&& $query.$required.is_some())? => $api {
+(&Method::$meth, $path) if true $(&& $query.$required.is_some())? => Endpoint::$api {
 $($(
 $param: router_match!(@@parse_param $query, $conv, $param),
 )*)?
@@ -60,11 +57,9 @@ macro_rules! router_match {
 // ]
 // }
 // See in from_{method} for more detailed usage.
-use Endpoint::*;
-use keywords::*;
 match ($keyword, !$key.is_empty()){
 $(
-($kw_k, true) if true $(&& $query.$required_k.is_some())? $(&& $header.contains_key($header_k))? => Ok($api_k {
+(Keyword::$kw_k, true) if true $(&& $query.$required_k.is_some())? $(&& $header.contains_key($header_k))? => Ok(Endpoint::$api_k {
 $key,
 $($(
 $param_k: router_match!(@@parse_param $query, $conv_k, $param_k),
@@ -72,7 +67,7 @@ macro_rules! router_match {
 }),
 )*
 $(
-($kw_nk, false) $(if $query.$required_nk.is_some())? $(if $header.contains($header_nk))? => Ok($api_nk {
+(Keyword::$kw_nk, false) $(if $query.$required_nk.is_some())? $(if $header.contains($header_nk))? => Ok(Endpoint::$api_nk {
 $($(
 $param_nk: router_match!(@@parse_param $query, $conv_nk, $param_nk),
 )*)?
@@ -144,14 +139,39 @@ macro_rules! router_match {
 /// This macro is used to generate part of the code in this module. It must be called only one, and
 /// is useless outside of this module.
 macro_rules! generateQueryParameters {
-( $($rest:expr => $name:ident),* ) => {
+(
+keywords: [ $($kw_param:expr => $kw_name: ident),* ],
+fields: [ $($f_param:expr => $f_name:ident),* ]
+) => {
+#[derive(Debug)]
+#[allow(non_camel_case_types)]
+enum Keyword {
+EMPTY,
+$( $kw_name, )*
+}
+
+impl std::fmt::Display for Keyword {
+fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+match self {
+Keyword::EMPTY => write!(f, "``"),
+$( Keyword::$kw_name => write!(f, "`{}`", $kw_param), )*
+}
+}
+}
+
+impl Default for Keyword {
+fn default() -> Self {
+Keyword::EMPTY
+}
+}
+
 /// Struct containing all query parameters used in endpoints. Think of it as an HashMap,
 /// but with keys statically known.
 #[derive(Debug, Default)]
 struct QueryParameters<'a> {
-keyword: Option<Cow<'a, str>>,
+keyword: Option<Keyword>,
 $(
-$name: Option<Cow<'a, str>>,
+$f_name: Option<Cow<'a, str>>,
 )*
 }
 
@@ -160,34 +180,29 @@ macro_rules! generateQueryParameters {
 fn from_query(query: &'a str) -> Result<Self, Error> {
 let mut res: Self = Default::default();
 for (k, v) in url::form_urlencoded::parse(query.as_bytes()) {
-let repeated = match k.as_ref() {
+match k.as_ref() {
 $(
-$rest => if !v.is_empty() {
-res.$name.replace(v).is_some()
-} else {
-false
+$kw_param => if let Some(prev_kw) = res.keyword.replace(Keyword::$kw_name) {
+return Err(Error::bad_request(format!(
+"Multiple keywords: '{}' and '{}'", prev_kw, $kw_param
+)));
+},
+)*
+$(
+$f_param => if !v.is_empty() {
+if res.$f_name.replace(v).is_some() {
+return Err(Error::bad_request(format!(
+"Query parameter repeated: '{}'", k
+)));
+}
 },
 )*
 _ => {
-if k.starts_with("response-") || k.starts_with("X-Amz-") {
-false
-} else if v.as_ref().is_empty() {
-if res.keyword.replace(k).is_some() {
-return Err(Error::bad_request("Multiple keywords"));
-}
-continue;
-} else {
+if !(k.starts_with("response-") || k.starts_with("X-Amz-")) {
 debug!("Received an unknown query parameter: '{}'", k);
-false
 }
 }
 };
-if repeated {
-return Err(Error::bad_request(format!(
-"Query parameter repeated: '{}'",
-k
-)));
-}
 }
 Ok(res)
 }
@@ -198,8 +213,8 @@ macro_rules! generateQueryParameters {
 if self.keyword.is_some() {
 Some("Keyword not used")
 } $(
-else if self.$name.is_some() {
-Some(concat!("'", $rest, "'"))
+else if self.$f_name.is_some() {
+Some(concat!("'", $f_param, "'"))
 }
 )* else {
 None
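Taken together, these macro changes replace stringly-typed keyword handling with a generated `Keyword` enum: value-less "keywords" select the endpoint and may appear at most once, "fields" carry values and may not repeat, and both duplicate cases now fail fast inside the loop instead of being flagged by the `repeated` variable afterwards. A standalone sketch of that parsing behaviour (plain Python with hypothetical names, mirroring the k2v keyword set):

```python
# Standalone sketch of the rules the reworked generateQueryParameters! macro
# generates; KEYWORDS/FIELDS and parse_query are our own stand-ins.
from urllib.parse import parse_qsl

KEYWORDS = {"delete", "search"}                      # the k2v set; S3 declares ~30
FIELDS = {"prefix", "start", "sort_key", "timeout"}

def parse_query(query: str) -> dict:
    res = {"keyword": None}
    for k, v in parse_qsl(query, keep_blank_values=True):
        if k in KEYWORDS:
            # Keywords are value-less endpoint selectors: at most one per request.
            if res["keyword"] is not None:
                raise ValueError(f"Multiple keywords: '{res['keyword']}' and '{k}'")
            res["keyword"] = k
        elif k in FIELDS:
            # Fields carry a value: repeating one is an error.
            if v:
                if k in res:
                    raise ValueError(f"Query parameter repeated: '{k}'")
                res[k] = v
        elif not (k.startswith("response-") or k.startswith("X-Amz-")):
            pass  # the real code logs unknown parameters via debug!()
    return res

print(parse_query("search&prefix=a"))   # {'keyword': 'search', 'prefix': 'a'}
try:
    parse_query("delete&search")
except ValueError as e:
    print(e)                            # Multiple keywords: 'delete' and 'search'
```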
@ -355,7 +355,7 @@ impl Endpoint {
|
||||||
fn from_get(key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
fn from_get(key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
||||||
router_match! {
|
router_match! {
|
||||||
@gen_parser
|
@gen_parser
|
||||||
(query.keyword.take().unwrap_or_default().as_ref(), key, query, None),
|
(query.keyword.take().unwrap_or_default(), key, query, None),
|
||||||
key: [
|
key: [
|
||||||
EMPTY if upload_id => ListParts (query::upload_id, opt_parse::max_parts, opt_parse::part_number_marker),
|
EMPTY if upload_id => ListParts (query::upload_id, opt_parse::max_parts, opt_parse::part_number_marker),
|
||||||
EMPTY => GetObject (query_opt::version_id, opt_parse::part_number),
|
EMPTY => GetObject (query_opt::version_id, opt_parse::part_number),
|
||||||
|
@ -412,7 +412,7 @@ impl Endpoint {
|
 	fn from_head(key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
 		router_match! {
 			@gen_parser
-			(query.keyword.take().unwrap_or_default().as_ref(), key, query, None),
+			(query.keyword.take().unwrap_or_default(), key, query, None),
 			key: [
 				EMPTY => HeadObject(opt_parse::part_number, query_opt::version_id),
 			],
@@ -426,7 +426,7 @@ impl Endpoint {
 	fn from_post(key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
 		router_match! {
 			@gen_parser
-			(query.keyword.take().unwrap_or_default().as_ref(), key, query, None),
+			(query.keyword.take().unwrap_or_default(), key, query, None),
 			key: [
 				EMPTY if upload_id => CompleteMultipartUpload (query::upload_id),
 				RESTORE => RestoreObject (query_opt::version_id),
@@ -448,7 +448,7 @@ impl Endpoint {
 	) -> Result<Self, Error> {
 		router_match! {
 			@gen_parser
-			(query.keyword.take().unwrap_or_default().as_ref(), key, query, headers),
+			(query.keyword.take().unwrap_or_default(), key, query, headers),
 			key: [
 				EMPTY if part_number header "x-amz-copy-source" => UploadPartCopy (parse::part_number, query::upload_id),
 				EMPTY header "x-amz-copy-source" => CopyObject,
@@ -490,7 +490,7 @@ impl Endpoint {
 	fn from_delete(key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
 		router_match! {
 			@gen_parser
-			(query.keyword.take().unwrap_or_default().as_ref(), key, query, None),
+			(query.keyword.take().unwrap_or_default(), key, query, None),
 			key: [
 				EMPTY if upload_id => AbortMultipartUpload (query::upload_id),
 				EMPTY => DeleteObject (query_opt::version_id),
@@ -624,6 +624,39 @@ impl Endpoint {
 
 // parameter name => struct field
 generateQueryParameters! {
+	keywords: [
+		"accelerate" => ACCELERATE,
+		"acl" => ACL,
+		"analytics" => ANALYTICS,
+		"cors" => CORS,
+		"delete" => DELETE,
+		"encryption" => ENCRYPTION,
+		"intelligent-tiering" => INTELLIGENT_TIERING,
+		"inventory" => INVENTORY,
+		"legal-hold" => LEGAL_HOLD,
+		"lifecycle" => LIFECYCLE,
+		"location" => LOCATION,
+		"logging" => LOGGING,
+		"metrics" => METRICS,
+		"notification" => NOTIFICATION,
+		"object-lock" => OBJECT_LOCK,
+		"ownershipControls" => OWNERSHIP_CONTROLS,
+		"policy" => POLICY,
+		"policyStatus" => POLICY_STATUS,
+		"publicAccessBlock" => PUBLIC_ACCESS_BLOCK,
+		"replication" => REPLICATION,
+		"requestPayment" => REQUEST_PAYMENT,
+		"restore" => RESTORE,
+		"retention" => RETENTION,
+		"select" => SELECT,
+		"tagging" => TAGGING,
+		"torrent" => TORRENT,
+		"uploads" => UPLOADS,
+		"versioning" => VERSIONING,
+		"versions" => VERSIONS,
+		"website" => WEBSITE
+	],
+	fields: [
 	"continuation-token" => continuation_token,
 	"delimiter" => delimiter,
 	"encoding-type" => encoding_type,
@@ -644,43 +677,7 @@ generateQueryParameters! {
 	"upload-id-marker" => upload_id_marker,
 	"versionId" => version_id,
 	"version-id-marker" => version_id_marker
-}
-
-mod keywords {
-	//! This module contain all query parameters with no associated value S3 uses to differentiate
-	//! endpoints.
-	pub const EMPTY: &str = "";
-
-	pub const ACCELERATE: &str = "accelerate";
-	pub const ACL: &str = "acl";
-	pub const ANALYTICS: &str = "analytics";
-	pub const CORS: &str = "cors";
-	pub const DELETE: &str = "delete";
-	pub const ENCRYPTION: &str = "encryption";
-	pub const INTELLIGENT_TIERING: &str = "intelligent-tiering";
-	pub const INVENTORY: &str = "inventory";
-	pub const LEGAL_HOLD: &str = "legal-hold";
-	pub const LIFECYCLE: &str = "lifecycle";
-	pub const LOCATION: &str = "location";
-	pub const LOGGING: &str = "logging";
-	pub const METRICS: &str = "metrics";
-	pub const NOTIFICATION: &str = "notification";
-	pub const OBJECT_LOCK: &str = "object-lock";
-	pub const OWNERSHIP_CONTROLS: &str = "ownershipControls";
-	pub const POLICY: &str = "policy";
-	pub const POLICY_STATUS: &str = "policyStatus";
-	pub const PUBLIC_ACCESS_BLOCK: &str = "publicAccessBlock";
-	pub const REPLICATION: &str = "replication";
-	pub const REQUEST_PAYMENT: &str = "requestPayment";
-	pub const RESTORE: &str = "restore";
-	pub const RETENTION: &str = "retention";
-	pub const SELECT: &str = "select";
-	pub const TAGGING: &str = "tagging";
-	pub const TORRENT: &str = "torrent";
-	pub const UPLOADS: &str = "uploads";
-	pub const VERSIONING: &str = "versioning";
-	pub const VERSIONS: &str = "versions";
-	pub const WEBSITE: &str = "website";
+	]
 }
 
 #[cfg(test)]
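Aside on the router hunks above: the `.as_ref()` calls disappear because the keyword list moves out of the hand-written `mod keywords` and into the `generateQueryParameters!` invocation itself, so the macro can own both the keyword definitions and the parser, and call sites match on the parsed value directly. A toy macro in the same spirit (invented names, far simpler than Garage's actual macro):

macro_rules! gen_keywords {
    ($($kw:literal => $name:ident),* $(,)?) => {
        #[derive(Debug, Clone, Copy, PartialEq, Eq)]
        pub enum Keyword {
            Empty,
            $($name,)*
        }

        // One list generates both the type and the parser, so the two can
        // never drift apart.
        pub fn parse_keyword(s: &str) -> Option<Keyword> {
            match s {
                "" => Some(Keyword::Empty),
                $($kw => Some(Keyword::$name),)*
                _ => None,
            }
        }
    };
}

gen_keywords! {
    "acl" => Acl,
    "cors" => Cors,
    "uploads" => Uploads,
}

fn main() {
    assert_eq!(parse_keyword("cors"), Some(Keyword::Cors));
    assert_eq!(parse_keyword(""), Some(Keyword::Empty));
    assert_eq!(parse_keyword("torrent"), None);
}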
@@ -1,6 +1,6 @@
 [package]
 name = "garage_block"
-version = "0.8.0"
+version = "0.8.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -14,10 +14,10 @@ path = "lib.rs"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-garage_db = { version = "0.8.0", path = "../db" }
-garage_rpc = { version = "0.8.0", path = "../rpc" }
-garage_util = { version = "0.8.0", path = "../util" }
-garage_table = { version = "0.8.0", path = "../table" }
+garage_db = { version = "0.8.1", path = "../db" }
+garage_rpc = { version = "0.8.1", path = "../rpc" }
+garage_util = { version = "0.8.1", path = "../util" }
+garage_table = { version = "0.8.1", path = "../table" }
 
 opentelemetry = "0.17"
@@ -3,7 +3,6 @@ use std::pin::Pin;
 use std::sync::Arc;
 use std::time::Duration;
 
-use arc_swap::ArcSwapOption;
 use async_trait::async_trait;
 use bytes::Bytes;
 use serde::{Deserialize, Serialize};
@@ -23,7 +22,6 @@ use garage_rpc::rpc_helper::netapp::stream::{stream_asyncread, ByteStream};
 
 use garage_db as db;
 
-use garage_util::background::BackgroundRunner;
 use garage_util::data::*;
 use garage_util::error::*;
 use garage_util::metrics::RecordDuration;
@@ -89,7 +87,7 @@ pub struct BlockManager {
 
 	pub(crate) metrics: BlockManagerMetrics,
 
-	tx_scrub_command: ArcSwapOption<mpsc::Sender<ScrubWorkerCommand>>,
+	tx_scrub_command: mpsc::Sender<ScrubWorkerCommand>,
 }
 
 #[derive(Serialize, Deserialize, Clone, Debug)]
@@ -128,6 +126,8 @@ impl BlockManager {
 		let metrics =
 			BlockManagerMetrics::new(rc.rc.clone(), resync.queue.clone(), resync.errors.clone());
 
+		let (scrub_tx, scrub_rx) = mpsc::channel(1);
+
 		let block_manager = Arc::new(Self {
 			replication,
 			data_dir,
@@ -138,24 +138,21 @@ impl BlockManager {
 			system,
 			endpoint,
 			metrics,
-			tx_scrub_command: ArcSwapOption::new(None),
+			tx_scrub_command: scrub_tx,
 		});
 		block_manager.endpoint.set_handler(block_manager.clone());
 
-		block_manager
-	}
-
-	pub fn spawn_workers(self: &Arc<Self>, bg: &BackgroundRunner) {
 		// Spawn a bunch of resync workers
 		for index in 0..MAX_RESYNC_WORKERS {
-			let worker = ResyncWorker::new(index, self.clone());
-			bg.spawn_worker(worker);
+			let worker = ResyncWorker::new(index, block_manager.clone());
+			block_manager.system.background.spawn_worker(worker);
 		}
 
 		// Spawn scrub worker
-		let (scrub_tx, scrub_rx) = mpsc::channel(1);
-		self.tx_scrub_command.store(Some(Arc::new(scrub_tx)));
-		bg.spawn_worker(ScrubWorker::new(self.clone(), scrub_rx));
+		let scrub_worker = ScrubWorker::new(block_manager.clone(), scrub_rx);
+		block_manager.system.background.spawn_worker(scrub_worker);
+
+		block_manager
 	}
 
 	/// Ask nodes that might have a (possibly compressed) block for it
@@ -328,11 +325,8 @@ impl BlockManager {
 	}
 
 	/// Send command to start/stop/manager scrub worker
-	pub async fn send_scrub_command(&self, cmd: ScrubWorkerCommand) -> Result<(), Error> {
-		let tx = self.tx_scrub_command.load();
-		let tx = tx.as_ref().ok_or_message("scrub worker is not running")?;
-		tx.send(cmd).await.ok_or_message("send error")?;
-		Ok(())
+	pub async fn send_scrub_command(&self, cmd: ScrubWorkerCommand) {
+		let _ = self.tx_scrub_command.send(cmd).await;
 	}
 
 	/// Get the reference count of a block
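Aside: the BlockManager hunks above contrast two initialization shapes. On the left, the scrub command sender is published lazily (an ArcSwapOption filled in by spawn_workers()), so sending a command can fail if the worker has not started; on the right, the channel is created in the constructor so a sender always exists. A standalone sketch of both shapes, using Mutex<Option<_>> as a stand-in for arc_swap::ArcSwapOption (all names invented):

use std::sync::Mutex;
use tokio::sync::mpsc;

#[derive(Debug)]
enum ScrubCommand {
    Pause,
    Resume,
}

// Late initialization: the sender appears only once the worker is spawned.
struct LateInit {
    tx: Mutex<Option<mpsc::Sender<ScrubCommand>>>,
}

impl LateInit {
    async fn send(&self, cmd: ScrubCommand) -> Result<(), &'static str> {
        // Clone the sender out; never hold the lock across an .await.
        let tx = self.tx.lock().unwrap().clone();
        let tx = tx.ok_or("scrub worker is not running")?;
        tx.send(cmd).await.map_err(|_| "send error")
    }
}

// Eager initialization: the channel exists from construction onwards.
struct EagerInit {
    tx: mpsc::Sender<ScrubCommand>,
}

impl EagerInit {
    async fn send(&self, cmd: ScrubCommand) {
        let _ = self.tx.send(cmd).await;
    }
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel(1);
    let eager = EagerInit { tx };
    tokio::spawn(async move {
        while let Some(cmd) = rx.recv().await {
            println!("worker got {:?}", cmd);
        }
    });
    eager.send(ScrubCommand::Pause).await;

    let late = LateInit { tx: Mutex::new(None) };
    assert!(late.send(ScrubCommand::Resume).await.is_err());
}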
@@ -148,7 +148,7 @@ impl Worker for RepairWorker {
 		}
 	}
 
-	async fn wait_for_work(&mut self) -> WorkerState {
+	async fn wait_for_work(&mut self, _must_exit: &watch::Receiver<bool>) -> WorkerState {
 		unreachable!()
 	}
 }
@@ -341,7 +341,7 @@ impl Worker for ScrubWorker {
 		}
 	}
 
-	async fn wait_for_work(&mut self) -> WorkerState {
+	async fn wait_for_work(&mut self, _must_exit: &watch::Receiver<bool>) -> WorkerState {
 		let (wait_until, command) = match &self.work {
 			ScrubWorkerState::Running(_) => return WorkerState::Busy,
 			ScrubWorkerState::Paused(_, resume_time) => (*resume_time, ScrubWorkerCommand::Resume),
@@ -540,7 +540,7 @@ impl Worker for ResyncWorker {
 		}
 	}
 
-	async fn wait_for_work(&mut self) -> WorkerState {
+	async fn wait_for_work(&mut self, _must_exit: &watch::Receiver<bool>) -> WorkerState {
 		while self.index >= self.manager.resync.persisted.load().n_workers {
 			self.manager.resync.notify.notified().await
 		}
@@ -1,6 +1,6 @@
 [package]
 name = "garage_db"
-version = "0.8.0"
+version = "0.8.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -1,6 +1,6 @@
 [package]
 name = "garage"
-version = "0.8.0"
+version = "0.8.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -21,14 +21,14 @@ path = "tests/lib.rs"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-garage_db = { version = "0.8.0", path = "../db" }
-garage_api = { version = "0.8.0", path = "../api" }
-garage_block = { version = "0.8.0", path = "../block" }
-garage_model = { version = "0.8.0", path = "../model" }
-garage_rpc = { version = "0.8.0", path = "../rpc" }
-garage_table = { version = "0.8.0", path = "../table" }
-garage_util = { version = "0.8.0", path = "../util" }
-garage_web = { version = "0.8.0", path = "../web" }
+garage_db = { version = "0.8.1", path = "../db" }
+garage_api = { version = "0.8.1", path = "../api" }
+garage_block = { version = "0.8.1", path = "../block" }
+garage_model = { version = "0.8.1", path = "../model" }
+garage_rpc = { version = "0.8.1", path = "../rpc" }
+garage_table = { version = "0.8.1", path = "../table" }
+garage_util = { version = "0.8.1", path = "../util" }
+garage_web = { version = "0.8.1", path = "../web" }
 
 backtrace = "0.3"
 bytes = "1.0"
@@ -5,7 +5,6 @@ use std::sync::Arc;
 use async_trait::async_trait;
 use serde::{Deserialize, Serialize};
 
-use garage_util::background::BackgroundRunner;
 use garage_util::crdt::*;
 use garage_util::data::*;
 use garage_util::error::Error as GarageError;
@@ -75,18 +74,13 @@ impl Rpc for AdminRpc {
 
 pub struct AdminRpcHandler {
 	garage: Arc<Garage>,
-	background: Arc<BackgroundRunner>,
 	endpoint: Arc<Endpoint<AdminRpc, Self>>,
 }
 
 impl AdminRpcHandler {
-	pub fn new(garage: Arc<Garage>, background: Arc<BackgroundRunner>) -> Arc<Self> {
+	pub fn new(garage: Arc<Garage>) -> Arc<Self> {
 		let endpoint = garage.system.netapp.endpoint(ADMIN_RPC_PATH.into());
-		let admin = Arc::new(Self {
-			garage,
-			background,
-			endpoint,
-		});
+		let admin = Arc::new(Self { garage, endpoint });
 		admin.endpoint.set_handler(admin.clone());
 		admin
 	}
@@ -765,7 +759,7 @@ impl AdminRpcHandler {
 			)))
 		}
 		} else {
-			launch_online_repair(&self.garage, &self.background, opt).await?;
+			launch_online_repair(self.garage.clone(), opt).await;
 			Ok(AdminRpc::Ok(format!(
 				"Repair launched on {:?}",
 				self.garage.system.id
@@ -931,11 +925,12 @@ impl AdminRpcHandler {
 	async fn handle_worker_cmd(&self, cmd: &WorkerOperation) -> Result<AdminRpc, Error> {
 		match cmd {
 			WorkerOperation::List { opt } => {
-				let workers = self.background.get_worker_info();
+				let workers = self.garage.background.get_worker_info();
 				Ok(AdminRpc::WorkerList(workers, *opt))
 			}
 			WorkerOperation::Info { tid } => {
 				let info = self
+					.garage
 					.background
 					.get_worker_info()
 					.get(tid)
@@ -949,7 +944,7 @@ impl AdminRpcHandler {
 				self.garage
 					.block_manager
 					.send_scrub_command(scrub_command)
-					.await?;
+					.await;
 				Ok(AdminRpc::Ok("Scrub tranquility updated".into()))
 			}
 			WorkerSetCmd::ResyncWorkerCount { worker_count } => {
@@ -1,5 +1,8 @@
 use std::path::PathBuf;
 
+use tokio::sync::watch;
+
+use garage_util::background::*;
 use garage_util::config::*;
 use garage_util::error::*;
 
@@ -17,8 +20,12 @@ pub async fn offline_repair(config_file: PathBuf, opt: OfflineRepairOpt) -> Resu
 	info!("Loading configuration...");
 	let config = read_config(config_file)?;
 
+	info!("Initializing background runner...");
+	let (done_tx, done_rx) = watch::channel(false);
+	let (background, await_background_done) = BackgroundRunner::new(16, done_rx);
+
 	info!("Initializing Garage main data store...");
-	let garage = Garage::new(config)?;
+	let garage = Garage::new(config.clone(), background)?;
 
 	info!("Launching repair operation...");
 	match opt.what {
@@ -36,7 +43,13 @@ pub async fn offline_repair(config_file: PathBuf, opt: OfflineRepairOpt) -> Resu
 		}
 	}
 
-	info!("Repair operation finished, shutting down...");
+	info!("Repair operation finished, shutting down Garage internals...");
+	done_tx.send(true).unwrap();
+	drop(garage);
+
+	await_background_done.await?;
+
+	info!("Cleaning up...");
 
 	Ok(())
 }
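Aside: the right-hand side of the offline-repair hunk uses a small shutdown handshake: a watch channel flips an exit flag, and a second handle resolves only once the background runner has actually drained. A self-contained sketch of that handshake (stand-in logic; Garage's BackgroundRunner does more):

use tokio::sync::watch;

// The worker side: run until the exit flag flips to true, then stop.
async fn background_runner(mut must_exit: watch::Receiver<bool>) {
    while !*must_exit.borrow() {
        // ... pull queued jobs and run them ...
        if must_exit.changed().await.is_err() {
            break; // sender dropped: also treat as shutdown
        }
    }
    println!("background runner: drained, exiting");
}

#[tokio::main]
async fn main() {
    let (done_tx, done_rx) = watch::channel(false);
    let await_background_done = tokio::spawn(background_runner(done_rx));

    // ... run the repair operation here ...

    // Signal shutdown, then wait for the runner to actually finish.
    done_tx.send(true).unwrap();
    await_background_done.await.unwrap();
}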
@@ -15,31 +15,33 @@ use garage_util::error::Error;
 
 use crate::*;
 
-pub async fn launch_online_repair(
-	garage: &Arc<Garage>,
-	bg: &BackgroundRunner,
-	opt: RepairOpt,
-) -> Result<(), Error> {
+pub async fn launch_online_repair(garage: Arc<Garage>, opt: RepairOpt) {
 	match opt.what {
 		RepairWhat::Tables => {
 			info!("Launching a full sync of tables");
-			garage.bucket_table.syncer.add_full_sync()?;
-			garage.object_table.syncer.add_full_sync()?;
-			garage.version_table.syncer.add_full_sync()?;
-			garage.block_ref_table.syncer.add_full_sync()?;
-			garage.key_table.syncer.add_full_sync()?;
+			garage.bucket_table.syncer.add_full_sync();
+			garage.object_table.syncer.add_full_sync();
+			garage.version_table.syncer.add_full_sync();
+			garage.block_ref_table.syncer.add_full_sync();
+			garage.key_table.syncer.add_full_sync();
 		}
 		RepairWhat::Versions => {
 			info!("Repairing the versions table");
-			bg.spawn_worker(RepairVersionsWorker::new(garage.clone()));
+			garage
+				.background
+				.spawn_worker(RepairVersionsWorker::new(garage.clone()));
 		}
 		RepairWhat::BlockRefs => {
 			info!("Repairing the block refs table");
-			bg.spawn_worker(RepairBlockrefsWorker::new(garage.clone()));
+			garage
+				.background
+				.spawn_worker(RepairBlockrefsWorker::new(garage.clone()));
 		}
 		RepairWhat::Blocks => {
 			info!("Repairing the stored blocks");
-			bg.spawn_worker(garage_block::repair::RepairWorker::new(
+			garage
+				.background
+				.spawn_worker(garage_block::repair::RepairWorker::new(
 				garage.block_manager.clone(),
 			));
 		}
@@ -54,10 +56,9 @@ pub async fn launch_online_repair(
 			}
 			};
 			info!("Sending command to scrub worker: {:?}", cmd);
-			garage.block_manager.send_scrub_command(cmd).await?;
+			garage.block_manager.send_scrub_command(cmd).await;
 		}
 	}
-	Ok(())
 }
 
 // ----
@@ -92,14 +93,19 @@ impl Worker for RepairVersionsWorker {
 	}
 
 	async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
-		let (item_bytes, next_pos) = match self.garage.version_table.data.store.get_gt(&self.pos)? {
-			Some((k, v)) => (v, k),
+		let item_bytes = match self.garage.version_table.data.store.get_gt(&self.pos)? {
+			Some((k, v)) => {
+				self.pos = k;
+				v
+			}
 			None => {
 				info!("repair_versions: finished, done {}", self.counter);
 				return Ok(WorkerState::Done);
 			}
 		};
 
+		self.counter += 1;
+
 		let version = rmp_serde::decode::from_read_ref::<_, Version>(&item_bytes)?;
 		if !version.deleted.get() {
 			let object = self
@@ -128,13 +134,10 @@ impl Worker for RepairVersionsWorker {
 			}
 		}
 
-		self.counter += 1;
-		self.pos = next_pos;
-
 		Ok(WorkerState::Busy)
 	}
 
-	async fn wait_for_work(&mut self) -> WorkerState {
+	async fn wait_for_work(&mut self, _must_exit: &watch::Receiver<bool>) -> WorkerState {
 		unreachable!()
 	}
 }
@@ -171,15 +174,19 @@ impl Worker for RepairBlockrefsWorker {
 	}
 
 	async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
-		let (item_bytes, next_pos) =
-			match self.garage.block_ref_table.data.store.get_gt(&self.pos)? {
-				Some((k, v)) => (v, k),
+		let item_bytes = match self.garage.block_ref_table.data.store.get_gt(&self.pos)? {
+			Some((k, v)) => {
+				self.pos = k;
+				v
+			}
 			None => {
 				info!("repair_block_ref: finished, done {}", self.counter);
 				return Ok(WorkerState::Done);
 			}
 		};
 
+		self.counter += 1;
+
 		let block_ref = rmp_serde::decode::from_read_ref::<_, BlockRef>(&item_bytes)?;
 		if !block_ref.deleted.get() {
 			let version = self
@@ -205,13 +212,10 @@ impl Worker for RepairBlockrefsWorker {
 			}
 		}
 
-		self.counter += 1;
-		self.pos = next_pos;
-
 		Ok(WorkerState::Busy)
 	}
 
-	async fn wait_for_work(&mut self) -> WorkerState {
+	async fn wait_for_work(&mut self, _must_exit: &watch::Receiver<bool>) -> WorkerState {
 		unreachable!()
 	}
 }
@@ -35,15 +35,12 @@ pub async fn run_server(config_file: PathBuf) -> Result<(), Error> {
 	#[cfg(feature = "metrics")]
 	let metrics_exporter = opentelemetry_prometheus::exporter().init();
 
-	info!("Initializing Garage main data store...");
-	let garage = Garage::new(config.clone())?;
-
 	info!("Initializing background runner...");
 	let watch_cancel = watch_shutdown_signal();
-	let (background, await_background_done) = BackgroundRunner::new(watch_cancel.clone());
+	let (background, await_background_done) = BackgroundRunner::new(16, watch_cancel.clone());
 
-	info!("Spawning Garage workers...");
-	garage.spawn_workers(&background);
+	info!("Initializing Garage main data store...");
+	let garage = Garage::new(config.clone(), background)?;
 
 	if config.admin.trace_sink.is_some() {
 		info!("Initialize tracing...");
@@ -66,7 +63,7 @@ pub async fn run_server(config_file: PathBuf) -> Result<(), Error> {
 	let run_system = tokio::spawn(garage.system.clone().run(watch_cancel.clone()));
 
 	info!("Create admin RPC handler...");
-	AdminRpcHandler::new(garage.clone(), background.clone());
+	AdminRpcHandler::new(garage.clone());
 
 	// ---- Launch public-facing API servers ----
@ -22,7 +22,7 @@ tokio = "1.17.0"
|
||||||
|
|
||||||
# cli deps
|
# cli deps
|
||||||
clap = { version = "3.1.18", optional = true, features = ["derive", "env"] }
|
clap = { version = "3.1.18", optional = true, features = ["derive", "env"] }
|
||||||
garage_util = { version = "0.8.0", path = "../util", optional = true }
|
garage_util = { version = "0.8.1", path = "../util", optional = true }
|
||||||
|
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
|
|
|
@@ -1,6 +1,6 @@
 [package]
 name = "garage_model"
-version = "0.8.0"
+version = "0.8.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -14,11 +14,11 @@ path = "lib.rs"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-garage_db = { version = "0.8.0", default-features = false, path = "../db" }
-garage_rpc = { version = "0.8.0", path = "../rpc" }
-garage_table = { version = "0.8.0", path = "../table" }
-garage_block = { version = "0.8.0", path = "../block" }
-garage_util = { version = "0.8.0", path = "../util" }
+garage_db = { version = "0.8.1", default-features = false, path = "../db" }
+garage_rpc = { version = "0.8.1", path = "../rpc" }
+garage_table = { version = "0.8.1", path = "../table" }
+garage_block = { version = "0.8.1", path = "../block" }
+garage_util = { version = "0.8.1", path = "../util" }
 
 async-trait = "0.1.7"
 arc-swap = "1.0"
@@ -39,6 +39,8 @@ pub struct Garage {
 
 	/// The local database
 	pub db: db::Db,
+	/// A background job runner
+	pub background: Arc<BackgroundRunner>,
 	/// The membership manager
 	pub system: Arc<System>,
 	/// The block manager
@@ -76,7 +78,7 @@ pub struct GarageK2V {
 
 impl Garage {
 	/// Create and run garage
-	pub fn new(config: Config) -> Result<Arc<Self>, Error> {
+	pub fn new(config: Config, background: Arc<BackgroundRunner>) -> Result<Arc<Self>, Error> {
 		// Create meta dir and data dir if they don't exist already
 		std::fs::create_dir_all(&config.metadata_dir)
 			.ok_or_message("Unable to create Garage metadata directory")?;
@@ -165,7 +167,7 @@ impl Garage {
 			.expect("Invalid replication_mode in config file.");
 
 		info!("Initialize membership management system...");
-		let system = System::new(network_key, replication_mode, &config)?;
+		let system = System::new(network_key, background.clone(), replication_mode, &config)?;
 
 		let data_rep_param = TableShardedReplication {
 			system: system.clone(),
@@ -223,6 +225,7 @@ impl Garage {
 		info!("Initialize version_table...");
 		let version_table = Table::new(
 			VersionTable {
+				background: background.clone(),
 				block_ref_table: block_ref_table.clone(),
 			},
 			meta_rep_param.clone(),
@@ -237,6 +240,7 @@ impl Garage {
 		#[allow(clippy::redundant_clone)]
 		let object_table = Table::new(
 			ObjectTable {
+				background: background.clone(),
 				version_table: version_table.clone(),
 				object_counter_table: object_counter_table.clone(),
 			},
@@ -254,6 +258,7 @@ impl Garage {
 			config,
 			replication_mode,
 			db,
+			background,
 			system,
 			block_manager,
 			bucket_table,
@@ -268,22 +273,6 @@ impl Garage {
 		}))
 	}
 
-	pub fn spawn_workers(&self, bg: &BackgroundRunner) {
-		self.block_manager.spawn_workers(bg);
-
-		self.bucket_table.spawn_workers(bg);
-		self.bucket_alias_table.spawn_workers(bg);
-		self.key_table.spawn_workers(bg);
-
-		self.object_table.spawn_workers(bg);
-		self.object_counter_table.spawn_workers(bg);
-		self.version_table.spawn_workers(bg);
-		self.block_ref_table.spawn_workers(bg);
-
-		#[cfg(feature = "k2v")]
-		self.k2v.spawn_workers(bg);
-	}
-
 	pub fn bucket_helper(&self) -> helper::bucket::BucketHelper {
 		helper::bucket::BucketHelper(self)
 	}
@@ -318,9 +307,4 @@ impl GarageK2V {
 			rpc,
 		}
 	}
-
-	pub fn spawn_workers(&self, bg: &BackgroundRunner) {
-		self.item_table.spawn_workers(bg);
-		self.counter_table.spawn_workers(bg);
-	}
 }
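Aside: on the right-hand side the BackgroundRunner must be constructed before Garage::new(), because it is injected and cloned into the tables; on the left it is only needed later, when spawn_workers() is called explicitly. A minimal sketch of the injected shape (stand-in types, not Garage's):

use std::sync::Arc;

struct BackgroundRunner;
struct Garage {
    background: Arc<BackgroundRunner>,
}

impl Garage {
    // The runner is a constructor argument, so sub-components can keep a
    // clone and spawn work at any time during their lifetime.
    fn new(background: Arc<BackgroundRunner>) -> Arc<Self> {
        Arc::new(Self { background })
    }
}

fn main() {
    let background = Arc::new(BackgroundRunner);
    let garage = Garage::new(background.clone());
    // The caller and the Garage value now share one runner.
    assert_eq!(Arc::strong_count(&garage.background), 2);
}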
@@ -1,15 +1,17 @@
 use core::ops::Bound;
-use std::collections::{BTreeMap, HashMap};
+use std::collections::{hash_map, BTreeMap, HashMap};
 use std::marker::PhantomData;
 use std::sync::Arc;
 
+use async_trait::async_trait;
 use serde::{Deserialize, Serialize};
+use tokio::sync::{mpsc, watch};
 
 use garage_db as db;
 
 use garage_rpc::ring::Ring;
 use garage_rpc::system::System;
-use garage_util::background::BackgroundRunner;
+use garage_util::background::*;
 use garage_util::data::*;
 use garage_util::error::*;
 use garage_util::time::*;
@@ -140,6 +142,7 @@ impl<T: CountedItem> TableSchema for CounterTable<T> {
 pub struct IndexCounter<T: CountedItem> {
 	this_node: Uuid,
 	local_counter: db::Tree,
+	propagate_tx: mpsc::UnboundedSender<(T::CP, T::CS, LocalCounterEntry<T>)>,
 	pub table: Arc<Table<CounterTable<T>, TableShardedReplication>>,
 }
 
@@ -149,11 +152,16 @@ impl<T: CountedItem> IndexCounter<T> {
 		replication: TableShardedReplication,
 		db: &db::Db,
 	) -> Arc<Self> {
-		Arc::new(Self {
+		let background = system.background.clone();
+
+		let (propagate_tx, propagate_rx) = mpsc::unbounded_channel();
+
+		let this = Arc::new(Self {
 			this_node: system.id,
 			local_counter: db
 				.open_tree(format!("local_counter_v2:{}", T::COUNTER_TABLE_NAME))
 				.expect("Unable to open local counter tree"),
+			propagate_tx,
 			table: Table::new(
 				CounterTable {
 					_phantom_t: Default::default(),
@@ -162,11 +170,16 @@ impl<T: CountedItem> IndexCounter<T> {
 				system,
 				db,
 			),
-		})
-	}
-
-	pub fn spawn_workers(&self, bg: &BackgroundRunner) {
-		self.table.spawn_workers(bg);
+		});
+
+		background.spawn_worker(IndexPropagatorWorker {
+			index_counter: this.clone(),
+			propagate_rx,
+			buf: HashMap::new(),
+			errors: 0,
+		});
+
+		this
 	}
 
 	pub fn count(
@@ -219,8 +232,12 @@ impl<T: CountedItem> IndexCounter<T> {
 			.map_err(db::TxError::Abort)?;
 		tx.insert(&self.local_counter, &tree_key[..], new_entry_bytes)?;
 
-		let dist_entry = entry.into_counter_entry(self.this_node);
-		self.table.queue_insert(tx, &dist_entry)?;
+		if let Err(e) = self.propagate_tx.send((pk.clone(), sk.clone(), entry)) {
+			error!(
+				"Could not propagate updated counter values, failed to send to channel: {}",
+				e
+			);
+		}
 
 		Ok(())
 	}
@@ -233,6 +250,23 @@ impl<T: CountedItem> IndexCounter<T> {
 		TS: TableSchema<E = T>,
 		TR: TableReplication,
 	{
+		let save_counter_entry = |entry: CounterEntry<T>| -> Result<(), Error> {
+			let entry_k = self
+				.table
+				.data
+				.tree_key(entry.partition_key(), entry.sort_key());
+			self.table
+				.data
+				.update_entry_with(&entry_k, |ent| match ent {
+					Some(mut ent) => {
+						ent.merge(&entry);
+						ent
+					}
+					None => entry.clone(),
+				})?;
+			Ok(())
+		};
+
 		// 1. Set all old local counters to zero
 		let now = now_msec();
 		let mut next_start: Option<Vec<u8>> = None;
@@ -268,9 +302,7 @@ impl<T: CountedItem> IndexCounter<T> {
 				.insert(&local_counter_k, &local_counter_bytes)?;
 
 			let counter_entry = local_counter.into_counter_entry(self.this_node);
-			self.local_counter
-				.db()
-				.transaction(|mut tx| self.table.queue_insert(&mut tx, &counter_entry))?;
+			save_counter_entry(counter_entry)?;
 
 			next_start = Some(local_counter_k);
 		}
@@ -335,9 +367,7 @@ impl<T: CountedItem> IndexCounter<T> {
 				.insert(&local_counter_key, local_counter_bytes)?;
 
 			let counter_entry = local_counter.into_counter_entry(self.this_node);
-			self.local_counter
-				.db()
-				.transaction(|mut tx| self.table.queue_insert(&mut tx, &counter_entry))?;
+			save_counter_entry(counter_entry)?;
 
 			next_start = Some(counted_entry_k);
 		}
@@ -348,7 +378,96 @@ impl<T: CountedItem> IndexCounter<T> {
 	}
 }
 
-// ----
+struct IndexPropagatorWorker<T: CountedItem> {
+	index_counter: Arc<IndexCounter<T>>,
+	propagate_rx: mpsc::UnboundedReceiver<(T::CP, T::CS, LocalCounterEntry<T>)>,
+
+	buf: HashMap<Vec<u8>, CounterEntry<T>>,
+	errors: usize,
+}
+
+impl<T: CountedItem> IndexPropagatorWorker<T> {
+	fn add_ent(&mut self, pk: T::CP, sk: T::CS, counters: LocalCounterEntry<T>) {
+		let tree_key = self.index_counter.table.data.tree_key(&pk, &sk);
+		let dist_entry = counters.into_counter_entry(self.index_counter.this_node);
+		match self.buf.entry(tree_key) {
+			hash_map::Entry::Vacant(e) => {
+				e.insert(dist_entry);
+			}
+			hash_map::Entry::Occupied(mut e) => {
+				e.get_mut().merge(&dist_entry);
+			}
+		}
+	}
+}
+
+#[async_trait]
+impl<T: CountedItem> Worker for IndexPropagatorWorker<T> {
+	fn name(&self) -> String {
+		format!("{} counter", T::COUNTER_TABLE_NAME)
+	}
+
+	fn status(&self) -> WorkerStatus {
+		WorkerStatus {
+			queue_length: Some(self.buf.len() as u64),
+			..Default::default()
+		}
+	}
+
+	async fn work(&mut self, must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
+		// This loop batches updates to counters to be sent all at once.
+		// They are sent once the propagate_rx channel has been emptied (or is closed).
+		let closed = loop {
+			match self.propagate_rx.try_recv() {
+				Ok((pk, sk, counters)) => {
+					self.add_ent(pk, sk, counters);
+				}
+				Err(mpsc::error::TryRecvError::Empty) => break false,
+				Err(mpsc::error::TryRecvError::Disconnected) => break true,
+			}
+		};
+
+		if !self.buf.is_empty() {
+			let entries_k = self.buf.keys().take(100).cloned().collect::<Vec<_>>();
+			let entries = entries_k.iter().map(|k| self.buf.get(k).unwrap());
+			if let Err(e) = self.index_counter.table.insert_many(entries).await {
+				self.errors += 1;
+				if self.errors >= 2 && *must_exit.borrow() {
+					error!("({}) Could not propagate {} counter values: {}, these counters will not be updated correctly.", T::COUNTER_TABLE_NAME, self.buf.len(), e);
+					return Ok(WorkerState::Done);
+				}
+				// Propagate error up to worker manager, it will log it, increment a counter,
+				// and sleep for a certain delay (with exponential backoff), waiting for
+				// things to go back to normal
+				return Err(e);
+			} else {
+				for k in entries_k {
+					self.buf.remove(&k);
+				}
+				self.errors = 0;
+			}
+
+			return Ok(WorkerState::Busy);
+		} else if closed {
+			return Ok(WorkerState::Done);
+		} else {
+			return Ok(WorkerState::Idle);
+		}
+	}
+
+	async fn wait_for_work(&mut self, _must_exit: &watch::Receiver<bool>) -> WorkerState {
+		match self.propagate_rx.recv().await {
+			Some((pk, sk, counters)) => {
+				self.add_ent(pk, sk, counters);
+				WorkerState::Busy
+			}
+			None => match self.buf.is_empty() {
+				false => WorkerState::Busy,
+				true => WorkerState::Done,
+			},
+		}
+	}
+}
 
 #[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
 struct LocalCounterEntry<T: CountedItem> {
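Aside: the IndexPropagatorWorker added on the right-hand side follows a common drain-then-flush pattern: empty the channel without blocking, coalesce updates to the same key, then send one batch. The same loop reduced to a standalone sketch (simplified types; a u64 sum stands in for CRDT merge):

use std::collections::HashMap;
use tokio::sync::mpsc::{self, error::TryRecvError};

async fn run(mut rx: mpsc::UnboundedReceiver<(String, u64)>) {
    let mut buf: HashMap<String, u64> = HashMap::new();
    loop {
        // Coalesce everything currently queued; updates to the same key merge.
        let closed = loop {
            match rx.try_recv() {
                Ok((k, v)) => {
                    *buf.entry(k).or_default() += v;
                }
                Err(TryRecvError::Empty) => break false,
                Err(TryRecvError::Disconnected) => break true,
            }
        };
        if !buf.is_empty() {
            println!("flushing a batch of {} coalesced entries", buf.len());
            buf.clear(); // stand-in for table.insert_many(...)
        }
        if closed {
            return;
        }
        // Nothing buffered and channel still open: block until more work
        // arrives (this mirrors wait_for_work()).
        match rx.recv().await {
            Some((k, v)) => {
                *buf.entry(k).or_default() += v;
            }
            None => return,
        }
    }
}

#[tokio::main]
async fn main() {
    let (tx, rx) = mpsc::unbounded_channel();
    for i in 0..5 {
        tx.send((format!("key{}", i % 2), 1)).unwrap();
    }
    drop(tx);
    run(rx).await;
}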
@@ -273,9 +273,14 @@ impl K2VRpcHandler {
 	}
 
 	fn local_insert(&self, item: &InsertedItem) -> Result<Option<K2VItem>, Error> {
+		let tree_key = self
+			.item_table
+			.data
+			.tree_key(&item.partition, &item.sort_key);
+
 		self.item_table
 			.data
-			.update_entry_with(&item.partition, &item.sort_key, |ent| {
+			.update_entry_with(&tree_key[..], |ent| {
 				let mut ent = ent.unwrap_or_else(|| {
 					K2VItem::new(
 						item.partition.bucket_id,
@@ -4,6 +4,7 @@ use std::sync::Arc;
 
 use garage_db as db;
 
+use garage_util::background::BackgroundRunner;
 use garage_util::data::*;
 
 use garage_table::crdt::*;
@@ -220,6 +221,7 @@ impl Crdt for Object {
 }
 
 pub struct ObjectTable {
+	pub background: Arc<BackgroundRunner>,
 	pub version_table: Arc<Table<VersionTable, TableShardedReplication>>,
 	pub object_counter_table: Arc<IndexCounter<Object>>,
 }
@@ -253,7 +255,12 @@ impl TableSchema for ObjectTable {
 			);
 		}
 
-		// 2. Enqueue propagation deletions to version table
+		// 2. Spawn threads that propagates deletions to version table
+		let version_table = self.version_table.clone();
+		let old = old.cloned();
+		let new = new.cloned();
+
+		self.background.spawn(async move {
 			if let (Some(old_v), Some(new_v)) = (old, new) {
 				// Propagate deletion of old versions
 				for v in old_v.versions.iter() {
@@ -270,17 +277,12 @@ impl TableSchema for ObjectTable {
 					if newly_deleted {
 						let deleted_version =
 							Version::new(v.uuid, old_v.bucket_id, old_v.key.clone(), true);
-						let res = self.version_table.queue_insert(tx, &deleted_version);
-						if let Err(e) = db::unabort(res)? {
-							error!(
-								"Unable to enqueue version deletion propagation: {}. A repair will be needed.",
-								e
-							);
-						}
+						version_table.insert(&deleted_version).await?;
 					}
 				}
 			}
+			Ok(())
+		});
 		Ok(())
 	}
@@ -3,6 +3,7 @@ use std::sync::Arc;
 
 use garage_db as db;
 
+use garage_util::background::BackgroundRunner;
 use garage_util::data::*;
 
 use garage_table::crdt::*;
@@ -126,6 +127,7 @@ impl Crdt for Version {
 }
 
 pub struct VersionTable {
+	pub background: Arc<BackgroundRunner>,
 	pub block_ref_table: Arc<Table<BlockRefTable, TableShardedReplication>>,
 }
 
@@ -139,26 +141,33 @@ impl TableSchema for VersionTable {
 
 	fn updated(
 		&self,
-		tx: &mut db::Transaction,
+		_tx: &mut db::Transaction,
 		old: Option<&Self::E>,
 		new: Option<&Self::E>,
 	) -> db::TxOpResult<()> {
+		let block_ref_table = self.block_ref_table.clone();
+		let old = old.cloned();
+		let new = new.cloned();
+
+		self.background.spawn(async move {
 			if let (Some(old_v), Some(new_v)) = (old, new) {
 				// Propagate deletion of version blocks
 				if new_v.deleted.get() && !old_v.deleted.get() {
-					let deleted_block_refs = old_v.blocks.items().iter().map(|(_k, vb)| BlockRef {
+					let deleted_block_refs = old_v
+						.blocks
+						.items()
+						.iter()
+						.map(|(_k, vb)| BlockRef {
 							block: vb.hash,
 							version: old_v.uuid,
 							deleted: true.into(),
+						})
+						.collect::<Vec<_>>();
+					block_ref_table.insert_many(&deleted_block_refs[..]).await?;
+				}
+			}
+			Ok(())
 		});
-					for block_ref in deleted_block_refs {
-						let res = self.block_ref_table.queue_insert(tx, &block_ref);
-						if let Err(e) = db::unabort(res)? {
-							error!("Unable to enqueue block ref deletion propagation: {}. A repair will be needed.", e);
-						}
-					}
-				}
-			}
-
 		Ok(())
 	}
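Aside: the object_table and version_table hunks show the two propagation strategies side by side. The left-hand code enqueues follow-up inserts inside the same storage transaction (queue_insert), so the propagation work is committed atomically with the entry update and survives a crash; the right-hand code hands the work to the background runner, which is simpler but can be lost on shutdown. A self-contained toy (all names invented) of why in-transaction queueing is attractive:

use std::cell::RefCell;

struct Tx {
    writes: RefCell<Vec<String>>,
    queued: RefCell<Vec<String>>,
}

impl Tx {
    fn insert(&self, v: &str) {
        self.writes.borrow_mut().push(v.to_string());
    }
    fn queue_insert(&self, v: &str) {
        self.queued.borrow_mut().push(v.to_string());
    }
}

// On Err, neither the writes nor the queued follow-ups are committed:
// the entry update and its propagation succeed or fail together.
fn transaction<F: Fn(&Tx) -> Result<(), ()>>(f: F) -> Result<(Vec<String>, Vec<String>), ()> {
    let tx = Tx {
        writes: RefCell::new(vec![]),
        queued: RefCell::new(vec![]),
    };
    f(&tx)?;
    Ok((tx.writes.into_inner(), tx.queued.into_inner()))
}

fn main() {
    let (writes, queued) = transaction(|tx| {
        tx.insert("version: deleted");
        tx.queue_insert("block_ref: deleted"); // drained later by a worker
        Ok(())
    })
    .unwrap();
    println!("committed {:?}, queued {:?}", writes, queued);
}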
@@ -1,6 +1,6 @@
 [package]
 name = "garage_rpc"
-version = "0.8.0"
+version = "0.8.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -14,7 +14,7 @@ path = "lib.rs"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-garage_util = { version = "0.8.0", path = "../util" }
+garage_util = { version = "0.8.1", path = "../util" }
 
 arc-swap = "1.0"
 bytes = "1.0"
@@ -5,6 +5,7 @@ use std::time::Duration;
 use futures::future::join_all;
 use futures::stream::futures_unordered::FuturesUnordered;
 use futures::stream::StreamExt;
+use futures_util::future::FutureExt;
 use tokio::select;
 use tokio::sync::watch;
 
@@ -23,6 +24,7 @@ pub use netapp::message::{
 use netapp::peering::fullmesh::FullMeshPeeringStrategy;
 pub use netapp::{self, NetApp, NodeID};
 
+use garage_util::background::BackgroundRunner;
 use garage_util::data::*;
 use garage_util::error::Error;
 use garage_util::metrics::RecordDuration;
@@ -92,6 +94,7 @@ pub struct RpcHelper(Arc<RpcHelperInner>);
 struct RpcHelperInner {
 	our_node_id: Uuid,
 	fullmesh: Arc<FullMeshPeeringStrategy>,
+	background: Arc<BackgroundRunner>,
 	ring: watch::Receiver<Arc<Ring>>,
 	metrics: RpcMetrics,
 	rpc_timeout: Duration,
@@ -101,6 +104,7 @@ impl RpcHelper {
 	pub(crate) fn new(
 		our_node_id: Uuid,
 		fullmesh: Arc<FullMeshPeeringStrategy>,
+		background: Arc<BackgroundRunner>,
 		ring: watch::Receiver<Arc<Ring>>,
 		rpc_timeout: Option<Duration>,
 	) -> Self {
@@ -109,6 +113,7 @@ impl RpcHelper {
 		Self(Arc::new(RpcHelperInner {
 			our_node_id,
 			fullmesh,
+			background,
 			ring,
 			metrics,
 			rpc_timeout: rpc_timeout.unwrap_or(DEFAULT_TIMEOUT),
@@ -372,13 +377,16 @@ impl RpcHelper {
 
 		if !resp_stream.is_empty() {
 			// Continue remaining requests in background.
-			// Note: these requests can get interrupted on process shutdown,
-			// we must not count on them being executed for certain.
-			// For all background things that have to happen with certainty,
-			// they have to be put in a proper queue that is persisted to disk.
-			tokio::spawn(async move {
+			// Continue the remaining requests immediately using tokio::spawn
+			// but enqueue a task in the background runner
+			// to ensure that the process won't exit until the requests are done
+			// (if we had just enqueued the resp_stream.collect directly in the background runner,
+			// the requests might have been put on hold in the background runner's queue,
+			// in which case they might timeout or otherwise fail)
+			let wait_finished_fut = tokio::spawn(async move {
 				resp_stream.collect::<Vec<Result<_, _>>>().await;
 			});
+			self.0.background.spawn(wait_finished_fut.map(|_| Ok(())));
 		}
 	}
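Aside: both sides of the last hunk implement the same quorum idea: return as soon as enough responses have arrived, and let the remaining in-flight requests finish in a detached task rather than dropping (and thus cancelling) them. A standalone sketch of that pattern with mock RPCs (invented timings and types):

use futures::stream::{FuturesUnordered, StreamExt};
use std::time::Duration;

#[tokio::main]
async fn main() {
    // Five mock "RPCs" that complete at different times.
    let mut resp_stream: FuturesUnordered<_> = (0..5u64)
        .map(|i| async move {
            tokio::time::sleep(Duration::from_millis(10 * i)).await;
            i
        })
        .collect();

    // Return as soon as a quorum of responses has arrived...
    let quorum = 3;
    let mut successes = Vec::new();
    while let Some(resp) = resp_stream.next().await {
        successes.push(resp);
        if successes.len() >= quorum {
            break;
        }
    }

    // ...and let the stragglers finish in a detached task instead of being
    // cancelled, so the remaining peers still receive their requests.
    tokio::spawn(async move {
        resp_stream.collect::<Vec<_>>().await;
    });

    println!("quorum reached with {:?}", successes);
}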
@ -21,6 +21,7 @@ use netapp::peering::fullmesh::FullMeshPeeringStrategy;
|
||||||
use netapp::util::parse_and_resolve_peer_addr_async;
|
use netapp::util::parse_and_resolve_peer_addr_async;
|
||||||
use netapp::{NetApp, NetworkKey, NodeID, NodeKey};
|
use netapp::{NetApp, NetworkKey, NodeID, NodeKey};
|
||||||
|
|
||||||
|
use garage_util::background::BackgroundRunner;
|
||||||
use garage_util::config::Config;
|
use garage_util::config::Config;
|
||||||
#[cfg(feature = "kubernetes-discovery")]
|
#[cfg(feature = "kubernetes-discovery")]
|
||||||
use garage_util::config::KubernetesDiscoveryConfig;
|
use garage_util::config::KubernetesDiscoveryConfig;
|
||||||
|
@ -49,6 +50,8 @@ pub const GARAGE_VERSION_TAG: u64 = 0x6761726167650008; // garage 0x0008
|
||||||
/// RPC endpoint used for calls related to membership
|
/// RPC endpoint used for calls related to membership
|
||||||
pub const SYSTEM_RPC_PATH: &str = "garage_rpc/membership.rs/SystemRpc";
|
pub const SYSTEM_RPC_PATH: &str = "garage_rpc/membership.rs/SystemRpc";
|
||||||
|
|
||||||
|
pub const CONNECT_ERROR_MESSAGE: &str = "Error establishing RPC connection to remote node. This can happen if the remote node is not reachable on the network, but also if the two nodes are not configured with the same rpc_secret";
|
||||||
|
|
||||||
/// RPC messages related to membership
|
/// RPC messages related to membership
|
||||||
#[derive(Debug, Serialize, Deserialize, Clone)]
|
#[derive(Debug, Serialize, Deserialize, Clone)]
|
||||||
pub enum SystemRpc {
|
pub enum SystemRpc {
|
||||||
|
@ -107,6 +110,9 @@ pub struct System {
|
||||||
pub ring: watch::Receiver<Arc<Ring>>,
|
pub ring: watch::Receiver<Arc<Ring>>,
|
||||||
update_ring: Mutex<watch::Sender<Arc<Ring>>>,
|
update_ring: Mutex<watch::Sender<Arc<Ring>>>,
|
||||||
|
|
||||||
|
/// The job runner of this node
|
||||||
|
pub background: Arc<BackgroundRunner>,
|
||||||
|
|
||||||
/// Path to metadata directory
|
/// Path to metadata directory
|
||||||
pub metadata_dir: PathBuf,
|
pub metadata_dir: PathBuf,
|
||||||
}
|
}
|
||||||
|
@ -226,6 +232,7 @@ impl System {
|
||||||
/// Create this node's membership manager
|
/// Create this node's membership manager
|
||||||
pub fn new(
|
pub fn new(
|
||||||
network_key: NetworkKey,
|
network_key: NetworkKey,
|
||||||
|
background: Arc<BackgroundRunner>,
|
||||||
replication_mode: ReplicationMode,
|
replication_mode: ReplicationMode,
|
||||||
config: &Config,
|
config: &Config,
|
||||||
) -> Result<Arc<Self>, Error> {
|
) -> Result<Arc<Self>, Error> {
|
||||||
|
@ -347,6 +354,7 @@ impl System {
|
||||||
rpc: RpcHelper::new(
|
rpc: RpcHelper::new(
|
||||||
netapp.id.into(),
|
netapp.id.into(),
|
||||||
fullmesh,
|
fullmesh,
|
||||||
|
background.clone(),
|
||||||
ring.clone(),
|
ring.clone(),
|
||||||
 			config.rpc_timeout_msec.map(Duration::from_millis),
 		),
@@ -364,6 +372,7 @@ impl System {
 			ring,
 			update_ring: Mutex::new(update_ring),
+			background,
 			metadata_dir: config.metadata_dir.clone(),
 		});
 		sys.system_endpoint.set_handler(sys.clone());
@@ -435,14 +444,17 @@ impl System {
 			))
 		})?;
 		let mut errors = vec![];
-		for addr in addrs.iter() {
-			match self.netapp.clone().try_connect(*addr, pubkey).await {
+		for ip in addrs.iter() {
+			match self
+				.netapp
+				.clone()
+				.try_connect(*ip, pubkey)
+				.await
+				.err_context(CONNECT_ERROR_MESSAGE)
+			{
 				Ok(()) => return Ok(()),
 				Err(e) => {
-					errors.push((
-						*addr,
-						Error::Message(connect_error_message(*addr, pubkey, e)),
-					));
+					errors.push((*ip, e));
 				}
 			}
 		}
@@ -566,7 +578,7 @@ impl System {
 	}

 	/// Save network configuration to disc
-	async fn save_cluster_layout(&self) -> Result<(), Error> {
+	async fn save_cluster_layout(self: Arc<Self>) -> Result<(), Error> {
 		let ring: Arc<Ring> = self.ring.borrow().clone();
 		self.persist_cluster_layout
 			.save_async(&ring.layout)
@@ -618,7 +630,11 @@ impl System {
 		if info.cluster_layout_version > local_info.cluster_layout_version
 			|| info.cluster_layout_staging_hash != local_info.cluster_layout_staging_hash
 		{
-			tokio::spawn(self.clone().pull_cluster_layout(from));
+			let self2 = self.clone();
+			self.background.spawn_cancellable(async move {
+				self2.pull_cluster_layout(from).await;
+				Ok(())
+			});
 		}

 		self.node_status
@@ -660,21 +676,18 @@ impl System {
 		drop(update_ring);

 		let self2 = self.clone();
-		tokio::spawn(async move {
-			if let Err(e) = self2
+		self.background.spawn_cancellable(async move {
+			self2
 				.rpc
 				.broadcast(
 					&self2.system_endpoint,
 					SystemRpc::AdvertiseClusterLayout(layout),
 					RequestStrategy::with_priority(PRIO_HIGH),
 				)
-				.await
-			{
-				warn!("Error while broadcasting new cluster layout: {}", e);
-			}
+				.await?;
+			Ok(())
 		});
-		self.save_cluster_layout().await?;
+		self.background.spawn(self.clone().save_cluster_layout());
 	}

 	Ok(SystemRpc::Ok)
@@ -760,12 +773,12 @@ impl System {
 		}

 		for (node_id, node_addr) in ping_list {
-			let self2 = self.clone();
-			tokio::spawn(async move {
-				if let Err(e) = self2.netapp.clone().try_connect(node_addr, node_id).await {
-					error!("{}", connect_error_message(node_addr, node_id, e));
-				}
-			});
+			tokio::spawn(
+				self.netapp
+					.clone()
+					.try_connect(node_addr, node_id)
+					.map(|r| r.err_context(CONNECT_ERROR_MESSAGE)),
+			);
 		}
 	}

@@ -774,10 +787,11 @@ impl System {
 	}

 	#[cfg(feature = "consul-discovery")]
-	background::spawn(self.clone().advertise_to_consul());
+	self.background.spawn(self.clone().advertise_to_consul());

 	#[cfg(feature = "kubernetes-discovery")]
-	background::spawn(self.clone().advertise_to_kubernetes());
+	self.background
+		.spawn(self.clone().advertise_to_kubernetes());

 	let restart_at = tokio::time::sleep(DISCOVERY_INTERVAL);
 	select! {
@@ -867,11 +881,3 @@ async fn resolve_peers(peers: &[String]) -> Vec<(NodeID, SocketAddr)> {

 	ret
 }
-
-fn connect_error_message(
-	addr: SocketAddr,
-	pubkey: ed25519::PublicKey,
-	e: netapp::error::Error,
-) -> String {
-	format!("Error establishing RPC connection to remote node: {}@{}.\nThis can happen if the remote node is not reachable on the network, but also if the two nodes are not configured with the same rpc_secret.\n{}", hex::encode(pubkey), addr, e)
-}
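A side note on the second hunk above: the rewritten loop returns as soon as one resolved address connects, and only if every address fails does it report the accumulated (address, error) pairs. A standalone sketch of that pattern follows; `try_all` is a hypothetical helper, not Garage code.

```rust
// First success wins; otherwise every (address, error) pair is returned,
// mirroring the `errors.push((*ip, e))` accumulation in the hunk above.
fn try_all<A: Copy, E>(
	addrs: &[A],
	mut connect: impl FnMut(A) -> Result<(), E>,
) -> Result<(), Vec<(A, E)>> {
	let mut errors = vec![];
	for a in addrs {
		match connect(*a) {
			Ok(()) => return Ok(()),
			Err(e) => errors.push((*a, e)),
		}
	}
	Err(errors)
}

fn main() {
	// Toy usage: only the third "address" accepts the connection.
	let res = try_all(&[1, 2, 3], |a| if a == 3 { Ok(()) } else { Err("refused") });
	assert!(res.is_ok());
}
```

With `err_context(CONNECT_ERROR_MESSAGE)` attached at the call site, the removed `connect_error_message` formatter is no longer needed.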
src/table/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "garage_table"
-version = "0.8.0"
+version = "0.8.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -14,14 +14,13 @@ path = "lib.rs"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

 [dependencies]
-garage_db = { version = "0.8.0", path = "../db" }
-garage_rpc = { version = "0.8.0", path = "../rpc" }
-garage_util = { version = "0.8.0", path = "../util" }
+garage_db = { version = "0.8.1", path = "../db" }
+garage_rpc = { version = "0.8.1", path = "../rpc" }
+garage_util = { version = "0.8.1", path = "../util" }

 opentelemetry = "0.17"

 async-trait = "0.1.7"
-arc-swap = "1.0"
 bytes = "1.0"
 hex = "0.4"
 hexdump = "0.1"
src/table/data.rs
@@ -31,10 +31,6 @@ pub struct TableData<F: TableSchema, R: TableReplication> {
 	pub(crate) merkle_tree: db::Tree,
 	pub(crate) merkle_todo: db::Tree,
 	pub(crate) merkle_todo_notify: Notify,
-
-	pub(crate) insert_queue: db::Tree,
-	pub(crate) insert_queue_notify: Notify,
-
 	pub(crate) gc_todo: CountedTree,

 	pub(crate) metrics: TableMetrics,
@@ -57,13 +53,9 @@ where
 			.open_tree(&format!("{}:merkle_todo", F::TABLE_NAME))
 			.expect("Unable to open DB Merkle TODO tree");

-		let insert_queue = db
-			.open_tree(&format!("{}:insert_queue", F::TABLE_NAME))
-			.expect("Unable to open insert queue DB tree");
-
 		let gc_todo = db
 			.open_tree(&format!("{}:gc_todo_v2", F::TABLE_NAME))
-			.expect("Unable to open GC DB tree");
+			.expect("Unable to open DB tree");
 		let gc_todo = CountedTree::new(gc_todo).expect("Cannot count gc_todo_v2");

 		let metrics = TableMetrics::new(
@@ -82,8 +74,6 @@ where
 			merkle_tree,
 			merkle_todo,
 			merkle_todo_notify: Notify::new(),
-			insert_queue,
-			insert_queue_notify: Notify::new(),
 			gc_todo,
 			metrics,
 		})
@@ -183,8 +173,9 @@ where

 	pub(crate) fn update_entry(&self, update_bytes: &[u8]) -> Result<(), Error> {
 		let update = self.decode_entry(update_bytes)?;
+		let tree_key = self.tree_key(update.partition_key(), update.sort_key());

-		self.update_entry_with(update.partition_key(), update.sort_key(), |ent| match ent {
+		self.update_entry_with(&tree_key[..], |ent| match ent {
 			Some(mut ent) => {
 				ent.merge(&update);
 				ent
@@ -196,14 +187,11 @@ where

 	pub fn update_entry_with(
 		&self,
-		partition_key: &F::P,
-		sort_key: &F::S,
+		tree_key: &[u8],
 		f: impl Fn(Option<F::E>) -> F::E,
 	) -> Result<Option<F::E>, Error> {
-		let tree_key = self.tree_key(partition_key, sort_key);
-
 		let changed = self.store.db().transaction(|mut tx| {
-			let (old_entry, old_bytes, new_entry) = match tx.get(&self.store, &tree_key)? {
+			let (old_entry, old_bytes, new_entry) = match tx.get(&self.store, tree_key)? {
 				Some(old_bytes) => {
 					let old_entry = self.decode_entry(&old_bytes).map_err(db::TxError::Abort)?;
 					let new_entry = f(Some(old_entry.clone()));
@@ -212,23 +200,23 @@ where
 				None => (None, None, f(None)),
 			};

-			// Changed can be true in two scenarios
-			// Scenario 1: the actual represented value changed,
-			// so of course the messagepack encoding changed as well
+			// Scenario 1: the value changed, so of course there is a change
+			let value_changed = Some(&new_entry) != old_entry.as_ref();
+
 			// Scenario 2: the value didn't change but due to a migration in the
-			// data format, the messagepack encoding changed. In this case,
-			// we also have to write the migrated value in the table and update
+			// data format, the messagepack encoding changed. In this case
+			// we have to write the migrated value in the table and update
 			// the associated Merkle tree entry.
 			let new_bytes = rmp_to_vec_all_named(&new_entry)
 				.map_err(Error::RmpEncode)
 				.map_err(db::TxError::Abort)?;
-			let changed = Some(&new_bytes[..]) != old_bytes.as_deref();
+			let encoding_changed = Some(&new_bytes[..]) != old_bytes.as_ref().map(|x| &x[..]);
 			drop(old_bytes);

-			if changed {
-				let new_bytes_hash = blake2sum(&new_bytes);
-				tx.insert(&self.merkle_todo, &tree_key, new_bytes_hash.as_slice())?;
-				tx.insert(&self.store, &tree_key, new_bytes)?;
+			if value_changed || encoding_changed {
+				let new_bytes_hash = blake2sum(&new_bytes[..]);
+				tx.insert(&self.merkle_todo, tree_key, new_bytes_hash.as_slice())?;
+				tx.insert(&self.store, tree_key, new_bytes)?;

 				self.instance
 					.updated(&mut tx, old_entry.as_ref(), Some(&new_entry))?;
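The split of the old `changed` flag into `value_changed` and `encoding_changed` is worth a worked example: after a data-format migration, an entry can decode to the same logical value while its MessagePack re-encoding differs, and the row (plus its Merkle todo entry) must still be rewritten. A self-contained illustration, using serde and rmp-serde as stand-ins for `rmp_to_vec_all_named`; the `Entry` and `EntryV0` types are invented for the example.

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, PartialEq, Debug)]
struct Entry {
	name: String,
	#[serde(default)]
	count: u64, // field added by a later schema version
}

#[derive(Serialize)]
struct EntryV0 {
	name: String, // the pre-migration schema had only this field
}

fn main() {
	// Bytes as an older version would have stored them.
	let old_bytes = rmp_serde::to_vec_named(&EntryV0 { name: "a".into() }).unwrap();

	// Decoding with the current schema fills `count` with its default,
	// and a no-op update leaves the logical value unchanged.
	let old_entry: Entry = rmp_serde::from_slice(&old_bytes).unwrap();
	let new_entry = Entry { name: old_entry.name.clone(), count: old_entry.count };
	let value_changed = new_entry != old_entry;

	// But re-encoding now includes `count`, so the stored bytes must change.
	let new_bytes = rmp_serde::to_vec_named(&new_entry).unwrap();
	let encoding_changed = new_bytes != old_bytes;

	assert!(!value_changed && encoding_changed);
}
```

Here `value_changed` is false but `encoding_changed` is true, which is exactly the Scenario 2 case the comment describes.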
@@ -254,7 +242,7 @@ where
 			let pk_hash = Hash::try_from(&tree_key[..32]).unwrap();
 			let nodes = self.replication.write_nodes(&pk_hash);
 			if nodes.first() == Some(&self.system.id) {
-				GcTodoEntry::new(tree_key, new_bytes_hash).save(&self.gc_todo)?;
+				GcTodoEntry::new(tree_key.to_vec(), new_bytes_hash).save(&self.gc_todo)?;
 			}
 		}

@@ -270,11 +258,10 @@ where
 			.db()
 			.transaction(|mut tx| match tx.get(&self.store, k)? {
 				Some(cur_v) if cur_v == v => {
-					let old_entry = self.decode_entry(v).map_err(db::TxError::Abort)?;
-
 					tx.remove(&self.store, k)?;
 					tx.insert(&self.merkle_todo, k, vec![])?;

+					let old_entry = self.decode_entry(v).map_err(db::TxError::Abort)?;
 					self.instance.updated(&mut tx, Some(&old_entry), None)?;
 					Ok(true)
 				}
@@ -298,11 +285,10 @@ where
 			.db()
 			.transaction(|mut tx| match tx.get(&self.store, k)? {
 				Some(cur_v) if blake2sum(&cur_v[..]) == vhash => {
-					let old_entry = self.decode_entry(&cur_v[..]).map_err(db::TxError::Abort)?;
-
 					tx.remove(&self.store, k)?;
 					tx.insert(&self.merkle_todo, k, vec![])?;

+					let old_entry = self.decode_entry(&cur_v[..]).map_err(db::TxError::Abort)?;
 					self.instance.updated(&mut tx, Some(&old_entry), None)?;
 					Ok(true)
 				}
@@ -316,32 +302,6 @@ where
 		Ok(removed)
 	}

-	// ---- Insert queue functions ----
-
-	pub(crate) fn queue_insert(
-		&self,
-		tx: &mut db::Transaction,
-		ins: &F::E,
-	) -> db::TxResult<(), Error> {
-		let tree_key = self.tree_key(ins.partition_key(), ins.sort_key());
-
-		let new_entry = match tx.get(&self.insert_queue, &tree_key)? {
-			Some(old_v) => {
-				let mut entry = self.decode_entry(&old_v).map_err(db::TxError::Abort)?;
-				entry.merge(ins);
-				rmp_to_vec_all_named(&entry)
-			}
-			None => rmp_to_vec_all_named(ins),
-		};
-		let new_entry = new_entry
-			.map_err(Error::RmpEncode)
-			.map_err(db::TxError::Abort)?;
-		tx.insert(&self.insert_queue, &tree_key, new_entry)?;
-		self.insert_queue_notify.notify_one();
-
-		Ok(())
-	}
-
 	// ---- Utility functions ----

 	pub fn tree_key(&self, p: &F::P, s: &F::S) -> Vec<u8> {
src/table/gc.rs
@@ -54,23 +54,22 @@ where
 	F: TableSchema + 'static,
 	R: TableReplication + 'static,
 {
-	pub(crate) fn new(system: Arc<System>, data: Arc<TableData<F, R>>) -> Arc<Self> {
+	pub(crate) fn launch(system: Arc<System>, data: Arc<TableData<F, R>>) -> Arc<Self> {
 		let endpoint = system
 			.netapp
 			.endpoint(format!("garage_table/gc.rs/Rpc:{}", F::TABLE_NAME));

 		let gc = Arc::new(Self {
-			system,
+			system: system.clone(),
 			data,
 			endpoint,
 		});

 		gc.endpoint.set_handler(gc.clone());

-		gc
-	}
-
-	pub(crate) fn spawn_workers(self: &Arc<Self>, bg: &BackgroundRunner) {
-		bg.spawn_worker(GcWorker::new(self.clone()));
+		system.background.spawn_worker(GcWorker::new(gc.clone()));
+
+		gc
 	}

 	async fn gc_loop_iter(&self) -> Result<Option<Duration>, Error> {
@@ -348,7 +347,10 @@
 		}
 	}

-	async fn wait_for_work(&mut self) -> WorkerState {
+	async fn wait_for_work(&mut self, must_exit: &watch::Receiver<bool>) -> WorkerState {
+		if *must_exit.borrow() {
+			return WorkerState::Done;
+		}
 		tokio::time::sleep(self.wait_delay).await;
 		WorkerState::Busy
 	}
src/table/lib.rs
@@ -4,18 +4,16 @@
 #[macro_use]
 extern crate tracing;

+mod metrics;
 pub mod schema;
 pub mod util;

 pub mod data;
-pub mod replication;
-pub mod table;
-
 mod gc;
 mod merkle;
-mod metrics;
-mod queue;
+pub mod replication;
 mod sync;
+pub mod table;

 pub use schema::*;
 pub use table::*;
src/table/merkle.rs
@@ -3,7 +3,6 @@ use std::time::Duration;

 use async_trait::async_trait;
 use serde::{Deserialize, Serialize};
-use tokio::select;
 use tokio::sync::watch;

 use garage_db as db;
@@ -70,17 +69,17 @@ where
 	F: TableSchema + 'static,
 	R: TableReplication + 'static,
 {
-	pub(crate) fn new(data: Arc<TableData<F, R>>) -> Arc<Self> {
+	pub(crate) fn launch(background: &BackgroundRunner, data: Arc<TableData<F, R>>) -> Arc<Self> {
 		let empty_node_hash = blake2sum(&rmp_to_vec_all_named(&MerkleNode::Empty).unwrap()[..]);

-		Arc::new(Self {
+		let ret = Arc::new(Self {
 			data,
 			empty_node_hash,
-		})
-	}
-
-	pub(crate) fn spawn_workers(self: &Arc<Self>, background: &BackgroundRunner) {
-		background.spawn_worker(MerkleWorker(self.clone()));
+		});
+
+		background.spawn_worker(MerkleWorker(ret.clone()));
+
+		ret
 	}

 	fn updater_loop_iter(&self) -> Result<WorkerState, Error> {
@@ -340,11 +339,11 @@
 		.unwrap()
 	}

-	async fn wait_for_work(&mut self) -> WorkerState {
-		select! {
-			_ = tokio::time::sleep(Duration::from_secs(60)) => (),
-			_ = self.0.data.merkle_todo_notify.notified() => (),
+	async fn wait_for_work(&mut self, must_exit: &watch::Receiver<bool>) -> WorkerState {
+		if *must_exit.borrow() {
+			return WorkerState::Done;
 		}
+		tokio::time::sleep(Duration::from_secs(10)).await;
 		WorkerState::Busy
 	}
 }
src/table/queue.rs
Deleted file
@@ -1,81 +0,0 @@
-use std::sync::Arc;
-use std::time::Duration;
-
-use async_trait::async_trait;
-use tokio::select;
-use tokio::sync::watch;
-
-use garage_util::background::*;
-use garage_util::error::Error;
-
-use crate::replication::*;
-use crate::schema::*;
-use crate::table::*;
-
-const BATCH_SIZE: usize = 100;
-
-pub(crate) struct InsertQueueWorker<F, R>(pub(crate) Arc<Table<F, R>>)
-where
-	F: TableSchema + 'static,
-	R: TableReplication + 'static;
-
-#[async_trait]
-impl<F, R> Worker for InsertQueueWorker<F, R>
-where
-	F: TableSchema + 'static,
-	R: TableReplication + 'static,
-{
-	fn name(&self) -> String {
-		format!("{} queue", F::TABLE_NAME)
-	}
-
-	fn status(&self) -> WorkerStatus {
-		WorkerStatus {
-			queue_length: Some(self.0.data.insert_queue.len().unwrap_or(0) as u64),
-			..Default::default()
-		}
-	}
-
-	async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
-		let mut kv_pairs = vec![];
-		let mut values = vec![];
-
-		for entry_kv in self.0.data.insert_queue.iter()? {
-			let (k, v) = entry_kv?;
-
-			values.push(self.0.data.decode_entry(&v)?);
-			kv_pairs.push((k, v));
-
-			if kv_pairs.len() > BATCH_SIZE {
-				break;
-			}
-		}
-
-		if kv_pairs.is_empty() {
-			return Ok(WorkerState::Idle);
-		}
-
-		self.0.insert_many(values).await?;
-
-		self.0.data.insert_queue.db().transaction(|mut tx| {
-			for (k, v) in kv_pairs.iter() {
-				if let Some(v2) = tx.get(&self.0.data.insert_queue, k)? {
-					if &v2 == v {
-						tx.remove(&self.0.data.insert_queue, k)?;
-					}
-				}
-			}
-			Ok(())
-		})?;
-
-		Ok(WorkerState::Busy)
-	}
-
-	async fn wait_for_work(&mut self) -> WorkerState {
-		select! {
-			_ = tokio::time::sleep(Duration::from_secs(600)) => (),
-			_ = self.0.data.insert_queue_notify.notified() => (),
-		}
-		WorkerState::Busy
-	}
-}
src/table/sync.rs
@@ -2,7 +2,6 @@ use std::collections::VecDeque;
 use std::sync::Arc;
 use std::time::{Duration, Instant};

-use arc_swap::ArcSwapOption;
 use async_trait::async_trait;
 use futures_util::stream::*;
 use opentelemetry::KeyValue;
@@ -14,7 +13,7 @@ use tokio::sync::{mpsc, watch};

 use garage_util::background::*;
 use garage_util::data::*;
-use garage_util::error::{Error, OkOrMessage};
+use garage_util::error::Error;

 use garage_rpc::ring::*;
 use garage_rpc::system::System;
@@ -33,7 +32,7 @@ pub struct TableSyncer<F: TableSchema + 'static, R: TableReplication + 'static>
 	data: Arc<TableData<F, R>>,
 	merkle: Arc<MerkleUpdater<F, R>>,

-	add_full_sync_tx: ArcSwapOption<mpsc::UnboundedSender<()>>,
+	add_full_sync_tx: mpsc::UnboundedSender<()>,
 	endpoint: Arc<Endpoint<SyncRpc, Self>>,
 }

@@ -66,7 +65,7 @@ where
 	F: TableSchema + 'static,
 	R: TableReplication + 'static,
 {
-	pub(crate) fn new(
+	pub(crate) fn launch(
 		system: Arc<System>,
 		data: Arc<TableData<F, R>>,
 		merkle: Arc<MerkleUpdater<F, R>>,
@@ -75,40 +74,34 @@ where
 			.netapp
 			.endpoint(format!("garage_table/sync.rs/Rpc:{}", F::TABLE_NAME));

+		let (add_full_sync_tx, add_full_sync_rx) = mpsc::unbounded_channel();
+
 		let syncer = Arc::new(Self {
-			system,
+			system: system.clone(),
 			data,
 			merkle,
-			add_full_sync_tx: ArcSwapOption::new(None),
+			add_full_sync_tx,
 			endpoint,
 		});

 		syncer.endpoint.set_handler(syncer.clone());

-		syncer
-	}
-
-	pub(crate) fn spawn_workers(self: &Arc<Self>, bg: &BackgroundRunner) {
-		let (add_full_sync_tx, add_full_sync_rx) = mpsc::unbounded_channel();
-		self.add_full_sync_tx
-			.store(Some(Arc::new(add_full_sync_tx)));
-
-		bg.spawn_worker(SyncWorker {
-			syncer: self.clone(),
-			ring_recv: self.system.ring.clone(),
-			ring: self.system.ring.borrow().clone(),
+		system.background.spawn_worker(SyncWorker {
+			syncer: syncer.clone(),
+			ring_recv: system.ring.clone(),
+			ring: system.ring.borrow().clone(),
 			add_full_sync_rx,
 			todo: vec![],
 			next_full_sync: Instant::now() + Duration::from_secs(20),
 		});
+
+		syncer
 	}

-	pub fn add_full_sync(&self) -> Result<(), Error> {
-		let tx = self.add_full_sync_tx.load();
-		let tx = tx
-			.as_ref()
-			.ok_or_message("table sync worker is not running")?;
-		tx.send(()).ok_or_message("send error")?;
-		Ok(())
+	pub fn add_full_sync(&self) {
+		if self.add_full_sync_tx.send(()).is_err() {
+			error!("({}) Could not add full sync", F::TABLE_NAME);
+		}
 	}

 	// ----
@@ -593,7 +586,10 @@ impl<F: TableSchema + 'static, R: TableReplication + 'static> Worker for SyncWor
 		}
 	}

-	async fn wait_for_work(&mut self) -> WorkerState {
+	async fn wait_for_work(&mut self, must_exit: &watch::Receiver<bool>) -> WorkerState {
+		if *must_exit.borrow() {
+			return WorkerState::Done;
+		}
 		select! {
 			s = self.add_full_sync_rx.recv() => {
 				if let Some(()) = s {
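In the version on the right, `add_full_sync` no longer returns a `Result`: the only failure mode is the SyncWorker having exited, which surfaces as an `Err` from the unbounded channel's `send`. A minimal self-contained demo of that behaviour (tokio assumed as a dependency):

```rust
use tokio::sync::mpsc;

fn main() {
	let (tx, rx) = mpsc::unbounded_channel::<()>();
	assert!(tx.send(()).is_ok()); // receiver alive: full-sync request queued
	drop(rx); // the worker exits, dropping its receiver
	assert!(tx.send(()).is_err()); // later requests can only be logged, not handled
}
```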
src/table/table.rs
@@ -14,7 +14,6 @@ use opentelemetry::{

 use garage_db as db;

-use garage_util::background::BackgroundRunner;
 use garage_util::data::*;
 use garage_util::error::Error;
 use garage_util::metrics::RecordDuration;
@@ -26,7 +25,6 @@ use crate::crdt::Crdt;
 use crate::data::*;
 use crate::gc::*;
 use crate::merkle::*;
-use crate::queue::InsertQueueWorker;
 use crate::replication::*;
 use crate::schema::*;
 use crate::sync::*;
@@ -37,7 +35,6 @@ pub struct Table<F: TableSchema + 'static, R: TableReplication + 'static> {
 	pub data: Arc<TableData<F, R>>,
 	pub merkle_updater: Arc<MerkleUpdater<F, R>>,
 	pub syncer: Arc<TableSyncer<F, R>>,
-	gc: Arc<TableGc<F, R>>,
 	endpoint: Arc<Endpoint<TableRpc<F>, Self>>,
 }

@@ -78,16 +75,15 @@ where

 		let data = TableData::new(system.clone(), instance, replication, db);

-		let merkle_updater = MerkleUpdater::new(data.clone());
+		let merkle_updater = MerkleUpdater::launch(&system.background, data.clone());

-		let syncer = TableSyncer::new(system.clone(), data.clone(), merkle_updater.clone());
-		let gc = TableGc::new(system.clone(), data.clone());
+		let syncer = TableSyncer::launch(system.clone(), data.clone(), merkle_updater.clone());
+		TableGc::launch(system.clone(), data.clone());

 		let table = Arc::new(Self {
 			system,
 			data,
 			merkle_updater,
-			gc,
 			syncer,
 			endpoint,
 		});
@@ -97,13 +93,6 @@ where
 		table
 	}

-	pub fn spawn_workers(self: &Arc<Self>, bg: &BackgroundRunner) {
-		self.merkle_updater.spawn_workers(bg);
-		self.syncer.spawn_workers(bg);
-		self.gc.spawn_workers(bg);
-		bg.spawn_worker(InsertQueueWorker(self.clone()));
-	}
-
 	pub async fn insert(&self, e: &F::E) -> Result<(), Error> {
 		let tracer = opentelemetry::global::tracer("garage_table");
 		let span = tracer.start(format!("{} insert", F::TABLE_NAME));
@@ -139,11 +128,6 @@ where
 		Ok(())
 	}

-	/// Insert item locally
-	pub fn queue_insert(&self, tx: &mut db::Transaction, e: &F::E) -> db::TxResult<(), Error> {
-		self.data.queue_insert(tx, e)
-	}
-
 	pub async fn insert_many<I, IE>(&self, entries: I) -> Result<(), Error>
 	where
 		I: IntoIterator<Item = IE> + Send + Sync,
@@ -275,11 +259,9 @@ where
 		if not_all_same {
 			let self2 = self.clone();
 			let ent2 = ret_entry.clone();
-			tokio::spawn(async move {
-				if let Err(e) = self2.repair_on_read(&who[..], ent2).await {
-					warn!("Error doing repair on read: {}", e);
-				}
-			});
+			self.system
+				.background
+				.spawn_cancellable(async move { self2.repair_on_read(&who[..], ent2).await });
 		}
 	}

@@ -376,12 +358,11 @@ where
 			.into_iter()
 			.map(|k| ret.get(&k).unwrap().clone())
 			.collect::<Vec<_>>();
-		tokio::spawn(async move {
+		self.system.background.spawn_cancellable(async move {
 			for v in to_repair {
-				if let Err(e) = self2.repair_on_read(&who[..], v).await {
-					warn!("Error doing repair on read: {}", e);
-				}
+				self2.repair_on_read(&who[..], v).await?;
 			}
+			Ok(())
 		});
 	}
src/util/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "garage_util"
-version = "0.8.0"
+version = "0.8.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -14,7 +14,7 @@ path = "lib.rs"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

 [dependencies]
-garage_db = { version = "0.8.0", path = "../db" }
+garage_db = { version = "0.8.1", path = "../db" }

 arc-swap = "1.0"
 async-trait = "0.1"
48
src/util/background/job_worker.rs
Normal file
@@ -0,0 +1,48 @@
+//! Job worker: a generic worker that just processes incoming
+//! jobs one by one
+
+use std::sync::Arc;
+
+use async_trait::async_trait;
+use tokio::sync::{mpsc, Mutex};
+
+use crate::background::worker::*;
+use crate::background::*;
+
+pub(crate) struct JobWorker {
+	pub(crate) index: usize,
+	pub(crate) job_chan: Arc<Mutex<mpsc::UnboundedReceiver<(Job, bool)>>>,
+	pub(crate) next_job: Option<Job>,
+}
+
+#[async_trait]
+impl Worker for JobWorker {
+	fn name(&self) -> String {
+		format!("Job worker #{}", self.index)
+	}
+
+	async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
+		match self.next_job.take() {
+			None => return Ok(WorkerState::Idle),
+			Some(job) => {
+				job.await?;
+				Ok(WorkerState::Busy)
+			}
+		}
+	}
+
+	async fn wait_for_work(&mut self, must_exit: &watch::Receiver<bool>) -> WorkerState {
+		loop {
+			match self.job_chan.lock().await.recv().await {
+				Some((job, cancellable)) => {
+					if cancellable && *must_exit.borrow() {
+						continue;
+					}
+					self.next_job = Some(job);
+					return WorkerState::Busy;
+				}
+				None => return WorkerState::Done,
+			}
+		}
+	}
+}
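Why `job_chan` is an `Arc<Mutex<UnboundedReceiver<..>>>`: all `n_runners` JobWorkers pull from the same queue, and the Mutex decides which idle worker takes the next job. A standalone tokio analog of that sharing scheme (not Garage code; it assumes tokio's `full` feature set):

```rust
use std::sync::Arc;
use tokio::sync::{mpsc, Mutex};

#[tokio::main]
async fn main() {
	let (tx, rx) = mpsc::unbounded_channel::<u32>();
	// One receiver shared by every worker, like JobWorker's job_chan.
	let rx = Arc::new(Mutex::new(rx));

	let mut handles = vec![];
	for i in 0..3 {
		let rx = rx.clone();
		handles.push(tokio::spawn(async move {
			loop {
				// Whoever acquires the lock waits for the next job; the
				// guard is dropped at the end of this statement.
				let job = rx.lock().await.recv().await;
				match job {
					Some(n) => println!("worker {} ran job {}", i, n),
					None => break, // channel closed: no more jobs
				}
			}
		}));
	}

	for n in 0..6 {
		tx.send(n).unwrap();
	}
	drop(tx); // close the queue so every worker sees `None` and exits
	for h in handles {
		h.await.unwrap();
	}
}
```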
src/util/background/mod.rs
@@ -1,18 +1,27 @@
 //! Job runner for futures and async functions

+pub mod job_worker;
 pub mod worker;

+use core::future::Future;
+
 use std::collections::HashMap;
+use std::pin::Pin;
 use std::sync::Arc;

 use serde::{Deserialize, Serialize};
-use tokio::sync::{mpsc, watch};
+use tokio::sync::{mpsc, watch, Mutex};

+use crate::error::Error;
 use worker::WorkerProcessor;
 pub use worker::{Worker, WorkerState};

+pub(crate) type JobOutput = Result<(), Error>;
+pub(crate) type Job = Pin<Box<dyn Future<Output = JobOutput> + Send>>;
+
 /// Job runner for futures and async functions
 pub struct BackgroundRunner {
+	send_job: mpsc::UnboundedSender<(Job, bool)>,
 	send_worker: mpsc::UnboundedSender<Box<dyn Worker>>,
 	worker_info: Arc<std::sync::Mutex<HashMap<usize, WorkerInfo>>>,
 }
@@ -40,7 +49,10 @@ pub struct WorkerStatus {

 impl BackgroundRunner {
 	/// Create a new BackgroundRunner
-	pub fn new(stop_signal: watch::Receiver<bool>) -> (Arc<Self>, tokio::task::JoinHandle<()>) {
+	pub fn new(
+		n_runners: usize,
+		stop_signal: watch::Receiver<bool>,
+	) -> (Arc<Self>, tokio::task::JoinHandle<()>) {
 		let (send_worker, worker_out) = mpsc::unbounded_channel::<Box<dyn Worker>>();

 		let worker_info = Arc::new(std::sync::Mutex::new(HashMap::new()));
@@ -51,7 +63,24 @@ impl BackgroundRunner {
 			worker_processor.run().await;
 		});

+		let (send_job, queue_out) = mpsc::unbounded_channel();
+		let queue_out = Arc::new(Mutex::new(queue_out));
+
+		for i in 0..n_runners {
+			let queue_out = queue_out.clone();
+
+			send_worker
+				.send(Box::new(job_worker::JobWorker {
+					index: i,
+					job_chan: queue_out.clone(),
+					next_job: None,
+				}))
+				.ok()
+				.unwrap();
+		}
+
 		let bgrunner = Arc::new(Self {
+			send_job,
 			send_worker,
 			worker_info,
 		});
@@ -62,6 +91,31 @@ impl BackgroundRunner {
 		self.worker_info.lock().unwrap().clone()
 	}

+	/// Spawn a task to be run in background
+	pub fn spawn<T>(&self, job: T)
+	where
+		T: Future<Output = JobOutput> + Send + 'static,
+	{
+		let boxed: Job = Box::pin(job);
+		self.send_job
+			.send((boxed, false))
+			.ok()
+			.expect("Could not put job in queue");
+	}
+
+	/// Spawn a task to be run in background. It may get discarded before running if spawned while
+	/// the runner is stopping
+	pub fn spawn_cancellable<T>(&self, job: T)
+	where
+		T: Future<Output = JobOutput> + Send + 'static,
+	{
+		let boxed: Job = Box::pin(job);
+		self.send_job
+			.send((boxed, true))
+			.ok()
+			.expect("Could not put job in queue");
+	}
+
 	pub fn spawn_worker<W>(&self, worker: W)
 	where
 		W: Worker + 'static,
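The restored `BackgroundRunner` API reduces to a channel of boxed futures tagged with a cancellable flag. A self-contained analog of the `spawn`/`spawn_cancellable` semantics (illustrative only; `String` stands in for the real error type, tokio's `full` feature assumed):

```rust
use std::future::Future;
use std::pin::Pin;
use tokio::sync::{mpsc, watch};

// Same shape as the Job type restored above, with String as a stand-in error.
type Job = Pin<Box<dyn Future<Output = Result<(), String>> + Send>>;

#[tokio::main]
async fn main() {
	let (send_job, mut recv_job) = mpsc::unbounded_channel::<(Job, bool)>();
	let (stop_tx, stop_rx) = watch::channel(false);

	let worker = tokio::spawn(async move {
		while let Some((job, cancellable)) = recv_job.recv().await {
			// spawn_cancellable's flag: drop the job instead of running it
			// once the stop signal has been raised.
			if cancellable && *stop_rx.borrow() {
				continue;
			}
			if let Err(e) = job.await {
				eprintln!("job failed: {}", e);
			}
		}
	});

	// What a spawn-style call does under the hood: box the future and
	// enqueue it with cancellable = false, so it runs even during shutdown.
	let job: Job = Box::pin(async {
		println!("hello from a background job");
		Ok::<(), String>(())
	});
	send_job.send((job, false)).unwrap();

	let _ = stop_tx.send(true);
	drop(send_job); // closing the queue lets the worker loop end
	worker.await.unwrap();
}
```

Design-wise, the flag lets shutdown discard best-effort work (repair-on-read, layout pulls) while still running jobs that were queued as mandatory.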
src/util/background/worker.rs
@@ -1,6 +1,6 @@
 use std::collections::HashMap;
 use std::sync::Arc;
-use std::time::Duration;
+use std::time::{Duration, Instant};

 use async_trait::async_trait;
 use futures::future::*;
@@ -14,10 +14,6 @@ use crate::background::{WorkerInfo, WorkerStatus};
 use crate::error::Error;
 use crate::time::now_msec;

-// All workers that haven't exited for this time after an exit signal was recieved
-// will be interrupted in the middle of whatever they are doing.
-const EXIT_DEADLINE: Duration = Duration::from_secs(8);
-
 #[derive(PartialEq, Copy, Clone, Serialize, Deserialize, Debug)]
 pub enum WorkerState {
 	Busy,
@@ -54,8 +50,10 @@ pub trait Worker: Send {
 	async fn work(&mut self, must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error>;

 	/// Wait for work: await for some task to become available. This future can be interrupted in
-	/// the middle for any reason, for example if an interrupt signal was recieved.
-	async fn wait_for_work(&mut self) -> WorkerState;
+	/// the middle for any reason. This future doesn't have to await on must_exit.changed(), we
+	/// are doing it for you. Therefore it only receives a read refernce to must_exit which allows
+	/// it to check if we are exiting.
+	async fn wait_for_work(&mut self, must_exit: &watch::Receiver<bool>) -> WorkerState;
 }

 pub(crate) struct WorkerProcessor {
@@ -95,9 +93,11 @@ impl WorkerProcessor {
 				let task_id = next_task_id;
 				next_task_id += 1;
 				let stop_signal = self.stop_signal.clone();
+				let stop_signal_worker = self.stop_signal.clone();
 				let mut worker = WorkerHandler {
 					task_id,
 					stop_signal,
+					stop_signal_worker,
 					worker: new_worker,
 					state: WorkerState::Busy,
 					errors: 0,
@@ -153,14 +153,26 @@ impl WorkerProcessor {
 		}

 		// We are exiting, drain everything
+		let drain_half_time = Instant::now() + Duration::from_secs(5);
 		let drain_everything = async move {
-			while let Some(worker) = workers.next().await {
+			while let Some(mut worker) = workers.next().await {
+				if worker.state == WorkerState::Done {
 					info!(
-						"Worker {} (TID {}) exited (last state: {:?})",
+						"Worker {} (TID {}) exited",
 						worker.worker.name(),
-						worker.task_id,
-						worker.state
+						worker.task_id
 					);
+				} else if Instant::now() > drain_half_time {
+					warn!("Worker {} (TID {}) interrupted between two iterations in state {:?} (this should be fine)", worker.worker.name(), worker.task_id, worker.state);
+				} else {
+					workers.push(
+						async move {
+							worker.step().await;
+							worker
+						}
+						.boxed(),
+					);
+				}
 			}
 		};
@@ -168,7 +180,7 @@ impl WorkerProcessor {
 			_ = drain_everything => {
 				info!("All workers exited peacefully \\o/");
 			}
-			_ = tokio::time::sleep(EXIT_DEADLINE) => {
+			_ = tokio::time::sleep(Duration::from_secs(9)) => {
 				error!("Some workers could not exit in time, we are cancelling some things in the middle");
 			}
 		}
@@ -178,6 +190,7 @@ impl WorkerProcessor {
 struct WorkerHandler {
 	task_id: usize,
 	stop_signal: watch::Receiver<bool>,
+	stop_signal_worker: watch::Receiver<bool>,
 	worker: Box<dyn Worker>,
 	state: WorkerState,
 	errors: usize,
@@ -212,19 +225,33 @@ impl WorkerHandler {
 			},
 			WorkerState::Throttled(delay) => {
 				// Sleep for given delay and go back to busy state
+				if !*self.stop_signal.borrow() {
 					select! {
-						_ = tokio::time::sleep(Duration::from_secs_f32(delay)) => {
-							self.state = WorkerState::Busy;
-						}
+						_ = tokio::time::sleep(Duration::from_secs_f32(delay)) => (),
 						_ = self.stop_signal.changed() => (),
 					}
+				}
+				self.state = WorkerState::Busy;
 			}
 			WorkerState::Idle => {
+				if *self.stop_signal.borrow() {
 					select! {
-						new_st = self.worker.wait_for_work() => {
+						new_st = self.worker.wait_for_work(&self.stop_signal_worker) => {
 							self.state = new_st;
 						}
-						_ = self.stop_signal.changed() => (),
+						_ = tokio::time::sleep(Duration::from_secs(1)) => {
+							// stay in Idle state
+						}
 					}
+				} else {
+					select! {
+						new_st = self.worker.wait_for_work(&self.stop_signal_worker) => {
+							self.state = new_st;
+						}
+						_ = self.stop_signal.changed() => {
+							// stay in Idle state
+						}
+					}
+				}
 			}
 			WorkerState::Done => unreachable!(),
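With the revised trait, `wait_for_work` gets a read-only `must_exit` and is expected to return `WorkerState::Done` itself once shutdown is requested, as the GcWorker, MerkleWorker and SyncWorker hunks above now do. A minimal sketch of an implementor under these assumptions: `TickWorker` is hypothetical, the import paths assume garage_util 0.8.1's re-exports, and the real trait may require further methods (for example status reporting) that are elided here.

```rust
use std::time::Duration;

use async_trait::async_trait;
use tokio::sync::watch;

use garage_util::background::{Worker, WorkerState};
use garage_util::error::Error;

struct TickWorker {
	ticks: u64,
}

#[async_trait]
impl Worker for TickWorker {
	fn name(&self) -> String {
		"tick worker".into()
	}

	async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
		self.ticks += 1;
		Ok(WorkerState::Idle) // nothing left to do right now
	}

	async fn wait_for_work(&mut self, must_exit: &watch::Receiver<bool>) -> WorkerState {
		// The processor selects on must_exit.changed() for us; checking the
		// flag here just avoids sleeping when shutdown is already underway.
		if *must_exit.borrow() {
			return WorkerState::Done;
		}
		tokio::time::sleep(Duration::from_secs(1)).await;
		WorkerState::Busy
	}
}
```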
src/web/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "garage_web"
-version = "0.8.0"
+version = "0.8.1"
 authors = ["Alex Auvolat <alex@adnab.me>", "Quentin Dufour <quentin@dufour.io>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -14,10 +14,10 @@ path = "lib.rs"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

 [dependencies]
-garage_api = { version = "0.8.0", path = "../api" }
-garage_model = { version = "0.8.0", path = "../model" }
-garage_util = { version = "0.8.0", path = "../util" }
-garage_table = { version = "0.8.0", path = "../table" }
+garage_api = { version = "0.8.1", path = "../api" }
+garage_model = { version = "0.8.1", path = "../model" }
+garage_util = { version = "0.8.1", path = "../util" }
+garage_table = { version = "0.8.1", path = "../table" }

 err-derive = "0.3"
 tracing = "0.1.30"