New model for buckets #172

Merged
lx merged 19 commits from new-buckets into main 2022-01-10 11:32:42 +00:00
58 changed files with 2758 additions and 757 deletions

Cargo.lock (generated)

@@ -382,17 +382,17 @@ dependencies = [
 [[package]]
 name = "garage"
-version = "0.5.0"
+version = "0.6.0"
 dependencies = [
 "async-trait",
 "bytes 1.1.0",
 "futures",
 "futures-util",
 "garage_api",
-"garage_model",
-"garage_rpc",
-"garage_table",
-"garage_util",
+"garage_model 0.6.0",
+"garage_rpc 0.6.0",
+"garage_table 0.6.0",
+"garage_util 0.6.0",
 "garage_web",
 "git-version",
 "hex",
@@ -403,6 +403,7 @@ dependencies = [
 "rand",
 "rmp-serde 0.15.5",
 "serde",
+"serde_bytes",
 "sled",
 "structopt",
 "tokio",
@@ -411,7 +412,7 @@ dependencies = [
 [[package]]
 name = "garage_api"
-version = "0.5.0"
+version = "0.6.0"
 dependencies = [
 "base64",
 "bytes 1.1.0",
@@ -420,9 +421,9 @@ dependencies = [
 "err-derive 0.3.0",
 "futures",
 "futures-util",
-"garage_model",
-"garage_table",
-"garage_util",
+"garage_model 0.6.0",
+"garage_table 0.6.0",
+"garage_util 0.6.0",
 "hex",
 "hmac",
 "http",
@@ -436,6 +437,7 @@ dependencies = [
 "quick-xml",
 "roxmltree",
 "serde",
+"serde_bytes",
 "sha2",
 "tokio",
 "url",
@@ -443,15 +445,42 @@ dependencies = [
 [[package]]
 name = "garage_model"
-version = "0.5.0"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "584619e8999713d73761775591ad6f01ff8c9d724f3b20984f5932f1fc7f9988"
 dependencies = [
 "arc-swap",
 "async-trait",
 "futures",
 "futures-util",
-"garage_rpc",
-"garage_table",
-"garage_util",
+"garage_rpc 0.5.1",
+"garage_table 0.5.1",
+"garage_util 0.5.1",
+"hex",
+"log",
+"netapp",
+"rand",
+"rmp-serde 0.15.5",
+"serde",
+"serde_bytes",
+"sled",
+"tokio",
+"zstd",
+]
+
+[[package]]
+name = "garage_model"
+version = "0.6.0"
+dependencies = [
+"arc-swap",
+"async-trait",
+"err-derive 0.3.0",
+"futures",
+"futures-util",
+"garage_model 0.5.1",
+"garage_rpc 0.6.0",
+"garage_table 0.6.0",
+"garage_util 0.6.0",
 "hex",
 "log",
 "netapp",
@@ -466,14 +495,41 @@ dependencies = [
 [[package]]
 name = "garage_rpc"
-version = "0.5.0"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "81e693aa4582cfe7a7ce70c07880e3662544b5d0cd68bc4b59c53febfbb8d1ec"
 dependencies = [
 "arc-swap",
 "async-trait",
 "bytes 1.1.0",
 "futures",
 "futures-util",
-"garage_util",
+"garage_util 0.5.1",
+"gethostname",
+"hex",
+"hyper",
+"kuska-sodiumoxide",
+"log",
+"netapp",
+"rand",
+"rmp-serde 0.15.5",
+"serde",
+"serde_bytes",
+"serde_json",
+"tokio",
+"tokio-stream",
+]
+
+[[package]]
+name = "garage_rpc"
+version = "0.6.0"
+dependencies = [
+"arc-swap",
+"async-trait",
+"bytes 1.1.0",
+"futures",
+"futures-util",
+"garage_util 0.6.0",
 "gethostname",
 "hex",
 "hyper",
@@ -491,14 +547,36 @@ dependencies = [
 [[package]]
 name = "garage_table"
-version = "0.5.0"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c3557f3757e2acd29eaee86804d4e6c38d2abda81b4b349d8a0d2277044265c"
 dependencies = [
 "async-trait",
 "bytes 1.1.0",
 "futures",
 "futures-util",
-"garage_rpc",
-"garage_util",
+"garage_rpc 0.5.1",
+"garage_util 0.5.1",
+"hexdump",
+"log",
+"rand",
+"rmp-serde 0.15.5",
+"serde",
+"serde_bytes",
+"sled",
+"tokio",
+]
+
+[[package]]
+name = "garage_table"
+version = "0.6.0"
+dependencies = [
+"async-trait",
+"bytes 1.1.0",
+"futures",
+"futures-util",
+"garage_rpc 0.6.0",
+"garage_util 0.6.0",
 "hexdump",
 "log",
 "rand",
@@ -511,7 +589,33 @@ dependencies = [
 [[package]]
 name = "garage_util"
-version = "0.5.0"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e096994382447431e2f3c70e3685eb8b24c00eceff8667bb22a2a27ff17832f"
+dependencies = [
+"blake2",
+"chrono",
+"err-derive 0.3.0",
+"futures",
+"hex",
+"http",
+"hyper",
+"log",
+"netapp",
+"rand",
+"rmp-serde 0.15.5",
+"serde",
+"serde_json",
+"sha2",
+"sled",
+"tokio",
+"toml",
+"xxhash-rust",
+]
+
+[[package]]
+name = "garage_util"
+version = "0.6.0"
 dependencies = [
 "blake2",
 "chrono",
@@ -535,14 +639,14 @@ dependencies = [
 [[package]]
 name = "garage_web"
-version = "0.5.0"
+version = "0.6.0"
 dependencies = [
 "err-derive 0.3.0",
 "futures",
 "garage_api",
-"garage_model",
-"garage_table",
-"garage_util",
+"garage_model 0.6.0",
+"garage_table 0.6.0",
+"garage_util 0.6.0",
 "http",
 "hyper",
 "log",

Cargo.nix

@@ -40,13 +40,13 @@ in
 {
 cargo2nixVersion = "0.9.0";
 workspace = {
-garage_util = rustPackages.unknown.garage_util."0.5.0";
-garage_rpc = rustPackages.unknown.garage_rpc."0.5.0";
-garage_table = rustPackages.unknown.garage_table."0.5.0";
-garage_model = rustPackages.unknown.garage_model."0.5.0";
-garage_api = rustPackages.unknown.garage_api."0.5.0";
-garage_web = rustPackages.unknown.garage_web."0.5.0";
-garage = rustPackages.unknown.garage."0.5.0";
+garage_util = rustPackages.unknown.garage_util."0.6.0";
+garage_rpc = rustPackages.unknown.garage_rpc."0.6.0";
+garage_table = rustPackages.unknown.garage_table."0.6.0";
+garage_model = rustPackages.unknown.garage_model."0.6.0";
+garage_api = rustPackages.unknown.garage_api."0.6.0";
+garage_web = rustPackages.unknown.garage_web."0.6.0";
+garage = rustPackages.unknown.garage."0.6.0";
 };
 "registry+https://github.com/rust-lang/crates.io-index".aho-corasick."0.7.18" = overridableMkRustCrate (profileName: rec {
 name = "aho-corasick";
@@ -613,9 +613,9 @@ in
 };
 });
-"unknown".garage."0.5.0" = overridableMkRustCrate (profileName: rec {
+"unknown".garage."0.6.0" = overridableMkRustCrate (profileName: rec {
 name = "garage";
-version = "0.5.0";
+version = "0.6.0";
 registry = "unknown";
 src = fetchCrateLocal (workspaceSrc + "/src/garage");
 dependencies = {
@@ -623,12 +623,12 @@ in
 bytes = rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.1.0" { inherit profileName; };
 futures = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.17" { inherit profileName; };
 futures_util = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.17" { inherit profileName; };
-garage_api = rustPackages."unknown".garage_api."0.5.0" { inherit profileName; };
-garage_model = rustPackages."unknown".garage_model."0.5.0" { inherit profileName; };
-garage_rpc = rustPackages."unknown".garage_rpc."0.5.0" { inherit profileName; };
-garage_table = rustPackages."unknown".garage_table."0.5.0" { inherit profileName; };
-garage_util = rustPackages."unknown".garage_util."0.5.0" { inherit profileName; };
-garage_web = rustPackages."unknown".garage_web."0.5.0" { inherit profileName; };
+garage_api = rustPackages."unknown".garage_api."0.6.0" { inherit profileName; };
+garage_model = rustPackages."unknown".garage_model."0.6.0" { inherit profileName; };
+garage_rpc = rustPackages."unknown".garage_rpc."0.6.0" { inherit profileName; };
+garage_table = rustPackages."unknown".garage_table."0.6.0" { inherit profileName; };
+garage_util = rustPackages."unknown".garage_util."0.6.0" { inherit profileName; };
+garage_web = rustPackages."unknown".garage_web."0.6.0" { inherit profileName; };
 git_version = rustPackages."registry+https://github.com/rust-lang/crates.io-index".git-version."0.3.5" { inherit profileName; };
 hex = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; };
 sodiumoxide = rustPackages."registry+https://github.com/rust-lang/crates.io-index".kuska-sodiumoxide."0.2.5-0" { inherit profileName; };
@@ -638,6 +638,7 @@ in
 rand = rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.4" { inherit profileName; };
 rmp_serde = rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" { inherit profileName; };
 serde = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.130" { inherit profileName; };
+serde_bytes = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; };
 sled = rustPackages."registry+https://github.com/rust-lang/crates.io-index".sled."0.34.7" { inherit profileName; };
 structopt = rustPackages."registry+https://github.com/rust-lang/crates.io-index".structopt."0.3.23" { inherit profileName; };
 tokio = rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.12.0" { inherit profileName; };
@@ -645,9 +646,9 @@ in
 };
 });
-"unknown".garage_api."0.5.0" = overridableMkRustCrate (profileName: rec {
+"unknown".garage_api."0.6.0" = overridableMkRustCrate (profileName: rec {
 name = "garage_api";
-version = "0.5.0";
+version = "0.6.0";
 registry = "unknown";
 src = fetchCrateLocal (workspaceSrc + "/src/api");
 dependencies = {
@@ -658,9 +659,9 @@ in
 err_derive = buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.0" { profileName = "__noProfile"; };
 futures = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.17" { inherit profileName; };
 futures_util = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.17" { inherit profileName; };
-garage_model = rustPackages."unknown".garage_model."0.5.0" { inherit profileName; };
-garage_table = rustPackages."unknown".garage_table."0.5.0" { inherit profileName; };
-garage_util = rustPackages."unknown".garage_util."0.5.0" { inherit profileName; };
+garage_model = rustPackages."unknown".garage_model."0.6.0" { inherit profileName; };
+garage_table = rustPackages."unknown".garage_table."0.6.0" { inherit profileName; };
+garage_util = rustPackages."unknown".garage_util."0.6.0" { inherit profileName; };
 hex = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; };
 hmac = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hmac."0.10.1" { inherit profileName; };
 http = rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.5" { inherit profileName; };
@@ -674,25 +675,26 @@ in
 quick_xml = rustPackages."registry+https://github.com/rust-lang/crates.io-index".quick-xml."0.21.0" { inherit profileName; };
 roxmltree = rustPackages."registry+https://github.com/rust-lang/crates.io-index".roxmltree."0.14.1" { inherit profileName; };
 serde = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.130" { inherit profileName; };
+serde_bytes = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; };
 sha2 = rustPackages."registry+https://github.com/rust-lang/crates.io-index".sha2."0.9.8" { inherit profileName; };
 tokio = rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.12.0" { inherit profileName; };
 url = rustPackages."registry+https://github.com/rust-lang/crates.io-index".url."2.2.2" { inherit profileName; };
 };
 });
-"unknown".garage_model."0.5.0" = overridableMkRustCrate (profileName: rec {
+"registry+https://github.com/rust-lang/crates.io-index".garage_model."0.5.1" = overridableMkRustCrate (profileName: rec {
 name = "garage_model";
-version = "0.5.0";
-registry = "unknown";
-src = fetchCrateLocal (workspaceSrc + "/src/model");
+version = "0.5.1";
+registry = "registry+https://github.com/rust-lang/crates.io-index";
+src = fetchCratesIo { inherit name version; sha256 = "584619e8999713d73761775591ad6f01ff8c9d724f3b20984f5932f1fc7f9988"; };
 dependencies = {
 arc_swap = rustPackages."registry+https://github.com/rust-lang/crates.io-index".arc-swap."1.4.0" { inherit profileName; };
 async_trait = buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".async-trait."0.1.51" { profileName = "__noProfile"; };
 futures = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.17" { inherit profileName; };
 futures_util = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.17" { inherit profileName; };
-garage_rpc = rustPackages."unknown".garage_rpc."0.5.0" { inherit profileName; };
-garage_table = rustPackages."unknown".garage_table."0.5.0" { inherit profileName; };
-garage_util = rustPackages."unknown".garage_util."0.5.0" { inherit profileName; };
+garage_rpc = rustPackages."registry+https://github.com/rust-lang/crates.io-index".garage_rpc."0.5.1" { inherit profileName; };
+garage_table = rustPackages."registry+https://github.com/rust-lang/crates.io-index".garage_table."0.5.1" { inherit profileName; };
+garage_util = rustPackages."registry+https://github.com/rust-lang/crates.io-index".garage_util."0.5.1" { inherit profileName; };
 hex = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; };
 log = rustPackages."registry+https://github.com/rust-lang/crates.io-index".log."0.4.14" { inherit profileName; };
 netapp = rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.3.0" { inherit profileName; };
@@ -706,18 +708,46 @@ in
 };
 });
-"unknown".garage_rpc."0.5.0" = overridableMkRustCrate (profileName: rec {
-name = "garage_rpc";
-version = "0.5.0";
+"unknown".garage_model."0.6.0" = overridableMkRustCrate (profileName: rec {
+name = "garage_model";
+version = "0.6.0";
 registry = "unknown";
-src = fetchCrateLocal (workspaceSrc + "/src/rpc");
+src = fetchCrateLocal (workspaceSrc + "/src/model");
+dependencies = {
+arc_swap = rustPackages."registry+https://github.com/rust-lang/crates.io-index".arc-swap."1.4.0" { inherit profileName; };
+async_trait = buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".async-trait."0.1.51" { profileName = "__noProfile"; };
+err_derive = buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.0" { profileName = "__noProfile"; };
+futures = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.17" { inherit profileName; };
+futures_util = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.17" { inherit profileName; };
+garage_model_050 = rustPackages."registry+https://github.com/rust-lang/crates.io-index".garage_model."0.5.1" { inherit profileName; };
+garage_rpc = rustPackages."unknown".garage_rpc."0.6.0" { inherit profileName; };
+garage_table = rustPackages."unknown".garage_table."0.6.0" { inherit profileName; };
+garage_util = rustPackages."unknown".garage_util."0.6.0" { inherit profileName; };
+hex = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; };
+log = rustPackages."registry+https://github.com/rust-lang/crates.io-index".log."0.4.14" { inherit profileName; };
+netapp = rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.3.0" { inherit profileName; };
+rand = rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.4" { inherit profileName; };
+rmp_serde = rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" { inherit profileName; };
+serde = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.130" { inherit profileName; };
+serde_bytes = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; };
+sled = rustPackages."registry+https://github.com/rust-lang/crates.io-index".sled."0.34.7" { inherit profileName; };
+tokio = rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.12.0" { inherit profileName; };
+zstd = rustPackages."registry+https://github.com/rust-lang/crates.io-index".zstd."0.9.0+zstd.1.5.0" { inherit profileName; };
+};
+});
+"registry+https://github.com/rust-lang/crates.io-index".garage_rpc."0.5.1" = overridableMkRustCrate (profileName: rec {
+name = "garage_rpc";
+version = "0.5.1";
+registry = "registry+https://github.com/rust-lang/crates.io-index";
+src = fetchCratesIo { inherit name version; sha256 = "81e693aa4582cfe7a7ce70c07880e3662544b5d0cd68bc4b59c53febfbb8d1ec"; };
 dependencies = {
 arc_swap = rustPackages."registry+https://github.com/rust-lang/crates.io-index".arc-swap."1.4.0" { inherit profileName; };
 async_trait = buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".async-trait."0.1.51" { profileName = "__noProfile"; };
 bytes = rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.1.0" { inherit profileName; };
 futures = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.17" { inherit profileName; };
 futures_util = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.17" { inherit profileName; };
-garage_util = rustPackages."unknown".garage_util."0.5.0" { inherit profileName; };
+garage_util = rustPackages."registry+https://github.com/rust-lang/crates.io-index".garage_util."0.5.1" { inherit profileName; };
 gethostname = rustPackages."registry+https://github.com/rust-lang/crates.io-index".gethostname."0.2.1" { inherit profileName; };
 hex = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; };
 hyper = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."0.14.13" { inherit profileName; };
@@ -734,18 +764,46 @@ in
 };
 });
-"unknown".garage_table."0.5.0" = overridableMkRustCrate (profileName: rec {
-name = "garage_table";
-version = "0.5.0";
+"unknown".garage_rpc."0.6.0" = overridableMkRustCrate (profileName: rec {
+name = "garage_rpc";
+version = "0.6.0";
 registry = "unknown";
-src = fetchCrateLocal (workspaceSrc + "/src/table");
+src = fetchCrateLocal (workspaceSrc + "/src/rpc");
+dependencies = {
+arc_swap = rustPackages."registry+https://github.com/rust-lang/crates.io-index".arc-swap."1.4.0" { inherit profileName; };
+async_trait = buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".async-trait."0.1.51" { profileName = "__noProfile"; };
+bytes = rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.1.0" { inherit profileName; };
+futures = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.17" { inherit profileName; };
+futures_util = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.17" { inherit profileName; };
+garage_util = rustPackages."unknown".garage_util."0.6.0" { inherit profileName; };
+gethostname = rustPackages."registry+https://github.com/rust-lang/crates.io-index".gethostname."0.2.1" { inherit profileName; };
+hex = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; };
+hyper = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."0.14.13" { inherit profileName; };
+sodiumoxide = rustPackages."registry+https://github.com/rust-lang/crates.io-index".kuska-sodiumoxide."0.2.5-0" { inherit profileName; };
+log = rustPackages."registry+https://github.com/rust-lang/crates.io-index".log."0.4.14" { inherit profileName; };
+netapp = rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.3.0" { inherit profileName; };
+rand = rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.4" { inherit profileName; };
+rmp_serde = rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" { inherit profileName; };
+serde = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.130" { inherit profileName; };
+serde_bytes = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; };
+serde_json = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_json."1.0.68" { inherit profileName; };
+tokio = rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.12.0" { inherit profileName; };
+tokio_stream = rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio-stream."0.1.7" { inherit profileName; };
+};
+});
+"registry+https://github.com/rust-lang/crates.io-index".garage_table."0.5.1" = overridableMkRustCrate (profileName: rec {
+name = "garage_table";
+version = "0.5.1";
+registry = "registry+https://github.com/rust-lang/crates.io-index";
+src = fetchCratesIo { inherit name version; sha256 = "5c3557f3757e2acd29eaee86804d4e6c38d2abda81b4b349d8a0d2277044265c"; };
 dependencies = {
 async_trait = buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".async-trait."0.1.51" { profileName = "__noProfile"; };
 bytes = rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.1.0" { inherit profileName; };
 futures = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.17" { inherit profileName; };
 futures_util = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.17" { inherit profileName; };
-garage_rpc = rustPackages."unknown".garage_rpc."0.5.0" { inherit profileName; };
-garage_util = rustPackages."unknown".garage_util."0.5.0" { inherit profileName; };
+garage_rpc = rustPackages."registry+https://github.com/rust-lang/crates.io-index".garage_rpc."0.5.1" { inherit profileName; };
+garage_util = rustPackages."registry+https://github.com/rust-lang/crates.io-index".garage_util."0.5.1" { inherit profileName; };
 hexdump = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; };
 log = rustPackages."registry+https://github.com/rust-lang/crates.io-index".log."0.4.14" { inherit profileName; };
 rand = rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.4" { inherit profileName; };
@@ -757,9 +815,59 @@ in
 };
 });
-"unknown".garage_util."0.5.0" = overridableMkRustCrate (profileName: rec {
-name = "garage_util";
-version = "0.5.0";
+"unknown".garage_table."0.6.0" = overridableMkRustCrate (profileName: rec {
+name = "garage_table";
+version = "0.6.0";
+registry = "unknown";
+src = fetchCrateLocal (workspaceSrc + "/src/table");
+dependencies = {
+async_trait = buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".async-trait."0.1.51" { profileName = "__noProfile"; };
+bytes = rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.1.0" { inherit profileName; };
+futures = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.17" { inherit profileName; };
+futures_util = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.17" { inherit profileName; };
+garage_rpc = rustPackages."unknown".garage_rpc."0.6.0" { inherit profileName; };
+garage_util = rustPackages."unknown".garage_util."0.6.0" { inherit profileName; };
+hexdump = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; };
+log = rustPackages."registry+https://github.com/rust-lang/crates.io-index".log."0.4.14" { inherit profileName; };
+rand = rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.4" { inherit profileName; };
+rmp_serde = rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" { inherit profileName; };
+serde = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.130" { inherit profileName; };
+serde_bytes = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; };
+sled = rustPackages."registry+https://github.com/rust-lang/crates.io-index".sled."0.34.7" { inherit profileName; };
+tokio = rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.12.0" { inherit profileName; };
+};
+});
+"registry+https://github.com/rust-lang/crates.io-index".garage_util."0.5.1" = overridableMkRustCrate (profileName: rec {
+name = "garage_util";
+version = "0.5.1";
+registry = "registry+https://github.com/rust-lang/crates.io-index";
+src = fetchCratesIo { inherit name version; sha256 = "1e096994382447431e2f3c70e3685eb8b24c00eceff8667bb22a2a27ff17832f"; };
+dependencies = {
+blake2 = rustPackages."registry+https://github.com/rust-lang/crates.io-index".blake2."0.9.2" { inherit profileName; };
+chrono = rustPackages."registry+https://github.com/rust-lang/crates.io-index".chrono."0.4.19" { inherit profileName; };
+err_derive = buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.0" { profileName = "__noProfile"; };
+futures = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.17" { inherit profileName; };
+hex = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; };
+http = rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.5" { inherit profileName; };
+hyper = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."0.14.13" { inherit profileName; };
+log = rustPackages."registry+https://github.com/rust-lang/crates.io-index".log."0.4.14" { inherit profileName; };
+netapp = rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.3.0" { inherit profileName; };
+rand = rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.4" { inherit profileName; };
+rmp_serde = rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" { inherit profileName; };
+serde = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.130" { inherit profileName; };
+serde_json = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_json."1.0.68" { inherit profileName; };
+sha2 = rustPackages."registry+https://github.com/rust-lang/crates.io-index".sha2."0.9.8" { inherit profileName; };
+sled = rustPackages."registry+https://github.com/rust-lang/crates.io-index".sled."0.34.7" { inherit profileName; };
+tokio = rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.12.0" { inherit profileName; };
+toml = rustPackages."registry+https://github.com/rust-lang/crates.io-index".toml."0.5.8" { inherit profileName; };
+xxhash_rust = rustPackages."registry+https://github.com/rust-lang/crates.io-index".xxhash-rust."0.8.2" { inherit profileName; };
+};
+});
+"unknown".garage_util."0.6.0" = overridableMkRustCrate (profileName: rec {
+name = "garage_util";
+version = "0.6.0";
 registry = "unknown";
 src = fetchCrateLocal (workspaceSrc + "/src/util");
 dependencies = {
@@ -784,18 +892,18 @@ in
 };
 });
-"unknown".garage_web."0.5.0" = overridableMkRustCrate (profileName: rec {
+"unknown".garage_web."0.6.0" = overridableMkRustCrate (profileName: rec {
 name = "garage_web";
-version = "0.5.0";
+version = "0.6.0";
 registry = "unknown";
 src = fetchCrateLocal (workspaceSrc + "/src/web");
 dependencies = {
 err_derive = buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.0" { profileName = "__noProfile"; };
 futures = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.17" { inherit profileName; };
-garage_api = rustPackages."unknown".garage_api."0.5.0" { inherit profileName; };
-garage_model = rustPackages."unknown".garage_model."0.5.0" { inherit profileName; };
-garage_table = rustPackages."unknown".garage_table."0.5.0" { inherit profileName; };
-garage_util = rustPackages."unknown".garage_util."0.5.0" { inherit profileName; };
+garage_api = rustPackages."unknown".garage_api."0.6.0" { inherit profileName; };
+garage_model = rustPackages."unknown".garage_model."0.6.0" { inherit profileName; };
+garage_table = rustPackages."unknown".garage_table."0.6.0" { inherit profileName; };
+garage_util = rustPackages."unknown".garage_util."0.6.0" { inherit profileName; };
 http = rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.5" { inherit profileName; };
 hyper = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."0.14.13" { inherit profileName; };
 log = rustPackages."registry+https://github.com/rust-lang/crates.io-index".log."0.4.14" { inherit profileName; };


@@ -56,7 +56,7 @@ in let
 */
 ''^(src|tests)'' # fixed default
 ''.*\.(rs|toml)$'' # fixed default
-''^(crdt|replication|cli)'' # our crate submodules
+''^(crdt|replication|cli|helper)'' # our crate submodules
 ];
 };

src/api/Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "garage_api"
-version = "0.5.0"
+version = "0.6.0"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -14,9 +14,9 @@ path = "lib.rs"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

 [dependencies]
-garage_model = { version = "0.5.0", path = "../model" }
-garage_table = { version = "0.5.0", path = "../table" }
-garage_util = { version = "0.5.0", path = "../util" }
+garage_model = { version = "0.6.0", path = "../model" }
+garage_table = { version = "0.6.0", path = "../table" }
+garage_util = { version = "0.6.0", path = "../util" }
 base64 = "0.13"
 bytes = "1.0"
@@ -41,5 +41,6 @@ hyper = { version = "0.14", features = ["server", "http1", "runtime", "tcp", "st
 percent-encoding = "2.1.0"
 roxmltree = "0.14"
 serde = { version = "1.0", features = ["derive"] }
+serde_bytes = "0.11"
 quick-xml = { version = "0.21", features = [ "serialize" ] }
 url = "2.1"

src/api/api_server.rs

@@ -7,9 +7,11 @@ use hyper::server::conn::AddrStream;
 use hyper::service::{make_service_fn, service_fn};
 use hyper::{Body, Request, Response, Server};

+use garage_util::data::*;
 use garage_util::error::Error as GarageError;

 use garage_model::garage::Garage;
+use garage_model::key_table::Key;

 use crate::error::*;
 use crate::signature::check_signature;
@@ -105,10 +107,18 @@ async fn handler_inner(garage: Arc<Garage>, req: Request<Body>) -> Result<Respon
 .and_then(|root_domain| host_to_bucket(&host, root_domain));

 let endpoint = Endpoint::from_request(&req, bucket.map(ToOwned::to_owned))?;

+let bucket_name = match endpoint.get_bucket() {
+None => return handle_request_without_bucket(garage, req, api_key, endpoint).await,
lx marked this conversation as resolved:

> there is a `.get_bucket()` on `Endpoint` which can be used to do the same thing more concisely
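For illustration, a minimal self-contained sketch of the pattern this comment refers to; Garage's real `Endpoint` enum has many more variants, and the exact signature of `get_bucket()` is an assumption made here:

```rust
// Sketch only: a single accessor on the endpoint enum replaces a
// variant-by-variant match at each call site.
enum Endpoint {
    ListBuckets,
    GetObject { bucket: String, key: String },
    PutObject { bucket: String, key: String },
}

impl Endpoint {
    /// The bucket this request targets, if any (assumed shape of the
    /// `.get_bucket()` helper mentioned in the comment).
    fn get_bucket(&self) -> Option<&str> {
        match self {
            Endpoint::ListBuckets => None,
            Endpoint::GetObject { bucket, .. } | Endpoint::PutObject { bucket, .. } => {
                Some(bucket.as_str())
            }
        }
    }
}

fn main() {
    let endpoint = Endpoint::GetObject {
        bucket: "my-bucket".into(),
        key: "hello.txt".into(),
    };
    // One call site instead of matching every variant, as in the diff below:
    match endpoint.get_bucket() {
        Some(bucket) => println!("request targets bucket {}", bucket),
        None => println!("request carries no bucket"),
    }
}
```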
+Some(bucket) => bucket.to_string(),
+};
+let bucket_id = resolve_bucket(&garage, &bucket_name, &api_key).await?;
 let allowed = match endpoint.authorization_type() {
-Authorization::None => true,
-Authorization::Read(bucket) => api_key.allow_read(bucket),
-Authorization::Write(bucket) => api_key.allow_write(bucket),
+Authorization::Read(_) => api_key.allow_read(&bucket_id),
+Authorization::Write(_) => api_key.allow_write(&bucket_id),
+_ => unreachable!(),
 };

 if !allowed {
@@ -118,19 +128,18 @@ async fn handler_inner(garage: Arc<Garage>, req: Request<Body>) -> Result<Respon
 }

 match endpoint {
-Endpoint::ListBuckets => handle_list_buckets(&api_key),
-Endpoint::HeadObject { bucket, key, .. } => handle_head(garage, &req, &bucket, &key).await,
-Endpoint::GetObject { bucket, key, .. } => handle_get(garage, &req, &bucket, &key).await,
+Endpoint::HeadObject { key, .. } => handle_head(garage, &req, bucket_id, &key).await,
+Endpoint::GetObject { key, .. } => handle_get(garage, &req, bucket_id, &key).await,
 Endpoint::UploadPart {
-bucket,
 key,
 part_number,
 upload_id,
+..
 } => {
 handle_put_part(
 garage,
 req,
-&bucket,
+bucket_id,
 &key,
 part_number,
 &upload_id,
@@ -138,38 +147,46 @@ async fn handler_inner(garage: Arc<Garage>, req: Request<Body>) -> Result<Respon
 )
 .await
 }
-Endpoint::CopyObject { bucket, key } => {
+Endpoint::CopyObject { key, .. } => {
 let copy_source = req.headers().get("x-amz-copy-source").unwrap().to_str()?;
 let copy_source = percent_encoding::percent_decode_str(copy_source).decode_utf8()?;
 let (source_bucket, source_key) = parse_bucket_key(&copy_source, None)?;
-if !api_key.allow_read(source_bucket) {
+let source_bucket_id =
+resolve_bucket(&garage, &source_bucket.to_string(), &api_key).await?;
+if !api_key.allow_read(&source_bucket_id) {
 return Err(Error::Forbidden(format!(
 "Reading from bucket {} not allowed for this key",
 source_bucket
 )));
 }
 let source_key = source_key.ok_or_bad_request("No source key specified")?;
-handle_copy(garage, &req, &bucket, &key, source_bucket, source_key).await
+handle_copy(garage, &req, bucket_id, &key, source_bucket_id, source_key).await
 }
-Endpoint::PutObject { bucket, key } => {
-handle_put(garage, req, &bucket, &key, content_sha256).await
+Endpoint::PutObject { key, .. } => {
+handle_put(garage, req, bucket_id, &key, content_sha256).await
 }
-Endpoint::AbortMultipartUpload {
-bucket,
-key,
-upload_id,
-} => handle_abort_multipart_upload(garage, &bucket, &key, &upload_id).await,
-Endpoint::DeleteObject { bucket, key, .. } => handle_delete(garage, &bucket, &key).await,
+Endpoint::AbortMultipartUpload { key, upload_id, .. } => {
+handle_abort_multipart_upload(garage, bucket_id, &key, &upload_id).await
+}
+Endpoint::DeleteObject { key, .. } => handle_delete(garage, bucket_id, &key).await,
 Endpoint::CreateMultipartUpload { bucket, key } => {
-handle_create_multipart_upload(garage, &req, &bucket, &key).await
+handle_create_multipart_upload(garage, &req, &bucket, bucket_id, &key).await
 }
 Endpoint::CompleteMultipartUpload {
 bucket,
 key,
 upload_id,
 } => {
-handle_complete_multipart_upload(garage, req, &bucket, &key, &upload_id, content_sha256)
-.await
+handle_complete_multipart_upload(
+garage,
+req,
+&bucket,
+bucket_id,
+&key,
+&upload_id,
+content_sha256,
+)
+.await
 }
 Endpoint::CreateBucket { bucket } => {
 debug!(
@@ -206,7 +223,8 @@ async fn handler_inner(garage: Arc<Garage>, req: Request<Body>) -> Result<Respon
 garage,
 &ListObjectsQuery {
 is_v2: false,
-bucket,
+bucket_name: bucket,
+bucket_id,
 delimiter: delimiter.map(|d| d.to_string()),
 max_keys: max_keys.unwrap_or(1000),
 prefix: prefix.unwrap_or_default(),
@@ -234,7 +252,8 @@ async fn handler_inner(garage: Arc<Garage>, req: Request<Body>) -> Result<Respon
 garage,
 &ListObjectsQuery {
 is_v2: true,
-bucket,
+bucket_name: bucket,
+bucket_id,
 delimiter: delimiter.map(|d| d.to_string()),
 max_keys: max_keys.unwrap_or(1000),
 prefix: prefix.unwrap_or_default(),
@@ -252,17 +271,51 @@ async fn handler_inner(garage: Arc<Garage>, req: Request<Body>) -> Result<Respon
 )))
 }
 }
-Endpoint::DeleteObjects { bucket } => {
-handle_delete_objects(garage, &bucket, req, content_sha256).await
+Endpoint::DeleteObjects { .. } => {
+handle_delete_objects(garage, bucket_id, req, content_sha256).await
 }
-Endpoint::PutBucketWebsite { bucket } => {
-handle_put_website(garage, bucket, req, content_sha256).await
+Endpoint::PutBucketWebsite { .. } => {
+handle_put_website(garage, bucket_id, req, content_sha256).await
 }
-Endpoint::DeleteBucketWebsite { bucket } => handle_delete_website(garage, bucket).await,
+Endpoint::DeleteBucketWebsite { .. } => handle_delete_website(garage, bucket_id).await,
 endpoint => Err(Error::NotImplemented(endpoint.name().to_owned())),
 }
 }
+
+async fn handle_request_without_bucket(
+garage: Arc<Garage>,
+_req: Request<Body>,
+api_key: Key,
+endpoint: Endpoint,
+) -> Result<Response<Body>, Error> {
+match endpoint {
+Endpoint::ListBuckets => handle_list_buckets(&garage, &api_key).await,
+endpoint => Err(Error::NotImplemented(endpoint.name().to_owned())),
+}
+}
+
+#[allow(clippy::ptr_arg)]
+async fn resolve_bucket(
+garage: &Garage,
+bucket_name: &String,
+api_key: &Key,
+) -> Result<Uuid, Error> {
+let api_key_params = api_key
+.state
+.as_option()
+.ok_or_else(|| Error::Forbidden("Operation is not allowed for this key.".to_string()))?;
+
+if let Some(Some(bucket_id)) = api_key_params.local_aliases.get(bucket_name) {
+Ok(*bucket_id)
+} else {
+Ok(garage
+.bucket_helper()
+.resolve_global_bucket_name(bucket_name)
+.await?
+.ok_or(Error::NotFound)?)
+}
+}
+
 /// Extract the bucket name and the key name from an HTTP path and possibly a bucket provided in
 /// the host header of the request
 ///

src/api/error.rs

@@ -4,6 +4,7 @@ use err_derive::Error;
 use hyper::header::HeaderValue;
 use hyper::{HeaderMap, StatusCode};

+use garage_model::helper::error::Error as HelperError;
 use garage_util::error::Error as GarageError;

 use crate::s3_xml;
@@ -83,6 +84,15 @@ impl From<quick_xml::de::DeError> for Error {
 }
 }

+impl From<HelperError> for Error {
+fn from(err: HelperError) -> Self {
+match err {
+HelperError::Internal(i) => Self::InternalError(i),
+HelperError::BadRequest(b) => Self::BadRequest(b),
+}
+}
+}
+
 impl Error {
 /// Get the HTTP status code that best represents the meaning of the error for the client
 pub fn http_status_code(&self) -> StatusCode {
@@ -156,62 +166,67 @@ impl Error {
 /// Trait to map error to the Bad Request error code
 pub trait OkOrBadRequest {
-type S2;
-fn ok_or_bad_request(self, reason: &'static str) -> Self::S2;
+type S;
+fn ok_or_bad_request<M: AsRef<str>>(self, reason: M) -> Result<Self::S, Error>;
 }

 impl<T, E> OkOrBadRequest for Result<T, E>
 where
 E: std::fmt::Display,
 {
-type S2 = Result<T, Error>;
-fn ok_or_bad_request(self, reason: &'static str) -> Result<T, Error> {
+type S = T;
+fn ok_or_bad_request<M: AsRef<str>>(self, reason: M) -> Result<T, Error> {
 match self {
 Ok(x) => Ok(x),
-Err(e) => Err(Error::BadRequest(format!("{}: {}", reason, e))),
+Err(e) => Err(Error::BadRequest(format!(
+"{}: {}",
+reason.as_ref(),
+e.to_string()
+))),
 }
 }
 }

 impl<T> OkOrBadRequest for Option<T> {
-type S2 = Result<T, Error>;
-fn ok_or_bad_request(self, reason: &'static str) -> Result<T, Error> {
+type S = T;
+fn ok_or_bad_request<M: AsRef<str>>(self, reason: M) -> Result<T, Error> {
 match self {
 Some(x) => Ok(x),
-None => Err(Error::BadRequest(reason.to_string())),
+None => Err(Error::BadRequest(reason.as_ref().to_string())),
 }
 }
 }

 /// Trait to map an error to an Internal Error code
 pub trait OkOrInternalError {
-type S2;
-fn ok_or_internal_error(self, reason: &'static str) -> Self::S2;
+type S;
+fn ok_or_internal_error<M: AsRef<str>>(self, reason: M) -> Result<Self::S, Error>;
 }

 impl<T, E> OkOrInternalError for Result<T, E>
 where
 E: std::fmt::Display,
 {
-type S2 = Result<T, Error>;
-fn ok_or_internal_error(self, reason: &'static str) -> Result<T, Error> {
+type S = T;
+fn ok_or_internal_error<M: AsRef<str>>(self, reason: M) -> Result<T, Error> {
 match self {
 Ok(x) => Ok(x),
 Err(e) => Err(Error::InternalError(GarageError::Message(format!(
 "{}: {}",
-reason, e
+reason.as_ref(),
+e
 )))),
 }
 }
 }

 impl<T> OkOrInternalError for Option<T> {
-type S2 = Result<T, Error>;
-fn ok_or_internal_error(self, reason: &'static str) -> Result<T, Error> {
+type S = T;
+fn ok_or_internal_error<M: AsRef<str>>(self, reason: M) -> Result<T, Error> {
 match self {
 Some(x) => Ok(x),
 None => Err(Error::InternalError(GarageError::Message(
-reason.to_string(),
+reason.as_ref().to_string(),
 ))),
 }
 }

src/api/s3_bucket.rs

@@ -1,9 +1,12 @@
+use std::collections::HashMap;
 use std::sync::Arc;

 use hyper::{Body, Response};

 use garage_model::garage::Garage;
 use garage_model::key_table::Key;
+use garage_table::util::EmptyKey;
+use garage_util::crdt::*;
 use garage_util::time::*;

 use crate::error::*;
@@ -34,20 +37,61 @@ pub fn handle_get_bucket_versioning() -> Result<Response<Body>, Error> {
 .body(Body::from(xml.into_bytes()))?)
 }

-pub fn handle_list_buckets(api_key: &Key) -> Result<Response<Body>, Error> {
+pub async fn handle_list_buckets(garage: &Garage, api_key: &Key) -> Result<Response<Body>, Error> {
+let key_p = api_key.params().ok_or_internal_error(
+"Key should not be in deleted state at this point (in handle_list_buckets)",
+)?;
+
+// Collect buckets user has access to
+let ids = api_key
+.state
+.as_option()
+.unwrap()
+.authorized_buckets
+.items()
+.iter()
+.filter(|(_, perms)| perms.allow_read || perms.allow_write || perms.allow_owner)
lx marked this conversation as resolved:

> would it make sense to be bucket owner without read nor write permission?
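To make the question concrete, here is a small self-contained sketch (using a simplified stand-in for Garage's per-bucket permission entry) of how the filter above treats an owner-only key:

```rust
// With `allow_read || allow_write || allow_owner`, a key that is only
// *owner* of a bucket still sees it in ListBuckets; this toy `Perms`
// struct is a simplified stand-in, not Garage's actual type.
struct Perms {
    allow_read: bool,
    allow_write: bool,
    allow_owner: bool,
}

fn main() {
    let buckets = vec![
        ("docs", Perms { allow_read: true, allow_write: true, allow_owner: false }),
        ("admin-only", Perms { allow_read: false, allow_write: false, allow_owner: true }),
        ("hidden", Perms { allow_read: false, allow_write: false, allow_owner: false }),
    ];

    // Same predicate as the filter in the diff: any flag grants visibility.
    let visible: Vec<&str> = buckets
        .iter()
        .filter(|(_, p)| p.allow_read || p.allow_write || p.allow_owner)
        .map(|(name, _)| *name)
        .collect();

    // Prints ["docs", "admin-only"]: the owner-only bucket is listed too.
    println!("{:?}", visible);
}
```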
+.map(|(id, _)| *id)
+.collect::<Vec<_>>();
+
+let mut buckets_by_id = HashMap::new();
+let mut aliases = HashMap::new();
+
+for bucket_id in ids.iter() {
+let bucket = garage.bucket_table.get(&EmptyKey, bucket_id).await?;
+if let Some(bucket) = bucket {
+for (alias, _, _active) in bucket.aliases().iter().filter(|(_, _, active)| *active) {
+let alias_opt = garage.bucket_alias_table.get(&EmptyKey, alias).await?;
+if let Some(alias_ent) = alias_opt {
+if *alias_ent.state.get() == Some(*bucket_id) {
+aliases.insert(alias_ent.name().to_string(), *bucket_id);
+}
+}
+}
lx marked this conversation as resolved:

> this is a lot of indentation. It may be possible to remove a few layers by using Option::and_then, and doing `param.aliases.items().filter(|(_,_,active)| active)`
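A minimal sketch of the suggested flattening, with simplified stand-in types instead of Garage's real table entries (`lookup` is a hypothetical helper):

```rust
// One `filter` layer replaces an `if` on `active` inside the loop body, and
// `Option::and_then` collapses a nested `if let Some(..) = .. { if .. }`.
fn lookup(alias: &str) -> Option<&'static str> {
    // Hypothetical stand-in for the bucket_alias_table lookup above.
    if alias == "docs" {
        Some("bucket-id-1")
    } else {
        None
    }
}

fn main() {
    // (alias name, timestamp, still active?) triples, the same shape as the
    // aliases iterated over in the handler.
    let aliases = vec![
        ("docs".to_string(), 1u64, true),
        ("old-docs".to_string(), 2u64, false),
    ];

    for (name, _ts, _) in aliases.iter().filter(|(_, _, active)| *active) {
        // Keep only aliases that still resolve to the expected bucket id,
        // without a second level of if-let nesting.
        let still_valid = lookup(name).and_then(|id| (id == "bucket-id-1").then(|| id));
        println!("{} -> {:?}", name, still_valid);
    }
}
```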
+if let Deletable::Present(param) = bucket.state {
+buckets_by_id.insert(bucket_id, param);
+}
+}
+}
+
+for (alias, _, id_opt) in key_p.local_aliases.items() {
+if let Some(id) = id_opt {
+aliases.insert(alias.clone(), *id);
+}
+}
+
+// Generate response
 let list_buckets = s3_xml::ListAllMyBucketsResult {
 owner: s3_xml::Owner {
-display_name: s3_xml::Value(api_key.name.get().to_string()),
+display_name: s3_xml::Value(key_p.name.get().to_string()),
 id: s3_xml::Value(api_key.key_id.to_string()),
 },
 buckets: s3_xml::BucketList {
-entries: api_key
-.authorized_buckets
-.items()
+entries: aliases
 .iter()
-.filter(|(_, _, perms)| perms.allow_read || perms.allow_write)
-.map(|(name, ts, _)| s3_xml::Bucket {
-creation_date: s3_xml::Value(msec_to_rfc3339(*ts)),
+.filter_map(|(name, id)| buckets_by_id.get(id).map(|p| (name, id, p)))
+.map(|(name, _id, param)| s3_xml::Bucket {
+creation_date: s3_xml::Value(msec_to_rfc3339(param.creation_date)),
 name: s3_xml::Value(name.to_string()),
 })
 .collect(),

src/api/s3_copy.rs

@@ -18,14 +18,14 @@ use crate::s3_xml;
 pub async fn handle_copy(
 garage: Arc<Garage>,
 req: &Request<Body>,
-dest_bucket: &str,
+dest_bucket_id: Uuid,
 dest_key: &str,
-source_bucket: &str,
+source_bucket_id: Uuid,
 source_key: &str,
 ) -> Result<Response<Body>, Error> {
 let source_object = garage
 .object_table
-.get(&source_bucket.to_string(), &source_key.to_string())
+.get(&source_bucket_id, &source_key.to_string())
 .await?
 .ok_or(Error::NotFound)?;
@@ -76,7 +76,7 @@ pub async fn handle_copy(
 )),
 };
 let dest_object = Object::new(
-dest_bucket.to_string(),
+dest_bucket_id,
 dest_key.to_string(),
 vec![dest_object_version],
 );
@@ -99,7 +99,7 @@ pub async fn handle_copy(
 state: ObjectVersionState::Uploading(new_meta.headers.clone()),
 };
 let tmp_dest_object = Object::new(
-dest_bucket.to_string(),
+dest_bucket_id,
 dest_key.to_string(),
 vec![tmp_dest_object_version],
 );
@@ -109,12 +109,8 @@ pub async fn handle_copy(
 // this means that the BlockRef entries linked to this version cannot be
 // marked as deleted (they are marked as deleted only if the Version
 // doesn't exist or is marked as deleted).
-let mut dest_version = Version::new(
-new_uuid,
-dest_bucket.to_string(),
-dest_key.to_string(),
-false,
-);
+let mut dest_version =
+Version::new(new_uuid, dest_bucket_id, dest_key.to_string(), false);
 garage.version_table.insert(&dest_version).await?;

 // Fill in block list for version and insert block refs
@@ -151,7 +147,7 @@ pub async fn handle_copy(
 )),
 };
 let dest_object = Object::new(
-dest_bucket.to_string(),
+dest_bucket_id,
 dest_key.to_string(),
 vec![dest_object_version],
 );

src/api/s3_delete.rs

@@ -14,12 +14,12 @@ use crate::signature::verify_signed_content;
 async fn handle_delete_internal(
 garage: &Garage,
-bucket: &str,
+bucket_id: Uuid,
 key: &str,
 ) -> Result<(Uuid, Uuid), Error> {
 let object = garage
 .object_table
-.get(&bucket.to_string(), &key.to_string())
+.get(&bucket_id, &key.to_string())
 .await?
 .ok_or(Error::NotFound)?; // No need to delete
@@ -45,7 +45,7 @@ async fn handle_delete_internal(
 let version_uuid = gen_uuid();

 let object = Object::new(
-bucket.into(),
+bucket_id,
 key.into(),
 vec![ObjectVersion {
 uuid: version_uuid,
@@ -61,11 +61,11 @@ async fn handle_delete_internal(
 pub async fn handle_delete(
 garage: Arc<Garage>,
-bucket: &str,
+bucket_id: Uuid,
 key: &str,
 ) -> Result<Response<Body>, Error> {
 let (_deleted_version, delete_marker_version) =
-handle_delete_internal(&garage, bucket, key).await?;
+handle_delete_internal(&garage, bucket_id, key).await?;

 Ok(Response::builder()
 .header("x-amz-version-id", hex::encode(delete_marker_version))
@@ -76,7 +76,7 @@ pub async fn handle_delete(
 pub async fn handle_delete_objects(
 garage: Arc<Garage>,
-bucket: &str,
+bucket_id: Uuid,
 req: Request<Body>,
 content_sha256: Option<Hash>,
 ) -> Result<Response<Body>, Error> {
@ -90,7 +90,7 @@ pub async fn handle_delete_objects(
let mut ret_errors = Vec::new(); let mut ret_errors = Vec::new();
for obj in cmd.objects.iter() { for obj in cmd.objects.iter() {
match handle_delete_internal(&garage, bucket, &obj.key).await { match handle_delete_internal(&garage, bucket_id, &obj.key).await {
Ok((deleted_version, delete_marker_version)) => { Ok((deleted_version, delete_marker_version)) => {
if cmd.quiet { if cmd.quiet {
continue; continue;


@ -7,6 +7,7 @@ use hyper::body::Bytes;
use hyper::{Body, Request, Response, StatusCode}; use hyper::{Body, Request, Response, StatusCode};
use garage_table::EmptyKey; use garage_table::EmptyKey;
use garage_util::data::*;
use garage_model::garage::Garage; use garage_model::garage::Garage;
use garage_model::object_table::*; use garage_model::object_table::*;
@ -84,12 +85,12 @@ fn try_answer_cached(
pub async fn handle_head( pub async fn handle_head(
garage: Arc<Garage>, garage: Arc<Garage>,
req: &Request<Body>, req: &Request<Body>,
bucket: &str, bucket_id: Uuid,
key: &str, key: &str,
) -> Result<Response<Body>, Error> { ) -> Result<Response<Body>, Error> {
let object = garage let object = garage
.object_table .object_table
.get(&bucket.to_string(), &key.to_string()) .get(&bucket_id, &key.to_string())
.await? .await?
.ok_or(Error::NotFound)?; .ok_or(Error::NotFound)?;
@ -123,12 +124,12 @@ pub async fn handle_head(
pub async fn handle_get( pub async fn handle_get(
garage: Arc<Garage>, garage: Arc<Garage>,
req: &Request<Body>, req: &Request<Body>,
bucket: &str, bucket_id: Uuid,
key: &str, key: &str,
) -> Result<Response<Body>, Error> { ) -> Result<Response<Body>, Error> {
let object = garage let object = garage
.object_table .object_table
.get(&bucket.to_string(), &key.to_string()) .get(&bucket_id, &key.to_string())
.await? .await?
.ok_or(Error::NotFound)?; .ok_or(Error::NotFound)?;


@ -3,6 +3,7 @@ use std::sync::Arc;
use hyper::{Body, Response}; use hyper::{Body, Response};
use garage_util::data::*;
use garage_util::error::Error as GarageError; use garage_util::error::Error as GarageError;
use garage_util::time::*; use garage_util::time::*;
@ -18,7 +19,8 @@ use crate::s3_xml;
#[derive(Debug)] #[derive(Debug)]
pub struct ListObjectsQuery { pub struct ListObjectsQuery {
pub is_v2: bool, pub is_v2: bool,
pub bucket: String, pub bucket_name: String,
pub bucket_id: Uuid,
pub delimiter: Option<String>, pub delimiter: Option<String>,
pub max_keys: usize, pub max_keys: usize,
pub prefix: String, pub prefix: String,
@ -102,7 +104,7 @@ pub async fn handle_list(
let objects = garage let objects = garage
.object_table .object_table
.get_range( .get_range(
&query.bucket, &query.bucket_id,
Some(next_chunk_start.clone()), Some(next_chunk_start.clone()),
Some(DeletedFilter::NotDeleted), Some(DeletedFilter::NotDeleted),
query.max_keys + 1, query.max_keys + 1,
@ -232,7 +234,7 @@ pub async fn handle_list(
let mut result = s3_xml::ListBucketResult { let mut result = s3_xml::ListBucketResult {
xmlns: (), xmlns: (),
name: s3_xml::Value(query.bucket.to_string()), name: s3_xml::Value(query.bucket_name.to_string()),
prefix: uriencode_maybe(&query.prefix, query.urlencode_resp), prefix: uriencode_maybe(&query.prefix, query.urlencode_resp),
marker: None, marker: None,
next_marker: None, next_marker: None,
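
The bucket → bucket_name + bucket_id split in ListObjectsQuery mirrors the new data model: object rows are partitioned by the bucket's Uuid, and the name survives only for display in the XML reply. A hedged micro-sketch of the two roles (field and method names as they appear in this diff):

// Hedged sketch: lookups key on the id, the response echoes the name.
let objects = garage
    .object_table
    .get_range(
        &query.bucket_id,                 // partition key is the Uuid, not a name
        None,
        Some(DeletedFilter::NotDeleted),
        query.max_keys + 1,
    )
    .await?;
let displayed_name = s3_xml::Value(query.bucket_name.to_string());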


@ -24,7 +24,7 @@ use crate::signature::verify_signed_content;
pub async fn handle_put( pub async fn handle_put(
garage: Arc<Garage>, garage: Arc<Garage>,
req: Request<Body>, req: Request<Body>,
bucket: &str, bucket_id: Uuid,
key: &str, key: &str,
content_sha256: Option<Hash>, content_sha256: Option<Hash>,
) -> Result<Response<Body>, Error> { ) -> Result<Response<Body>, Error> {
@ -77,7 +77,7 @@ pub async fn handle_put(
)), )),
}; };
let object = Object::new(bucket.into(), key.into(), vec![object_version]); let object = Object::new(bucket_id, key.into(), vec![object_version]);
garage.object_table.insert(&object).await?; garage.object_table.insert(&object).await?;
return Ok(put_response(version_uuid, data_md5sum_hex)); return Ok(put_response(version_uuid, data_md5sum_hex));
@ -90,14 +90,14 @@ pub async fn handle_put(
timestamp: version_timestamp, timestamp: version_timestamp,
state: ObjectVersionState::Uploading(headers.clone()), state: ObjectVersionState::Uploading(headers.clone()),
}; };
let object = Object::new(bucket.into(), key.into(), vec![object_version.clone()]); let object = Object::new(bucket_id, key.into(), vec![object_version.clone()]);
garage.object_table.insert(&object).await?; garage.object_table.insert(&object).await?;
// Initialize corresponding entry in version table // Initialize corresponding entry in version table
// Write this entry now, even with empty block list, // Write this entry now, even with empty block list,
// to prevent block_ref entries from being deleted (they can be deleted // to prevent block_ref entries from being deleted (they can be deleted
// if they reference a version that isn't found in the version table) // if they reference a version that isn't found in the version table)
let version = Version::new(version_uuid, bucket.into(), key.into(), false); let version = Version::new(version_uuid, bucket_id, key.into(), false);
garage.version_table.insert(&version).await?; garage.version_table.insert(&version).await?;
// Transfer data and verify checksum // Transfer data and verify checksum
@ -127,7 +127,7 @@ pub async fn handle_put(
Err(e) => { Err(e) => {
// Mark object as aborted, this will free the blocks further down // Mark object as aborted, this will free the blocks further down
object_version.state = ObjectVersionState::Aborted; object_version.state = ObjectVersionState::Aborted;
let object = Object::new(bucket.into(), key.into(), vec![object_version.clone()]); let object = Object::new(bucket_id, key.into(), vec![object_version.clone()]);
garage.object_table.insert(&object).await?; garage.object_table.insert(&object).await?;
return Err(e); return Err(e);
} }
@ -143,7 +143,7 @@ pub async fn handle_put(
}, },
first_block_hash, first_block_hash,
)); ));
let object = Object::new(bucket.into(), key.into(), vec![object_version]); let object = Object::new(bucket_id, key.into(), vec![object_version]);
garage.object_table.insert(&object).await?; garage.object_table.insert(&object).await?;
Ok(put_response(version_uuid, md5sum_hex)) Ok(put_response(version_uuid, md5sum_hex))
@ -315,7 +315,8 @@ pub fn put_response(version_uuid: Uuid, md5sum_hex: String) -> Response<Body> {
pub async fn handle_create_multipart_upload( pub async fn handle_create_multipart_upload(
garage: Arc<Garage>, garage: Arc<Garage>,
req: &Request<Body>, req: &Request<Body>,
bucket: &str, bucket_name: &str,
bucket_id: Uuid,
key: &str, key: &str,
) -> Result<Response<Body>, Error> { ) -> Result<Response<Body>, Error> {
let version_uuid = gen_uuid(); let version_uuid = gen_uuid();
@ -327,20 +328,20 @@ pub async fn handle_create_multipart_upload(
timestamp: now_msec(), timestamp: now_msec(),
state: ObjectVersionState::Uploading(headers), state: ObjectVersionState::Uploading(headers),
}; };
let object = Object::new(bucket.to_string(), key.to_string(), vec![object_version]); let object = Object::new(bucket_id, key.to_string(), vec![object_version]);
garage.object_table.insert(&object).await?; garage.object_table.insert(&object).await?;
// Insert empty version so that block_ref entries refer to something // Insert empty version so that block_ref entries refer to something
// (they are inserted concurrently with blocks in the version table, so // (they are inserted concurrently with blocks in the version table, so
// there is the possibility that they are inserted before the version table // there is the possibility that they are inserted before the version table
// is created, in which case it is allowed to delete them, e.g. in repair_*) // is created, in which case it is allowed to delete them, e.g. in repair_*)
let version = Version::new(version_uuid, bucket.into(), key.into(), false); let version = Version::new(version_uuid, bucket_id, key.into(), false);
garage.version_table.insert(&version).await?; garage.version_table.insert(&version).await?;
// Send success response // Send success response
let result = s3_xml::InitiateMultipartUploadResult { let result = s3_xml::InitiateMultipartUploadResult {
xmlns: (), xmlns: (),
bucket: s3_xml::Value(bucket.to_string()), bucket: s3_xml::Value(bucket_name.to_string()),
key: s3_xml::Value(key.to_string()), key: s3_xml::Value(key.to_string()),
upload_id: s3_xml::Value(hex::encode(version_uuid)), upload_id: s3_xml::Value(hex::encode(version_uuid)),
}; };
@ -352,7 +353,7 @@ pub async fn handle_create_multipart_upload(
pub async fn handle_put_part( pub async fn handle_put_part(
garage: Arc<Garage>, garage: Arc<Garage>,
req: Request<Body>, req: Request<Body>,
bucket: &str, bucket_id: Uuid,
key: &str, key: &str,
part_number: u64, part_number: u64,
upload_id: &str, upload_id: &str,
@ -366,12 +367,11 @@ pub async fn handle_put_part(
}; };
// Read first chunk, and at the same time try to get object to see if it exists // Read first chunk, and at the same time try to get object to see if it exists
let bucket = bucket.to_string();
let key = key.to_string(); let key = key.to_string();
let mut chunker = BodyChunker::new(req.into_body(), garage.config.block_size); let mut chunker = BodyChunker::new(req.into_body(), garage.config.block_size);
let (object, first_block) = let (object, first_block) =
futures::try_join!(garage.object_table.get(&bucket, &key), chunker.next(),)?; futures::try_join!(garage.object_table.get(&bucket_id, &key), chunker.next(),)?;
// Check object is valid and multipart block can be accepted // Check object is valid and multipart block can be accepted
let first_block = first_block.ok_or_else(|| Error::BadRequest("Empty body".to_string()))?; let first_block = first_block.ok_or_else(|| Error::BadRequest("Empty body".to_string()))?;
@ -386,7 +386,7 @@ pub async fn handle_put_part(
} }
// Copy block to store // Copy block to store
let version = Version::new(version_uuid, bucket, key, false); let version = Version::new(version_uuid, bucket_id, key, false);
let first_block_hash = blake2sum(&first_block[..]); let first_block_hash = blake2sum(&first_block[..]);
let (_, data_md5sum, data_sha256sum) = read_and_put_blocks( let (_, data_md5sum, data_sha256sum) = read_and_put_blocks(
&garage, &garage,
@ -424,7 +424,8 @@ pub async fn handle_put_part(
pub async fn handle_complete_multipart_upload( pub async fn handle_complete_multipart_upload(
garage: Arc<Garage>, garage: Arc<Garage>,
req: Request<Body>, req: Request<Body>,
bucket: &str, bucket_name: &str,
bucket_id: Uuid,
key: &str, key: &str,
upload_id: &str, upload_id: &str,
content_sha256: Option<Hash>, content_sha256: Option<Hash>,
@ -442,10 +443,9 @@ pub async fn handle_complete_multipart_upload(
let version_uuid = decode_upload_id(upload_id)?; let version_uuid = decode_upload_id(upload_id)?;
let bucket = bucket.to_string();
let key = key.to_string(); let key = key.to_string();
let (object, version) = futures::try_join!( let (object, version) = futures::try_join!(
garage.object_table.get(&bucket, &key), garage.object_table.get(&bucket_id, &key),
garage.version_table.get(&version_uuid, &EmptyKey), garage.version_table.get(&version_uuid, &EmptyKey),
)?; )?;
@ -510,14 +510,14 @@ pub async fn handle_complete_multipart_upload(
version.blocks.items()[0].1.hash, version.blocks.items()[0].1.hash,
)); ));
let final_object = Object::new(bucket.clone(), key.clone(), vec![object_version]); let final_object = Object::new(bucket_id, key.clone(), vec![object_version]);
garage.object_table.insert(&final_object).await?; garage.object_table.insert(&final_object).await?;
// Send response saying ok we're done // Send response saying ok we're done
let result = s3_xml::CompleteMultipartUploadResult { let result = s3_xml::CompleteMultipartUploadResult {
xmlns: (), xmlns: (),
location: None, location: None,
bucket: s3_xml::Value(bucket), bucket: s3_xml::Value(bucket_name.to_string()),
key: s3_xml::Value(key), key: s3_xml::Value(key),
etag: s3_xml::Value(etag), etag: s3_xml::Value(etag),
}; };
@ -528,7 +528,7 @@ pub async fn handle_complete_multipart_upload(
pub async fn handle_abort_multipart_upload( pub async fn handle_abort_multipart_upload(
garage: Arc<Garage>, garage: Arc<Garage>,
bucket: &str, bucket_id: Uuid,
key: &str, key: &str,
upload_id: &str, upload_id: &str,
) -> Result<Response<Body>, Error> { ) -> Result<Response<Body>, Error> {
@ -536,7 +536,7 @@ pub async fn handle_abort_multipart_upload(
let object = garage let object = garage
.object_table .object_table
.get(&bucket.to_string(), &key.to_string()) .get(&bucket_id, &key.to_string())
.await?; .await?;
let object = object.ok_or_else(|| Error::BadRequest("Object not found".to_string()))?; let object = object.ok_or_else(|| Error::BadRequest("Object not found".to_string()))?;
@ -550,7 +550,7 @@ pub async fn handle_abort_multipart_upload(
}; };
object_version.state = ObjectVersionState::Aborted; object_version.state = ObjectVersionState::Aborted;
let final_object = Object::new(bucket.to_string(), key.to_string(), vec![object_version]); let final_object = Object::new(bucket_id, key.to_string(), vec![object_version]);
garage.object_table.insert(&final_object).await?; garage.object_table.insert(&final_object).await?;
Ok(Response::new(Body::from(vec![]))) Ok(Response::new(Body::from(vec![])))
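
Worth noting for the multipart handlers above: an upload ID is nothing more than the hex encoding of the version Uuid (hex::encode(version_uuid) when the upload is created, decode_upload_id(upload_id) in the put-part, complete and abort handlers). A hedged round-trip sketch, with decode_upload_id's signature inferred from its call sites:

// Hedged sketch of the upload-id round trip.
fn upload_id_roundtrip_sketch(version_uuid: Uuid) -> Result<(), Error> {
    let upload_id = hex::encode(version_uuid);         // handed to the client
    let decoded: Uuid = decode_upload_id(&upload_id)?; // recovered on later calls
    assert_eq!(decoded, version_uuid);
    Ok(())
}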


@ -7,24 +7,28 @@ use serde::{Deserialize, Serialize};
use crate::error::*; use crate::error::*;
use crate::s3_xml::{xmlns_tag, IntValue, Value}; use crate::s3_xml::{xmlns_tag, IntValue, Value};
use crate::signature::verify_signed_content; use crate::signature::verify_signed_content;
use garage_model::bucket_table::BucketState;
use garage_model::bucket_table::*;
use garage_model::garage::Garage; use garage_model::garage::Garage;
use garage_table::*; use garage_table::*;
use garage_util::data::Hash; use garage_util::crdt;
use garage_util::data::*;
pub async fn handle_delete_website( pub async fn handle_delete_website(
garage: Arc<Garage>, garage: Arc<Garage>,
bucket: String, bucket_id: Uuid,
) -> Result<Response<Body>, Error> { ) -> Result<Response<Body>, Error> {
let mut bucket = garage let mut bucket = garage
.bucket_table .bucket_table
.get(&EmptyKey, &bucket) .get(&EmptyKey, &bucket_id)
.await? .await?
.ok_or(Error::NotFound)?; .ok_or(Error::NotFound)?;
if let BucketState::Present(state) = bucket.state.get_mut() { if let crdt::Deletable::Present(param) = &mut bucket.state {
state.website.update(false); param.website_config.update(None);
garage.bucket_table.insert(&bucket).await?; garage.bucket_table.insert(&bucket).await?;
} else {
unreachable!();
} }
Ok(Response::builder() Ok(Response::builder()
@ -35,7 +39,7 @@ pub async fn handle_delete_website(
pub async fn handle_put_website( pub async fn handle_put_website(
garage: Arc<Garage>, garage: Arc<Garage>,
bucket: String, bucket_id: Uuid,
req: Request<Body>, req: Request<Body>,
content_sha256: Option<Hash>, content_sha256: Option<Hash>,
) -> Result<Response<Body>, Error> { ) -> Result<Response<Body>, Error> {
@ -44,16 +48,20 @@ pub async fn handle_put_website(
let mut bucket = garage let mut bucket = garage
.bucket_table .bucket_table
.get(&EmptyKey, &bucket) .get(&EmptyKey, &bucket_id)
.await? .await?
.ok_or(Error::NotFound)?; .ok_or(Error::NotFound)?;
let conf: WebsiteConfiguration = from_reader(&body as &[u8])?; let conf: WebsiteConfiguration = from_reader(&body as &[u8])?;
conf.validate()?; conf.validate()?;
if let BucketState::Present(state) = bucket.state.get_mut() { if let crdt::Deletable::Present(param) = &mut bucket.state {
state.website.update(true); param
.website_config
.update(Some(conf.into_garage_website_config()?));
garage.bucket_table.insert(&bucket).await?; garage.bucket_table.insert(&bucket).await?;
} else {
unreachable!();
} }
Ok(Response::builder() Ok(Response::builder()
@ -160,6 +168,26 @@ impl WebsiteConfiguration {
Ok(()) Ok(())
} }
pub fn into_garage_website_config(self) -> Result<WebsiteConfig, Error> {
if self.redirect_all_requests_to.is_some() {
Err(Error::NotImplemented(
"S3 website redirects are not currently implemented in Garage.".into(),
))
} else if self.routing_rules.map(|x| !x.is_empty()).unwrap_or(false) {
Err(Error::NotImplemented(
"S3 routing rules are not currently implemented in Garage.".into(),
))
} else {
Ok(WebsiteConfig {
index_document: self
.index_document
.map(|x| x.suffix.0)
.unwrap_or_else(|| "index.html".to_string()),
error_document: self.error_document.map(|x| x.key.0),
})
}
}
} }
impl Key { impl Key {
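
The new into_garage_website_config above is the whole S3→Garage translation: redirect_all_requests_to and non-empty routing_rules are rejected as NotImplemented, and a missing index document falls back to "index.html". A hedged sketch condensing how handle_put_website uses it:

// Hedged sketch: XML payload -> validated -> internal WebsiteConfig.
let conf: WebsiteConfiguration = from_reader(&body as &[u8])?;
conf.validate()?;
let website: WebsiteConfig = conf.into_garage_website_config()?;
// Stored as a last-writer-wins Option: Some(_) enables website access,
// None (as in handle_delete_website) disables it.
param.website_config.update(Some(website));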


@ -64,8 +64,9 @@ pub async fn check_signature(
.key_table .key_table
.get(&EmptyKey, &authorization.key_id) .get(&EmptyKey, &authorization.key_id)
.await? .await?
.filter(|k| !k.deleted.get()) .filter(|k| !k.state.is_deleted())
.ok_or_else(|| Error::Forbidden(format!("No such key: {}", authorization.key_id)))?; .ok_or_else(|| Error::Forbidden(format!("No such key: {}", authorization.key_id)))?;
let key_p = key.params().unwrap();
let canonical_request = canonical_request( let canonical_request = canonical_request(
request.method(), request.method(),
@ -79,7 +80,7 @@ pub async fn check_signature(
let mut hmac = signing_hmac( let mut hmac = signing_hmac(
&date, &date,
&key.secret_key, &key_p.secret_key,
&garage.config.s3_api.s3_region, &garage.config.s3_api.s3_region,
"s3", "s3",
) )
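
Key records now keep their parameters behind a deletable CRDT state, which is why this hunk filters on key.state.is_deleted() and then reaches through key.params(). A hedged sketch of the access pattern (params() returning an Option is inferred from the unwrap at its call site):

// Hedged sketch of the new key access pattern.
let key = garage
    .key_table
    .get(&EmptyKey, &key_id)
    .await?
    .filter(|k| !k.state.is_deleted())
    .ok_or_else(|| Error::Forbidden(format!("No such key: {}", key_id)))?;
let secret_key = &key.params().unwrap().secret_key; // safe: deleted keys were filtered out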


@ -1,6 +1,6 @@
[package] [package]
name = "garage" name = "garage"
version = "0.5.0" version = "0.6.0"
authors = ["Alex Auvolat <alex@adnab.me>"] authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"
@ -15,12 +15,12 @@ path = "main.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies] [dependencies]
garage_api = { version = "0.5.0", path = "../api" } garage_api = { version = "0.6.0", path = "../api" }
garage_model = { version = "0.5.0", path = "../model" } garage_model = { version = "0.6.0", path = "../model" }
garage_rpc = { version = "0.5.0", path = "../rpc" } garage_rpc = { version = "0.6.0", path = "../rpc" }
garage_table = { version = "0.5.0", path = "../table" } garage_table = { version = "0.6.0", path = "../table" }
garage_util = { version = "0.5.0", path = "../util" } garage_util = { version = "0.6.0", path = "../util" }
garage_web = { version = "0.5.0", path = "../web" } garage_web = { version = "0.6.0", path = "../web" }
bytes = "1.0" bytes = "1.0"
git-version = "0.3.4" git-version = "0.3.4"
@ -35,6 +35,7 @@ sled = "0.34"
rmp-serde = "0.15" rmp-serde = "0.15"
serde = { version = "1.0", default-features = false, features = ["derive", "rc"] } serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
serde_bytes = "0.11"
structopt = { version = "0.3", default-features = false } structopt = { version = "0.3", default-features = false }
toml = "0.5" toml = "0.5"


@ -5,17 +5,23 @@ use std::sync::Arc;
use async_trait::async_trait; use async_trait::async_trait;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use garage_util::error::Error; use garage_util::crdt::*;
use garage_util::data::*;
use garage_util::error::Error as GarageError;
use garage_util::time::*;
use garage_table::crdt::Crdt;
use garage_table::replication::*; use garage_table::replication::*;
use garage_table::*; use garage_table::*;
use garage_rpc::*; use garage_rpc::*;
use garage_model::bucket_alias_table::*;
use garage_model::bucket_table::*; use garage_model::bucket_table::*;
use garage_model::garage::Garage; use garage_model::garage::Garage;
use garage_model::helper::error::{Error, OkOrBadRequest};
use garage_model::key_table::*; use garage_model::key_table::*;
use garage_model::migrate::Migrate;
use garage_model::permission::*;
use crate::cli::*; use crate::cli::*;
use crate::repair::Repair; use crate::repair::Repair;
@ -27,14 +33,15 @@ pub enum AdminRpc {
BucketOperation(BucketOperation), BucketOperation(BucketOperation),
KeyOperation(KeyOperation), KeyOperation(KeyOperation),
LaunchRepair(RepairOpt), LaunchRepair(RepairOpt),
Migrate(MigrateOpt),
Stats(StatsOpt), Stats(StatsOpt),
// Replies // Replies
Ok(String), Ok(String),
BucketList(Vec<String>), BucketList(Vec<Bucket>),
BucketInfo(Bucket), BucketInfo(Bucket, HashMap<String, Key>),
KeyList(Vec<(String, String)>), KeyList(Vec<(String, String)>),
KeyInfo(Key), KeyInfo(Key, HashMap<Uuid, Bucket>),
} }
impl Rpc for AdminRpc { impl Rpc for AdminRpc {
@ -56,280 +63,515 @@ impl AdminRpcHandler {
async fn handle_bucket_cmd(&self, cmd: &BucketOperation) -> Result<AdminRpc, Error> { async fn handle_bucket_cmd(&self, cmd: &BucketOperation) -> Result<AdminRpc, Error> {
match cmd { match cmd {
BucketOperation::List => { BucketOperation::List => self.handle_list_buckets().await,
let bucket_names = self BucketOperation::Info(query) => self.handle_bucket_info(query).await,
.garage BucketOperation::Create(query) => self.handle_create_bucket(&query.name).await,
.bucket_table BucketOperation::Delete(query) => self.handle_delete_bucket(query).await,
.get_range(&EmptyKey, None, Some(DeletedFilter::NotDeleted), 10000) BucketOperation::Alias(query) => self.handle_alias_bucket(query).await,
.await? BucketOperation::Unalias(query) => self.handle_unalias_bucket(query).await,
.iter() BucketOperation::Allow(query) => self.handle_bucket_allow(query).await,
.map(|b| b.name.to_string()) BucketOperation::Deny(query) => self.handle_bucket_deny(query).await,
.collect::<Vec<_>>(); BucketOperation::Website(query) => self.handle_bucket_website(query).await,
Ok(AdminRpc::BucketList(bucket_names)) }
} }
BucketOperation::Info(query) => {
let bucket = self.get_existing_bucket(&query.name).await?;
Ok(AdminRpc::BucketInfo(bucket))
}
BucketOperation::Create(query) => {
let bucket = match self.garage.bucket_table.get(&EmptyKey, &query.name).await? {
Some(mut bucket) => {
if !bucket.is_deleted() {
return Err(Error::BadRpc(format!(
"Bucket {} already exists",
query.name
)));
}
bucket
.state
.update(BucketState::Present(BucketParams::new()));
bucket
}
None => Bucket::new(query.name.clone()),
};
self.garage.bucket_table.insert(&bucket).await?;
Ok(AdminRpc::Ok(format!("Bucket {} was created.", query.name)))
}
BucketOperation::Delete(query) => {
let mut bucket = self.get_existing_bucket(&query.name).await?;
let objects = self
.garage
.object_table
.get_range(&query.name, None, Some(DeletedFilter::NotDeleted), 10)
.await?;
if !objects.is_empty() {
return Err(Error::BadRpc(format!("Bucket {} is not empty", query.name)));
}
if !query.yes {
return Err(Error::BadRpc(
"Add --yes flag to really perform this operation".to_string(),
));
}
// --- done checking, now commit ---
for (key_id, _, _) in bucket.authorized_keys() {
if let Some(key) = self.garage.key_table.get(&EmptyKey, key_id).await? {
if !key.deleted.get() {
self.update_key_bucket(&key, &bucket.name, false, false)
.await?;
}
} else {
return Err(Error::Message(format!("Key not found: {}", key_id)));
}
}
bucket.state.update(BucketState::Deleted);
self.garage.bucket_table.insert(&bucket).await?;
Ok(AdminRpc::Ok(format!("Bucket {} was deleted.", query.name)))
}
BucketOperation::Allow(query) => {
let key = self.get_existing_key(&query.key_pattern).await?;
let bucket = self.get_existing_bucket(&query.bucket).await?;
let allow_read = query.read || key.allow_read(&query.bucket);
let allow_write = query.write || key.allow_write(&query.bucket);
self.update_key_bucket(&key, &query.bucket, allow_read, allow_write)
.await?;
self.update_bucket_key(bucket, &key.key_id, allow_read, allow_write)
.await?;
Ok(AdminRpc::Ok(format!(
"New permissions for {} on {}: read {}, write {}.",
&key.key_id, &query.bucket, allow_read, allow_write
)))
}
BucketOperation::Deny(query) => {
let key = self.get_existing_key(&query.key_pattern).await?;
let bucket = self.get_existing_bucket(&query.bucket).await?;
let allow_read = !query.read && key.allow_read(&query.bucket);
let allow_write = !query.write && key.allow_write(&query.bucket);
self.update_key_bucket(&key, &query.bucket, allow_read, allow_write)
.await?;
self.update_bucket_key(bucket, &key.key_id, allow_read, allow_write)
.await?;
Ok(AdminRpc::Ok(format!(
"New permissions for {} on {}: read {}, write {}.",
&key.key_id, &query.bucket, allow_read, allow_write
)))
}
BucketOperation::Website(query) => {
let mut bucket = self.get_existing_bucket(&query.bucket).await?;
if !(query.allow ^ query.deny) { async fn handle_list_buckets(&self) -> Result<AdminRpc, Error> {
return Err(Error::Message( let buckets = self
"You must specify exactly one flag, either --allow or --deny".to_string(), .garage
)); .bucket_table
} .get_range(&EmptyKey, None, Some(DeletedFilter::NotDeleted), 10000)
.await?;
Ok(AdminRpc::BucketList(buckets))
}
if let BucketState::Present(state) = bucket.state.get_mut() { async fn handle_bucket_info(&self, query: &BucketOpt) -> Result<AdminRpc, Error> {
state.website.update(query.allow); let bucket_id = self
self.garage.bucket_table.insert(&bucket).await?; .garage
let msg = if query.allow { .bucket_helper()
format!("Website access allowed for {}", &query.bucket) .resolve_global_bucket_name(&query.name)
} else { .await?
format!("Website access denied for {}", &query.bucket) .ok_or_bad_request("Bucket not found")?;
};
Ok(AdminRpc::Ok(msg)) let bucket = self
} else { .garage
unreachable!(); .bucket_helper()
} .get_existing_bucket(bucket_id)
.await?;
let mut relevant_keys = HashMap::new();
for (k, _) in bucket
.state
.as_option()
.unwrap()
.authorized_keys
.items()
.iter()
{
if let Some(key) = self
.garage
.key_table
.get(&EmptyKey, k)
.await?
.filter(|k| !k.is_deleted())
{
relevant_keys.insert(k.clone(), key);
} }
} }
for ((k, _), _, _) in bucket
.state
.as_option()
.unwrap()
.local_aliases
.items()
.iter()
{
if relevant_keys.contains_key(k) {
continue;
}
if let Some(key) = self.garage.key_table.get(&EmptyKey, k).await? {
relevant_keys.insert(k.clone(), key);
}
}
Ok(AdminRpc::BucketInfo(bucket, relevant_keys))
}
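
This handler introduces the lookup pattern used by all the admin commands below: a user-supplied name is first resolved to the bucket's Uuid through the bucket helper, and only the Uuid is ever used against the tables (names now live solely in the alias tables). The pattern, restated as a hedged sketch:

// Hedged sketch: a name (or, per handle_delete_bucket below, a full hex id)
// resolves to a Uuid before any table access.
let bucket_id = self
    .garage
    .bucket_helper()
    .resolve_global_bucket_name(&query.name)
    .await?
    .ok_or_bad_request("Bucket not found")?;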
#[allow(clippy::ptr_arg)]
async fn handle_create_bucket(&self, name: &String) -> Result<AdminRpc, Error> {
if !is_valid_bucket_name(name) {
return Err(Error::BadRequest(format!(
"{}: {}",
name, INVALID_BUCKET_NAME_MESSAGE
)));
}
if let Some(alias) = self.garage.bucket_alias_table.get(&EmptyKey, name).await? {
if alias.state.get().is_some() {
return Err(Error::BadRequest(format!("Bucket {} already exists", name)));
}
}
// ---- done checking, now commit ----
let bucket = Bucket::new();
self.garage.bucket_table.insert(&bucket).await?;
self.garage
.bucket_helper()
.set_global_bucket_alias(bucket.id, name)
.await?;
Ok(AdminRpc::Ok(format!("Bucket {} was created.", name)))
}
async fn handle_delete_bucket(&self, query: &DeleteBucketOpt) -> Result<AdminRpc, Error> {
let helper = self.garage.bucket_helper();
let bucket_id = helper
.resolve_global_bucket_name(&query.name)
.await?
.ok_or_bad_request("Bucket not found")?;
// Get the alias, but keep in mind that the bucket name
// given in parameter can also be directly the bucket's ID.
// In that case bucket_alias will be None, and
// we can still delete the bucket if it has zero aliases
// (a condition which we try to prevent but that could still happen somehow).
// We just won't try to delete an alias entry because there isn't one.
let bucket_alias = self
.garage
.bucket_alias_table
.get(&EmptyKey, &query.name)
.await?;
// Check bucket doesn't have other aliases
let mut bucket = helper.get_existing_bucket(bucket_id).await?;
let bucket_state = bucket.state.as_option().unwrap();
if bucket_state
.aliases
.items()
.iter()
.filter(|(_, _, active)| *active)
.any(|(name, _, _)| name != &query.name)
{
return Err(Error::BadRequest(format!("Bucket {} still has other global aliases. Use `bucket unalias` to delete them one by one.", query.name)));
}
if bucket_state
.local_aliases
.items()
.iter()
.any(|(_, _, active)| *active)
{
return Err(Error::BadRequest(format!("Bucket {} still has other local aliases. Use `bucket unalias` to delete them one by one.", query.name)));
}
// Check bucket is empty
let objects = self
.garage
.object_table
.get_range(&bucket_id, None, Some(DeletedFilter::NotDeleted), 10)
.await?;
if !objects.is_empty() {
return Err(Error::BadRequest(format!(
"Bucket {} is not empty",
query.name
)));
}
if !query.yes {
return Err(Error::BadRequest(
"Add --yes flag to really perform this operation".to_string(),
));
}
// --- done checking, now commit ---
// 1. delete authorization from keys that had access
for (key_id, _) in bucket.authorized_keys() {
helper
.set_bucket_key_permissions(bucket.id, key_id, BucketKeyPerm::NO_PERMISSIONS)
.await?;
}
// 2. delete bucket alias
if bucket_alias.is_some() {
helper
.purge_global_bucket_alias(bucket_id, &query.name)
.await?;
}
// 3. delete bucket
bucket.state = Deletable::delete();
self.garage.bucket_table.insert(&bucket).await?;
Ok(AdminRpc::Ok(format!("Bucket {} was deleted.", query.name)))
}
async fn handle_alias_bucket(&self, query: &AliasBucketOpt) -> Result<AdminRpc, Error> {
let helper = self.garage.bucket_helper();
let bucket_id = helper
.resolve_global_bucket_name(&query.existing_bucket)
.await?
.ok_or_bad_request("Bucket not found")?;
if let Some(key_pattern) = &query.local {
let key = helper.get_existing_matching_key(key_pattern).await?;
helper
.set_local_bucket_alias(bucket_id, &key.key_id, &query.new_name)
.await?;
Ok(AdminRpc::Ok(format!(
"Alias {} now points to bucket {:?} in namespace of key {}",
query.new_name, bucket_id, key.key_id
)))
} else {
helper
.set_global_bucket_alias(bucket_id, &query.new_name)
.await?;
Ok(AdminRpc::Ok(format!(
"Alias {} now points to bucket {:?}",
query.new_name, bucket_id
)))
}
}
async fn handle_unalias_bucket(&self, query: &UnaliasBucketOpt) -> Result<AdminRpc, Error> {
let helper = self.garage.bucket_helper();
if let Some(key_pattern) = &query.local {
let key = helper.get_existing_matching_key(key_pattern).await?;
let bucket_id = key
.state
.as_option()
.unwrap()
.local_aliases
.get(&query.name)
.cloned()
.flatten()
.ok_or_bad_request("Bucket not found")?;
helper
.unset_local_bucket_alias(bucket_id, &key.key_id, &query.name)
.await?;
Ok(AdminRpc::Ok(format!(
"Alias {} no longer points to bucket {:?} in namespace of key {}",
&query.name, bucket_id, key.key_id
)))
} else {
let bucket_id = helper
.resolve_global_bucket_name(&query.name)
.await?
.ok_or_bad_request("Bucket not found")?;
helper
.unset_global_bucket_alias(bucket_id, &query.name)
.await?;
Ok(AdminRpc::Ok(format!(
"Alias {} no longer points to bucket {:?}",
&query.name, bucket_id
)))
}
}
async fn handle_bucket_allow(&self, query: &PermBucketOpt) -> Result<AdminRpc, Error> {
let helper = self.garage.bucket_helper();
let bucket_id = helper
.resolve_global_bucket_name(&query.bucket)
.await?
.ok_or_bad_request("Bucket not found")?;
let key = helper.get_existing_matching_key(&query.key_pattern).await?;
let allow_read = query.read || key.allow_read(&bucket_id);
let allow_write = query.write || key.allow_write(&bucket_id);
let allow_owner = query.owner || key.allow_owner(&bucket_id);
helper
.set_bucket_key_permissions(
bucket_id,
&key.key_id,
BucketKeyPerm {
timestamp: now_msec(),
allow_read,
allow_write,
allow_owner,
},
)
.await?;
Ok(AdminRpc::Ok(format!(
"New permissions for {} on {}: read {}, write {}, owner {}.",
&key.key_id, &query.bucket, allow_read, allow_write, allow_owner
)))
}
async fn handle_bucket_deny(&self, query: &PermBucketOpt) -> Result<AdminRpc, Error> {
let helper = self.garage.bucket_helper();
let bucket_id = helper
.resolve_global_bucket_name(&query.bucket)
.await?
.ok_or_bad_request("Bucket not found")?;
let key = helper.get_existing_matching_key(&query.key_pattern).await?;
let allow_read = !query.read && key.allow_read(&bucket_id);
let allow_write = !query.write && key.allow_write(&bucket_id);
let allow_owner = !query.owner && key.allow_owner(&bucket_id);
helper
.set_bucket_key_permissions(
bucket_id,
&key.key_id,
BucketKeyPerm {
timestamp: now_msec(),
allow_read,
allow_write,
allow_owner,
},
)
.await?;
Ok(AdminRpc::Ok(format!(
"New permissions for {} on {}: read {}, write {}, owner {}.",
&key.key_id, &query.bucket, allow_read, allow_write, allow_owner
)))
}
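
Note the asymmetry between the two handlers above: allow ORs the requested flags into the key's current permissions while deny clears only the requested ones, and both write a full timestamped BucketKeyPerm so last-writer-wins merging stays deterministic. A hedged side-by-side sketch:

// Hedged sketch of the permission record written by allow (deny in comments).
let new_perm = BucketKeyPerm {
    timestamp: now_msec(),
    allow_read: query.read || key.allow_read(&bucket_id),    // deny: !query.read  && ...
    allow_write: query.write || key.allow_write(&bucket_id), // deny: !query.write && ...
    allow_owner: query.owner || key.allow_owner(&bucket_id), // deny: !query.owner && ...
};
helper
    .set_bucket_key_permissions(bucket_id, &key.key_id, new_perm)
    .await?;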
async fn handle_bucket_website(&self, query: &WebsiteOpt) -> Result<AdminRpc, Error> {
let bucket_id = self
.garage
.bucket_helper()
.resolve_global_bucket_name(&query.bucket)
.await?
.ok_or_bad_request("Bucket not found")?;
let mut bucket = self
.garage
.bucket_helper()
.get_existing_bucket(bucket_id)
.await?;
let bucket_state = bucket.state.as_option_mut().unwrap();
if !(query.allow ^ query.deny) {
return Err(Error::BadRequest(
"You must specify exactly one flag, either --allow or --deny".to_string(),
));
}
let website = if query.allow {
Some(WebsiteConfig {
index_document: "index.html".into(),
error_document: None,
})
} else {
None
};
bucket_state.website_config.update(website);
self.garage.bucket_table.insert(&bucket).await?;
let msg = if query.allow {
format!("Website access allowed for {}", &query.bucket)
} else {
format!("Website access denied for {}", &query.bucket)
};
Ok(AdminRpc::Ok(msg))
} }
async fn handle_key_cmd(&self, cmd: &KeyOperation) -> Result<AdminRpc, Error> { async fn handle_key_cmd(&self, cmd: &KeyOperation) -> Result<AdminRpc, Error> {
match cmd { match cmd {
KeyOperation::List => { KeyOperation::List => self.handle_list_keys().await,
let key_ids = self KeyOperation::Info(query) => self.handle_key_info(query).await,
.garage KeyOperation::New(query) => self.handle_create_key(query).await,
.key_table KeyOperation::Rename(query) => self.handle_rename_key(query).await,
.get_range( KeyOperation::Delete(query) => self.handle_delete_key(query).await,
&EmptyKey, KeyOperation::Import(query) => self.handle_import_key(query).await,
None,
Some(KeyFilter::Deleted(DeletedFilter::NotDeleted)),
10000,
)
.await?
.iter()
.map(|k| (k.key_id.to_string(), k.name.get().clone()))
.collect::<Vec<_>>();
Ok(AdminRpc::KeyList(key_ids))
}
KeyOperation::Info(query) => {
let key = self.get_existing_key(&query.key_pattern).await?;
Ok(AdminRpc::KeyInfo(key))
}
KeyOperation::New(query) => {
let key = Key::new(query.name.clone());
self.garage.key_table.insert(&key).await?;
Ok(AdminRpc::KeyInfo(key))
}
KeyOperation::Rename(query) => {
let mut key = self.get_existing_key(&query.key_pattern).await?;
key.name.update(query.new_name.clone());
self.garage.key_table.insert(&key).await?;
Ok(AdminRpc::KeyInfo(key))
}
KeyOperation::Delete(query) => {
let key = self.get_existing_key(&query.key_pattern).await?;
if !query.yes {
return Err(Error::BadRpc(
"Add --yes flag to really perform this operation".to_string(),
));
}
// --- done checking, now commit ---
for (ab_name, _, _) in key.authorized_buckets.items().iter() {
if let Some(bucket) = self.garage.bucket_table.get(&EmptyKey, ab_name).await? {
if !bucket.is_deleted() {
self.update_bucket_key(bucket, &key.key_id, false, false)
.await?;
}
} else {
return Err(Error::Message(format!("Bucket not found: {}", ab_name)));
}
}
let del_key = Key::delete(key.key_id.to_string());
self.garage.key_table.insert(&del_key).await?;
Ok(AdminRpc::Ok(format!(
"Key {} was deleted successfully.",
key.key_id
)))
}
KeyOperation::Import(query) => {
let prev_key = self.garage.key_table.get(&EmptyKey, &query.key_id).await?;
if prev_key.is_some() {
return Err(Error::Message(format!("Key {} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. Sorry.", query.key_id)));
}
let imported_key = Key::import(&query.key_id, &query.secret_key, &query.name);
self.garage.key_table.insert(&imported_key).await?;
Ok(AdminRpc::KeyInfo(imported_key))
}
} }
} }
#[allow(clippy::ptr_arg)] async fn handle_list_keys(&self) -> Result<AdminRpc, Error> {
async fn get_existing_bucket(&self, bucket: &String) -> Result<Bucket, Error> { let key_ids = self
self.garage
.bucket_table
.get(&EmptyKey, bucket)
.await?
.filter(|b| !b.is_deleted())
.map(Ok)
.unwrap_or_else(|| Err(Error::BadRpc(format!("Bucket {} does not exist", bucket))))
}
async fn get_existing_key(&self, pattern: &str) -> Result<Key, Error> {
let candidates = self
.garage .garage
.key_table .key_table
.get_range( .get_range(
&EmptyKey, &EmptyKey,
None, None,
Some(KeyFilter::Matches(pattern.to_string())), Some(KeyFilter::Deleted(DeletedFilter::NotDeleted)),
10, 10000,
) )
.await? .await?
.into_iter() .iter()
.filter(|k| !k.deleted.get()) .map(|k| (k.key_id.to_string(), k.params().unwrap().name.get().clone()))
.collect::<Vec<_>>(); .collect::<Vec<_>>();
if candidates.len() != 1 { Ok(AdminRpc::KeyList(key_ids))
Err(Error::Message(format!(
"{} matching keys",
candidates.len()
)))
} else {
Ok(candidates.into_iter().next().unwrap())
}
} }
/// Update **bucket table** to inform of the new linked key async fn handle_key_info(&self, query: &KeyOpt) -> Result<AdminRpc, Error> {
async fn update_bucket_key( let key = self
&self, .garage
mut bucket: Bucket, .bucket_helper()
key_id: &str, .get_existing_matching_key(&query.key_pattern)
allow_read: bool, .await?;
allow_write: bool, self.key_info_result(key).await
) -> Result<(), Error> {
if let BucketState::Present(params) = bucket.state.get_mut() {
let ak = &mut params.authorized_keys;
let old_ak = ak.take_and_clear();
ak.merge(&old_ak.update_mutator(
key_id.to_string(),
PermissionSet {
allow_read,
allow_write,
},
));
} else {
return Err(Error::Message(
"Bucket is deleted in update_bucket_key".to_string(),
));
}
self.garage.bucket_table.insert(&bucket).await?;
Ok(())
} }
/// Update **key table** to inform of the new linked bucket async fn handle_create_key(&self, query: &KeyNewOpt) -> Result<AdminRpc, Error> {
async fn update_key_bucket( let key = Key::new(&query.name);
&self,
key: &Key,
bucket: &str,
allow_read: bool,
allow_write: bool,
) -> Result<(), Error> {
let mut key = key.clone();
let old_map = key.authorized_buckets.take_and_clear();
key.authorized_buckets.merge(&old_map.update_mutator(
bucket.to_string(),
PermissionSet {
allow_read,
allow_write,
},
));
self.garage.key_table.insert(&key).await?; self.garage.key_table.insert(&key).await?;
Ok(()) self.key_info_result(key).await
}
async fn handle_rename_key(&self, query: &KeyRenameOpt) -> Result<AdminRpc, Error> {
let mut key = self
.garage
.bucket_helper()
.get_existing_matching_key(&query.key_pattern)
.await?;
key.params_mut()
.unwrap()
.name
.update(query.new_name.clone());
self.garage.key_table.insert(&key).await?;
self.key_info_result(key).await
}
async fn handle_delete_key(&self, query: &KeyDeleteOpt) -> Result<AdminRpc, Error> {
let helper = self.garage.bucket_helper();
let mut key = helper.get_existing_matching_key(&query.key_pattern).await?;
if !query.yes {
return Err(Error::BadRequest(
"Add --yes flag to really perform this operation".to_string(),
));
}
let state = key.state.as_option_mut().unwrap();
// --- done checking, now commit ---
// (the unset_local_bucket_alias step below will fail if a bucket
// has no other alias; if that happens, the deletion is
// interrupted in the middle)
// 1. Delete local aliases
for (alias, _, to) in state.local_aliases.items().iter() {
if let Some(bucket_id) = to {
helper
.unset_local_bucket_alias(*bucket_id, &key.key_id, alias)
.await?;
}
}
// 2. Remove permissions on all authorized buckets
for (ab_id, _auth) in state.authorized_buckets.items().iter() {
helper
.set_bucket_key_permissions(*ab_id, &key.key_id, BucketKeyPerm::NO_PERMISSIONS)
.await?;
}
// 3. Actually delete key
key.state = Deletable::delete();
self.garage.key_table.insert(&key).await?;
Ok(AdminRpc::Ok(format!(
"Key {} was deleted successfully.",
key.key_id
)))
}
async fn handle_import_key(&self, query: &KeyImportOpt) -> Result<AdminRpc, Error> {
let prev_key = self.garage.key_table.get(&EmptyKey, &query.key_id).await?;
if prev_key.is_some() {
return Err(Error::BadRequest(format!("Key {} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. Sorry.", query.key_id)));
}
let imported_key = Key::import(&query.key_id, &query.secret_key, &query.name);
self.garage.key_table.insert(&imported_key).await?;
self.key_info_result(imported_key).await
}
async fn key_info_result(&self, key: Key) -> Result<AdminRpc, Error> {
let mut relevant_buckets = HashMap::new();
for (id, _) in key
.state
.as_option()
.unwrap()
.authorized_buckets
.items()
.iter()
{
if let Some(b) = self.garage.bucket_table.get(&EmptyKey, id).await? {
relevant_buckets.insert(*id, b);
}
}
Ok(AdminRpc::KeyInfo(key, relevant_buckets))
}
async fn handle_migrate(self: &Arc<Self>, opt: MigrateOpt) -> Result<AdminRpc, Error> {
if !opt.yes {
return Err(Error::BadRequest(
"Please provide the --yes flag to initiate migration operation.".to_string(),
));
}
let m = Migrate {
garage: self.garage.clone(),
};
match opt.what {
MigrateWhat::Buckets050 => m.migrate_buckets050().await,
}?;
Ok(AdminRpc::Ok("Migration successfull.".into()))
} }
async fn handle_launch_repair(self: &Arc<Self>, opt: RepairOpt) -> Result<AdminRpc, Error> { async fn handle_launch_repair(self: &Arc<Self>, opt: RepairOpt) -> Result<AdminRpc, Error> {
if !opt.yes { if !opt.yes {
return Err(Error::BadRpc( return Err(Error::BadRequest(
"Please provide the --yes flag to initiate repair operations.".to_string(), "Please provide the --yes flag to initiate repair operations.".to_string(),
)); ));
} }
@ -356,7 +598,7 @@ impl AdminRpcHandler {
if failures.is_empty() { if failures.is_empty() {
Ok(AdminRpc::Ok("Repair launched on all nodes".to_string())) Ok(AdminRpc::Ok("Repair launched on all nodes".to_string()))
} else { } else {
Err(Error::Message(format!( Err(Error::BadRequest(format!(
"Could not launch repair on nodes: {:?} (launched successfully on other nodes)", "Could not launch repair on nodes: {:?} (launched successfully on other nodes)",
failures failures
))) )))
@ -466,7 +708,7 @@ impl AdminRpcHandler {
F: TableSchema + 'static, F: TableSchema + 'static,
R: TableReplication + 'static, R: TableReplication + 'static,
{ {
writeln!(to, "\nTable stats for {}", t.data.name).unwrap(); writeln!(to, "\nTable stats for {}", F::TABLE_NAME).unwrap();
if opt.detailed { if opt.detailed {
writeln!(to, " number of items: {}", t.data.store.len()).unwrap(); writeln!(to, " number of items: {}", t.data.store.len()).unwrap();
writeln!( writeln!(
@ -496,9 +738,10 @@ impl EndpointHandler<AdminRpc> for AdminRpcHandler {
match message { match message {
AdminRpc::BucketOperation(bo) => self.handle_bucket_cmd(bo).await, AdminRpc::BucketOperation(bo) => self.handle_bucket_cmd(bo).await,
AdminRpc::KeyOperation(ko) => self.handle_key_cmd(ko).await, AdminRpc::KeyOperation(ko) => self.handle_key_cmd(ko).await,
AdminRpc::Migrate(opt) => self.handle_migrate(opt.clone()).await,
AdminRpc::LaunchRepair(opt) => self.handle_launch_repair(opt.clone()).await, AdminRpc::LaunchRepair(opt) => self.handle_launch_repair(opt.clone()).await,
AdminRpc::Stats(opt) => self.handle_stats(opt.clone()).await, AdminRpc::Stats(opt) => self.handle_stats(opt.clone()).await,
_ => Err(Error::BadRpc("Invalid RPC".to_string())), m => Err(GarageError::unexpected_rpc_message(m).into()),
} }
} }
} }


@ -6,6 +6,8 @@ use garage_rpc::layout::*;
use garage_rpc::system::*; use garage_rpc::system::*;
use garage_rpc::*; use garage_rpc::*;
use garage_model::helper::error::Error as HelperError;
use crate::admin::*; use crate::admin::*;
use crate::cli::*; use crate::cli::*;
@ -14,14 +16,14 @@ pub async fn cli_command_dispatch(
system_rpc_endpoint: &Endpoint<SystemRpc, ()>, system_rpc_endpoint: &Endpoint<SystemRpc, ()>,
admin_rpc_endpoint: &Endpoint<AdminRpc, ()>, admin_rpc_endpoint: &Endpoint<AdminRpc, ()>,
rpc_host: NodeID, rpc_host: NodeID,
) -> Result<(), Error> { ) -> Result<(), HelperError> {
match cmd { match cmd {
Command::Status => cmd_status(system_rpc_endpoint, rpc_host).await, Command::Status => Ok(cmd_status(system_rpc_endpoint, rpc_host).await?),
Command::Node(NodeOperation::Connect(connect_opt)) => { Command::Node(NodeOperation::Connect(connect_opt)) => {
cmd_connect(system_rpc_endpoint, rpc_host, connect_opt).await Ok(cmd_connect(system_rpc_endpoint, rpc_host, connect_opt).await?)
} }
Command::Layout(layout_opt) => { Command::Layout(layout_opt) => {
cli_layout_command_dispatch(layout_opt, system_rpc_endpoint, rpc_host).await Ok(cli_layout_command_dispatch(layout_opt, system_rpc_endpoint, rpc_host).await?)
} }
Command::Bucket(bo) => { Command::Bucket(bo) => {
cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::BucketOperation(bo)).await cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::BucketOperation(bo)).await
@ -29,6 +31,9 @@ pub async fn cli_command_dispatch(
Command::Key(ko) => { Command::Key(ko) => {
cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::KeyOperation(ko)).await cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::KeyOperation(ko)).await
} }
Command::Migrate(mo) => {
cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::Migrate(mo)).await
}
Command::Repair(ro) => { Command::Repair(ro) => {
cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::LaunchRepair(ro)).await cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::LaunchRepair(ro)).await
} }
@ -146,7 +151,7 @@ pub async fn cmd_connect(
println!("Success."); println!("Success.");
Ok(()) Ok(())
} }
r => Err(Error::BadRpc(format!("Unexpected response: {:?}", r))), m => Err(Error::unexpected_rpc_message(m)),
} }
} }
@ -154,28 +159,22 @@ pub async fn cmd_admin(
rpc_cli: &Endpoint<AdminRpc, ()>, rpc_cli: &Endpoint<AdminRpc, ()>,
rpc_host: NodeID, rpc_host: NodeID,
args: AdminRpc, args: AdminRpc,
) -> Result<(), Error> { ) -> Result<(), HelperError> {
match rpc_cli.call(&rpc_host, &args, PRIO_NORMAL).await?? { match rpc_cli.call(&rpc_host, &args, PRIO_NORMAL).await?? {
AdminRpc::Ok(msg) => { AdminRpc::Ok(msg) => {
println!("{}", msg); println!("{}", msg);
} }
AdminRpc::BucketList(bl) => { AdminRpc::BucketList(bl) => {
println!("List of buckets:"); print_bucket_list(bl);
for bucket in bl {
println!("{}", bucket);
}
} }
AdminRpc::BucketInfo(bucket) => { AdminRpc::BucketInfo(bucket, rk) => {
print_bucket_info(&bucket); print_bucket_info(&bucket, &rk);
} }
AdminRpc::KeyList(kl) => { AdminRpc::KeyList(kl) => {
println!("List of keys:"); print_key_list(kl);
for key in kl {
println!("{}\t{}", key.0, key.1);
}
} }
AdminRpc::KeyInfo(key) => { AdminRpc::KeyInfo(key, rb) => {
print_key_info(&key); print_key_info(&key, &rb);
} }
r => { r => {
error!("Unexpected response: {:?}", r); error!("Unexpected response: {:?}", r);


@ -28,6 +28,11 @@ pub enum Command {
#[structopt(name = "key")] #[structopt(name = "key")]
Key(KeyOperation), Key(KeyOperation),
/// Run migrations from previous Garage version
/// (DO NOT USE WITHOUT READING FULL DOCUMENTATION)
#[structopt(name = "migrate")]
Migrate(MigrateOpt),
/// Start repair of node data /// Start repair of node data
#[structopt(name = "repair")] #[structopt(name = "repair")]
Repair(RepairOpt), Repair(RepairOpt),
@ -150,6 +155,14 @@ pub enum BucketOperation {
#[structopt(name = "delete")] #[structopt(name = "delete")]
Delete(DeleteBucketOpt), Delete(DeleteBucketOpt),
/// Alias bucket under new name
#[structopt(name = "alias")]
Alias(AliasBucketOpt),
/// Remove bucket alias
#[structopt(name = "unalias")]
Unalias(UnaliasBucketOpt),
/// Allow key to read or write to bucket /// Allow key to read or write to bucket
#[structopt(name = "allow")] #[structopt(name = "allow")]
Allow(PermBucketOpt), Allow(PermBucketOpt),
@ -193,6 +206,29 @@ pub struct DeleteBucketOpt {
pub yes: bool, pub yes: bool,
} }
#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub struct AliasBucketOpt {
/// Existing bucket name (its alias in global namespace or its full hex uuid)
pub existing_bucket: String,
/// New bucket name
pub new_name: String,
/// Make this alias local to the specified access key
#[structopt(long = "local")]
pub local: Option<String>,
}
#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub struct UnaliasBucketOpt {
/// Bucket name
pub name: String,
/// Unalias in bucket namespace local to this access key
#[structopt(long = "local")]
pub local: Option<String>,
}
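
For illustration (hedged: assuming the subcommands are invoked as `garage bucket alias` and `garage bucket unalias`, matching the structopt names above): `garage bucket alias existing-bucket extra-name` adds a global alias, `garage bucket alias --local <key_pattern> existing-bucket extra-name` adds one visible only in that key's namespace, and `garage bucket unalias` accepts the same --local flag to remove either kind.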
#[derive(Serialize, Deserialize, StructOpt, Debug)] #[derive(Serialize, Deserialize, StructOpt, Debug)]
pub struct PermBucketOpt { pub struct PermBucketOpt {
/// Access key name or ID /// Access key name or ID
@ -207,6 +243,11 @@ pub struct PermBucketOpt {
#[structopt(long = "write")] #[structopt(long = "write")]
pub write: bool, pub write: bool,
/// Allow/deny administrative operations
/// (such as deleting the bucket or changing its website configuration)
#[structopt(long = "owner")]
pub owner: bool,
/// Bucket name /// Bucket name
pub bucket: String, pub bucket: String,
} }
@ -283,6 +324,23 @@ pub struct KeyImportOpt {
pub name: String, pub name: String,
} }
#[derive(Serialize, Deserialize, StructOpt, Debug, Clone)]
pub struct MigrateOpt {
/// Confirm the launch of the migrate operation
#[structopt(long = "yes")]
pub yes: bool,
#[structopt(subcommand)]
pub what: MigrateWhat,
}
#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)]
pub enum MigrateWhat {
/// Migrate buckets and permissions from v0.5.0
#[structopt(name = "buckets050")]
Buckets050,
}
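
Concretely (hedged: the command shape is inferred from the structopt definitions above), migrating a cluster's metadata from the old model would be invoked as `garage migrate --yes buckets050`, with --yes playing the same confirmation role as in the repair and delete commands.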
#[derive(Serialize, Deserialize, StructOpt, Debug, Clone)] #[derive(Serialize, Deserialize, StructOpt, Debug, Clone)]
pub struct RepairOpt { pub struct RepairOpt {
/// Launch repair operation on all nodes /// Launch repair operation on all nodes


@ -1,33 +1,168 @@
use std::collections::HashMap;
use garage_util::crdt::*;
use garage_util::data::Uuid; use garage_util::data::Uuid;
use garage_util::error::*; use garage_util::error::*;
use garage_model::bucket_table::*; use garage_model::bucket_table::*;
use garage_model::key_table::*; use garage_model::key_table::*;
pub fn print_key_info(key: &Key) { pub fn print_bucket_list(bl: Vec<Bucket>) {
println!("Key name: {}", key.name.get()); println!("List of buckets:");
println!("Key ID: {}", key.key_id);
println!("Secret key: {}", key.secret_key); let mut table = vec![];
if key.deleted.get() { for bucket in bl {
println!("Key is deleted."); let aliases = bucket
} else { .aliases()
println!("Authorized buckets:"); .iter()
for (b, _, perm) in key.authorized_buckets.items().iter() { .filter(|(_, _, active)| *active)
println!("- {} R:{} W:{}", b, perm.allow_read, perm.allow_write); .map(|(name, _, _)| name.to_string())
.collect::<Vec<_>>();
let local_aliases_n = match bucket
.local_aliases()
.iter()
.filter(|(_, _, active)| *active)
.count()
{
0 => "".into(),
1 => "1 local alias".into(),
n => format!("{} local aliases", n),
};
table.push(format!(
"\t{}\t{}\t{}",
aliases.join(","),
local_aliases_n,
hex::encode(bucket.id)
));
}
format_table(table);
}
pub fn print_key_list(kl: Vec<(String, String)>) {
println!("List of keys:");
let mut table = vec![];
for key in kl {
table.push(format!("\t{}\t{}", key.0, key.1));
}
format_table(table);
}
pub fn print_key_info(key: &Key, relevant_buckets: &HashMap<Uuid, Bucket>) {
let bucket_global_aliases = |b: &Uuid| {
if let Some(bucket) = relevant_buckets.get(b) {
if let Some(p) = bucket.state.as_option() {
return p
.aliases
.items()
.iter()
.filter(|(_, _, active)| *active)
.map(|(a, _, _)| a.clone())
.collect::<Vec<_>>()
.join(", ");
}
}
"".to_string()
};
match &key.state {
Deletable::Present(p) => {
println!("Key name: {}", p.name.get());
println!("Key ID: {}", key.key_id);
println!("Secret key: {}", p.secret_key);
println!("Can create buckets: {}", p.allow_create_bucket.get());
println!("\nKey-specific bucket aliases:");
let mut table = vec![];
for (alias_name, _, alias) in p.local_aliases.items().iter() {
if let Some(bucket_id) = alias {
table.push(format!(
"\t{}\t{}\t{}",
alias_name,
bucket_global_aliases(bucket_id),
hex::encode(bucket_id)
));
}
}
format_table(table);
println!("\nAuthorized buckets:");
let mut table = vec![];
for (bucket_id, perm) in p.authorized_buckets.items().iter() {
let rflag = if perm.allow_read { "R" } else { " " };
let wflag = if perm.allow_write { "W" } else { " " };
let oflag = if perm.allow_owner { "O" } else { " " };
let local_aliases = p
.local_aliases
.items()
.iter()
.filter(|(_, _, a)| *a == Some(*bucket_id))
.map(|(a, _, _)| a.clone())
.collect::<Vec<_>>()
.join(", ");
table.push(format!(
"\t{}{}{}\t{}\t{}\t{:?}",
rflag,
wflag,
oflag,
bucket_global_aliases(bucket_id),
local_aliases,
bucket_id
));
}
format_table(table);
}
Deletable::Deleted => {
println!("Key {} is deleted.", key.key_id);
} }
} }
} }
pub fn print_bucket_info(bucket: &Bucket) { pub fn print_bucket_info(bucket: &Bucket, relevant_keys: &HashMap<String, Key>) {
println!("Bucket name: {}", bucket.name); let key_name = |k| {
match bucket.state.get() { relevant_keys
BucketState::Deleted => println!("Bucket is deleted."), .get(k)
BucketState::Present(p) => { .map(|k| k.params().unwrap().name.get().as_str())
println!("Authorized keys:"); .unwrap_or("<deleted>")
for (k, _, perm) in p.authorized_keys.items().iter() { };
println!("- {} R:{} W:{}", k, perm.allow_read, perm.allow_write);
println!("Bucket: {}", hex::encode(bucket.id));
match &bucket.state {
Deletable::Deleted => println!("Bucket is deleted."),
Deletable::Present(p) => {
println!("Website access: {}", p.website_config.get().is_some());
println!("\nGlobal aliases:");
for (alias, _, active) in p.aliases.items().iter() {
if *active {
println!(" {}", alias);
}
} }
println!("Website access: {}", p.website.get());
println!("\nKey-specific aliases:");
let mut table = vec![];
for ((key_id, alias), _, active) in p.local_aliases.items().iter() {
if *active {
table.push(format!("\t{} ({})\t{}", key_id, key_name(key_id), alias));
}
}
format_table(table);
println!("\nAuthorized keys:");
let mut table = vec![];
for (k, perm) in p.authorized_keys.items().iter() {
let rflag = if perm.allow_read { "R" } else { " " };
let wflag = if perm.allow_write { "W" } else { " " };
let oflag = if perm.allow_owner { "O" } else { " " };
table.push(format!(
"\t{}{}{}\t{}\t{}",
rflag,
wflag,
oflag,
k,
key_name(k)
));
}
format_table(table);
} }
}; };
} }


@ -22,6 +22,8 @@ use garage_util::error::*;
use garage_rpc::system::*; use garage_rpc::system::*;
use garage_rpc::*; use garage_rpc::*;
use garage_model::helper::error::Error as HelperError;
use admin::*; use admin::*;
use cli::*; use cli::*;
@ -136,5 +138,9 @@ async fn cli_command(opt: Opt) -> Result<(), Error> {
let system_rpc_endpoint = netapp.endpoint::<SystemRpc, ()>(SYSTEM_RPC_PATH.into()); let system_rpc_endpoint = netapp.endpoint::<SystemRpc, ()>(SYSTEM_RPC_PATH.into());
let admin_rpc_endpoint = netapp.endpoint::<AdminRpc, ()>(ADMIN_RPC_PATH.into()); let admin_rpc_endpoint = netapp.endpoint::<AdminRpc, ()>(ADMIN_RPC_PATH.into());
cli_command_dispatch(opt.cmd, &system_rpc_endpoint, &admin_rpc_endpoint, id).await match cli_command_dispatch(opt.cmd, &system_rpc_endpoint, &admin_rpc_endpoint, id).await {
Err(HelperError::Internal(i)) => Err(Error::Message(format!("Internal error: {}", i))),
Err(HelperError::BadRequest(b)) => Err(Error::Message(b)),
Ok(x) => Ok(x),
}
} }


@ -77,7 +77,7 @@ impl Repair {
let object = self let object = self
.garage .garage
.object_table .object_table
.get(&version.bucket, &version.key) .get(&version.bucket_id, &version.key)
.await?; .await?;
let version_exists = match object { let version_exists = match object {
Some(o) => o Some(o) => o
@ -92,7 +92,7 @@ impl Repair {
.version_table .version_table
.insert(&Version::new( .insert(&Version::new(
version.uuid, version.uuid,
version.bucket, version.bucket_id,
version.key, version.key,
true, true,
)) ))


@ -1,6 +1,6 @@
[package] [package]
name = "garage_model" name = "garage_model"
version = "0.5.0" version = "0.6.0"
authors = ["Alex Auvolat <alex@adnab.me>"] authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"
@ -14,12 +14,14 @@ path = "lib.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies] [dependencies]
garage_rpc = { version = "0.5.0", path = "../rpc" } garage_rpc = { version = "0.6.0", path = "../rpc" }
garage_table = { version = "0.5.0", path = "../table" } garage_table = { version = "0.6.0", path = "../table" }
garage_util = { version = "0.5.0", path = "../util" } garage_util = { version = "0.6.0", path = "../util" }
garage_model_050 = { package = "garage_model", version = "0.5.1" }
async-trait = "0.1.7" async-trait = "0.1.7"
arc-swap = "1.0" arc-swap = "1.0"
err-derive = "0.3"
hex = "0.4" hex = "0.4"
log = "0.4" log = "0.4"
rand = "0.8" rand = "0.8"

View file

@ -594,10 +594,8 @@ impl BlockManager {
need_nodes.push(*node); need_nodes.push(*node);
} }
} }
_ => { m => {
return Err(Error::Message( return Err(Error::unexpected_rpc_message(m));
"Unexpected response to NeedBlockQuery RPC".to_string(),
));
} }
} }
} }
@ -730,7 +728,7 @@ impl EndpointHandler<BlockRpc> for BlockManager {
BlockRpc::PutBlock { hash, data } => self.write_block(hash, data).await, BlockRpc::PutBlock { hash, data } => self.write_block(hash, data).await,
BlockRpc::GetBlock(h) => self.read_block(h).await, BlockRpc::GetBlock(h) => self.read_block(h).await,
BlockRpc::NeedBlockQuery(h) => self.need_block(h).await.map(BlockRpc::NeedBlockReply), BlockRpc::NeedBlockQuery(h) => self.need_block(h).await.map(BlockRpc::NeedBlockReply),
_ => Err(Error::BadRpc("Unexpected RPC message".to_string())), m => Err(Error::unexpected_rpc_message(m)),
} }
} }
} }

View file

@ -44,6 +44,8 @@ pub struct BlockRefTable {
} }
impl TableSchema for BlockRefTable { impl TableSchema for BlockRefTable {
const TABLE_NAME: &'static str = "block_ref";
type P = Hash; type P = Hash;
type S = Uuid; type S = Uuid;
type E = BlockRef; type E = BlockRef;

View file

@ -0,0 +1,93 @@
use serde::{Deserialize, Serialize};
use garage_util::data::*;
use garage_table::crdt::*;
use garage_table::*;
/// The bucket alias table holds the names given to buckets
/// in the global namespace.
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
pub struct BucketAlias {
name: String,
pub state: crdt::Lww<Option<Uuid>>,
}
impl BucketAlias {
pub fn new(name: String, ts: u64, bucket_id: Option<Uuid>) -> Option<Self> {
if !is_valid_bucket_name(&name) {
None
} else {
Some(BucketAlias {
name,
state: crdt::Lww::raw(ts, bucket_id),
})
}
}
pub fn is_deleted(&self) -> bool {
self.state.get().is_none()
}
pub fn name(&self) -> &str {
&self.name
}
}
impl Crdt for BucketAlias {
fn merge(&mut self, o: &Self) {
self.state.merge(&o.state);
}
}
impl Entry<EmptyKey, String> for BucketAlias {
fn partition_key(&self) -> &EmptyKey {
&EmptyKey
}
fn sort_key(&self) -> &String {
&self.name
}
}
pub struct BucketAliasTable;
impl TableSchema for BucketAliasTable {
const TABLE_NAME: &'static str = "bucket_alias";
type P = EmptyKey;
type S = String;
type E = BucketAlias;
type Filter = DeletedFilter;
fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool {
filter.apply(entry.is_deleted())
}
}
/// Check if a bucket name is valid.
///
/// The requirements are listed here:
///
/// <https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html>
///
/// In the case of Garage, a bucket name must additionally not be a
/// hex-encoded 32-byte string; such strings are already excluded by the
/// maximum length of 63 bytes given in the spec.
pub fn is_valid_bucket_name(n: &str) -> bool {
// Bucket names must be between 3 and 63 characters
n.len() >= 3 && n.len() <= 63
// Bucket names must be composed of lowercase letters, numbers,
// dashes and dots
&& n.chars().all(|c| matches!(c, '.' | '-' | 'a'..='z' | '0'..='9'))
// Bucket names must start and end with a letter or a number
&& !n.starts_with(&['-', '.'][..])
&& !n.ends_with(&['-', '.'][..])
// Bucket names must not be formatted as an IP address
&& n.parse::<std::net::IpAddr>().is_err()
// Bucket names must not start with "xn--"
&& !n.starts_with("xn--")
// Bucket names must not end with "-s3alias"
&& !n.ends_with("-s3alias")
}
/// Error message to return for invalid bucket names
pub const INVALID_BUCKET_NAME_MESSAGE: &str = "Invalid bucket name. See AWS documentation for constraints on S3 bucket names:\nhttps://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html";
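
For illustration, a minimal sketch of how the rules above classify a few sample names (the samples are made up; only `is_valid_bucket_name` comes from this file):

```rust
assert!(is_valid_bucket_name("my-bucket-2022"));
assert!(!is_valid_bucket_name("ab")); // shorter than 3 characters
assert!(!is_valid_bucket_name("My-Bucket")); // uppercase is not allowed
assert!(!is_valid_bucket_name("-bucket")); // must not start with '-' or '.'
assert!(!is_valid_bucket_name("192.168.0.1")); // parses as an IP address
assert!(!is_valid_bucket_name("xn--name")); // reserved "xn--" prefix
assert!(!is_valid_bucket_name("backup-s3alias")); // reserved "-s3alias" suffix
```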

View file

@ -2,8 +2,10 @@ use serde::{Deserialize, Serialize};
use garage_table::crdt::Crdt; use garage_table::crdt::Crdt;
use garage_table::*; use garage_table::*;
use garage_util::data::*;
use garage_util::time::*;
use crate::key_table::PermissionSet; use crate::permission::BucketKeyPerm;
/// A bucket is a collection of objects /// A bucket is a collection of objects
/// ///
lx marked this conversation as resolved

this is the equivalent of the garage default config in 0.5.0

<?xml version="1.0" encoding="UTF-8"?>
<WebsiteConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  <IndexDocument>
    <Suffix>index.html</Suffix>
  </IndexDocument>
</WebsiteConfiguration>

I'm still not convinced this should be XML, not for performance reasons, but because I don't think we should store raw S3 payloads in general (and I generally don't like XML)

@ -12,57 +14,66 @@ use crate::key_table::PermissionSet;
/// - A bucket has 2 states, Present or Deleted and parameters make sense only if present. /// - A bucket has 2 states, Present or Deleted and parameters make sense only if present.
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] #[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
pub struct Bucket { pub struct Bucket {
/// Name of the bucket /// ID of the bucket
pub name: String, pub id: Uuid,
/// State, and configuration if not deleted, of the bucket /// State, and configuration if not deleted, of the bucket
pub state: crdt::Lww<BucketState>, pub state: crdt::Deletable<BucketParams>,
}
/// State of a bucket
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
pub enum BucketState {
/// The bucket is deleted
Deleted,
/// The bucket exists
Present(BucketParams),
}
impl Crdt for BucketState {
fn merge(&mut self, o: &Self) {
match o {
BucketState::Deleted => *self = BucketState::Deleted,
BucketState::Present(other_params) => {
if let BucketState::Present(params) = self {
params.merge(other_params);
}
}
}
}
} }
/// Configuration for a bucket /// Configuration for a bucket
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] #[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
pub struct BucketParams { pub struct BucketParams {
/// Bucket's creation date
pub creation_date: u64,
/// Map of key with access to the bucket, and what kind of access they give /// Map of key with access to the bucket, and what kind of access they give
pub authorized_keys: crdt::LwwMap<String, PermissionSet>, pub authorized_keys: crdt::Map<String, BucketKeyPerm>,
/// Is the bucket served as http /// Whether this bucket is allowed for website access
pub website: crdt::Lww<bool>, /// (under all of its global alias names),
/// and if so, the website configuration XML document
pub website_config: crdt::Lww<Option<WebsiteConfig>>,
/// Map of aliases that are or have been given to this bucket
/// in the global namespace
lx marked this conversation as resolved

I'd argue only website_config should exist, with a Some(_) generated automatically when migrating from 0.5 with website enabled.
I also think this should probably contain a WebsiteConfiguration (or some flattened form of it), so as not to require parsing XML on each web request; however, doing so has downsides if we add things to this struct in the future.


Concerning the first point (removing website_access and storing only the config as an option), I think yes that's probably better because it's closer to how Garage works currently. I was hesitant because in AWS the permissions and the website config seem to be handled separately, but Garage has its own logic and doesn't implement AWS's ACLs, so yes we can simplify here for now and come back to it later when/if we work on ACLs.

Concerning storing a WebsiteConfiguration instead of a ByteBuf, there are upsides and downsides.

Upsides:

  • don't parse XML at every web request
  • the MessagePack representation is probably smaller

Downsides:

  • we have to move the definition of WebsiteConfiguration into the model crate (so far we have kept the structs that manage Garage's internal state strictly separate from the structs that represent official items of the S3 spec); if we don't want to do this, the alternative is to have our own struct that keeps only the relevant aspects of a WebsiteConfiguration, but that adds more complexity
  • we would then parse the MessagePack website configuration on every access to this entry, and not just when serving a website call

Alternatives:

  • if we store XML, we can cache the parsed XML in the web/ module for a nominal duration of a few seconds to avoid parsing at every request
  • if handling the website config ends up taking too much CPU time, we can move it to a separate table
  • we could also implement optimisations in the table/ module to keep a cache of deserialized versions of stuff stored in the table

In other words, storing a struct here 1/ has disadvantages in terms of keeping a clean architecture, and 2/ looks to me a bit like a case of premature optimization (we can have a separate reflection later concerning how we approach performance on Garage tables, which is a whole topic of its own with multiple aspects such as caching, minimizing representation sizes, splitting stuff into separate tables, etc).

/// (not authoritative: this is just used as an indication to
/// map back to aliases when doing ListBuckets)
pub aliases: crdt::LwwMap<String, bool>,
/// Map of aliases that are or have been given to this bucket
/// in namespaces local to keys
/// key = (access key id, alias name)
pub local_aliases: crdt::LwwMap<(String, String), bool>,
}
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct WebsiteConfig {
pub index_document: String,
pub error_document: Option<String>,
} }
impl BucketParams { impl BucketParams {
/// Create an empty BucketParams with no authorized keys and no website access /// Create an empty BucketParams with no authorized keys and no website access
pub fn new() -> Self { pub fn new() -> Self {
BucketParams { BucketParams {
authorized_keys: crdt::LwwMap::new(), creation_date: now_msec(),
website: crdt::Lww::new(false), authorized_keys: crdt::Map::new(),
website_config: crdt::Lww::new(None),
aliases: crdt::LwwMap::new(),
local_aliases: crdt::LwwMap::new(),
} }
} }
} }
impl Crdt for BucketParams { impl Crdt for BucketParams {
fn merge(&mut self, o: &Self) { fn merge(&mut self, o: &Self) {
self.creation_date = std::cmp::min(self.creation_date, o.creation_date);
self.authorized_keys.merge(&o.authorized_keys); self.authorized_keys.merge(&o.authorized_keys);
self.website.merge(&o.website); self.website_config.merge(&o.website_config);
self.aliases.merge(&o.aliases);
self.local_aliases.merge(&o.local_aliases);
}
}
impl Default for Bucket {
fn default() -> Self {
Self::new()
} }
} }
@ -74,34 +85,53 @@ impl Default for BucketParams {
impl Bucket { impl Bucket {
/// Initializes a new instance of the Bucket struct /// Initializes a new instance of the Bucket struct
pub fn new(name: String) -> Self { pub fn new() -> Self {
Bucket { Bucket {
name, id: gen_uuid(),
state: crdt::Lww::new(BucketState::Present(BucketParams::new())), state: crdt::Deletable::present(BucketParams::new()),
} }
} }
/// Returns true if this represents a deleted bucket /// Returns true if this represents a deleted bucket
pub fn is_deleted(&self) -> bool { pub fn is_deleted(&self) -> bool {
*self.state.get() == BucketState::Deleted self.state.is_deleted()
}
/// Returns an option representing the parameters (None if in deleted state)
pub fn params(&self) -> Option<&BucketParams> {
self.state.as_option()
}
/// Mutable version of `.params()`
pub fn params_mut(&mut self) -> Option<&mut BucketParams> {
self.state.as_option_mut()
} }
/// Return the list of authorized keys, when each was updated, and the permission associated to /// Return the list of authorized keys, when each was updated, and the permission associated to
/// the key /// the key
pub fn authorized_keys(&self) -> &[(String, u64, PermissionSet)] { pub fn authorized_keys(&self) -> &[(String, BucketKeyPerm)] {
match self.state.get() { self.params()
BucketState::Deleted => &[], .map(|s| s.authorized_keys.items())
BucketState::Present(state) => state.authorized_keys.items(), .unwrap_or(&[])
} }
pub fn aliases(&self) -> &[(String, u64, bool)] {
self.params().map(|s| s.aliases.items()).unwrap_or(&[])
}
pub fn local_aliases(&self) -> &[((String, String), u64, bool)] {
self.params()
.map(|s| s.local_aliases.items())
.unwrap_or(&[])
} }
} }
impl Entry<EmptyKey, String> for Bucket { impl Entry<EmptyKey, Uuid> for Bucket {
fn partition_key(&self) -> &EmptyKey { fn partition_key(&self) -> &EmptyKey {
&EmptyKey &EmptyKey
} }
fn sort_key(&self) -> &String { fn sort_key(&self) -> &Uuid {
&self.name &self.id
} }
} }
@ -114,8 +144,10 @@ impl Crdt for Bucket {
pub struct BucketTable; pub struct BucketTable;
impl TableSchema for BucketTable { impl TableSchema for BucketTable {
const TABLE_NAME: &'static str = "bucket_v2";
type P = EmptyKey; type P = EmptyKey;
type S = String; type S = Uuid;
type E = Bucket; type E = Bucket;
type Filter = DeletedFilter; type Filter = DeletedFilter;
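
To summarize the model introduced in this file, a small hedged sketch of the bucket lifecycle (types as defined above; the sequence is illustrative, not taken from the PR):

```rust
// A fresh bucket: random UUID, Present parameters, no aliases yet.
let mut bucket = Bucket::new();
assert!(!bucket.is_deleted());
assert!(bucket.params().is_some());

// Deletion is a CRDT state transition on Deletable, not a boolean flag,
// so it merges deterministically with concurrent updates.
bucket.state = crdt::Deletable::Deleted;
assert!(bucket.is_deleted());
assert!(bucket.params().is_none());
```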

View file

@ -14,7 +14,9 @@ use garage_table::*;
use crate::block::*; use crate::block::*;
use crate::block_ref_table::*; use crate::block_ref_table::*;
use crate::bucket_alias_table::*;
use crate::bucket_table::*; use crate::bucket_table::*;
use crate::helper;
use crate::key_table::*; use crate::key_table::*;
use crate::object_table::*; use crate::object_table::*;
use crate::version_table::*; use crate::version_table::*;
@ -35,6 +37,8 @@ pub struct Garage {
/// Table containing informations about buckets /// Table containing informations about buckets
pub bucket_table: Arc<Table<BucketTable, TableFullReplication>>, pub bucket_table: Arc<Table<BucketTable, TableFullReplication>>,
/// Table containing informations about bucket aliases
pub bucket_alias_table: Arc<Table<BucketAliasTable, TableFullReplication>>,
/// Table containing informations about api keys /// Table containing informations about api keys
pub key_table: Arc<Table<KeyTable, TableFullReplication>>, pub key_table: Arc<Table<KeyTable, TableFullReplication>>,
@ -93,7 +97,6 @@ impl Garage {
meta_rep_param.clone(), meta_rep_param.clone(),
system.clone(), system.clone(),
&db, &db,
"block_ref".to_string(),
); );
info!("Initialize version_table..."); info!("Initialize version_table...");
@ -105,7 +108,6 @@ impl Garage {
meta_rep_param.clone(), meta_rep_param.clone(),
system.clone(), system.clone(),
&db, &db,
"version".to_string(),
); );
info!("Initialize object_table..."); info!("Initialize object_table...");
@ -117,26 +119,21 @@ impl Garage {
meta_rep_param, meta_rep_param,
system.clone(), system.clone(),
&db, &db,
"object".to_string(),
); );
info!("Initialize bucket_table..."); info!("Initialize bucket_table...");
let bucket_table = Table::new( let bucket_table = Table::new(BucketTable, control_rep_param.clone(), system.clone(), &db);
BucketTable,
info!("Initialize bucket_alias_table...");
let bucket_alias_table = Table::new(
BucketAliasTable,
control_rep_param.clone(), control_rep_param.clone(),
system.clone(), system.clone(),
&db, &db,
"bucket".to_string(),
); );
info!("Initialize key_table_table..."); info!("Initialize key_table_table...");
let key_table = Table::new( let key_table = Table::new(KeyTable, control_rep_param, system.clone(), &db);
KeyTable,
control_rep_param,
system.clone(),
&db,
"key".to_string(),
);
info!("Initialize Garage..."); info!("Initialize Garage...");
let garage = Arc::new(Self { let garage = Arc::new(Self {
@ -146,6 +143,7 @@ impl Garage {
system, system,
block_manager, block_manager,
bucket_table, bucket_table,
bucket_alias_table,
key_table, key_table,
object_table, object_table,
version_table, version_table,
@ -163,4 +161,8 @@ impl Garage {
pub fn break_reference_cycles(&self) { pub fn break_reference_cycles(&self) {
self.block_manager.garage.swap(None); self.block_manager.garage.swap(None);
} }
pub fn bucket_helper(&self) -> helper::bucket::BucketHelper {
helper::bucket::BucketHelper(self)
}
} }

472
src/model/helper/bucket.rs Normal file
View file

@ -0,0 +1,472 @@
use garage_table::util::EmptyKey;
use garage_util::crdt::*;
use garage_util::data::*;
use garage_util::error::{Error as GarageError, OkOrMessage};
use garage_util::time::*;
use crate::bucket_alias_table::*;
use crate::bucket_table::*;
use crate::garage::Garage;
use crate::helper::error::*;
use crate::key_table::{Key, KeyFilter};
use crate::permission::BucketKeyPerm;
pub struct BucketHelper<'a>(pub(crate) &'a Garage);
#[allow(clippy::ptr_arg)]
impl<'a> BucketHelper<'a> {
pub async fn resolve_global_bucket_name(
&self,
bucket_name: &String,
) -> Result<Option<Uuid>, Error> {
// Bucket names in Garage are aliases, true bucket identifiers
// are 32-byte UUIDs. This function resolves bucket names into
// their full identifier by looking up in the bucket_alias_table.
// This function also allows buckets to be identified by their
// full UUID (hex-encoded). Here, if the name to be resolved is a
// hex string of the correct length, it is directly parsed as a bucket
// identifier which is returned. There is no risk of this conflicting
// with an actual bucket name: bucket names are max 63 chars long by
// the AWS spec, and hex-encoded UUIDs are 64 chars long.
let hexbucket = hex::decode(bucket_name.as_str())
.ok()
.map(|by| Uuid::try_from(&by))
.flatten();
if let Some(bucket_id) = hexbucket {
Ok(self
.0
.bucket_table
.get(&EmptyKey, &bucket_id)
.await?
.filter(|x| !x.state.is_deleted())
.map(|_| bucket_id))
} else {
Ok(self
.0
.bucket_alias_table
.get(&EmptyKey, bucket_name)
.await?
.map(|x| *x.state.get())
.flatten())
}
}
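
The comment above rests on a length argument: a hex-encoded 32-byte identifier is exactly 64 characters, one more than the 63-character cap on bucket names, so a string can never be both. A standalone sketch of the disambiguation (the helper function below is hypothetical, not part of this PR):

```rust
// Hypothetical illustration of the name/ID disambiguation described above.
fn could_be_bucket_id(s: &str) -> bool {
    s.len() == 64 && s.chars().all(|c| c.is_ascii_hexdigit())
}

fn main() {
    let id_like = "ab".repeat(32); // 64 hex characters: treated as a bucket ID
    assert!(could_be_bucket_id(&id_like));
    assert!(!could_be_bucket_id("my-bucket")); // resolved through the alias table
}
```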
/// Returns a Bucket if it is present in bucket table,
/// even if it is in deleted state. Querying a non-existing
/// bucket ID returns an internal error.
pub async fn get_internal_bucket(&self, bucket_id: Uuid) -> Result<Bucket, Error> {
Ok(self
.0
.bucket_table
.get(&EmptyKey, &bucket_id)
.await?
.ok_or_message(format!("Bucket {:?} does not exist", bucket_id))?)
}
/// Returns a Bucket if it is present in bucket table,
/// only if it is in non-deleted state.
/// Querying a non-existing bucket ID or a deleted bucket
/// returns a bad request error.
pub async fn get_existing_bucket(&self, bucket_id: Uuid) -> Result<Bucket, Error> {
self.0
.bucket_table
.get(&EmptyKey, &bucket_id)
.await?
.filter(|b| !b.is_deleted())
.ok_or_bad_request(format!(
"Bucket {:?} does not exist or has been deleted",
bucket_id
))
}
/// Returns a Key if it is present in key table,
/// even if it is in deleted state. Querying a non-existing
/// key ID returns an internal error.
pub async fn get_internal_key(&self, key_id: &String) -> Result<Key, Error> {
Ok(self
.0
.key_table
.get(&EmptyKey, key_id)
.await?
.ok_or_message(format!("Key {} does not exist", key_id))?)
}
/// Returns a Key if it is present in key table,
/// only if it is in non-deleted state.
/// Querying a non-existing key ID or a deleted key
/// returns a bad request error.
pub async fn get_existing_key(&self, key_id: &String) -> Result<Key, Error> {
self.0
.key_table
.get(&EmptyKey, key_id)
.await?
.filter(|b| !b.state.is_deleted())
.ok_or_bad_request(format!("Key {} does not exist or has been deleted", key_id))
}
/// Returns a Key if it is present in key table,
/// looking it up by key ID or by a match on its name,
/// only if it is in non-deleted state.
/// Querying a non-existing key ID or a deleted key
/// returns a bad request error.
pub async fn get_existing_matching_key(&self, pattern: &str) -> Result<Key, Error> {
let candidates = self
.0
.key_table
.get_range(
&EmptyKey,
None,
Some(KeyFilter::MatchesAndNotDeleted(pattern.to_string())),
10,
)
.await?
.into_iter()
.collect::<Vec<_>>();
if candidates.len() != 1 {
Err(Error::BadRequest(format!(
"{} matching keys",
candidates.len()
)))
} else {
Ok(candidates.into_iter().next().unwrap())
}
}
/// Sets a new alias for a bucket in global namespace.
/// This function fails if:
/// - alias name is not valid according to S3 spec
/// - bucket does not exist or is deleted
/// - alias already exists and points to another bucket
pub async fn set_global_bucket_alias(
&self,
bucket_id: Uuid,
alias_name: &String,
) -> Result<(), Error> {
if !is_valid_bucket_name(alias_name) {
return Err(Error::BadRequest(format!(
"{}: {}",
alias_name, INVALID_BUCKET_NAME_MESSAGE
)));
}
let mut bucket = self.get_existing_bucket(bucket_id).await?;
let alias = self.0.bucket_alias_table.get(&EmptyKey, alias_name).await?;
if let Some(existing_alias) = alias.as_ref() {
if let Some(p_bucket) = existing_alias.state.get() {
if *p_bucket != bucket_id {
return Err(Error::BadRequest(format!(
"Alias {} already exists and points to different bucket: {:?}",
alias_name, p_bucket
)));
}
}
}
// Checks ok, add alias
let mut bucket_p = bucket.state.as_option_mut().unwrap();
let alias_ts = increment_logical_clock_2(
bucket_p.aliases.get_timestamp(alias_name),
alias.as_ref().map(|a| a.state.timestamp()).unwrap_or(0),
);
// ---- timestamp-ensured causality barrier ----
// writes are now done and all writes use timestamp alias_ts
let alias = match alias {
None => BucketAlias::new(alias_name.clone(), alias_ts, Some(bucket_id))
.ok_or_bad_request(format!("{}: {}", alias_name, INVALID_BUCKET_NAME_MESSAGE))?,
Some(mut a) => {
a.state = Lww::raw(alias_ts, Some(bucket_id));
a
}
};
self.0.bucket_alias_table.insert(&alias).await?;
bucket_p.aliases = LwwMap::raw_item(alias_name.clone(), alias_ts, true);
self.0.bucket_table.insert(&bucket).await?;
Ok(())
}
/// Unsets an alias for a bucket in global namespace.
/// This function fails if:
/// - bucket does not exist or is deleted
/// - alias does not exist or maps to another bucket (-> internal error)
/// - bucket has no other aliases (global or local)
pub async fn unset_global_bucket_alias(
&self,
bucket_id: Uuid,
alias_name: &String,
) -> Result<(), Error> {
let mut bucket = self.get_existing_bucket(bucket_id).await?;
let mut bucket_state = bucket.state.as_option_mut().unwrap();
let mut alias = self
.0
.bucket_alias_table
.get(&EmptyKey, alias_name)
.await?
.filter(|a| a.state.get().map(|x| x == bucket_id).unwrap_or(false))
.ok_or_message(format!(
"Internal error: alias not found or does not point to bucket {:?}",
bucket_id
))?;
let has_other_global_aliases = bucket_state
.aliases
.items()
.iter()
.any(|(name, _, active)| name != alias_name && *active);
let has_other_local_aliases = bucket_state
.local_aliases
.items()
.iter()
.any(|(_, _, active)| *active);
if !has_other_global_aliases && !has_other_local_aliases {
return Err(Error::BadRequest(format!("Bucket {} doesn't have other aliases, please delete it instead of just unaliasing.", alias_name)));
}
// Checks ok, remove alias
let alias_ts = increment_logical_clock_2(
alias.state.timestamp(),
bucket_state.aliases.get_timestamp(alias_name),
);
// ---- timestamp-ensured causality barrier ----
// writes are now done and all writes use timestamp alias_ts
alias.state = Lww::raw(alias_ts, None);
self.0.bucket_alias_table.insert(&alias).await?;
bucket_state.aliases = LwwMap::raw_item(alias_name.clone(), alias_ts, false);
self.0.bucket_table.insert(&bucket).await?;
Ok(())
}
/// Ensures a bucket does not have a certain global alias.
/// Contrary to unset_global_bucket_alias, this does not
/// fail on any condition other than:
/// - bucket cannot be found (it's fine if it is in deleted state)
/// - alias cannot be found (it's fine if it points to nothing or
/// to another bucket)
pub async fn purge_global_bucket_alias(
&self,
bucket_id: Uuid,
alias_name: &String,
) -> Result<(), Error> {
let mut bucket = self.get_internal_bucket(bucket_id).await?;
let mut alias = self
.0
.bucket_alias_table
.get(&EmptyKey, alias_name)
.await?
.ok_or_message(format!("Alias {} not found", alias_name))?;
// Checks ok, remove alias
let alias_ts = match bucket.state.as_option() {
Some(bucket_state) => increment_logical_clock_2(
alias.state.timestamp(),
bucket_state.aliases.get_timestamp(alias_name),
),
None => increment_logical_clock(alias.state.timestamp()),
};
// ---- timestamp-ensured causality barrier ----
// writes are now done and all writes use timestamp alias_ts
if alias.state.get() == &Some(bucket_id) {
alias.state = Lww::raw(alias_ts, None);
self.0.bucket_alias_table.insert(&alias).await?;
}
if let Some(mut bucket_state) = bucket.state.as_option_mut() {
bucket_state.aliases = LwwMap::raw_item(alias_name.clone(), alias_ts, false);
self.0.bucket_table.insert(&bucket).await?;
}
Ok(())
}
/// Sets a new alias for a bucket in the local namespace of a key.
/// This function fails if:
/// - alias name is not valid according to S3 spec
/// - bucket does not exist or is deleted
/// - key does not exist or is deleted
/// - alias already exists and points to another bucket
pub async fn set_local_bucket_alias(
&self,
bucket_id: Uuid,
key_id: &String,
alias_name: &String,
) -> Result<(), Error> {
if !is_valid_bucket_name(alias_name) {
return Err(Error::BadRequest(format!(
"{}: {}",
alias_name, INVALID_BUCKET_NAME_MESSAGE
)));
}
let mut bucket = self.get_existing_bucket(bucket_id).await?;
let mut key = self.get_existing_key(key_id).await?;
let mut key_param = key.state.as_option_mut().unwrap();
if let Some(Some(existing_alias)) = key_param.local_aliases.get(alias_name) {
if *existing_alias != bucket_id {
return Err(Error::BadRequest(format!("Alias {} already exists in namespace of key {} and points to different bucket: {:?}", alias_name, key.key_id, existing_alias)));
}
}
// Checks ok, add alias
let mut bucket_p = bucket.state.as_option_mut().unwrap();
let bucket_p_local_alias_key = (key.key_id.clone(), alias_name.clone());
// Calculate the timestamp to assign to this aliasing in the two local_aliases maps
// (the one from key to bucket, and the reverse one stored in the bucket itself)
// so that merges on both maps in case of a concurrent operation resolve
// to the same alias being set
let alias_ts = increment_logical_clock_2(
key_param.local_aliases.get_timestamp(alias_name),
bucket_p
.local_aliases
.get_timestamp(&bucket_p_local_alias_key),
);
// ---- timestamp-ensured causality barrier ----
// writes are now done and all writes use timestamp alias_ts
key_param.local_aliases = LwwMap::raw_item(alias_name.clone(), alias_ts, Some(bucket_id));
self.0.key_table.insert(&key).await?;
bucket_p.local_aliases = LwwMap::raw_item(bucket_p_local_alias_key, alias_ts, true);
self.0.bucket_table.insert(&bucket).await?;
Ok(())
}
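
The "causality barrier" comments rely on picking one timestamp that is strictly greater than the current timestamps in both LwwMaps, so that the same write wins the last-writer-wins merge on each side. A hedged sketch of that rule (the real increment_logical_clock_2 lives in garage_util and may differ in detail):

```rust
use std::cmp::max;

/// Sketch only: return a timestamp strictly greater than both inputs,
/// bumped to the wall clock when the clock is ahead.
fn increment_logical_clock_2_sketch(now_msec: u64, t1: u64, t2: u64) -> u64 {
    max(now_msec, max(t1, t2) + 1)
}

fn main() {
    let ts = increment_logical_clock_2_sketch(1_000, 1_200, 900);
    assert!(ts > 1_200 && ts > 900); // wins the LWW merge in both maps
}
```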
/// Unsets an alias for a bucket in the local namespace of a key.
/// This function fails if:
/// - bucket does not exist or is deleted
/// - key does not exist or is deleted
/// - alias does not exist or maps to another bucket (-> internal error)
/// - bucket has no other aliases (global or local)
pub async fn unset_local_bucket_alias(
&self,
bucket_id: Uuid,
key_id: &String,
alias_name: &String,
) -> Result<(), Error> {
let mut bucket = self.get_existing_bucket(bucket_id).await?;
let mut key = self.get_existing_key(key_id).await?;
let mut bucket_p = bucket.state.as_option_mut().unwrap();
if key
.state
.as_option()
.unwrap()
.local_aliases
.get(alias_name)
.cloned()
.flatten() != Some(bucket_id)
{
return Err(GarageError::Message(format!(
"Bucket {:?} does not have alias {} in namespace of key {}",
bucket_id, alias_name, key_id
))
.into());
}
let has_other_global_aliases = bucket_p
.aliases
.items()
.iter()
.any(|(_, _, active)| *active);
let has_other_local_aliases = bucket_p
.local_aliases
.items()
.iter()
.any(|((k, n), _, active)| *k == key.key_id && n == alias_name && *active);
if !has_other_global_aliases && !has_other_local_aliases {
return Err(Error::BadRequest(format!("Bucket {} doesn't have other aliases, please delete it instead of just unaliasing.", alias_name)));
}
// Checks ok, remove alias
let mut key_param = key.state.as_option_mut().unwrap();
let bucket_p_local_alias_key = (key.key_id.clone(), alias_name.clone());
let alias_ts = increment_logical_clock_2(
key_param.local_aliases.get_timestamp(alias_name),
bucket_p
.local_aliases
.get_timestamp(&bucket_p_local_alias_key),
);
// ---- timestamp-ensured causality barrier ----
// writes are now done and all writes use timestamp alias_ts
key_param.local_aliases = LwwMap::raw_item(alias_name.clone(), alias_ts, None);
self.0.key_table.insert(&key).await?;
bucket_p.local_aliases = LwwMap::raw_item(bucket_p_local_alias_key, alias_ts, false);
self.0.bucket_table.insert(&bucket).await?;
Ok(())
}
/// Sets permissions for a key on a bucket.
/// This function fails if:
/// - bucket or key cannot be found at all (it's OK if they are in deleted state)
/// - bucket or key is in deleted state and we are trying to set permissions other than "deny
/// all"
pub async fn set_bucket_key_permissions(
&self,
bucket_id: Uuid,
key_id: &String,
mut perm: BucketKeyPerm,
) -> Result<(), Error> {
let mut bucket = self.get_internal_bucket(bucket_id).await?;
let mut key = self.get_internal_key(key_id).await?;
let allow_any = perm.allow_read || perm.allow_write || perm.allow_owner;
if let Some(bstate) = bucket.state.as_option() {
if let Some(kp) = bstate.authorized_keys.get(key_id) {
perm.timestamp = increment_logical_clock_2(perm.timestamp, kp.timestamp);
}
} else if allow_any {
return Err(Error::BadRequest(
"Trying to give permissions on a deleted bucket".into(),
));
}
if let Some(kstate) = key.state.as_option() {
if let Some(bp) = kstate.authorized_buckets.get(&bucket_id) {
perm.timestamp = increment_logical_clock_2(perm.timestamp, bp.timestamp);
}
} else if allow_any {
return Err(Error::BadRequest(
"Trying to give permissions to a deleted key".into(),
));
}
// ---- timestamp-ensured causality barrier ----
if let Some(bstate) = bucket.state.as_option_mut() {
bstate.authorized_keys = Map::put_mutator(key_id.clone(), perm);
self.0.bucket_table.insert(&bucket).await?;
}
if let Some(kstate) = key.state.as_option_mut() {
kstate.authorized_buckets = Map::put_mutator(bucket_id, perm);
self.0.key_table.insert(&key).await?;
}
Ok(())
}
}

51
src/model/helper/error.rs Normal file
View file

@ -0,0 +1,51 @@
use err_derive::Error;
use serde::{Deserialize, Serialize};
use garage_util::error::Error as GarageError;
#[derive(Debug, Error, Serialize, Deserialize)]
pub enum Error {
#[error(display = "Internal error: {}", _0)]
Internal(#[error(source)] GarageError),
#[error(display = "Bad request: {}", _0)]
BadRequest(String),
}
impl From<netapp::error::Error> for Error {
fn from(e: netapp::error::Error) -> Self {
Error::Internal(GarageError::Netapp(e))
}
}
pub trait OkOrBadRequest {
type S;
fn ok_or_bad_request<M: AsRef<str>>(self, reason: M) -> Result<Self::S, Error>;
}
impl<T, E> OkOrBadRequest for Result<T, E>
where
E: std::fmt::Display,
{
type S = T;
fn ok_or_bad_request<M: AsRef<str>>(self, reason: M) -> Result<T, Error> {
match self {
Ok(x) => Ok(x),
Err(e) => Err(Error::BadRequest(format!(
"{}: {}",
reason.as_ref(),
e.to_string()
))),
}
}
}
impl<T> OkOrBadRequest for Option<T> {
type S = T;
fn ok_or_bad_request<M: AsRef<str>>(self, reason: M) -> Result<T, Error> {
match self {
Some(x) => Ok(x),
None => Err(Error::BadRequest(reason.as_ref().to_string())),
}
}
}
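
As a usage sketch (the quota map and caller are hypothetical; only ok_or_bad_request and Error come from this file), both Option and Result values can be lifted into the helper error type with a contextual message:

```rust
use std::collections::HashMap;

// Hypothetical caller, not part of this PR: a missing map entry becomes
// an Error::BadRequest carrying a human-readable reason.
fn quota_for(quotas: &HashMap<String, u64>, key_id: &str) -> Result<u64, Error> {
    quotas
        .get(key_id)
        .copied()
        .ok_or_bad_request(format!("no quota configured for key {}", key_id))
}
```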

2
src/model/helper/mod.rs Normal file
View file

@ -0,0 +1,2 @@
pub mod bucket;
pub mod error;

View file

@ -2,6 +2,11 @@ use serde::{Deserialize, Serialize};
use garage_table::crdt::*; use garage_table::crdt::*;
use garage_table::*; use garage_table::*;
use garage_util::data::*;
use crate::permission::BucketKeyPerm;
use garage_model_050::key_table as old;
/// An api key /// An api key
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] #[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
@ -9,31 +14,61 @@ pub struct Key {
/// The id of the key (immutable), used as partition key /// The id of the key (immutable), used as partition key
pub key_id: String, pub key_id: String,
/// The secret_key associated /// Internal state of the key
pub state: crdt::Deletable<KeyParams>,
}
/// Configuration for a key
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
pub struct KeyParams {
/// The secret_key associated (immutable)
pub secret_key: String, pub secret_key: String,
/// Name for the key /// Name for the key
pub name: crdt::Lww<String>, pub name: crdt::Lww<String>,
/// Is the key deleted /// Flag to allow users having this key to create buckets
pub deleted: crdt::Bool, pub allow_create_bucket: crdt::Lww<bool>,
/// Buckets in which the key is authorized. Empty if `Key` is deleted /// If the key is present: it gives some permissions,
// CRDT interaction: deleted implies authorized_buckets is empty /// a map of bucket IDs (uuids) to permissions.
pub authorized_buckets: crdt::LwwMap<String, PermissionSet>, /// Otherwise no permissions are granted to key
pub authorized_buckets: crdt::Map<Uuid, BucketKeyPerm>,
/// A key can have a local view of bucket names that it alone
/// can see; this is the namespace for these aliases
pub local_aliases: crdt::LwwMap<String, Option<Uuid>>,
}
impl KeyParams {
fn new(secret_key: &str, name: &str) -> Self {
KeyParams {
secret_key: secret_key.to_string(),
name: crdt::Lww::new(name.to_string()),
allow_create_bucket: crdt::Lww::new(false),
authorized_buckets: crdt::Map::new(),
local_aliases: crdt::LwwMap::new(),
}
}
}
impl Crdt for KeyParams {
fn merge(&mut self, o: &Self) {
self.name.merge(&o.name);
self.allow_create_bucket.merge(&o.allow_create_bucket);
self.authorized_buckets.merge(&o.authorized_buckets);
self.local_aliases.merge(&o.local_aliases);
}
} }
impl Key { impl Key {
/// Initialize a new Key, generating a random identifier and associated secret key /// Initialize a new Key, generating a random identifier and associated secret key
pub fn new(name: String) -> Self { pub fn new(name: &str) -> Self {
let key_id = format!("GK{}", hex::encode(&rand::random::<[u8; 12]>()[..])); let key_id = format!("GK{}", hex::encode(&rand::random::<[u8; 12]>()[..]));
let secret_key = hex::encode(&rand::random::<[u8; 32]>()[..]); let secret_key = hex::encode(&rand::random::<[u8; 32]>()[..]);
Self { Self {
key_id, key_id,
secret_key, state: crdt::Deletable::present(KeyParams::new(&secret_key, name)),
name: crdt::Lww::new(name),
deleted: crdt::Bool::new(false),
authorized_buckets: crdt::LwwMap::new(),
} }
} }
@ -41,10 +76,7 @@ impl Key {
pub fn import(key_id: &str, secret_key: &str, name: &str) -> Self { pub fn import(key_id: &str, secret_key: &str, name: &str) -> Self {
Self { Self {
key_id: key_id.to_string(), key_id: key_id.to_string(),
secret_key: secret_key.to_string(), state: crdt::Deletable::present(KeyParams::new(secret_key, name)),
name: crdt::Lww::new(name.to_string()),
deleted: crdt::Bool::new(false),
authorized_buckets: crdt::LwwMap::new(),
} }
} }
@ -52,41 +84,48 @@ impl Key {
pub fn delete(key_id: String) -> Self { pub fn delete(key_id: String) -> Self {
Self { Self {
key_id, key_id,
secret_key: "".into(), state: crdt::Deletable::Deleted,
name: crdt::Lww::new("".to_string()),
deleted: crdt::Bool::new(true),
authorized_buckets: crdt::LwwMap::new(),
} }
} }
/// Returns true if this represents a deleted key
pub fn is_deleted(&self) -> bool {
self.state.is_deleted()
}
/// Returns an option representing the params (None if in deleted state)
pub fn params(&self) -> Option<&KeyParams> {
self.state.as_option()
}
/// Mutable version of `.params()`
pub fn params_mut(&mut self) -> Option<&mut KeyParams> {
self.state.as_option_mut()
}
/// Get permissions for a bucket
pub fn bucket_permissions(&self, bucket: &Uuid) -> BucketKeyPerm {
self.params()
.map(|params| params.authorized_buckets.get(bucket))
.flatten()
.cloned()
.unwrap_or(BucketKeyPerm::NO_PERMISSIONS)
}
/// Check if `Key` is allowed to read in bucket /// Check if `Key` is allowed to read in bucket
pub fn allow_read(&self, bucket: &str) -> bool { pub fn allow_read(&self, bucket: &Uuid) -> bool {
self.authorized_buckets self.bucket_permissions(bucket).allow_read
.get(&bucket.to_string())
.map(|x| x.allow_read)
.unwrap_or(false)
} }
/// Check if `Key` is allowed to write in bucket /// Check if `Key` is allowed to write in bucket
pub fn allow_write(&self, bucket: &str) -> bool { pub fn allow_write(&self, bucket: &Uuid) -> bool {
self.authorized_buckets self.bucket_permissions(bucket).allow_write
.get(&bucket.to_string())
.map(|x| x.allow_write)
.unwrap_or(false)
} }
}
/// Permission given to a key in a bucket /// Check if `Key` is owner of bucket
#[derive(PartialOrd, Ord, PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] pub fn allow_owner(&self, bucket: &Uuid) -> bool {
pub struct PermissionSet { self.bucket_permissions(bucket).allow_owner
/// The key can be used to read the bucket }
pub allow_read: bool,
/// The key can be used to write in the bucket
pub allow_write: bool,
}
impl AutoCrdt for PermissionSet {
const WARN_IF_DIFFERENT: bool = true;
} }
impl Entry<EmptyKey, String> for Key { impl Entry<EmptyKey, String> for Key {
@ -100,14 +139,7 @@ impl Entry<EmptyKey, String> for Key {
impl Crdt for Key { impl Crdt for Key {
fn merge(&mut self, other: &Self) { fn merge(&mut self, other: &Self) {
self.name.merge(&other.name); self.state.merge(&other.state);
self.deleted.merge(&other.deleted);
if self.deleted.get() {
self.authorized_buckets.clear();
} else {
self.authorized_buckets.merge(&other.authorized_buckets);
}
} }
} }
@ -116,10 +148,12 @@ pub struct KeyTable;
#[derive(Clone, Debug, Serialize, Deserialize)] #[derive(Clone, Debug, Serialize, Deserialize)]
pub enum KeyFilter { pub enum KeyFilter {
Deleted(DeletedFilter), Deleted(DeletedFilter),
Matches(String), MatchesAndNotDeleted(String),
} }
impl TableSchema for KeyTable { impl TableSchema for KeyTable {
const TABLE_NAME: &'static str = "key";
type P = EmptyKey; type P = EmptyKey;
type S = String; type S = String;
type E = Key; type E = Key;
@ -127,12 +161,41 @@ impl TableSchema for KeyTable {
fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool { fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool {
match filter { match filter {
KeyFilter::Deleted(df) => df.apply(entry.deleted.get()), KeyFilter::Deleted(df) => df.apply(entry.state.is_deleted()),
KeyFilter::Matches(pat) => { KeyFilter::MatchesAndNotDeleted(pat) => {
let pat = pat.to_lowercase(); let pat = pat.to_lowercase();
entry.key_id.to_lowercase().starts_with(&pat) entry
|| entry.name.get().to_lowercase() == pat .params()
.map(|p| {
entry.key_id.to_lowercase().starts_with(&pat)
|| p.name.get().to_lowercase() == pat
})
.unwrap_or(false)
} }
} }
} }
fn try_migrate(bytes: &[u8]) -> Option<Self::E> {
let old_k = rmp_serde::decode::from_read_ref::<_, old::Key>(bytes).ok()?;
let name = crdt::Lww::raw(old_k.name.timestamp(), old_k.name.get().clone());
let state = if old_k.deleted.get() {
crdt::Deletable::Deleted
} else {
// Authorized buckets is ignored here,
// migration is performed in specific migration code in
// garage/migrate.rs
crdt::Deletable::Present(KeyParams {
secret_key: old_k.secret_key,
name,
allow_create_bucket: crdt::Lww::new(false),
authorized_buckets: crdt::Map::new(),
local_aliases: crdt::LwwMap::new(),
})
};
Some(Key {
key_id: old_k.key_id,
state,
})
}
} }

View file

@ -1,10 +1,17 @@
#[macro_use] #[macro_use]
extern crate log; extern crate log;
pub mod block; pub mod permission;
pub mod block_ref_table; pub mod block_ref_table;
pub mod bucket_alias_table;
pub mod bucket_table; pub mod bucket_table;
pub mod garage;
pub mod key_table; pub mod key_table;
pub mod object_table; pub mod object_table;
pub mod version_table; pub mod version_table;
pub mod block;
pub mod garage;
pub mod helper;
pub mod migrate;

102
src/model/migrate.rs Normal file
View file

@ -0,0 +1,102 @@
use std::sync::Arc;
use garage_util::crdt::*;
use garage_util::data::*;
use garage_util::error::Error as GarageError;
use garage_util::time::*;
use garage_model_050::bucket_table as old_bucket;
use crate::bucket_alias_table::*;
use crate::bucket_table::*;
use crate::garage::Garage;
use crate::helper::error::*;
use crate::permission::*;
pub struct Migrate {
pub garage: Arc<Garage>,
}
impl Migrate {
pub async fn migrate_buckets050(&self) -> Result<(), Error> {
let tree = self
.garage
.db
.open_tree("bucket:table")
.map_err(GarageError::from)?;
for res in tree.iter() {
let (_k, v) = res.map_err(GarageError::from)?;
let bucket = rmp_serde::decode::from_read_ref::<_, old_bucket::Bucket>(&v[..])
.map_err(GarageError::from)?;
if let old_bucket::BucketState::Present(p) = bucket.state.get() {
self.migrate_buckets050_do_bucket(&bucket, p).await?;
}
}
Ok(())
}
pub async fn migrate_buckets050_do_bucket(
&self,
old_bucket: &old_bucket::Bucket,
old_bucket_p: &old_bucket::BucketParams,
) -> Result<(), Error> {
let bucket_id = blake2sum(old_bucket.name.as_bytes());
let new_name = if is_valid_bucket_name(&old_bucket.name) {
old_bucket.name.clone()
} else {
// if old bucket name was not valid, replace it by
// a hex-encoded name derived from its identifier
hex::encode(&bucket_id.as_slice()[..16])
};
let website = if *old_bucket_p.website.get() {
Some(WebsiteConfig {
index_document: "index.html".into(),
error_document: None,
})
} else {
None
};
self.garage
.bucket_table
.insert(&Bucket {
id: bucket_id,
state: Deletable::Present(BucketParams {
creation_date: now_msec(),
authorized_keys: Map::new(),
website_config: Lww::new(website),
aliases: LwwMap::new(),
local_aliases: LwwMap::new(),
}),
})
.await?;
self.garage
.bucket_helper()
.set_global_bucket_alias(bucket_id, &new_name)
.await?;
for (k, ts, perm) in old_bucket_p.authorized_keys.items().iter() {
self.garage
.bucket_helper()
.set_bucket_key_permissions(
bucket_id,
k,
BucketKeyPerm {
timestamp: *ts,
allow_read: perm.allow_read,
allow_write: perm.allow_write,
allow_owner: false,
},
)
.await?;
}
Ok(())
}
}
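
The important property of this migration is that the new bucket ID is derived deterministically from the old bucket name, so every node running the migration computes the same identifier without coordination. A minimal sketch (blake2sum and hex as used above; the sample name is made up):

```rust
// Deterministic ID derivation, as in migrate_buckets050_do_bucket above.
let bucket_id = blake2sum("my-old-bucket".as_bytes());

// Fallback alias for invalid old names: the first 16 bytes of the ID,
// hex-encoded to 32 characters, which is always a valid bucket name.
let fallback_name = hex::encode(&bucket_id.as_slice()[..16]);
assert_eq!(fallback_name.len(), 32);
```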

View file

@ -11,11 +11,13 @@ use garage_table::*;
use crate::version_table::*; use crate::version_table::*;
use garage_model_050::object_table as old;
/// An object /// An object
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] #[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
pub struct Object { pub struct Object {
/// The bucket in which the object is stored, used as partition key /// The bucket in which the object is stored, used as partition key
pub bucket: String, pub bucket_id: Uuid,
/// The key at which the object is stored in its bucket, used as sorting key /// The key at which the object is stored in its bucket, used as sorting key
pub key: String, pub key: String,
@ -26,9 +28,9 @@ pub struct Object {
impl Object { impl Object {
/// Initialize an Object struct from parts /// Initialize an Object struct from parts
pub fn new(bucket: String, key: String, versions: Vec<ObjectVersion>) -> Self { pub fn new(bucket_id: Uuid, key: String, versions: Vec<ObjectVersion>) -> Self {
let mut ret = Self { let mut ret = Self {
bucket, bucket_id,
key, key,
versions: vec![], versions: vec![],
}; };
@ -164,9 +166,9 @@ impl ObjectVersion {
} }
} }
impl Entry<String, String> for Object { impl Entry<Uuid, String> for Object {
fn partition_key(&self) -> &String { fn partition_key(&self) -> &Uuid {
&self.bucket &self.bucket_id
} }
fn sort_key(&self) -> &String { fn sort_key(&self) -> &String {
&self.key &self.key
@ -217,7 +219,9 @@ pub struct ObjectTable {
} }
impl TableSchema for ObjectTable { impl TableSchema for ObjectTable {
type P = String; const TABLE_NAME: &'static str = "object";
type P = Uuid;
type S = String; type S = String;
type E = Object; type E = Object;
type Filter = DeletedFilter; type Filter = DeletedFilter;
@ -240,7 +244,7 @@ impl TableSchema for ObjectTable {
}; };
if newly_deleted { if newly_deleted {
let deleted_version = let deleted_version =
Version::new(v.uuid, old_v.bucket.clone(), old_v.key.clone(), true); Version::new(v.uuid, old_v.bucket_id, old_v.key.clone(), true);
version_table.insert(&deleted_version).await?; version_table.insert(&deleted_version).await?;
} }
} }
@ -253,4 +257,70 @@ impl TableSchema for ObjectTable {
let deleted = !entry.versions.iter().any(|v| v.is_data()); let deleted = !entry.versions.iter().any(|v| v.is_data());
filter.apply(deleted) filter.apply(deleted)
} }
fn try_migrate(bytes: &[u8]) -> Option<Self::E> {
let old_obj = rmp_serde::decode::from_read_ref::<_, old::Object>(bytes).ok()?;
Some(migrate_object(old_obj))
}
}
// vvvvvvvv migration code, stupid stuff vvvvvvvvvvvv
// (we just want to change bucket into bucket_id by hashing it)
fn migrate_object(o: old::Object) -> Object {
let versions = o
.versions()
.iter()
.cloned()
.map(migrate_object_version)
.collect();
Object {
bucket_id: blake2sum(o.bucket.as_bytes()),
key: o.key,
versions,
}
}
fn migrate_object_version(v: old::ObjectVersion) -> ObjectVersion {
ObjectVersion {
uuid: Uuid::try_from(v.uuid.as_slice()).unwrap(),
timestamp: v.timestamp,
state: match v.state {
old::ObjectVersionState::Uploading(h) => {
ObjectVersionState::Uploading(migrate_object_version_headers(h))
}
old::ObjectVersionState::Complete(d) => {
ObjectVersionState::Complete(migrate_object_version_data(d))
}
old::ObjectVersionState::Aborted => ObjectVersionState::Aborted,
},
}
}
fn migrate_object_version_headers(h: old::ObjectVersionHeaders) -> ObjectVersionHeaders {
ObjectVersionHeaders {
content_type: h.content_type,
other: h.other,
}
}
fn migrate_object_version_data(d: old::ObjectVersionData) -> ObjectVersionData {
match d {
old::ObjectVersionData::DeleteMarker => ObjectVersionData::DeleteMarker,
old::ObjectVersionData::Inline(m, b) => {
ObjectVersionData::Inline(migrate_object_version_meta(m), b)
}
old::ObjectVersionData::FirstBlock(m, h) => ObjectVersionData::FirstBlock(
migrate_object_version_meta(m),
Hash::try_from(h.as_slice()).unwrap(),
),
}
}
fn migrate_object_version_meta(m: old::ObjectVersionMeta) -> ObjectVersionMeta {
ObjectVersionMeta {
headers: migrate_object_version_headers(m.headers),
size: m.size,
etag: m.etag,
}
} }

53
src/model/permission.rs Normal file
View file

@ -0,0 +1,53 @@
use std::cmp::Ordering;
use serde::{Deserialize, Serialize};
use garage_util::crdt::*;
/// Permission given to a key in a bucket
#[derive(PartialOrd, Ord, PartialEq, Eq, Clone, Copy, Debug, Serialize, Deserialize)]
pub struct BucketKeyPerm {
/// Timestamp at which the permission was given
pub timestamp: u64,
/// The key can be used to read the bucket
pub allow_read: bool,
/// The key can be used to write objects to the bucket
pub allow_write: bool,
/// The key can be used to control other aspects of the bucket:
/// - enable / disable website access
/// - delete bucket
pub allow_owner: bool,
}
impl BucketKeyPerm {
pub const NO_PERMISSIONS: Self = Self {
timestamp: 0,
allow_read: false,
allow_write: false,
allow_owner: false,
};
}
impl Crdt for BucketKeyPerm {
fn merge(&mut self, other: &Self) {
match other.timestamp.cmp(&self.timestamp) {
Ordering::Greater => {
*self = *other;
lx marked this conversation as resolved

allow_owner is not merged. Is it semantically read-only and hence doesn't need to be (if so, please document it), or was it just forgotten?


Just forgotten, thx

}
Ordering::Equal if other != self => {
warn!("Different permission sets with same timestamp: {:?} and {:?}, merging to most restricted permission set.", self, other);
if !other.allow_read {
self.allow_read = false;
}
if !other.allow_write {
self.allow_write = false;
}
if !other.allow_owner {
self.allow_owner = false;
}
}
_ => (),
}
}
}
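
A worked example of the merge rule above (types from this file; the values are illustrative): two replicas that recorded different flags at the same timestamp converge to the intersection of the granted permissions, regardless of merge order.

```rust
let mut a = BucketKeyPerm { timestamp: 42, allow_read: true, allow_write: true, allow_owner: false };
let b = BucketKeyPerm { timestamp: 42, allow_read: true, allow_write: false, allow_owner: true };
a.merge(&b);
// Only allow_read survives: it is the only flag granted on both sides.
assert!(a.allow_read && !a.allow_write && !a.allow_owner);
```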

View file

@ -10,6 +10,8 @@ use garage_table::*;
use crate::block_ref_table::*; use crate::block_ref_table::*;
use garage_model_050::version_table as old;
/// A version of an object /// A version of an object
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] #[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
pub struct Version { pub struct Version {
@ -29,19 +31,19 @@ pub struct Version {
// Back link to bucket+key so that we can figure if // Back link to bucket+key so that we can figure if
// this was deleted later on // this was deleted later on
/// Bucket in which the related object is stored /// Bucket in which the related object is stored
pub bucket: String, pub bucket_id: Uuid,
/// Key in which the related object is stored /// Key in which the related object is stored
pub key: String, pub key: String,
} }
impl Version { impl Version {
pub fn new(uuid: Uuid, bucket: String, key: String, deleted: bool) -> Self { pub fn new(uuid: Uuid, bucket_id: Uuid, key: String, deleted: bool) -> Self {
Self { Self {
uuid, uuid,
deleted: deleted.into(), deleted: deleted.into(),
blocks: crdt::Map::new(), blocks: crdt::Map::new(),
parts_etags: crdt::Map::new(), parts_etags: crdt::Map::new(),
bucket, bucket_id,
key, key,
} }
} }
@ -82,8 +84,8 @@ impl AutoCrdt for VersionBlock {
const WARN_IF_DIFFERENT: bool = true; const WARN_IF_DIFFERENT: bool = true;
} }
impl Entry<Hash, EmptyKey> for Version { impl Entry<Uuid, EmptyKey> for Version {
fn partition_key(&self) -> &Hash { fn partition_key(&self) -> &Uuid {
&self.uuid &self.uuid
} }
fn sort_key(&self) -> &EmptyKey { fn sort_key(&self) -> &EmptyKey {
@ -114,7 +116,9 @@ pub struct VersionTable {
} }
impl TableSchema for VersionTable { impl TableSchema for VersionTable {
type P = Hash; const TABLE_NAME: &'static str = "version";
type P = Uuid;
type S = EmptyKey; type S = EmptyKey;
type E = Version; type E = Version;
type Filter = DeletedFilter; type Filter = DeletedFilter;
@ -145,4 +149,42 @@ impl TableSchema for VersionTable {
fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool { fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool {
filter.apply(entry.deleted.get()) filter.apply(entry.deleted.get())
} }
fn try_migrate(bytes: &[u8]) -> Option<Self::E> {
let old = rmp_serde::decode::from_read_ref::<_, old::Version>(bytes).ok()?;
let blocks = old
.blocks
.items()
.iter()
.map(|(k, v)| {
(
VersionBlockKey {
part_number: k.part_number,
offset: k.offset,
},
VersionBlock {
hash: Hash::try_from(v.hash.as_slice()).unwrap(),
size: v.size,
},
)
})
.collect::<crdt::Map<_, _>>();
let parts_etags = old
.parts_etags
.items()
.iter()
.map(|(k, v)| (*k, v.clone()))
.collect::<crdt::Map<_, _>>();
Some(Version {
uuid: Hash::try_from(old.uuid.as_slice()).unwrap(),
deleted: crdt::Bool::new(old.deleted.get()),
blocks,
parts_etags,
bucket_id: blake2sum(old.bucket.as_bytes()),
key: old.key,
})
}
} }

View file

@ -1,6 +1,6 @@
[package] [package]
name = "garage_rpc" name = "garage_rpc"
version = "0.5.0" version = "0.6.0"
authors = ["Alex Auvolat <alex@adnab.me>"] authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"
@ -14,7 +14,7 @@ path = "lib.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies] [dependencies]
garage_util = { version = "0.5.0", path = "../util" } garage_util = { version = "0.6.0", path = "../util" }
arc-swap = "1.0" arc-swap = "1.0"
bytes = "1.0" bytes = "1.0"

View file

@ -576,7 +576,7 @@ impl EndpointHandler<SystemRpc> for System {
self.clone().handle_advertise_cluster_layout(adv).await self.clone().handle_advertise_cluster_layout(adv).await
} }
SystemRpc::GetKnownNodes => Ok(self.handle_get_known_nodes()), SystemRpc::GetKnownNodes => Ok(self.handle_get_known_nodes()),
_ => Err(Error::BadRpc("Unexpected RPC message".to_string())), m => Err(Error::unexpected_rpc_message(m)),
} }
} }
} }

View file

@ -1,6 +1,6 @@
[package] [package]
name = "garage_table" name = "garage_table"
version = "0.5.0" version = "0.6.0"
authors = ["Alex Auvolat <alex@adnab.me>"] authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"
@ -14,8 +14,8 @@ path = "lib.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies] [dependencies]
garage_rpc = { version = "0.5.0", path = "../rpc" } garage_rpc = { version = "0.6.0", path = "../rpc" }
garage_util = { version = "0.5.0", path = "../util" } garage_util = { version = "0.6.0", path = "../util" }
async-trait = "0.1.7" async-trait = "0.1.7"
bytes = "1.0" bytes = "1.0"

View file

@ -19,7 +19,6 @@ use crate::schema::*;
pub struct TableData<F: TableSchema, R: TableReplication> { pub struct TableData<F: TableSchema, R: TableReplication> {
system: Arc<System>, system: Arc<System>,
pub name: String,
pub(crate) instance: F, pub(crate) instance: F,
pub(crate) replication: R, pub(crate) replication: R,
@ -36,31 +35,24 @@ where
F: TableSchema, F: TableSchema,
R: TableReplication, R: TableReplication,
{ {
pub fn new( pub fn new(system: Arc<System>, instance: F, replication: R, db: &sled::Db) -> Arc<Self> {
system: Arc<System>,
name: String,
instance: F,
replication: R,
db: &sled::Db,
) -> Arc<Self> {
let store = db let store = db
.open_tree(&format!("{}:table", name)) .open_tree(&format!("{}:table", F::TABLE_NAME))
.expect("Unable to open DB tree"); .expect("Unable to open DB tree");
let merkle_tree = db let merkle_tree = db
.open_tree(&format!("{}:merkle_tree", name)) .open_tree(&format!("{}:merkle_tree", F::TABLE_NAME))
.expect("Unable to open DB Merkle tree tree"); .expect("Unable to open DB Merkle tree tree");
let merkle_todo = db let merkle_todo = db
.open_tree(&format!("{}:merkle_todo", name)) .open_tree(&format!("{}:merkle_todo", F::TABLE_NAME))
.expect("Unable to open DB Merkle TODO tree"); .expect("Unable to open DB Merkle TODO tree");
let gc_todo = db let gc_todo = db
.open_tree(&format!("{}:gc_todo_v2", name)) .open_tree(&format!("{}:gc_todo_v2", F::TABLE_NAME))
.expect("Unable to open DB tree"); .expect("Unable to open DB tree");
Arc::new(Self { Arc::new(Self {
system, system,
name,
instance, instance,
replication, replication,
store, store,
@ -245,7 +237,7 @@ where
Err(e) => match F::try_migrate(bytes) { Err(e) => match F::try_migrate(bytes) {
Some(x) => Ok(x), Some(x) => Ok(x),
None => { None => {
warn!("Unable to decode entry of {}: {}", self.name, e); warn!("Unable to decode entry of {}: {}", F::TABLE_NAME, e);
for line in hexdump::hexdump_iter(bytes) { for line in hexdump::hexdump_iter(bytes) {
debug!("{}", line); debug!("{}", line);
} }

View file

@ -57,11 +57,11 @@ where
pub(crate) fn launch(system: Arc<System>, data: Arc<TableData<F, R>>) -> Arc<Self> { pub(crate) fn launch(system: Arc<System>, data: Arc<TableData<F, R>>) -> Arc<Self> {
let endpoint = system let endpoint = system
.netapp .netapp
.endpoint(format!("garage_table/gc.rs/Rpc:{}", data.name)); .endpoint(format!("garage_table/gc.rs/Rpc:{}", F::TABLE_NAME));
let gc = Arc::new(Self { let gc = Arc::new(Self {
system: system.clone(), system: system.clone(),
data: data.clone(), data,
endpoint, endpoint,
}); });
@ -69,7 +69,7 @@ where
let gc1 = gc.clone(); let gc1 = gc.clone();
system.background.spawn_worker( system.background.spawn_worker(
format!("GC loop for {}", data.name), format!("GC loop for {}", F::TABLE_NAME),
move |must_exit: watch::Receiver<bool>| gc1.gc_loop(must_exit), move |must_exit: watch::Receiver<bool>| gc1.gc_loop(must_exit),
); );
@ -90,7 +90,7 @@ where
} }
} }
Err(e) => { Err(e) => {
warn!("({}) Error doing GC: {}", self.data.name, e); warn!("({}) Error doing GC: {}", F::TABLE_NAME, e);
} }
} }
} }
@ -160,7 +160,7 @@ where
return Ok(Some(Duration::from_secs(60))); return Ok(Some(Duration::from_secs(60)));
} }
debug!("({}) GC: doing {} items", self.data.name, entries.len()); debug!("({}) GC: doing {} items", F::TABLE_NAME, entries.len());
// Split entries to GC by the set of nodes on which they are stored. // Split entries to GC by the set of nodes on which they are stored.
// Here we call them partitions but they are not exactly // Here we call them partitions but they are not exactly
@ -262,7 +262,8 @@ where
info!( info!(
"({}) GC: {} items successfully pushed, will try to delete.", "({}) GC: {} items successfully pushed, will try to delete.",
self.data.name, n_items F::TABLE_NAME,
n_items
); );
// Step 2: delete tombstones everywhere. // Step 2: delete tombstones everywhere.
@ -314,7 +315,7 @@ where
} }
Ok(GcRpc::Ok) Ok(GcRpc::Ok)
} }
_ => Err(Error::Message("Unexpected GC RPC".to_string())), m => Err(Error::unexpected_rpc_message(m)),
} }
} }
} }

View file

@ -82,7 +82,7 @@ where
let ret2 = ret.clone(); let ret2 = ret.clone();
background.spawn_worker( background.spawn_worker(
format!("Merkle tree updater for {}", ret.data.name), format!("Merkle tree updater for {}", F::TABLE_NAME),
|must_exit: watch::Receiver<bool>| ret2.updater_loop(must_exit), |must_exit: watch::Receiver<bool>| ret2.updater_loop(must_exit),
); );
@ -97,14 +97,16 @@ where
if let Err(e) = self.update_item(&key[..], &valhash[..]) { if let Err(e) = self.update_item(&key[..], &valhash[..]) {
warn!( warn!(
"({}) Error while updating Merkle tree item: {}", "({}) Error while updating Merkle tree item: {}",
self.data.name, e F::TABLE_NAME,
e
); );
} }
} }
Err(e) => { Err(e) => {
warn!( warn!(
"({}) Error while iterating on Merkle todo tree: {}", "({}) Error while iterating on Merkle todo tree: {}",
self.data.name, e F::TABLE_NAME,
e
); );
tokio::time::sleep(Duration::from_secs(10)).await; tokio::time::sleep(Duration::from_secs(10)).await;
} }
@ -147,7 +149,8 @@ where
if !deleted { if !deleted {
debug!( debug!(
"({}) Item not deleted from Merkle todo because it changed: {:?}", "({}) Item not deleted from Merkle todo because it changed: {:?}",
self.data.name, k F::TABLE_NAME,
k
); );
} }
Ok(()) Ok(())
@ -183,7 +186,7 @@ where
// should not happen // should not happen
warn!( warn!(
"({}) Replacing intermediate node with empty node, should not happen.", "({}) Replacing intermediate node with empty node, should not happen.",
self.data.name F::TABLE_NAME
); );
Some(MerkleNode::Empty) Some(MerkleNode::Empty)
} else if children.len() == 1 { } else if children.len() == 1 {
@ -195,7 +198,7 @@ where
MerkleNode::Empty => { MerkleNode::Empty => {
warn!( warn!(
"({}) Single subnode in tree is empty Merkle node", "({}) Single subnode in tree is empty Merkle node",
self.data.name F::TABLE_NAME
); );
Some(MerkleNode::Empty) Some(MerkleNode::Empty)
} }

View file

@ -16,7 +16,10 @@ impl PartitionKey for String {
} }
} }
impl PartitionKey for Hash { /// Values of type FixedBytes32 are assumed to be random,
/// either a hash or a random UUID. This means we can use
/// them directly as an index into the hash table.
impl PartitionKey for FixedBytes32 {
fn hash(&self) -> Hash { fn hash(&self) -> Hash {
*self *self
} }
@ -34,7 +37,7 @@ impl SortKey for String {
} }
} }
impl SortKey for Hash { impl SortKey for FixedBytes32 {
fn sort_key(&self) -> &[u8] { fn sort_key(&self) -> &[u8] {
self.as_slice() self.as_slice()
} }
@ -57,12 +60,19 @@ pub trait Entry<P: PartitionKey, S: SortKey>:
/// Trait for the schema used in a table /// Trait for the schema used in a table
pub trait TableSchema: Send + Sync { pub trait TableSchema: Send + Sync {
/// The name of the table in the database
const TABLE_NAME: &'static str;
/// The partition key used in that table /// The partition key used in that table
type P: PartitionKey + Clone + PartialEq + Serialize + for<'de> Deserialize<'de> + Send + Sync; type P: PartitionKey + Clone + PartialEq + Serialize + for<'de> Deserialize<'de> + Send + Sync;
/// The sort key used in that table /// The sort key used in that table
type S: SortKey + Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync; type S: SortKey + Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync;
/// The type for an entry in that table /// The type for an entry in that table
type E: Entry<Self::P, Self::S>; type E: Entry<Self::P, Self::S>;
/// The type for a filter that can be applied to select entries
/// (e.g. filter out deleted entries)
type Filter: Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync; type Filter: Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync;
// Action to take if not able to decode current version: // Action to take if not able to decode current version:
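
For context, this change turns the table name from a `String` passed at construction time into an associated constant on the schema trait, which is what lets all the workers in this diff refer to `F::TABLE_NAME` without keeping a `data.name` field around. A minimal self-contained sketch of the pattern (a stand-in, not the real `garage_table` trait, which has more items):

```rust
/// Stand-in for the schema trait, reduced to the part this PR touches.
trait Schema {
    /// The table name is now a compile-time constant.
    const TABLE_NAME: &'static str;
}

struct BucketTable;
impl Schema for BucketTable {
    const TABLE_NAME: &'static str = "bucket";
}

/// Mirrors the `format!("{}:table", F::TABLE_NAME)` calls in `TableData::new`.
fn tree_name<F: Schema>(suffix: &str) -> String {
    format!("{}:{}", F::TABLE_NAME, suffix)
}

fn main() {
    assert_eq!(tree_name::<BucketTable>("merkle_todo"), "bucket:merkle_todo");
}
```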

View file

@ -77,13 +77,13 @@ where
) -> Arc<Self> { ) -> Arc<Self> {
let endpoint = system let endpoint = system
.netapp .netapp
.endpoint(format!("garage_table/sync.rs/Rpc:{}", data.name)); .endpoint(format!("garage_table/sync.rs/Rpc:{}", F::TABLE_NAME));
let todo = SyncTodo { todo: vec![] }; let todo = SyncTodo { todo: vec![] };
let syncer = Arc::new(Self { let syncer = Arc::new(Self {
system: system.clone(), system: system.clone(),
data: data.clone(), data,
merkle, merkle,
todo: Mutex::new(todo), todo: Mutex::new(todo),
endpoint, endpoint,
@ -95,13 +95,13 @@ where
let s1 = syncer.clone(); let s1 = syncer.clone();
system.background.spawn_worker( system.background.spawn_worker(
format!("table sync watcher for {}", data.name), format!("table sync watcher for {}", F::TABLE_NAME),
move |must_exit: watch::Receiver<bool>| s1.watcher_task(must_exit, busy_rx), move |must_exit: watch::Receiver<bool>| s1.watcher_task(must_exit, busy_rx),
); );
let s2 = syncer.clone(); let s2 = syncer.clone();
system.background.spawn_worker( system.background.spawn_worker(
format!("table syncer for {}", data.name), format!("table syncer for {}", F::TABLE_NAME),
move |must_exit: watch::Receiver<bool>| s2.syncer_task(must_exit, busy_tx), move |must_exit: watch::Receiver<bool>| s2.syncer_task(must_exit, busy_tx),
); );
@ -128,7 +128,7 @@ where
_ = ring_recv.changed().fuse() => { _ = ring_recv.changed().fuse() => {
let new_ring = ring_recv.borrow(); let new_ring = ring_recv.borrow();
if !Arc::ptr_eq(&new_ring, &prev_ring) { if !Arc::ptr_eq(&new_ring, &prev_ring) {
debug!("({}) Ring changed, adding full sync to syncer todo list", self.data.name); debug!("({}) Ring changed, adding full sync to syncer todo list", F::TABLE_NAME);
self.add_full_sync(); self.add_full_sync();
prev_ring = new_ring.clone(); prev_ring = new_ring.clone();
} }
@ -146,7 +146,7 @@ where
_ = tokio::time::sleep(Duration::from_secs(1)).fuse() => { _ = tokio::time::sleep(Duration::from_secs(1)).fuse() => {
if nothing_to_do_since.map(|t| Instant::now() - t >= ANTI_ENTROPY_INTERVAL).unwrap_or(false) { if nothing_to_do_since.map(|t| Instant::now() - t >= ANTI_ENTROPY_INTERVAL).unwrap_or(false) {
nothing_to_do_since = None; nothing_to_do_since = None;
debug!("({}) Interval passed, adding full sync to syncer todo list", self.data.name); debug!("({}) Interval passed, adding full sync to syncer todo list", F::TABLE_NAME);
self.add_full_sync(); self.add_full_sync();
} }
} }
@ -177,7 +177,9 @@ where
if let Err(e) = res { if let Err(e) = res {
warn!( warn!(
"({}) Error while syncing {:?}: {}", "({}) Error while syncing {:?}: {}",
self.data.name, partition, e F::TABLE_NAME,
partition,
e
); );
} }
} else { } else {
@ -205,7 +207,9 @@ where
debug!( debug!(
"({}) Syncing {:?} with {:?}...", "({}) Syncing {:?} with {:?}...",
self.data.name, partition, nodes F::TABLE_NAME,
partition,
nodes
); );
let mut sync_futures = nodes let mut sync_futures = nodes
.iter() .iter()
@ -219,7 +223,7 @@ where
while let Some(r) = sync_futures.next().await { while let Some(r) = sync_futures.next().await {
if let Err(e) = r { if let Err(e) = r {
n_errors += 1; n_errors += 1;
warn!("({}) Sync error: {}", self.data.name, e); warn!("({}) Sync error: {}", F::TABLE_NAME, e);
} }
} }
if n_errors > self.data.replication.max_write_errors() { if n_errors > self.data.replication.max_write_errors() {
@ -272,7 +276,7 @@ where
if nodes.contains(&self.system.id) { if nodes.contains(&self.system.id) {
warn!( warn!(
"({}) Interrupting offload as partitions seem to have changed", "({}) Interrupting offload as partitions seem to have changed",
self.data.name F::TABLE_NAME
); );
break; break;
} }
@ -286,7 +290,7 @@ where
counter += 1; counter += 1;
info!( info!(
"({}) Offloading {} items from {:?}..{:?} ({})", "({}) Offloading {} items from {:?}..{:?} ({})",
self.data.name, F::TABLE_NAME,
items.len(), items.len(),
begin, begin,
end, end,
@ -329,7 +333,7 @@ where
} }
if not_removed > 0 { if not_removed > 0 {
debug!("({}) {} items not removed during offload because they changed in between (trying again...)", self.data.name, not_removed); debug!("({}) {} items not removed during offload because they changed in between (trying again...)", F::TABLE_NAME, not_removed);
} }
Ok(()) Ok(())
@ -360,7 +364,9 @@ where
if root_ck.is_empty() { if root_ck.is_empty() {
debug!( debug!(
"({}) Sync {:?} with {:?}: partition is empty.", "({}) Sync {:?} with {:?}: partition is empty.",
self.data.name, partition, who F::TABLE_NAME,
partition,
who
); );
return Ok(()); return Ok(());
} }
@ -384,7 +390,9 @@ where
SyncRpc::RootCkDifferent(false) => { SyncRpc::RootCkDifferent(false) => {
debug!( debug!(
"({}) Sync {:?} with {:?}: no difference", "({}) Sync {:?} with {:?}: no difference",
self.data.name, partition, who F::TABLE_NAME,
partition,
who
); );
return Ok(()); return Ok(());
} }
@ -413,11 +421,11 @@ where
// Just send that item directly // Just send that item directly
if let Some(val) = self.data.store.get(&ik[..])? { if let Some(val) = self.data.store.get(&ik[..])? {
if blake2sum(&val[..]) != ivhash { if blake2sum(&val[..]) != ivhash {
warn!("({}) Hashes differ between stored value and Merkle tree, key: {:?} (if your server is very busy, don't worry, this happens when the Merkle tree can't be updated fast enough)", self.data.name, ik); warn!("({}) Hashes differ between stored value and Merkle tree, key: {:?} (if your server is very busy, don't worry, this happens when the Merkle tree can't be updated fast enough)", F::TABLE_NAME, ik);
} }
todo_items.push(val.to_vec()); todo_items.push(val.to_vec());
} else { } else {
warn!("({}) Item from Merkle tree not found in store: {:?} (if your server is very busy, don't worry, this happens when the Merkle tree can't be updated fast enough)", self.data.name, ik); warn!("({}) Item from Merkle tree not found in store: {:?} (if your server is very busy, don't worry, this happens when the Merkle tree can't be updated fast enough)", F::TABLE_NAME, ik);
} }
} }
MerkleNode::Intermediate(l) => { MerkleNode::Intermediate(l) => {
@ -482,7 +490,7 @@ where
async fn send_items(&self, who: Uuid, item_value_list: Vec<Vec<u8>>) -> Result<(), Error> { async fn send_items(&self, who: Uuid, item_value_list: Vec<Vec<u8>>) -> Result<(), Error> {
info!( info!(
"({}) Sending {} items to {:?}", "({}) Sending {} items to {:?}",
self.data.name, F::TABLE_NAME,
item_value_list.len(), item_value_list.len(),
who who
); );
@ -506,10 +514,7 @@ where
if let SyncRpc::Ok = rpc_resp { if let SyncRpc::Ok = rpc_resp {
Ok(()) Ok(())
} else { } else {
Err(Error::Message(format!( Err(Error::unexpected_rpc_message(rpc_resp))
"Unexpected response to RPC Update: {}",
debug_serialize(&rpc_resp)
)))
} }
} }
} }
@ -537,7 +542,7 @@ where
self.data.update_many(items)?; self.data.update_many(items)?;
Ok(SyncRpc::Ok) Ok(SyncRpc::Ok)
} }
_ => Err(Error::Message("Unexpected sync RPC".to_string())), m => Err(Error::unexpected_rpc_message(m)),
} }
} }
} }

View file

@ -55,18 +55,12 @@ where
{ {
// =============== PUBLIC INTERFACE FUNCTIONS (new, insert, get, etc) =============== // =============== PUBLIC INTERFACE FUNCTIONS (new, insert, get, etc) ===============
pub fn new( pub fn new(instance: F, replication: R, system: Arc<System>, db: &sled::Db) -> Arc<Self> {
instance: F,
replication: R,
system: Arc<System>,
db: &sled::Db,
name: String,
) -> Arc<Self> {
let endpoint = system let endpoint = system
.netapp .netapp
.endpoint(format!("garage_table/table.rs/Rpc:{}", name)); .endpoint(format!("garage_table/table.rs/Rpc:{}", F::TABLE_NAME));
let data = TableData::new(system.clone(), name, instance, replication, db); let data = TableData::new(system.clone(), instance, replication, db);
let merkle_updater = MerkleUpdater::launch(&system.background, data.clone()); let merkle_updater = MerkleUpdater::launch(&system.background, data.clone());
@ -317,7 +311,7 @@ where
self.data.update_many(pairs)?; self.data.update_many(pairs)?;
Ok(TableRpc::Ok) Ok(TableRpc::Ok)
} }
_ => Err(Error::BadRpc("Unexpected table RPC".to_string())), m => Err(Error::unexpected_rpc_message(m)),
} }
} }
} }

View file

@ -19,7 +19,7 @@ impl PartitionKey for EmptyKey {
#[derive(Clone, Copy, Debug, Serialize, Deserialize)] #[derive(Clone, Copy, Debug, Serialize, Deserialize)]
pub enum DeletedFilter { pub enum DeletedFilter {
All, Any,
Deleted, Deleted,
NotDeleted, NotDeleted,
} }
@ -27,7 +27,7 @@ pub enum DeletedFilter {
impl DeletedFilter { impl DeletedFilter {
pub fn apply(&self, deleted: bool) -> bool { pub fn apply(&self, deleted: bool) -> bool {
match self { match self {
DeletedFilter::All => true, DeletedFilter::Any => true,
DeletedFilter::Deleted => deleted, DeletedFilter::Deleted => deleted,
DeletedFilter::NotDeleted => !deleted, DeletedFilter::NotDeleted => !deleted,
} }
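
The rename from `All` to `Any` does not change behavior: the variant still matches every entry regardless of deletion state. A quick check, assuming `DeletedFilter` is re-exported at the `garage_table` crate root as the `use garage_table::*;` imports elsewhere in this diff suggest:

```rust
use garage_table::DeletedFilter;

fn main() {
    assert!(DeletedFilter::Any.apply(true)); // matches deleted entries...
    assert!(DeletedFilter::Any.apply(false)); // ...and live ones
    assert!(DeletedFilter::Deleted.apply(true));
    assert!(!DeletedFilter::NotDeleted.apply(true));
}
```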

View file

@ -1,6 +1,6 @@
[package] [package]
name = "garage_util" name = "garage_util"
version = "0.5.0" version = "0.6.0"
authors = ["Alex Auvolat <alex@adnab.me>"] authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"

View file

@ -1,5 +1,3 @@
use crate::data::*;
/// Definition of a CRDT - all CRDT Rust types implement this. /// Definition of a CRDT - all CRDT Rust types implement this.
/// ///
/// A CRDT is defined as a merge operator that respects a certain set of axioms. /// A CRDT is defined as a merge operator that respects a certain set of axioms.
@ -28,6 +26,28 @@ pub trait Crdt {
fn merge(&mut self, other: &Self); fn merge(&mut self, other: &Self);
} }
/// Option<T> implements Crdt for any type T, even if T doesn't implement CRDT itself: when
/// different values are detected, they are always merged to None. This can be used for value
/// types which shouldn't be merged, instead of trying to merge things when we know we don't want
/// to merge them (which is what the AutoCrdt trait is used for most of the time). This case
/// arises very often, for example with a Lww or a LwwMap: the value type has to be a CRDT so that
/// we have a rule for what to do when timestamps aren't enough to disambiguate (in a distributed
/// system, anything can happen!), and with AutoCrdt the rule is to make an arbitrary (but
/// deterministic) choice between the two. When using an Option<T> instead with this impl, ambiguity
lx marked this conversation as resolved

I find this implementation surprising. I'd expect `Some(true)` and `Some(false)` to be merged to `Some(true)`, not `None`.

So I haven't written a doc comment for this yet, but the idea of this impl is that we can make a CRDT of any type T that doesn't implement CRDT, by declaring that different values merge to None. We probably should use this more often in fact, instead of trying to merge things when we know we don't want to merge them (which is what the AutoCrdt trait is used for most of the time). This case arises very often, for example with a `Lww` or a `LwwMap`: the value type has to be a CRDT so that we have a rule for what to do when timestamps aren't enough to disambiguate (in a distributed system, anything can happen!), and with AutoCrdt the rule is to make an arbitrary (but deterministic) choice between the two. When using an option instead with this impl, ambiguity cases are explicitly stored as None, which allows us to detect the ambiguity and handle it in the way we want. This truly depends on the semantics of the application: the `crdt` module is just a toolbox of things that can be taken when needed when building models. In the precise case where we are using it here, i.e. for storing the website configuration, the logic is that if we have two website configurations and we don't know which one is the correct one, we can just disable website access until an admin gives a new configuration (we try to have a safe behavior instead of an unpredictable one -- but anyway, this is an extreme edge case, as it should mostly never happen that the timestamps are the same).

I understand now, but I'm not sure it's a good fit everywhere it's used. For website configuration, if two updates happen at the same time, I think it makes more sense to consider that an arbitrary one happened an instant before the other and got overwritten, than to merge both into the state "website disabled".
/// cases are explicitly stored as None, which allows us to detect the ambiguity and handle it in
/// the way we want. (This only works if we are happy to lose the value when an ambiguity
/// arises.)
impl<T> Crdt for Option<T>
where
T: Eq,
{
fn merge(&mut self, other: &Self) {
if self != other {
*self = None;
}
}
}
/// All types that implement `Ord` (a total order) can also implement a trivial CRDT /// All types that implement `Ord` (a total order) can also implement a trivial CRDT
/// defined by the merge rule: `a ⊔ b = max(a, b)`. Implement this trait for your type /// defined by the merge rule: `a ⊔ b = max(a, b)`. Implement this trait for your type
/// to enable this behavior. /// to enable this behavior.
@ -65,7 +85,3 @@ impl AutoCrdt for String {
impl AutoCrdt for bool { impl AutoCrdt for bool {
const WARN_IF_DIFFERENT: bool = true; const WARN_IF_DIFFERENT: bool = true;
} }
impl AutoCrdt for FixedBytes32 {
const WARN_IF_DIFFERENT: bool = true;
}
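
To make the merge rule above concrete, here is a usage sketch (the import path is an assumption, following the crate's `crdt` module layout shown later in this diff):

```rust
use garage_util::crdt::Crdt;

fn main() {
    // Identical values survive the merge...
    let mut a = Some("index.html".to_string());
    a.merge(&Some("index.html".to_string()));
    assert_eq!(a, Some("index.html".to_string()));

    // ...but any disagreement collapses to None, which callers can
    // detect and handle (e.g. disable website access until an admin
    // sets a new configuration).
    let mut b = Some("index.html".to_string());
    b.merge(&Some("home.html".to_string()));
    assert_eq!(b, None);

    // None is absorbing: once ambiguous, always ambiguous,
    // until overwritten at a higher level (e.g. by a Lww update).
    b.merge(&Some("index.html".to_string()));
    assert_eq!(b, None);
}
```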

View file

@ -0,0 +1,72 @@
use serde::{Deserialize, Serialize};
use crate::crdt::crdt::*;
/// Deletable object (once deleted, cannot go back)
#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq)]
pub enum Deletable<T> {
Present(T),
Deleted,
}
impl<T: Crdt> Deletable<T> {
/// Create a new deletable object that isn't deleted
pub fn present(v: T) -> Self {
Self::Present(v)
}
/// Create a new deletable object that is deleted
pub fn delete() -> Self {
Self::Deleted
}
/// As option
pub fn as_option(&self) -> Option<&T> {
match self {
Self::Present(v) => Some(v),
Self::Deleted => None,
}
}
/// As option, mutable
pub fn as_option_mut(&mut self) -> Option<&mut T> {
match self {
Self::Present(v) => Some(v),
Self::Deleted => None,
}
}
/// Into option
pub fn into_option(self) -> Option<T> {
match self {
Self::Present(v) => Some(v),
Self::Deleted => None,
}
}
/// Is object deleted?
pub fn is_deleted(&self) -> bool {
matches!(self, Self::Deleted)
}
}
impl<T> From<Option<T>> for Deletable<T> {
fn from(v: Option<T>) -> Self {
v.map(Self::Present).unwrap_or(Self::Deleted)
}
}
impl<T> From<Deletable<T>> for Option<T> {
fn from(v: Deletable<T>) -> Option<T> {
match v {
Deletable::Present(v) => Some(v),
Deletable::Deleted => None,
}
}
}
impl<T: Crdt> Crdt for Deletable<T> {
fn merge(&mut self, other: &Self) {
if let Deletable::Present(v) = self {
match other {
Deletable::Deleted => *self = Deletable::Deleted,
Deletable::Present(v2) => v.merge(v2),
}
}
}
}
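
A usage sketch for `Deletable`, with a toy value CRDT standing in for a real one (`MaxU64` is illustrative, not a type from this PR; the import path assumes the `pub use deletable::*` re-export added later in this diff):

```rust
use garage_util::crdt::{Crdt, Deletable};

// A trivial value CRDT for the demo: merge keeps the maximum.
#[derive(Clone, Copy, Debug, PartialEq)]
struct MaxU64(u64);
impl Crdt for MaxU64 {
    fn merge(&mut self, other: &Self) {
        self.0 = self.0.max(other.0);
    }
}

fn main() {
    let mut a = Deletable::present(MaxU64(1));
    a.merge(&Deletable::present(MaxU64(3)));
    assert_eq!(a.as_option(), Some(&MaxU64(3)));

    // Deletion wins, and it is permanent: merging a Present value
    // back in does not resurrect the object.
    a.merge(&Deletable::delete());
    assert!(a.is_deleted());
    a.merge(&Deletable::present(MaxU64(9)));
    assert!(a.is_deleted());
}
```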

View file

@ -57,11 +57,8 @@ where
} }
} }
/// Build a new CRDT from a previous non-compatible one /// Build a new LWW CRDT from its raw pieces: a timestamp and the value
/// pub fn raw(ts: u64, value: T) -> Self {
/// Compared to new, the CRDT's timestamp is not set to now
/// but must be set to the previous, non-compatible, CRDT's timestamp.
pub fn migrate_from_raw(ts: u64, value: T) -> Self {
Self { ts, v: value } Self { ts, v: value }
} }
@ -77,11 +74,21 @@ where
self.v = new_value; self.v = new_value;
} }
/// Get the timestamp currently associated with the value
pub fn timestamp(&self) -> u64 {
self.ts
}
/// Get the CRDT value /// Get the CRDT value
pub fn get(&self) -> &T { pub fn get(&self) -> &T {
&self.v &self.v
} }
/// Take the value inside the CRDT (discards the timestamp)
pub fn take(self) -> T {
self.v
}
/// Get a mutable reference to the CRDT's value /// Get a mutable reference to the CRDT's value
/// ///
/// This is useful to mutate the inner value without changing the LWW timestamp. /// This is useful to mutate the inner value without changing the LWW timestamp.
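
Taken together, the renamed constructor and the new accessors make migration code straightforward. A sketch, assuming `Lww` is imported from `garage_util::crdt` and that `String` gets `Crdt` through the crate's `AutoCrdt` blanket impl:

```rust
use garage_util::crdt::Lww;

fn main() {
    // `raw` (formerly `migrate_from_raw`) builds a register from an
    // explicit timestamp, e.g. when migrating from the 0.5 data format.
    let reg = Lww::raw(1_640_000_000_000, "value".to_string());
    assert_eq!(reg.timestamp(), 1_640_000_000_000);
    assert_eq!(reg.get(), "value");

    // `take` consumes the register and discards the timestamp.
    let v: String = reg.take();
    assert_eq!(v, "value");
}
```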

View file

@ -30,18 +30,20 @@ pub struct LwwMap<K, V> {
impl<K, V> LwwMap<K, V> impl<K, V> LwwMap<K, V>
where where
K: Ord, K: Clone + Ord,
V: Crdt, V: Clone + Crdt,
{ {
/// Create a new empty map CRDT /// Create a new empty map CRDT
pub fn new() -> Self { pub fn new() -> Self {
Self { vals: vec![] } Self { vals: vec![] }
} }
/// Used to migrate from a map defined in an incompatible format. This produces
/// a map that contains a single item with the specified timestamp (copied from /// This produces a map that contains a single item with the specified timestamp.
/// the incompatible format). Do this as many times as you have items to migrate, ///
/// and put them all together using the CRDT merge operator. /// Used to migrate from a map defined in an incompatible format. Do this as many
pub fn migrate_from_raw_item(k: K, ts: u64, v: V) -> Self { /// times as you have items to migrate, and put them all together using the
/// CRDT merge operator.
pub fn raw_item(k: K, ts: u64, v: V) -> Self {
Self { Self {
vals: vec![(k, ts, v)], vals: vec![(k, ts, v)],
} }
@ -62,6 +64,7 @@ where
/// ///
/// However extracting the mutator on its own and only sending that on the network is very /// However extracting the mutator on its own and only sending that on the network is very
/// interesting as it is much smaller than the whole map. /// interesting as it is much smaller than the whole map.
#[must_use = "CRDT mutators are meant to be merged into a CRDT and not ignored."]
pub fn update_mutator(&self, k: K, new_v: V) -> Self { pub fn update_mutator(&self, k: K, new_v: V) -> Self {
let new_vals = match self.vals.binary_search_by(|(k2, _, _)| k2.cmp(&k)) { let new_vals = match self.vals.binary_search_by(|(k2, _, _)| k2.cmp(&k)) {
Ok(i) => { Ok(i) => {
@ -73,6 +76,38 @@ where
}; };
Self { vals: new_vals } Self { vals: new_vals }
} }
/// Updates a value in place in the map (this generates
/// a new timestamp)
pub fn update_in_place(&mut self, k: K, new_v: V) {
self.merge(&self.update_mutator(k, new_v));
}
/// Updates a value in place in the map, from a
/// (key, timestamp, value) triple, only if the given
/// timestamp is larger than the timestamp currently
/// in the map
pub fn merge_raw(&mut self, k: &K, ts2: u64, v2: &V) {
match self.vals.binary_search_by(|(k2, _, _)| k2.cmp(k)) {
Ok(i) => {
let (_, ts1, _v1) = &self.vals[i];
match ts2.cmp(ts1) {
Ordering::Greater => {
self.vals[i].1 = ts2;
self.vals[i].2 = v2.clone();
}
Ordering::Equal => {
self.vals[i].2.merge(v2);
}
Ordering::Less => (),
}
}
Err(i) => {
self.vals.insert(i, (k.clone(), ts2, v2.clone()));
}
}
}
/// Takes all of the values of the map and returns them. The current map is reset to the /// Takes all of the values of the map and returns them. The current map is reset to the
/// empty map. This is very useful to produce in-place a new map that contains only a delta /// empty map. This is very useful to produce in-place a new map that contains only a delta
/// that modifies a certain value: /// that modifies a certain value:
@ -99,10 +134,12 @@ where
let vals = std::mem::take(&mut self.vals); let vals = std::mem::take(&mut self.vals);
Self { vals } Self { vals }
} }
/// Removes all values from the map /// Removes all values from the map
pub fn clear(&mut self) { pub fn clear(&mut self) {
self.vals.clear(); self.vals.clear();
} }
/// Get a reference to the value assigned to a key /// Get a reference to the value assigned to a key
pub fn get(&self, k: &K) -> Option<&V> { pub fn get(&self, k: &K) -> Option<&V> {
match self.vals.binary_search_by(|(k2, _, _)| k2.cmp(k)) { match self.vals.binary_search_by(|(k2, _, _)| k2.cmp(k)) {
@ -110,6 +147,16 @@ where
Err(_) => None, Err(_) => None,
} }
} }
/// Get the timestamp of the value assigned to a key, or 0 if
/// no value is assigned
pub fn get_timestamp(&self, k: &K) -> u64 {
match self.vals.binary_search_by(|(k2, _, _)| k2.cmp(k)) {
Ok(i) => self.vals[i].1,
Err(_) => 0,
}
}
/// Gets a reference to all of the items, as a slice. Useful to iterate on all map values. /// Gets a reference to all of the items, as a slice. Useful to iterate on all map values.
/// In most cases you will want to ignore the timestamp (second item of the tuple). /// In most cases you will want to ignore the timestamp (second item of the tuple).
pub fn items(&self) -> &[(K, u64, V)] { pub fn items(&self) -> &[(K, u64, V)] {
@ -134,32 +181,15 @@ where
{ {
fn merge(&mut self, other: &Self) { fn merge(&mut self, other: &Self) {
for (k, ts2, v2) in other.vals.iter() { for (k, ts2, v2) in other.vals.iter() {
match self.vals.binary_search_by(|(k2, _, _)| k2.cmp(k)) { self.merge_raw(k, *ts2, v2);
Ok(i) => {
let (_, ts1, _v1) = &self.vals[i];
match ts2.cmp(ts1) {
Ordering::Greater => {
self.vals[i].1 = *ts2;
self.vals[i].2 = v2.clone();
}
Ordering::Equal => {
self.vals[i].2.merge(v2);
}
Ordering::Less => (),
}
}
Err(i) => {
self.vals.insert(i, (k.clone(), *ts2, v2.clone()));
}
}
} }
} }
} }
impl<K, V> Default for LwwMap<K, V> impl<K, V> Default for LwwMap<K, V>
where where
K: Ord, K: Clone + Ord,
V: Crdt, V: Clone + Crdt,
{ {
fn default() -> Self { fn default() -> Self {
Self::new() Self::new()
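
A usage sketch for the new methods (same assumptions as above: `garage_util::crdt` paths, and `bool: Crdt` via the `AutoCrdt` blanket impl):

```rust
use garage_util::crdt::LwwMap;

fn main() {
    let mut map: LwwMap<String, bool> = LwwMap::new();

    // update_in_place is merge(update_mutator(..)), with a fresh timestamp.
    map.update_in_place("website".to_string(), true);
    let ts = map.get_timestamp(&"website".to_string());

    // merge_raw only applies a (key, timestamp, value) triple if its
    // timestamp wins; an older write is a no-op.
    map.merge_raw(&"website".to_string(), ts - 1, &false);
    assert_eq!(map.get(&"website".to_string()), Some(&true));

    // An unknown key reports timestamp 0.
    assert_eq!(map.get_timestamp(&"missing".to_string()), 0);
}
```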

View file

@ -1,3 +1,5 @@
use std::iter::{FromIterator, IntoIterator};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use crate::crdt::crdt::*; use crate::crdt::crdt::*;
@ -33,6 +35,7 @@ where
/// This can be used to build a delta-mutator: /// This can be used to build a delta-mutator:
/// when merged with another map, the value will be added or CRDT-merged if a previous /// when merged with another map, the value will be added or CRDT-merged if a previous
/// value already exists. /// value already exists.
#[must_use = "CRDT mutators are meant to be merged into a CRDT and not ignored."]
pub fn put_mutator(k: K, v: V) -> Self { pub fn put_mutator(k: K, v: V) -> Self {
Self { vals: vec![(k, v)] } Self { vals: vec![(k, v)] }
} }
@ -97,3 +100,26 @@ where
Self::new() Self::new()
} }
} }
/// A crdt map can be created from an iterator of key-value pairs.
/// Note that all keys in the iterator must be distinct:
/// this function will panic if that is not the case.
impl<K, V> FromIterator<(K, V)> for Map<K, V>
where
K: Clone + Ord,
V: Clone + Crdt,
{
fn from_iter<T: IntoIterator<Item = (K, V)>>(iter: T) -> Self {
let mut vals: Vec<(K, V)> = iter.into_iter().collect();
vals.sort_by_cached_key(|tup| tup.0.clone());
// sanity check
for i in 1..vals.len() {
if vals[i - 1].0 == vals[i].0 {
panic!("Duplicate key in crdt::Map resulting from .from_iter() or .collect()");
}
}
Self { vals }
}
}
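
A usage sketch of the new `FromIterator` impl (assuming `Map` is imported from `garage_util::crdt` and `bool: Crdt` via `AutoCrdt`, as above):

```rust
use garage_util::crdt::Map;

fn main() {
    // Distinct keys collect fine; entries end up sorted internally.
    let _map: Map<String, bool> = vec![
        ("beta".to_string(), true),
        ("alpha".to_string(), false),
    ]
    .into_iter()
    .collect();

    // Duplicate keys hit the sanity check:
    // vec![("a".to_string(), true), ("a".to_string(), false)]
    //     .into_iter()
    //     .collect::<Map<String, bool>>();
    // -> panics: "Duplicate key in crdt::Map resulting from .from_iter() or .collect()"
}
```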

View file

@ -7,17 +7,19 @@
//! counter. Alice does +1 on her copy, she reads 1. Bob does +3 on his copy, he reads 3. Now, //! counter. Alice does +1 on her copy, she reads 1. Bob does +3 on his copy, he reads 3. Now,
//! it is easy to merge their counters, and the order does not matter: we always get 4. //! it is easy to merge their counters, and the order does not matter: we always get 4.
//! //!
//! Learn more about CRDT [on Wikipedia](https://en.wikipedia.org/wiki/Conflict-free_replicated_data_type) //! Learn more about CRDTs [on Wikipedia](https://en.wikipedia.org/wiki/Conflict-free_replicated_data_type)
mod bool; mod bool;
#[allow(clippy::module_inception)] #[allow(clippy::module_inception)]
mod crdt; mod crdt;
mod deletable;
mod lww; mod lww;
mod lww_map; mod lww_map;
mod map; mod map;
pub use self::bool::*; pub use self::bool::*;
pub use crdt::*; pub use crdt::*;
pub use deletable::*;
pub use lww::*; pub use lww::*;
pub use lww_map::*; pub use lww_map::*;
pub use map::*; pub use map::*;
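
The grow-only counter from the module doc is easy to reproduce as a standalone toy (this `GCounter` is illustrative, not a type that exists in garage): each replica increments its own slot, and merge takes the per-replica maximum, so Alice's +1 and Bob's +3 combine to 4 in any merge order.

```rust
use std::collections::BTreeMap;

#[derive(Clone, Default, Debug)]
struct GCounter {
    slots: BTreeMap<&'static str, u64>,
}

impl GCounter {
    fn incr(&mut self, replica: &'static str, n: u64) {
        *self.slots.entry(replica).or_insert(0) += n;
    }
    fn read(&self) -> u64 {
        self.slots.values().sum()
    }
    // The CRDT merge: per-replica maximum, commutative and idempotent.
    fn merge(&mut self, other: &Self) {
        for (&replica, &n) in &other.slots {
            let e = self.slots.entry(replica).or_insert(0);
            *e = (*e).max(n);
        }
    }
}

fn main() {
    let (mut alice, mut bob) = (GCounter::default(), GCounter::default());
    alice.incr("alice", 1);
    bob.incr("bob", 3);
    let mut merged = alice.clone();
    merged.merge(&bob);
    assert_eq!(merged.read(), 4);
}
```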

View file

@ -5,7 +5,7 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::fmt; use std::fmt;
/// An array of 32 bytes /// An array of 32 bytes
#[derive(Default, PartialOrd, Ord, Clone, Hash, PartialEq, Copy)] #[derive(Default, Clone, Hash, PartialEq, Eq, PartialOrd, Ord, Copy)]
pub struct FixedBytes32([u8; 32]); pub struct FixedBytes32([u8; 32]);
impl From<[u8; 32]> for FixedBytes32 { impl From<[u8; 32]> for FixedBytes32 {
@ -20,8 +20,6 @@ impl std::convert::AsRef<[u8]> for FixedBytes32 {
} }
} }
impl Eq for FixedBytes32 {}
impl fmt::Debug for FixedBytes32 { impl fmt::Debug for FixedBytes32 {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}…", hex::encode(&self.0[..8])) write!(f, "{}…", hex::encode(&self.0[..8]))

View file

@ -59,8 +59,8 @@ pub enum Error {
)] )]
Quorum(usize, usize, usize, Vec<String>), Quorum(usize, usize, usize, Vec<String>),
#[error(display = "Bad RPC: {}", _0)] #[error(display = "Unexpected RPC message: {}", _0)]
BadRpc(String), UnexpectedRpcMessage(String),
#[error(display = "Corrupt data: does not match hash {:?}", _0)] #[error(display = "Corrupt data: does not match hash {:?}", _0)]
CorruptData(Hash), CorruptData(Hash),
@ -69,6 +69,12 @@ pub enum Error {
Message(String), Message(String),
} }
impl Error {
pub fn unexpected_rpc_message<T: Serialize>(v: T) -> Self {
Self::UnexpectedRpcMessage(debug_serialize(&v))
}
}
impl From<sled::transaction::TransactionError<Error>> for Error { impl From<sled::transaction::TransactionError<Error>> for Error {
fn from(e: sled::transaction::TransactionError<Error>) -> Error { fn from(e: sled::transaction::TransactionError<Error>) -> Error {
match e { match e {
@ -119,6 +125,35 @@ where
} }
} }
/// Trait to map any error type to Error::Message
pub trait OkOrMessage {
type S;
fn ok_or_message<M: Into<String>>(self, message: M) -> Result<Self::S, Error>;
}
impl<T, E> OkOrMessage for Result<T, E>
where
E: std::fmt::Display,
{
type S = T;
fn ok_or_message<M: Into<String>>(self, message: M) -> Result<T, Error> {
match self {
Ok(x) => Ok(x),
Err(e) => Err(Error::Message(format!("{}: {}", message.into(), e))),
}
}
}
impl<T> OkOrMessage for Option<T> {
type S = T;
fn ok_or_message<M: Into<String>>(self, message: M) -> Result<T, Error> {
match self {
Some(x) => Ok(x),
None => Err(Error::Message(message.into())),
}
}
}
// Custom serialization for our error type, for use in RPC. // Custom serialization for our error type, for use in RPC.
// Errors are serialized as a string of their Display representation. // Errors are serialized as a string of their Display representation.
// Upon deserialization, they all become a RemoteError with the // Upon deserialization, they all become a RemoteError with the
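
A sketch of how the new `OkOrMessage` trait reads at call sites. The function names here are hypothetical; the module path matches the `garage_util::error::Error` import used elsewhere in this diff:

```rust
use std::collections::HashMap;

use garage_util::error::{Error, OkOrMessage};

fn get_bucket_id(aliases: &HashMap<String, u64>, name: &str) -> Result<u64, Error> {
    // Option<T> -> Result<T, Error::Message>, with a context message.
    aliases
        .get(name)
        .copied()
        .ok_or_message(format!("no bucket alias named {}", name))
}

fn parse_quota(s: &str) -> Result<u64, Error> {
    // Result<T, E: Display> -> Result<T, Error>, prefixing the message.
    s.parse::<u64>().ok_or_message("invalid quota value")
}
```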

View file

@ -10,6 +10,16 @@ pub fn now_msec() -> u64 {
.as_millis() as u64 .as_millis() as u64
} }
/// Increment logical clock
pub fn increment_logical_clock(prev: u64) -> u64 {
std::cmp::max(prev + 1, now_msec())
}
/// Increment a logical clock based on two previous clock values
pub fn increment_logical_clock_2(prev: u64, prev2: u64) -> u64 {
std::cmp::max(prev2 + 1, std::cmp::max(prev + 1, now_msec()))
}
/// Convert a timestamp represented as milliseconds since UNIX Epoch to /// Convert a timestamp represented as milliseconds since UNIX Epoch to
/// its RFC3339 representation, such as "2021-01-01T12:30:00Z" /// its RFC3339 representation, such as "2021-01-01T12:30:00Z"
pub fn msec_to_rfc3339(msecs: u64) -> String { pub fn msec_to_rfc3339(msecs: u64) -> String {
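
A quick sketch of the intended guarantee (assuming the helpers live in `garage_util::time` alongside `now_msec`):

```rust
use garage_util::time::{increment_logical_clock, increment_logical_clock_2, now_msec};

fn main() {
    // Strictly increases even if two updates land in the same
    // millisecond or the wall clock steps backwards.
    let t1 = now_msec();
    let t2 = increment_logical_clock(t1);
    assert!(t2 > t1);

    // The two-argument variant dominates both previous clocks, e.g. two
    // timestamps that must both be superseded by a new write.
    let t3 = increment_logical_clock_2(t1, t2);
    assert!(t3 > t1 && t3 > t2);
}
```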

View file

@ -1,6 +1,6 @@
[package] [package]
name = "garage_web" name = "garage_web"
version = "0.5.0" version = "0.6.0"
authors = ["Alex Auvolat <alex@adnab.me>", "Quentin Dufour <quentin@dufour.io>"] authors = ["Alex Auvolat <alex@adnab.me>", "Quentin Dufour <quentin@dufour.io>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"
@ -14,10 +14,10 @@ path = "lib.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies] [dependencies]
garage_api = { version = "0.5.0", path = "../api" } garage_api = { version = "0.6.0", path = "../api" }
garage_model = { version = "0.5.0", path = "../model" } garage_model = { version = "0.6.0", path = "../model" }
garage_util = { version = "0.5.0", path = "../util" } garage_util = { version = "0.6.0", path = "../util" }
garage_table = { version = "0.5.0", path = "../table" } garage_table = { version = "0.6.0", path = "../table" }
err-derive = "0.3" err-derive = "0.3"
log = "0.4" log = "0.4"

View file

@ -10,10 +10,13 @@ use hyper::{
}; };
use crate::error::*; use crate::error::*;
use garage_api::helpers::{authority_to_host, host_to_bucket}; use garage_api::helpers::{authority_to_host, host_to_bucket};
use garage_api::s3_get::{handle_get, handle_head}; use garage_api::s3_get::{handle_get, handle_head};
use garage_model::bucket_table::*;
use garage_model::bucket_table::Bucket;
use garage_model::garage::Garage; use garage_model::garage::Garage;
use garage_table::*; use garage_table::*;
use garage_util::error::Error as GarageError; use garage_util::error::Error as GarageError;
@ -77,31 +80,42 @@ async fn serve_file(garage: Arc<Garage>, req: Request<Body>) -> Result<Response<
// Get bucket // Get bucket
let host = authority_to_host(authority)?; let host = authority_to_host(authority)?;
let root = &garage.config.s3_web.root_domain; let root = &garage.config.s3_web.root_domain;
let bucket = host_to_bucket(&host, root).unwrap_or(&host);
// Check bucket is exposed as a website let bucket_name = host_to_bucket(&host, root).unwrap_or(&host);
let bucket_desc = garage let bucket_id = garage
.bucket_table .bucket_alias_table
.get(&EmptyKey, &bucket.to_string()) .get(&EmptyKey, &bucket_name.to_string())
.await? .await?
.filter(|b| !b.is_deleted()) .map(|x| x.state.take())
.flatten()
.ok_or(Error::NotFound)?; .ok_or(Error::NotFound)?;
match bucket_desc.state.get() { // Check bucket isn't deleted and has website access enabled
BucketState::Present(params) if *params.website.get() => Ok(()), let _: Bucket = garage
_ => Err(Error::NotFound), .bucket_table
}?; .get(&EmptyKey, &bucket_id)
.await?
.filter(|b| {
b.state
.as_option()
.map(|x| x.website_config.get().is_some())
.unwrap_or(false)
})
.ok_or(Error::NotFound)?;
// Get path // Get path
let path = req.uri().path().to_string(); let path = req.uri().path().to_string();
let index = &garage.config.s3_web.index; let index = &garage.config.s3_web.index;
let key = path_to_key(&path, index)?; let key = path_to_key(&path, index)?;
info!("Selected bucket: \"{}\", selected key: \"{}\"", bucket, key); info!(
"Selected bucket: \"{}\" {:?}, selected key: \"{}\"",
bucket_name, bucket_id, key
);
let res = match *req.method() { let res = match *req.method() {
Method::HEAD => handle_head(garage, &req, bucket, &key).await?, Method::HEAD => handle_head(garage, &req, bucket_id, &key).await?,
Method::GET => handle_get(garage, &req, bucket, &key).await?, Method::GET => handle_get(garage, &req, bucket_id, &key).await?,
_ => return Err(Error::BadRequest("HTTP method not supported".to_string())), _ => return Err(Error::BadRequest("HTTP method not supported".to_string())),
}; };