Compare commits

No commits in common. "main" and "caddy-fileserver-browse-s3" have entirely different histories.

main ... caddy-fileserver-browse-s3
@@ -5,7 +5,6 @@ when:
   - pull_request
   - deployment
   - cron
-  - manual

 steps:
   - name: check formatting
@@ -34,9 +33,7 @@ steps:
       - ./result/bin/garage_util-*
       - ./result/bin/garage_web-*
       - ./result/bin/garage-*
-      - GARAGE_TEST_INTEGRATION_DB_ENGINE=lmdb ./result/bin/integration-* || (cat tmp-garage-integration/stderr.log; false)
-      - nix-shell --attr ci --run "killall -9 garage" || true
-      - GARAGE_TEST_INTEGRATION_DB_ENGINE=sqlite ./result/bin/integration-* || (cat tmp-garage-integration/stderr.log; false)
+      - ./result/bin/integration-* || (cat tmp-garage-integration/stderr.log; false)
       - rm result
       - rm -rv tmp-garage-integration
Cargo.lock — 271 changed lines (generated)
@@ -17,41 +17,6 @@ version = "1.0.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"

-[[package]]
-name = "aead"
-version = "0.5.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0"
-dependencies = [
- "crypto-common",
- "generic-array",
-]
-
-[[package]]
-name = "aes"
-version = "0.8.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0"
-dependencies = [
- "cfg-if",
- "cipher",
- "cpufeatures",
-]
-
-[[package]]
-name = "aes-gcm"
-version = "0.10.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1"
-dependencies = [
- "aead",
- "aes",
- "cipher",
- "ctr",
- "ghash",
- "subtle",
-]
-
 [[package]]
 name = "ahash"
 version = "0.8.7"
@@ -155,18 +120,6 @@ version = "1.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6"

-[[package]]
-name = "argon2"
-version = "0.5.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3c3610892ee6e0cbce8ae2700349fcf8f98adb0dbfbee85aec3c9179d29cc072"
-dependencies = [
- "base64ct",
- "blake2",
- "cpufeatures",
- "password-hash",
-]
-
 [[package]]
 name = "arrayvec"
 version = "0.5.2"
@@ -796,16 +749,6 @@ dependencies = [
  "windows-targets 0.52.0",
 ]

-[[package]]
-name = "cipher"
-version = "0.4.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad"
-dependencies = [
- "crypto-common",
- "inout",
-]
-
 [[package]]
 name = "clap"
 version = "2.34.0"
@@ -905,9 +848,9 @@ dependencies = [

 [[package]]
 name = "crc32fast"
-version = "1.4.0"
+version = "1.3.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa"
+checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d"
 dependencies = [
  "cfg-if",
 ]
@@ -921,6 +864,15 @@ dependencies = [
  "crossbeam-utils",
 ]

+[[package]]
+name = "crossbeam-epoch"
+version = "0.9.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
+dependencies = [
+ "crossbeam-utils",
+]
+
 [[package]]
 name = "crossbeam-queue"
 version = "0.3.11"
@@ -965,19 +917,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
 dependencies = [
  "generic-array",
  "rand_core",
  "typenum",
 ]

-[[package]]
-name = "ctr"
-version = "0.9.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835"
-dependencies = [
- "cipher",
-]
-
 [[package]]
 name = "darling"
 version = "0.20.5"
@@ -1213,6 +1155,16 @@ dependencies = [
 name = "format_table"
 version = "0.1.1"

+[[package]]
+name = "fs2"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213"
+dependencies = [
+ "libc",
+ "winapi",
+]
+
 [[package]]
 name = "futures"
 version = "0.3.30"
@@ -1302,9 +1254,18 @@ dependencies = [
  "slab",
 ]

+[[package]]
+name = "fxhash"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c"
+dependencies = [
+ "byteorder",
+]
+
 [[package]]
 name = "garage"
-version = "1.0.1"
+version = "0.9.1"
 dependencies = [
  "assert-json-diff",
  "async-trait",
@@ -1346,11 +1307,9 @@ dependencies = [
  "serde",
  "serde_bytes",
  "serde_json",
- "sha1",
  "sha2",
  "static_init",
  "structopt",
- "syslog-tracing",
  "timeago",
  "tokio",
  "toml",
@@ -1360,17 +1319,12 @@ dependencies = [

 [[package]]
 name = "garage_api"
-version = "1.0.1"
+version = "0.9.1"
 dependencies = [
- "aes-gcm",
- "argon2",
  "async-compression",
  "async-trait",
  "base64 0.21.7",
  "bytes",
  "chrono",
- "crc32c",
- "crc32fast",
- "crypto-common",
  "err-derive",
  "form_urlencoded",
@@ -1404,18 +1358,16 @@ dependencies = [
  "serde",
  "serde_bytes",
  "serde_json",
- "sha1",
  "sha2",
  "tokio",
  "tokio-stream",
  "tokio-util 0.7.10",
  "tracing",
  "url",
 ]

 [[package]]
 name = "garage_block"
-version = "1.0.1"
+version = "0.9.1"
 dependencies = [
  "arc-swap",
  "async-compression",
@@ -1442,21 +1394,20 @@ dependencies = [

 [[package]]
 name = "garage_db"
-version = "1.0.1"
+version = "0.9.1"
 dependencies = [
  "err-derive",
  "heed",
  "hexdump",
  "mktemp",
- "r2d2",
- "r2d2_sqlite",
  "rusqlite",
+ "sled",
  "tracing",
 ]

 [[package]]
 name = "garage_model"
-version = "1.0.1"
+version = "0.9.1"
 dependencies = [
  "arc-swap",
  "async-trait",
@@ -1473,9 +1424,7 @@ dependencies = [
  "garage_table",
  "garage_util",
  "hex",
- "http 1.0.0",
  "opentelemetry",
- "parse_duration",
  "rand",
  "serde",
  "serde_bytes",
@@ -1486,7 +1435,7 @@ dependencies = [

 [[package]]
 name = "garage_net"
-version = "1.0.1"
+version = "0.9.1"
 dependencies = [
  "arc-swap",
  "async-trait",
@@ -1512,7 +1461,7 @@ dependencies = [

 [[package]]
 name = "garage_rpc"
-version = "1.0.1"
+version = "0.9.1"
 dependencies = [
  "arc-swap",
  "async-trait",
@@ -1527,7 +1476,6 @@ dependencies = [
  "garage_util",
  "gethostname",
  "hex",
- "ipnet",
  "itertools 0.12.1",
  "k8s-openapi",
  "kube",
@@ -1548,7 +1496,7 @@ dependencies = [

 [[package]]
 name = "garage_table"
-version = "1.0.1"
+version = "0.9.1"
 dependencies = [
  "arc-swap",
  "async-trait",
@@ -1570,7 +1518,7 @@ dependencies = [

 [[package]]
 name = "garage_util"
-version = "1.0.1"
+version = "0.9.1"
 dependencies = [
  "arc-swap",
  "async-trait",
@@ -1604,7 +1552,7 @@ dependencies = [

 [[package]]
 name = "garage_web"
-version = "1.0.1"
+version = "0.9.1"
 dependencies = [
  "err-derive",
  "futures",
@@ -1653,16 +1601,6 @@ dependencies = [
  "wasi",
 ]

-[[package]]
-name = "ghash"
-version = "0.5.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1"
-dependencies = [
- "opaque-debug",
- "polyval",
-]
-
 [[package]]
 name = "gimli"
 version = "0.28.1"
@@ -1756,9 +1694,9 @@ dependencies = [

 [[package]]
 name = "hashlink"
-version = "0.9.0"
+version = "0.8.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "692eaaf7f7607518dd3cef090f1474b61edc5301d8012f09579920df68b725ee"
+checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7"
 dependencies = [
  "hashbrown 0.14.3",
 ]
@@ -2112,15 +2050,6 @@ dependencies = [
  "hashbrown 0.14.3",
 ]

-[[package]]
-name = "inout"
-version = "0.1.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5"
-dependencies = [
- "generic-array",
-]
-
 [[package]]
 name = "instant"
 version = "0.1.12"
@@ -2424,9 +2353,9 @@ dependencies = [

 [[package]]
 name = "libsqlite3-sys"
-version = "0.28.0"
+version = "0.27.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0c10584274047cb335c23d3e61bcef8e323adae7c5c8c760540f73610177fc3f"
+checksum = "cf4e226dcd58b4be396f7bd3c20da8fdee2911400705297ba7d2d7cc2c30f716"
 dependencies = [
  "cc",
  "pkg-config",
@@ -2701,12 +2630,6 @@ version = "1.19.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"

-[[package]]
-name = "opaque-debug"
-version = "0.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381"
-
 [[package]]
 name = "openssl-probe"
 version = "0.1.5"
@@ -2876,17 +2799,6 @@ dependencies = [
  "regex",
 ]

-[[package]]
-name = "password-hash"
-version = "0.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166"
-dependencies = [
- "base64ct",
- "rand_core",
- "subtle",
-]
-
 [[package]]
 name = "paste"
 version = "1.0.14"
@@ -3044,18 +2956,6 @@ dependencies = [
  "winapi",
 ]

-[[package]]
-name = "polyval"
-version = "0.6.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25"
-dependencies = [
- "cfg-if",
- "cpufeatures",
- "opaque-debug",
- "universal-hash",
-]
-
 [[package]]
 name = "powerfmt"
 version = "0.2.0"
@@ -3204,28 +3104,6 @@ dependencies = [
  "proc-macro2",
 ]

-[[package]]
-name = "r2d2"
-version = "0.8.10"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "51de85fb3fb6524929c8a2eb85e6b6d363de4e8c48f9e2c2eac4944abc181c93"
-dependencies = [
- "log",
- "parking_lot 0.12.1",
- "scheduled-thread-pool",
-]
-
-[[package]]
-name = "r2d2_sqlite"
-version = "0.24.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6a982edf65c129796dba72f8775b292ef482b40d035e827a9825b3bc07ccc5f2"
-dependencies = [
- "r2d2",
- "rusqlite",
- "uuid",
-]
-
 [[package]]
 name = "rand"
 version = "0.8.5"
@@ -3419,9 +3297,9 @@ checksum = "3cd14fd5e3b777a7422cca79358c57a8f6e3a703d9ac187448d0daf220c2407f"

 [[package]]
 name = "rusqlite"
-version = "0.31.0"
+version = "0.30.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b838eba278d213a8beaf485bd313fd580ca4505a00d5871caeb1457c55322cae"
+checksum = "a78046161564f5e7cd9008aff3b2990b3850dc8e0349119b98e8f251e099f24d"
 dependencies = [
  "bitflags 2.4.2",
  "fallible-iterator",
@@ -3586,15 +3464,6 @@ dependencies = [
  "windows-sys 0.52.0",
 ]

-[[package]]
-name = "scheduled-thread-pool"
-version = "0.2.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19"
-dependencies = [
- "parking_lot 0.12.1",
-]
-
 [[package]]
 name = "schemars"
 version = "0.8.16"
@@ -3842,6 +3711,22 @@ dependencies = [
  "autocfg",
 ]

+[[package]]
+name = "sled"
+version = "0.34.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7f96b4737c2ce5987354855aed3797279def4ebf734436c6aa4552cf8e169935"
+dependencies = [
+ "crc32fast",
+ "crossbeam-epoch",
+ "crossbeam-utils",
+ "fs2",
+ "fxhash",
+ "libc",
+ "log",
+ "parking_lot 0.11.2",
+]
+
 [[package]]
 name = "smallvec"
 version = "1.13.1"
@@ -3987,17 +3872,6 @@ dependencies = [
  "unicode-xid",
 ]

-[[package]]
-name = "syslog-tracing"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "340b1540dcdb6b066bc2966e7974f977ab1a38f21b2be189014ffb0cc2405768"
-dependencies = [
- "libc",
- "tracing-core",
- "tracing-subscriber",
-]
-
 [[package]]
 name = "system-configuration"
 version = "0.5.1"
@@ -4082,9 +3956,9 @@ dependencies = [

 [[package]]
 name = "time"
-version = "0.3.36"
+version = "0.3.34"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885"
+checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749"
 dependencies = [
  "deranged",
  "num-conv",
@@ -4102,9 +3976,9 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3"

 [[package]]
 name = "time-macros"
-version = "0.2.18"
+version = "0.2.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf"
+checksum = "7ba3a3ef41e6672a2f0f001392bb5dcd3ff0a9992d618ca761a11c3121547774"
 dependencies = [
  "num-conv",
  "time-core",
@@ -4501,16 +4375,6 @@ version = "0.2.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c"

-[[package]]
-name = "universal-hash"
-version = "0.5.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea"
-dependencies = [
- "crypto-common",
- "subtle",
-]
-
 [[package]]
 name = "unsafe-libyaml"
 version = "0.2.10"
@@ -4553,7 +4417,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "79daa5ed5740825c40b389c5e50312b9c86df53fccd33f281df655642b43869d"
 dependencies = [
  "getrandom",
- "rand",
 ]

 [[package]]
Cargo.toml — 30 changed lines
@@ -21,20 +21,19 @@ default-members = ["src/garage"]

 # Internal Garage crates
 format_table = { version = "0.1.1", path = "src/format-table" }
-garage_api = { version = "1.0.1", path = "src/api" }
-garage_block = { version = "1.0.1", path = "src/block" }
-garage_db = { version = "1.0.1", path = "src/db", default-features = false }
-garage_model = { version = "1.0.1", path = "src/model", default-features = false }
-garage_net = { version = "1.0.1", path = "src/net" }
-garage_rpc = { version = "1.0.1", path = "src/rpc" }
-garage_table = { version = "1.0.1", path = "src/table" }
-garage_util = { version = "1.0.1", path = "src/util" }
-garage_web = { version = "1.0.1", path = "src/web" }
+garage_api = { version = "0.9.1", path = "src/api" }
+garage_block = { version = "0.9.1", path = "src/block" }
+garage_db = { version = "0.9.1", path = "src/db", default-features = false }
+garage_model = { version = "0.9.1", path = "src/model", default-features = false }
+garage_net = { version = "0.9.1", path = "src/net" }
+garage_rpc = { version = "0.9.1", path = "src/rpc" }
+garage_table = { version = "0.9.1", path = "src/table" }
+garage_util = { version = "0.9.1", path = "src/util" }
+garage_web = { version = "0.9.1", path = "src/web" }
 k2v-client = { version = "0.0.4", path = "src/k2v-client" }

 # External crates from crates.io
 arc-swap = "1.0"
-argon2 = "0.5"
 async-trait = "0.1.7"
 backtrace = "0.3"
 base64 = "0.21"
@@ -43,8 +42,6 @@ bytes = "1.0"
 bytesize = "1.1"
 cfg-if = "1.0"
 chrono = "0.4"
-crc32fast = "1.4"
-crc32c = "0.6"
 crypto-common = "0.1"
 digest = "0.10"
 err-derive = "0.3"
@@ -55,7 +52,6 @@ hexdump = "0.1"
 hmac = "0.12"
 idna = "0.5"
 itertools = "0.12"
-ipnet = "2.9.0"
 lazy_static = "1.4"
 md-5 = "0.10"
 mktemp = "0.5"
@@ -65,26 +61,22 @@ parse_duration = "2.1"
 pin-project = "1.0.12"
 pnet_datalink = "0.34"
 rand = "0.8"
-sha1 = "0.10"
 sha2 = "0.10"
 timeago = { version = "0.4", default-features = false }
 xxhash-rust = { version = "0.8", default-features = false, features = ["xxh3"] }

-aes-gcm = { version = "0.10", features = ["aes", "stream"] }
 sodiumoxide = { version = "0.2.5-0", package = "kuska-sodiumoxide" }
 kuska-handshake = { version = "0.2.0", features = ["default", "async_std"] }

 clap = { version = "4.1", features = ["derive", "env"] }
 pretty_env_logger = "0.5"
 structopt = { version = "0.3", default-features = false }
-syslog-tracing = "0.3"
 tracing = "0.1"
 tracing-subscriber = { version = "0.3", features = ["env-filter"] }

 heed = { version = "0.11", default-features = false, features = ["lmdb"] }
-rusqlite = "0.31.0"
-r2d2 = "0.8"
-r2d2_sqlite = "0.24"
+rusqlite = "0.30.0"
+sled = "0.34"

 async-compression = { version = "0.4", features = ["tokio", "zstd"] }
 zstd = { version = "0.13", default-features = false }
@@ -40,6 +40,7 @@ in {
   features = [
     "garage/bundled-libs"
     "garage/k2v"
+    "garage/sled"
     "garage/lmdb"
     "garage/sqlite"
   ];
@@ -98,6 +98,7 @@ paths:
             type: string
           example:
             - "k2v"
+            - "sled"
            - "lmdb"
            - "sqlite"
            - "consul-discovery"
@@ -23,7 +23,7 @@ client = minio.Minio(
   "GKyourapikey",
   "abcd[...]1234",
   # Force the region, this is specific to garage
-  region="garage",
+  region="region",
 )
 ```
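For context, the hunk above edits a Python example from the docs. A minimal sketch of the full client call it belongs to, reusing the placeholder credentials shown in the hunk (the `localhost:3900` endpoint and `secure=False` are assumptions for a local test node, not part of the diff):

```python
import minio

client = minio.Minio(
    "localhost:3900",   # S3 API endpoint (assumption: local node, default port)
    "GKyourapikey",     # access key placeholder from the docs
    "abcd[...]1234",    # secret key placeholder from the docs
    # Force the region, this is specific to garage
    region="garage",    # value on main; the branch pins "region" instead
    secure=False,       # plain HTTP when talking to Garage directly
)

# Quick connectivity check: list the buckets this key can see.
for bucket in client.list_buckets():
    print(bucket.name)
```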
@@ -80,53 +80,6 @@ To test your new configuration, just reload your Nextcloud webpage and start sen

 *External link:* [Nextcloud Documentation > Primary Storage](https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/primary_storage.html)

-#### SSE-C encryption (since Garage v1.0)
-
-Since version 1.0, Garage supports server-side encryption with customer keys
-(SSE-C). In this mode, Garage is responsible for encrypting and decrypting
-objects, but it does not store the encryption key itself. The encryption key
-should be provided by Nextcloud upon each request. This mode of operation is
-supported by Nextcloud and it has successfully been tested together with
-Garage.
-
-To enable SSE-C encryption:
-
-1. Make sure your Garage server is accessible via SSL through a reverse proxy
-   such as Nginx, and that it is using a valid public certificate (Nextcloud
-   might be able to connect to an S3 server that is using a self-signed
-   certificate, but you will lose many hours while trying, so don't).
-   Configure values for `use_ssl` and `port` accordingly in your `config.php`
-   file.
-
-2. Generate an encryption key using the following command:
-
-   ```
-   openssl rand -base64 32
-   ```
-
-   Make sure to keep this key **secret**!
-
-3. Add the encryption key in your `config.php` file as follows:
-
-   ```php
-   <?php
-   $CONFIG = array(
-   'objectstore' => [
-       'class' => '\\OC\\Files\\ObjectStore\\S3',
-       'arguments' => [
-           ...
-           'sse_c_key' => 'exampleencryptionkeyLbU+5fKYQcVoqnn+RaIOXgo=',
-           ...
-       ],
-   ],
-   ```
-
-Nextcloud will now make Garage encrypt files at rest in the storage bucket.
-These files will not be readable by an S3 client that has credentials to the
-bucket but doesn't also know the secret encryption key.
-
 ### External Storage

 **From the GUI.** Activate the "External storage support" app from the "Applications" page (click on your account icon on the top right corner of your screen to display the menu). Go to your parameters page (also located below your account icon). Click on external storage (or the corresponding translation in your language).
@@ -292,7 +245,7 @@ with average object size ranging from 50 KB to 150 KB.
 As such, your Garage cluster should be configured appropriately for good performance:

 - use Garage v0.8.0 or higher with the [LMDB database engine](@documentation/reference-manual/configuration.md#db-engine-since-v0-8-0).
-  Older versions of Garage used the Sled database engine which had issues, such as databases quickly ending up taking tens of GB of disk space.
+  With the default Sled database engine, your database could quickly end up taking tens of GB of disk space.
 - the Garage database should be stored on a SSD

 ### Creating your bucket
@@ -335,7 +288,6 @@ From the [official Mastodon documentation](https://docs.joinmastodon.org/admin/t

 ```bash
 $ RAILS_ENV=production bin/tootctl media remove --days 3
-$ RAILS_ENV=production bin/tootctl media remove --days 15 --prune-profiles
 $ RAILS_ENV=production bin/tootctl media remove-orphans
 $ RAILS_ENV=production bin/tootctl preview_cards remove --days 15
 ```
@@ -354,6 +306,8 @@ Imports: 1.7 KB
 Settings: 0 Bytes
 ```

+Unfortunately, [old avatars and headers cannot currently be cleaned up](https://github.com/mastodon/mastodon/issues/9567).
+
 ### Migrating your data

 Data migration should be done with an efficient S3 client.
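The SSE-C section removed above configures Nextcloud specifically, but the same mode can be exercised from any S3 client. A hedged sketch using boto3, where the endpoint, credentials and bucket name are hypothetical and only the `SSECustomerAlgorithm`/`SSECustomerKey` parameters are the standard S3 API surface for SSE-C:

```python
import os
import boto3

# Hypothetical endpoint and credentials for a Garage cluster behind TLS.
s3 = boto3.client(
    "s3",
    endpoint_url="https://garage.example.com",
    aws_access_key_id="GKyourapikey",
    aws_secret_access_key="abcd[...]1234",
    region_name="garage",
)

sse_key = os.urandom(32)  # 32 random bytes, like `openssl rand -base64 32` before encoding

# The key travels with every request; boto3 encodes it and fills in the MD5 header.
s3.put_object(
    Bucket="nextcloud-bucket",
    Key="hello.txt",
    Body=b"hello world",
    SSECustomerAlgorithm="AES256",
    SSECustomerKey=sse_key,
)

# Reading the object back requires presenting the same key again.
resp = s3.get_object(
    Bucket="nextcloud-bucket",
    Key="hello.txt",
    SSECustomerAlgorithm="AES256",
    SSECustomerKey=sse_key,
)
print(resp["Body"].read())
```

A plain `get_object` without the key fails, which is exactly the property the removed paragraph relies on.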
@@ -55,8 +55,8 @@ Create your key and bucket:

 ```bash
 garage key create my-key
-garage bucket create backups
-garage bucket allow backups --read --write --key my-key
+garage bucket create backup
+garage bucket allow backup --read --write --key my-key
 ```

 Then register your Key ID and Secret key in your environment:
@@ -259,7 +259,7 @@ duck --delete garage:/my-files/an-object.txt

 ## WinSCP (libs3) {#winscp}

-*You can find instructions on how to use the GUI in french [in our wiki](https://guide.deuxfleurs.fr/prise_en_main/winscp/).*
+*You can find instructions on how to use the GUI in french [in our wiki](https://wiki.deuxfleurs.fr/fr/Guide/Garage/WinSCP).*

 How to use `winscp.com`, the CLI interface of WinSCP:
@@ -53,43 +53,20 @@ and that's also why your nodes have super long identifiers.

 Adding TLS support built into Garage is not currently planned.

-## Garage stores data in plain text on the filesystem or encrypted using customer keys (SSE-C)
+## Garage stores data in plain text on the filesystem

-For standard S3 API requests, Garage does not encrypt data at rest by itself.
-For the most generic at rest encryption of data, we recommend setting up your
-storage partitions on encrypted LUKS devices.
+Garage does not handle data encryption at rest by itself, and instead delegates
+to the user to add encryption, either at the storage layer (LUKS, etc) or on
+the client side (or both). There are no current plans to add data encryption
+directly in Garage.

 If you are developping your own client software that makes use of S3 storage,
 we recommend implementing data encryption directly on the client side and never
 transmitting plaintext data to Garage. This makes it easy to use an external
 untrusted storage provider if necessary.

-Garage does support [SSE-C
-encryption](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html),
-an encryption mode of Amazon S3 where data is encrypted at rest using
-encryption keys given by the client. The encryption keys are passed to the
-server in a header in each request, to encrypt or decrypt data at the moment of
-reading or writing. The server discards the key as soon as it has finished
-using it for the request. This mode allows the data to be encrypted at rest by
-Garage itself, but it requires support in the client software. It is also not
-adapted to a model where the server is not trusted or assumed to be
-compromised, as the server can easily know the encryption keys. Note however
-that when using SSE-C encryption, the only Garage node that knows the
-encryption key passed in a given request is the node to which the request is
-directed (which can be a gateway node), so it is easy to have untrusted nodes
-in the cluster as long as S3 API requests containing SSE-C encryption keys are
-not directed to them.
-
-Implementing automatic data encryption directly in Garage without client-side
-management of keys (something like
-[SSE-S3](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingServerSideEncryption.html))
-could make things simpler for end users that don't want to setup LUKS, but also
-raises many more questions, especially around key management: for encryption of
-data, where could Garage get the encryption keys from? If we encrypt data but
-keep the keys in a plaintext file next to them, it's useless. We probably don't
-want to have to manage secrets in Garage as it would be very hard to do in a
-secure way. At the time of speaking, there are no plans to implement this in
-Garage.
+Implementing data encryption directly in Garage might make things simpler for
+end users, but also raises many more questions, especially around key
+management: for encryption of data, where could Garage get the encryption keys
+from ? If we encrypt data but keep the keys in a plaintext file next to them,
+it's useless. We probably don't want to have to manage secrets in garage as it
+would be very hard to do in a secure way. Maybe integrate with an external
+system such as Hashicorp Vault?

 # Adding data encryption using external tools
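To make the client-side recommendation in the hunk above concrete, here is a minimal sketch using the `cryptography` package's Fernet recipe together with boto3. Endpoint, credentials and names are placeholders, and a real deployment would need proper key storage:

```python
import boto3
from cryptography.fernet import Fernet

# Generate and persist this key somewhere safe, never in the bucket itself.
key = Fernet.generate_key()
fernet = Fernet(key)

s3 = boto3.client(
    "s3",
    endpoint_url="http://localhost:3900",  # placeholder Garage endpoint
    aws_access_key_id="GKyourapikey",
    aws_secret_access_key="abcd[...]1234",
)

# Garage only ever sees ciphertext.
s3.put_object(Bucket="my-bucket", Key="doc.txt", Body=fernet.encrypt(b"plaintext"))

ciphertext = s3.get_object(Bucket="my-bucket", Key="doc.txt")["Body"].read()
print(fernet.decrypt(ciphertext))  # b"plaintext"
```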
@@ -90,6 +90,6 @@ The following feature flags are available in v0.8.0:
 | `kubernetes-discovery` | optional | Enable automatic registration and discovery<br>of cluster nodes through the Kubernetes API |
 | `metrics` | *by default* | Enable collection of metrics in Prometheus format on the admin API |
 | `telemetry-otlp` | optional | Enable collection of execution traces using OpenTelemetry |
-| `syslog` | optional | Enable logging to Syslog |
-| `lmdb` | *by default* | Enable using LMDB to store Garage's metadata |
-| `sqlite` | *by default* | Enable using Sqlite3 to store Garage's metadata |
+| `sled` | *by default* | Enable using Sled to store Garage's metadata |
+| `lmdb` | optional | Enable using LMDB to store Garage's metadata |
+| `sqlite` | optional | Enable using Sqlite3 to store Garage's metadata |
@@ -53,9 +53,9 @@ to store 2 TB of data in total.

 ### Best practices

-- If you have reasonably fast networking between all your nodes, and are planing to store
-  mostly large files, bump the `block_size` configuration parameter to 10 MB
-  (`block_size = "10M"`).
+- If you have fast dedicated networking between all your nodes, and are planing to store
+  very large files, bump the `block_size` configuration parameter to 10 MB
+  (`block_size = 10485760`).

 - Garage stores its files in two locations: it uses a metadata directory to store frequently-accessed
   small metadata items, and a data directory to store data blocks of uploaded objects.
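The two spellings of the block size in this hunk denote the same quantity: the newer syntax accepts a human-readable string while the older one takes raw bytes. The arithmetic:

```python
# 10 MiB expressed in bytes, as in the older `block_size = 10485760` form:
print(10 * 1024 * 1024)  # 10485760
# The newer form writes the same setting as the string "10M" (assumption:
# Garage's size parser treats the suffix as binary units here).
```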
@@ -68,42 +68,31 @@ to store 2 TB of data in total.
   EXT4 is not recommended as it has more strict limitations on the number of inodes,
   which might cause issues with Garage when large numbers of objects are stored.

-- Servers with multiple HDDs are supported natively by Garage without resorting
-  to RAID, see [our dedicated documentation page](@/documentation/operations/multi-hdd.md).
+- If you only have an HDD and no SSD, it's fine to put your metadata alongside the data
+  on the same drive. Having lots of RAM for your kernel to cache the metadata will
+  help a lot with performance. Make sure to use the LMDB database engine,
+  instead of Sled, which suffers from quite bad performance degradation on HDDs.
+  Sled is still the default for legacy reasons, but is not recommended anymore.

 - For the metadata storage, Garage does not do checksumming and integrity
-  verification on its own, so it is better to use a robust filesystem such as
-  BTRFS or ZFS. Users have reported that when using the LMDB database engine
-  (the default), database files have a tendency of becoming corrupted after an
-  unclean shutdown (e.g. a power outage), so you should take regular snapshots
-  to be able to recover from such a situation. This can be done using Garage's
-  built-in automatic snapshotting (since v0.9.4), or by using filesystem level
-  snapshots. If you cannot do so, you might want to switch to Sqlite which is
-  more robust.
+  verification on its own. If you are afraid of bitrot/data corruption,
+  put your metadata directory on a ZFS or BTRFS partition. Otherwise, just use regular
+  EXT4 or XFS.

-- LMDB is the fastest and most tested database engine, but it has the following
-  weaknesses: 1/ data files are not architecture-independent, you cannot simply
-  move a Garage metadata directory between nodes running different architectures,
-  and 2/ LMDB is not suited for 32-bit platforms. Sqlite is a viable alternative
-  if any of these are of concern.
-
-- If you only have an HDD and no SSD, it's fine to put your metadata alongside
-  the data on the same drive, but then consider your filesystem choice wisely
-  (see above). Having lots of RAM for your kernel to cache the metadata will
-  help a lot with performance. The default LMDB database engine is the most
-  tested and has good performance.
+- Servers with multiple HDDs are supported natively by Garage without resorting
+  to RAID, see [our dedicated documentation page](@/documentation/operations/multi-hdd.md).

 ## Get a Docker image

 Our docker image is currently named `dxflrs/garage` and is stored on the [Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).
-We encourage you to use a fixed tag (eg. `v1.0.1`) and not the `latest` tag.
-For this example, we will use the latest published version at the time of the writing which is `v1.0.1` but it's up to you
+We encourage you to use a fixed tag (eg. `v0.9.1`) and not the `latest` tag.
+For this example, we will use the latest published version at the time of the writing which is `v0.9.1` but it's up to you
 to check [the most recent versions on the Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).

 For example:

 ```
-sudo docker pull dxflrs/garage:v1.0.1
+sudo docker pull dxflrs/garage:v0.9.1
 ```
 ## Deploying and configuring Garage

@@ -126,9 +115,8 @@ A valid `/etc/garage.toml` for our cluster would look as follows:
 metadata_dir = "/var/lib/garage/meta"
 data_dir = "/var/lib/garage/data"
 db_engine = "lmdb"
-metadata_auto_snapshot_interval = "6h"

-replication_factor = 3
+replication_mode = "3"

 compression_level = 2
@@ -152,8 +140,6 @@ Check the following for your configuration files:
 - Make sure `rpc_public_addr` contains the public IP address of the node you are configuring.
   This parameter is optional but recommended: if your nodes have trouble communicating with
   one another, consider adding it.
-  Alternatively, you can also set `rpc_public_addr_subnet`, which can filter
-  the addresses announced to other peers to a specific subnet.

 - Make sure `rpc_secret` is the same value on all nodes. It should be a 32-bytes hex-encoded secret key.
   You can generate such a key with `openssl rand -hex 32`.
@@ -171,7 +157,7 @@ docker run \
   -v /etc/garage.toml:/etc/garage.toml \
   -v /var/lib/garage/meta:/var/lib/garage/meta \
   -v /var/lib/garage/data:/var/lib/garage/data \
-  dxflrs/garage:v1.0.1
+  dxflrs/garage:v0.9.1
 ```

 With this command line, Garage should be started automatically at each boot.
@@ -185,7 +171,7 @@ If you want to use `docker-compose`, you may use the following `docker-compose.y
 version: "3"
 services:
   garage:
-    image: dxflrs/garage:v1.0.1
+    image: dxflrs/garage:v0.9.1
     network_mode: "host"
     restart: unless-stopped
     volumes:
@@ -201,7 +187,7 @@ upgrades. With the containerized setup proposed here, the upgrade process
 will require stopping and removing the existing container, and re-creating it
 with the upgraded version.

-## Controlling the daemon
+## Controling the daemon

 The `garage` binary has two purposes:
   - it acts as a daemon when launched with `garage server`
@@ -259,7 +245,7 @@ You can then instruct nodes to connect to one another as follows:
 Venus$ garage node connect 563e1ac825ee3323aa441e72c26d1030d6d4414aeb3dd25287c531e7fc2bc95d@[fc00:1::1]:3901
 ```

-You don't need to instruct all node to connect to all other nodes:
+You don't nead to instruct all node to connect to all other nodes:
 nodes will discover one another transitively.

 Now if your run `garage status` on any node, you should have an output that looks as follows:
@@ -50,20 +50,3 @@ locations. They use Garage themselves for the following tasks:

 The Deuxfleurs Garage cluster is a multi-site cluster currently composed of
 9 nodes in 3 physical locations.
-
-### Triplebit
-
-[Triplebit](https://www.triplebit.org) is a non-profit hosting provider and
-ISP focused on improving access to privacy-related services. They use
-Garage themselves for the following tasks:
-
-- Hosting of their homepage, [privacyguides.org](https://www.privacyguides.org/), and various other static sites
-
-- As a Mastodon object storage backend for [mstdn.party](https://mstdn.party/) and [mstdn.plus](https://mstdn.plus/)
-
-- As a PeerTube storage backend for [neat.tube](https://neat.tube/)
-
-- As a [Matrix media backend](https://github.com/matrix-org/synapse-s3-storage-provider)
-
-Triplebit's Garage cluster is a multi-site cluster currently composed of
-10 nodes in 3 physical locations.
@@ -97,7 +97,7 @@ delete a tombstone, the following condition has to be met:
 superseeded by the tombstone. This ensures that deleting the tombstone is
 safe and that no deleted value will come back in the system.

-Garage uses atomic database operations (such as compare-and-swap and
+Garage makes use of Sled's atomic operations (such as compare-and-swap and
 transactions) to ensure that only tombstones that have been correctly
 propagated to other nodes are ever deleted from the local entry tree.
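The compare-and-swap pattern this hunk refers to can be illustrated with a toy key-value table; this is a sketch of the general technique, not Garage's actual implementation:

```python
import threading

class ToyTable:
    """Minimal store exposing the compare-and-swap primitive the text describes."""
    def __init__(self):
        self._lock = threading.Lock()
        self._data = {}

    def compare_and_swap(self, key, expected, new):
        # Atomically replace `key` only if its current value is still `expected`.
        with self._lock:
            if self._data.get(key) != expected:
                return False
            if new is None:
                self._data.pop(key, None)
            else:
                self._data[key] = new
            return True

table = ToyTable()
tombstone = {"deleted": True, "timestamp": 42}
table.compare_and_swap("object1", None, tombstone)

# Delete the tombstone only if it is still exactly the value we propagated;
# if a concurrent update slipped in, the CAS fails and nothing is lost.
ok = table.compare_and_swap("object1", tombstone, None)
print(ok)  # True
```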
@@ -67,7 +67,7 @@ Pithos has been abandonned and should probably not used yet, in the following we
 Pithos was relying as a S3 proxy in front of Cassandra (and was working with Scylla DB too).
 From its designers' mouth, storing data in Cassandra has shown its limitations justifying the project abandonment.
 They built a closed-source version 2 that does not store blobs in the database (only metadata) but did not communicate further on it.
-We considered their v2's design but concluded that it does not fit both our *Self-contained & lightweight* and *Simple* properties. It makes the development, the deployment and the operations more complicated while reducing the flexibility.
+We considered there v2's design but concluded that it does not fit both our *Self-contained & lightweight* and *Simple* properties. It makes the development, the deployment and the operations more complicated while reducing the flexibility.

 **[Riak CS](https://docs.riak.com/riak/cs/2.1.1/index.html):**
 *Not written yet*
@@ -19,7 +19,7 @@ connecting to. To run on all nodes, add the `-a` flag as follows:

 # Data block operations

-## Data store scrub {#scrub}
+## Data store scrub

 Scrubbing the data store means examining each individual data block to check that
 their content is correct, by verifying their hash. Any block found to be corrupted
@@ -104,24 +104,6 @@ operation will also move out all data from locations marked as read-only.

 # Metadata operations

-## Metadata snapshotting
-
-It is good practice to setup automatic snapshotting of your metadata database
-file, to recover from situations where it becomes corrupted on disk. This can
-be done at the filesystem level if you are using ZFS or BTRFS.
-
-Since Garage v0.9.4, Garage is able to take snapshots of the metadata database
-itself. This basically amounts to copying the database file, except that it can
-be run live while Garage is running without the risk of corruption or
-inconsistencies. This can be setup to run automatically on a schedule using
-[`metadata_auto_snapshot_interval`](@/documentation/reference-manual/configuration.md#metadata_auto_snapshot_interval).
-A snapshot can also be triggered manually using the `garage meta snapshot`
-command. Note that taking a snapshot using this method is very intensive as it
-requires making a full copy of the database file, so you might prefer using
-filesystem-level snapshots if possible. To recover a corrupted node from such a
-snapshot, read the instructions
-[here](@/documentation/operations/recovering.md#corrupted_meta).
-
 ## Metadata table resync

 Garage automatically resyncs all entries stored in the metadata tables every hour,
@@ -141,7 +123,4 @@ blocks may still be held by Garage. If you suspect that such corruption has occu
 in your cluster, you can run one of the following repair procedures:

 - `garage repair versions`: checks that all versions belong to a non-deleted object, and purges any orphan version
-
-- `garage repair block-refs`: checks that all block references belong to a non-deleted object version, and purges any orphan block reference (this will then allow the blocks to be garbage-collected)
-
-- `garage repair block-rc`: checks that the reference counters for blocks are in sync with the actual number of non-deleted entries in the block reference table
+- `garage repair block_refs`: checks that all block references belong to a non-deleted object version, and purges any orphan block reference (this will then allow the blocks to be garbage-collected)
@@ -12,7 +12,7 @@ An introduction to building cluster layouts can be found in the [production depl
 In Garage, all of the data that can be stored in a given cluster is divided
 into slices which we call *partitions*. Each partition is stored by
 one or several nodes in the cluster
-(see [`replication_factor`](@/documentation/reference-manual/configuration.md#replication_factor)).
+(see [`replication_mode`](@/documentation/reference-manual/configuration.md#replication_mode)).
 The layout determines the correspondence between these partitions,
 which exist on a logical level, and actual storage nodes.
@@ -5,7 +5,7 @@ weight = 40

 Garage is meant to work on old, second-hand hardware.
 In particular, this makes it likely that some of your drives will fail, and some manual intervention will be needed.
-Fear not! Garage is fully equipped to handle drive failures, in most common cases.
+Fear not! For Garage is fully equipped to handle drive failures, in most common cases.

 ## A note on availability of Garage
@@ -108,57 +108,3 @@ garage layout apply # once satisfied, apply the changes

 Garage will then start synchronizing all required data on the new node.
 This process can be monitored using the `garage stats -a` command.
-
-## Replacement scenario 3: corrupted metadata {#corrupted_meta}
-
-In some cases, your metadata DB file might become corrupted, for instance if
-your node suffered a power outage and did not shut down properly. In this case,
-you can recover without having to change the node ID and rebuilding a cluster
-layout. This means that data blocks will not need to be shuffled around, you
-must simply find a way to repair the metadata file. The best way is generally
-to discard the corrupted file and recover it from another source.
-
-First of all, start by locating the database file in your metadata directory,
-which [depends on your `db_engine`
-choice](@/documentation/reference-manual/configuration.md#db_engine). Then,
-your recovery options are as follows:
-
-- **Option 1: resyncing from other nodes.** In case your cluster is replicated
-  with two or three copies, you can simply delete the database file, and Garage
-  will resync from other nodes. To do so, stop Garage, delete the database file
-  or directory, and restart Garage. Then, do a full table repair by calling
-  `garage repair -a --yes tables`. This will take a bit of time to complete as
-  the new node will need to receive copies of the metadata tables from the
-  network.
-
-- **Option 2: restoring a snapshot taken by Garage.** Since v0.9.4, Garage can
-  [automatically take regular
-  snapshots](@/documentation/reference-manual/configuration.md#metadata_auto_snapshot_interval)
-  of your metadata DB file. This file or directory should be located under
-  `<metadata_dir>/snapshots`, and is named according to the UTC time at which it
-  was taken. Stop Garage, discard the database file/directory and replace it by the
-  snapshot you want to use. For instance, in the case of LMDB:
-
-  ```bash
-  cd $METADATA_DIR
-  mv db.lmdb db.lmdb.bak
-  cp -r snapshots/2024-03-15T12:13:52Z db.lmdb
-  ```
-
-  And for Sqlite:
-
-  ```bash
-  cd $METADATA_DIR
-  mv db.sqlite db.sqlite.bak
-  cp snapshots/2024-03-15T12:13:52Z db.sqlite
-  ```
-
-  Then, restart Garage and run a full table repair by calling `garage repair -a
-  --yes tables`. This should run relatively fast as only the changes that
-  occurred since the snapshot was taken will need to be resynchronized. Of
-  course, if your cluster is not replicated, you will lose all changes that
-  occurred since the snapshot was taken.
-
-- **Option 3: restoring a filesystem-level snapshot.** If you are using ZFS or
-  BTRFS to snapshot your metadata partition, refer to their specific
-  documentation on rolling back or copying files from an old snapshot.
@@ -73,18 +73,6 @@ The entire procedure would look something like this:

    You can do all of the nodes in a single zone at once as that won't impact global cluster availability.
    Do not try to make a backup of the metadata folder of a running node.

-   **Since Garage v0.9.4,** you can use the `garage meta snapshot --all` command
-   to take a simultaneous snapshot of the metadata database files of all your
-   nodes. This avoids the tedious process of having to take them down one by
-   one before upgrading. Be careful that if automatic snapshotting is enabled,
-   Garage only keeps the last two snapshots and deletes older ones, so you might
-   want to disable automatic snapshotting in your upgraded configuration file
-   until you have confirmed that the upgrade ran successfully. In addition to
-   snapshotting the metadata databases of your nodes, you should back-up at
-   least the `cluster_layout` file of one of your Garage instances (this file
-   should be the same on all nodes and you can copy it safely while Garage is
-   running).
-
 3. Prepare your binaries and configuration files for the new Garage version

 4. Restart all nodes simultaneously in the new version
@@ -42,13 +42,6 @@ If a binary of the last version is not available for your architecture,
 or if you want a build customized for your system,
 you can [build Garage from source](@/documentation/cookbook/from-source.md).

-If none of these option work for you, you can also run Garage in a Docker
-container. When using Docker, the commands used in this guide will not work
-anymore. We recommend reading the tutorial on [configuring a
-multi-node cluster](@/documentation/cookbook/real-world.md) to learn about
-using Garage as a Docker container. For simplicity, a minimal command to launch
-Garage using Docker is provided in this quick start guide as well.
-
 ## Configuring and starting Garage

@@ -64,9 +57,9 @@ to generate unique and private secrets for security reasons:
 cat > garage.toml <<EOF
 metadata_dir = "/tmp/meta"
 data_dir = "/tmp/data"
-db_engine = "sqlite"
+db_engine = "lmdb"

-replication_factor = 1
+replication_mode = "none"

 rpc_bind_addr = "[::]:3901"
 rpc_public_addr = "127.0.0.1:3901"
@@ -86,15 +79,11 @@ index = "index.html"
 api_bind_addr = "[::]:3904"

 [admin]
-api_bind_addr = "[::]:3903"
+api_bind_addr = "0.0.0.0:3903"
 admin_token = "$(openssl rand -base64 32)"
 metrics_token = "$(openssl rand -base64 32)"
 EOF
 ```

-See the [Configuration file format](https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/)
-for complete options and values.
-
 Now that your configuration file has been created, you may save it to the directory of your choice.
 By default, Garage looks for **`/etc/garage.toml`.**
 You can also store it somewhere else, but you will have to specify `-c path/to/garage.toml`
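The `openssl rand` calls in the template above can be reproduced without OpenSSL; the Python standard library's `secrets` module is an equivalent way to generate the same tokens:

```python
import base64
import secrets

# Equivalent of `openssl rand -hex 32` (e.g. for rpc_secret):
print(secrets.token_hex(32))

# Equivalent of `openssl rand -base64 32` (the admin and metrics tokens):
print(base64.b64encode(secrets.token_bytes(32)).decode())
```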
@@ -121,26 +110,6 @@ garage -c path/to/garage.toml server

 If you have placed the `garage.toml` file in `/etc` (its default location), you can simply run `garage server`.

-Alternatively, if you cannot or do not wish to run the Garage binary directly,
-you may use Docker to run Garage in a container using the following command:
-
-```bash
-docker run \
-  -d \
-  --name garaged \
-  -p 3900:3900 -p 3901:3901 -p 3902:3902 -p 3903:3903 \
-  -v /etc/garage.toml:/path/to/garage.toml \
-  -v /var/lib/garage/meta:/path/to/garage/meta \
-  -v /var/lib/garage/data:/path/to/garage/data \
-  dxflrs/garage:v0.9.4
-```
-
-Under Linux, you can substitute `--network host` for `-p 3900:3900 -p 3901:3901 -p 3902:3902 -p 3903:3903`
-
 #### Troubleshooting

 Ensure your configuration file, `metadata_dir` and `data_dir` are readable by the user running the `garage` server or Docker.

 You can tune Garage's verbosity by setting the `RUST_LOG=` environment variable. \
 Available log levels are (from less verbose to more verbose): `error`, `warn`, `info` *(default)*, `debug` and `trace`.
@@ -161,9 +130,6 @@ It uses values from the TOML configuration file to find the Garage daemon runnin
 local node, therefore if your configuration file is not at `/etc/garage.toml` you will
 again have to specify `-c path/to/garage.toml` at each invocation.

-If you are running Garage in a Docker container, you can set `alias garage="docker exec -ti <container name> /garage"`
-to use the Garage binary inside your container.
-
 If the `garage` CLI is able to correctly detect the parameters of your local Garage node,
 the following command should be enough to show the status of your cluster:
@@ -8,21 +8,19 @@ weight = 20

 Here is an example `garage.toml` configuration file that illustrates all of the possible options:

 ```toml
-replication_factor = 3
-consistency_mode = "consistent"
+replication_mode = "3"

 metadata_dir = "/var/lib/garage/meta"
 data_dir = "/var/lib/garage/data"
 metadata_fsync = true
 data_fsync = false
-disable_scrub = false
-metadata_auto_snapshot_interval = "6h"

 db_engine = "lmdb"

 block_size = "1M"
-block_ram_buffer_max = "256MiB"

+sled_cache_capacity = "128MiB"
+sled_flush_every_ms = 2000
 lmdb_map_size = "1T"

 compression_level = 1
@@ -31,11 +29,6 @@ rpc_secret = "4425f5c26c5e11581d3223904324dcb5b5d5dfb14e5e7f35e38c595424f5f1e6"
 rpc_bind_addr = "[::]:3901"
-rpc_bind_outgoing = false
 rpc_public_addr = "[fc00:1::1]:3901"
-# or set rpc_public_adr_subnet to filter down autodiscovery to a subnet:
-# rpc_public_addr_subnet = "2001:0db8:f00:b00:/64"
-
-
 allow_world_readable_secrets = false

 bootstrap_peers = [
   "563e1ac825ee3323aa441e72c26d1030d6d4414aeb3dd25287c531e7fc2bc95d@[fc00:1::1]:3901",
@@ -76,8 +69,8 @@ root_domain = ".web.garage"

 [admin]
 api_bind_addr = "0.0.0.0:3903"
-metrics_token = "BCAdFjoa9G0KJR0WXnHHm7fs1ZAbfpI8iIZ+Z/a2NgI="
-admin_token = "UkLeGWEvHnXBqnueR3ISEMWpOnm40jH2tM2HnnL/0F4="
+metrics_token = "cacce0b2de4bc2d9f5b5fdff551e01ac1496055aed248202d415398987e35f81"
+admin_token = "ae8cb40ea7368bbdbb6430af11cca7da833d3458a5f52086f4e805a570fb5c2a"
 trace_sink = "http://localhost:4317"
 ```
@@ -87,29 +80,23 @@ The following gives details about each available configuration option.

 ### Index

-[Environment variables](#env_variables).
-
 Top-level configuration options:
 [`allow_world_readable_secrets`](#allow_world_readable_secrets),
 [`block_ram_buffer_max`](#block_ram_buffer_max),
 [`block_size`](#block_size),
 [`bootstrap_peers`](#bootstrap_peers),
 [`compression_level`](#compression_level),
 [`data_dir`](#data_dir),
 [`data_fsync`](#data_fsync),
 [`db_engine`](#db_engine),
-[`disable_scrub`](#disable_scrub),
 [`lmdb_map_size`](#lmdb_map_size),
-[`metadata_auto_snapshot_interval`](#metadata_auto_snapshot_interval),
 [`metadata_dir`](#metadata_dir),
 [`metadata_fsync`](#metadata_fsync),
-[`replication_factor`](#replication_factor),
-[`consistency_mode`](#consistency_mode),
+[`replication_mode`](#replication_mode),
 [`rpc_bind_addr`](#rpc_bind_addr),
-[`rpc_bind_outgoing`](#rpc_bind_outgoing),
 [`rpc_public_addr`](#rpc_public_addr),
-[`rpc_public_addr_subnet`](#rpc_public_addr_subnet)
-[`rpc_secret`/`rpc_secret_file`](#rpc_secret).
+[`rpc_secret`/`rpc_secret_file`](#rpc_secret),
+[`sled_cache_capacity`](#sled_cache_capacity),
+[`sled_flush_every_ms`](#sled_flush_every_ms).

 The `[consul_discovery]` section:
 [`api`](#consul_api),
@@ -143,32 +130,14 @@ The `[admin]` section:
 [`admin_token`/`admin_token_file`](#admin_token),
 [`trace_sink`](#admin_trace_sink),

-### Environment variables {#env_variables}
-
-The following configuration parameter must be specified as an environment
-variable, it does not exist in the configuration file:
-
-- `GARAGE_LOG_TO_SYSLOG` (since v0.9.4): set this to `1` or `true` to make the
-  Garage daemon send its logs to `syslog` (using the libc `syslog` function)
-  instead of printing to stderr.
-
-The following environment variables can be used to override the corresponding
-values in the configuration file:
-
-- [`GARAGE_ALLOW_WORLD_READABLE_SECRETS`](#allow_world_readable_secrets)
-- [`GARAGE_RPC_SECRET` and `GARAGE_RPC_SECRET_FILE`](#rpc_secret)
-- [`GARAGE_ADMIN_TOKEN` and `GARAGE_ADMIN_TOKEN_FILE`](#admin_token)
-- [`GARAGE_METRICS_TOKEN` and `GARAGE_METRICS_TOKEN`](#admin_metrics_token)
-
-
 ### Top-level configuration options

-#### `replication_factor` {#replication_factor}
+#### `replication_mode` {#replication_mode}

-The replication factor can be any positive integer smaller or equal the node count in your cluster.
-The chosen replication factor has a big impact on the cluster's failure tolerancy and performance characteristics.
+Garage supports the following replication modes:

-- `1`: data stored on Garage is stored on a single node. There is no
+- `none` or `1`: data stored on Garage is stored on a single node. There is no
   redundancy, and data will be unavailable as soon as one node fails or its
   network is disconnected. Do not use this for anything else than test
   deployments.
@ -179,6 +148,17 @@ The chosen replication factor has a big impact on the cluster's failure toleranc
|
|||
before losing data. Data remains available in read-only mode when one node is
|
||||
down, but write operations will fail.
|
||||
|
||||
- `2-dangerous`: a variant of mode `2`, where written objects are written to
|
||||
the second replica asynchronously. This means that Garage will return `200
|
||||
OK` to a PutObject request before the second copy is fully written (or even
|
||||
before it even starts being written). This means that data can more easily
|
||||
be lost if the node crashes before a second copy can be completed. This
|
||||
also means that written objects might not be visible immediately in read
|
||||
operations. In other words, this mode severely breaks the consistency and
|
||||
durability guarantees of standard Garage cluster operation. Benefits of
|
||||
this mode: you can still write to your cluster when one node is
|
||||
unavailable.
|
||||
|
||||
- `3`: data stored on Garage will be stored on three different nodes, if
|
||||
possible each in a different zone. Garage tolerates two node failures, or
|
||||
several node failures but in no more than two zones (in a deployment with at
|
||||
|
@ -186,84 +166,55 @@ The chosen replication factor has a big impact on the cluster's failure toleranc
|
|||
or node failures are only in a single zone, reading and writing data to
|
||||
Garage can continue normally.
|
||||
|
||||
- `5`, `7`, ...: When setting the replication factor above 3, it is most useful to
|
||||
choose an odd value, since for every two copies added, one more node can fail
|
||||
before losing the ability to read and write to the cluster.
|
||||
|
||||
Note that in modes `2` and `3`,
|
||||
if at least the same number of zones are available, an arbitrary number of failures in
|
||||
any given zone is tolerated as copies of data will be spread over several zones.
|
||||
|
||||
**Make sure `replication_factor` is the same in the configuration files of all nodes.
|
||||
Never run a Garage cluster where that is not the case.**
|
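||||
|
||||
For example, here is a minimal sketch of the relevant line in `garage.toml` for
|
||||
a three-node deployment (surrounding options omitted):
|
||||
|
||||
```toml
|
||||
# Must be identical in the configuration file of every node:
|
||||
replication_factor = 3
|
||||
```
|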
||||
|
||||
It is technically possible to change the replication factor, although it's a
|
||||
dangerous operation that is not officially supported. This requires you to
|
||||
delete the existing cluster layout and create a new layout from scratch,
|
||||
meaning that a full rebalancing of your cluster's data will be needed. To do
|
||||
it, shut down your cluster entirely, delete the `cluster_layout` files in the
|
||||
meta directories of all your nodes, update all your configuration files with
|
||||
the new `replication_factor` parameter, restart your cluster, and then create a
|
||||
new layout with all the nodes you want to keep. Rebalancing data will take
|
||||
some time, and data might temporarily appear unavailable to your users.
|
||||
It is recommended to shut down public access to the cluster while rebalancing
|
||||
is in progress. In theory, no data should be lost as rebalancing is a
|
||||
routine operation for Garage, although we cannot guarantee you that everything
|
||||
will go right in such an extreme scenario.
|
||||
|
||||
#### `consistency_mode` {#consistency_mode}
|
||||
|
||||
The consistency mode setting determines the read and write behaviour of your cluster.
|
||||
|
||||
- `consistent`: The default setting. This is what the paragraph above describes.
|
||||
The read and write quorums will be determined so that read-after-write consistency
|
||||
is guaranteed.
|
||||
- `degraded`: Lowers the read
|
||||
- `3-degraded`: a variant of replication mode `3` that lowers the read
|
||||
quorum to `1`, to allow you to read data from your cluster when several
|
||||
nodes (or nodes in several zones) are unavailable. In this mode, Garage
|
||||
does not provide read-after-write consistency anymore.
|
||||
The write quorum stays the same as in the `consistent` mode, ensuring that
|
||||
data successfully written to Garage is stored on multiple nodes (depending
|
||||
on the replication factor).
|
||||
- `dangerous`: This mode lowers both the read
|
||||
does not provide read-after-write consistency anymore. The write quorum is
|
||||
still 2, ensuring that data successfully written to Garage is stored on at
|
||||
least two nodes.
|
||||
|
||||
- `3-dangerous`: a variant of replication mode `3` that lowers both the read
|
||||
and write quorums to `1`, to allow you to both read and write to your
|
||||
cluster when several nodes (or nodes in several zones) are unavailable. It
|
||||
is the least consistent mode of operation proposed by Garage, and also one
|
||||
that should probably never be used.
|
||||
|
||||
Changing the `consistency_mode` between modes while leaving the `replication_factor` untouched
|
||||
(e.g. setting your node's `consistency_mode` to `degraded` when it was previously unset, or from
|
||||
`dangerous` to `consistent`), can be done easily by just changing the `consistency_mode`
|
||||
parameter in your config files and restarting all your Garage nodes.
|
||||
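||||
|
||||
For example, a sketch of the two relevant lines in `garage.toml` for trading
|
||||
read consistency for read availability on a 3-node cluster:
|
||||
|
||||
```toml
|
||||
replication_factor = 3
|
||||
consistency_mode = "degraded"  # read quorum drops to 1, write quorum stays at 2
|
||||
```
|
||||
|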
Note that in modes `2` and `3`,
|
||||
if at least the same number of zones are available, an arbitrary number of failures in
|
||||
any given zone is tolerated as copies of data will be spread over several zones.
|
||||
|
||||
The consistency mode can be used together with various replication factors, to achieve
|
||||
a wide range of read and write characteristics. Some examples:
|
||||
|
||||
- Replication factor `2`, consistency mode `degraded`: While this mode
|
||||
technically exists, its properties are the same as with consistency mode `consistent`,
|
||||
since the read quorum with replication factor `2`, consistency mode `consistent` is already 1.
|
||||
|
||||
- Replication factor `2`, consistency mode `dangerous`: written objects are written to
|
||||
the second replica asynchronously. This means that Garage will return `200
|
||||
OK` to a PutObject request before the second copy is fully written (or even
|
||||
before it even starts being written). This means that data can more easily
|
||||
be lost if the node crashes before a second copy can be completed. This
|
||||
also means that written objects might not be visible immediately in read
|
||||
operations. In other words, this configuration severely breaks the consistency and
|
||||
durability guarantees of standard Garage cluster operation. Benefits of
|
||||
this configuration: you can still write to your cluster when one node is
|
||||
unavailable.
|
||||
**Make sure `replication_mode` is the same in the configuration files of all nodes.
|
||||
Never run a Garage cluster where that is not the case.**
|
||||
|
||||
The quorums associated with each replication mode are described below:
|
||||
|
||||
| `consistency_mode` | `replication_factor` | Write quorum | Read quorum | Read-after-write consistency? |
|
||||
| ------------------ | -------------------- | ------------ | ----------- | ----------------------------- |
|
||||
| `consistent` | 1 | 1 | 1 | yes |
|
||||
| `consistent` | 2 | 2 | 1 | yes |
|
||||
| `dangerous` | 2 | 1 | 1 | NO |
|
||||
| `consistent` | 3 | 2 | 2 | yes |
|
||||
| `degraded` | 3 | 2 | 1 | NO |
|
||||
| `dangerous` | 3 | 1 | 1 | NO |
|
||||
| `replication_mode` | Number of replicas | Write quorum | Read quorum | Read-after-write consistency? |
|
||||
| ------------------ | ------------------ | ------------ | ----------- | ----------------------------- |
|
||||
| `none` or `1` | 1 | 1 | 1 | yes |
|
||||
| `2` | 2 | 2 | 1 | yes |
|
||||
| `2-dangerous` | 2 | 1 | 1 | NO |
|
||||
| `3` | 3 | 2 | 2 | yes |
|
||||
| `3-degraded` | 3 | 2 | 1 | NO |
|
||||
| `3-dangerous` | 3 | 1 | 1 | NO |
|
||||
|
||||
Changing the `replication_mode` between modes with the same number of replicas
|
||||
(e.g. from `3` to `3-degraded`, or from `2-dangerous` to `2`), can be done easily by
|
||||
just changing the `replication_mode` parameter in your config files and restarting all your
|
||||
Garage nodes.
|
||||
|
||||
It is also technically possible to change the replication mode to a mode with a
|
||||
different number of replicas, although it's a dangerous operation that is not
|
||||
officially supported. This requires you to delete the existing cluster layout
|
||||
and create a new layout from scratch, meaning that a full rebalancing of your
|
||||
cluster's data will be needed. To do it, shut down your cluster entirely,
|
||||
delete the `cluster_layout` files in the meta directories of all your nodes,
|
||||
update all your configuration files with the new `replication_mode` parameter,
|
||||
restart your cluster, and then create a new layout with all the nodes you want
|
||||
to keep. Rebalancing data will take some time, and data might temporarily
|
||||
appear unavailable to your users. It is recommended to shut down public access
|
||||
to the cluster while rebalancing is in progress. In theory, no data should be
|
||||
lost as rebalancing is a routine operation for Garage, although we cannot
|
||||
guarantee you that everything will go right in such an extreme scenario.
|
||||
|
||||
#### `metadata_dir` {#metadata_dir}
|
||||
|
||||
|
@ -299,43 +250,32 @@ Since `v0.8.0`, Garage can use alternative storage backends as follows:
|
|||
|
||||
| DB engine | `db_engine` value | Database path |
|
||||
| --------- | ----------------- | ------------- |
|
||||
| [LMDB](https://www.symas.com/lmdb) (since `v0.8.0`, default since `v0.9.0`) | `"lmdb"` | `<metadata_dir>/db.lmdb/` |
|
||||
| [Sqlite](https://sqlite.org) (since `v0.8.0`) | `"sqlite"` | `<metadata_dir>/db.sqlite` |
|
||||
| [Sled](https://sled.rs) (old default, removed since `v1.0`) | `"sled"` | `<metadata_dir>/db/` |
|
||||
| [LMDB](https://www.lmdb.tech) (default since `v0.9.0`) | `"lmdb"` | `<metadata_dir>/db.lmdb/` |
|
||||
| [Sled](https://sled.rs) (default up to `v0.8.0`) | `"sled"` | `<metadata_dir>/db/` |
|
||||
| [Sqlite](https://sqlite.org) | `"sqlite"` | `<metadata_dir>/db.sqlite` |
|
||||
|
||||
Sled was supported until Garage v0.9.x, and was removed in Garage v1.0.
|
||||
You can still use an older binary of Garage (e.g. v0.9.4) to migrate
|
||||
old Sled metadata databases to another engine.
|
||||
Sled was the only database engine up to Garage v0.7.0. Performance issues and
|
||||
API limitations of Sled prompted the addition of alternative engines in v0.8.0.
|
||||
Since v0.9.0, LMDB is the default engine instead of Sled, and Sled is
|
||||
deprecated. We plan to remove Sled in Garage v1.0.
|
||||
|
||||
Performance characteristics of the different DB engines are as follows:
|
||||
|
||||
- LMDB: the recommended database engine for high-performance distributed clusters.
|
||||
LMDB works very well, but is known to have the following limitations:
|
||||
- Sled: tends to produce large data files and also has performance issues,
|
||||
especially when the metadata folder is on a traditional HDD and not on SSD.
|
||||
|
||||
- The data format of LMDB is not portable between architectures, so for
|
||||
instance the Garage database of an x86-64 node cannot be moved to an ARM64
|
||||
node.
|
||||
|
||||
- While LMDB can technically be used on 32-bit systems, this will limit your
|
||||
node to very small database sizes due to how LMDB works; it is therefore
|
||||
not recommended.
|
||||
|
||||
- Several users have reported corrupted LMDB database files after an unclean
|
||||
shutdown (e.g. a power outage). This situation can generally be recovered
|
||||
from if your cluster is geo-replicated (by rebuilding your metadata db from
|
||||
other nodes), or if you have saved regular snapshots at the filesystem
|
||||
level.
|
||||
|
||||
- Keys in LMDB are limited to 511 bytes. This limit translates to limits on
|
||||
object keys in S3 and sort keys in K2V, which are limited to 479 bytes.
|
||||
- LMDB: the recommended database engine on 64-bit systems, much more
|
||||
space-efficient and slightly faster. Note that the data format of LMDB is not
|
||||
portable between architectures, so for instance the Garage database of an
|
||||
x86-64 node cannot be moved to an ARM64 node. Also note that, while LMDB can
|
||||
technically be used on 32-bit systems, this will limit your node to very
|
||||
small database sizes due to how LMDB works; it is therefore not recommended.
|
||||
|
||||
- Sqlite: Garage supports Sqlite as an alternative storage backend for
|
||||
metadata, which does not have the issues listed above for LMDB.
|
||||
On versions 0.8.x and earlier, Sqlite should be avoided due to abysmal
|
||||
performance, which was fixed with the addition of `metadata_fsync`.
|
||||
Sqlite is still probably slower than LMDB due to the way we use it,
|
||||
so it is not the best choice for high-performance storage clusters,
|
||||
but it should work fine in many cases.
|
||||
metadata, and although it has not been tested as much, it is expected to work
|
||||
satisfactorily. Since Garage v0.9.0, performance issues have largely been
|
||||
fixed by allowing for a no-fsync mode (see `metadata_fsync`). Sqlite does not
|
||||
have the database size limitation of LMDB on 32-bit systems.
|
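||||
|
||||
For example, to pin the metadata engine explicitly (a sketch; `lmdb` is already
|
||||
the default since v0.9.0):
|
||||
|
||||
```toml
|
||||
db_engine = "lmdb"   # or "sqlite"
|
||||
```
|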
||||
|
||||
It is possible to convert Garage's metadata directory from one format to another
|
||||
using the `garage convert-db` command, which should be used as follows:
|
||||
|
@ -372,6 +312,7 @@ Here is how this option impacts the different database engines:
|
|||
|
||||
| Database | `metadata_fsync = false` (default) | `metadata_fsync = true` |
|
||||
|----------|------------------------------------|-------------------------------|
|
||||
| Sled | default options | *unsupported* |
|
||||
| Sqlite | `PRAGMA synchronous = OFF` | `PRAGMA synchronous = NORMAL` |
|
||||
| LMDB | `MDB_NOMETASYNC` + `MDB_NOSYNC` | `MDB_NOMETASYNC` |
|
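||||
|
||||
For example, a single-site deployment that cannot rely on geo-replication for
|
||||
recovery might prefer (a sketch):
|
||||
|
||||
```toml
|
||||
metadata_fsync = true   # fsync metadata on every write: safer, but slower
|
||||
```
|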
||||
|
||||
|
@ -390,43 +331,6 @@ at the cost of a moderate drop in write performance.
|
|||
Similarly to `metadata_fsync`, this is likely not necessary
|
||||
if geographical replication is used.
|
||||
|
||||
#### `metadata_auto_snapshot_interval` (since Garage v0.9.4) {#metadata_auto_snapshot_interval}
|
||||
|
||||
If this value is set, Garage will automatically take a snapshot of the metadata
|
||||
DB file at a regular interval and save it in the metadata directory.
|
||||
This parameter can take any duration string that can be parsed by
|
||||
the [`parse_duration`](https://docs.rs/parse_duration/latest/parse_duration/#syntax) crate.
|
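||||
|
||||
For example (a sketch; any duration string accepted by `parse_duration` works):
|
||||
|
||||
```toml
|
||||
metadata_auto_snapshot_interval = "6h"   # snapshot the metadata DB every 6 hours
|
||||
```
|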
||||
|
||||
Snapshots allow recovering from situations where the metadata DB file is
|
||||
corrupted, for instance after an unclean shutdown. See [this
|
||||
page](@/documentation/operations/recovering.md#corrupted_meta) for details.
|
||||
Garage keeps only the two most recent snapshots of the metadata DB and deletes
|
||||
older ones automatically.
|
||||
|
||||
Note that taking a metadata snapshot is a relatively intensive operation as the
|
||||
entire data file is copied. A snapshot being taken might have performance
|
||||
impacts on the Garage node while it is running. If the cluster is under heavy
|
||||
write load when a snapshot operation is running, this might also cause the
|
||||
database file to grow in size significantly as pages cannot be recycled easily.
|
||||
For this reason, it might be better to use filesystem-level snapshots instead
|
||||
if possible.
|
||||
|
||||
#### `disable_scrub` {#disable_scrub}
|
||||
|
||||
By default, Garage runs a scrub of the data directory approximately once per
|
||||
month, with a random delay to avoid all nodes running at the same time. When
|
||||
it scrubs the data directory, Garage will read all of the data files stored on
|
||||
disk to check their integrity, and will rebuild any data files that it finds
|
||||
corrupted, using the remaining valid copies stored on other nodes.
|
||||
See [this page](@/documentation/operations/durability-repairs.md#scrub) for details.
|
||||
|
||||
Set the `disable_scrub` configuration value to `true` if you don't need Garage
|
||||
to scrub the data directory, for instance if you are already scrubbing at the
|
||||
filesystem level. Note that in this case, if you find a corrupted data file,
|
||||
you should delete it from the data directory and then call `garage repair
|
||||
blocks` on the node to ensure that it re-obtains a copy from another node on
|
||||
the network.
|
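||||
|
||||
For example, if the filesystem already scrubs for you (e.g. ZFS with regular
|
||||
`zpool scrub` runs), a sketch of the corresponding setting:
|
||||
|
||||
```toml
|
||||
disable_scrub = true
|
||||
```
|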
||||
|
||||
#### `block_size` {#block_size}
|
||||
|
||||
Garage splits stored objects into consecutive chunks of size `block_size`
|
||||
|
@ -442,36 +346,20 @@ files will remain available. This however means that chunks from existing files
|
|||
will not be deduplicated with chunks from newly uploaded files, meaning you
|
||||
might use more storage space than is optimally possible.
|
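||||
|
||||
For example, a sketch of a larger block size for a well-connected cluster (the
|
||||
default is 1 MiB; the value below is in bytes):
|
||||
|
||||
```toml
|
||||
block_size = 10485760   # 10 MiB blocks: fewer chunks, but more RAM per transfer
|
||||
```
|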
||||
|
||||
#### `block_ram_buffer_max` (since v0.9.4) {#block_ram_buffer_max}
|
||||
#### `sled_cache_capacity` {#sled_cache_capacity}
|
||||
|
||||
A limit on the total size of data blocks kept in RAM by S3 API nodes while
|
||||
awaiting asynchronous transfer to storage nodes.
|
||||
This parameter can be used to tune the capacity of the cache used by
|
||||
[sled](https://sled.rs), the database Garage uses internally to store metadata.
|
||||
Tune this to fit the RAM you wish to make available to your Garage instance.
|
||||
This value has a conservative default (128MB) so that Garage doesn't use too much
|
||||
RAM by default, but feel free to increase this for higher performance.
|
||||
|
||||
Explanation: since Garage wants to tolerate node failures, it uses quorum
|
||||
writes to send data blocks to storage nodes: try to write the block to three
|
||||
nodes, and return ok as soon as two writes complete. So even if all three nodes
|
||||
are online, the third write always completes asynchronously. In general, there
|
||||
are not many writes to a cluster, and the third asynchronous write can
|
||||
terminate early enough so as to not cause unbounded RAM growth. However, if
|
||||
the S3 API node is continuously receiving large quantities of data and the
|
||||
third node is never able to catch up, many data blocks will be kept buffered in
|
||||
RAM as they are awaiting transfer to the third node.
|
||||
#### `sled_flush_every_ms` {#sled_flush_every_ms}
|
||||
|
||||
The `block_ram_buffer_max` parameter sets a limit on the size of buffers that can be kept
|
||||
in RAM in this process. When the limit is reached, backpressure is applied
|
||||
back to the S3 client.
|
||||
|
||||
Note that this only counts buffers that have arrived at a certain stage of
|
||||
processing (received from the client + encrypted and/or compressed as
|
||||
necessary) and are ready to send to the storage nodes. Many other buffers will
|
||||
not be counted and this is not a hard limit on RAM consumption. In particular,
|
||||
if many clients send requests simultaneously with large objects, the RAM
|
||||
consumption will always grow linearly with the number of concurrent requests,
|
||||
as each request will use a few buffers of size `block_size` for receiving and
|
||||
intermediate processing before even trying to send the data to the storage
|
||||
node.
|
||||
|
||||
The default value is 256MiB.
|
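||||
|
||||
For example, a gateway node with RAM to spare might raise the limit (a sketch;
|
||||
the value below is in bytes, assuming your Garage version accepts an integer here):
|
||||
|
||||
```toml
|
||||
block_ram_buffer_max = 1073741824   # 1 GiB of in-flight block buffers
|
||||
```
|
||||
|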
||||
This parameter can be used to tune the flushing interval of sled.
|
||||
Increase this if sled is thrashing your SSD, at the risk of losing more data in case
|
||||
of a power outage (though this should not matter much as data is replicated on other
|
||||
nodes). The default value, 2000ms, should be appropriate for most use cases.
|
||||
|
||||
#### `lmdb_map_size` {#lmdb_map_size}
|
||||
|
||||
|
@ -529,7 +417,7 @@ the node, even in the case of a NAT: the NAT should be configured to forward the
|
|||
port number to the same internal port number. This means that if you have several nodes running
|
||||
behind a NAT, they should each use a different RPC port number.
|
||||
|
||||
#### `rpc_bind_outgoing` (since v0.9.2) {#rpc_bind_outgoing}
|
||||
#### `rpc_bind_outgoing` {#rpc_bind_outgoing} (since v0.9.2)
|
||||
|
||||
If enabled, pre-bind all sockets for outgoing connections to the same IP address
|
||||
used for listening (the IP address specified in `rpc_bind_addr`) before
|
||||
|
@ -547,14 +435,6 @@ RPC calls. **This parameter is optional but recommended.** In case you have
|
|||
a NAT that binds the RPC port to a port that is different on your public IP,
|
||||
this field might help make it work.
|
||||
|
||||
#### `rpc_public_addr_subnet` {#rpc_public_addr_subnet}
|
||||
In case `rpc_public_addr` is not set, but autodiscovery is used, this allows
|
||||
filtering the list of automatically discovered IPs to a specific subnet.
|
||||
|
||||
For example, if nodes should pick *their* IP inside a specific subnet, but you
|
||||
don't want to explicitly write the IP down (as it's dynamic, or you want to
|
||||
share configs across nodes), you can use this option.
|
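||||
|
||||
For example (a sketch with a hypothetical subnet):
|
||||
|
||||
```toml
|
||||
rpc_public_addr_subnet = "10.42.0.0/16"   # keep only discovered IPs inside this range
|
||||
```
|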
||||
|
||||
#### `bootstrap_peers` {#bootstrap_peers}
|
||||
|
||||
A list of peer identifiers used to contact other Garage peers of this cluster.
|
||||
|
@ -571,7 +451,7 @@ be obtained by running `garage node id` and then included directly in the
|
|||
key will be returned by `garage node id` and you will have to add the IP
|
||||
yourself.
|
||||
|
||||
### `allow_world_readable_secrets` or `GARAGE_ALLOW_WORLD_READABLE_SECRETS` (env) {#allow_world_readable_secrets}
|
||||
### `allow_world_readable_secrets`
|
||||
|
||||
Garage checks the permissions of your secret files to make sure they're not
|
||||
world-readable. In some cases, the check might fail and consider your files as
|
||||
|
@ -732,7 +612,7 @@ the socket will have 0220 mode. Make sure to set user and group permissions acco
|
|||
The token for accessing the Metrics endpoint. If this token is not set, the
|
||||
Metrics endpoint can be accessed without access control.
|
||||
|
||||
You can use any random string for this value. We recommend generating a random token with `openssl rand -base64 32`.
|
||||
You can use any random string for this value. We recommend generating a random token with `openssl rand -hex 32`.
|
||||
|
||||
`metrics_token` was introduced in Garage `v0.7.2`.
|
||||
`metrics_token_file` and the `GARAGE_METRICS_TOKEN` environment variable are supported since Garage `v0.8.2`.
|
||||
|
@ -744,7 +624,7 @@ You can use any random string for this value. We recommend generating a random t
|
|||
The token for accessing all of the other administration endpoints. If this
|
||||
token is not set, access to these endpoints is disabled entirely.
|
||||
|
||||
You can use any random string for this value. We recommend generating a random token with `openssl rand -base64 32`.
|
||||
You can use any random string for this value. We recommend generating a random token with `openssl rand -hex 32`.
|
||||
|
||||
`admin_token` was introduced in Garage `v0.7.2`.
|
||||
`admin_token_file` and the `GARAGE_ADMIN_TOKEN` environment variable are supported since Garage `v0.8.2`.
|
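||||
|
||||
Putting the section together, a hedged sketch of a full `[admin]` block (the
|
||||
bind address and secret paths are assumptions to adapt to your deployment):
|
||||
|
||||
```toml
|
||||
[admin]
|
||||
api_bind_addr = "127.0.0.1:3903"
|
||||
admin_token_file = "/run/secrets/garage_admin_token"
|
||||
metrics_token_file = "/run/secrets/garage_metrics_token"
|
||||
trace_sink = "http://localhost:4317"
|
||||
```
|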
||||
|
|
|
@ -37,21 +37,6 @@ A Garage cluster can very easily evolve over time, as storage nodes are added or
|
|||
Garage will automatically rebalance data between nodes as needed to ensure the desired number of copies.
|
||||
Read about cluster layout management [here](@/documentation/operations/layout.md).
|
||||
|
||||
### Several replication modes
|
||||
|
||||
Garage supports a variety of replication modes, with configurable replica count,
|
||||
and with various levels of consistency, in order to adapt to a variety of usage scenarios.
|
||||
Read our reference page on [supported replication modes](@/documentation/reference-manual/configuration.md#replication_factor)
|
||||
to select the replication mode best suited to your use case (hint: in most cases, `replication_factor = 3` is what you want).
|
||||
|
||||
### Compression and deduplication
|
||||
|
||||
All data stored in Garage is deduplicated, and optionally compressed using
|
||||
Zstd. Objects uploaded to Garage are chunked in blocks of constant sizes (see
|
||||
[`block_size`](@/documentation/reference-manual/configuration.md#block_size)),
|
||||
and the hashes of individual blocks are used to dispatch them to storage nodes
|
||||
and to deduplicate them.
|
||||
|
||||
### No RAFT slowing you down
|
||||
|
||||
It might seem strange to tout the absence of something as a desirable feature,
|
||||
|
@ -63,6 +48,13 @@ As a consequence, requests can be handled much faster, even in cases where laten
|
|||
between cluster nodes is important (see our [benchmarks](@/documentation/design/benchmarks/index.md) for data on this).
|
||||
This is particularly useful when nodes are far from one another and talk to each other over standard Internet connections.
|
||||
|
||||
### Several replication modes
|
||||
|
||||
Garage supports a variety of replication modes, with 1 copy, 2 copies or 3 copies of your data,
|
||||
and with various levels of consistency, in order to adapt to a variety of usage scenarios.
|
||||
Read our reference page on [supported replication modes](@/documentation/reference-manual/configuration.md#replication_mode)
|
||||
to select the replication mode best suited to your use case (hint: in most cases, `replication_mode = "3"` is what you want).
|
||||
|
||||
### Web server for static websites
|
||||
|
||||
A storage bucket can easily be configured to be served directly by Garage as a static web site.
|
||||
|
|
|
@ -225,17 +225,6 @@ block_bytes_read 120586322022
|
|||
block_bytes_written 3386618077
|
||||
```
|
||||
|
||||
#### `block_ram_buffer_free_kb` (gauge)
|
||||
|
||||
Kibibytes available for buffering blocks that have to be sent to remote nodes.
|
||||
When clients send too much data to this node and a storage node is not receiving
|
||||
data fast enough due to slower network conditions, this will decrease to
|
||||
zero and backpressure will be applied.
|
||||
|
||||
```
|
||||
block_ram_buffer_free_kb 219829
|
||||
```
|
||||
|
||||
#### `block_compression_level` (counter)
|
||||
|
||||
Exposes the block compression level configured for the Garage node.
|
||||
|
|
|
@ -33,7 +33,6 @@ Feel free to open a PR to suggest fixes this table. Minio is missing because the
|
|||
| [URL path-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#path-style-access) (eg. `host.tld/bucket/key`) | ✅ Implemented | ✅ | ✅ | ❓| ✅ |
|
||||
| [URL vhost-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#virtual-hosted-style-access) URL (eg. `bucket.host.tld/key`) | ✅ Implemented | ❌| ✅| ✅ | ✅ |
|
||||
| [Presigned URLs](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ShareObjectPreSignedURL.html) | ✅ Implemented | ❌| ✅ | ✅ | ✅(❓) |
|
||||
| [SSE-C encryption](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html) | ✅ Implemented | ❓ | ✅ | ❌ | ✅ |
|
||||
|
||||
*Note:* OpenIO does not say whether it supports presigned URLs. Because it is part
|
||||
of signature v4 and they claim they support it without further details,
|
||||
|
|
|
@ -1,77 +0,0 @@
|
|||
+++
|
||||
title = "Migrating from 0.9 to 1.0"
|
||||
weight = 11
|
||||
+++
|
||||
|
||||
**This guide explains how to migrate to 1.0 if you have an existing 0.9 cluster.
|
||||
We don't recommend trying to migrate to 1.0 directly from 0.8 or older.**
|
||||
|
||||
This migration procedure has been tested on several clusters without issues.
|
||||
However, it is still a *critical procedure* that might cause issues.
|
||||
**Make sure to back up all your data before attempting it!**
|
||||
|
||||
You might also want to read our [general documentation on upgrading Garage](@/documentation/operations/upgrading.md).
|
||||
|
||||
## Changes introduced in v1.0
|
||||
|
||||
The following are **breaking changes** in Garage v1.0 that require your attention when migrating:
|
||||
|
||||
- The Sled metadata db engine has been **removed**. If your cluster was still
|
||||
using Sled, you will need to **use a Garage v0.9.x binary** to convert the
|
||||
database using the `garage convert-db` subcommand. See
|
||||
[here](@/documentation/reference-manual/configuration.md#db_engine) for the
|
||||
details of the procedure.
|
||||
|
||||
The following syntax changes have been made to the configuration file:
|
||||
|
||||
- The `replication_mode` parameter has been split into two parameters:
|
||||
[`replication_factor`](@/documentation/reference-manual/configuration.md#replication_factor)
|
||||
and
|
||||
[`consistency_mode`](@/documentation/reference-manual/configuration.md#consistency_mode).
|
||||
The old syntax using `replication_mode` is still supported for legacy
|
||||
reasons and can still be used.
|
||||
|
||||
- The parameters `sled_cache_capacity` and `sled_flush_every_ms` have been removed.
|
||||
|
||||
## Migration procedure
|
||||
|
||||
The migration to Garage v1.0 can be done with almost no downtime,
|
||||
by restarting all nodes at once in the new version.
|
||||
|
||||
The migration steps are as follows:
|
||||
|
||||
1. Do a `garage repair --all-nodes --yes tables`, check the logs and check that
|
||||
all data seems to be synced correctly between nodes. If you have time, do
|
||||
additional `garage repair` procedures (`blocks`, `versions`, `block_refs`,
|
||||
etc.)
|
||||
|
||||
2. Ensure you have a snapshot of your Garage installation that you can restore
|
||||
to in case the upgrade goes wrong:
|
||||
|
||||
- If you are running Garage v0.9.4 or later, use the `garage meta snapshot
|
||||
--all` command to make a backup snapshot of the metadata directories of your nodes
|
||||
for backup purposes, and save a copy of the following files in the
|
||||
metadata directories of your nodes: `cluster_layout`, `data_layout`,
|
||||
`node_key`, `node_key.pub`.
|
||||
|
||||
- If you are running a filesystem such as ZFS or BTRFS that supports
|
||||
snapshotting, you can create a filesystem-level snapshot to be used as a
|
||||
restoration point if needed.
|
||||
|
||||
- In other cases, make a backup using the old procedure: turn off each node
|
||||
individually; back up its metadata folder (for instance, use the following
|
||||
command if your metadata directory is `/var/lib/garage/meta`: `cd
|
||||
/var/lib/garage ; tar -acf meta-v0.9.tar.zst meta/`); turn it back on
|
||||
again. This will allow you to take a backup of all nodes without
|
||||
impacting global cluster availability. You can do all nodes of a single
|
||||
zone at once as this does not impact the availability of Garage.
|
||||
|
||||
3. Prepare your updated binaries and configuration files for Garage v1.0
|
||||
|
||||
4. Shut down all v0.9 nodes simultaneously, and restart them all simultaneously
|
||||
in v1.0. Use your favorite deployment tool (Ansible, Kubernetes, Nomad) to
|
||||
achieve this as fast as possible. Garage v1.0 should be in a working state
|
||||
as soon as enough nodes have started.
|
||||
|
||||
5. Monitor your cluster in the following hours to see if it works well under
|
||||
your production load.
|
|
@ -69,10 +69,11 @@ Example response body:
|
|||
|
||||
```json
|
||||
{
|
||||
"node": "b10c110e4e854e5aa3f4637681befac755154b20059ec163254ddbfae86b09df",
|
||||
"garageVersion": "v1.0.1",
|
||||
"node": "ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f",
|
||||
"garageVersion": "git:v0.9.0-dev",
|
||||
"garageFeatures": [
|
||||
"k2v",
|
||||
"sled",
|
||||
"lmdb",
|
||||
"sqlite",
|
||||
"metrics",
|
||||
|
@ -80,92 +81,83 @@ Example response body:
|
|||
],
|
||||
"rustVersion": "1.68.0",
|
||||
"dbEngine": "LMDB (using Heed crate)",
|
||||
"layoutVersion": 5,
|
||||
"nodes": [
|
||||
"knownNodes": [
|
||||
{
|
||||
"id": "62b218d848e86a64f7fe1909735f29a4350547b54c4b204f91246a14eb0a1a8c",
|
||||
"role": {
|
||||
"id": "62b218d848e86a64f7fe1909735f29a4350547b54c4b204f91246a14eb0a1a8c",
|
||||
"id": "ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f",
|
||||
"addr": "10.0.0.11:3901",
|
||||
"isUp": true,
|
||||
"lastSeenSecsAgo": 9,
|
||||
"hostname": "node1"
|
||||
},
|
||||
{
|
||||
"id": "4a6ae5a1d0d33bf895f5bb4f0a418b7dc94c47c0dd2eb108d1158f3c8f60b0ff",
|
||||
"addr": "10.0.0.12:3901",
|
||||
"isUp": true,
|
||||
"lastSeenSecsAgo": 1,
|
||||
"hostname": "node2"
|
||||
},
|
||||
{
|
||||
"id": "23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27",
|
||||
"addr": "10.0.0.21:3901",
|
||||
"isUp": true,
|
||||
"lastSeenSecsAgo": 7,
|
||||
"hostname": "node3"
|
||||
},
|
||||
{
|
||||
"id": "e2ee7984ee65b260682086ec70026165903c86e601a4a5a501c1900afe28d84b",
|
||||
"addr": "10.0.0.22:3901",
|
||||
"isUp": true,
|
||||
"lastSeenSecsAgo": 1,
|
||||
"hostname": "node4"
|
||||
}
|
||||
],
|
||||
"layout": {
|
||||
"version": 12,
|
||||
"roles": [
|
||||
{
|
||||
"id": "ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f",
|
||||
"zone": "dc1",
|
||||
"capacity": 100000000000,
|
||||
"tags": []
|
||||
},
|
||||
"addr": "10.0.0.3:3901",
|
||||
"hostname": "node3",
|
||||
"isUp": true,
|
||||
"lastSeenSecsAgo": 12,
|
||||
"draining": false,
|
||||
"dataPartition": {
|
||||
"available": 660270088192,
|
||||
"total": 873862266880
|
||||
},
|
||||
"metadataPartition": {
|
||||
"available": 660270088192,
|
||||
"total": 873862266880
|
||||
}
|
||||
"capacity": 10737418240,
|
||||
"tags": [
|
||||
"node1"
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "a11c7cf18af297379eff8688360155fe68d9061654449ba0ce239252f5a7487f",
|
||||
"role": null,
|
||||
"addr": "10.0.0.2:3901",
|
||||
"hostname": "node2",
|
||||
"isUp": true,
|
||||
"lastSeenSecsAgo": 11,
|
||||
"draining": true,
|
||||
"dataPartition": {
|
||||
"available": 660270088192,
|
||||
"total": 873862266880
|
||||
},
|
||||
"metadataPartition": {
|
||||
"available": 660270088192,
|
||||
"total": 873862266880
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "a235ac7695e0c54d7b403943025f57504d500fdcc5c3e42c71c5212faca040a2",
|
||||
"role": {
|
||||
"id": "a235ac7695e0c54d7b403943025f57504d500fdcc5c3e42c71c5212faca040a2",
|
||||
"id": "4a6ae5a1d0d33bf895f5bb4f0a418b7dc94c47c0dd2eb108d1158f3c8f60b0ff",
|
||||
"zone": "dc1",
|
||||
"capacity": 100000000000,
|
||||
"tags": []
|
||||
},
|
||||
"addr": "127.0.0.1:3904",
|
||||
"hostname": "lindy",
|
||||
"isUp": true,
|
||||
"lastSeenSecsAgo": 2,
|
||||
"draining": false,
|
||||
"dataPartition": {
|
||||
"available": 660270088192,
|
||||
"total": 873862266880
|
||||
},
|
||||
"metadataPartition": {
|
||||
"available": 660270088192,
|
||||
"total": 873862266880
|
||||
}
|
||||
"capacity": 10737418240,
|
||||
"tags": [
|
||||
"node2"
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "b10c110e4e854e5aa3f4637681befac755154b20059ec163254ddbfae86b09df",
|
||||
"role": {
|
||||
"id": "b10c110e4e854e5aa3f4637681befac755154b20059ec163254ddbfae86b09df",
|
||||
"zone": "dc1",
|
||||
"capacity": 100000000000,
|
||||
"tags": []
|
||||
},
|
||||
"addr": "10.0.0.1:3901",
|
||||
"hostname": "node1",
|
||||
"isUp": true,
|
||||
"lastSeenSecsAgo": 3,
|
||||
"draining": false,
|
||||
"dataPartition": {
|
||||
"available": 660270088192,
|
||||
"total": 873862266880
|
||||
},
|
||||
"metadataPartition": {
|
||||
"available": 660270088192,
|
||||
"total": 873862266880
|
||||
"id": "23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27",
|
||||
"zone": "dc2",
|
||||
"capacity": 10737418240,
|
||||
"tags": [
|
||||
"node3"
|
||||
]
|
||||
}
|
||||
],
|
||||
"stagedRoleChanges": [
|
||||
{
|
||||
"id": "e2ee7984ee65b260682086ec70026165903c86e601a4a5a501c1900afe28d84b",
|
||||
"remove": false,
|
||||
"zone": "dc2",
|
||||
"capacity": 10737418240,
|
||||
"tags": [
|
||||
"node4"
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27",
|
||||
"remove": true,
|
||||
"zone": null,
|
||||
"capacity": null,
|
||||
"tags": null,
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
|
|
|
@ -146,7 +146,7 @@ in a bucket, as the partition key becomes the sort key in the index.
|
|||
How indexing works:
|
||||
|
||||
- Each node keeps a local count of how many items it stores for each partition,
|
||||
in a local database tree that is updated atomically when an item is modified.
|
||||
in a local Sled tree that is updated atomically when an item is modified.
|
||||
- These local counters are asynchronously stored in the index table which is
|
||||
a regular Garage table spread in the network. Counters are stored as LWW values,
|
||||
so basically the final table will have the following structure:
|
||||
|
|
17
doc/talks/2024-02-29-capitoul/.gitignore
vendored
|
@ -1,17 +0,0 @@
|
|||
*
|
||||
|
||||
!*.txt
|
||||
!*.md
|
||||
|
||||
!assets
|
||||
|
||||
!.gitignore
|
||||
!*.svg
|
||||
!*.png
|
||||
!*.jpg
|
||||
!*.tex
|
||||
!Makefile
|
||||
!.gitignore
|
||||
!assets/*.drawio.pdf
|
||||
|
||||
!talk.pdf
|
|
@ -1,10 +0,0 @@
|
|||
ASSETS=../assets/logos/deuxfleurs.pdf
|
||||
|
||||
talk.pdf: talk.tex $(ASSETS)
|
||||
pdflatex talk.tex
|
||||
|
||||
%.pdf: %.svg
|
||||
inkscape -D -z --file=$^ --export-pdf=$@
|
||||
|
||||
%.pdf_tex: %.svg
|
||||
inkscape -D -z --file=$^ --export-pdf=$@ --export-latex
|
|
@ -1,543 +0,0 @@
|
|||
\nonstopmode
|
||||
\documentclass[aspectratio=169,xcolor={svgnames}]{beamer}
|
||||
\usepackage[utf8]{inputenc}
|
||||
% \usepackage[frenchb]{babel}
|
||||
\usepackage{amsmath}
|
||||
\usepackage{mathtools}
|
||||
\usepackage{breqn}
|
||||
\usepackage{multirow}
|
||||
\usetheme{boxes}
|
||||
\usepackage{graphicx}
|
||||
\usepackage{import}
|
||||
\usepackage{adjustbox}
|
||||
\usepackage[absolute,overlay]{textpos}
|
||||
%\useoutertheme[footline=authortitle,subsection=false]{miniframes}
|
||||
%\useoutertheme[footline=authorinstitute,subsection=false]{miniframes}
|
||||
\useoutertheme{infolines}
|
||||
\setbeamertemplate{headline}{}
|
||||
|
||||
\beamertemplatenavigationsymbolsempty
|
||||
|
||||
\definecolor{TitleOrange}{RGB}{255,137,0}
|
||||
\setbeamercolor{title}{fg=TitleOrange}
|
||||
\setbeamercolor{frametitle}{fg=TitleOrange}
|
||||
|
||||
\definecolor{ListOrange}{RGB}{255,145,5}
|
||||
\setbeamertemplate{itemize item}{\color{ListOrange}$\blacktriangleright$}
|
||||
|
||||
\definecolor{verygrey}{RGB}{70,70,70}
|
||||
\setbeamercolor{normal text}{fg=verygrey}
|
||||
|
||||
|
||||
\usepackage{tabu}
|
||||
\usepackage{multicol}
|
||||
\usepackage{vwcol}
|
||||
\usepackage{stmaryrd}
|
||||
\usepackage{graphicx}
|
||||
|
||||
\usepackage[normalem]{ulem}
|
||||
|
||||
\AtBeginSection[]{
|
||||
\begin{frame}
|
||||
\vfill
|
||||
\centering
|
||||
\begin{beamercolorbox}[sep=8pt,center,shadow=true,rounded=true]{title}
|
||||
\usebeamerfont{title}\insertsectionhead\par%
|
||||
\end{beamercolorbox}
|
||||
\vfill
|
||||
\end{frame}
|
||||
}
|
||||
|
||||
\title{Garage}
|
||||
\author{Alex Auvolat, Deuxfleurs}
|
||||
\date{Capitoul, 2024-02-29}
|
||||
|
||||
\begin{document}
|
||||
|
||||
\begin{frame}
|
||||
\centering
|
||||
\includegraphics[width=.3\linewidth]{../../sticker/Garage.png}
|
||||
\vspace{1em}
|
||||
|
||||
{\large\bf Alex Auvolat, Deuxfleurs Association}
|
||||
\vspace{1em}
|
||||
|
||||
\url{https://garagehq.deuxfleurs.fr/}
|
||||
|
||||
Matrix channel: \texttt{\#garage:deuxfleurs.fr}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Who I am}
|
||||
\begin{columns}[t]
|
||||
\begin{column}{.2\textwidth}
|
||||
\centering
|
||||
\adjincludegraphics[width=.4\linewidth, valign=t]{../assets/alex.jpg}
|
||||
\end{column}
|
||||
\begin{column}{.6\textwidth}
|
||||
\textbf{Alex Auvolat}\\
|
||||
PhD; co-founder of Deuxfleurs
|
||||
\end{column}
|
||||
\begin{column}{.2\textwidth}
|
||||
~
|
||||
\end{column}
|
||||
\end{columns}
|
||||
\vspace{2em}
|
||||
|
||||
\begin{columns}[t]
|
||||
\begin{column}{.2\textwidth}
|
||||
\centering
|
||||
\adjincludegraphics[width=.5\linewidth, valign=t]{../assets/logos/deuxfleurs.pdf}
|
||||
\end{column}
|
||||
\begin{column}{.6\textwidth}
|
||||
\textbf{Deuxfleurs}\\
|
||||
A non-profit self-hosting collective,\\
|
||||
member of the CHATONS network
|
||||
\end{column}
|
||||
\begin{column}{.2\textwidth}
|
||||
\centering
|
||||
\adjincludegraphics[width=.7\linewidth, valign=t]{../assets/logos/logo_chatons.png}
|
||||
\end{column}
|
||||
\end{columns}
|
||||
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Our objective at Deuxfleurs}
|
||||
|
||||
\begin{center}
|
||||
\textbf{Promote self-hosting and small-scale hosting\\
|
||||
as an alternative to large cloud providers}
|
||||
\end{center}
|
||||
\vspace{2em}
|
||||
\visible<2->{
|
||||
Why is it hard?
|
||||
\vspace{2em}
|
||||
\begin{center}
|
||||
\textbf{\underline{Resilience}}\\
|
||||
{\footnotesize we want good uptime/availability with low supervision}
|
||||
\end{center}
|
||||
}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Our very low-tech infrastructure}
|
||||
|
||||
\only<1,3-6>{
|
||||
\begin{itemize}
|
||||
\item \textcolor<4->{gray}{Commodity hardware (e.g. old desktop PCs)\\
|
||||
\vspace{.5em}
|
||||
\visible<3->{{\footnotesize (can die at any time)}}}
|
||||
\vspace{1.5em}
|
||||
\item<4-> \textcolor<6->{gray}{Regular Internet (e.g. FTTB, FTTH) and power grid connections\\
|
||||
\vspace{.5em}
|
||||
\visible<5->{{\footnotesize (can be unavailable randomly)}}}
|
||||
\vspace{1.5em}
|
||||
\item<6-> \textbf{Geographical redundancy} (multi-site replication)
|
||||
\end{itemize}
|
||||
}
|
||||
\only<2>{
|
||||
\begin{center}
|
||||
\includegraphics[width=.8\linewidth]{../assets/neptune.jpg}
|
||||
\end{center}
|
||||
}
|
||||
\only<7>{
|
||||
\begin{center}
|
||||
\includegraphics[width=.8\linewidth]{../assets/inframap_jdll2023.pdf}
|
||||
\end{center}
|
||||
}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{How to make this happen}
|
||||
\begin{center}
|
||||
\only<1>{\includegraphics[width=.8\linewidth]{../assets/intro/slide1.png}}%
|
||||
\only<2>{\includegraphics[width=.8\linewidth]{../assets/intro/slide2.png}}%
|
||||
\only<3>{\includegraphics[width=.8\linewidth]{../assets/intro/slide3.png}}%
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Distributed file systems are slow}
|
||||
File systems are complex, for example:
|
||||
\vspace{1em}
|
||||
\begin{itemize}
|
||||
\item Concurrent modification by several processes
|
||||
\vspace{1em}
|
||||
\item Folder hierarchies
|
||||
\vspace{1em}
|
||||
\item Other requirements of the POSIX spec (e.g.~locks)
|
||||
\end{itemize}
|
||||
\vspace{1em}
|
||||
Coordination in a distributed system is costly
|
||||
|
||||
\vspace{1em}
|
||||
Costs explode with commodity hardware / Internet connections\\
|
||||
{\small (we experienced this!)}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{A simpler solution: object storage}
|
||||
Only two operations:
|
||||
\vspace{1em}
|
||||
\begin{itemize}
|
||||
\item Put an object at a key
|
||||
\vspace{1em}
|
||||
\item Retrieve an object from its key
|
||||
\end{itemize}
|
||||
\vspace{1em}
|
||||
{\footnotesize (and a few others)}
|
||||
|
||||
\vspace{1em}
|
||||
Sufficient for many applications!
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{A simpler solution: object storage}
|
||||
\begin{center}
|
||||
\includegraphics[height=6em]{../assets/logos/Amazon-S3.jpg}
|
||||
\hspace{3em}
|
||||
\visible<2->{\includegraphics[height=5em]{../assets/logos/minio.png}}
|
||||
\hspace{3em}
|
||||
\visible<3>{\includegraphics[height=6em]{../../logo/garage_hires_crop.png}}
|
||||
\end{center}
|
||||
\vspace{1em}
|
||||
S3: a de-facto standard, many compatible applications
|
||||
|
||||
\vspace{1em}
|
||||
\visible<2->{MinIO is self-hostable but not suited for geo-distributed deployments}
|
||||
|
||||
\vspace{1em}
|
||||
\visible<3->{\textbf{Garage is a self-hosted drop-in replacement for the Amazon S3 object store}}
|
||||
\end{frame}
|
||||
|
||||
% --------- BASED ON CRDTS ----------
|
||||
|
||||
\section{Principle 1: based on CRDTs}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{CRDTs / weak consistency instead of consensus}
|
||||
|
||||
\underline{Internally, Garage uses only CRDTs} (conflict-free replicated data types)
|
||||
|
||||
\vspace{2em}
|
||||
Why not Raft, Paxos, ...? Issues of consensus algorithms:
|
||||
|
||||
\vspace{1em}
|
||||
\begin{itemize}
|
||||
\item<2-> \textbf{Software complexity}
|
||||
\vspace{1em}
|
||||
\item<3-> \textbf{Performance issues:}
|
||||
\vspace{.5em}
|
||||
\begin{itemize}
|
||||
\item<4-> The leader is a \textbf{bottleneck} for all requests\\
|
||||
\vspace{.5em}
|
||||
\item<5-> \textbf{Sensitive to higher latency} between nodes
|
||||
\vspace{.5em}
|
||||
\item<6-> \textbf{Takes time to reconverge} when disrupted (e.g. node going down)
|
||||
\end{itemize}
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{The data model of object storage}
|
||||
Object storage is basically a \textbf{key-value store}:
|
||||
\vspace{.5em}
|
||||
|
||||
{\scriptsize
|
||||
\begin{center}
|
||||
\begin{tabular}{|l|p{7cm}|}
|
||||
\hline
|
||||
\textbf{Key: file path + name} & \textbf{Value: file data + metadata} \\
|
||||
\hline
|
||||
\hline
|
||||
\texttt{index.html} &
|
||||
\texttt{Content-Type: text/html; charset=utf-8} \newline
|
||||
\texttt{Content-Length: 24929} \newline
|
||||
\texttt{<binary blob>} \\
|
||||
\hline
|
||||
\texttt{img/logo.svg} &
|
||||
\texttt{Content-Type: text/svg+xml} \newline
|
||||
\texttt{Content-Length: 13429} \newline
|
||||
\texttt{<binary blob>} \\
|
||||
\hline
|
||||
\texttt{download/index.html} &
|
||||
\texttt{Content-Type: text/html; charset=utf-8} \newline
|
||||
\texttt{Content-Length: 26563} \newline
|
||||
\texttt{<binary blob>} \\
|
||||
\hline
|
||||
\end{tabular}
|
||||
\end{center}
|
||||
}
|
||||
|
||||
\vspace{.5em}
|
||||
\begin{itemize}
|
||||
\item<2-> Maps well to CRDT data types
|
||||
\item<3> Read-after-write consistency with quorums
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Performance gains in practice}
|
||||
\begin{center}
|
||||
\includegraphics[width=.8\linewidth]{../assets/perf/endpoint_latency_0.7_0.8_minio.png}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
% --------- GEO-DISTRIBUTED MODEL ----------
|
||||
|
||||
\section{Principle 2: geo-distributed data model}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Key-value stores, upgraded: the Dynamo model}
|
||||
\textbf{Two keys:}
|
||||
\begin{itemize}
|
||||
\item Partition key: used to divide data into partitions {\small (a.k.a.~shards)}
|
||||
\item Sort key: used to identify items inside a partition
|
||||
\end{itemize}
|
||||
|
||||
\vspace{1em}
|
||||
|
||||
\begin{center}
|
||||
\begin{tabular}{|l|l|p{3cm}|}
|
||||
\hline
|
||||
\textbf{Partition key: bucket} & \textbf{Sort key: filename} & \textbf{Value} \\
|
||||
\hline
|
||||
\hline
|
||||
\texttt{website} & \texttt{index.html} & (file data) \\
|
||||
\hline
|
||||
\texttt{website} & \texttt{img/logo.svg} & (file data) \\
|
||||
\hline
|
||||
\texttt{website} & \texttt{download/index.html} & (file data) \\
|
||||
\hline
|
||||
\hline
|
||||
\texttt{backup} & \texttt{borg/index.2822} & (file data) \\
|
||||
\hline
|
||||
\texttt{backup} & \texttt{borg/data/2/2329} & (file data) \\
|
||||
\hline
|
||||
\texttt{backup} & \texttt{borg/data/2/2680} & (file data) \\
|
||||
\hline
|
||||
\hline
|
||||
\texttt{private} & \texttt{qq3a2nbe1qjq0ebbvo6ocsp6co} & (file data) \\
|
||||
\hline
|
||||
\end{tabular}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Layout computation}
|
||||
\begin{overprint}
|
||||
\onslide<1>
|
||||
\begin{center}
|
||||
\includegraphics[width=\linewidth, trim=0 0 0 -4cm]{../assets/screenshots/garage_status_0.9_prod_zonehl.png}
|
||||
\end{center}
|
||||
\onslide<2>
|
||||
\begin{center}
|
||||
\includegraphics[width=.7\linewidth]{../assets/map.png}
|
||||
\end{center}
|
||||
\end{overprint}
|
||||
\vspace{1em}
|
||||
Garage stores replicas on different zones when possible
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{What a ``layout'' is}
|
||||
\textbf{A layout is a precomputed index table:}
|
||||
\vspace{1em}
|
||||
|
||||
{\footnotesize
|
||||
\begin{center}
|
||||
\begin{tabular}{|l|l|l|l|}
|
||||
\hline
|
||||
\textbf{Partition} & \textbf{Node 1} & \textbf{Node 2} & \textbf{Node 3} \\
|
||||
\hline
|
||||
\hline
|
||||
Partition 0 & df-ymk (bespin) & Abricot (scorpio) & Courgette (neptune) \\
|
||||
\hline
|
||||
Partition 1 & Ananas (scorpio) & Courgette (neptune) & df-ykl (bespin) \\
|
||||
\hline
|
||||
Partition 2 & df-ymf (bespin) & Celeri (neptune) & Abricot (scorpio) \\
|
||||
\hline
|
||||
\hspace{1em}$\vdots$ & \hspace{1em}$\vdots$ & \hspace{1em}$\vdots$ & \hspace{1em}$\vdots$ \\
|
||||
\hline
|
||||
Partition 255 & Concombre (neptune) & df-ykl (bespin) & Abricot (scorpio) \\
|
||||
\hline
|
||||
\end{tabular}
|
||||
\end{center}
|
||||
}
|
||||
|
||||
\vspace{2em}
|
||||
\visible<2->{
|
||||
The index table is built centrally using an optimal algorithm,\\
|
||||
then propagated to all nodes
|
||||
}
|
||||
|
||||
\vspace{1em}
|
||||
\visible<3->{
|
||||
\footnotesize
|
||||
Oulamara, M., \& Auvolat, A. (2023). \emph{An algorithm for geo-distributed and redundant storage in Garage}.\\ arXiv preprint arXiv:2302.13798.
|
||||
}
|
||||
\end{frame}
|
||||
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{The relationship between \emph{partition} and \emph{partition key}}
|
||||
\begin{center}
|
||||
\begin{tabular}{|l|l|l|l|}
|
||||
\hline
|
||||
\textbf{Partition key} & \textbf{Partition} & \textbf{Sort key} & \textbf{Value} \\
|
||||
\hline
|
||||
\hline
|
||||
\texttt{website} & Partition 12 & \texttt{index.html} & (file data) \\
|
||||
\hline
|
||||
\texttt{website} & Partition 12 & \texttt{img/logo.svg} & (file data) \\
|
||||
\hline
|
||||
\texttt{website} & Partition 12 &\texttt{download/index.html} & (file data) \\
|
||||
\hline
|
||||
\hline
|
||||
\texttt{backup} & Partition 42 & \texttt{borg/index.2822} & (file data) \\
|
||||
\hline
|
||||
\texttt{backup} & Partition 42 & \texttt{borg/data/2/2329} & (file data) \\
|
||||
\hline
|
||||
\texttt{backup} & Partition 42 & \texttt{borg/data/2/2680} & (file data) \\
|
||||
\hline
|
||||
\hline
|
||||
\texttt{private} & Partition 42 & \texttt{qq3a2nbe1qjq0ebbvo6ocsp6co} & (file data) \\
|
||||
\hline
|
||||
\end{tabular}
|
||||
\end{center}
|
||||
\vspace{1em}
|
||||
\textbf{To read or write an item:} hash partition key
|
||||
\\ \hspace{5cm} $\to$ determine partition number (first 8 bits)
|
||||
\\ \hspace{5cm} $\to$ find associated nodes
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Garage's internal data structures}
|
||||
\centering
|
||||
\includegraphics[width=.75\columnwidth]{../assets/garage_tables.pdf}
|
||||
\end{frame}
|
||||
|
||||
% ---------- OPERATING GARAGE ---------
|
||||
|
||||
\section{Operating Garage clusters}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Operating Garage}
|
||||
\begin{center}
|
||||
\only<1-2>{
|
||||
\includegraphics[width=.9\linewidth]{../assets/screenshots/garage_status_0.10.png}
|
||||
\\\vspace{1em}
|
||||
\visible<2>{\includegraphics[width=.9\linewidth]{../assets/screenshots/garage_status_unhealthy_0.10.png}}
|
||||
}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Background synchronization}
|
||||
\begin{center}
|
||||
\includegraphics[width=.6\linewidth]{../assets/garage_sync.drawio.pdf}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Digging deeper}
|
||||
\begin{center}
|
||||
\only<1>{\includegraphics[width=.9\linewidth]{../assets/screenshots/garage_stats_0.10.png}}
|
||||
\only<2>{\includegraphics[width=.5\linewidth]{../assets/screenshots/garage_worker_list_0.10.png}}
|
||||
\only<3>{\includegraphics[width=.6\linewidth]{../assets/screenshots/garage_worker_param_0.10.png}}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Monitoring with Prometheus + Grafana}
|
||||
\begin{center}
|
||||
\includegraphics[width=.9\linewidth]{../assets/screenshots/grafana_dashboard.png}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Debugging with traces}
|
||||
\begin{center}
|
||||
\includegraphics[width=.8\linewidth]{../assets/screenshots/jaeger_listobjects.png}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
% ---------- SCALING GARAGE ---------
|
||||
|
||||
\section{Scaling Garage clusters}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Potential limitations and bottlenecks}
|
||||
\begin{itemize}
|
||||
\item Global:
|
||||
\begin{itemize}
|
||||
\item Max. $\sim$100 nodes per cluster (excluding gateways)
|
||||
\end{itemize}
|
||||
\vspace{1em}
|
||||
\item Metadata:
|
||||
\begin{itemize}
|
||||
\item One big bucket = bottleneck, object list on 3 nodes only
|
||||
\end{itemize}
|
||||
\vspace{1em}
|
||||
\item Block manager:
|
||||
\begin{itemize}
|
||||
\item Lots of small files on disk
|
||||
\item Processing the resync queue can be slow
|
||||
\end{itemize}
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Deployment advice for very large clusters}
|
||||
\begin{itemize}
|
||||
\item Metadata storage:
|
||||
\begin{itemize}
|
||||
\item ZFS mirror (x2) on fast NVMe
|
||||
\item Use LMDB storage engine
|
||||
\end{itemize}
|
||||
\vspace{.5em}
|
||||
\item Data block storage:
|
||||
\begin{itemize}
|
||||
\item Use Garage's native multi-HDD support
|
||||
\item XFS on individual drives
|
||||
\item Increase block size (1MB $\to$ 10MB, requires more RAM and good networking)
|
||||
\item Tune \texttt{resync-tranquility} and \texttt{resync-worker-count} dynamically
|
||||
\end{itemize}
|
||||
\vspace{.5em}
|
||||
\item Other:
|
||||
\begin{itemize}
|
||||
\item Split data over several buckets
|
||||
\item Use less than 100 storage nodes
|
||||
\item Use gateway nodes
|
||||
\end{itemize}
|
||||
\vspace{.5em}
|
||||
\end{itemize}
|
||||
Our deployments: $< 10$ TB. Some people have done more!
|
||||
\end{frame}
|
||||
|
||||
|
||||
% ======================================== END
|
||||
% ======================================== END
|
||||
% ======================================== END
|
||||
|
||||
\begin{frame}
|
||||
\frametitle{Where to find us}
|
||||
\begin{center}
|
||||
\includegraphics[width=.25\linewidth]{../../logo/garage_hires.png}\\
|
||||
\vspace{-1em}
|
||||
\url{https://garagehq.deuxfleurs.fr/}\\
|
||||
\url{mailto:garagehq@deuxfleurs.fr}\\
|
||||
\texttt{\#garage:deuxfleurs.fr} on Matrix
|
||||
|
||||
\vspace{1.5em}
|
||||
\includegraphics[width=.06\linewidth]{../assets/logos/rust_logo.png}
|
||||
\includegraphics[width=.13\linewidth]{../assets/logos/AGPLv3_Logo.png}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\end{document}
|
||||
|
||||
%% vim: set ts=4 sw=4 tw=0 noet spelllang=en :
|
Before Width: | Height: | Size: 87 KiB |
Before Width: | Height: | Size: 81 KiB |
Before Width: | Height: | Size: 124 KiB |
Before Width: | Height: | Size: 84 KiB |
Before Width: | Height: | Size: 81 KiB |
Before Width: | Height: | Size: 81 KiB |
Before Width: | Height: | Size: 315 KiB |
Before Width: | Height: | Size: 286 KiB |
84
flake.lock

@ -28,11 +28,11 @@
    },
    "flake-compat": {
      "locked": {
        "lastModified": 1717312683,
        "narHash": "sha256-FrlieJH50AuvagamEvWMIE6D2OAnERuDboFDYAED/dE=",
        "lastModified": 1688025799,
        "narHash": "sha256-ktpB4dRtnksm9F5WawoIkEneh1nrEvuxb5lJFt1iOyw=",
        "owner": "nix-community",
        "repo": "flake-compat",
        "rev": "38fd3954cf65ce6faf3d0d45cd26059e059f07ea",
        "rev": "8bf105319d44f6b9f0d764efa4fdef9f1cc9ba1c",
        "type": "github"
      },
      "original": {

@ -42,12 +42,33 @@
      }
    },
    "flake-utils": {
      "inputs": {
        "systems": "systems"
      },
      "locked": {
        "lastModified": 1659877975,
        "narHash": "sha256-zllb8aq3YO3h8B/U0/J1WBgAL8EX5yWf5pMj3G0NAmc=",
        "lastModified": 1681202837,
        "narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
        "owner": "numtide",
        "repo": "flake-utils",
        "rev": "c0e246b9b83f637f4681389ecabcb2681b4f3af0",
        "rev": "cfacdce06f30d2b68473a46042957675eebb3401",
        "type": "github"
      },
      "original": {
        "owner": "numtide",
        "repo": "flake-utils",
        "type": "github"
      }
    },
    "flake-utils_2": {
      "inputs": {
        "systems": "systems_2"
      },
      "locked": {
        "lastModified": 1681202837,
        "narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
        "owner": "numtide",
        "repo": "flake-utils",
        "rev": "cfacdce06f30d2b68473a46042957675eebb3401",
        "type": "github"
      },
      "original": {

@ -58,11 +79,11 @@
    },
    "nixpkgs": {
      "locked": {
        "lastModified": 1724395761,
        "narHash": "sha256-zRkDV/nbrnp3Y8oCADf5ETl1sDrdmAW6/bBVJ8EbIdQ=",
        "lastModified": 1682109806,
        "narHash": "sha256-d9g7RKNShMLboTWwukM+RObDWWpHKaqTYXB48clBWXI=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "ae815cee91b417be55d43781eb4b73ae1ecc396c",
        "rev": "2362848adf8def2866fabbffc50462e929d7fffb",
        "type": "github"
      },
      "original": {

@ -74,17 +95,17 @@
    },
    "nixpkgs_2": {
      "locked": {
        "lastModified": 1724681257,
        "narHash": "sha256-EJRuc5Qp7yfXko5ZNeEMYAs4DzAvkCyALuJ/tGllhN4=",
        "lastModified": 1707091808,
        "narHash": "sha256-LahKBAfGbY836gtpVNnWwBTIzN7yf/uYM/S0g393r0Y=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "0239aeb2f82ea27ccd6b61582b8f7fb8750eeada",
        "rev": "9f2ee8c91ac42da3ae6c6a1d21555f283458247e",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "0239aeb2f82ea27ccd6b61582b8f7fb8750eeada",
        "rev": "9f2ee8c91ac42da3ae6c6a1d21555f283458247e",
        "type": "github"
      }
    },

@ -101,14 +122,15 @@
    },
    "rust-overlay": {
      "inputs": {
        "flake-utils": "flake-utils_2",
        "nixpkgs": "nixpkgs"
      },
      "locked": {
        "lastModified": 1724638882,
        "narHash": "sha256-ap2jIQi/FuUHR6HCht6ASWhoz8EiB99XmI8Esot38VE=",
        "lastModified": 1707271822,
        "narHash": "sha256-/DZsoPH5GBzOpVEGz5PgJ7vh8Q6TcrJq5u8FcBjqAfI=",
        "owner": "oxalica",
        "repo": "rust-overlay",
        "rev": "19b70f147b9c67a759e35824b241f1ed92e46694",
        "rev": "7a94fe7690d2bdfe1aab475382a505e14dc114a6",
        "type": "github"
      },
      "original": {

@ -116,6 +138,36 @@
        "repo": "rust-overlay",
        "type": "github"
      }
    },
    "systems": {
      "locked": {
        "lastModified": 1681028828,
        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
        "owner": "nix-systems",
        "repo": "default",
        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
        "type": "github"
      },
      "original": {
        "owner": "nix-systems",
        "repo": "default",
        "type": "github"
      }
    },
    "systems_2": {
      "locked": {
        "lastModified": 1681028828,
        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
        "owner": "nix-systems",
        "repo": "default",
        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
        "type": "github"
      },
      "original": {
        "owner": "nix-systems",
        "repo": "default",
        "type": "github"
      }
    }
  },
  "root": "root",
@ -2,9 +2,9 @@
  description =
    "Garage, an S3-compatible distributed object store for self-hosted deployments";

  # Nixpkgs 24.05 as of 2024-08-26 has rustc v1.77
  # Nixpkgs 23.11 as of 2024-02-07, has rustc v1.73
  inputs.nixpkgs.url =
    "github:NixOS/nixpkgs/0239aeb2f82ea27ccd6b61582b8f7fb8750eeada";
    "github:NixOS/nixpkgs/9f2ee8c91ac42da3ae6c6a1d21555f283458247e";

  inputs.flake-compat.url = "github:nix-community/flake-compat";

@ -17,9 +17,9 @@
  # - rustc v1.66
  # url = "github:cargo2nix/cargo2nix/8fb57a670f7993bfc24099c33eb9c5abb51f29a2";

  # Rust overlay as of 2024-08-26
  # Rust overlay as of 2024-02-07
  inputs.rust-overlay.url =
    "github:oxalica/rust-overlay/19b70f147b9c67a759e35824b241f1ed92e46694";
    "github:oxalica/rust-overlay/7a94fe7690d2bdfe1aab475382a505e14dc114a6";

  inputs.nixpkgs.follows = "nixpkgs";
  inputs.flake-compat.follows = "flake-compat";

@ -76,7 +76,6 @@
        # import the full shell using `nix develop .#full`
        full = shellWithPackages (with pkgs; [
          rustfmt
          rust-analyzer
          clang
          mold
          # ---- extra packages for dev tasks ----
158
k2v_test.py
Executable file

@ -0,0 +1,158 @@
#!/usr/bin/env python

import os
import requests
from datetime import datetime

# let's talk to our AWS Elasticsearch cluster
#from requests_aws4auth import AWS4Auth
#auth = AWS4Auth('GK31c2f218a2e44f485b94239e',
#                'b892c0665f0ada8a4755dae98baa3b133590e11dae3bcc1f9d769d67f16c3835',
#                'us-east-1',
#                's3')

from aws_requests_auth.aws_auth import AWSRequestsAuth
auth = AWSRequestsAuth(aws_access_key='GK31c2f218a2e44f485b94239e',
                       aws_secret_access_key='b892c0665f0ada8a4755dae98baa3b133590e11dae3bcc1f9d769d67f16c3835',
                       aws_host='localhost:3812',
                       aws_region='us-east-1',
                       aws_service='k2v')


print("-- ReadIndex")
response = requests.get('http://localhost:3812/alex',
                        auth=auth)
print(response.headers)
print(response.text)


sort_keys = ["a", "b", "c", "d"]

for sk in sort_keys:
    print("-- (%s) Put initial (no CT)"%sk)
    response = requests.put('http://localhost:3812/alex/root?sort_key=%s'%sk,
                            auth=auth,
                            data='{}: Hello, world!'.format(datetime.timestamp(datetime.now())))
    print(response.headers)
    print(response.text)

    print("-- Get")
    response = requests.get('http://localhost:3812/alex/root?sort_key=%s'%sk,
                            auth=auth)
    print(response.headers)
    print(response.text)
    ct = response.headers["x-garage-causality-token"]

    print("-- ReadIndex")
    response = requests.get('http://localhost:3812/alex',
                            auth=auth)
    print(response.headers)
    print(response.text)

    print("-- Put with CT")
    response = requests.put('http://localhost:3812/alex/root?sort_key=%s'%sk,
                            auth=auth,
                            headers={'x-garage-causality-token': ct},
                            data='{}: Good bye, world!'.format(datetime.timestamp(datetime.now())))
    print(response.headers)
    print(response.text)

    print("-- Get")
    response = requests.get('http://localhost:3812/alex/root?sort_key=%s'%sk,
                            auth=auth)
    print(response.headers)
    print(response.text)

    print("-- Put again with same CT (concurrent)")
    response = requests.put('http://localhost:3812/alex/root?sort_key=%s'%sk,
                            auth=auth,
                            headers={'x-garage-causality-token': ct},
                            data='{}: Concurrent value, oops'.format(datetime.timestamp(datetime.now())))
    print(response.headers)
    print(response.text)

for sk in sort_keys:
    print("-- (%s) Get"%sk)
    response = requests.get('http://localhost:3812/alex/root?sort_key=%s'%sk,
                            auth=auth)
    print(response.headers)
    print(response.text)
    ct = response.headers["x-garage-causality-token"]

    print("-- Delete")
    response = requests.delete('http://localhost:3812/alex/root?sort_key=%s'%sk,
                               headers={'x-garage-causality-token': ct},
                               auth=auth)
    print(response.headers)
    print(response.text)

print("-- ReadIndex")
response = requests.get('http://localhost:3812/alex',
                        auth=auth)
print(response.headers)
print(response.text)

print("-- InsertBatch")
response = requests.post('http://localhost:3812/alex',
                         auth=auth,
                         data='''
[
  {"pk": "root", "sk": "a", "ct": null, "v": "aW5pdGlhbCB0ZXN0Cg=="},
  {"pk": "root", "sk": "b", "ct": null, "v": "aW5pdGlhbCB0ZXN1Cg=="},
  {"pk": "root", "sk": "c", "ct": null, "v": "aW5pdGlhbCB0ZXN2Cg=="}
]
''')
print(response.headers)
print(response.text)

print("-- ReadIndex")
response = requests.get('http://localhost:3812/alex',
                        auth=auth)
print(response.headers)
print(response.text)

for sk in sort_keys:
    print("-- (%s) Get"%sk)
    response = requests.get('http://localhost:3812/alex/root?sort_key=%s'%sk,
                            auth=auth)
    print(response.headers)
    print(response.text)
    ct = response.headers["x-garage-causality-token"]

print("-- ReadBatch")
response = requests.post('http://localhost:3812/alex?search',
                         auth=auth,
                         data='''
[
  {"partitionKey": "root"},
  {"partitionKey": "root", "tombstones": true},
  {"partitionKey": "root", "tombstones": true, "limit": 2},
  {"partitionKey": "root", "start": "c", "singleItem": true},
  {"partitionKey": "root", "start": "b", "end": "d", "tombstones": true}
]
''')
print(response.headers)
print(response.text)


print("-- DeleteBatch")
response = requests.post('http://localhost:3812/alex?delete',
                         auth=auth,
                         data='''
[
  {"partitionKey": "root", "start": "b", "end": "c"}
]
''')
print(response.headers)
print(response.text)

print("-- ReadBatch")
response = requests.post('http://localhost:3812/alex?search',
                         auth=auth,
                         data='''
[
  {"partitionKey": "root"}
]
''')
print(response.headers)
print(response.text)
@ -20,7 +20,7 @@ let
  };

  toolchainOptions = {
    rustVersion = "1.77.0";
    rustVersion = "1.73.0";
    extraRustComponents = [ "clippy" ];
  };

@ -168,12 +168,13 @@ let
  rootFeatures = if features != null then
    features
  else
    ([ "garage/bundled-libs" "garage/lmdb" "garage/sqlite" "garage/k2v" ] ++ (if release then [
    ([ "garage/bundled-libs" "garage/sled" "garage/lmdb" "garage/k2v" ] ++ (if release then [
      "garage/consul-discovery"
      "garage/kubernetes-discovery"
      "garage/metrics"
      "garage/telemetry-otlp"
      "garage/syslog"
      "garage/lmdb"
      "garage/sqlite"
    ] else
      [ ]));
@ -15,10 +15,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.5.1
version: 0.4.1

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "v1.0.1"
appVersion: "v0.9.1"
@ -11,7 +11,6 @@ spec:
  {{- if eq .Values.deployment.kind "StatefulSet" }}
  replicas: {{ .Values.deployment.replicaCount }}
  serviceName: {{ include "garage.fullname" . }}
  podManagementPolicy: {{ .Values.deployment.podManagementPolicy }}
  {{- end }}
  template:
    metadata:

@ -64,10 +63,6 @@
              name: web-api
            - containerPort: 3903
              name: admin
          {{- with .Values.environment }}
          env:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          volumeMounts:
            - name: meta
              mountPath: /mnt/meta

@ -76,9 +71,6 @@
            - name: etc
              mountPath: /etc/garage.toml
              subPath: garage.toml
          {{- with .Values.extraVolumeMounts }}
            {{- toYaml . | nindent 12 }}
          {{- end }}
          # TODO
          # livenessProbe:
          #   httpGet:

@ -113,9 +105,6 @@
        - name: data
          emptyDir: {}
      {{- end }}
      {{- with .Values.extraVolumes }}
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
@ -6,13 +6,18 @@
garage:
  # Can be changed for better performance on certain systems
  # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db-engine-since-v0-8-0
  dbEngine: "lmdb"
  dbEngine: "sled"

  # Defaults is 1MB
  # An increase can result in better performance in certain scenarios
  # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#block-size
  blockSize: "1048576"

  # Tuning parameters for the sled DB engine
  # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#sled-cache-capacity
  sledCacheCapacity: "134217728"
  sledFlushEveryMs: "2000"

  # Default to 3 replicas, see the replication_mode section at
  # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication-mode
  replicationMode: "3"

@ -45,6 +50,11 @@ garage:

    block_size = {{ .Values.garage.blockSize }}

    {{- if eq .Values.garage.dbEngine "sled"}}
    sled_cache_capacity = {{ .Values.garage.sledCacheCapacity }}
    sled_flush_every_ms = {{ .Values.garage.sledFlushEveryMs }}
    {{- end }}

    replication_mode = "{{ .Values.garage.replicationMode }}"

    compression_level = {{ .Values.garage.compressionLevel }}

@ -96,8 +106,6 @@ deployment:
  kind: StatefulSet
  # Number of StatefulSet replicas/garage nodes to start
  replicaCount: 3
  # If using statefulset, allow Parallel or OrderedReady (default)
  podManagementPolicy: OrderedReady

image:
  repository: dxflrs/amd64_garage

@ -216,12 +224,6 @@ tolerations: []

affinity: {}

environment: {}

extraVolumes: {}

extraVolumeMounts: {}

monitoring:
  metrics:
    # If true, a service for monitoring is created with a prometheus.io/scrape annotation
14
script/jepsen.garage/Vagrantfile
vendored

@ -30,11 +30,11 @@ Vagrant.configure("2") do |config|
  config.vm.define "n6" do |config| vm(config, "n6", "192.168.56.26") end
  config.vm.define "n7" do |config| vm(config, "n7", "192.168.56.27") end

  #config.vm.define "n8" do |config| vm(config, "n8", "192.168.56.28") end
  #config.vm.define "n9" do |config| vm(config, "n9", "192.168.56.29") end
  #config.vm.define "n10" do |config| vm(config, "n10", "192.168.56.30") end
  #config.vm.define "n11" do |config| vm(config, "n11", "192.168.56.31") end
  #config.vm.define "n12" do |config| vm(config, "n12", "192.168.56.32") end
  #config.vm.define "n13" do |config| vm(config, "n13", "192.168.56.33") end
  #config.vm.define "n14" do |config| vm(config, "n14", "192.168.56.34") end
  config.vm.define "n8" do |config| vm(config, "n8", "192.168.56.28") end
  config.vm.define "n9" do |config| vm(config, "n9", "192.168.56.29") end
  config.vm.define "n10" do |config| vm(config, "n10", "192.168.56.30") end
  config.vm.define "n11" do |config| vm(config, "n11", "192.168.56.31") end
  config.vm.define "n12" do |config| vm(config, "n12", "192.168.56.32") end
  config.vm.define "n13" do |config| vm(config, "n13", "192.168.56.33") end
  config.vm.define "n14" do |config| vm(config, "n14", "192.168.56.34") end
end
@ -3,10 +3,11 @@
set -x

#for ppatch in task3c task3a tsfix2; do
for ppatch in v093 v1rc1; do
for ppatch in tsfix2; do
#for psc in c cp cdp r pr cpr dpr; do
for ptsk in reg2 set2; do
for psc in c cp cdp r pr cpr dpr; do
for psc in cdp r pr cpr dpr; do
#for ptsk in reg2 set1 set2; do
for ptsk in set1; do
for irun in $(seq 10); do
lein run test --nodes-file nodes.vagrant \
  --time-limit 60 --rate 100 --concurrency 100 --ops-per-key 100 \

@ -38,9 +38,7 @@
   "tsfix2"  "c82d91c6bccf307186332b6c5c6fc0b128b1b2b1"
   "task3a"  "707442f5de416fdbed4681a33b739f0a787b7834"
   "task3b"  "431b28e0cfdc9cac6c649193cf602108a8b02997"
   "task3c"  "0041b013a473e3ae72f50209d8f79db75a72848b"
   "v093"    "v0.9.3"
   "v1rc1"   "v1.0.0-rc1"})
   "task3c"  "0041b013a473e3ae72f50209d8f79db75a72848b"})

(def cli-opts
  "Additional command line options."

@ -43,7 +43,7 @@
  "rpc_bind_addr = \"0.0.0.0:3901\"\n"
  "rpc_public_addr = \"" node ":3901\"\n"
  "db_engine = \"lmdb\"\n"
  "replication_mode = \"3\"\n"
  "replication_mode = \"2\"\n"
  "data_dir = \"" data-dir "\"\n"
  "metadata_dir = \"" meta-dir "\"\n"
  "[s3_api]\n"
@ -81,22 +81,11 @@ if [ -z "$SKIP_AWS" ]; then
    echo "Invalid multipart upload"
    exit 1
  fi
  aws s3api delete-object --bucket eprouvette --key upload

  echo "🛠️ Test SSE-C with awscli (aws s3)"
  SSEC_KEY="u8zCfnEyt5Imo/krN+sxA1DQXxLWtPJavU6T6gOVj1Y="
  SSEC_KEY_MD5="jMGbs3GyZkYjJUP6q5jA7g=="
  echo "$SSEC_KEY" | base64 -d > /tmp/garage.ssec-key
  for idx in {1,2}.rnd; do
    aws s3 cp --sse-c AES256 --sse-c-key fileb:///tmp/garage.ssec-key \
      "/tmp/garage.$idx" "s3://eprouvette/garage.$idx.aws.sse-c"
    aws s3 cp --sse-c AES256 --sse-c-key fileb:///tmp/garage.ssec-key \
      "s3://eprouvette/garage.$idx.aws.sse-c" "/tmp/garage.$idx.dl.sse-c"
    diff "/tmp/garage.$idx" "/tmp/garage.$idx.dl.sse-c"
    aws s3api delete-object --bucket eprouvette --key "garage.$idx.aws.sse-c"
  done
fi

echo "OK!!"
exit 0

# S3CMD
if [ -z "$SKIP_S3CMD" ]; then
  echo "🛠️ Testing with s3cmd"
|
|||
{
|
||||
# --- Dev shell inherited from flake.nix ---
|
||||
devShell = devShells.default;
|
||||
devShellFull = devShells.full;
|
||||
|
||||
# --- Continuous integration shell ---
|
||||
# The shell used for all CI jobs (along with devShell)
|
||||
|
|
|
@ -1,6 +1,6 @@
[package]
name = "garage_api"
version = "1.0.1"
version = "0.9.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"

@ -21,15 +21,10 @@ garage_net.workspace = true
garage_util.workspace = true
garage_rpc.workspace = true

aes-gcm.workspace = true
argon2.workspace = true
async-compression.workspace = true
async-trait.workspace = true
base64.workspace = true
bytes.workspace = true
chrono.workspace = true
crc32fast.workspace = true
crc32c.workspace = true
crypto-common.workspace = true
err-derive.workspace = true
hex.workspace = true

@ -39,14 +34,12 @@ tracing.workspace = true
md-5.workspace = true
nom.workspace = true
pin-project.workspace = true
sha1.workspace = true
sha2.workspace = true

futures.workspace = true
futures-util.workspace = true
tokio.workspace = true
tokio-stream.workspace = true
tokio-util.workspace = true

form_urlencoded.workspace = true
http.workspace = true
|
|||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use argon2::password_hash::PasswordHash;
|
||||
use async_trait::async_trait;
|
||||
|
||||
use http::header::{ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN, ALLOW};
|
||||
|
@ -46,8 +45,14 @@ impl AdminApiServer {
|
|||
#[cfg(feature = "metrics")] exporter: PrometheusExporter,
|
||||
) -> Self {
|
||||
let cfg = &garage.config.admin;
|
||||
let metrics_token = cfg.metrics_token.as_deref().map(hash_bearer_token);
|
||||
let admin_token = cfg.admin_token.as_deref().map(hash_bearer_token);
|
||||
let metrics_token = cfg
|
||||
.metrics_token
|
||||
.as_ref()
|
||||
.map(|tok| format!("Bearer {}", tok));
|
||||
let admin_token = cfg
|
||||
.admin_token
|
||||
.as_ref()
|
||||
.map(|tok| format!("Bearer {}", tok));
|
||||
Self {
|
||||
garage,
|
||||
#[cfg(feature = "metrics")]
|
||||
|
@ -243,11 +248,11 @@ impl ApiHandler for AdminApiServer {
|
|||
req: Request<IncomingBody>,
|
||||
endpoint: Endpoint,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
let required_auth_hash =
|
||||
let expected_auth_header =
|
||||
match endpoint.authorization_type() {
|
||||
Authorization::None => None,
|
||||
Authorization::MetricsToken => self.metrics_token.as_deref(),
|
||||
Authorization::AdminToken => match self.admin_token.as_deref() {
|
||||
Authorization::MetricsToken => self.metrics_token.as_ref(),
|
||||
Authorization::AdminToken => match &self.admin_token {
|
||||
None => return Err(Error::forbidden(
|
||||
"Admin token isn't configured, admin API access is disabled for security.",
|
||||
)),
|
||||
|
@ -255,11 +260,14 @@ impl ApiHandler for AdminApiServer {
|
|||
},
|
||||
};
|
||||
|
||||
if let Some(password_hash) = required_auth_hash {
|
||||
if let Some(h) = expected_auth_header {
|
||||
match req.headers().get("Authorization") {
|
||||
None => return Err(Error::forbidden("Authorization token must be provided")),
|
||||
Some(authorization) => {
|
||||
verify_bearer_token(&authorization, password_hash)?;
|
||||
Some(v) => {
|
||||
let authorized = v.to_str().map(|hv| hv.trim() == h).unwrap_or(false);
|
||||
if !authorized {
|
||||
return Err(Error::forbidden("Invalid authorization token provided"));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -276,7 +284,7 @@ impl ApiHandler for AdminApiServer {
|
|||
Endpoint::GetClusterLayout => handle_get_cluster_layout(&self.garage).await,
|
||||
Endpoint::UpdateClusterLayout => handle_update_cluster_layout(&self.garage, req).await,
|
||||
Endpoint::ApplyClusterLayout => handle_apply_cluster_layout(&self.garage, req).await,
|
||||
Endpoint::RevertClusterLayout => handle_revert_cluster_layout(&self.garage).await,
|
||||
Endpoint::RevertClusterLayout => handle_revert_cluster_layout(&self.garage, req).await,
|
||||
// Keys
|
||||
Endpoint::ListKeys => handle_list_keys(&self.garage).await,
|
||||
Endpoint::GetKeyInfo {
|
||||
|
@ -334,35 +342,3 @@ impl ApiEndpoint for Endpoint {
|
|||
|
||||
fn add_span_attributes(&self, _span: SpanRef<'_>) {}
|
||||
}
|
||||
|
||||
fn hash_bearer_token(token: &str) -> String {
|
||||
use argon2::{
|
||||
password_hash::{rand_core::OsRng, PasswordHasher, SaltString},
|
||||
Argon2,
|
||||
};
|
||||
|
||||
let salt = SaltString::generate(&mut OsRng);
|
||||
let argon2 = Argon2::default();
|
||||
argon2
|
||||
.hash_password(token.trim().as_bytes(), &salt)
|
||||
.expect("could not hash API token")
|
||||
.to_string()
|
||||
}
|
||||
|
||||
fn verify_bearer_token(token: &hyper::http::HeaderValue, password_hash: &str) -> Result<(), Error> {
|
||||
use argon2::{password_hash::PasswordVerifier, Argon2};
|
||||
|
||||
let parsed_hash = PasswordHash::new(&password_hash).unwrap();
|
||||
|
||||
token
|
||||
.to_str()?
|
||||
.strip_prefix("Bearer ")
|
||||
.and_then(|token| {
|
||||
Argon2::default()
|
||||
.verify_password(token.trim().as_bytes(), &parsed_hash)
|
||||
.ok()
|
||||
})
|
||||
.ok_or_else(|| Error::forbidden("Invalid authorization token"))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
|
|
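Editor's note: the change above replaces plaintext comparison of the `Bearer` header with salted Argon2 hashing via `hash_bearer_token`/`verify_bearer_token`. A self-contained sketch of the same `argon2` crate flow, outside any server context (token value is hypothetical; error handling simplified):

```rust
use argon2::{
    password_hash::{rand_core::OsRng, PasswordHash, PasswordHasher, PasswordVerifier, SaltString},
    Argon2,
};

fn main() {
    let token = "my-admin-token"; // hypothetical plaintext bearer token

    // Hash with a fresh random salt; the resulting PHC string embeds
    // the salt and the Argon2 parameters, so it is all we need to store.
    let salt = SaltString::generate(&mut OsRng);
    let phc = Argon2::default()
        .hash_password(token.as_bytes(), &salt)
        .expect("could not hash token")
        .to_string();

    // Later, verify a presented token against the stored PHC string.
    let parsed = PasswordHash::new(&phc).unwrap();
    assert!(Argon2::default()
        .verify_password(token.as_bytes(), &parsed)
        .is_ok());
}
```

The practical upside is that a leaked config backup or memory dump no longer reveals a usable admin token, at the cost of one Argon2 verification per authenticated request.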
@ -123,7 +123,7 @@ async fn bucket_info_results(
        .table
        .get(&bucket_id, &EmptyKey)
        .await?
        .map(|x| x.filtered_values(&garage.system.cluster_layout()))
        .map(|x| x.filtered_values(&garage.system.ring.borrow()))
        .unwrap_or_default();

    let mpu_counters = garage

@ -131,7 +131,7 @@ async fn bucket_info_results(
        .table
        .get(&bucket_id, &EmptyKey)
        .await?
        .map(|x| x.filtered_values(&garage.system.cluster_layout()))
        .map(|x| x.filtered_values(&garage.system.ring.borrow()))
        .unwrap_or_default();

    let mut relevant_keys = HashMap::new();
@ -1,4 +1,3 @@
use std::collections::HashMap;
use std::net::SocketAddr;
use std::sync::Arc;

@ -17,99 +16,25 @@ use crate::admin::error::*;
use crate::helpers::{json_ok_response, parse_json_body};

pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
    let layout = garage.system.cluster_layout();
    let mut nodes = garage
        .system
        .get_known_nodes()
        .into_iter()
        .map(|i| {
            (
                i.id,
                NodeResp {
                    id: hex::encode(i.id),
                    addr: i.addr,
                    hostname: i.status.hostname,
                    is_up: i.is_up,
                    last_seen_secs_ago: i.last_seen_secs_ago,
                    data_partition: i
                        .status
                        .data_disk_avail
                        .map(|(avail, total)| FreeSpaceResp {
                            available: avail,
                            total,
                        }),
                    metadata_partition: i.status.meta_disk_avail.map(|(avail, total)| {
                        FreeSpaceResp {
                            available: avail,
                            total,
                        }
                    }),
                    ..Default::default()
                },
            )
        })
        .collect::<HashMap<_, _>>();

    for (id, _, role) in layout.current().roles.items().iter() {
        if let layout::NodeRoleV(Some(r)) = role {
            let role = NodeRoleResp {
                id: hex::encode(id),
                zone: r.zone.to_string(),
                capacity: r.capacity,
                tags: r.tags.clone(),
            };
            match nodes.get_mut(id) {
                None => {
                    nodes.insert(
                        *id,
                        NodeResp {
                            id: hex::encode(id),
                            role: Some(role),
                            ..Default::default()
                        },
                    );
                }
                Some(n) => {
                    n.role = Some(role);
                }
            }
        }
    }

    for ver in layout.versions().iter().rev().skip(1) {
        for (id, _, role) in ver.roles.items().iter() {
            if let layout::NodeRoleV(Some(r)) = role {
                if r.capacity.is_some() {
                    if let Some(n) = nodes.get_mut(id) {
                        if n.role.is_none() {
                            n.draining = true;
                        }
                    } else {
                        nodes.insert(
                            *id,
                            NodeResp {
                                id: hex::encode(id),
                                draining: true,
                                ..Default::default()
                            },
                        );
                    }
                }
            }
        }
    }

    let mut nodes = nodes.into_values().collect::<Vec<_>>();
    nodes.sort_by(|x, y| x.id.cmp(&y.id));

    let res = GetClusterStatusResponse {
        node: hex::encode(garage.system.id),
        garage_version: garage_util::version::garage_version(),
        garage_features: garage_util::version::garage_features(),
        rust_version: garage_util::version::rust_version(),
        db_engine: garage.db.engine(),
        layout_version: layout.current().version,
        nodes,
        known_nodes: garage
            .system
            .get_known_nodes()
            .into_iter()
            .map(|i| KnownNodeResp {
                id: hex::encode(i.id),
                addr: i.addr,
                is_up: i.is_up,
                last_seen_secs_ago: i.last_seen_secs_ago,
                hostname: i.status.hostname,
            })
            .collect(),
        layout: format_cluster_layout(&garage.system.get_cluster_layout()),
    };

    Ok(json_ok_response(&res)?)

@ -160,14 +85,13 @@ pub async fn handle_connect_cluster_nodes(
}

pub async fn handle_get_cluster_layout(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
    let res = format_cluster_layout(garage.system.cluster_layout().inner());
    let res = format_cluster_layout(&garage.system.get_cluster_layout());

    Ok(json_ok_response(&res)?)
}

fn format_cluster_layout(layout: &layout::LayoutHistory) -> GetClusterLayoutResponse {
fn format_cluster_layout(layout: &layout::ClusterLayout) -> GetClusterLayoutResponse {
    let roles = layout
        .current()
        .roles
        .items()
        .iter()

@ -181,12 +105,10 @@ fn format_cluster_layout(layout: &layout::LayoutHistory) -> GetClusterLayoutResp
        .collect::<Vec<_>>();

    let staged_role_changes = layout
        .staging
        .get()
        .roles
        .staging_roles
        .items()
        .iter()
        .filter(|(k, _, v)| layout.current().roles.get(k) != Some(v))
        .filter(|(k, _, v)| layout.roles.get(k) != Some(v))
        .map(|(k, _, v)| match &v.0 {
            None => NodeRoleChange {
                id: hex::encode(k),

@ -204,7 +126,7 @@ fn format_cluster_layout(layout: &layout::LayoutHistory) -> GetClusterLayoutResp
        .collect::<Vec<_>>();

    GetClusterLayoutResponse {
        version: layout.current().version,
        version: layout.version,
        roles,
        staged_role_changes,
    }

@ -233,8 +155,8 @@ struct GetClusterStatusResponse {
    garage_features: Option<&'static [&'static str]>,
    rust_version: &'static str,
    db_engine: String,
    layout_version: u64,
    nodes: Vec<NodeResp>,
    known_nodes: Vec<KnownNodeResp>,
    layout: GetClusterLayoutResponse,
}

#[derive(Serialize)]

@ -268,27 +190,14 @@ struct NodeRoleResp {
    tags: Vec<String>,
}

#[derive(Serialize, Default)]
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct FreeSpaceResp {
    available: u64,
    total: u64,
}

#[derive(Serialize, Default)]
#[serde(rename_all = "camelCase")]
struct NodeResp {
struct KnownNodeResp {
    id: String,
    role: Option<NodeRoleResp>,
    addr: Option<SocketAddr>,
    hostname: Option<String>,
    addr: SocketAddr,
    is_up: bool,
    last_seen_secs_ago: Option<u64>,
    draining: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    data_partition: Option<FreeSpaceResp>,
    #[serde(skip_serializing_if = "Option::is_none")]
    metadata_partition: Option<FreeSpaceResp>,
    hostname: String,
}

// ---- update functions ----

@ -299,10 +208,10 @@ pub async fn handle_update_cluster_layout(
) -> Result<Response<ResBody>, Error> {
    let updates = parse_json_body::<UpdateClusterLayoutRequest, _, Error>(req).await?;

    let mut layout = garage.system.cluster_layout().inner().clone();
    let mut layout = garage.system.get_cluster_layout();

    let mut roles = layout.current().roles.clone();
    roles.merge(&layout.staging.get().roles);
    let mut roles = layout.roles.clone();
    roles.merge(&layout.staging_roles);

    for change in updates {
        let node = hex::decode(&change.id).ok_or_bad_request("Invalid node identifier")?;

@ -323,17 +232,11 @@ pub async fn handle_update_cluster_layout(
        };

        layout
            .staging
            .get_mut()
            .roles
            .staging_roles
            .merge(&roles.update_mutator(node, layout::NodeRoleV(new_role)));
    }

    garage
        .system
        .layout_manager
        .update_cluster_layout(&layout)
        .await?;
    garage.system.update_cluster_layout(&layout).await?;

    let res = format_cluster_layout(&layout);
    Ok(json_ok_response(&res)?)

@ -343,16 +246,12 @@ pub async fn handle_apply_cluster_layout(
    garage: &Arc<Garage>,
    req: Request<IncomingBody>,
) -> Result<Response<ResBody>, Error> {
    let param = parse_json_body::<ApplyLayoutRequest, _, Error>(req).await?;
    let param = parse_json_body::<ApplyRevertLayoutRequest, _, Error>(req).await?;

    let layout = garage.system.cluster_layout().inner().clone();
    let layout = garage.system.get_cluster_layout();
    let (layout, msg) = layout.apply_staged_changes(Some(param.version))?;

    garage
        .system
        .layout_manager
        .update_cluster_layout(&layout)
        .await?;
    garage.system.update_cluster_layout(&layout).await?;

    let res = ApplyClusterLayoutResponse {
        message: msg,

@ -363,14 +262,13 @@ pub async fn handle_apply_cluster_layout(

pub async fn handle_revert_cluster_layout(
    garage: &Arc<Garage>,
    req: Request<IncomingBody>,
) -> Result<Response<ResBody>, Error> {
    let layout = garage.system.cluster_layout().inner().clone();
    let layout = layout.revert_staged_changes()?;
    garage
        .system
        .layout_manager
        .update_cluster_layout(&layout)
        .await?;
    let param = parse_json_body::<ApplyRevertLayoutRequest, _, Error>(req).await?;

    let layout = garage.system.get_cluster_layout();
    let layout = layout.revert_staged_changes(Some(param.version))?;
    garage.system.update_cluster_layout(&layout).await?;

    let res = format_cluster_layout(&layout);
    Ok(json_ok_response(&res)?)

@ -382,7 +280,7 @@ type UpdateClusterLayoutRequest = Vec<NodeRoleChange>;

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct ApplyLayoutRequest {
struct ApplyRevertLayoutRequest {
    version: u64,
}
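Editor's note: one small serde detail in the new `NodeResp` above is worth calling out: `#[derive(Serialize, Default)]` plus `#[serde(skip_serializing_if = "Option::is_none")]` keeps absent partitions out of the JSON entirely instead of emitting null fields. A minimal sketch with a hypothetical, reduced struct (assumes the `serde` crate with the derive feature, plus `serde_json`):

```rust
use serde::Serialize;

#[derive(Serialize, Default)]
#[serde(rename_all = "camelCase")]
struct NodeResp {
    id: String,
    is_up: bool,
    // Skipped entirely when None: the key is absent from the JSON,
    // rather than serialized as "dataPartition": null.
    #[serde(skip_serializing_if = "Option::is_none")]
    data_partition: Option<u64>,
}

fn main() {
    let node = NodeResp {
        id: "deadbeef".into(),
        ..Default::default()
    };
    // Prints {"id":"deadbeef","isUp":false} -- no dataPartition key.
    println!("{}", serde_json::to_string(&node).unwrap());
}
```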
@ -59,7 +59,9 @@ impl CommonError {
    pub fn http_status_code(&self) -> StatusCode {
        match self {
            CommonError::InternalError(
                GarageError::Timeout | GarageError::RemoteError(_) | GarageError::Quorum(..),
                GarageError::Timeout
                | GarageError::RemoteError(_)
                | GarageError::Quorum(_, _, _, _),
            ) => StatusCode::SERVICE_UNAVAILABLE,
            CommonError::InternalError(_) | CommonError::Hyper(_) | CommonError::Http(_) => {
                StatusCode::INTERNAL_SERVER_ERROR

@ -78,7 +80,9 @@
        match self {
            CommonError::Forbidden(_) => "AccessDenied",
            CommonError::InternalError(
                GarageError::Timeout | GarageError::RemoteError(_) | GarageError::Quorum(..),
                GarageError::Timeout
                | GarageError::RemoteError(_)
                | GarageError::Quorum(_, _, _, _),
            ) => "ServiceUnavailable",
            CommonError::InternalError(_) | CommonError::Hyper(_) | CommonError::Http(_) => {
                "InternalError"
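Editor's note: both hunks above are the same mechanical change: `GarageError::Quorum(..)` uses a rest pattern, which matches the variant whatever its arity, where the older code spelled out `(_, _, _, _)`. A tiny sketch of the two equivalent spellings, using a hypothetical enum rather than the real GarageError:

```rust
#[allow(dead_code)]
enum Error {
    Timeout,
    Remote(String),
    Quorum(u32, u32, u32, u32),
}

fn status(e: &Error) -> u16 {
    match e {
        // `..` matches all four fields; `(_, _, _, _)` is equivalent today
        // but breaks whenever the variant gains or loses a field.
        Error::Timeout | Error::Remote(_) | Error::Quorum(..) => 503,
    }
}

fn main() {
    assert_eq!(status(&Error::Quorum(2, 3, 1, 3)), 503);
}
```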
@ -2,7 +2,6 @@ use std::convert::Infallible;
use std::fs::{self, Permissions};
use std::os::unix::fs::PermissionsExt;
use std::sync::Arc;
use std::time::Duration;

use async_trait::async_trait;

@ -20,7 +19,6 @@ use hyper_util::rt::TokioIo;
use tokio::io::{AsyncRead, AsyncWrite};
use tokio::net::{TcpListener, TcpStream, UnixListener, UnixStream};
use tokio::sync::watch;
use tokio::time::{sleep_until, Instant};

use opentelemetry::{
    global,

@ -293,7 +291,7 @@ where
    let connection_collector = tokio::spawn({
        let server_name = server_name.clone();
        async move {
            let mut connections = FuturesUnordered::<tokio::task::JoinHandle<()>>::new();
            let mut connections = FuturesUnordered::new();
            loop {
                let collect_next = async {
                    if connections.is_empty() {

@ -314,34 +312,23 @@ where
                    }
                }
            }
            let deadline = Instant::now() + Duration::from_secs(10);
            while !connections.is_empty() {
            if !connections.is_empty() {
                info!(
                    "{} server: {} connections still open, deadline in {:.2}s",
                    "{} server: {} connections still open",
                    server_name,
                    connections.len(),
                    (deadline - Instant::now()).as_secs_f32(),
                    connections.len()
                );
                tokio::select! {
                    conn_res = connections.next() => {
                while let Some(conn_res) = connections.next().await {
                    trace!(
                        "{} server: HTTP connection finished: {:?}",
                        server_name,
                        conn_res.unwrap(),
                        conn_res
                    );
                    }
                    _ = sleep_until(deadline) => {
                        warn!("{} server: exit deadline reached with {} connections still open, killing them now",
                    info!(
                        "{} server: {} connections still open",
                        server_name,
                        connections.len());
                        for conn in connections.iter() {
                            conn.abort();
                        }
                        for conn in connections {
                            assert!(conn.await.unwrap_err().is_cancelled());
                        }
                        break;
                    }
                }
                        connections.len()
                    );
                }
            }
        }
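Editor's note: the main-branch side of this hunk adds a hard exit deadline to graceful shutdown: keep draining finished connections, but once the deadline passes, abort whatever is left. A reduced, runnable sketch of that select-with-deadline pattern, with mock sleep tasks standing in for HTTP connections (assumes `tokio` with the "full" feature and the `futures` crate; durations are illustrative):

```rust
use std::time::Duration;

use futures::stream::{FuturesUnordered, StreamExt};
use tokio::time::{sleep_until, Instant};

#[tokio::main]
async fn main() {
    // Mock "connections": two finish quickly, one would run for 30s.
    let mut connections = FuturesUnordered::new();
    for secs in [1u64, 1, 30] {
        connections.push(tokio::spawn(tokio::time::sleep(Duration::from_secs(secs))));
    }

    let deadline = Instant::now() + Duration::from_secs(2);
    while !connections.is_empty() {
        tokio::select! {
            // Normal path: a connection finished on its own.
            res = connections.next() => {
                println!("connection finished: {:?}", res);
            }
            // Deadline path: abort the stragglers, then collect the
            // cancellations so nothing is left running on exit.
            _ = sleep_until(deadline) => {
                println!("deadline reached, aborting {} connections", connections.len());
                for conn in connections.iter() {
                    conn.abort();
                }
                for conn in connections {
                    assert!(conn.await.unwrap_err().is_cancelled());
                }
                break;
            }
        }
    }
}
```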
@ -1,5 +1,4 @@
use std::convert::Infallible;
use std::sync::Arc;

use futures::{Stream, StreamExt, TryStreamExt};

@ -11,10 +10,6 @@ use hyper::{
use idna::domain_to_unicode;
use serde::{Deserialize, Serialize};

use garage_model::bucket_table::BucketParams;
use garage_model::garage::Garage;
use garage_model::key_table::Key;
use garage_util::data::Uuid;
use garage_util::error::Error as GarageError;

use crate::common_error::{CommonError as Error, *};

@ -32,15 +27,6 @@ pub enum Authorization {
    Owner,
}

/// The values which are known for each request related to a bucket
pub struct ReqCtx {
    pub garage: Arc<Garage>,
    pub bucket_id: Uuid,
    pub bucket_name: String,
    pub bucket_params: BucketParams,
    pub api_key: Key,
}

/// Host to bucket
///
/// Convert a host, like "bucket.garage-site.tld" to the corresponding bucket "bucket",
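Editor's note: this `ReqCtx` struct is the pivot of most of the API hunks that follow. Instead of threading `garage`, `bucket_id`, `bucket_name`, and the API key through every handler signature, main bundles them into one context that handlers destructure for just the fields they need. A minimal sketch of the pattern with a hypothetical two-field context (not the real Garage types):

```rust
use std::sync::Arc;

struct Garage; // stand-in for the real shared-state type

struct ReqCtx {
    garage: Arc<Garage>,
    bucket_id: u64,
    bucket_name: String,
}

// Handlers take the whole context and pull out only what they need;
// `..` ignores the remaining fields without naming them.
fn handle_read(ctx: &ReqCtx) -> u64 {
    let ReqCtx {
        garage, bucket_id, ..
    } = ctx;
    let _ = garage; // would drive table lookups in a real handler
    *bucket_id
}

fn main() {
    let ctx = ReqCtx {
        garage: Arc::new(Garage),
        bucket_id: 42,
        bucket_name: "eprouvette".into(),
    };
    assert_eq!(handle_read(&ctx), 42);
    println!("bucket {} ok", ctx.bucket_name);
}
```

The design win is visible in the diffs below: adding a new per-request value means touching one struct, not every handler signature in the s3 and k2v modules.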
@ -15,7 +15,8 @@ use garage_model::garage::Garage;
use crate::generic_server::*;
use crate::k2v::error::*;

use crate::signature::verify_request;
use crate::signature::payload::check_payload_signature;
use crate::signature::streaming::*;

use crate::helpers::*;
use crate::k2v::batch::*;

@ -85,7 +86,17 @@ impl ApiHandler for K2VApiServer {
            return Ok(options_res.map(|_empty_body: EmptyBody| empty_body()));
        }

        let (req, api_key, _content_sha256) = verify_request(&garage, req, "k2v").await?;
        let (api_key, mut content_sha256) = check_payload_signature(&garage, "k2v", &req).await?;
        let api_key = api_key
            .ok_or_else(|| Error::forbidden("Garage does not support anonymous access yet"))?;

        let req = parse_streaming_body(
            &api_key,
            req,
            &mut content_sha256,
            &garage.config.s3_api.s3_region,
            "k2v",
        )?;

        let bucket_id = garage
            .bucket_helper()

@ -95,7 +106,6 @@ impl ApiHandler for K2VApiServer {
            .bucket_helper()
            .get_existing_bucket(bucket_id)
            .await?;
        let bucket_params = bucket.state.into_option().unwrap();

        let allowed = match endpoint.authorization_type() {
            Authorization::Read => api_key.allow_read(&bucket_id),

@ -113,42 +123,40 @@ impl ApiHandler for K2VApiServer {
        // are always preflighted, i.e. the browser should make
        // an OPTIONS call before to check it is allowed
        let matching_cors_rule = match *req.method() {
            Method::GET | Method::HEAD | Method::POST => {
                find_matching_cors_rule(&bucket_params, &req)
                    .ok_or_internal_error("Error looking up CORS rule")?
                    .cloned()
            }
            Method::GET | Method::HEAD | Method::POST => find_matching_cors_rule(&bucket, &req)
                .ok_or_internal_error("Error looking up CORS rule")?,
            _ => None,
        };

        let ctx = ReqCtx {
            garage,
            bucket_id,
            bucket_name,
            bucket_params,
            api_key,
        };

        let resp = match endpoint {
            Endpoint::DeleteItem {
                partition_key,
                sort_key,
            } => handle_delete_item(ctx, req, &partition_key, &sort_key).await,
            } => handle_delete_item(garage, req, bucket_id, &partition_key, &sort_key).await,
            Endpoint::InsertItem {
                partition_key,
                sort_key,
            } => handle_insert_item(ctx, req, &partition_key, &sort_key).await,
            } => handle_insert_item(garage, req, bucket_id, &partition_key, &sort_key).await,
            Endpoint::ReadItem {
                partition_key,
                sort_key,
            } => handle_read_item(ctx, &req, &partition_key, &sort_key).await,
            } => handle_read_item(garage, &req, bucket_id, &partition_key, &sort_key).await,
            Endpoint::PollItem {
                partition_key,
                sort_key,
                causality_token,
                timeout,
            } => {
                handle_poll_item(ctx, &req, partition_key, sort_key, causality_token, timeout).await
                handle_poll_item(
                    garage,
                    &req,
                    bucket_id,
                    partition_key,
                    sort_key,
                    causality_token,
                    timeout,
                )
                .await
            }
            Endpoint::ReadIndex {
                prefix,

@ -156,12 +164,12 @@ impl ApiHandler for K2VApiServer {
                end,
                limit,
                reverse,
            } => handle_read_index(ctx, prefix, start, end, limit, reverse).await,
            Endpoint::InsertBatch {} => handle_insert_batch(ctx, req).await,
            Endpoint::ReadBatch {} => handle_read_batch(ctx, req).await,
            Endpoint::DeleteBatch {} => handle_delete_batch(ctx, req).await,
            } => handle_read_index(garage, bucket_id, prefix, start, end, limit, reverse).await,
            Endpoint::InsertBatch {} => handle_insert_batch(garage, bucket_id, req).await,
            Endpoint::ReadBatch {} => handle_read_batch(garage, bucket_id, req).await,
            Endpoint::DeleteBatch {} => handle_delete_batch(garage, bucket_id, req).await,
            Endpoint::PollRange { partition_key } => {
                handle_poll_range(ctx, &partition_key, req).await
                handle_poll_range(garage, bucket_id, &partition_key, req).await
            }
            Endpoint::Options => unreachable!(),
        };

@ -170,7 +178,7 @@ impl ApiHandler for K2VApiServer {
        // add the corresponding CORS headers to the response
        let mut resp_ok = resp?;
        if let Some(rule) = matching_cors_rule {
            add_cors_headers(&mut resp_ok, &rule)
            add_cors_headers(&mut resp_ok, rule)
                .ok_or_internal_error("Invalid bucket CORS configuration")?;
        }
@ -1,9 +1,14 @@
use std::sync::Arc;

use base64::prelude::*;
use hyper::{Request, Response, StatusCode};
use serde::{Deserialize, Serialize};

use garage_util::data::*;

use garage_table::{EnumerationOrder, TableSchema};

use garage_model::garage::Garage;
use garage_model::k2v::causality::*;
use garage_model::k2v::item_table::*;

@ -13,12 +18,10 @@ use crate::k2v::error::*;
use crate::k2v::range::read_range;

pub async fn handle_insert_batch(
    ctx: ReqCtx,
    garage: Arc<Garage>,
    bucket_id: Uuid,
    req: Request<ReqBody>,
) -> Result<Response<ResBody>, Error> {
    let ReqCtx {
        garage, bucket_id, ..
    } = &ctx;
    let items = parse_json_body::<Vec<InsertBatchItem>, _, Error>(req).await?;

    let mut items2 = vec![];

@ -35,7 +38,7 @@ pub async fn handle_insert_batch(
        items2.push((it.pk, it.sk, ct, v));
    }

    garage.k2v.rpc.insert_batch(*bucket_id, items2).await?;
    garage.k2v.rpc.insert_batch(bucket_id, items2).await?;

    Ok(Response::builder()
        .status(StatusCode::NO_CONTENT)

@ -43,7 +46,8 @@ pub async fn handle_insert_batch(
}

pub async fn handle_read_batch(
    ctx: ReqCtx,
    garage: Arc<Garage>,
    bucket_id: Uuid,
    req: Request<ReqBody>,
) -> Result<Response<ResBody>, Error> {
    let queries = parse_json_body::<Vec<ReadBatchQuery>, _, Error>(req).await?;

@ -51,7 +55,7 @@ pub async fn handle_read_batch(
    let resp_results = futures::future::join_all(
        queries
            .into_iter()
            .map(|q| handle_read_batch_query(&ctx, q)),
            .map(|q| handle_read_batch_query(&garage, bucket_id, q)),
    )
    .await;

@ -64,15 +68,12 @@ pub async fn handle_read_batch(
}

async fn handle_read_batch_query(
    ctx: &ReqCtx,
    garage: &Arc<Garage>,
    bucket_id: Uuid,
    query: ReadBatchQuery,
) -> Result<ReadBatchResponse, Error> {
    let ReqCtx {
        garage, bucket_id, ..
    } = ctx;

    let partition = K2VItemPartition {
        bucket_id: *bucket_id,
        bucket_id,
        partition_key: query.partition_key.clone(),
    };

@ -137,7 +138,8 @@ async fn handle_read_batch_query(
}

pub async fn handle_delete_batch(
    ctx: ReqCtx,
    garage: Arc<Garage>,
    bucket_id: Uuid,
    req: Request<ReqBody>,
) -> Result<Response<ResBody>, Error> {
    let queries = parse_json_body::<Vec<DeleteBatchQuery>, _, Error>(req).await?;

@ -145,7 +147,7 @@ pub async fn handle_delete_batch(
    let resp_results = futures::future::join_all(
        queries
            .into_iter()
            .map(|q| handle_delete_batch_query(&ctx, q)),
            .map(|q| handle_delete_batch_query(&garage, bucket_id, q)),
    )
    .await;

@ -158,15 +160,12 @@ pub async fn handle_delete_batch(
}

async fn handle_delete_batch_query(
    ctx: &ReqCtx,
    garage: &Arc<Garage>,
    bucket_id: Uuid,
    query: DeleteBatchQuery,
) -> Result<DeleteBatchResponse, Error> {
    let ReqCtx {
        garage, bucket_id, ..
    } = &ctx;

    let partition = K2VItemPartition {
        bucket_id: *bucket_id,
        bucket_id,
        partition_key: query.partition_key.clone(),
    };

@ -196,7 +195,7 @@ async fn handle_delete_batch_query(
            .k2v
            .rpc
            .insert(
                *bucket_id,
                bucket_id,
                i.partition.partition_key,
                i.sort_key,
                Some(cc),

@ -236,7 +235,7 @@ async fn handle_delete_batch_query(
            .collect::<Vec<_>>();
        let n = items.len();

        garage.k2v.rpc.insert_batch(*bucket_id, items).await?;
        garage.k2v.rpc.insert_batch(bucket_id, items).await?;

        n
    };

@ -252,13 +251,11 @@ async fn handle_delete_batch_query(
}

pub(crate) async fn handle_poll_range(
    ctx: ReqCtx,
    garage: Arc<Garage>,
    bucket_id: Uuid,
    partition_key: &str,
    req: Request<ReqBody>,
) -> Result<Response<ResBody>, Error> {
    let ReqCtx {
        garage, bucket_id, ..
    } = ctx;
    use garage_model::k2v::sub::PollRange;

    let query = parse_json_body::<PollRangeQuery, _, Error>(req).await?;
@ -1,8 +1,14 @@
use std::sync::Arc;

use hyper::Response;
use serde::Serialize;

use garage_util::data::*;

use garage_rpc::ring::Ring;
use garage_table::util::*;

use garage_model::garage::Garage;
use garage_model::k2v::item_table::{BYTES, CONFLICTS, ENTRIES, VALUES};

use crate::helpers::*;

@ -11,24 +17,17 @@ use crate::k2v::error::*;
use crate::k2v::range::read_range;

pub async fn handle_read_index(
    ctx: ReqCtx,
    garage: Arc<Garage>,
    bucket_id: Uuid,
    prefix: Option<String>,
    start: Option<String>,
    end: Option<String>,
    limit: Option<u64>,
    reverse: Option<bool>,
) -> Result<Response<ResBody>, Error> {
    let ReqCtx {
        garage, bucket_id, ..
    } = &ctx;

    let reverse = reverse.unwrap_or(false);

    let node_id_vec = garage
        .system
        .cluster_layout()
        .all_nongateway_nodes()
        .to_vec();
    let ring: Arc<Ring> = garage.system.ring.borrow().clone();

    let (partition_keys, more, next_start) = read_range(
        &garage.k2v.counter_table.table,

@ -37,7 +36,7 @@ pub async fn handle_read_index(
        &start,
        &end,
        limit,
        Some((DeletedFilter::NotDeleted, node_id_vec)),
        Some((DeletedFilter::NotDeleted, ring.layout.node_id_vec.clone())),
        EnumerationOrder::from_reverse(reverse),
    )
    .await?;

@ -56,7 +55,7 @@ pub async fn handle_read_index(
        partition_keys: partition_keys
            .into_iter()
            .map(|part| {
                let vals = part.filtered_values(&garage.system.cluster_layout());
                let vals = part.filtered_values(&ring);
                ReadIndexResponseEntry {
                    pk: part.sk,
                    entries: *vals.get(&s_entries).unwrap_or(&0),
@ -1,8 +1,13 @@
use std::sync::Arc;

use base64::prelude::*;
use http::header;

use hyper::{Request, Response, StatusCode};

use garage_util::data::*;

use garage_model::garage::Garage;
use garage_model::k2v::causality::*;
use garage_model::k2v::item_table::*;

@ -95,15 +100,12 @@ impl ReturnFormat {
/// Handle ReadItem request
#[allow(clippy::ptr_arg)]
pub async fn handle_read_item(
    ctx: ReqCtx,
    garage: Arc<Garage>,
    req: &Request<ReqBody>,
    bucket_id: Uuid,
    partition_key: &str,
    sort_key: &String,
) -> Result<Response<ResBody>, Error> {
    let ReqCtx {
        garage, bucket_id, ..
    } = &ctx;

    let format = ReturnFormat::from(req)?;

    let item = garage

@ -111,7 +113,7 @@ pub async fn handle_read_item(
        .item_table
        .get(
            &K2VItemPartition {
                bucket_id: *bucket_id,
                bucket_id,
                partition_key: partition_key.to_string(),
            },
            sort_key,

@ -123,14 +125,12 @@ pub async fn handle_read_item(
}

pub async fn handle_insert_item(
    ctx: ReqCtx,
    garage: Arc<Garage>,
    req: Request<ReqBody>,
    bucket_id: Uuid,
    partition_key: &str,
    sort_key: &str,
) -> Result<Response<ResBody>, Error> {
    let ReqCtx {
        garage, bucket_id, ..
    } = &ctx;
    let causal_context = req
        .headers()
        .get(X_GARAGE_CAUSALITY_TOKEN)

@ -149,7 +149,7 @@ pub async fn handle_insert_item(
        .k2v
        .rpc
        .insert(
            *bucket_id,
            bucket_id,
            partition_key.to_string(),
            sort_key.to_string(),
            causal_context,

@ -163,14 +163,12 @@ pub async fn handle_insert_item(
}

pub async fn handle_delete_item(
    ctx: ReqCtx,
    garage: Arc<Garage>,
    req: Request<ReqBody>,
    bucket_id: Uuid,
    partition_key: &str,
    sort_key: &str,
) -> Result<Response<ResBody>, Error> {
    let ReqCtx {
        garage, bucket_id, ..
    } = &ctx;
    let causal_context = req
        .headers()
        .get(X_GARAGE_CAUSALITY_TOKEN)

@ -185,7 +183,7 @@ pub async fn handle_delete_item(
        .k2v
        .rpc
        .insert(
            *bucket_id,
            bucket_id,
            partition_key.to_string(),
            sort_key.to_string(),
            causal_context,

@ -201,16 +199,14 @@ pub async fn handle_delete_item(
/// Handle ReadItem request
#[allow(clippy::ptr_arg)]
pub async fn handle_poll_item(
    ctx: ReqCtx,
    garage: Arc<Garage>,
    req: &Request<ReqBody>,
    bucket_id: Uuid,
    partition_key: String,
    sort_key: String,
    causality_token: String,
    timeout_secs: Option<u64>,
) -> Result<Response<ResBody>, Error> {
    let ReqCtx {
        garage, bucket_id, ..
    } = &ctx;
    let format = ReturnFormat::from(req)?;

    let causal_context =

@ -222,7 +218,7 @@ pub async fn handle_poll_item(
        .k2v
        .rpc
        .poll_item(
            *bucket_id,
            bucket_id,
            partition_key,
            sort_key,
            causal_context,
@ -17,7 +17,8 @@ use garage_model::key_table::Key;
use crate::generic_server::*;
use crate::s3::error::*;

use crate::signature::verify_request;
use crate::signature::payload::check_payload_signature;
use crate::signature::streaming::*;

use crate::helpers::*;
use crate::s3::bucket::*;

@ -124,7 +125,17 @@ impl ApiHandler for S3ApiServer {
            return Ok(options_res.map(|_empty_body: EmptyBody| empty_body()));
        }

        let (req, api_key, content_sha256) = verify_request(&garage, req, "s3").await?;
        let (api_key, mut content_sha256) = check_payload_signature(&garage, "s3", &req).await?;
        let api_key = api_key
            .ok_or_else(|| Error::forbidden("Garage does not support anonymous access yet"))?;

        let req = parse_streaming_body(
            &api_key,
            req,
            &mut content_sha256,
            &garage.config.s3_api.s3_region,
            "s3",
        )?;

        let bucket_name = match bucket_name {
            None => {

@ -155,7 +166,6 @@ impl ApiHandler for S3ApiServer {
            .bucket_helper()
            .get_existing_bucket(bucket_id)
            .await?;
        let bucket_params = bucket.state.into_option().unwrap();

        let allowed = match endpoint.authorization_type() {
            Authorization::Read => api_key.allow_read(&bucket_id),

@ -168,20 +178,12 @@ impl ApiHandler for S3ApiServer {
            return Err(Error::forbidden("Operation is not allowed for this key."));
        }

        let matching_cors_rule = find_matching_cors_rule(&bucket_params, &req)?.cloned();

        let ctx = ReqCtx {
            garage,
            bucket_id,
            bucket_name,
            bucket_params,
            api_key,
        };
        let matching_cors_rule = find_matching_cors_rule(&bucket, &req)?;

        let resp = match endpoint {
            Endpoint::HeadObject {
                key, part_number, ..
            } => handle_head(ctx, &req, &key, part_number).await,
            } => handle_head(garage, &req, bucket_id, &key, part_number).await,
            Endpoint::GetObject {
                key,
                part_number,

@ -201,37 +203,74 @@ impl ApiHandler for S3ApiServer {
                    response_content_type,
                    response_expires,
                };
                handle_get(ctx, &req, &key, part_number, overrides).await
                handle_get(garage, &req, bucket_id, &key, part_number, overrides).await
            }
            Endpoint::UploadPart {
                key,
                part_number,
                upload_id,
            } => handle_put_part(ctx, req, &key, part_number, &upload_id, content_sha256).await,
            Endpoint::CopyObject { key } => handle_copy(ctx, &req, &key).await,
            } => {
                handle_put_part(
                    garage,
                    req,
                    bucket_id,
                    &key,
                    part_number,
                    &upload_id,
                    content_sha256,
                )
                .await
            }
            Endpoint::CopyObject { key } => {
                handle_copy(garage, &api_key, &req, bucket_id, &key).await
            }
            Endpoint::UploadPartCopy {
                key,
                part_number,
                upload_id,
            } => handle_upload_part_copy(ctx, &req, &key, part_number, &upload_id).await,
            Endpoint::PutObject { key } => handle_put(ctx, req, &key, content_sha256).await,
            Endpoint::AbortMultipartUpload { key, upload_id } => {
                handle_abort_multipart_upload(ctx, &key, &upload_id).await
            } => {
                handle_upload_part_copy(
                    garage,
                    &api_key,
                    &req,
                    bucket_id,
                    &key,
                    part_number,
                    &upload_id,
                )
                .await
            }
            Endpoint::DeleteObject { key, .. } => handle_delete(ctx, &key).await,
            Endpoint::PutObject { key } => {
                handle_put(garage, req, &bucket, &key, content_sha256).await
            }
            Endpoint::AbortMultipartUpload { key, upload_id } => {
                handle_abort_multipart_upload(garage, bucket_id, &key, &upload_id).await
            }
            Endpoint::DeleteObject { key, .. } => handle_delete(garage, bucket_id, &key).await,
            Endpoint::CreateMultipartUpload { key } => {
                handle_create_multipart_upload(ctx, &req, &key).await
                handle_create_multipart_upload(garage, &req, &bucket_name, bucket_id, &key).await
            }
            Endpoint::CompleteMultipartUpload { key, upload_id } => {
                handle_complete_multipart_upload(ctx, req, &key, &upload_id, content_sha256).await
                handle_complete_multipart_upload(
                    garage,
                    req,
                    &bucket_name,
                    &bucket,
                    &key,
                    &upload_id,
                    content_sha256,
                )
                .await
            }
            Endpoint::CreateBucket {} => unreachable!(),
            Endpoint::HeadBucket {} => {
                let response = Response::builder().body(empty_body()).unwrap();
                Ok(response)
            }
            Endpoint::DeleteBucket {} => handle_delete_bucket(ctx).await,
            Endpoint::GetBucketLocation {} => handle_get_bucket_location(ctx),
            Endpoint::DeleteBucket {} => {
                handle_delete_bucket(&garage, bucket_id, bucket_name, &api_key.key_id).await
            }
            Endpoint::GetBucketLocation {} => handle_get_bucket_location(garage),
            Endpoint::GetBucketVersioning {} => handle_get_bucket_versioning(),
            Endpoint::ListObjects {
                delimiter,

@ -240,9 +279,11 @@ impl ApiHandler for S3ApiServer {
                max_keys,
                prefix,
            } => {
                let query = ListObjectsQuery {
                handle_list(
                    garage,
                    &ListObjectsQuery {
                        common: ListQueryCommon {
                            bucket_name: ctx.bucket_name.clone(),
                            bucket_name,
                            bucket_id,
                            delimiter,
                            page_size: max_keys.unwrap_or(1000).clamp(1, 1000),

@ -253,8 +294,9 @@ impl ApiHandler for S3ApiServer {
                        marker,
                        continuation_token: None,
                        start_after: None,
                };
                handle_list(ctx, &query).await
                    },
                )
                .await
            }
            Endpoint::ListObjectsV2 {
                delimiter,

@ -267,9 +309,11 @@ impl ApiHandler for S3ApiServer {
                ..
            } => {
                if list_type == "2" {
                    let query = ListObjectsQuery {
                    handle_list(
                        garage,
                        &ListObjectsQuery {
                            common: ListQueryCommon {
                                bucket_name: ctx.bucket_name.clone(),
                                bucket_name,
                                bucket_id,
                                delimiter,
                                page_size: max_keys.unwrap_or(1000).clamp(1, 1000),

@ -280,8 +324,9 @@ impl ApiHandler for S3ApiServer {
                            marker: None,
                            continuation_token,
                            start_after,
                    };
                    handle_list(ctx, &query).await
                        },
                    )
                    .await
                } else {
                    Err(Error::bad_request(format!(
                        "Invalid endpoint: list-type={}",

@ -297,9 +342,11 @@ impl ApiHandler for S3ApiServer {
                prefix,
                upload_id_marker,
            } => {
                let query = ListMultipartUploadsQuery {
                handle_list_multipart_upload(
                    garage,
                    &ListMultipartUploadsQuery {
                        common: ListQueryCommon {
                            bucket_name: ctx.bucket_name.clone(),
                            bucket_name,
                            bucket_id,
                            delimiter,
                            page_size: max_uploads.unwrap_or(1000).clamp(1, 1000),

@ -308,8 +355,9 @@ impl ApiHandler for S3ApiServer {
                        },
                        key_marker,
                        upload_id_marker,
                };
                handle_list_multipart_upload(ctx, &query).await
                    },
                )
                .await
            }
            Endpoint::ListParts {
                key,

@ -317,28 +365,39 @@ impl ApiHandler for S3ApiServer {
                part_number_marker,
                upload_id,
            } => {
                let query = ListPartsQuery {
                    bucket_name: ctx.bucket_name.clone(),
                handle_list_parts(
                    garage,
                    &ListPartsQuery {
                        bucket_name,
                        bucket_id,
                        key,
                        upload_id,
                        part_number_marker: part_number_marker.map(|p| p.min(10000)),
                        max_parts: max_parts.unwrap_or(1000).clamp(1, 1000),
                };
                handle_list_parts(ctx, req, &query).await
                    },
                )
                .await
            }
            Endpoint::DeleteObjects {} => handle_delete_objects(ctx, req, content_sha256).await,
            Endpoint::GetBucketWebsite {} => handle_get_website(ctx).await,
            Endpoint::PutBucketWebsite {} => handle_put_website(ctx, req, content_sha256).await,
            Endpoint::DeleteBucketWebsite {} => handle_delete_website(ctx).await,
            Endpoint::GetBucketCors {} => handle_get_cors(ctx).await,
            Endpoint::PutBucketCors {} => handle_put_cors(ctx, req, content_sha256).await,
            Endpoint::DeleteBucketCors {} => handle_delete_cors(ctx).await,
            Endpoint::GetBucketLifecycleConfiguration {} => handle_get_lifecycle(ctx).await,
            Endpoint::DeleteObjects {} => {
                handle_delete_objects(garage, bucket_id, req, content_sha256).await
            }
            Endpoint::GetBucketWebsite {} => handle_get_website(&bucket).await,
            Endpoint::PutBucketWebsite {} => {
                handle_put_website(garage, bucket.clone(), req, content_sha256).await
            }
            Endpoint::DeleteBucketWebsite {} => handle_delete_website(garage, bucket.clone()).await,
            Endpoint::GetBucketCors {} => handle_get_cors(&bucket).await,
||||
Endpoint::PutBucketCors {} => {
|
||||
handle_put_cors(garage, bucket.clone(), req, content_sha256).await
|
||||
}
|
||||
Endpoint::DeleteBucketCors {} => handle_delete_cors(garage, bucket.clone()).await,
|
||||
Endpoint::GetBucketLifecycleConfiguration {} => handle_get_lifecycle(&bucket).await,
|
||||
Endpoint::PutBucketLifecycleConfiguration {} => {
|
||||
handle_put_lifecycle(ctx, req, content_sha256).await
|
||||
handle_put_lifecycle(garage, bucket.clone(), req, content_sha256).await
|
||||
}
|
||||
Endpoint::DeleteBucketLifecycle {} => {
|
||||
handle_delete_lifecycle(garage, bucket.clone()).await
|
||||
}
|
||||
Endpoint::DeleteBucketLifecycle {} => handle_delete_lifecycle(ctx).await,
|
||||
endpoint => Err(Error::NotImplemented(endpoint.name().to_owned())),
|
||||
};
|
||||
|
||||
|
@ -346,7 +405,7 @@ impl ApiHandler for S3ApiServer {
|
|||
// add the corresponding CORS headers to the response
|
||||
let mut resp_ok = resp?;
|
||||
if let Some(rule) = matching_cors_rule {
|
||||
add_cors_headers(&mut resp_ok, &rule)
|
||||
add_cors_headers(&mut resp_ok, rule)
|
||||
.ok_or_internal_error("Invalid bucket CORS configuration")?;
|
||||
}
|
||||
|
||||
|
|
|
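// Note: main threads a single ReqCtx through every handler above, where the
// older branch passes garage/bucket_id/bucket_name/api_key individually.
// A minimal sketch of that bundle, inferred from the fields destructured
// throughout this diff (field order and visibility are assumptions, not the
// exact definition in main):
pub struct ReqCtx {
    pub garage: Arc<Garage>,
    pub bucket_id: Uuid,
    pub bucket_name: String,
    pub bucket_params: BucketParams,
    pub api_key: Key,
}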
@@ -1,4 +1,5 @@
use std::collections::HashMap;
use std::sync::Arc;

use http_body_util::BodyExt;
use hyper::{Request, Response, StatusCode};

@@ -20,8 +21,7 @@ use crate::s3::error::*;
use crate::s3::xml as s3_xml;
use crate::signature::verify_signed_content;

pub fn handle_get_bucket_location(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
let ReqCtx { garage, .. } = ctx;
pub fn handle_get_bucket_location(garage: Arc<Garage>) -> Result<Response<ResBody>, Error> {
let loc = s3_xml::LocationConstraint {
xmlns: (),
region: garage.config.s3_api.s3_region.to_string(),

@@ -204,20 +204,21 @@ pub async fn handle_create_bucket(
.unwrap())
}

pub async fn handle_delete_bucket(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
let ReqCtx {
garage,
bucket_id,
bucket_name,
bucket_params: bucket_state,
api_key,
..
} = &ctx;
pub async fn handle_delete_bucket(
garage: &Garage,
bucket_id: Uuid,
bucket_name: String,
api_key_id: &String,
) -> Result<Response<ResBody>, Error> {
let helper = garage.locked_helper().await;

let api_key = helper.key().get_existing_key(api_key_id).await?;
let key_params = api_key.params().unwrap();

let is_local_alias = matches!(key_params.local_aliases.get(bucket_name), Some(Some(_)));
let is_local_alias = matches!(key_params.local_aliases.get(&bucket_name), Some(Some(_)));

let mut bucket = helper.bucket().get_existing_bucket(bucket_id).await?;
let bucket_state = bucket.state.as_option().unwrap();

// If the bucket has no other aliases, this is a true deletion.
// Otherwise, it is just an alias removal.

@@ -227,20 +228,20 @@ pub async fn handle_delete_bucket(ctx: ReqCtx) -> Result<Response<ResBody>, Error
.items()
.iter()
.filter(|(_, _, active)| *active)
.any(|(n, _, _)| is_local_alias || (*n != *bucket_name));
.any(|(n, _, _)| is_local_alias || (*n != bucket_name));

let has_other_local_aliases = bucket_state
.local_aliases
.items()
.iter()
.filter(|(_, _, active)| *active)
.any(|((k, n), _, _)| !is_local_alias || *n != *bucket_name || *k != api_key.key_id);
.any(|((k, n), _, _)| !is_local_alias || *n != bucket_name || *k != api_key.key_id);

if !has_other_global_aliases && !has_other_local_aliases {
// Delete bucket

// Check bucket is empty
if !helper.bucket().is_bucket_empty(*bucket_id).await? {
if !helper.bucket().is_bucket_empty(bucket_id).await? {
return Err(CommonError::BucketNotEmpty.into());
}

@@ -248,36 +249,33 @@ pub async fn handle_delete_bucket(ctx: ReqCtx) -> Result<Response<ResBody>, Error
// 1. delete bucket alias
if is_local_alias {
helper
.unset_local_bucket_alias(*bucket_id, &api_key.key_id, bucket_name)
.unset_local_bucket_alias(bucket_id, &api_key.key_id, &bucket_name)
.await?;
} else {
helper
.unset_global_bucket_alias(*bucket_id, bucket_name)
.unset_global_bucket_alias(bucket_id, &bucket_name)
.await?;
}

// 2. delete authorization from keys that had access
for (key_id, _) in bucket_state.authorized_keys.items() {
for (key_id, _) in bucket.authorized_keys() {
helper
.set_bucket_key_permissions(*bucket_id, key_id, BucketKeyPerm::NO_PERMISSIONS)
.set_bucket_key_permissions(bucket.id, key_id, BucketKeyPerm::NO_PERMISSIONS)
.await?;
}

let bucket = Bucket {
id: *bucket_id,
state: Deletable::delete(),
};
// 3. delete bucket
bucket.state = Deletable::delete();
garage.bucket_table.insert(&bucket).await?;
} else if is_local_alias {
// Just unalias
helper
.unset_local_bucket_alias(*bucket_id, &api_key.key_id, bucket_name)
.unset_local_bucket_alias(bucket_id, &api_key.key_id, &bucket_name)
.await?;
} else {
// Just unalias (but from global namespace)
helper
.unset_global_bucket_alias(*bucket_id, bucket_name)
.unset_global_bucket_alias(bucket_id, &bucket_name)
.await?;
}
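// Condensed restatement of the three-way decision implemented above, as a
// standalone helper (illustrative only; the names mirror the locals in
// handle_delete_bucket, and the enum is hypothetical):
enum BucketDeleteAction {
    DeleteBucket,  // no alias left anywhere: revoke keys, mark bucket deleted
    UnaliasLocal,  // other aliases remain; ours was a key-local alias
    UnaliasGlobal, // other aliases remain; ours was a global alias
}

fn delete_action(
    has_other_global_aliases: bool,
    has_other_local_aliases: bool,
    is_local_alias: bool,
) -> BucketDeleteAction {
    if !has_other_global_aliases && !has_other_local_aliases {
        BucketDeleteAction::DeleteBucket
    } else if is_local_alias {
        BucketDeleteAction::UnaliasLocal
    } else {
        BucketDeleteAction::UnaliasGlobal
    }
}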
@@ -1,406 +0,0 @@
use std::convert::{TryFrom, TryInto};
use std::hash::Hasher;

use base64::prelude::*;
use crc32c::Crc32cHasher as Crc32c;
use crc32fast::Hasher as Crc32;
use md5::{Digest, Md5};
use sha1::Sha1;
use sha2::Sha256;

use http::{HeaderMap, HeaderName, HeaderValue};

use garage_util::data::*;
use garage_util::error::OkOrMessage;

use garage_model::s3::object_table::*;

use crate::s3::error::*;

pub const X_AMZ_CHECKSUM_ALGORITHM: HeaderName =
HeaderName::from_static("x-amz-checksum-algorithm");
pub const X_AMZ_CHECKSUM_MODE: HeaderName = HeaderName::from_static("x-amz-checksum-mode");
pub const X_AMZ_CHECKSUM_CRC32: HeaderName = HeaderName::from_static("x-amz-checksum-crc32");
pub const X_AMZ_CHECKSUM_CRC32C: HeaderName = HeaderName::from_static("x-amz-checksum-crc32c");
pub const X_AMZ_CHECKSUM_SHA1: HeaderName = HeaderName::from_static("x-amz-checksum-sha1");
pub const X_AMZ_CHECKSUM_SHA256: HeaderName = HeaderName::from_static("x-amz-checksum-sha256");

pub type Crc32Checksum = [u8; 4];
pub type Crc32cChecksum = [u8; 4];
pub type Md5Checksum = [u8; 16];
pub type Sha1Checksum = [u8; 20];
pub type Sha256Checksum = [u8; 32];

#[derive(Debug, Default)]
pub(crate) struct ExpectedChecksums {
// base64-encoded md5 (content-md5 header)
pub md5: Option<String>,
// content_sha256 (as a Hash / FixedBytes32)
pub sha256: Option<Hash>,
// extra x-amz-checksum-* header
pub extra: Option<ChecksumValue>,
}

pub(crate) struct Checksummer {
pub crc32: Option<Crc32>,
pub crc32c: Option<Crc32c>,
pub md5: Option<Md5>,
pub sha1: Option<Sha1>,
pub sha256: Option<Sha256>,
}

#[derive(Default)]
pub(crate) struct Checksums {
pub crc32: Option<Crc32Checksum>,
pub crc32c: Option<Crc32cChecksum>,
pub md5: Option<Md5Checksum>,
pub sha1: Option<Sha1Checksum>,
pub sha256: Option<Sha256Checksum>,
}

impl Checksummer {
pub(crate) fn init(expected: &ExpectedChecksums, require_md5: bool) -> Self {
let mut ret = Self {
crc32: None,
crc32c: None,
md5: None,
sha1: None,
sha256: None,
};

if expected.md5.is_some() || require_md5 {
ret.md5 = Some(Md5::new());
}
if expected.sha256.is_some() || matches!(&expected.extra, Some(ChecksumValue::Sha256(_))) {
ret.sha256 = Some(Sha256::new());
}
if matches!(&expected.extra, Some(ChecksumValue::Crc32(_))) {
ret.crc32 = Some(Crc32::new());
}
if matches!(&expected.extra, Some(ChecksumValue::Crc32c(_))) {
ret.crc32c = Some(Crc32c::default());
}
if matches!(&expected.extra, Some(ChecksumValue::Sha1(_))) {
ret.sha1 = Some(Sha1::new());
}
ret
}

pub(crate) fn add(mut self, algo: Option<ChecksumAlgorithm>) -> Self {
match algo {
Some(ChecksumAlgorithm::Crc32) => {
self.crc32 = Some(Crc32::new());
}
Some(ChecksumAlgorithm::Crc32c) => {
self.crc32c = Some(Crc32c::default());
}
Some(ChecksumAlgorithm::Sha1) => {
self.sha1 = Some(Sha1::new());
}
Some(ChecksumAlgorithm::Sha256) => {
self.sha256 = Some(Sha256::new());
}
None => (),
}
self
}

pub(crate) fn update(&mut self, bytes: &[u8]) {
if let Some(crc32) = &mut self.crc32 {
crc32.update(bytes);
}
if let Some(crc32c) = &mut self.crc32c {
crc32c.write(bytes);
}
if let Some(md5) = &mut self.md5 {
md5.update(bytes);
}
if let Some(sha1) = &mut self.sha1 {
sha1.update(bytes);
}
if let Some(sha256) = &mut self.sha256 {
sha256.update(bytes);
}
}

pub(crate) fn finalize(self) -> Checksums {
Checksums {
crc32: self.crc32.map(|x| u32::to_be_bytes(x.finalize())),
crc32c: self
.crc32c
.map(|x| u32::to_be_bytes(u32::try_from(x.finish()).unwrap())),
md5: self.md5.map(|x| x.finalize()[..].try_into().unwrap()),
sha1: self.sha1.map(|x| x.finalize()[..].try_into().unwrap()),
sha256: self.sha256.map(|x| x.finalize()[..].try_into().unwrap()),
}
}
}

impl Checksums {
pub fn verify(&self, expected: &ExpectedChecksums) -> Result<(), Error> {
if let Some(expected_md5) = &expected.md5 {
match self.md5 {
Some(md5) if BASE64_STANDARD.encode(&md5) == expected_md5.trim_matches('"') => (),
_ => {
return Err(Error::InvalidDigest(
"MD5 checksum verification failed (from content-md5)".into(),
))
}
}
}
if let Some(expected_sha256) = &expected.sha256 {
match self.sha256 {
Some(sha256) if &sha256[..] == expected_sha256.as_slice() => (),
_ => {
return Err(Error::InvalidDigest(
"SHA256 checksum verification failed (from x-amz-content-sha256)".into(),
))
}
}
}
if let Some(extra) = expected.extra {
let algo = extra.algorithm();
if self.extract(Some(algo)) != Some(extra) {
return Err(Error::InvalidDigest(format!(
"Failed to validate checksum for algorithm {:?}",
algo
)));
}
}
Ok(())
}

pub fn extract(&self, algo: Option<ChecksumAlgorithm>) -> Option<ChecksumValue> {
match algo {
None => None,
Some(ChecksumAlgorithm::Crc32) => Some(ChecksumValue::Crc32(self.crc32.unwrap())),
Some(ChecksumAlgorithm::Crc32c) => Some(ChecksumValue::Crc32c(self.crc32c.unwrap())),
Some(ChecksumAlgorithm::Sha1) => Some(ChecksumValue::Sha1(self.sha1.unwrap())),
Some(ChecksumAlgorithm::Sha256) => Some(ChecksumValue::Sha256(self.sha256.unwrap())),
}
}
}

// ----

#[derive(Default)]
pub(crate) struct MultipartChecksummer {
pub md5: Md5,
pub extra: Option<MultipartExtraChecksummer>,
}

pub(crate) enum MultipartExtraChecksummer {
Crc32(Crc32),
Crc32c(Crc32c),
Sha1(Sha1),
Sha256(Sha256),
}

impl MultipartChecksummer {
pub(crate) fn init(algo: Option<ChecksumAlgorithm>) -> Self {
Self {
md5: Md5::new(),
extra: match algo {
None => None,
Some(ChecksumAlgorithm::Crc32) => {
Some(MultipartExtraChecksummer::Crc32(Crc32::new()))
}
Some(ChecksumAlgorithm::Crc32c) => {
Some(MultipartExtraChecksummer::Crc32c(Crc32c::default()))
}
Some(ChecksumAlgorithm::Sha1) => Some(MultipartExtraChecksummer::Sha1(Sha1::new())),
Some(ChecksumAlgorithm::Sha256) => {
Some(MultipartExtraChecksummer::Sha256(Sha256::new()))
}
},
}
}

pub(crate) fn update(
&mut self,
etag: &str,
checksum: Option<ChecksumValue>,
) -> Result<(), Error> {
self.md5
.update(&hex::decode(&etag).ok_or_message("invalid etag hex")?);
match (&mut self.extra, checksum) {
(None, _) => (),
(
Some(MultipartExtraChecksummer::Crc32(ref mut crc32)),
Some(ChecksumValue::Crc32(x)),
) => {
crc32.update(&x);
}
(
Some(MultipartExtraChecksummer::Crc32c(ref mut crc32c)),
Some(ChecksumValue::Crc32c(x)),
) => {
crc32c.write(&x);
}
(Some(MultipartExtraChecksummer::Sha1(ref mut sha1)), Some(ChecksumValue::Sha1(x))) => {
sha1.update(&x);
}
(
Some(MultipartExtraChecksummer::Sha256(ref mut sha256)),
Some(ChecksumValue::Sha256(x)),
) => {
sha256.update(&x);
}
(Some(_), b) => {
return Err(Error::internal_error(format!(
"part checksum was not computed correctly, got: {:?}",
b
)))
}
}
Ok(())
}

pub(crate) fn finalize(self) -> (Md5Checksum, Option<ChecksumValue>) {
let md5 = self.md5.finalize()[..].try_into().unwrap();
let extra = match self.extra {
None => None,
Some(MultipartExtraChecksummer::Crc32(crc32)) => {
Some(ChecksumValue::Crc32(u32::to_be_bytes(crc32.finalize())))
}
Some(MultipartExtraChecksummer::Crc32c(crc32c)) => Some(ChecksumValue::Crc32c(
u32::to_be_bytes(u32::try_from(crc32c.finish()).unwrap()),
)),
Some(MultipartExtraChecksummer::Sha1(sha1)) => {
Some(ChecksumValue::Sha1(sha1.finalize()[..].try_into().unwrap()))
}
Some(MultipartExtraChecksummer::Sha256(sha256)) => Some(ChecksumValue::Sha256(
sha256.finalize()[..].try_into().unwrap(),
)),
};
(md5, extra)
}
}

// ----

/// Extract the value of the x-amz-checksum-algorithm header
pub(crate) fn request_checksum_algorithm(
headers: &HeaderMap<HeaderValue>,
) -> Result<Option<ChecksumAlgorithm>, Error> {
match headers.get(X_AMZ_CHECKSUM_ALGORITHM) {
None => Ok(None),
Some(x) if x == "CRC32" => Ok(Some(ChecksumAlgorithm::Crc32)),
Some(x) if x == "CRC32C" => Ok(Some(ChecksumAlgorithm::Crc32c)),
Some(x) if x == "SHA1" => Ok(Some(ChecksumAlgorithm::Sha1)),
Some(x) if x == "SHA256" => Ok(Some(ChecksumAlgorithm::Sha256)),
_ => Err(Error::bad_request("invalid checksum algorithm")),
}
}

/// Extract the value of any of the x-amz-checksum-* headers
pub(crate) fn request_checksum_value(
headers: &HeaderMap<HeaderValue>,
) -> Result<Option<ChecksumValue>, Error> {
let mut ret = vec![];

if let Some(crc32_str) = headers.get(X_AMZ_CHECKSUM_CRC32) {
let crc32 = BASE64_STANDARD
.decode(&crc32_str)
.ok()
.and_then(|x| x.try_into().ok())
.ok_or_bad_request("invalid x-amz-checksum-crc32 header")?;
ret.push(ChecksumValue::Crc32(crc32))
}
if let Some(crc32c_str) = headers.get(X_AMZ_CHECKSUM_CRC32C) {
let crc32c = BASE64_STANDARD
.decode(&crc32c_str)
.ok()
.and_then(|x| x.try_into().ok())
.ok_or_bad_request("invalid x-amz-checksum-crc32c header")?;
ret.push(ChecksumValue::Crc32c(crc32c))
}
if let Some(sha1_str) = headers.get(X_AMZ_CHECKSUM_SHA1) {
let sha1 = BASE64_STANDARD
.decode(&sha1_str)
.ok()
.and_then(|x| x.try_into().ok())
.ok_or_bad_request("invalid x-amz-checksum-sha1 header")?;
ret.push(ChecksumValue::Sha1(sha1))
}
if let Some(sha256_str) = headers.get(X_AMZ_CHECKSUM_SHA256) {
let sha256 = BASE64_STANDARD
.decode(&sha256_str)
.ok()
.and_then(|x| x.try_into().ok())
.ok_or_bad_request("invalid x-amz-checksum-sha256 header")?;
ret.push(ChecksumValue::Sha256(sha256))
}

if ret.len() > 1 {
return Err(Error::bad_request(
"multiple x-amz-checksum-* headers given",
));
}
Ok(ret.pop())
}

/// Checks for the presence of the x-amz-checksum-algorithm header;
/// if so, extracts the corresponding x-amz-checksum-* value
pub(crate) fn request_checksum_algorithm_value(
headers: &HeaderMap<HeaderValue>,
) -> Result<Option<ChecksumValue>, Error> {
match headers.get(X_AMZ_CHECKSUM_ALGORITHM) {
Some(x) if x == "CRC32" => {
let crc32 = headers
.get(X_AMZ_CHECKSUM_CRC32)
.and_then(|x| BASE64_STANDARD.decode(&x).ok())
.and_then(|x| x.try_into().ok())
.ok_or_bad_request("invalid x-amz-checksum-crc32 header")?;
Ok(Some(ChecksumValue::Crc32(crc32)))
}
Some(x) if x == "CRC32C" => {
let crc32c = headers
.get(X_AMZ_CHECKSUM_CRC32C)
.and_then(|x| BASE64_STANDARD.decode(&x).ok())
.and_then(|x| x.try_into().ok())
.ok_or_bad_request("invalid x-amz-checksum-crc32c header")?;
Ok(Some(ChecksumValue::Crc32c(crc32c)))
}
Some(x) if x == "SHA1" => {
let sha1 = headers
.get(X_AMZ_CHECKSUM_SHA1)
.and_then(|x| BASE64_STANDARD.decode(&x).ok())
.and_then(|x| x.try_into().ok())
.ok_or_bad_request("invalid x-amz-checksum-sha1 header")?;
Ok(Some(ChecksumValue::Sha1(sha1)))
}
Some(x) if x == "SHA256" => {
let sha256 = headers
.get(X_AMZ_CHECKSUM_SHA256)
.and_then(|x| BASE64_STANDARD.decode(&x).ok())
.and_then(|x| x.try_into().ok())
.ok_or_bad_request("invalid x-amz-checksum-sha256 header")?;
Ok(Some(ChecksumValue::Sha256(sha256)))
}
Some(_) => Err(Error::bad_request("invalid x-amz-checksum-algorithm")),
None => Ok(None),
}
}

pub(crate) fn add_checksum_response_headers(
checksum: &Option<ChecksumValue>,
mut resp: http::response::Builder,
) -> http::response::Builder {
match checksum {
Some(ChecksumValue::Crc32(crc32)) => {
resp = resp.header(X_AMZ_CHECKSUM_CRC32, BASE64_STANDARD.encode(&crc32));
}
Some(ChecksumValue::Crc32c(crc32c)) => {
resp = resp.header(X_AMZ_CHECKSUM_CRC32C, BASE64_STANDARD.encode(&crc32c));
}
Some(ChecksumValue::Sha1(sha1)) => {
resp = resp.header(X_AMZ_CHECKSUM_SHA1, BASE64_STANDARD.encode(&sha1));
}
Some(ChecksumValue::Sha256(sha256)) => {
resp = resp.header(X_AMZ_CHECKSUM_SHA256, BASE64_STANDARD.encode(&sha256));
}
None => (),
}
resp
}
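// A short usage sketch tying the deleted Checksummer API above together
// (illustrative; in the real handlers the header map and body bytes come
// from the actual PutObject request):
fn verify_put_checksums(headers: &HeaderMap<HeaderValue>, body: &[u8]) -> Result<(), Error> {
    let expected = ExpectedChecksums {
        md5: None,    // would be taken from the content-md5 header
        sha256: None, // would be taken from x-amz-content-sha256
        extra: request_checksum_value(headers)?,
    };
    let mut checksummer = Checksummer::init(&expected, /* require_md5 */ true);
    checksummer.update(body); // call once per chunk when streaming
    checksummer.finalize().verify(&expected) // errors on any mismatch
}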
@@ -1,20 +1,22 @@
use std::pin::Pin;
use std::sync::Arc;
use std::time::{Duration, SystemTime, UNIX_EPOCH};

use futures::{stream, stream::Stream, StreamExt, TryStreamExt};
use futures::{stream, stream::Stream, StreamExt};
use md5::{Digest as Md5Digest, Md5};

use bytes::Bytes;
use hyper::{Request, Response};
use serde::Serialize;

use garage_net::bytes_buf::BytesBuf;
use garage_net::stream::read_stream_to_end;
use garage_rpc::rpc_helper::OrderTag;
use garage_table::*;
use garage_util::data::*;
use garage_util::error::Error as GarageError;
use garage_util::time::*;

use garage_model::garage::Garage;
use garage_model::key_table::Key;
use garage_model::s3::block_ref_table::*;
use garage_model::s3::mpu_table::*;
use garage_model::s3::object_table::*;

@@ -22,26 +24,21 @@ use garage_model::s3::version_table::*;

use crate::helpers::*;
use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::checksum::*;
use crate::s3::encryption::EncryptionParams;
use crate::s3::error::*;
use crate::s3::get::full_object_byte_stream;
use crate::s3::multipart;
use crate::s3::put::{get_headers, save_stream, ChecksumMode, SaveStreamResult};
use crate::s3::put::get_headers;
use crate::s3::xml::{self as s3_xml, xmlns_tag};

// -------- CopyObject ---------

pub async fn handle_copy(
ctx: ReqCtx,
garage: Arc<Garage>,
api_key: &Key,
req: &Request<ReqBody>,
dest_bucket_id: Uuid,
dest_key: &str,
) -> Result<Response<ResBody>, Error> {
let copy_precondition = CopyPreconditionHeaders::parse(req)?;

let checksum_algorithm = request_checksum_algorithm(req.headers())?;

let source_object = get_copy_source(&ctx, req).await?;
let source_object = get_copy_source(&garage, api_key, req).await?;

let (source_version, source_version_data, source_version_meta) =
extract_source_info(&source_object)?;
@@ -49,150 +46,26 @@ pub async fn handle_copy(
// Check precondition, e.g. x-amz-copy-source-if-match
copy_precondition.check(source_version, &source_version_meta.etag)?;

// Determine encryption parameters
let (source_encryption, source_object_meta_inner) =
EncryptionParams::check_decrypt_for_copy_source(
&ctx.garage,
req.headers(),
&source_version_meta.encryption,
)?;
let dest_encryption = EncryptionParams::new_from_headers(&ctx.garage, req.headers())?;

// Extract source checksum info before source_object_meta_inner is consumed
let source_checksum = source_object_meta_inner.checksum;
let source_checksum_algorithm = source_checksum.map(|x| x.algorithm());

// If the source object has a checksum, the destination object must have one as well.
// The x-amz-checksum-algorithm header allows changing that algorithm,
// but if it is absent, we must keep the same one as before.
let checksum_algorithm = checksum_algorithm.or(source_checksum_algorithm);

// Determine metadata of destination object
let was_multipart = source_version_meta.etag.contains('-');
let dest_object_meta = ObjectVersionMetaInner {
headers: match req.headers().get("x-amz-metadata-directive") {
Some(v) if v == hyper::header::HeaderValue::from_static("REPLACE") => {
get_headers(req.headers())?
}
_ => source_object_meta_inner.into_owned().headers,
},
checksum: source_checksum,
};

// Do actual object copying
//
// In any of the following scenarios, we need to read the whole object
// data and re-write it again:
//
// - the data needs to be decrypted or encrypted
// - the requested checksum algorithm requires us to recompute a checksum
// - the original object was a multipart upload and a checksum algorithm
//   is defined (AWS specifies that in this case, we must recompute the
//   checksum from scratch as if this was a single big object and not
//   a multipart object, as the checksums are not computed in the same way)
//
// In other cases, we can just copy the metadata and reference the same blocks.
//
// See: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html

let must_recopy = !EncryptionParams::is_same(&source_encryption, &dest_encryption)
|| source_checksum_algorithm != checksum_algorithm
|| (was_multipart && checksum_algorithm.is_some());

let res = if !must_recopy {
// In most cases, we can just copy the metadata and link the blocks of the
// old object from the new object.
handle_copy_metaonly(
ctx,
dest_key,
dest_object_meta,
dest_encryption,
source_version,
source_version_data,
source_version_meta,
)
.await?
} else {
let expected_checksum = ExpectedChecksums {
md5: None,
sha256: None,
extra: source_checksum,
};
let checksum_mode = if was_multipart || source_checksum_algorithm != checksum_algorithm {
ChecksumMode::Calculate(checksum_algorithm)
} else {
ChecksumMode::Verify(&expected_checksum)
};
// If source and dest encryption use different keys,
// we must decrypt content and re-encrypt, so rewrite all data blocks.
handle_copy_reencrypt(
ctx,
dest_key,
dest_object_meta,
dest_encryption,
source_version,
source_version_data,
source_encryption,
checksum_mode,
)
.await?
};

let last_modified = msec_to_rfc3339(res.version_timestamp);
let result = CopyObjectResult {
last_modified: s3_xml::Value(last_modified),
etag: s3_xml::Value(format!("\"{}\"", res.etag)),
};
let xml = s3_xml::to_xml_with_header(&result)?;

let mut resp = Response::builder()
.header("Content-Type", "application/xml")
.header("x-amz-version-id", hex::encode(res.version_uuid))
.header(
"x-amz-copy-source-version-id",
hex::encode(source_version.uuid),
);
dest_encryption.add_response_headers(&mut resp);
Ok(resp.body(string_body(xml))?)
}

async fn handle_copy_metaonly(
ctx: ReqCtx,
dest_key: &str,
dest_object_meta: ObjectVersionMetaInner,
dest_encryption: EncryptionParams,
source_version: &ObjectVersion,
source_version_data: &ObjectVersionData,
source_version_meta: &ObjectVersionMeta,
) -> Result<SaveStreamResult, Error> {
let ReqCtx {
garage,
bucket_id: dest_bucket_id,
..
} = ctx;

// Generate parameters for copied object
let new_uuid = gen_uuid();
let new_timestamp = now_msec();

let new_meta = ObjectVersionMeta {
encryption: dest_encryption.encrypt_meta(dest_object_meta)?,
// Implement x-amz-metadata-directive: REPLACE
let new_meta = match req.headers().get("x-amz-metadata-directive") {
Some(v) if v == hyper::header::HeaderValue::from_static("REPLACE") => ObjectVersionMeta {
headers: get_headers(req.headers())?,
size: source_version_meta.size,
etag: source_version_meta.etag.clone(),
},
_ => source_version_meta.clone(),
};

let res = SaveStreamResult {
version_uuid: new_uuid,
version_timestamp: new_timestamp,
etag: new_meta.etag.clone(),
};
let etag = new_meta.etag.to_string();

// Save object copy
match source_version_data {
ObjectVersionData::DeleteMarker => unreachable!(),
ObjectVersionData::Inline(_meta, bytes) => {
// bytes is either plaintext before & after, or encrypted with the
// same keys, so it's ok to just copy it as is
let dest_object_version = ObjectVersion {
uuid: new_uuid,
timestamp: new_timestamp,

@@ -223,8 +96,7 @@ async fn handle_copy_metaonly(
uuid: new_uuid,
timestamp: new_timestamp,
state: ObjectVersionState::Uploading {
encryption: new_meta.encryption.clone(),
checksum_algorithm: None,
headers: new_meta.headers.clone(),
multipart: false,
},
};

@@ -291,45 +163,28 @@ async fn handle_copy_metaonly(
}
}

Ok(res)
}
let last_modified = msec_to_rfc3339(new_timestamp);
let result = CopyObjectResult {
last_modified: s3_xml::Value(last_modified),
etag: s3_xml::Value(format!("\"{}\"", etag)),
};
let xml = s3_xml::to_xml_with_header(&result)?;

async fn handle_copy_reencrypt(
ctx: ReqCtx,
dest_key: &str,
dest_object_meta: ObjectVersionMetaInner,
dest_encryption: EncryptionParams,
source_version: &ObjectVersion,
source_version_data: &ObjectVersionData,
source_encryption: EncryptionParams,
checksum_mode: ChecksumMode<'_>,
) -> Result<SaveStreamResult, Error> {
// basically we will read the source data (decrypting if necessary)
// and save that in a new object (encrypting if necessary),
// by combining the code used in getobject and putobject
let source_stream = full_object_byte_stream(
ctx.garage.clone(),
source_version,
source_version_data,
source_encryption,
);

save_stream(
&ctx,
dest_object_meta,
dest_encryption,
source_stream.map_err(|e| Error::from(GarageError::from(e))),
&dest_key.to_string(),
checksum_mode,
Ok(Response::builder()
.header("Content-Type", "application/xml")
.header("x-amz-version-id", hex::encode(new_uuid))
.header(
"x-amz-copy-source-version-id",
hex::encode(source_version.uuid),
)
.await
.body(string_body(xml))?)
}

// -------- UploadPartCopy ---------

pub async fn handle_upload_part_copy(
ctx: ReqCtx,
garage: Arc<Garage>,
api_key: &Key,
req: &Request<ReqBody>,
dest_bucket_id: Uuid,
dest_key: &str,
part_number: u64,
upload_id: &str,
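// The x-amz-metadata-directive handling above distinguishes COPY (the
// default: keep the source's metadata) from REPLACE (take metadata from
// this request). A hypothetical client-side request showing the REPLACE
// case (bucket, key and metadata values are made up; the header names are
// the standard S3 ones):
fn example_replace_copy() -> Result<http::Request<()>, http::Error> {
    http::Request::builder()
        .method("PUT")
        .uri("/dest-bucket/dest-key")
        .header("x-amz-copy-source", "/source-bucket/source-key")
        .header("x-amz-metadata-directive", "REPLACE")
        .header("x-amz-meta-example", "new-value") // replaces source metadata
        .body(())
}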
@@ -339,37 +194,17 @@ pub async fn handle_upload_part_copy(
let dest_upload_id = multipart::decode_upload_id(upload_id)?;

let dest_key = dest_key.to_string();
let (source_object, (_, dest_version, mut dest_mpu)) = futures::try_join!(
get_copy_source(&ctx, req),
multipart::get_upload(&ctx, &dest_key, &dest_upload_id)
let (source_object, (_, _, mut dest_mpu)) = futures::try_join!(
get_copy_source(&garage, api_key, req),
multipart::get_upload(&garage, &dest_bucket_id, &dest_key, &dest_upload_id)
)?;

let ReqCtx { garage, .. } = ctx;

let (source_object_version, source_version_data, source_version_meta) =
extract_source_info(&source_object)?;

// Check precondition on source, e.g. x-amz-copy-source-if-match
copy_precondition.check(source_object_version, &source_version_meta.etag)?;

// Determine encryption parameters
let (source_encryption, _) = EncryptionParams::check_decrypt_for_copy_source(
&garage,
req.headers(),
&source_version_meta.encryption,
)?;
let (dest_object_encryption, dest_object_checksum_algorithm) = match dest_version.state {
ObjectVersionState::Uploading {
encryption,
checksum_algorithm,
..
} => (encryption, checksum_algorithm),
_ => unreachable!(),
};
let (dest_encryption, _) =
EncryptionParams::check_decrypt(&garage, req.headers(), &dest_object_encryption)?;
let same_encryption = EncryptionParams::is_same(&source_encryption, &dest_encryption);

// Check that the source range is valid
let source_range = match req.headers().get("x-amz-copy-source-range") {
Some(range) => {

@@ -391,7 +226,9 @@ pub async fn handle_upload_part_copy(
};

// Check that the source version is not inlined
if matches!(source_version_data, ObjectVersionData::Inline(_, _)) {
match source_version_data {
ObjectVersionData::DeleteMarker => unreachable!(),
ObjectVersionData::Inline(_meta, _bytes) => {
// This is only for small files, we don't bother handling this.
// (in AWS, UploadPartCopy only works for parts of at least 5MB,
// which is never the case for an inline object)

@@ -399,8 +236,11 @@ pub async fn handle_upload_part_copy(
"Source object is too small (minimum part size is 5Mb)",
));
}
ObjectVersionData::FirstBlock(_meta, _first_block_hash) => (),
};

// Fetch source version with its block list
// Fetch source version with its block list,
// and destination version to check part hasn't yet been uploaded
let source_version = garage
.version_table
.get(&source_object_version.uuid, &EmptyKey)

@@ -410,9 +250,7 @@ pub async fn handle_upload_part_copy(
// We want to reuse blocks from the source version as much as possible.
// However, we still need to get the data from these blocks
// because we need to know it to calculate the MD5sum of the part
// which is used as its ETag. For encrypted sources or destinations,
// we must always read(+decrypt) and then write(+encrypt), so we
// can never reuse data blocks as is.
// which is used as its ETag.

// First, calculate what blocks we want to keep,
// and the subrange of the block to take, if the bounds of the

@@ -461,9 +299,7 @@ pub async fn handle_upload_part_copy(
dest_mpu_part_key,
MpuPart {
version: dest_version_id,
// These are all filled in later (bottom of this function)
etag: None,
checksum: None,
size: None,
},
);

@@ -476,55 +312,32 @@ pub async fn handle_upload_part_copy(
},
false,
);
// write an empty version now to be the parent of the block_ref entries
garage.version_table.insert(&dest_version).await?;

// Now, actually copy the blocks
let mut checksummer = Checksummer::init(&Default::default(), !dest_encryption.is_encrypted())
.add(dest_object_checksum_algorithm);
let mut md5hasher = Md5::new();

// First, create a stream that is able to read the source blocks
// and extract the subrange if necessary.
// The second returned value is an Option<Hash>, that is Some
// if and only if the block returned is a block that already existed
// in the Garage data store and can be reused as-is instead of having
// to save it again. This excludes encrypted source blocks that we had
// to decrypt.
// in the Garage data store (thus we don't need to save it again).
let garage2 = garage.clone();
let order_stream = OrderTag::stream();
let source_blocks = stream::iter(blocks_to_copy)
.enumerate()
.map(|(i, (block_hash, range_to_copy))| {
.flat_map(|(i, (block_hash, range_to_copy))| {
let garage3 = garage2.clone();
async move {
let stream = source_encryption
.get_block(&garage3, &block_hash, Some(order_stream.order(i as u64)))
stream::once(async move {
let data = garage3
.block_manager
.rpc_get_block(&block_hash, Some(order_stream.order(i as u64)))
.await?;
let data = read_stream_to_end(stream).await?.into_bytes();
// For each item, we return a tuple of:
// 1. the full data block (decrypted)
// 2. an Option<Hash> that indicates the hash of the block in the block store,
//    only if it can be re-used as-is in the copied object
match range_to_copy {
Some(r) => {
// If we are taking a subslice of the data, we cannot reuse the block as-is
Ok((data.slice(r), None))
}
None if same_encryption => {
// If the data is unencrypted before & after, or if we are using
// the same encryption key, we can reuse the stored block, no need
// to re-send it to storage nodes.
Ok((data, Some(block_hash)))
}
None => {
// If we are decrypting / (re)encrypting with different keys,
// we cannot reuse the block as-is
Ok((data, None))
}
}
Some(r) => Ok((data.slice(r), None)),
None => Ok((data, Some(block_hash))),
}
})
.buffered(2)
})
.peekable();

// The defragmenter is a custom stream (defined below) that concatenates
@@ -532,39 +345,22 @@ pub async fn handle_upload_part_copy(
// It returns a series of (Vec<u8>, Option<Hash>).
// When it is done, it returns an empty vec.
// Same as the previous iterator, the Option is Some(_) if and only if
// it's an existing block of the Garage data store that can be reused.
// it's an existing block of the Garage data store.
let mut defragmenter = Defragmenter::new(garage.config.block_size, Box::pin(source_blocks));

let mut current_offset = 0;
let mut next_block = defragmenter.next().await?;
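// Defragmenter itself is defined further down in the file and is not part
// of the hunks shown. A simplified sketch of one defragmentation step under
// the contract described in the comments above (the buffering threshold and
// exact types are assumptions, not the committed implementation):
use bytes::Bytes;
use futures::{Stream, StreamExt};

async fn next_defragmented<S>(
    block_size: usize,
    input: &mut S,
) -> Result<(Bytes, Option<Hash>), Error>
where
    S: Stream<Item = Result<(Bytes, Option<Hash>), Error>> + Unpin,
{
    let mut buf: Vec<u8> = vec![];
    while buf.len() < block_size / 2 {
        match input.next().await {
            Some(item) => {
                let (data, hash) = item?;
                if buf.is_empty() && hash.is_some() {
                    // a whole reusable block passes through unchanged
                    return Ok((data, hash));
                }
                buf.extend_from_slice(&data); // concatenate small fragments
            }
            None => break,
        }
    }
    // a concatenation is never an existing block of the data store, so the
    // reuse hint is dropped; empty Bytes signals the end of the stream
    Ok((Bytes::from(buf), None))
}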
// TODO: this could be optimized similarly to read_and_put_blocks;
// low priority because UploadPartCopy is rarely used
loop {
let (data, existing_block_hash) = next_block;
if data.is_empty() {
break;
}

let data_len = data.len() as u64;
md5hasher.update(&data[..]);

let (checksummer_updated, (data_to_upload, final_hash)) =
tokio::task::spawn_blocking(move || {
checksummer.update(&data[..]);

let tup = match existing_block_hash {
Some(hash) if same_encryption => (None, hash),
_ => {
let data_enc = dest_encryption.encrypt_block(data)?;
let hash = blake2sum(&data_enc);
(Some(data_enc), hash)
}
};
Ok::<_, Error>((checksummer, tup))
})
.await
.unwrap()?;
checksummer = checksummer_updated;
let must_upload = existing_block_hash.is_none();
let final_hash = existing_block_hash.unwrap_or_else(|| blake2sum(&data[..]));

dest_version.blocks.clear();
dest_version.blocks.put(

@@ -574,10 +370,10 @@ pub async fn handle_upload_part_copy(
},
VersionBlock {
hash: final_hash,
size: data_len,
size: data.len() as u64,
},
);
current_offset += data_len;
current_offset += data.len() as u64;

let block_ref = BlockRef {
block: final_hash,

@@ -585,34 +381,33 @@ pub async fn handle_upload_part_copy(
deleted: false.into(),
};

let (_, _, _, next) = futures::try_join!(
let garage2 = garage.clone();
let res = futures::try_join!(
// Thing 1: if the block is not exactly a block that existed before,
// we need to insert that data as a new block.
async {
if let Some(final_data) = data_to_upload {
garage
.block_manager
.rpc_put_block(final_hash, final_data, dest_encryption.is_encrypted(), None)
.await
async move {
if must_upload {
garage2.block_manager.rpc_put_block(final_hash, data).await
} else {
Ok(())
}
},
async {
// Thing 2: we need to insert the block in the version
garage.version_table.insert(&dest_version),
garage.version_table.insert(&dest_version).await?;
// Thing 3: we need to add a block reference
garage.block_ref_table.insert(&block_ref),
// Thing 4: we need to read the next block
garage.block_ref_table.insert(&block_ref).await
},
// Thing 4: we need to prefetch the next block
defragmenter.next(),
)?;
next_block = next;
next_block = res.2;
}

assert_eq!(current_offset, source_range.length);

let checksums = checksummer.finalize();
let etag = dest_encryption.etag_from_md5(&checksums.md5);
let checksum = checksums.extract(dest_object_checksum_algorithm);
let data_md5sum = md5hasher.finalize();
let etag = hex::encode(data_md5sum);

// Put the part's ETag in the version table
dest_mpu.parts.put(

@@ -620,7 +415,6 @@ pub async fn handle_upload_part_copy(
MpuPart {
version: dest_version_id,
etag: Some(etag.clone()),
checksum,
size: Some(current_offset),
},
);

@@ -633,21 +427,20 @@ pub async fn handle_upload_part_copy(
last_modified: s3_xml::Value(msec_to_rfc3339(source_object_version.timestamp)),
})?;

let mut resp = Response::builder()
Ok(Response::builder()
.header("Content-Type", "application/xml")
.header(
"x-amz-copy-source-version-id",
hex::encode(source_object_version.uuid),
);
dest_encryption.add_response_headers(&mut resp);
Ok(resp.body(string_body(resp_xml))?)
)
.body(string_body(resp_xml))?)
}

async fn get_copy_source(ctx: &ReqCtx, req: &Request<ReqBody>) -> Result<Object, Error> {
let ReqCtx {
garage, api_key, ..
} = ctx;

async fn get_copy_source(
garage: &Garage,
api_key: &Key,
req: &Request<ReqBody>,
) -> Result<Object, Error> {
let copy_source = req.headers().get("x-amz-copy-source").unwrap().to_str()?;
let copy_source = percent_encoding::percent_decode_str(copy_source).decode_utf8()?;
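// What get_copy_source has to undo: clients send the copy source as a
// percent-encoded "<bucket>/<key>" in the x-amz-copy-source header
// (illustrative values; percent_decode_str is the same call used above):
fn example_decode_copy_source() -> Result<(), std::str::Utf8Error> {
    let header = "my-bucket/path%20with%20spaces/object.bin";
    let decoded = percent_encoding::percent_decode_str(header).decode_utf8()?;
    assert_eq!(decoded, "my-bucket/path with spaces/object.bin");
    let (_bucket, _key) = decoded.split_once('/').unwrap();
    Ok(())
}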
@@ -21,13 +21,16 @@ use crate::s3::error::*;
use crate::s3::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
use crate::signature::verify_signed_content;

use garage_model::bucket_table::{Bucket, BucketParams, CorsRule as GarageCorsRule};
use garage_model::bucket_table::{Bucket, CorsRule as GarageCorsRule};
use garage_model::garage::Garage;
use garage_util::data::*;

pub async fn handle_get_cors(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
let ReqCtx { bucket_params, .. } = ctx;
if let Some(cors) = bucket_params.cors_config.get() {
pub async fn handle_get_cors(bucket: &Bucket) -> Result<Response<ResBody>, Error> {
let param = bucket
.params()
.ok_or_internal_error("Bucket should not be deleted at this point")?;

if let Some(cors) = param.cors_config.get() {
let wc = CorsConfiguration {
xmlns: (),
cors_rules: cors

@@ -47,18 +50,16 @@ pub async fn handle_get_cors(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
}
}

pub async fn handle_delete_cors(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
let ReqCtx {
garage,
bucket_id,
mut bucket_params,
..
} = ctx;
bucket_params.cors_config.update(None);
garage
.bucket_table
.insert(&Bucket::present(bucket_id, bucket_params))
.await?;
pub async fn handle_delete_cors(
garage: Arc<Garage>,
mut bucket: Bucket,
) -> Result<Response<ResBody>, Error> {
let param = bucket
.params_mut()
.ok_or_internal_error("Bucket should not be deleted at this point")?;

param.cors_config.update(None);
garage.bucket_table.insert(&bucket).await?;

Ok(Response::builder()
.status(StatusCode::NO_CONTENT)

@@ -66,33 +67,28 @@ pub async fn handle_delete_cors(ctx: ReqCtx) -> Result<Response<ResBody>, Error>
}

pub async fn handle_put_cors(
ctx: ReqCtx,
garage: Arc<Garage>,
mut bucket: Bucket,
req: Request<ReqBody>,
content_sha256: Option<Hash>,
) -> Result<Response<ResBody>, Error> {
let ReqCtx {
garage,
bucket_id,
mut bucket_params,
..
} = ctx;

let body = BodyExt::collect(req.into_body()).await?.to_bytes();

if let Some(content_sha256) = content_sha256 {
verify_signed_content(content_sha256, &body[..])?;
}

let param = bucket
.params_mut()
.ok_or_internal_error("Bucket should not be deleted at this point")?;

let conf: CorsConfiguration = from_reader(&body as &[u8])?;
conf.validate()?;

bucket_params
param
.cors_config
.update(Some(conf.into_garage_cors_config()?));
garage
.bucket_table
.insert(&Bucket::present(bucket_id, bucket_params))
.await?;
garage.bucket_table.insert(&bucket).await?;

Ok(Response::builder()
.status(StatusCode::OK)

@@ -119,8 +115,7 @@ pub async fn handle_options_api(
let bucket_id = helper.resolve_global_bucket_name(&bn).await?;
if let Some(id) = bucket_id {
let bucket = garage.bucket_helper().get_existing_bucket(id).await?;
let bucket_params = bucket.state.into_option().unwrap();
handle_options_for_bucket(req, &bucket_params)
handle_options_for_bucket(req, &bucket)
} else {
// If there is a bucket name in the request, but that name
// does not correspond to a global alias for a bucket,

@@ -150,7 +145,7 @@ pub async fn handle_options_api(

pub fn handle_options_for_bucket(
req: &Request<IncomingBody>,
bucket_params: &BucketParams,
bucket: &Bucket,
) -> Result<Response<EmptyBody>, CommonError> {
let origin = req
.headers()

@@ -167,7 +162,7 @@ pub fn handle_options_for_bucket(
None => vec![],
};

if let Some(cors_config) = bucket_params.cors_config.get() {
if let Some(cors_config) = bucket.params().unwrap().cors_config.get() {
let matching_rule = cors_config
.iter()
.find(|rule| cors_rule_matches(rule, origin, request_method, request_headers.iter()));

@@ -186,10 +181,10 @@ pub fn handle_options_for_bucket(
}

pub fn find_matching_cors_rule<'a>(
bucket_params: &'a BucketParams,
bucket: &'a Bucket,
req: &Request<impl Body>,
) -> Result<Option<&'a GarageCorsRule>, Error> {
if let Some(cors_config) = bucket_params.cors_config.get() {
if let Some(cors_config) = bucket.params().unwrap().cors_config.get() {
if let Some(origin) = req.headers().get("Origin") {
let origin = origin.to_str()?;
let request_headers = match req.headers().get(ACCESS_CONTROL_REQUEST_HEADERS) {
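// cors_rule_matches is referenced above but not part of this diff; a
// minimal sketch of the predicate it needs to implement (the GarageCorsRule
// field names are assumptions inferred from how the rule is used here):
fn cors_rule_matches<'a>(
    rule: &GarageCorsRule,
    origin: &str,
    method: &str,
    mut headers: impl Iterator<Item = &'a str>,
) -> bool {
    rule.allow_origins.iter().any(|o| o == "*" || o == origin)
        && rule.allow_methods.iter().any(|m| m == "*" || m == method)
        && headers.all(|h| {
            rule.allow_headers
                .iter()
                .any(|ah| ah == "*" || ah.eq_ignore_ascii_case(h))
        })
}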
@@ -1,8 +1,11 @@
use std::sync::Arc;

use http_body_util::BodyExt;
use hyper::{Request, Response, StatusCode};

use garage_util::data::*;

use garage_model::garage::Garage;
use garage_model::s3::object_table::*;

use crate::helpers::*;

@@ -12,13 +15,14 @@ use crate::s3::put::next_timestamp;
use crate::s3::xml as s3_xml;
use crate::signature::verify_signed_content;

async fn handle_delete_internal(ctx: &ReqCtx, key: &str) -> Result<(Uuid, Uuid), Error> {
let ReqCtx {
garage, bucket_id, ..
} = ctx;
async fn handle_delete_internal(
garage: &Garage,
bucket_id: Uuid,
key: &str,
) -> Result<(Uuid, Uuid), Error> {
let object = garage
.object_table
.get(bucket_id, &key.to_string())
.get(&bucket_id, &key.to_string())
.await?
.ok_or(Error::NoSuchKey)?; // No need to delete

@@ -40,7 +44,7 @@ async fn handle_delete_internal(ctx: &ReqCtx, key: &str) -> Result<(Uuid, Uuid),
};

let object = Object::new(
*bucket_id,
bucket_id,
key.into(),
vec![ObjectVersion {
uuid: del_uuid,

@@ -54,8 +58,12 @@ async fn handle_delete_internal(ctx: &ReqCtx, key: &str) -> Result<(Uuid, Uuid),
Ok((deleted_version, del_uuid))
}

pub async fn handle_delete(ctx: ReqCtx, key: &str) -> Result<Response<ResBody>, Error> {
match handle_delete_internal(&ctx, key).await {
pub async fn handle_delete(
garage: Arc<Garage>,
bucket_id: Uuid,
key: &str,
) -> Result<Response<ResBody>, Error> {
match handle_delete_internal(&garage, bucket_id, key).await {
Ok(_) | Err(Error::NoSuchKey) => Ok(Response::builder()
.status(StatusCode::NO_CONTENT)
.body(empty_body())

@@ -65,7 +73,8 @@ pub async fn handle_delete(ctx: ReqCtx, key: &str) -> Result<Response<ResBody>,
}

pub async fn handle_delete_objects(
ctx: ReqCtx,
garage: Arc<Garage>,
bucket_id: Uuid,
req: Request<ReqBody>,
content_sha256: Option<Hash>,
) -> Result<Response<ResBody>, Error> {

@@ -82,7 +91,7 @@ pub async fn handle_delete_objects(
let mut ret_errors = Vec::new();

for obj in cmd.objects.iter() {
match handle_delete_internal(&ctx, &obj.key).await {
match handle_delete_internal(&garage, bucket_id, &obj.key).await {
Ok((deleted_version, delete_marker_version)) => {
if cmd.quiet {
continue;
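// For reference, the DeleteObjects body that `cmd` above is parsed from is
// the standard S3 Delete XML document (example keys are made up):
// <Delete>
//   <Quiet>true</Quiet>
//   <Object><Key>photos/2024/a.jpg</Key></Object>
//   <Object><Key>photos/2024/b.jpg</Key></Object>
// </Delete>
// With Quiet set, successful deletions are omitted from the response and
// only errors are reported, which is why the loop above can `continue`.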
@@ -1,595 +0,0 @@
use std::borrow::Cow;
use std::convert::TryInto;
use std::pin::Pin;

use aes_gcm::{
aead::stream::{DecryptorLE31, EncryptorLE31, StreamLE31},
aead::{Aead, AeadCore, KeyInit, OsRng},
aes::cipher::crypto_common::rand_core::RngCore,
aes::cipher::typenum::Unsigned,
Aes256Gcm, Key, Nonce,
};
use base64::prelude::*;
use bytes::Bytes;

use futures::stream::Stream;
use futures::task;
use tokio::io::BufReader;

use http::header::{HeaderMap, HeaderName, HeaderValue};

use garage_net::bytes_buf::BytesBuf;
use garage_net::stream::{stream_asyncread, ByteStream};
use garage_rpc::rpc_helper::OrderTag;
use garage_util::data::Hash;
use garage_util::error::Error as GarageError;
use garage_util::migrate::Migrate;

use garage_model::garage::Garage;
use garage_model::s3::object_table::{ObjectVersionEncryption, ObjectVersionMetaInner};

use crate::common_error::*;
use crate::s3::checksum::Md5Checksum;
use crate::s3::error::Error;

const X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM: HeaderName =
HeaderName::from_static("x-amz-server-side-encryption-customer-algorithm");
const X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY: HeaderName =
HeaderName::from_static("x-amz-server-side-encryption-customer-key");
const X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5: HeaderName =
HeaderName::from_static("x-amz-server-side-encryption-customer-key-md5");

const X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM: HeaderName =
HeaderName::from_static("x-amz-copy-source-server-side-encryption-customer-algorithm");
const X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY: HeaderName =
HeaderName::from_static("x-amz-copy-source-server-side-encryption-customer-key");
const X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5: HeaderName =
HeaderName::from_static("x-amz-copy-source-server-side-encryption-customer-key-md5");

const CUSTOMER_ALGORITHM_AES256: &[u8] = b"AES256";

type Md5Output = md5::digest::Output<md5::Md5Core>;

type StreamNonceSize = aes_gcm::aead::stream::NonceSize<Aes256Gcm, StreamLE31<Aes256Gcm>>;

// Data blocks are encrypted in smaller chunks of 4096 bytes,
// so that data can be streamed when reading.
// This size has to be known and has to be constant, or data won't be
// readable anymore. DO NOT CHANGE THIS VALUE.
const STREAM_ENC_PLAIN_CHUNK_SIZE: usize = 0x1000; // 4096 bytes
const STREAM_ENC_CYPER_CHUNK_SIZE: usize = STREAM_ENC_PLAIN_CHUNK_SIZE + 16;
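// A minimal sketch of the chunked scheme the constants above define: each
// 4096-byte plaintext chunk gains a 16-byte GCM tag, giving
// STREAM_ENC_CYPER_CHUNK_SIZE bytes on the wire. Illustrative only; the
// deleted module also handles compression, nonce generation and framing.
use aes_gcm::aead::generic_array::GenericArray;

fn encrypt_block_sketch(
    key: &Key<Aes256Gcm>,
    nonce: &[u8; 8], // LE31 stream nonce; must be unique per block in real code
    plaintext: &[u8],
) -> Result<Vec<u8>, aes_gcm::aead::Error> {
    let mut enc = EncryptorLE31::from_aead(Aes256Gcm::new(key), GenericArray::from_slice(nonce));
    let mut chunks: Vec<&[u8]> = plaintext.chunks(STREAM_ENC_PLAIN_CHUNK_SIZE).collect();
    let last = chunks.pop().unwrap_or(&[]);
    let mut out = Vec::with_capacity(plaintext.len() + 16 * (chunks.len() + 1));
    for chunk in chunks {
        out.extend(enc.encrypt_next(chunk)?); // intermediate chunks bump the counter
    }
    out.extend(enc.encrypt_last(last)?); // final chunk sets the "last block" flag
    Ok(out)
}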
#[derive(Clone, Copy)]
pub enum EncryptionParams {
	Plaintext,
	SseC {
		client_key: Key<Aes256Gcm>,
		client_key_md5: Md5Output,
		compression_level: Option<i32>,
	},
}

impl EncryptionParams {
	pub fn is_encrypted(&self) -> bool {
		!matches!(self, Self::Plaintext)
	}

	pub fn is_same(a: &Self, b: &Self) -> bool {
		let relevant_info = |x: &Self| match x {
			Self::Plaintext => None,
			Self::SseC {
				client_key,
				compression_level,
				..
			} => Some((*client_key, compression_level.is_some())),
		};
		relevant_info(a) == relevant_info(b)
	}

	pub fn new_from_headers(
		garage: &Garage,
		headers: &HeaderMap,
	) -> Result<EncryptionParams, Error> {
		let key = parse_request_headers(
			headers,
			&X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM,
			&X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY,
			&X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5,
		)?;
		match key {
			Some((client_key, client_key_md5)) => Ok(EncryptionParams::SseC {
				client_key,
				client_key_md5,
				compression_level: garage.config.compression_level,
			}),
			None => Ok(EncryptionParams::Plaintext),
		}
	}

	pub fn add_response_headers(&self, resp: &mut http::response::Builder) {
		if let Self::SseC { client_key_md5, .. } = self {
			let md5 = BASE64_STANDARD.encode(&client_key_md5);

			resp.headers_mut().unwrap().insert(
				X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM,
				HeaderValue::from_bytes(CUSTOMER_ALGORITHM_AES256).unwrap(),
			);
			resp.headers_mut().unwrap().insert(
				X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5,
				HeaderValue::from_bytes(md5.as_bytes()).unwrap(),
			);
		}
	}

	pub fn check_decrypt<'a>(
		garage: &Garage,
		headers: &HeaderMap,
		obj_enc: &'a ObjectVersionEncryption,
	) -> Result<(Self, Cow<'a, ObjectVersionMetaInner>), Error> {
		let key = parse_request_headers(
			headers,
			&X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM,
			&X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY,
			&X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5,
		)?;
		Self::check_decrypt_common(garage, key, obj_enc)
	}

	pub fn check_decrypt_for_copy_source<'a>(
		garage: &Garage,
		headers: &HeaderMap,
		obj_enc: &'a ObjectVersionEncryption,
	) -> Result<(Self, Cow<'a, ObjectVersionMetaInner>), Error> {
		let key = parse_request_headers(
			headers,
			&X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM,
			&X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY,
			&X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5,
		)?;
		Self::check_decrypt_common(garage, key, obj_enc)
	}

	fn check_decrypt_common<'a>(
		garage: &Garage,
		key: Option<(Key<Aes256Gcm>, Md5Output)>,
		obj_enc: &'a ObjectVersionEncryption,
	) -> Result<(Self, Cow<'a, ObjectVersionMetaInner>), Error> {
		match (key, &obj_enc) {
			(
				Some((client_key, client_key_md5)),
				ObjectVersionEncryption::SseC { inner, compressed },
			) => {
				let enc = Self::SseC {
					client_key,
					client_key_md5,
					compression_level: if *compressed {
						Some(garage.config.compression_level.unwrap_or(1))
					} else {
						None
					},
				};
				let plaintext = enc.decrypt_blob(&inner)?;
				let inner = ObjectVersionMetaInner::decode(&plaintext)
					.ok_or_internal_error("Could not decode encrypted metadata")?;
				Ok((enc, Cow::Owned(inner)))
			}
			(None, ObjectVersionEncryption::Plaintext { inner }) => {
				Ok((Self::Plaintext, Cow::Borrowed(inner)))
			}
			(_, ObjectVersionEncryption::SseC { .. }) => {
				Err(Error::bad_request("Object is encrypted"))
			}
			(Some(_), _) => {
				// TODO: should this be an OK scenario?
				Err(Error::bad_request("Trying to decrypt a plaintext object"))
			}
		}
	}

	pub fn encrypt_meta(
		&self,
		meta: ObjectVersionMetaInner,
	) -> Result<ObjectVersionEncryption, Error> {
		match self {
			Self::SseC {
				compression_level, ..
			} => {
				let plaintext = meta.encode().map_err(GarageError::from)?;
				let ciphertext = self.encrypt_blob(&plaintext)?;
				Ok(ObjectVersionEncryption::SseC {
					inner: ciphertext.into_owned(),
					compressed: compression_level.is_some(),
				})
			}
			Self::Plaintext => Ok(ObjectVersionEncryption::Plaintext { inner: meta }),
		}
	}

	// ---- generating object Etag values ----
	pub fn etag_from_md5(&self, md5sum: &Option<Md5Checksum>) -> String {
		match self {
			Self::Plaintext => md5sum
				.map(|x| hex::encode(&x[..]))
				.expect("md5 digest should have been computed"),
			Self::SseC { .. } => {
				// AWS specifies that for encrypted objects, the Etag is not
				// the md5sum of the data, but doesn't say what it is.
				// So we just put some random bytes.
				let mut random = [0u8; 16];
				OsRng.fill_bytes(&mut random);
				hex::encode(&random)
			}
		}
	}

	// ---- generic functions for encrypting / decrypting blobs ----
	// Prepends a randomly-generated nonce to the encrypted value.
	// This is used for encrypting object metadata and inlined data for small objects.
	// This does not compress anything.

	pub fn encrypt_blob<'a>(&self, blob: &'a [u8]) -> Result<Cow<'a, [u8]>, Error> {
		match self {
			Self::SseC { client_key, .. } => {
				let cipher = Aes256Gcm::new(&client_key);
				let nonce = Aes256Gcm::generate_nonce(&mut OsRng);
				let ciphertext = cipher
					.encrypt(&nonce, blob)
					.ok_or_internal_error("Encryption failed")?;
				Ok(Cow::Owned([nonce.to_vec(), ciphertext].concat()))
			}
			Self::Plaintext => Ok(Cow::Borrowed(blob)),
		}
	}

	pub fn decrypt_blob<'a>(&self, blob: &'a [u8]) -> Result<Cow<'a, [u8]>, Error> {
		match self {
			Self::SseC { client_key, .. } => {
				let cipher = Aes256Gcm::new(&client_key);
				let nonce_size = <Aes256Gcm as AeadCore>::NonceSize::to_usize();
				let nonce = Nonce::from_slice(
					blob.get(..nonce_size)
						.ok_or_internal_error("invalid encrypted data")?,
				);
				let plaintext = cipher
					.decrypt(nonce, &blob[nonce_size..])
					.ok_or_bad_request(
						"Invalid encryption key, could not decrypt object metadata.",
					)?;
				Ok(Cow::Owned(plaintext))
			}
			Self::Plaintext => Ok(Cow::Borrowed(blob)),
		}
	}
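
	// Sketch of the blob format produced above (sizes assume AES-256-GCM's
	// standard 12-byte nonce): encrypt_blob(x) = nonce || AES-GCM(x), so
	// decrypt_blob(encrypt_blob(x)) == x, and any input shorter than the
	// nonce is rejected as "invalid encrypted data".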

	// ---- function for encrypting / decrypting byte streams ----

	/// Get a data block from the storage node, and decrypt+decompress it
	/// if necessary. If object is plaintext, just get it without any processing.
	pub async fn get_block(
		&self,
		garage: &Garage,
		hash: &Hash,
		order: Option<OrderTag>,
	) -> Result<ByteStream, GarageError> {
		let raw_block = garage
			.block_manager
			.rpc_get_block_streaming(hash, order)
			.await?;
		Ok(self.decrypt_block_stream(raw_block))
	}

	pub fn decrypt_block_stream(&self, stream: ByteStream) -> ByteStream {
		match self {
			Self::Plaintext => stream,
			Self::SseC {
				client_key,
				compression_level,
				..
			} => {
				let plaintext = DecryptStream::new(stream, *client_key);
				if compression_level.is_some() {
					let reader = stream_asyncread(Box::pin(plaintext));
					let reader = BufReader::new(reader);
					let reader = async_compression::tokio::bufread::ZstdDecoder::new(reader);
					Box::pin(tokio_util::io::ReaderStream::new(reader))
				} else {
					Box::pin(plaintext)
				}
			}
		}
	}
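
	// Note on layering: encrypt_block() below compresses first and encrypts
	// second, so the stored format is encrypt(zstd(plaintext)). This is why
	// decrypt_block_stream() decrypts first and only then pipes the result
	// through a zstd decoder when a compression level was recorded.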

	/// Encrypt a data block if encryption is set, for use before
	/// putting the data blocks into storage
	pub fn encrypt_block(&self, block: Bytes) -> Result<Bytes, Error> {
		match self {
			Self::Plaintext => Ok(block),
			Self::SseC {
				client_key,
				compression_level,
				..
			} => {
				let block = if let Some(level) = compression_level {
					Cow::Owned(
						garage_block::zstd_encode(block.as_ref(), *level)
							.ok_or_internal_error("failed to compress data block")?,
					)
				} else {
					Cow::Borrowed(block.as_ref())
				};

				let mut ret = Vec::with_capacity(block.len() + 32 + block.len() / 64);

				let mut nonce: Nonce<StreamNonceSize> = Default::default();
				OsRng.fill_bytes(&mut nonce);
				ret.extend_from_slice(nonce.as_slice());

				let mut cipher = EncryptorLE31::<Aes256Gcm>::new(&client_key, &nonce);
				let mut iter = block.chunks(STREAM_ENC_PLAIN_CHUNK_SIZE).peekable();

				if iter.peek().is_none() {
					// Empty stream: we encrypt an empty last chunk
					let chunk_enc = cipher
						.encrypt_last(&[][..])
						.ok_or_internal_error("failed to encrypt chunk")?;
					ret.extend_from_slice(&chunk_enc);
				} else {
					loop {
						let chunk = iter.next().unwrap();
						if iter.peek().is_some() {
							let chunk_enc = cipher
								.encrypt_next(chunk)
								.ok_or_internal_error("failed to encrypt chunk")?;
							assert_eq!(chunk.len(), STREAM_ENC_PLAIN_CHUNK_SIZE);
							assert_eq!(chunk_enc.len(), STREAM_ENC_CYPER_CHUNK_SIZE);
							ret.extend_from_slice(&chunk_enc);
						} else {
							// use encrypt_last for the last chunk
							let chunk_enc = cipher
								.encrypt_last(chunk)
								.ok_or_internal_error("failed to encrypt chunk")?;
							ret.extend_from_slice(&chunk_enc);
							break;
						}
					}
				}

				Ok(ret.into())
			}
		}
	}
}

fn parse_request_headers(
	headers: &HeaderMap,
	alg_header: &HeaderName,
	key_header: &HeaderName,
	md5_header: &HeaderName,
) -> Result<Option<(Key<Aes256Gcm>, Md5Output)>, Error> {
	let alg = headers.get(alg_header).map(HeaderValue::as_bytes);
	let key = headers.get(key_header).map(HeaderValue::as_bytes);
	let md5 = headers.get(md5_header).map(HeaderValue::as_bytes);

	match alg {
		Some(CUSTOMER_ALGORITHM_AES256) => {
			use md5::{Digest, Md5};

			let key_b64 =
				key.ok_or_bad_request("Missing server-side-encryption-customer-key header")?;
			let key_bytes: [u8; 32] = BASE64_STANDARD
				.decode(&key_b64)
				.ok_or_bad_request(
					"Invalid server-side-encryption-customer-key header: invalid base64",
				)?
				.try_into()
				.ok()
				.ok_or_bad_request(
					"Invalid server-side-encryption-customer-key header: invalid length",
				)?;

			let md5_b64 =
				md5.ok_or_bad_request("Missing server-side-encryption-customer-key-md5 header")?;
			let md5_bytes = BASE64_STANDARD.decode(&md5_b64).ok_or_bad_request(
				"Invalid server-side-encryption-customer-key-md5 header: invalid base64",
			)?;

			let mut hasher = Md5::new();
			hasher.update(&key_bytes[..]);
			let our_md5 = hasher.finalize();
			if our_md5.as_slice() != md5_bytes.as_slice() {
				return Err(Error::bad_request(
					"Server-side encryption client key MD5 checksum does not match",
				));
			}

			Ok(Some((key_bytes.into(), our_md5)))
		}
		Some(alg) => Err(Error::InvalidEncryptionAlgorithm(
			String::from_utf8_lossy(alg).into_owned(),
		)),
		None => {
			if key.is_some() || md5.is_some() {
				Err(Error::bad_request(
					"Unexpected server-side-encryption-customer-key{,-md5} header(s)",
				))
			} else {
				Ok(None)
			}
		}
	}
}
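
// Sketch of the request headers this parser accepts (hypothetical values,
// for illustration only):
//
//   x-amz-server-side-encryption-customer-algorithm: AES256
//   x-amz-server-side-encryption-customer-key: <base64 of the raw 32-byte key>
//   x-amz-server-side-encryption-customer-key-md5: <base64 of md5(raw key)>
//
// Note that the key-md5 header is the MD5 of the raw 32-byte key, not of its
// base64 encoding; otherwise the comparison against our_md5 above fails.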

// ---- encrypt & decrypt streams ----

#[pin_project::pin_project]
struct DecryptStream {
	#[pin]
	stream: ByteStream,
	done_reading: bool,
	buf: BytesBuf,
	key: Key<Aes256Gcm>,
	state: DecryptStreamState,
}

enum DecryptStreamState {
	Starting,
	Running(DecryptorLE31<Aes256Gcm>),
	Done,
}

impl DecryptStream {
	fn new(stream: ByteStream, key: Key<Aes256Gcm>) -> Self {
		Self {
			stream,
			done_reading: false,
			buf: BytesBuf::new(),
			key,
			state: DecryptStreamState::Starting,
		}
	}
}

impl Stream for DecryptStream {
	type Item = Result<Bytes, std::io::Error>;

	fn poll_next(
		self: Pin<&mut Self>,
		cx: &mut task::Context<'_>,
	) -> task::Poll<Option<Self::Item>> {
		use std::task::Poll;

		let mut this = self.project();

		// The first bytes of the stream should contain the starting nonce.
		// If we don't have a Running state, it means that we haven't
		// yet read the nonce.
		while matches!(this.state, DecryptStreamState::Starting) {
			let nonce_size = StreamNonceSize::to_usize();
			if let Some(nonce) = this.buf.take_exact(nonce_size) {
				let nonce = Nonce::from_slice(nonce.as_ref());
				*this.state = DecryptStreamState::Running(DecryptorLE31::new(&this.key, nonce));
				break;
			}

			match futures::ready!(this.stream.as_mut().poll_next(cx)) {
				Some(Ok(bytes)) => {
					this.buf.extend(bytes);
				}
				Some(Err(e)) => {
					return Poll::Ready(Some(Err(e)));
				}
				None => {
					return Poll::Ready(Some(Err(std::io::Error::new(
						std::io::ErrorKind::UnexpectedEof,
						"Decrypt: unexpected EOF, could not read nonce",
					))));
				}
			}
		}

		// Read at least one byte more than the encrypted chunk size
		// (if possible), so that we know if we are decrypting the
		// last chunk or not.
		while !*this.done_reading && this.buf.len() <= STREAM_ENC_CYPER_CHUNK_SIZE {
			match futures::ready!(this.stream.as_mut().poll_next(cx)) {
				Some(Ok(bytes)) => {
					this.buf.extend(bytes);
				}
				Some(Err(e)) => {
					return Poll::Ready(Some(Err(e)));
				}
				None => {
					*this.done_reading = true;
					break;
				}
			}
		}

		if matches!(this.state, DecryptStreamState::Done) {
			if !this.buf.is_empty() {
				return Poll::Ready(Some(Err(std::io::Error::new(
					std::io::ErrorKind::Other,
					"Decrypt: unexpected bytes after last encrypted chunk",
				))));
			}
			return Poll::Ready(None);
		}

		let res = if this.buf.len() > STREAM_ENC_CYPER_CHUNK_SIZE {
			// we have strictly more bytes than the encrypted chunk size,
			// so we know this is not the last chunk
			let DecryptStreamState::Running(ref mut cipher) = this.state else {
				unreachable!()
			};
			let chunk = this.buf.take_exact(STREAM_ENC_CYPER_CHUNK_SIZE).unwrap();
			let chunk_dec = cipher.decrypt_next(chunk.as_ref());
			if let Ok(c) = &chunk_dec {
				assert_eq!(c.len(), STREAM_ENC_PLAIN_CHUNK_SIZE);
			}
			chunk_dec
		} else {
			// We have one encrypted chunk size or less, even though we tried
			// to read more, so this is the last chunk. Decrypt using the
			// appropriate decrypt_last() function that then destroys the cipher.
			let state = std::mem::replace(this.state, DecryptStreamState::Done);
			let DecryptStreamState::Running(cipher) = state else {
				unreachable!()
			};
			let chunk = this.buf.take_all();
			cipher.decrypt_last(chunk.as_ref())
		};

		match res {
			Ok(bytes) if bytes.is_empty() => Poll::Ready(None),
			Ok(bytes) => Poll::Ready(Some(Ok(bytes.into()))),
			Err(_) => Poll::Ready(Some(Err(std::io::Error::new(
				std::io::ErrorKind::Other,
				"Decryption failed",
			)))),
		}
	}
}
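
// Reading aid for the state machine above (no new behavior):
//   Starting: buffer incoming bytes until the stream nonce can be taken;
//   Running:  while strictly more than one encrypted chunk is buffered,
//             decrypt full STREAM_ENC_CYPER_CHUNK_SIZE chunks with
//             decrypt_next(); once the tail fits in a single chunk, hand
//             it to decrypt_last(), which consumes the cipher;
//   Done:     any bytes left over after the last chunk are an error.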

#[cfg(test)]
mod tests {
	use super::*;

	use futures::stream::StreamExt;
	use garage_net::stream::read_stream_to_end;

	fn stream() -> ByteStream {
		Box::pin(
			futures::stream::iter(16usize..1024)
				.map(|i| Ok(Bytes::from(vec![(i % 256) as u8; (i * 37) % 1024]))),
		)
	}

	async fn test_block_enc(compression_level: Option<i32>) {
		let enc = EncryptionParams::SseC {
			client_key: Aes256Gcm::generate_key(&mut OsRng),
			client_key_md5: Default::default(), // not needed
			compression_level,
		};

		let block_plain = read_stream_to_end(stream()).await.unwrap().into_bytes();

		let block_enc = enc.encrypt_block(block_plain.clone()).unwrap();

		let block_dec =
			enc.decrypt_block_stream(Box::pin(futures::stream::once(async { Ok(block_enc) })));
		let block_dec = read_stream_to_end(block_dec).await.unwrap().into_bytes();

		assert_eq!(block_plain, block_dec);
		assert!(block_dec.len() > 128000);
	}

	#[tokio::test]
	async fn test_encrypt_block() {
		test_block_enc(None).await
	}

	#[tokio::test]
	async fn test_encrypt_block_compressed() {
		test_block_enc(Some(1)).await
	}
}

@ -65,14 +65,6 @@ pub enum Error {
	#[error(display = "Invalid HTTP range: {:?}", _0)]
	InvalidRange(#[error(from)] (http_range::HttpRangeParseError, u64)),

	/// The client sent an invalid server-side encryption algorithm
	#[error(display = "Invalid encryption algorithm: {:?}, should be AES256", _0)]
	InvalidEncryptionAlgorithm(String),

	/// The client sent an invalid digest
	#[error(display = "Invalid digest: {}", _0)]
	InvalidDigest(String),

	/// The client sent a request for an action not supported by garage
	#[error(display = "Unimplemented action: {}", _0)]
	NotImplemented(String),

@ -133,9 +125,7 @@ impl Error {
			Error::NotImplemented(_) => "NotImplemented",
			Error::InvalidXml(_) => "MalformedXML",
			Error::InvalidRange(_) => "InvalidRange",
			Error::InvalidDigest(_) => "InvalidDigest",
			Error::InvalidUtf8Str(_) | Error::InvalidUtf8String(_) => "InvalidRequest",
			Error::InvalidEncryptionAlgorithm(_) => "InvalidEncryptionAlgorithmError",
		}
	}
}

@ -153,8 +143,6 @@ impl ApiError for Error {
			| Error::InvalidPart
			| Error::InvalidPartOrder
			| Error::EntityTooSmall
			| Error::InvalidDigest(_)
			| Error::InvalidEncryptionAlgorithm(_)
			| Error::InvalidXml(_)
			| Error::InvalidUtf8Str(_)
			| Error::InvalidUtf8String(_) => StatusCode::BAD_REQUEST,

@ -1,12 +1,10 @@
//! Functions related to GET and HEAD requests
use std::collections::BTreeMap;
use std::convert::TryInto;
use std::sync::Arc;
use std::time::{Duration, UNIX_EPOCH};

use bytes::Bytes;
use futures::future;
use futures::stream::{self, Stream, StreamExt};
use futures::stream::{self, StreamExt};
use http::header::{
	ACCEPT_RANGES, CACHE_CONTROL, CONTENT_DISPOSITION, CONTENT_ENCODING, CONTENT_LANGUAGE,
	CONTENT_LENGTH, CONTENT_RANGE, CONTENT_TYPE, ETAG, EXPIRES, IF_MODIFIED_SINCE, IF_NONE_MATCH,

@ -27,8 +25,6 @@ use garage_model::s3::version_table::*;

use crate::helpers::*;
use crate::s3::api_server::ResBody;
use crate::s3::checksum::{add_checksum_response_headers, X_AMZ_CHECKSUM_MODE};
use crate::s3::encryption::EncryptionParams;
use crate::s3::error::*;

const X_AMZ_MP_PARTS_COUNT: &str = "x-amz-mp-parts-count";

@ -46,9 +42,6 @@ pub struct GetObjectOverrides {
fn object_headers(
	version: &ObjectVersion,
	version_meta: &ObjectVersionMeta,
	meta_inner: &ObjectVersionMetaInner,
	encryption: EncryptionParams,
	checksum_mode: ChecksumMode,
) -> http::response::Builder {
	debug!("Version meta: {:?}", version_meta);

@ -56,6 +49,7 @@ fn object_headers(
	let date_str = httpdate::fmt_http_date(date);

	let mut resp = Response::builder()
		.header(CONTENT_TYPE, version_meta.headers.content_type.to_string())
		.header(LAST_MODIFIED, date_str)
		.header(ACCEPT_RANGES, "bytes".to_string());

@ -63,30 +57,9 @@ fn object_headers(
		resp = resp.header(ETAG, format!("\"{}\"", version_meta.etag));
	}

	// When metadata is retrieved through the REST API, Amazon S3 combines headers that
	// have the same name (ignoring case) into a comma-delimited list.
	// See: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingMetadata.html
	let mut headers_by_name = BTreeMap::new();
	for (name, value) in meta_inner.headers.iter() {
		match headers_by_name.get_mut(name) {
			None => {
				headers_by_name.insert(name, vec![value.as_str()]);
	for (k, v) in version_meta.headers.other.iter() {
		resp = resp.header(k, v.to_string());
	}
			Some(headers) => {
				headers.push(value.as_str());
			}
		}
	}

	for (name, values) in headers_by_name {
		resp = resp.header(name, values.join(","));
	}
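
	// For example (hypothetical metadata, for illustration only): two stored
	// entries ("x-amz-meta-tag", "a") and ("x-amz-meta-tag", "b") come back
	// as the single header `x-amz-meta-tag: a,b`, matching the AWS behavior
	// referenced above.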

	if checksum_mode.enabled {
		resp = add_checksum_response_headers(&meta_inner.checksum, resp);
	}

	encryption.add_response_headers(&mut resp);

	resp
}

@ -158,16 +131,6 @@ fn try_answer_cached(

/// Handle HEAD request
pub async fn handle_head(
	ctx: ReqCtx,
	req: &Request<impl Body>,
	key: &str,
	part_number: Option<u64>,
) -> Result<Response<ResBody>, Error> {
	handle_head_without_ctx(ctx.garage, req, ctx.bucket_id, key, part_number).await
}

/// Handle HEAD request for website
pub async fn handle_head_without_ctx(
	garage: Arc<Garage>,
	req: &Request<impl Body>,
	bucket_id: Uuid,

@ -202,29 +165,17 @@ pub async fn handle_head_without_ctx(
		return Ok(cached);
	}

	let (encryption, headers) =
		EncryptionParams::check_decrypt(&garage, req.headers(), &version_meta.encryption)?;

	let checksum_mode = checksum_mode(&req);

	if let Some(pn) = part_number {
		match version_data {
			ObjectVersionData::Inline(_, _) => {
			ObjectVersionData::Inline(_, bytes) => {
				if pn != 1 {
					return Err(Error::InvalidPart);
				}
				let bytes_len = version_meta.size;
				Ok(object_headers(
					object_version,
					version_meta,
					&headers,
					encryption,
					checksum_mode,
				)
				.header(CONTENT_LENGTH, format!("{}", bytes_len))
				Ok(object_headers(object_version, version_meta)
					.header(CONTENT_LENGTH, format!("{}", bytes.len()))
				.header(
					CONTENT_RANGE,
					format!("bytes 0-{}/{}", bytes_len - 1, bytes_len),
					format!("bytes 0-{}/{}", bytes.len() - 1, bytes.len()),
				)
				.header(X_AMZ_MP_PARTS_COUNT, "1")
				.status(StatusCode::PARTIAL_CONTENT)

@ -240,13 +191,7 @@ pub async fn handle_head_without_ctx(
				let (part_offset, part_end) =
					calculate_part_bounds(&version, pn).ok_or(Error::InvalidPart)?;

				Ok(object_headers(
					object_version,
					version_meta,
					&headers,
					encryption,
					checksum_mode,
				)
				Ok(object_headers(object_version, version_meta)
				.header(CONTENT_LENGTH, format!("{}", part_end - part_offset))
				.header(
					CONTENT_RANGE,

@ -264,13 +209,7 @@ pub async fn handle_head_without_ctx(
			_ => unreachable!(),
		}
	} else {
		Ok(object_headers(
			object_version,
			version_meta,
			&headers,
			encryption,
			checksum_mode,
		)
		Ok(object_headers(object_version, version_meta)
		.header(CONTENT_LENGTH, format!("{}", version_meta.size))
		.status(StatusCode::OK)
		.body(empty_body())?)

@ -279,17 +218,6 @@ pub async fn handle_head_without_ctx(

/// Handle GET request
pub async fn handle_get(
	ctx: ReqCtx,
	req: &Request<impl Body>,
	key: &str,
	part_number: Option<u64>,
	overrides: GetObjectOverrides,
) -> Result<Response<ResBody>, Error> {
	handle_get_without_ctx(ctx.garage, req, ctx.bucket_id, key, part_number, overrides).await
}

/// Handle GET request
pub async fn handle_get_without_ctx(
	garage: Arc<Garage>,
	req: &Request<impl Body>,
	bucket_id: Uuid,

@ -324,55 +252,23 @@ pub async fn handle_get_without_ctx(
		return Ok(cached);
	}

	let (enc, headers) =
		EncryptionParams::check_decrypt(&garage, req.headers(), &last_v_meta.encryption)?;

	let checksum_mode = checksum_mode(&req);

	match (part_number, parse_range_header(req, last_v_meta.size)?) {
		(Some(_), Some(_)) => Err(Error::bad_request(
			"Cannot specify both partNumber and Range header",
		)),
		(Some(pn), None) => {
			handle_get_part(
				garage,
				last_v,
				last_v_data,
				last_v_meta,
				enc,
				&headers,
				pn,
				checksum_mode,
			)
			.await
		}
		(Some(pn), None) => handle_get_part(garage, last_v, last_v_data, last_v_meta, pn).await,
		(None, Some(range)) => {
			handle_get_range(
				garage,
				last_v,
				last_v_data,
				last_v_meta,
				enc,
				&headers,
				range.start,
				range.start + range.length,
				checksum_mode,
			)
			.await
		}
		(None, None) => {
			handle_get_full(
				garage,
				last_v,
				last_v_data,
				last_v_meta,
				enc,
				&headers,
				overrides,
				checksum_mode,
			)
			.await
		}
		(None, None) => handle_get_full(garage, last_v, last_v_data, last_v_meta, overrides).await,
	}
}

@ -381,43 +277,17 @@ async fn handle_get_full(
	version: &ObjectVersion,
	version_data: &ObjectVersionData,
	version_meta: &ObjectVersionMeta,
	encryption: EncryptionParams,
	meta_inner: &ObjectVersionMetaInner,
	overrides: GetObjectOverrides,
	checksum_mode: ChecksumMode,
) -> Result<Response<ResBody>, Error> {
	let mut resp_builder = object_headers(
		version,
		version_meta,
		&meta_inner,
		encryption,
		checksum_mode,
	)
	let mut resp_builder = object_headers(version, version_meta)
		.header(CONTENT_LENGTH, format!("{}", version_meta.size))
		.status(StatusCode::OK);
	getobject_override_headers(overrides, &mut resp_builder)?;

	let stream = full_object_byte_stream(garage, version, version_data, encryption);

	Ok(resp_builder.body(response_body_from_stream(stream))?)
}

pub fn full_object_byte_stream(
	garage: Arc<Garage>,
	version: &ObjectVersion,
	version_data: &ObjectVersionData,
	encryption: EncryptionParams,
) -> ByteStream {
	match &version_data {
		ObjectVersionData::DeleteMarker => unreachable!(),
		ObjectVersionData::Inline(_, bytes) => {
			let bytes = bytes.to_vec();
			Box::pin(futures::stream::once(async move {
				encryption
					.decrypt_blob(&bytes)
					.map(|x| Bytes::from(x.to_vec()))
					.map_err(std_error_from_read_error)
			}))
			Ok(resp_builder.body(bytes_body(bytes.to_vec().into()))?)
		}
		ObjectVersionData::FirstBlock(_, first_block_hash) => {
			let (tx, rx) = mpsc::channel::<ByteStream>(2);

@ -433,18 +303,19 @@ pub fn full_object_byte_stream(
				garage2.version_table.get(&version_uuid, &EmptyKey).await
			});

			let stream_block_0 = encryption
				.get_block(&garage, &first_block_hash, Some(order_stream.order(0)))
			let stream_block_0 = garage
				.block_manager
				.rpc_get_block_streaming(&first_block_hash, Some(order_stream.order(0)))
				.await?;

			tx.send(stream_block_0)
				.await
				.ok_or_message("channel closed")?;

			let version = version_fut.await.unwrap()?.ok_or(Error::NoSuchKey)?;
			for (i, (_, vb)) in version.blocks.items().iter().enumerate().skip(1) {
				let stream_block_i = encryption
					.get_block(&garage, &vb.hash, Some(order_stream.order(i as u64)))
				let stream_block_i = garage
					.block_manager
					.rpc_get_block_streaming(&vb.hash, Some(order_stream.order(i as u64)))
					.await?;
				tx.send(stream_block_i)
					.await

@ -462,7 +333,8 @@ pub fn full_object_byte_stream(
			}
		});

		Box::pin(tokio_stream::wrappers::ReceiverStream::new(rx).flatten())
		let body = response_body_from_block_stream(rx);
		Ok(resp_builder.body(body)?)
	}
}
}
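
// Design note on the FirstBlock path above: the mpsc channel carries whole
// ByteStreams (one per data block) with a buffer of 2, so the spawned task
// prefetches at most two block streams ahead of the consumer; flattening
// the receiver then yields the bytes of every block in order.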

@ -472,16 +344,13 @@ async fn handle_get_range(
	version: &ObjectVersion,
	version_data: &ObjectVersionData,
	version_meta: &ObjectVersionMeta,
	encryption: EncryptionParams,
	meta_inner: &ObjectVersionMetaInner,
	begin: u64,
	end: u64,
	checksum_mode: ChecksumMode,
) -> Result<Response<ResBody>, Error> {
	// Here we do not use getobject_override_headers because we don't
	// want to add any overridden headers (those should not be added
	// when returning PARTIAL_CONTENT)
	let resp_builder = object_headers(version, version_meta, meta_inner, encryption, checksum_mode)
	let resp_builder = object_headers(version, version_meta)
		.header(CONTENT_LENGTH, format!("{}", end - begin))
		.header(
			CONTENT_RANGE,

@ -492,7 +361,6 @@ async fn handle_get_range(
	match &version_data {
		ObjectVersionData::DeleteMarker => unreachable!(),
		ObjectVersionData::Inline(_meta, bytes) => {
			let bytes = encryption.decrypt_blob(&bytes)?;
			if end as usize <= bytes.len() {
				let body = bytes_body(bytes[begin as usize..end as usize].to_vec().into());
				Ok(resp_builder.body(body)?)

@ -509,8 +377,7 @@ async fn handle_get_range(
				.await?
				.ok_or(Error::NoSuchKey)?;

			let body =
				body_from_blocks_range(garage, encryption, version.blocks.items(), begin, end);
			let body = body_from_blocks_range(garage, version.blocks.items(), begin, end);
			Ok(resp_builder.body(body)?)
		}
	}

@ -521,28 +388,17 @@ async fn handle_get_part(
	object_version: &ObjectVersion,
	version_data: &ObjectVersionData,
	version_meta: &ObjectVersionMeta,
	encryption: EncryptionParams,
	meta_inner: &ObjectVersionMetaInner,
	part_number: u64,
	checksum_mode: ChecksumMode,
) -> Result<Response<ResBody>, Error> {
	// Same as for get_range, no getobject_override_headers
	let resp_builder = object_headers(
		object_version,
		version_meta,
		meta_inner,
		encryption,
		checksum_mode,
	)
	.status(StatusCode::PARTIAL_CONTENT);
	let resp_builder =
		object_headers(object_version, version_meta).status(StatusCode::PARTIAL_CONTENT);

	match version_data {
		ObjectVersionData::Inline(_, bytes) => {
			if part_number != 1 {
				return Err(Error::InvalidPart);
			}
			let bytes = encryption.decrypt_blob(&bytes)?;
			assert_eq!(bytes.len() as u64, version_meta.size);
			Ok(resp_builder
				.header(CONTENT_LENGTH, format!("{}", bytes.len()))
				.header(

@ -550,7 +406,7 @@ async fn handle_get_part(
					format!("bytes {}-{}/{}", 0, bytes.len() - 1, bytes.len()),
				)
				.header(X_AMZ_MP_PARTS_COUNT, "1")
				.body(bytes_body(bytes.into_owned().into()))?)
				.body(bytes_body(bytes.to_vec().into()))?)
		}
		ObjectVersionData::FirstBlock(_, _) => {
			let version = garage

@ -562,8 +418,7 @@ async fn handle_get_part(
			let (begin, end) =
				calculate_part_bounds(&version, part_number).ok_or(Error::InvalidPart)?;

			let body =
				body_from_blocks_range(garage, encryption, version.blocks.items(), begin, end);
			let body = body_from_blocks_range(garage, version.blocks.items(), begin, end);

			Ok(resp_builder
				.header(CONTENT_LENGTH, format!("{}", end - begin))

@ -616,23 +471,8 @@ fn calculate_part_bounds(v: &Version, part_number: u64) -> Option<(u64, u64)> {
	None
}

struct ChecksumMode {
	enabled: bool,
}

fn checksum_mode(req: &Request<impl Body>) -> ChecksumMode {
	ChecksumMode {
		enabled: req
			.headers()
			.get(X_AMZ_CHECKSUM_MODE)
			.map(|x| x == "ENABLED")
			.unwrap_or(false),
	}
}

fn body_from_blocks_range(
	garage: Arc<Garage>,
	encryption: EncryptionParams,
	all_blocks: &[(VersionBlockKey, VersionBlock)],
	begin: u64,
	end: u64,

@ -662,11 +502,12 @@ fn body_from_blocks_range(

	tokio::spawn(async move {
		match async {
			let garage = garage.clone();
			for (i, (block, block_offset)) in blocks.iter().enumerate() {
				let block_stream = encryption
					.get_block(&garage, &block.hash, Some(order_stream.order(i as u64)))
					.await?;
				let block_stream = block_stream
				let block_stream = garage
					.block_manager
					.rpc_get_block_streaming(&block.hash, Some(order_stream.order(i as u64)))
					.await?
					.scan(*block_offset, move |chunk_offset, chunk| {
						let r = match chunk {
							Ok(chunk_bytes) => {

@ -726,15 +567,9 @@ fn body_from_blocks_range(
}

fn response_body_from_block_stream(rx: mpsc::Receiver<ByteStream>) -> ResBody {
	let body_stream = tokio_stream::wrappers::ReceiverStream::new(rx).flatten();
	response_body_from_stream(body_stream)
}

fn response_body_from_stream<S>(stream: S) -> ResBody
where
	S: Stream<Item = Result<Bytes, std::io::Error>> + Send + Sync + 'static,
{
	let body_stream = stream.map(|x| {
	let body_stream = tokio_stream::wrappers::ReceiverStream::new(rx)
		.flatten()
		.map(|x| {
			x.map(hyper::body::Frame::data)
				.map_err(|e| Error::from(garage_util::error::Error::from(e)))
		});

@ -742,14 +577,9 @@ where
}

fn error_stream_item<E: std::fmt::Display>(e: E) -> ByteStream {
	Box::pin(stream::once(future::ready(Err(std_error_from_read_error(
		e,
	)))))
}

fn std_error_from_read_error<E: std::fmt::Display>(e: E) -> std::io::Error {
	std::io::Error::new(
	let err = std::io::Error::new(
		std::io::ErrorKind::Other,
		format!("Error while reading object data: {}", e),
	)
	format!("Error while getting object data: {}", e),
	);
	Box::pin(stream::once(future::ready(Err(err))))
}

@ -1,4 +1,5 @@
use quick_xml::de::from_reader;
use std::sync::Arc;

use http_body_util::BodyExt;
use hyper::{Request, Response, StatusCode};

@ -15,12 +16,15 @@ use garage_model::bucket_table::{
	parse_lifecycle_date, Bucket, LifecycleExpiration as GarageLifecycleExpiration,
	LifecycleFilter as GarageLifecycleFilter, LifecycleRule as GarageLifecycleRule,
};
use garage_model::garage::Garage;
use garage_util::data::*;

pub async fn handle_get_lifecycle(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
	let ReqCtx { bucket_params, .. } = ctx;
pub async fn handle_get_lifecycle(bucket: &Bucket) -> Result<Response<ResBody>, Error> {
	let param = bucket
		.params()
		.ok_or_internal_error("Bucket should not be deleted at this point")?;

	if let Some(lifecycle) = bucket_params.lifecycle_config.get() {
	if let Some(lifecycle) = param.lifecycle_config.get() {
		let wc = LifecycleConfiguration::from_garage_lifecycle_config(lifecycle);
		let xml = to_xml_with_header(&wc)?;
		Ok(Response::builder()

@ -34,18 +38,16 @@ pub async fn handle_get_lifecycle(ctx: ReqCtx) -> Result<Response<ResBody>, Erro
	}
}

pub async fn handle_delete_lifecycle(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
	let ReqCtx {
		garage,
		bucket_id,
		mut bucket_params,
		..
	} = ctx;
	bucket_params.lifecycle_config.update(None);
	garage
		.bucket_table
		.insert(&Bucket::present(bucket_id, bucket_params))
		.await?;
pub async fn handle_delete_lifecycle(
	garage: Arc<Garage>,
	mut bucket: Bucket,
) -> Result<Response<ResBody>, Error> {
	let param = bucket
		.params_mut()
		.ok_or_internal_error("Bucket should not be deleted at this point")?;

	param.lifecycle_config.update(None);
	garage.bucket_table.insert(&bucket).await?;

	Ok(Response::builder()
		.status(StatusCode::NO_CONTENT)

@ -53,33 +55,28 @@ pub async fn handle_delete_lifecycle(ctx: ReqCtx) -> Result<Response<ResBody>, E
}

pub async fn handle_put_lifecycle(
	ctx: ReqCtx,
	garage: Arc<Garage>,
	mut bucket: Bucket,
	req: Request<ReqBody>,
	content_sha256: Option<Hash>,
) -> Result<Response<ResBody>, Error> {
	let ReqCtx {
		garage,
		bucket_id,
		mut bucket_params,
		..
	} = ctx;

	let body = BodyExt::collect(req.into_body()).await?.to_bytes();

	if let Some(content_sha256) = content_sha256 {
		verify_signed_content(content_sha256, &body[..])?;
	}

	let param = bucket
		.params_mut()
		.ok_or_internal_error("Bucket should not be deleted at this point")?;

	let conf: LifecycleConfiguration = from_reader(&body as &[u8])?;
	let config = conf
		.validate_into_garage_lifecycle_config()
		.ok_or_bad_request("Invalid lifecycle configuration")?;

	bucket_params.lifecycle_config.update(Some(config));
	garage
		.bucket_table
		.insert(&Bucket::present(bucket_id, bucket_params))
		.await?;
	param.lifecycle_config.update(Some(config));
	garage.bucket_table.insert(&bucket).await?;

	Ok(Response::builder()
		.status(StatusCode::OK)
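
// A minimal body accepted by handle_put_lifecycle, following the standard S3
// lifecycle XML format (rule contents are illustrative, not from the diff):
//
//   <LifecycleConfiguration>
//     <Rule>
//       <ID>expire-logs</ID>
//       <Filter><Prefix>logs/</Prefix></Filter>
//       <Status>Enabled</Status>
//       <Expiration><Days>30</Days></Expiration>
//     </Rule>
//   </LifecycleConfiguration>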

@ -1,13 +1,15 @@
use std::collections::{BTreeMap, BTreeSet};
use std::iter::{Iterator, Peekable};
use std::sync::Arc;

use base64::prelude::*;
use hyper::{Request, Response};
use hyper::Response;

use garage_util::data::*;
use garage_util::error::Error as GarageError;
use garage_util::time::*;

use garage_model::garage::Garage;
use garage_model::s3::mpu_table::*;
use garage_model::s3::object_table::*;

@ -15,8 +17,7 @@ use garage_table::EnumerationOrder;

use crate::encoding::*;
use crate::helpers::*;
use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::encryption::EncryptionParams;
use crate::s3::api_server::ResBody;
use crate::s3::error::*;
use crate::s3::multipart as s3_multipart;
use crate::s3::xml as s3_xml;

@ -61,10 +62,9 @@ pub struct ListPartsQuery {
}

pub async fn handle_list(
	ctx: ReqCtx,
	garage: Arc<Garage>,
	query: &ListObjectsQuery,
) -> Result<Response<ResBody>, Error> {
	let ReqCtx { garage, .. } = &ctx;
	let io = |bucket, key, count| {
		let t = &garage.object_table;
		async move {

@ -167,11 +167,9 @@ pub async fn handle_list(
}

pub async fn handle_list_multipart_upload(
	ctx: ReqCtx,
	garage: Arc<Garage>,
	query: &ListMultipartUploadsQuery,
) -> Result<Response<ResBody>, Error> {
	let ReqCtx { garage, .. } = &ctx;

	let io = |bucket, key, count| {
		let t = &garage.object_table;
		async move {

@ -271,22 +269,15 @@ pub async fn handle_list_multipart_upload(
}

pub async fn handle_list_parts(
	ctx: ReqCtx,
	req: Request<ReqBody>,
	garage: Arc<Garage>,
	query: &ListPartsQuery,
) -> Result<Response<ResBody>, Error> {
	debug!("ListParts {:?}", query);

	let upload_id = s3_multipart::decode_upload_id(&query.upload_id)?;

	let (_, object_version, mpu) = s3_multipart::get_upload(&ctx, &query.key, &upload_id).await?;

	let object_encryption = match object_version.state {
		ObjectVersionState::Uploading { encryption, .. } => encryption,
		_ => unreachable!(),
	};
	let encryption_res =
		EncryptionParams::check_decrypt(&ctx.garage, req.headers(), &object_encryption);
	let (_, _, mpu) =
		s3_multipart::get_upload(&garage, &query.bucket_id, &query.key, &upload_id).await?;

	let (info, next) = fetch_part_info(query, &mpu)?;

@ -305,40 +296,11 @@ pub async fn handle_list_parts(
		is_truncated: s3_xml::Value(format!("{}", next.is_some())),
		parts: info
			.iter()
			.map(|part| {
				// hide checksum if object is encrypted and the decryption
				// keys are not provided
				let checksum = part.checksum.filter(|_| encryption_res.is_ok());
				s3_xml::PartItem {
			.map(|part| s3_xml::PartItem {
				etag: s3_xml::Value(format!("\"{}\"", part.etag)),
				last_modified: s3_xml::Value(msec_to_rfc3339(part.timestamp)),
				part_number: s3_xml::IntValue(part.part_number as i64),
				size: s3_xml::IntValue(part.size as i64),
				checksum_crc32: match &checksum {
					Some(ChecksumValue::Crc32(x)) => {
						Some(s3_xml::Value(BASE64_STANDARD.encode(&x)))
					}
					_ => None,
				},
				checksum_crc32c: match &checksum {
					Some(ChecksumValue::Crc32c(x)) => {
						Some(s3_xml::Value(BASE64_STANDARD.encode(&x)))
					}
					_ => None,
				},
				checksum_sha1: match &checksum {
					Some(ChecksumValue::Sha1(x)) => {
						Some(s3_xml::Value(BASE64_STANDARD.encode(&x)))
					}
					_ => None,
				},
				checksum_sha256: match &checksum {
					Some(ChecksumValue::Sha256(x)) => {
						Some(s3_xml::Value(BASE64_STANDARD.encode(&x)))
					}
					_ => None,
				},
			}
			})
			.collect(),

@ -384,7 +346,6 @@ struct PartInfo<'a> {
	timestamp: u64,
	part_number: u64,
	size: u64,
	checksum: Option<ChecksumValue>,
}

enum ExtractionResult {

@ -525,7 +486,6 @@ fn fetch_part_info<'a>(
				timestamp: pk.timestamp,
				etag,
				size,
				checksum: p.checksum,
			};
			match parts.last_mut() {
				Some(lastpart) if lastpart.part_number == pk.part_number => {

@ -984,14 +944,11 @@ mod tests {
			timestamp: TS,
			state: ObjectVersionState::Uploading {
				multipart: true,
				encryption: ObjectVersionEncryption::Plaintext {
					inner: ObjectVersionMetaInner {
						headers: vec![],
						checksum: None,
				headers: ObjectVersionHeaders {
					content_type: "text/plain".to_string(),
					other: BTreeMap::<String, String>::new(),
				},
					},
				checksum_algorithm: None,
			},
		}
	}

@ -1179,7 +1136,6 @@ mod tests {
					version: uuid,
					size: Some(3),
					etag: Some("etag1".into()),
					checksum: None,
				},
			),
			(

@ -1191,7 +1147,6 @@ mod tests {
					version: uuid,
					size: None,
					etag: None,
					checksum: None,
				},
			),
			(

@ -1203,7 +1158,6 @@ mod tests {
					version: uuid,
					size: Some(10),
					etag: Some("etag2".into()),
					checksum: None,
				},
			),
			(

@ -1215,7 +1169,6 @@ mod tests {
					version: uuid,
					size: Some(7),
					etag: Some("etag3".into()),
					checksum: None,
				},
			),
			(

@ -1227,7 +1180,6 @@ mod tests {
					version: uuid,
					size: Some(5),
					etag: Some("etag4".into()),
					checksum: None,
				},
			),
		];

@ -1266,14 +1218,12 @@ mod tests {
			PartInfo {
				etag: "etag1",
				timestamp: TS,
				part_number: 1,
				size: 3,
				checksum: None,
				size: 3
			},
			PartInfo {
				etag: "etag2",
				timestamp: TS,
				part_number: 3,
				checksum: None,
				size: 10
			},
		]

@ -1289,14 +1239,12 @@ mod tests {
			PartInfo {
				etag: "etag3",
				timestamp: TS,
				checksum: None,
				part_number: 5,
				size: 7
			},
			PartInfo {
				etag: "etag4",
				timestamp: TS,
				checksum: None,
				part_number: 8,
				size: 5
			},

@ -1320,28 +1268,24 @@ mod tests {
			PartInfo {
				etag: "etag1",
				timestamp: TS,
				checksum: None,
				part_number: 1,
				size: 3
			},
			PartInfo {
				etag: "etag2",
				timestamp: TS,
				checksum: None,
				part_number: 3,
				size: 10
			},
			PartInfo {
				etag: "etag3",
				timestamp: TS,
				checksum: None,
				part_number: 5,
				size: 7
			},
			PartInfo {
				etag: "etag4",
				timestamp: TS,
				checksum: None,
				part_number: 8,
				size: 5
			},

@ -13,7 +13,5 @@ mod post_object;
mod put;
mod website;

mod checksum;
mod encryption;
mod router;
pub mod xml;

@ -1,14 +1,15 @@
use std::collections::HashMap;
use std::convert::TryInto;
use std::sync::Arc;

use base64::prelude::*;
use futures::prelude::*;
use hyper::{Request, Response};
use md5::{Digest as Md5Digest, Md5};

use garage_table::*;
use garage_util::async_hash::*;
use garage_util::data::*;

use garage_model::bucket_table::Bucket;
use garage_model::garage::Garage;
use garage_model::s3::block_ref_table::*;
use garage_model::s3::mpu_table::*;

@ -17,8 +18,6 @@ use garage_model::s3::version_table::*;

use crate::helpers::*;
use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::checksum::*;
use crate::s3::encryption::EncryptionParams;
use crate::s3::error::*;
use crate::s3::put::*;
use crate::s3::xml as s3_xml;

@ -27,32 +26,18 @@ use crate::signature::verify_signed_content;
// ----

pub async fn handle_create_multipart_upload(
	ctx: ReqCtx,
	garage: Arc<Garage>,
	req: &Request<ReqBody>,
	bucket_name: &str,
	bucket_id: Uuid,
	key: &String,
) -> Result<Response<ResBody>, Error> {
	let ReqCtx {
		garage,
		bucket_id,
		bucket_name,
		..
	} = &ctx;
	let existing_object = garage.object_table.get(&bucket_id, &key).await?;

	let upload_id = gen_uuid();
	let timestamp = next_timestamp(existing_object.as_ref());

	let headers = get_headers(req.headers())?;
	let meta = ObjectVersionMetaInner {
		headers,
		checksum: None,
	};

	// Determine whether object should be encrypted, and if so the key
	let encryption = EncryptionParams::new_from_headers(&garage, req.headers())?;
	let object_encryption = encryption.encrypt_meta(meta)?;

	let checksum_algorithm = request_checksum_algorithm(req.headers())?;

	// Create object in object table
	let object_version = ObjectVersion {

@ -60,17 +45,16 @@ pub async fn handle_create_multipart_upload(
		timestamp,
		state: ObjectVersionState::Uploading {
			multipart: true,
			encryption: object_encryption,
			checksum_algorithm,
			headers,
		},
	};
	let object = Object::new(*bucket_id, key.to_string(), vec![object_version]);
	let object = Object::new(bucket_id, key.to_string(), vec![object_version]);
	garage.object_table.insert(&object).await?;

	// Create multipart upload in mpu table
	// This multipart upload will hold references to uploaded parts
	// (which are entries in the Version table)
	let mpu = MultipartUpload::new(upload_id, timestamp, *bucket_id, key.into(), false);
	let mpu = MultipartUpload::new(upload_id, timestamp, bucket_id, key.into(), false);
	garage.mpu_table.insert(&mpu).await?;

	// Send success response

@ -82,53 +66,35 @@ pub async fn handle_create_multipart_upload(
	};
	let xml = s3_xml::to_xml_with_header(&result)?;

	let mut resp = Response::builder();
	encryption.add_response_headers(&mut resp);
	Ok(resp.body(string_body(xml))?)
	Ok(Response::new(string_body(xml)))
}

pub async fn handle_put_part(
	ctx: ReqCtx,
	garage: Arc<Garage>,
	req: Request<ReqBody>,
	bucket_id: Uuid,
	key: &str,
	part_number: u64,
	upload_id: &str,
	content_sha256: Option<Hash>,
) -> Result<Response<ResBody>, Error> {
	let ReqCtx { garage, .. } = &ctx;

	let upload_id = decode_upload_id(upload_id)?;

	let expected_checksums = ExpectedChecksums {
		md5: match req.headers().get("content-md5") {
	let content_md5 = match req.headers().get("content-md5") {
		Some(x) => Some(x.to_str()?.to_string()),
		None => None,
	},
		sha256: content_sha256,
		extra: request_checksum_value(req.headers())?,
	};

	// Read first chunk, and at the same time try to get object to see if it exists
	let key = key.to_string();

	let (req_head, req_body) = req.into_parts();
	let stream = body_stream(req_body);
	let stream = body_stream(req.into_body());
	let mut chunker = StreamChunker::new(stream, garage.config.block_size);

	let ((_, object_version, mut mpu), first_block) =
		futures::try_join!(get_upload(&ctx, &key, &upload_id), chunker.next(),)?;

	// Check encryption params
	let (object_encryption, checksum_algorithm) = match object_version.state {
		ObjectVersionState::Uploading {
			encryption,
			checksum_algorithm,
			..
		} => (encryption, checksum_algorithm),
		_ => unreachable!(),
	};
	let (encryption, _) =
		EncryptionParams::check_decrypt(&garage, &req_head.headers, &object_encryption)?;
	let ((_, _, mut mpu), first_block) = futures::try_join!(
		get_upload(&garage, &bucket_id, &key, &upload_id),
		chunker.next(),
	)?;

	// Check object is valid and part can be accepted
	let first_block = first_block.ok_or_bad_request("Empty body")?;

@ -155,9 +121,7 @@ pub async fn handle_put_part(
		mpu_part_key,
		MpuPart {
			version: version_uuid,
			// all these are filled in later, at the end of this function
			etag: None,
			checksum: None,
			size: None,
		},
	);

@ -171,31 +135,33 @@ pub async fn handle_put_part(
	garage.version_table.insert(&version).await?;

	// Copy data to version
	let checksummer =
		Checksummer::init(&expected_checksums, !encryption.is_encrypted()).add(checksum_algorithm);
	let (total_size, checksums, _) = read_and_put_blocks(
		&ctx,
	let first_block_hash = async_blake2sum(first_block.clone()).await;

	let (total_size, data_md5sum, data_sha256sum) = read_and_put_blocks(
		&garage,
		&version,
		encryption,
		part_number,
		first_block,
		first_block_hash,
		&mut chunker,
		checksummer,
	)
	.await?;

	// Verify that checksums match
	checksums.verify(&expected_checksums)?;
	ensure_checksum_matches(
		data_md5sum.as_slice(),
		data_sha256sum,
		content_md5.as_deref(),
		content_sha256,
	)?;

	// Store part etag in version
	let etag = encryption.etag_from_md5(&checksums.md5);

	let data_md5sum_hex = hex::encode(data_md5sum);
	mpu.parts.put(
		mpu_part_key,
		MpuPart {
			version: version_uuid,
			etag: Some(etag.clone()),
			checksum: checksums.extract(checksum_algorithm),
			etag: Some(data_md5sum_hex.clone()),
			size: Some(total_size),
		},
	);

@ -205,10 +171,11 @@ pub async fn handle_put_part(
	// We won't have to clean up on drop.
	interrupted_cleanup.cancel();

	let mut resp = Response::builder().header("ETag", format!("\"{}\"", etag));
	encryption.add_response_headers(&mut resp);
	let resp = add_checksum_response_headers(&expected_checksums.extra, resp);
	Ok(resp.body(empty_body())?)
	let response = Response::builder()
		.header("ETag", format!("\"{}\"", data_md5sum_hex))
		.body(empty_body())
		.unwrap();
	Ok(response)
}

struct InterruptedCleanup(Option<InterruptedCleanupInner>);

@ -243,23 +210,17 @@ impl Drop for InterruptedCleanup {
}

pub async fn handle_complete_multipart_upload(
	ctx: ReqCtx,
	garage: Arc<Garage>,
	req: Request<ReqBody>,
	bucket_name: &str,
	bucket: &Bucket,
	key: &str,
	upload_id: &str,
	content_sha256: Option<Hash>,
) -> Result<Response<ResBody>, Error> {
	let ReqCtx {
		garage,
		bucket_id,
		bucket_name,
		..
	} = &ctx;
	let (req_head, req_body) = req.into_parts();

	let expected_checksum = request_checksum_value(&req_head.headers)?;

	let body = http_body_util::BodyExt::collect(req_body).await?.to_bytes();
	let body = http_body_util::BodyExt::collect(req.into_body())
		.await?
		.to_bytes();

	if let Some(content_sha256) = content_sha256 {
		verify_signed_content(content_sha256, &body[..])?;

@ -277,18 +238,15 @@ pub async fn handle_complete_multipart_upload(

	// Get object and multipart upload
	let key = key.to_string();
	let (object, mut object_version, mpu) = get_upload(&ctx, &key, &upload_id).await?;
	let (object, mut object_version, mpu) =
		get_upload(&garage, &bucket.id, &key, &upload_id).await?;

	if mpu.parts.is_empty() {
		return Err(Error::bad_request("No data was uploaded"));
	}

	let (object_encryption, checksum_algorithm) = match object_version.state {
		ObjectVersionState::Uploading {
			encryption,
			checksum_algorithm,
			..
		} => (encryption, checksum_algorithm),
	let headers = match object_version.state {
		ObjectVersionState::Uploading { headers, .. } => headers,
		_ => unreachable!(),
	};

@ -316,13 +274,6 @@ pub async fn handle_complete_multipart_upload(
	for req_part in body_list_of_parts.iter() {
		match have_parts.get(&req_part.part_number) {
			Some(part) if part.etag.as_ref() == Some(&req_part.etag) && part.size.is_some() => {
				// alternative version: if req_part.checksum.is_some() && part.checksum != req_part.checksum {
				if part.checksum != req_part.checksum {
					return Err(Error::InvalidDigest(format!(
						"Invalid checksum for part {}: in request = {:?}, uploaded part = {:?}",
						req_part.part_number, req_part.checksum, part.checksum
					)));
				}
				parts.push(*part)
			}
			_ => return Err(Error::InvalidPart),

@ -342,7 +293,7 @@ pub async fn handle_complete_multipart_upload(
	let mut final_version = Version::new(
		upload_id,
		VersionBacklink::Object {
			bucket_id: *bucket_id,
			bucket_id: bucket.id,
			key: key.to_string(),
		},
		false,

@ -370,60 +321,41 @@ pub async fn handle_complete_multipart_upload(
	});
	garage.block_ref_table.insert_many(block_refs).await?;

	// Calculate checksum and etag of final object
	// Calculate etag of final object
	// To understand how etags are calculated, read more here:
	// https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
	// https://teppen.io/2018/06/23/aws_s3_etags/
	let mut checksummer = MultipartChecksummer::init(checksum_algorithm);
	let mut etag_md5_hasher = Md5::new();
	for part in parts.iter() {
		checksummer.update(part.etag.as_ref().unwrap(), part.checksum)?;
		etag_md5_hasher.update(part.etag.as_ref().unwrap().as_bytes());
	}
	let (checksum_md5, checksum_extra) = checksummer.finalize();

	if expected_checksum.is_some() && checksum_extra != expected_checksum {
		return Err(Error::InvalidDigest(
			"Failed to validate x-amz-checksum-*".into(),
		));
	}

	let etag = format!("{}-{}", hex::encode(&checksum_md5[..]), parts.len());
	let etag = format!(
		"{}-{}",
		hex::encode(etag_md5_hasher.finalize()),
		parts.len()
	);
|
||||
// Calculate total size of final object
|
||||
let total_size = parts.iter().map(|x| x.size.unwrap()).sum();
|
||||
|
||||
if let Err(e) = check_quotas(&ctx, total_size, Some(&object)).await {
|
||||
if let Err(e) = check_quotas(&garage, bucket, total_size, Some(&object)).await {
|
||||
object_version.state = ObjectVersionState::Aborted;
|
||||
let final_object = Object::new(*bucket_id, key.clone(), vec![object_version]);
|
||||
let final_object = Object::new(bucket.id, key.clone(), vec![object_version]);
|
||||
garage.object_table.insert(&final_object).await?;
|
||||
|
||||
return Err(e);
|
||||
}
|
||||
|
||||
// If there is a checksum algorithm, update metadata with checksum
|
||||
let object_encryption = match checksum_algorithm {
|
||||
None => object_encryption,
|
||||
Some(_) => {
|
||||
let (encryption, meta) =
|
||||
EncryptionParams::check_decrypt(&garage, &req_head.headers, &object_encryption)?;
|
||||
let new_meta = ObjectVersionMetaInner {
|
||||
headers: meta.into_owned().headers,
|
||||
checksum: checksum_extra,
|
||||
};
|
||||
encryption.encrypt_meta(new_meta)?
|
||||
}
|
||||
};
|
||||
|
||||
// Write final object version
|
||||
object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock(
|
||||
ObjectVersionMeta {
|
||||
encryption: object_encryption,
|
||||
headers,
|
||||
size: total_size,
|
||||
etag: etag.clone(),
|
||||
},
|
||||
final_version.blocks.items()[0].1.hash,
|
||||
));
|
||||
|
||||
let final_object = Object::new(*bucket_id, key.clone(), vec![object_version]);
|
||||
let final_object = Object::new(bucket.id, key.clone(), vec![object_version]);
|
||||
garage.object_table.insert(&final_object).await?;
|
||||
|
||||
// Send response saying ok we're done
|
||||
|
@ -433,45 +365,25 @@ pub async fn handle_complete_multipart_upload(
|
|||
bucket: s3_xml::Value(bucket_name.to_string()),
|
||||
key: s3_xml::Value(key),
|
||||
etag: s3_xml::Value(format!("\"{}\"", etag)),
|
||||
checksum_crc32: match &checksum_extra {
|
||||
Some(ChecksumValue::Crc32(x)) => Some(s3_xml::Value(BASE64_STANDARD.encode(&x))),
|
||||
_ => None,
|
||||
},
|
||||
checksum_crc32c: match &checksum_extra {
|
||||
Some(ChecksumValue::Crc32c(x)) => Some(s3_xml::Value(BASE64_STANDARD.encode(&x))),
|
||||
_ => None,
|
||||
},
|
||||
checksum_sha1: match &checksum_extra {
|
||||
Some(ChecksumValue::Sha1(x)) => Some(s3_xml::Value(BASE64_STANDARD.encode(&x))),
|
||||
_ => None,
|
||||
},
|
||||
checksum_sha256: match &checksum_extra {
|
||||
Some(ChecksumValue::Sha256(x)) => Some(s3_xml::Value(BASE64_STANDARD.encode(&x))),
|
||||
_ => None,
|
||||
},
|
||||
};
|
||||
let xml = s3_xml::to_xml_with_header(&result)?;
|
||||
|
||||
let resp = Response::builder();
|
||||
let resp = add_checksum_response_headers(&expected_checksum, resp);
|
||||
Ok(resp.body(string_body(xml))?)
|
||||
Ok(Response::new(string_body(xml)))
|
||||
}
|
||||
|
||||
pub async fn handle_abort_multipart_upload(
|
||||
ctx: ReqCtx,
|
||||
garage: Arc<Garage>,
|
||||
bucket_id: Uuid,
|
||||
key: &str,
|
||||
upload_id: &str,
|
||||
) -> Result<Response<ResBody>, Error> {
|
||||
let ReqCtx {
|
||||
garage, bucket_id, ..
|
||||
} = &ctx;
|
||||
|
||||
let upload_id = decode_upload_id(upload_id)?;
|
||||
|
||||
let (_, mut object_version, _) = get_upload(&ctx, &key.to_string(), &upload_id).await?;
|
||||
let (_, mut object_version, _) =
|
||||
get_upload(&garage, &bucket_id, &key.to_string(), &upload_id).await?;
|
||||
|
||||
object_version.state = ObjectVersionState::Aborted;
|
||||
let final_object = Object::new(*bucket_id, key.to_string(), vec![object_version]);
|
||||
let final_object = Object::new(bucket_id, key.to_string(), vec![object_version]);
|
||||
garage.object_table.insert(&final_object).await?;
|
||||
|
||||
Ok(Response::new(empty_body()))
|
||||
|
@ -481,13 +393,11 @@ pub async fn handle_abort_multipart_upload(
|
|||
|
||||
#[allow(clippy::ptr_arg)]
|
||||
pub(crate) async fn get_upload(
|
||||
ctx: &ReqCtx,
|
||||
garage: &Garage,
|
||||
bucket_id: &Uuid,
|
||||
key: &String,
|
||||
upload_id: &Uuid,
|
||||
) -> Result<(Object, ObjectVersion, MultipartUpload), Error> {
|
||||
let ReqCtx {
|
||||
garage, bucket_id, ..
|
||||
} = ctx;
|
||||
let (object, mpu) = futures::try_join!(
|
||||
garage.object_table.get(bucket_id, key).map_err(Error::from),
|
||||
garage
|
||||
|
@ -523,7 +433,6 @@ pub fn decode_upload_id(id: &str) -> Result<Uuid, Error> {
|
|||
struct CompleteMultipartUploadPart {
|
||||
etag: String,
|
||||
part_number: u64,
|
||||
checksum: Option<ChecksumValue>,
|
||||
}
|
||||
|
||||
fn parse_complete_multipart_upload_body(
|
||||
|
@ -549,41 +458,9 @@ fn parse_complete_multipart_upload_body(
|
|||
.children()
|
||||
.find(|e| e.has_tag_name("PartNumber"))?
|
||||
.text()?;
|
||||
let checksum = if let Some(crc32) =
|
||||
item.children().find(|e| e.has_tag_name("ChecksumCRC32"))
|
||||
{
|
||||
Some(ChecksumValue::Crc32(
|
||||
BASE64_STANDARD.decode(crc32.text()?).ok()?[..]
|
||||
.try_into()
|
||||
.ok()?,
|
||||
))
|
||||
} else if let Some(crc32c) = item.children().find(|e| e.has_tag_name("ChecksumCRC32C"))
|
||||
{
|
||||
Some(ChecksumValue::Crc32c(
|
||||
BASE64_STANDARD.decode(crc32c.text()?).ok()?[..]
|
||||
.try_into()
|
||||
.ok()?,
|
||||
))
|
||||
} else if let Some(sha1) = item.children().find(|e| e.has_tag_name("ChecksumSHA1")) {
|
||||
Some(ChecksumValue::Sha1(
|
||||
BASE64_STANDARD.decode(sha1.text()?).ok()?[..]
|
||||
.try_into()
|
||||
.ok()?,
|
||||
))
|
||||
} else if let Some(sha256) = item.children().find(|e| e.has_tag_name("ChecksumSHA256"))
|
||||
{
|
||||
Some(ChecksumValue::Sha256(
|
||||
BASE64_STANDARD.decode(sha256.text()?).ok()?[..]
|
||||
.try_into()
|
||||
.ok()?,
|
||||
))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
parts.push(CompleteMultipartUploadPart {
|
||||
etag: etag.trim_matches('"').to_string(),
|
||||
part_number: part_number.parse().ok()?,
|
||||
checksum,
|
||||
});
|
||||
} else {
|
||||
return None;
|
||||
|
|
|
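
Editor's note on the etag computations above: both sides of this diff implement the S3 multipart ETag scheme described in the two linked references, where the final ETag is an MD5 over the per-part MD5 digests, suffixed with the part count. A minimal standalone sketch of that scheme (illustrative only, not Garage's exact code; it assumes each part's etag is an unquoted hex MD5 string):

use md5::{Digest, Md5};

// Sketch of the S3 multipart ETag scheme (see the AWS and teppen.io links above).
// Assumption: `part_etags` holds each part's MD5 as an unquoted hex string.
fn multipart_etag(part_etags: &[String]) -> Option<String> {
    let mut hasher = Md5::new();
    for etag in part_etags {
        // AWS hashes the concatenation of the *binary* per-part digests.
        hasher.update(hex::decode(etag).ok()?);
    }
    // e.g. "<32 hex chars>-2" for a two-part upload
    Some(format!("{}-{}", hex::encode(hasher.finalize()), part_etags.len()))
}
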
@@ -14,17 +14,14 @@ use multer::{Constraints, Multipart, SizeLimit};
use serde::Deserialize;

use garage_model::garage::Garage;
use garage_model::s3::object_table::*;

use crate::helpers::*;
use crate::s3::api_server::ResBody;
use crate::s3::checksum::*;
use crate::s3::cors::*;
use crate::s3::encryption::EncryptionParams;
use crate::s3::error::*;
use crate::s3::put::{get_headers, save_stream, ChecksumMode};
use crate::s3::put::{get_headers, save_stream};
use crate::s3::xml as s3_xml;
use crate::signature::payload::{verify_v4, Authorization};
use crate::signature::payload::{parse_date, verify_v4};

pub async fn handle_post_object(
garage: Arc<Garage>,
@@ -51,17 +48,13 @@ pub async fn handle_post_object(
let mut multipart = Multipart::with_constraints(stream, boundary, constraints);

let mut params = HeaderMap::new();
let file_field = loop {
let field = loop {
let field = if let Some(field) = multipart.next_field().await? {
field
} else {
return Err(Error::bad_request("Request did not contain a file"));
};
let name: HeaderName = if let Some(Ok(name)) = field
.name()
.map(str::to_ascii_lowercase)
.map(TryInto::try_into)
{
let name: HeaderName = if let Some(Ok(name)) = field.name().map(TryInto::try_into) {
name
} else {
continue;
@@ -71,6 +64,14 @@ pub async fn handle_post_object(
}

if let Ok(content) = HeaderValue::from_str(&field.text().await?) {
match name.as_str() {
"tag" => (/* tags need to be re-encoded, but we don't support them yet anyway */),
"acl" => {
if params.insert("x-amz-acl", content).is_some() {
return Err(Error::bad_request("Field 'acl' provided more than once"));
}
}
_ => {
if params.insert(&name, content).is_some() {
return Err(Error::bad_request(format!(
"Field '{}' provided more than once",
@@ -78,6 +79,8 @@ pub async fn handle_post_object(
)));
}
}
}
}
};

// Current part is file. Do some checks before handing off to the PutObject code
@@ -85,15 +88,26 @@ pub async fn handle_post_object(
.get("key")
.ok_or_bad_request("No key was provided")?
.to_str()?;
let credential = params
.get("x-amz-credential")
.ok_or_else(|| Error::forbidden("Garage does not support anonymous access yet"))?
.to_str()?;
let policy = params
.get("policy")
.ok_or_bad_request("No policy was provided")?
.to_str()?;
let authorization = Authorization::parse_form(&params)?;
let signature = params
.get("x-amz-signature")
.ok_or_bad_request("No signature was provided")?
.to_str()?;
let date = params
.get("x-amz-date")
.ok_or_bad_request("No date was provided")?
.to_str()?;

let key = if key.contains("${filename}") {
// if no filename is provided, don't replace. This matches the behavior of AWS.
if let Some(filename) = file_field.file_name() {
if let Some(filename) = field.file_name() {
key.replace("${filename}", filename)
} else {
key.to_owned()
@@ -102,7 +116,16 @@ pub async fn handle_post_object(
key.to_owned()
};

let api_key = verify_v4(&garage, "s3", &authorization, policy.as_bytes()).await?;
let date = parse_date(date)?;
let api_key = verify_v4(
&garage,
"s3",
credential,
&date,
signature,
policy.as_bytes(),
)
.await?;

let bucket_id = garage
.bucket_helper()
@@ -117,12 +140,6 @@ pub async fn handle_post_object(
.bucket_helper()
.get_existing_bucket(bucket_id)
.await?;
let bucket_params = bucket.state.into_option().unwrap();
let matching_cors_rule = find_matching_cors_rule(
&bucket_params,
&Request::from_parts(head.clone(), empty_body::<Infallible>()),
)?
.cloned();

let decoded_policy = BASE64_STANDARD
.decode(policy)
@@ -140,8 +157,9 @@ pub async fn handle_post_object(
let mut conditions = decoded_policy.into_conditions()?;

for (param_key, value) in params.iter() {
let param_key = param_key.as_str();
match param_key {
let mut param_key = param_key.to_string();
param_key.make_ascii_lowercase();
match param_key.as_str() {
"policy" | "x-amz-signature" => (), // this is always accepted, as it's required to validate other fields
"content-type" => {
let conds = conditions.params.remove("content-type").ok_or_else(|| {
@@ -186,7 +204,7 @@ pub async fn handle_post_object(
// how aws seems to behave.
continue;
}
let conds = conditions.params.remove(param_key).ok_or_else(|| {
let conds = conditions.params.remove(&param_key).ok_or_else(|| {
Error::bad_request(format!("Key '{}' is not allowed in policy", param_key))
})?;
for cond in conds {
@@ -212,47 +230,21 @@ pub async fn handle_post_object(
)));
}

// if we ever start supporting ACLs, we likely want to map "acl" to "x-amz-acl" somewhere
// around here to make sure the rest of the machinery takes our acl into account.
let headers = get_headers(&params)?;

let expected_checksums = ExpectedChecksums {
md5: params
.get("content-md5")
.map(HeaderValue::to_str)
.transpose()?
.map(str::to_string),
sha256: None,
extra: request_checksum_algorithm_value(&params)?,
};

let meta = ObjectVersionMetaInner {
headers,
checksum: expected_checksums.extra,
};

let encryption = EncryptionParams::new_from_headers(&garage, &params)?;

let stream = file_field.map(|r| r.map_err(Into::into));
let ctx = ReqCtx {
let stream = field.map(|r| r.map_err(Into::into));
let (_, md5) = save_stream(
garage,
bucket_id,
bucket_name,
bucket_params,
api_key,
};

let res = save_stream(
&ctx,
meta,
encryption,
headers,
StreamLimiter::new(stream, conditions.content_length),
&bucket,
&key,
ChecksumMode::Verify(&expected_checksums),
None,
None,
)
.await?;

let etag = format!("\"{}\"", res.etag);
let etag = format!("\"{}\"", md5);

let mut resp = if let Some(mut target) = params
.get("success_action_redirect")
@@ -262,16 +254,15 @@ pub async fn handle_post_object(
{
target
.query_pairs_mut()
.append_pair("bucket", &ctx.bucket_name)
.append_pair("bucket", &bucket_name)
.append_pair("key", &key)
.append_pair("etag", &etag);
let target = target.to_string();
let mut resp = Response::builder()
Response::builder()
.status(StatusCode::SEE_OTHER)
.header(header::LOCATION, target.clone())
.header(header::ETAG, etag);
encryption.add_response_headers(&mut resp);
resp.body(string_body(target))?
.header(header::ETAG, etag)
.body(string_body(target))?
} else {
let path = head
.uri
@@ -298,17 +289,16 @@ pub async fn handle_post_object(
.get("success_action_status")
.and_then(|h| h.to_str().ok())
.unwrap_or("204");
let mut builder = Response::builder()
let builder = Response::builder()
.header(header::LOCATION, location.clone())
.header(header::ETAG, etag.clone());
encryption.add_response_headers(&mut builder);
match action {
"200" => builder.status(StatusCode::OK).body(empty_body())?,
"201" => {
let xml = s3_xml::PostObject {
xmlns: (),
location: s3_xml::Value(location),
bucket: s3_xml::Value(ctx.bucket_name),
bucket: s3_xml::Value(bucket_name),
key: s3_xml::Value(key),
etag: s3_xml::Value(etag),
};
@@ -321,8 +311,12 @@ pub async fn handle_post_object(
}
};

let matching_cors_rule = find_matching_cors_rule(
&bucket,
&Request::from_parts(head, empty_body::<Infallible>()),
)?;
if let Some(rule) = matching_cors_rule {
add_cors_headers(&mut resp, &rule)
add_cors_headers(&mut resp, rule)
.ok_or_internal_error("Invalid bucket CORS configuration")?;
}
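
Editor's note: the `${filename}` handling above follows AWS's documented POST-object behavior — the form's "key" field may contain the literal token `${filename}`, which is replaced by the uploaded file's name, and is left untouched when no filename is present. A small illustrative helper (hypothetical, not part of the handler):

// Sketch of the key templating exercised by handle_post_object above.
fn resolve_post_key(key: &str, filename: Option<&str>) -> String {
    match filename {
        // If no filename is provided, don't replace; this matches AWS behavior.
        Some(f) if key.contains("${filename}") => key.replace("${filename}", f),
        _ => key.to_owned(),
    }
}

// resolve_post_key("uploads/${filename}", Some("cat.png")) == "uploads/cat.png"
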
@@ -1,11 +1,11 @@
use std::collections::HashMap;
use std::collections::{BTreeMap, HashMap};
use std::sync::Arc;

use base64::prelude::*;
use futures::prelude::*;
use futures::stream::FuturesOrdered;
use futures::try_join;

use tokio::sync::mpsc;
use md5::{digest::generic_array::*, Digest as Md5Digest, Md5};
use sha2::Sha256;

use hyper::body::Bytes;
use hyper::header::{HeaderMap, HeaderValue};
@@ -17,13 +17,14 @@ use opentelemetry::{
};

use garage_net::bytes_buf::BytesBuf;
use garage_rpc::rpc_helper::OrderTag;
use garage_table::*;
use garage_util::async_hash::*;
use garage_util::data::*;
use garage_util::error::Error as GarageError;
use garage_util::time::*;

use garage_block::manager::INLINE_THRESHOLD;
use garage_model::bucket_table::Bucket;
use garage_model::garage::Garage;
use garage_model::index_counter::CountedItem;
use garage_model::s3::block_ref_table::*;
@@ -32,27 +33,12 @@ use garage_model::s3::version_table::*;

use crate::helpers::*;
use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::checksum::*;
use crate::s3::encryption::EncryptionParams;
use crate::s3::error::*;

const PUT_BLOCKS_MAX_PARALLEL: usize = 3;

pub(crate) struct SaveStreamResult {
pub(crate) version_uuid: Uuid,
pub(crate) version_timestamp: u64,
/// Etag WITHOUT THE QUOTES (just the hex value)
pub(crate) etag: String,
}

pub(crate) enum ChecksumMode<'a> {
Verify(&'a ExpectedChecksums),
Calculate(Option<ChecksumAlgorithm>),
}

pub async fn handle_put(
ctx: ReqCtx,
garage: Arc<Garage>,
req: Request<ReqBody>,
bucket: &Bucket,
key: &String,
content_sha256: Option<Hash>,
) -> Result<Response<ResBody>, Error> {
@@ -60,59 +46,42 @@ pub async fn handle_put(
let headers = get_headers(req.headers())?;
debug!("Object headers: {:?}", headers);

let expected_checksums = ExpectedChecksums {
md5: match req.headers().get("content-md5") {
let content_md5 = match req.headers().get("content-md5") {
Some(x) => Some(x.to_str()?.to_string()),
None => None,
},
sha256: content_sha256,
extra: request_checksum_value(req.headers())?,
};

let meta = ObjectVersionMetaInner {
headers,
checksum: expected_checksums.extra,
};

// Determine whether object should be encrypted, and if so the key
let encryption = EncryptionParams::new_from_headers(&ctx.garage, req.headers())?;

let stream = body_stream(req.into_body());

let res = save_stream(
&ctx,
meta,
encryption,
save_stream(
garage,
headers,
stream,
bucket,
key,
ChecksumMode::Verify(&expected_checksums),
content_md5,
content_sha256,
)
.await?;

let mut resp = Response::builder()
.header("x-amz-version-id", hex::encode(res.version_uuid))
.header("ETag", format!("\"{}\"", res.etag));
encryption.add_response_headers(&mut resp);
let resp = add_checksum_response_headers(&expected_checksums.extra, resp);
Ok(resp.body(empty_body())?)
.await
.map(|(uuid, md5)| put_response(uuid, md5))
}

pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
ctx: &ReqCtx,
mut meta: ObjectVersionMetaInner,
encryption: EncryptionParams,
garage: Arc<Garage>,
headers: ObjectVersionHeaders,
body: S,
bucket: &Bucket,
key: &String,
checksum_mode: ChecksumMode<'_>,
) -> Result<SaveStreamResult, Error> {
let ReqCtx {
garage, bucket_id, ..
} = ctx;

content_md5: Option<String>,
content_sha256: Option<FixedBytes32>,
) -> Result<(Uuid, String), Error> {
let mut chunker = StreamChunker::new(body, garage.config.block_size);
let (first_block_opt, existing_object) = try_join!(
chunker.next(),
garage.object_table.get(bucket_id, key).map_err(Error::from),
garage
.object_table
.get(&bucket.id, key)
.map_err(Error::from),
)?;

let first_block = first_block_opt.unwrap_or_default();
@@ -121,55 +90,43 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
let version_uuid = gen_uuid();
let version_timestamp = next_timestamp(existing_object.as_ref());

let mut checksummer = match checksum_mode {
ChecksumMode::Verify(expected) => Checksummer::init(expected, !encryption.is_encrypted()),
ChecksumMode::Calculate(algo) => {
Checksummer::init(&Default::default(), !encryption.is_encrypted()).add(algo)
}
};

// If body is small enough, store it directly in the object table
// as "inline data". We can then return immediately.
if first_block.len() < INLINE_THRESHOLD {
checksummer.update(&first_block);
let checksums = checksummer.finalize();

match checksum_mode {
ChecksumMode::Verify(expected) => {
checksums.verify(&expected)?;
}
ChecksumMode::Calculate(algo) => {
meta.checksum = checksums.extract(algo);
}
};
let mut md5sum = Md5::new();
md5sum.update(&first_block[..]);
let data_md5sum = md5sum.finalize();
let data_md5sum_hex = hex::encode(data_md5sum);

let data_sha256sum = sha256sum(&first_block[..]);
let size = first_block.len() as u64;
check_quotas(ctx, size, existing_object.as_ref()).await?;

let etag = encryption.etag_from_md5(&checksums.md5);
let inline_data = encryption.encrypt_blob(&first_block)?.to_vec();
ensure_checksum_matches(
data_md5sum.as_slice(),
data_sha256sum,
content_md5.as_deref(),
content_sha256,
)?;

check_quotas(&garage, bucket, size, existing_object.as_ref()).await?;

let object_version = ObjectVersion {
uuid: version_uuid,
timestamp: version_timestamp,
state: ObjectVersionState::Complete(ObjectVersionData::Inline(
ObjectVersionMeta {
encryption: encryption.encrypt_meta(meta)?,
headers,
size,
etag: etag.clone(),
etag: data_md5sum_hex.clone(),
},
inline_data,
first_block.to_vec(),
)),
};

let object = Object::new(*bucket_id, key.into(), vec![object_version]);
let object = Object::new(bucket.id, key.into(), vec![object_version]);
garage.object_table.insert(&object).await?;

return Ok(SaveStreamResult {
version_uuid,
version_timestamp,
etag,
});
return Ok((version_uuid, data_md5sum_hex));
}

// The following consists of many steps that can each fail.
@@ -177,7 +134,7 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
// before everything is finished (cleanup is done using the Drop trait).
let mut interrupted_cleanup = InterruptedCleanup(Some(InterruptedCleanupInner {
garage: garage.clone(),
bucket_id: *bucket_id,
bucket_id: bucket.id,
key: key.into(),
version_uuid,
version_timestamp,
@@ -189,12 +146,11 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
uuid: version_uuid,
timestamp: version_timestamp,
state: ObjectVersionState::Uploading {
encryption: encryption.encrypt_meta(meta.clone())?,
checksum_algorithm: None, // don't care; overwritten later
headers: headers.clone(),
multipart: false,
},
};
let object = Object::new(*bucket_id, key.into(), vec![object_version.clone()]);
let object = Object::new(bucket.id, key.into(), vec![object_version.clone()]);
garage.object_table.insert(&object).await?;

// Initialize corresponding entry in version table
@@ -204,77 +160,90 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
let version = Version::new(
version_uuid,
VersionBacklink::Object {
bucket_id: *bucket_id,
bucket_id: bucket.id,
key: key.into(),
},
false,
);
garage.version_table.insert(&version).await?;

// Transfer data
let (total_size, checksums, first_block_hash) = read_and_put_blocks(
ctx,
// Transfer data and verify checksum
let first_block_hash = async_blake2sum(first_block.clone()).await;

let (total_size, data_md5sum, data_sha256sum) = read_and_put_blocks(
&garage,
&version,
encryption,
1,
first_block,
first_block_hash,
&mut chunker,
checksummer,
)
.await?;

// Verify checksums are ok / add calculated checksum to metadata
match checksum_mode {
ChecksumMode::Verify(expected) => {
checksums.verify(&expected)?;
}
ChecksumMode::Calculate(algo) => {
meta.checksum = checksums.extract(algo);
}
};
ensure_checksum_matches(
data_md5sum.as_slice(),
data_sha256sum,
content_md5.as_deref(),
content_sha256,
)?;

// Verify quotas are respected
check_quotas(ctx, total_size, existing_object.as_ref()).await?;
check_quotas(&garage, bucket, total_size, existing_object.as_ref()).await?;

// Save final object state, marked as Complete
let etag = encryption.etag_from_md5(&checksums.md5);

let md5sum_hex = hex::encode(data_md5sum);
object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock(
ObjectVersionMeta {
encryption: encryption.encrypt_meta(meta)?,
headers,
size: total_size,
etag: etag.clone(),
etag: md5sum_hex.clone(),
},
first_block_hash,
));
let object = Object::new(*bucket_id, key.into(), vec![object_version]);
let object = Object::new(bucket.id, key.into(), vec![object_version]);
garage.object_table.insert(&object).await?;

// We were not interrupted, everything went fine.
// We won't have to clean up on drop.
interrupted_cleanup.cancel();

Ok(SaveStreamResult {
version_uuid,
version_timestamp,
etag,
})
Ok((version_uuid, md5sum_hex))
}

/// Validate MD5 sum against content-md5 header
/// and sha256sum against signed content-sha256
pub(crate) fn ensure_checksum_matches(
data_md5sum: &[u8],
data_sha256sum: garage_util::data::FixedBytes32,
content_md5: Option<&str>,
content_sha256: Option<garage_util::data::FixedBytes32>,
) -> Result<(), Error> {
if let Some(expected_sha256) = content_sha256 {
if expected_sha256 != data_sha256sum {
return Err(Error::bad_request(
"Unable to validate x-amz-content-sha256",
));
} else {
trace!("Successfully validated x-amz-content-sha256");
}
}
if let Some(expected_md5) = content_md5 {
if expected_md5.trim_matches('"') != BASE64_STANDARD.encode(data_md5sum) {
return Err(Error::bad_request("Unable to validate content-md5"));
} else {
trace!("Successfully validated content-md5");
}
}
Ok(())
}

/// Check that inserting this object with this size doesn't exceed bucket quotas
pub(crate) async fn check_quotas(
ctx: &ReqCtx,
garage: &Arc<Garage>,
bucket: &Bucket,
size: u64,
prev_object: Option<&Object>,
) -> Result<(), Error> {
let ReqCtx {
garage,
bucket_id,
bucket_params,
..
} = ctx;

let quotas = bucket_params.quotas.get();
let quotas = bucket.state.as_option().unwrap().quotas.get();
if quotas.max_objects.is_none() && quotas.max_size.is_none() {
return Ok(());
};
@@ -282,11 +251,11 @@ pub(crate) async fn check_quotas(
let counters = garage
.object_counter_table
.table
.get(bucket_id, &EmptyKey)
.get(&bucket.id, &EmptyKey)
.await?;

let counters = counters
.map(|x| x.filtered_values(&garage.system.cluster_layout()))
.map(|x| x.filtered_values(&garage.system.ring.borrow()))
.unwrap_or_default();

let (prev_cnt_obj, prev_cnt_size) = match prev_object {
@@ -326,185 +295,89 @@ pub(crate) async fn check_quotas(
}

pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
ctx: &ReqCtx,
garage: &Garage,
version: &Version,
encryption: EncryptionParams,
part_number: u64,
first_block: Bytes,
first_block_hash: Hash,
chunker: &mut StreamChunker<S>,
checksummer: Checksummer,
) -> Result<(u64, Checksums, Hash), Error> {
) -> Result<(u64, GenericArray<u8, typenum::U16>, Hash), Error> {
let tracer = opentelemetry::global::tracer("garage");

let (block_tx, mut block_rx) = mpsc::channel::<Result<Bytes, Error>>(2);
let read_blocks = async {
block_tx.send(Ok(first_block)).await?;
loop {
let res = chunker
.next()
let md5hasher = AsyncHasher::<Md5>::new();
let sha256hasher = AsyncHasher::<Sha256>::new();

futures::future::join(
md5hasher.update(first_block.clone()),
sha256hasher.update(first_block.clone()),
)
.with_context(Context::current_with_span(
tracer.start("Read block from client"),
tracer.start("Hash first block (md5, sha256)"),
))
.await;
match res {
Ok(Some(block)) => block_tx.send(Ok(block)).await?,
Ok(None) => break,
Err(e) => {
block_tx.send(Err(e)).await?;
break;
}
}
}
drop(block_tx);
Ok::<_, mpsc::error::SendError<_>>(())
};

let (block_tx2, mut block_rx2) = mpsc::channel::<Result<Bytes, Error>>(1);
let hash_stream = async {
let mut checksummer = checksummer;
while let Some(next) = block_rx.recv().await {
match next {
Ok(block) => {
block_tx2.send(Ok(block.clone())).await?;
checksummer = tokio::task::spawn_blocking(move || {
checksummer.update(&block);
checksummer
})
.with_context(Context::current_with_span(
tracer.start("Hash block (md5, sha256)"),
))
.await
.unwrap()
}
Err(e) => {
block_tx2.send(Err(e)).await?;
break;
}
}
}
drop(block_tx2);
Ok::<_, mpsc::error::SendError<_>>(checksummer)
};

let (block_tx3, mut block_rx3) = mpsc::channel::<Result<(Bytes, u64, Hash), Error>>(1);
let encrypt_hash_blocks = async {
let mut first_block_hash = None;
while let Some(next) = block_rx2.recv().await {
match next {
Ok(block) => {
let unencrypted_len = block.len() as u64;
let res = tokio::task::spawn_blocking(move || {
let block = encryption.encrypt_block(block)?;
let hash = blake2sum(&block);
Ok((block, hash))
})
.with_context(Context::current_with_span(
tracer.start("Encrypt and hash (blake2) block"),
))
.await
.unwrap();
match res {
Ok((block, hash)) => {
if first_block_hash.is_none() {
first_block_hash = Some(hash);
}
block_tx3.send(Ok((block, unencrypted_len, hash))).await?;
}
Err(e) => {
block_tx3.send(Err(e)).await?;
break;
}
}
}
Err(e) => {
block_tx3.send(Err(e)).await?;
break;
}
}
}
drop(block_tx3);
Ok::<_, mpsc::error::SendError<_>>(first_block_hash.unwrap())
};

let put_blocks = async {
// Structure for handling several concurrent writes to storage nodes
let order_stream = OrderTag::stream();
let mut write_futs = FuturesOrdered::new();
let mut written_bytes = 0u64;
loop {
// Simultaneously write blocks to storage nodes & await for next block to be written
let currently_running = write_futs.len();
let write_futs_next = async {
if write_futs.is_empty() {
futures::future::pending().await
} else {
write_futs.next().await.unwrap()
}
};
let recv_next = async {
// If more than a maximum number of writes are in progress, don't add more for now
if currently_running >= PUT_BLOCKS_MAX_PARALLEL {
futures::future::pending().await
} else {
block_rx3.recv().await
}
};
let (block, unencrypted_len, hash) = tokio::select! {
result = write_futs_next => {
result?;
continue;
},
recv = recv_next => match recv {
Some(next) => next?,
None => break,
},
};

// For next block to be written: count its size and spawn future to write it
write_futs.push_back(put_block_and_meta(
ctx,
let mut next_offset = first_block.len();
let mut put_curr_version_block = put_block_meta(
garage,
version,
part_number,
written_bytes,
hash,
block,
unencrypted_len,
encryption.is_encrypted(),
order_stream.order(written_bytes),
));
written_bytes += unencrypted_len;
0,
first_block_hash,
first_block.len() as u64,
);
let mut put_curr_block = garage
.block_manager
.rpc_put_block(first_block_hash, first_block);

loop {
let (_, _, next_block) = futures::try_join!(
put_curr_block.map_err(Error::from),
put_curr_version_block.map_err(Error::from),
chunker.next(),
)?;
if let Some(block) = next_block {
let (_, _, block_hash) = futures::future::join3(
md5hasher.update(block.clone()),
sha256hasher.update(block.clone()),
async_blake2sum(block.clone()),
)
.with_context(Context::current_with_span(
tracer.start("Hash block (md5, sha256, blake2)"),
))
.await;
let block_len = block.len();
put_curr_version_block = put_block_meta(
garage,
version,
part_number,
next_offset as u64,
block_hash,
block_len as u64,
);
put_curr_block = garage.block_manager.rpc_put_block(block_hash, block);
next_offset += block_len;
} else {
break;
}
while let Some(res) = write_futs.next().await {
res?;
}
Ok::<_, Error>(written_bytes)
};

let (_, stream_hash_result, block_hash_result, final_result) =
futures::join!(read_blocks, hash_stream, encrypt_hash_blocks, put_blocks);
let total_size = next_offset as u64;
let data_md5sum = md5hasher.finalize().await;

let total_size = final_result?;
// unwrap here is ok, because if hasher failed, it is because something failed
// later in the pipeline which already caused a return at the ? on previous line
let first_block_hash = block_hash_result.unwrap();
let checksums = stream_hash_result.unwrap().finalize();
let data_sha256sum = sha256hasher.finalize().await;
let data_sha256sum = Hash::try_from(&data_sha256sum[..]).unwrap();

Ok((total_size, checksums, first_block_hash))
Ok((total_size, data_md5sum, data_sha256sum))
}

async fn put_block_and_meta(
ctx: &ReqCtx,
async fn put_block_meta(
garage: &Garage,
version: &Version,
part_number: u64,
offset: u64,
hash: Hash,
block: Bytes,
size: u64,
is_encrypted: bool,
order_tag: OrderTag,
) -> Result<(), GarageError> {
let ReqCtx { garage, .. } = ctx;

let mut version = version.clone();
version.blocks.put(
VersionBlockKey {
@@ -521,9 +394,6 @@ async fn put_block_and_meta(
};

futures::try_join!(
garage
.block_manager
.rpc_put_block(hash, block, is_encrypted, Some(order_tag)),
garage.version_table.insert(&version),
garage.block_ref_table.insert(&block_ref),
)?;
@@ -566,6 +436,14 @@ impl<S: Stream<Item = Result<Bytes, Error>> + Unpin> StreamChunker<S> {
}
}

pub fn put_response(version_uuid: Uuid, md5sum_hex: String) -> Response<ResBody> {
Response::builder()
.header("x-amz-version-id", hex::encode(version_uuid))
.header("ETag", format!("\"{}\"", md5sum_hex))
.body(empty_body())
.unwrap()
}

struct InterruptedCleanup(Option<InterruptedCleanupInner>);
struct InterruptedCleanupInner {
garage: Arc<Garage>,
@@ -600,35 +478,57 @@ impl Drop for InterruptedCleanup {

// ============ helpers ============

pub(crate) fn get_headers(headers: &HeaderMap<HeaderValue>) -> Result<HeaderList, Error> {
let mut ret = Vec::new();
pub(crate) fn get_mime_type(headers: &HeaderMap<HeaderValue>) -> Result<String, Error> {
Ok(headers
.get(hyper::header::CONTENT_TYPE)
.map(|x| x.to_str())
.unwrap_or(Ok("blob"))?
.to_string())
}

pub(crate) fn get_headers(headers: &HeaderMap<HeaderValue>) -> Result<ObjectVersionHeaders, Error> {
let content_type = get_mime_type(headers)?;
let mut other = BTreeMap::new();

// Preserve standard headers
let standard_header = vec![
hyper::header::CONTENT_TYPE,
hyper::header::CACHE_CONTROL,
hyper::header::CONTENT_DISPOSITION,
hyper::header::CONTENT_ENCODING,
hyper::header::CONTENT_LANGUAGE,
hyper::header::EXPIRES,
];
for name in standard_header.iter() {
if let Some(value) = headers.get(name) {
ret.push((name.to_string(), value.to_str()?.to_string()));
for h in standard_header.iter() {
if let Some(v) = headers.get(h) {
match v.to_str() {
Ok(v_str) => {
other.insert(h.to_string(), v_str.to_string());
}
Err(e) => {
warn!("Discarding header {}, error in .to_str(): {}", h, e);
}
}
}
}

// Preserve x-amz-meta- headers
for (name, value) in headers.iter() {
if name.as_str().starts_with("x-amz-meta-") {
ret.push((
name.to_string(),
std::str::from_utf8(value.as_bytes())?.to_string(),
));
for (k, v) in headers.iter() {
if k.as_str().starts_with("x-amz-meta-") {
match v.to_str() {
Ok(v_str) => {
other.insert(k.to_string(), v_str.to_string());
}
Err(e) => {
warn!("Discarding header {}, error in .to_str(): {}", k, e);
}
}
}
}

Ok(ret)
Ok(ObjectVersionHeaders {
content_type,
other,
})
}

pub(crate) fn next_timestamp(existing_object: Option<&Object>) -> u64 {
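
Editor's note: the hunks above show check_quotas gathering the bucket's counters but elide the comparison itself. A hedged sketch of the arithmetic it performs (an illustration under stated assumptions, not the verbatim code): the overwritten version's contribution is subtracted before comparing against the configured maxima.

// Sketch of the quota check, assuming `cnt_obj`/`cnt_size` come from the
// counter table and `prev_obj`/`prev_size` are the contribution of the
// object version being replaced (zero when there is none).
fn quota_ok(
    max_objects: Option<u64>,
    max_size: Option<u64>,
    cnt_obj: u64,
    cnt_size: u64,
    prev_obj: u64,
    prev_size: u64,
    new_size: u64,
) -> bool {
    let objects_after = cnt_obj - prev_obj + 1;
    let size_after = cnt_size - prev_size + new_size;
    max_objects.map_or(true, |m| objects_after <= m)
        && max_size.map_or(true, |m| size_after <= m)
}
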
@@ -1,4 +1,5 @@
use quick_xml::de::from_reader;
use std::sync::Arc;

use http_body_util::BodyExt;
use hyper::{Request, Response, StatusCode};
@@ -11,11 +12,15 @@ use crate::s3::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
use crate::signature::verify_signed_content;

use garage_model::bucket_table::*;
use garage_model::garage::Garage;
use garage_util::data::*;

pub async fn handle_get_website(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
let ReqCtx { bucket_params, .. } = ctx;
if let Some(website) = bucket_params.website_config.get() {
pub async fn handle_get_website(bucket: &Bucket) -> Result<Response<ResBody>, Error> {
let param = bucket
.params()
.ok_or_internal_error("Bucket should not be deleted at this point")?;

if let Some(website) = param.website_config.get() {
let wc = WebsiteConfiguration {
xmlns: (),
error_document: website.error_document.as_ref().map(|v| Key {
@@ -39,18 +44,16 @@ pub async fn handle_get_website(ctx: ReqCtx) -> Result<Response<ResBody>, Error>
}
}

pub async fn handle_delete_website(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
let ReqCtx {
garage,
bucket_id,
mut bucket_params,
..
} = ctx;
bucket_params.website_config.update(None);
garage
.bucket_table
.insert(&Bucket::present(bucket_id, bucket_params))
.await?;
pub async fn handle_delete_website(
garage: Arc<Garage>,
mut bucket: Bucket,
) -> Result<Response<ResBody>, Error> {
let param = bucket
.params_mut()
.ok_or_internal_error("Bucket should not be deleted at this point")?;

param.website_config.update(None);
garage.bucket_table.insert(&bucket).await?;

Ok(Response::builder()
.status(StatusCode::NO_CONTENT)
@@ -58,33 +61,28 @@ pub async fn handle_delete_website(ctx: ReqCtx) -> Result<Response<ResBody>, Err
}

pub async fn handle_put_website(
ctx: ReqCtx,
garage: Arc<Garage>,
mut bucket: Bucket,
req: Request<ReqBody>,
content_sha256: Option<Hash>,
) -> Result<Response<ResBody>, Error> {
let ReqCtx {
garage,
bucket_id,
mut bucket_params,
..
} = ctx;

let body = BodyExt::collect(req.into_body()).await?.to_bytes();

if let Some(content_sha256) = content_sha256 {
verify_signed_content(content_sha256, &body[..])?;
}

let param = bucket
.params_mut()
.ok_or_internal_error("Bucket should not be deleted at this point")?;

let conf: WebsiteConfiguration = from_reader(&body as &[u8])?;
conf.validate()?;

bucket_params
param
.website_config
.update(Some(conf.into_garage_website_config()?));
garage
.bucket_table
.insert(&Bucket::present(bucket_id, bucket_params))
.await?;
garage.bucket_table.insert(&bucket).await?;

Ok(Response::builder()
.status(StatusCode::OK)
@@ -131,14 +131,6 @@ pub struct CompleteMultipartUploadResult {
pub key: Value,
#[serde(rename = "ETag")]
pub etag: Value,
#[serde(rename = "ChecksumCRC32")]
pub checksum_crc32: Option<Value>,
#[serde(rename = "ChecksumCRC32C")]
pub checksum_crc32c: Option<Value>,
#[serde(rename = "ChecksumSHA1")]
pub checksum_sha1: Option<Value>,
#[serde(rename = "ChecksumSHA256")]
pub checksum_sha256: Option<Value>,
}

#[derive(Debug, Serialize, PartialEq, Eq)]
@@ -205,14 +197,6 @@ pub struct PartItem {
pub part_number: IntValue,
#[serde(rename = "Size")]
pub size: IntValue,
#[serde(rename = "ChecksumCRC32")]
pub checksum_crc32: Option<Value>,
#[serde(rename = "ChecksumCRC32C")]
pub checksum_crc32c: Option<Value>,
#[serde(rename = "ChecksumSHA1")]
pub checksum_sha1: Option<Value>,
#[serde(rename = "ChecksumSHA256")]
pub checksum_sha256: Option<Value>,
}

#[derive(Debug, Serialize, PartialEq, Eq)]
@@ -516,10 +500,6 @@ mod tests {
bucket: Value("mybucket".to_string()),
key: Value("a/plop".to_string()),
etag: Value("\"3858f62230ac3c915f300c664312c11f-9\"".to_string()),
checksum_crc32: None,
checksum_crc32c: None,
checksum_sha1: Some(Value("ZJAnHyG8PeKz9tI8UTcHrJos39A=".into())),
checksum_sha256: None,
};
assert_eq!(
to_xml_with_header(&result)?,
@@ -529,7 +509,6 @@ mod tests {
<Bucket>mybucket</Bucket>\
<Key>a/plop</Key>\
<ETag>"3858f62230ac3c915f300c664312c11f-9"</ETag>\
<ChecksumSHA1>ZJAnHyG8PeKz9tI8UTcHrJos39A=</ChecksumSHA1>\
</CompleteMultipartUploadResult>"
);
Ok(())
@@ -801,22 +780,12 @@ mod tests {
last_modified: Value("2010-11-10T20:48:34.000Z".to_string()),
part_number: IntValue(2),
size: IntValue(10485760),
checksum_crc32: None,
checksum_crc32c: None,
checksum_sha256: Some(Value(
"5RQ3A5uk0w7ojNjvegohch4JRBBGN/cLhsNrPzfv/hA=".into(),
)),
checksum_sha1: None,
},
PartItem {
etag: Value("\"aaaa18db4cc2f85cedef654fccc4a4x8\"".to_string()),
last_modified: Value("2010-11-10T20:48:33.000Z".to_string()),
part_number: IntValue(3),
size: IntValue(10485760),
checksum_sha256: None,
checksum_crc32c: None,
checksum_crc32: Some(Value("ZJAnHyG8=".into())),
checksum_sha1: None,
},
],
initiator: Initiator {
@@ -851,14 +820,12 @@ mod tests {
<LastModified>2010-11-10T20:48:34.000Z</LastModified>\
<PartNumber>2</PartNumber>\
<Size>10485760</Size>\
<ChecksumSHA256>5RQ3A5uk0w7ojNjvegohch4JRBBGN/cLhsNrPzfv/hA=</ChecksumSHA256>\
</Part>\
<Part>\
<ETag>"aaaa18db4cc2f85cedef654fccc4a4x8"</ETag>\
<LastModified>2010-11-10T20:48:33.000Z</LastModified>\
<PartNumber>3</PartNumber>\
<Size>10485760</Size>\
<ChecksumCRC32>ZJAnHyG8=</ChecksumCRC32>\
</Part>\
<Initiator>\
<DisplayName>umat-user-11116a31-17b5-4fb7-9df5-b288870f11xx</DisplayName>\
@@ -2,44 +2,19 @@ use chrono::{DateTime, Utc};
use hmac::{Hmac, Mac};
use sha2::Sha256;

use hyper::{body::Incoming as IncomingBody, Request};

use garage_model::garage::Garage;
use garage_model::key_table::Key;
use garage_util::data::{sha256sum, Hash};

use error::*;

pub mod error;
pub mod payload;
pub mod streaming;

use error::*;

pub const SHORT_DATE: &str = "%Y%m%d";
pub const LONG_DATETIME: &str = "%Y%m%dT%H%M%SZ";

type HmacSha256 = Hmac<Sha256>;

pub async fn verify_request(
garage: &Garage,
mut req: Request<IncomingBody>,
service: &'static str,
) -> Result<(Request<streaming::ReqBody>, Key, Option<Hash>), Error> {
let (api_key, mut content_sha256) =
payload::check_payload_signature(&garage, &mut req, service).await?;
let api_key =
api_key.ok_or_else(|| Error::forbidden("Garage does not support anonymous access yet"))?;

let req = streaming::parse_streaming_body(
&api_key,
req,
&mut content_sha256,
&garage.config.s3_api.s3_region,
service,
)?;

Ok((req, api_key, content_sha256))
}

pub fn verify_signed_content(expected_sha256: Hash, body: &[u8]) -> Result<(), Error> {
if expected_sha256 != sha256sum(body) {
return Err(Error::bad_request(
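
Editor's note: both signature paths in this module ultimately recompute the request signature from a SigV4 signing key. The derivation is the standard AWS HMAC chain (grounded in the AWS SigV4 documentation; the helper names below are illustrative, not Garage's exact signing_hmac API):

use chrono::{DateTime, Utc};
use hmac::{Hmac, Mac};
use sha2::Sha256;

type HmacSha256 = Hmac<Sha256>;

fn hmac(key: &[u8], data: &[u8]) -> Vec<u8> {
    let mut mac = HmacSha256::new_from_slice(key).expect("HMAC accepts any key length");
    mac.update(data);
    mac.finalize().into_bytes().to_vec()
}

// Standard AWS SigV4 key derivation:
// kDate -> kRegion -> kService -> kSigning, then
// signature = hex(HMAC(kSigning, string_to_sign)).
fn signing_key(secret: &str, date: DateTime<Utc>, region: &str, service: &str) -> Vec<u8> {
    let k_date = hmac(
        format!("AWS4{}", secret).as_bytes(),
        date.format("%Y%m%d").to_string().as_bytes(),
    );
    let k_region = hmac(&k_date, region.as_bytes());
    let k_service = hmac(&k_region, service.as_bytes());
    hmac(&k_service, b"aws4_request")
}
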
@ -1,9 +1,7 @@
|
|||
use std::collections::HashMap;
|
||||
use std::convert::TryFrom;
|
||||
|
||||
use chrono::{DateTime, Duration, NaiveDateTime, TimeZone, Utc};
|
||||
use hmac::Mac;
|
||||
use hyper::header::{HeaderMap, HeaderName, HeaderValue, AUTHORIZATION, HOST};
|
||||
use hyper::{body::Incoming as IncomingBody, Method, Request};
|
||||
use sha2::{Digest, Sha256};
|
||||
|
||||
|
@ -19,98 +17,66 @@ use super::{compute_scope, signing_hmac};
|
|||
use crate::encoding::uri_encode;
|
||||
use crate::signature::error::*;
|
||||
|
||||
pub const X_AMZ_ALGORITHM: HeaderName = HeaderName::from_static("x-amz-algorithm");
|
||||
pub const X_AMZ_CREDENTIAL: HeaderName = HeaderName::from_static("x-amz-credential");
|
||||
pub const X_AMZ_DATE: HeaderName = HeaderName::from_static("x-amz-date");
|
||||
pub const X_AMZ_EXPIRES: HeaderName = HeaderName::from_static("x-amz-expires");
|
||||
pub const X_AMZ_SIGNEDHEADERS: HeaderName = HeaderName::from_static("x-amz-signedheaders");
|
||||
pub const X_AMZ_SIGNATURE: HeaderName = HeaderName::from_static("x-amz-signature");
|
||||
pub const X_AMZ_CONTENT_SH256: HeaderName = HeaderName::from_static("x-amz-content-sha256");
|
||||
|
||||
pub const AWS4_HMAC_SHA256: &str = "AWS4-HMAC-SHA256";
|
||||
pub const UNSIGNED_PAYLOAD: &str = "UNSIGNED-PAYLOAD";
|
||||
pub const STREAMING_AWS4_HMAC_SHA256_PAYLOAD: &str = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD";
|
||||
|
||||
pub type QueryMap = HeaderMap<QueryValue>;
|
||||
pub struct QueryValue {
|
||||
/// Original key with potential uppercase characters,
|
||||
/// for use in signature calculation
|
||||
key: String,
|
||||
value: String,
|
||||
}
|
||||
|
||||
pub async fn check_payload_signature(
|
||||
garage: &Garage,
|
||||
request: &mut Request<IncomingBody>,
|
||||
service: &'static str,
|
||||
request: &Request<IncomingBody>,
|
||||
) -> Result<(Option<Key>, Option<Hash>), Error> {
|
||||
let query = parse_query_map(request.uri())?;
|
||||
let mut headers = HashMap::new();
|
||||
for (key, val) in request.headers() {
|
||||
headers.insert(key.to_string(), val.to_str()?.to_string());
|
||||
}
|
||||
if let Some(query) = request.uri().query() {
|
||||
let query_pairs = url::form_urlencoded::parse(query.as_bytes());
|
||||
for (key, val) in query_pairs {
|
||||
headers.insert(key.to_lowercase(), val.to_string());
|
||||
}
|
||||
}
|
||||
|
||||
if query.contains_key(&X_AMZ_ALGORITHM) {
|
||||
// We check for presigned-URL-style authentification first, because
|
||||
// the browser or someting else could inject an Authorization header
|
||||
// that is totally unrelated to AWS signatures.
|
||||
check_presigned_signature(garage, service, request, query).await
|
||||
} else if request.headers().contains_key(AUTHORIZATION) {
|
||||
check_standard_signature(garage, service, request, query).await
|
||||
let authorization = if let Some(authorization) = headers.get("authorization") {
|
||||
parse_authorization(authorization, &headers)?
|
||||
} else if let Some(algorithm) = headers.get("x-amz-algorithm") {
|
||||
parse_query_authorization(algorithm, &headers)?
|
||||
} else {
|
||||
// Unsigned (anonymous) request
|
||||
let content_sha256 = request
|
||||
.headers()
|
||||
.get("x-amz-content-sha256")
|
||||
.filter(|c| c.as_bytes() != UNSIGNED_PAYLOAD.as_bytes());
|
||||
if let Some(content_sha256) = content_sha256 {
|
||||
let content_sha256 = headers.get("x-amz-content-sha256");
|
||||
if let Some(content_sha256) = content_sha256.filter(|c| "UNSIGNED-PAYLOAD" != c.as_str()) {
|
||||
let sha256 = hex::decode(content_sha256)
|
||||
.ok()
|
||||
.and_then(|bytes| Hash::try_from(&bytes))
|
||||
.ok_or_bad_request("Invalid content sha256 hash")?;
|
||||
Ok((None, Some(sha256)))
|
||||
return Ok((None, Some(sha256)));
|
||||
} else {
|
||||
Ok((None, None))
|
||||
return Ok((None, None));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn check_standard_signature(
|
||||
garage: &Garage,
|
||||
service: &'static str,
|
||||
request: &Request<IncomingBody>,
|
||||
query: QueryMap,
|
||||
) -> Result<(Option<Key>, Option<Hash>), Error> {
|
||||
let authorization = Authorization::parse_header(request.headers())?;
|
||||
|
||||
// Verify that all necessary request headers are included in signed_headers
|
||||
// The following must be included for all signatures:
|
||||
// - the Host header (mandatory)
|
||||
// - all x-amz-* headers used in the request
|
||||
// AWS also indicates that the Content-Type header should be signed if
|
||||
// it is used, but Minio client doesn't sign it so we don't check it for compatibility.
|
||||
let signed_headers = split_signed_headers(&authorization)?;
|
||||
verify_signed_headers(request.headers(), &signed_headers)?;
|
||||
};
|
||||
|
||||
let canonical_request = canonical_request(
|
||||
service,
|
||||
request.method(),
|
||||
request.uri().path(),
|
||||
&query,
|
||||
request.headers(),
|
||||
&signed_headers,
|
||||
request.uri(),
|
||||
&headers,
|
||||
&authorization.signed_headers,
|
||||
&authorization.content_sha256,
|
||||
)?;
|
||||
let string_to_sign = string_to_sign(
|
||||
&authorization.date,
|
||||
&authorization.scope,
|
||||
&canonical_request,
|
||||
);
|
||||
let (_, scope) = parse_credential(&authorization.credential)?;
|
||||
let string_to_sign = string_to_sign(&authorization.date, &scope, &canonical_request);
|
||||
|
||||
trace!("canonical request:\n{}", canonical_request);
|
||||
trace!("string to sign:\n{}", string_to_sign);
|
||||
|
||||
let key = verify_v4(garage, service, &authorization, string_to_sign.as_bytes()).await?;
|
||||
let key = verify_v4(
|
||||
garage,
|
||||
service,
|
||||
&authorization.credential,
|
||||
&authorization.date,
|
||||
&authorization.signature,
|
||||
string_to_sign.as_bytes(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let content_sha256 = if authorization.content_sha256 == UNSIGNED_PAYLOAD {
|
||||
let content_sha256 = if authorization.content_sha256 == "UNSIGNED-PAYLOAD" {
|
||||
None
|
||||
} else if authorization.content_sha256 == STREAMING_AWS4_HMAC_SHA256_PAYLOAD {
|
||||
} else if authorization.content_sha256 == "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" {
|
||||
let bytes = hex::decode(authorization.signature).ok_or_bad_request("Invalid signature")?;
|
||||
Some(Hash::try_from(&bytes).ok_or_bad_request("Invalid signature")?)
|
||||
} else {
|
||||
|
@ -122,102 +88,124 @@ async fn check_standard_signature(
|
|||
Ok((Some(key), content_sha256))
|
||||
}
|
||||
|
||||
async fn check_presigned_signature(
|
||||
garage: &Garage,
|
||||
service: &'static str,
|
||||
request: &mut Request<IncomingBody>,
|
||||
mut query: QueryMap,
|
||||
) -> Result<(Option<Key>, Option<Hash>), Error> {
|
||||
let algorithm = query.get(&X_AMZ_ALGORITHM).unwrap();
|
||||
let authorization = Authorization::parse_presigned(&algorithm.value, &query)?;
|
||||
|
||||
// Verify that all necessary request headers are included in signed_headers
|
||||
// For AWSv4 pre-signed URLs, the following must be incldued:
|
||||
// - the Host header (mandatory)
|
||||
// - all x-amz-* headers used in the request
|
||||
let signed_headers = split_signed_headers(&authorization)?;
|
||||
verify_signed_headers(request.headers(), &signed_headers)?;
|
||||
|
||||
// The X-Amz-Signature value is passed as a query parameter,
|
||||
// but the signature cannot be computed from a string that contains itself.
|
||||
// AWS specifies that all query params except X-Amz-Signature are included
|
||||
// in the canonical request.
|
||||
query.remove(&X_AMZ_SIGNATURE);
|
||||
let canonical_request = canonical_request(
|
||||
service,
|
||||
request.method(),
|
||||
request.uri().path(),
|
||||
&query,
|
||||
request.headers(),
|
||||
&signed_headers,
|
||||
&authorization.content_sha256,
|
||||
)?;
|
||||
let string_to_sign = string_to_sign(
|
||||
&authorization.date,
|
||||
&authorization.scope,
|
||||
&canonical_request,
|
||||
);
|
||||
|
||||
trace!("canonical request (presigned url):\n{}", canonical_request);
|
||||
trace!("string to sign (presigned url):\n{}", string_to_sign);
|
||||
|
||||
let key = verify_v4(garage, service, &authorization, string_to_sign.as_bytes()).await?;
|
||||
|
||||
// In the page on presigned URLs, AWS specifies that if a signed query
|
||||
// parameter and a signed header of the same name have different values,
|
||||
// then an InvalidRequest error is raised.
|
||||
let headers_mut = request.headers_mut();
|
||||
for (name, value) in query.iter() {
|
||||
if let Some(existing) = headers_mut.get(name) {
|
||||
if signed_headers.contains(&name) && existing.as_bytes() != value.value.as_bytes() {
|
||||
return Err(Error::bad_request(format!(
|
||||
"Conflicting values for `{}` in query parameters and request headers",
|
||||
name
|
||||
)));
|
||||
}
|
||||
}
|
||||
if name.as_str().starts_with("x-amz-") {
|
||||
// Query parameters that start by x-amz- are actually intended to stand in for
|
||||
// headers that can't be added at the time the request is made.
|
||||
// What we do is just add them to the Request object as regular headers,
|
||||
// that will be handled downstream as if they were included like in a normal request.
|
||||
// (Here we allow such query parameters to override headers with the same name
|
||||
// that are not signed, however there is not much reason that this would happen)
|
||||
headers_mut.insert(
|
||||
name,
|
||||
HeaderValue::from_bytes(value.value.as_bytes())
|
||||
.ok_or_bad_request("invalid query parameter value")?,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Presigned URLs always use UNSIGNED-PAYLOAD,
|
||||
// so there is no sha256 hash to return.
|
||||
Ok((Some(key), None))
struct Authorization {
credential: String,
signed_headers: String,
signature: String,
content_sha256: String,
date: DateTime<Utc>,
}

pub fn parse_query_map(uri: &http::uri::Uri) -> Result<QueryMap, Error> {
let mut query = QueryMap::with_capacity(0);
if let Some(query_str) = uri.query() {
let query_pairs = url::form_urlencoded::parse(query_str.as_bytes());
for (key, val) in query_pairs {
let name =
HeaderName::from_bytes(key.as_bytes()).ok_or_bad_request("Invalid header name")?;
fn parse_authorization(
authorization: &str,
headers: &HashMap<String, String>,
) -> Result<Authorization, Error> {
let first_space = authorization
.find(' ')
.ok_or_bad_request("Authorization field too short")?;
let (auth_kind, rest) = authorization.split_at(first_space);

let value = QueryValue {
key: key.to_string(),
value: val.into_owned(),
if auth_kind != "AWS4-HMAC-SHA256" {
return Err(Error::bad_request("Unsupported authorization method"));
}

let mut auth_params = HashMap::new();
for auth_part in rest.split(',') {
let auth_part = auth_part.trim();
let eq = auth_part
.find('=')
.ok_or_bad_request("Field without value in authorization header")?;
let (key, value) = auth_part.split_at(eq);
auth_params.insert(key.to_string(), value.trim_start_matches('=').to_string());
}

let cred = auth_params
.get("Credential")
.ok_or_bad_request("Could not find Credential in Authorization field")?;

let content_sha256 = headers
.get("x-amz-content-sha256")
.ok_or_bad_request("Missing X-Amz-Content-Sha256 field")?;

let date = headers
.get("x-amz-date")
.ok_or_bad_request("Missing X-Amz-Date field")
.map_err(Error::from)
.and_then(|d| parse_date(d))?;

if Utc::now() - date > Duration::hours(24) {
return Err(Error::bad_request("Date is too old".to_string()));
}

let auth = Authorization {
credential: cred.to_string(),
signed_headers: auth_params
.get("SignedHeaders")
.ok_or_bad_request("Could not find SignedHeaders in Authorization field")?
.to_string(),
signature: auth_params
.get("Signature")
.ok_or_bad_request("Could not find Signature in Authorization field")?
.to_string(),
content_sha256: content_sha256.to_string(),
date,
};
Ok(auth)
}

if query.insert(name, value).is_some() {
return Err(Error::bad_request(format!(
"duplicate query parameter: `{}`",
key
)));
fn parse_query_authorization(
algorithm: &str,
headers: &HashMap<String, String>,
) -> Result<Authorization, Error> {
if algorithm != "AWS4-HMAC-SHA256" {
return Err(Error::bad_request(
"Unsupported authorization method".to_string(),
));
}

let cred = headers
.get("x-amz-credential")
.ok_or_bad_request("X-Amz-Credential not found in query parameters")?;
let signed_headers = headers
.get("x-amz-signedheaders")
.ok_or_bad_request("X-Amz-SignedHeaders not found in query parameters")?;
let signature = headers
.get("x-amz-signature")
.ok_or_bad_request("X-Amz-Signature not found in query parameters")?;
let content_sha256 = headers
.get("x-amz-content-sha256")
.map(|x| x.as_str())
.unwrap_or("UNSIGNED-PAYLOAD");

let duration = headers
.get("x-amz-expires")
.ok_or_bad_request("X-Amz-Expires not found in query parameters")?
.parse()
.map_err(|_| Error::bad_request("X-Amz-Expires is not a number".to_string()))?;

if duration > 7 * 24 * 3600 {
return Err(Error::bad_request(
"X-Amz-Expires may not exceed a week".to_string(),
));
}

let date = headers
.get("x-amz-date")
.ok_or_bad_request("Missing X-Amz-Date field")
.map_err(Error::from)
.and_then(|d| parse_date(d))?;

if Utc::now() - date > Duration::seconds(duration) {
return Err(Error::bad_request("Date is too old".to_string()));
}
Ok(query)

Ok(Authorization {
credential: cred.to_string(),
signed_headers: signed_headers.to_string(),
signature: signature.to_string(),
content_sha256: content_sha256.to_string(),
date,
})
}

fn parse_credential(cred: &str) -> Result<(String, String), Error> {
@ -231,39 +219,11 @@ fn parse_credential(cred: &str) -> Result<(String, String), Error> {
))
}

fn split_signed_headers(authorization: &Authorization) -> Result<Vec<HeaderName>, Error> {
let mut signed_headers = authorization
.signed_headers
.split(';')
.map(HeaderName::try_from)
.collect::<Result<Vec<HeaderName>, _>>()
.ok_or_bad_request("invalid header name")?;
signed_headers.sort_by(|h1, h2| h1.as_str().cmp(h2.as_str()));
Ok(signed_headers)
}

fn verify_signed_headers(headers: &HeaderMap, signed_headers: &[HeaderName]) -> Result<(), Error> {
if !signed_headers.contains(&HOST) {
return Err(Error::bad_request("Header `Host` should be signed"));
}
for (name, _) in headers.iter() {
if name.as_str().starts_with("x-amz-") {
if !signed_headers.contains(name) {
return Err(Error::bad_request(format!(
"Header `{}` should be signed",
name
)));
}
}
}
Ok(())
}
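
For reference: a small illustration (not part of the diff) of the invariant that split_signed_headers and verify_signed_headers enforce together. All header names and values here are made up.

use http::header::{HeaderMap, HeaderName, HeaderValue, HOST};

fn demo() {
    // `Host` and every x-amz-* header present on the request must appear
    // in the signed-headers list for verification to succeed.
    let signed: Vec<HeaderName> = vec![HOST, HeaderName::from_static("x-amz-date")];
    let mut headers = HeaderMap::new();
    headers.insert(HOST, HeaderValue::from_static("garage.example.com"));
    headers.insert("x-amz-date", HeaderValue::from_static("20240101T000000Z"));
    // verify_signed_headers(&headers, &signed) accepts this request;
    // adding an unsigned `x-amz-meta-foo` header would make it fail.
}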

pub fn string_to_sign(datetime: &DateTime<Utc>, scope_string: &str, canonical_req: &str) -> String {
let mut hasher = Sha256::default();
hasher.update(canonical_req.as_bytes());
[
AWS4_HMAC_SHA256,
"AWS4-HMAC-SHA256",
&datetime.format(LONG_DATETIME).to_string(),
scope_string,
&hex::encode(hasher.finalize().as_slice()),

@ -274,12 +234,11 @@ pub fn string_to_sign(datetime: &DateTime<Utc>, scope_string: &str, canonical_re
pub fn canonical_request(
service: &'static str,
method: &Method,
canonical_uri: &str,
query: &QueryMap,
headers: &HeaderMap,
signed_headers: &[HeaderName],
uri: &hyper::Uri,
headers: &HashMap<String, String>,
signed_headers: &str,
content_sha256: &str,
) -> Result<String, Error> {
) -> String {
// There seems to be evidence that in AWSv4 signatures, the path component is url-encoded
// a second time when building the canonical request, as specified in this documentation page:
// -> https://docs.aws.amazon.com/rolesanywhere/latest/userguide/authentication-sign-process.html

@ -309,46 +268,49 @@ pub fn canonical_request(
// it mentions it in the comments (same link to the source code as above).
// We make the explicit choice of NOT normalizing paths in the K2V API because doing so
// would make non-normalized paths invalid K2V partition keys, and we don't want that.
let canonical_uri: std::borrow::Cow<str> = if service != "s3" {
uri_encode(canonical_uri, false).into()
let path: std::borrow::Cow<str> = if service != "s3" {
uri_encode(uri.path(), false).into()
} else {
canonical_uri.into()
uri.path().into()
};
[
method.as_str(),
&path,
&canonical_query_string(uri),
&canonical_header_string(headers, signed_headers),
"",
signed_headers,
content_sha256,
]
.join("\n")
}

// Canonical query string from the passed QueryMap
let canonical_query_string = {
let mut items = Vec::with_capacity(query.len());
for (_, QueryValue { key, value }) in query.iter() {
items.push(uri_encode(&key, true) + "=" + &uri_encode(&value, true));
}
fn canonical_header_string(headers: &HashMap<String, String>, signed_headers: &str) -> String {
let signed_headers_vec = signed_headers.split(';').collect::<Vec<_>>();
let mut items = headers
.iter()
.filter(|(key, _)| signed_headers_vec.contains(&key.as_str()))
.collect::<Vec<_>>();
items.sort_by(|(k1, _), (k2, _)| k1.cmp(k2));
items
.iter()
.map(|(key, value)| key.to_lowercase() + ":" + value.trim())
.collect::<Vec<_>>()
.join("\n")
}

fn canonical_query_string(uri: &hyper::Uri) -> String {
if let Some(query) = uri.query() {
let query_pairs = url::form_urlencoded::parse(query.as_bytes());
let mut items = query_pairs
.filter(|(key, _)| key != "X-Amz-Signature")
.map(|(key, value)| uri_encode(&key, true) + "=" + &uri_encode(&value, true))
.collect::<Vec<_>>();
items.sort();
items.join("&")
};

// Canonical header string calculated from signed headers
let canonical_header_string = signed_headers
.iter()
.map(|name| {
let value = headers
.get(name)
.ok_or_bad_request(format!("signed header `{}` is not present", name))?;
let value = std::str::from_utf8(value.as_bytes())?;
Ok(format!("{}:{}", name.as_str(), value.trim()))
})
.collect::<Result<Vec<String>, Error>>()?
.join("\n");
let signed_headers = signed_headers.join(";");

let list = [
method.as_str(),
&canonical_uri,
&canonical_query_string,
&canonical_header_string,
"",
&signed_headers,
content_sha256,
];
Ok(list.join("\n"))
} else {
"".to_string()
}
}
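
For reference: both versions of canonical_request above assemble the same seven newline-joined fields mandated by AWS Signature v4; schematically:

HTTPMethod
CanonicalUri
CanonicalQueryString   (sorted, URL-encoded, X-Amz-Signature excluded)
CanonicalHeaders       (lowercased "name:value" pairs of the signed headers)
                       (empty line terminating the header block)
SignedHeaders          (header names joined by ';')
HashedPayload          (hex sha256 of the body, or UNSIGNED-PAYLOAD)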

pub fn parse_date(date: &str) -> Result<DateTime<Utc>, Error> {

@ -360,203 +322,38 @@ pub fn parse_date(date: &str) -> Result<DateTime<Utc>, Error> {
pub async fn verify_v4(
garage: &Garage,
service: &str,
auth: &Authorization,
credential: &str,
date: &DateTime<Utc>,
signature: &str,
payload: &[u8],
) -> Result<Key, Error> {
let scope_expected = compute_scope(&auth.date, &garage.config.s3_api.s3_region, service);
if auth.scope != scope_expected {
return Err(Error::AuthorizationHeaderMalformed(auth.scope.to_string()));
let (key_id, scope) = parse_credential(credential)?;

let scope_expected = compute_scope(date, &garage.config.s3_api.s3_region, service);
if scope != scope_expected {
return Err(Error::AuthorizationHeaderMalformed(scope.to_string()));
}

let key = garage
.key_table
.get(&EmptyKey, &auth.key_id)
.get(&EmptyKey, &key_id)
.await?
.filter(|k| !k.state.is_deleted())
.ok_or_else(|| Error::forbidden(format!("No such key: {}", &auth.key_id)))?;
.ok_or_else(|| Error::forbidden(format!("No such key: {}", &key_id)))?;
let key_p = key.params().unwrap();

let mut hmac = signing_hmac(
&auth.date,
date,
&key_p.secret_key,
&garage.config.s3_api.s3_region,
service,
)
.ok_or_internal_error("Unable to build signing HMAC")?;
hmac.update(payload);
let signature =
hex::decode(&auth.signature).map_err(|_| Error::forbidden("Invalid signature"))?;
if hmac.verify_slice(&signature).is_err() {
return Err(Error::forbidden("Invalid signature"));
let our_signature = hex::encode(hmac.finalize().into_bytes());
if signature != our_signature {
return Err(Error::forbidden("Invalid signature".to_string()));
}

Ok(key)
}
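
For reference: `signing_hmac`, whose definition is not part of this hunk, is presumed to derive the standard AWSv4 signing key that verify_v4 then applies to the string-to-sign. A self-contained sketch of that derivation, using the hmac and sha2 crates already in Garage's dependency tree:

use hmac::{Hmac, Mac};
use sha2::Sha256;

type HmacSha256 = Hmac<Sha256>;

// Standard SigV4 key-derivation chain; `date` is the YYYYMMDD day of the scope.
fn derive_signing_key(secret_key: &str, date: &str, region: &str, service: &str) -> Vec<u8> {
    fn step(key: &[u8], msg: &str) -> Vec<u8> {
        let mut mac = HmacSha256::new_from_slice(key).expect("HMAC accepts any key length");
        mac.update(msg.as_bytes());
        mac.finalize().into_bytes().to_vec()
    }
    let k_date = step(format!("AWS4{}", secret_key).as_bytes(), date);
    let k_region = step(&k_date, region);
    let k_service = step(&k_region, service);
    step(&k_service, "aws4_request")
}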

// ============ Authorization header, or X-Amz-* query params =========

pub struct Authorization {
key_id: String,
scope: String,
signed_headers: String,
signature: String,
content_sha256: String,
date: DateTime<Utc>,
}

impl Authorization {
fn parse_header(headers: &HeaderMap) -> Result<Self, Error> {
let authorization = headers
.get(AUTHORIZATION)
.ok_or_bad_request("Missing authorization header")?
.to_str()?;

let (auth_kind, rest) = authorization
.split_once(' ')
.ok_or_bad_request("Authorization field too short")?;

if auth_kind != AWS4_HMAC_SHA256 {
return Err(Error::bad_request("Unsupported authorization method"));
}

let mut auth_params = HashMap::new();
for auth_part in rest.split(',') {
let auth_part = auth_part.trim();
let eq = auth_part
.find('=')
.ok_or_bad_request("Field without value in authorization header")?;
let (key, value) = auth_part.split_at(eq);
auth_params.insert(key.to_string(), value.trim_start_matches('=').to_string());
}

let cred = auth_params
.get("Credential")
.ok_or_bad_request("Could not find Credential in Authorization field")?;
let signed_headers = auth_params
.get("SignedHeaders")
.ok_or_bad_request("Could not find SignedHeaders in Authorization field")?
.to_string();
let signature = auth_params
.get("Signature")
.ok_or_bad_request("Could not find Signature in Authorization field")?
.to_string();

let content_sha256 = headers
.get(X_AMZ_CONTENT_SH256)
.ok_or_bad_request("Missing X-Amz-Content-Sha256 field")?;

let date = headers
.get(X_AMZ_DATE)
.ok_or_bad_request("Missing X-Amz-Date field")
.map_err(Error::from)?
.to_str()?;
let date = parse_date(date)?;

if Utc::now() - date > Duration::hours(24) {
return Err(Error::bad_request("Date is too old".to_string()));
}

let (key_id, scope) = parse_credential(cred)?;
let auth = Authorization {
key_id,
scope,
signed_headers,
signature,
content_sha256: content_sha256.to_str()?.to_string(),
date,
};
Ok(auth)
}

fn parse_presigned(algorithm: &str, query: &QueryMap) -> Result<Self, Error> {
if algorithm != AWS4_HMAC_SHA256 {
return Err(Error::bad_request(
"Unsupported authorization method".to_string(),
));
}

let cred = query
.get(&X_AMZ_CREDENTIAL)
.ok_or_bad_request("X-Amz-Credential not found in query parameters")?;
let signed_headers = query
.get(&X_AMZ_SIGNEDHEADERS)
.ok_or_bad_request("X-Amz-SignedHeaders not found in query parameters")?;
let signature = query
.get(&X_AMZ_SIGNATURE)
.ok_or_bad_request("X-Amz-Signature not found in query parameters")?;

let duration = query
.get(&X_AMZ_EXPIRES)
.ok_or_bad_request("X-Amz-Expires not found in query parameters")?
.value
.parse()
.map_err(|_| Error::bad_request("X-Amz-Expires is not a number".to_string()))?;

if duration > 7 * 24 * 3600 {
return Err(Error::bad_request(
"X-Amz-Expires may not exceed a week".to_string(),
));
}

let date = query
.get(&X_AMZ_DATE)
.ok_or_bad_request("Missing X-Amz-Date field")?;
let date = parse_date(&date.value)?;

if Utc::now() - date > Duration::seconds(duration) {
return Err(Error::bad_request("Date is too old".to_string()));
}

let (key_id, scope) = parse_credential(&cred.value)?;
Ok(Authorization {
key_id,
scope,
signed_headers: signed_headers.value.clone(),
signature: signature.value.clone(),
content_sha256: UNSIGNED_PAYLOAD.to_string(),
date,
})
}

pub(crate) fn parse_form(params: &HeaderMap) -> Result<Self, Error> {
let algorithm = params
.get(X_AMZ_ALGORITHM)
.ok_or_bad_request("Missing X-Amz-Algorithm header")?
.to_str()?;
if algorithm != AWS4_HMAC_SHA256 {
return Err(Error::bad_request(
"Unsupported authorization method".to_string(),
));
}

let credential = params
.get(X_AMZ_CREDENTIAL)
.ok_or_else(|| Error::forbidden("Garage does not support anonymous access yet"))?
.to_str()?;
let signature = params
.get(X_AMZ_SIGNATURE)
.ok_or_bad_request("No signature was provided")?
.to_str()?
.to_string();
let date = params
.get(X_AMZ_DATE)
.ok_or_bad_request("No date was provided")?
.to_str()?;
let date = parse_date(date)?;

if Utc::now() - date > Duration::hours(24) {
return Err(Error::bad_request("Date is too old".to_string()));
}

let (key_id, scope) = parse_credential(credential)?;
let auth = Authorization {
key_id,
scope,
signed_headers: "".to_string(),
signature,
content_sha256: UNSIGNED_PAYLOAD.to_string(),
date,
};
Ok(auth)
}
}
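
For reference: parse_credential itself is elided by this diff hunk; what it has to split is the SigV4 credential string. A hypothetical minimal version (the function name and the example key id are made up):

// Credential strings look like "<key_id>/<YYYYMMDD>/<region>/<service>/aws4_request";
// the key id is everything before the first '/', the scope is the rest.
fn split_credential(cred: &str) -> Option<(&str, &str)> {
    cred.split_once('/')
}

// split_credential("GK31c2f218a2e44f485b94239e/20240101/garage/s3/aws4_request")
//   == Some(("GK31c2f218a2e44f485b94239e", "20240101/garage/s3/aws4_request"))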

@ -15,11 +15,6 @@ use super::{compute_scope, sha256sum, HmacSha256, LONG_DATETIME};
use crate::helpers::*;
use crate::signature::error::*;
use crate::signature::payload::{
STREAMING_AWS4_HMAC_SHA256_PAYLOAD, X_AMZ_CONTENT_SH256, X_AMZ_DATE,
};

pub const AWS4_HMAC_SHA256_PAYLOAD: &str = "AWS4-HMAC-SHA256-PAYLOAD";

pub type ReqBody = BoxBody<Error>;

@ -30,8 +25,8 @@ pub fn parse_streaming_body(
region: &str,
service: &str,
) -> Result<Request<ReqBody>, Error> {
match req.headers().get(X_AMZ_CONTENT_SH256) {
Some(header) if header == STREAMING_AWS4_HMAC_SHA256_PAYLOAD => {
match req.headers().get("x-amz-content-sha256") {
Some(header) if header == "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" => {
let signature = content_sha256
.take()
.ok_or_bad_request("No signature provided")?;

@ -44,7 +39,7 @@ pub fn parse_streaming_body(
let date = req
.headers()
.get(X_AMZ_DATE)
.get("x-amz-date")
.ok_or_bad_request("Missing X-Amz-Date field")?
.to_str()?;
let date: NaiveDateTime = NaiveDateTime::parse_from_str(date, LONG_DATETIME)

@ -80,7 +75,7 @@ fn compute_streaming_payload_signature(
content_sha256: Hash,
) -> Result<Hash, Error> {
let string_to_sign = [
AWS4_HMAC_SHA256_PAYLOAD,
"AWS4-HMAC-SHA256-PAYLOAD",
&date.format(LONG_DATETIME).to_string(),
scope,
&hex::encode(previous_signature),

@ -1,6 +1,6 @@
[package]
name = "garage_block"
version = "1.0.1"
version = "0.9.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"

@ -96,7 +96,7 @@ impl DataBlock {
}
}

pub fn zstd_encode<R: std::io::Read>(mut source: R, level: i32) -> std::io::Result<Vec<u8>> {
fn zstd_encode<R: std::io::Read>(mut source: R, level: i32) -> std::io::Result<Vec<u8>> {
let mut result = Vec::<u8>::new();
let mut encoder = Encoder::new(&mut result, level)?;
encoder.include_checksum(true)?;

@ -1,4 +1,3 @@
use std::collections::HashMap;
use std::path::PathBuf;

use serde::{Deserialize, Serialize};

@ -14,12 +13,9 @@ const DRIVE_NPART: usize = 1024;

const HASH_DRIVE_BYTES: (usize, usize) = (2, 3);

const MARKER_FILE_NAME: &str = "garage-marker";

#[derive(Serialize, Deserialize, Debug, Clone)]
pub(crate) struct DataLayout {
pub(crate) data_dirs: Vec<DataDir>,
markers: HashMap<PathBuf, String>,

/// Primary storage location (index in data_dirs) for each partition
/// = the location where the data is supposed to be, blocks are always

@ -79,17 +75,16 @@ impl DataLayout {

Ok(Self {
data_dirs,
markers: HashMap::new(),
part_prim,
part_sec,
})
}

pub(crate) fn update(self, dirs: &DataDirEnum) -> Result<Self, Error> {
pub(crate) fn update(&mut self, dirs: &DataDirEnum) -> Result<(), Error> {
// Make list of new data directories, exit if nothing changed
let data_dirs = make_data_dirs(dirs)?;
if data_dirs == self.data_dirs {
return Ok(self);
return Ok(());
}

let total_cap = data_dirs.iter().filter_map(|x| x.capacity()).sum::<u64>();

@ -219,43 +214,11 @@ impl DataLayout {
}

// Apply newly generated config
Ok(Self {
*self = Self {
data_dirs,
markers: self.markers,
part_prim,
part_sec,
})
}

pub(crate) fn check_markers(&mut self) -> Result<(), Error> {
let data_dirs = &self.data_dirs;
self.markers
.retain(|k, _| data_dirs.iter().any(|x| x.path == *k));

for dir in self.data_dirs.iter() {
let mut marker_path = dir.path.clone();
marker_path.push(MARKER_FILE_NAME);
let existing_marker = std::fs::read_to_string(&marker_path).ok();
match (existing_marker, self.markers.get(&dir.path)) {
(Some(m1), Some(m2)) => {
if m1 != *m2 {
return Err(Error::Message(format!("Mismatched content for marker file `{}` in data directory `{}`. If you moved data directories or changed their mountpoints, you should remove the `data_layout` file in Garage's metadata directory and restart Garage.", MARKER_FILE_NAME, dir.path.display())));
}
}
(None, Some(_)) => {
return Err(Error::Message(format!("Could not find expected marker file `{}` in data directory `{}`, make sure this data directory is mounted correctly.", MARKER_FILE_NAME, dir.path.display())));
}
(Some(mkr), None) => {
self.markers.insert(dir.path.clone(), mkr);
}
(None, None) => {
let mkr = hex::encode(garage_util::data::gen_uuid().as_slice());
std::fs::write(&marker_path, &mkr)?;
self.markers.insert(dir.path.clone(), mkr);
}
}
}

};
Ok(())
}

@ -292,7 +255,6 @@ impl DataLayout {
pub(crate) fn without_secondary_locations(&self) -> Self {
Self {
data_dirs: self.data_dirs.clone(),
markers: self.markers.clone(),
part_prim: self.part_prim.clone(),
part_sec: self.part_sec.iter().map(|_| vec![]).collect::<Vec<_>>(),
}

@ -360,12 +322,14 @@ fn make_data_dirs(dirs: &DataDirEnum) -> Result<Vec<DataDir>, Error> {
fn dir_not_empty(path: &PathBuf) -> Result<bool, Error> {
for entry in std::fs::read_dir(&path)? {
let dir = entry?;
let ft = dir.file_type()?;
let name = dir.file_name().into_string().ok();
if ft.is_file() && name.as_deref() == Some(MARKER_FILE_NAME) {
return Ok(true);
}
if ft.is_dir() && name.and_then(|hex| hex::decode(&hex).ok()).is_some() {
if dir.file_type()?.is_dir()
&& dir
.file_name()
.into_string()
.ok()
.and_then(|hex| hex::decode(&hex).ok())
.is_some()
{
return Ok(true);
}
}
@ -9,6 +9,3 @@ mod block;
mod layout;
mod metrics;
mod rc;

pub use block::zstd_encode;
pub use rc::CalculateRefcount;

@ -1,4 +1,3 @@
use std::convert::TryInto;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;

@ -11,7 +10,7 @@ use serde::{Deserialize, Serialize};

use tokio::fs;
use tokio::io::{AsyncReadExt, AsyncWriteExt, BufReader};
use tokio::sync::{mpsc, Mutex, MutexGuard, Semaphore};
use tokio::sync::{mpsc, Mutex, MutexGuard};

use opentelemetry::{
trace::{FutureExt as OtelFutureExt, TraceContextExt, Tracer},

@ -23,7 +22,7 @@ use garage_net::stream::{read_stream_to_end, stream_asyncread, ByteStream};
use garage_db as db;

use garage_util::background::{vars, BackgroundRunner};
use garage_util::config::Config;
use garage_util::config::DataDirEnum;
use garage_util::data::*;
use garage_util::error::*;
use garage_util::metrics::RecordDuration;

@ -85,16 +84,14 @@ pub struct BlockManager {

data_fsync: bool,
compression_level: Option<i32>,
disable_scrub: bool,

mutation_lock: Vec<Mutex<BlockManagerLocked>>,

pub rc: BlockRc,
pub(crate) rc: BlockRc,
pub resync: BlockResyncManager,

pub(crate) system: Arc<System>,
pub(crate) endpoint: Arc<Endpoint<BlockRpc, Self>>,
buffer_kb_semaphore: Arc<Semaphore>,

pub(crate) metrics: BlockManagerMetrics,

@ -122,22 +119,24 @@ struct BlockManagerLocked();
impl BlockManager {
pub fn new(
db: &db::Db,
config: &Config,
data_dir: DataDirEnum,
data_fsync: bool,
compression_level: Option<i32>,
replication: TableShardedReplication,
system: Arc<System>,
) -> Result<Arc<Self>, Error> {
// Load or compute layout, i.e. assignment of data blocks to the different data directories
let data_layout_persister: Persister<DataLayout> =
Persister::new(&system.metadata_dir, "data_layout");
let mut data_layout = match data_layout_persister.load() {
Ok(layout) => layout
.update(&config.data_dir)
.ok_or_message("invalid data_dir config")?,
Err(_) => {
DataLayout::initialize(&config.data_dir).ok_or_message("invalid data_dir config")?
let data_layout = match data_layout_persister.load() {
Ok(mut layout) => {
layout
.update(&data_dir)
.ok_or_message("invalid data_dir config")?;
layout
}
Err(_) => DataLayout::initialize(&data_dir).ok_or_message("invalid data_dir config")?,
};
data_layout.check_markers()?;
data_layout_persister
.save(&data_layout)
.expect("cannot save data_layout");

@ -154,14 +153,11 @@ impl BlockManager {
.netapp
.endpoint("garage_block/manager.rs/Rpc".to_string());

let buffer_kb_semaphore = Arc::new(Semaphore::new(config.block_ram_buffer_max / 1024));

let metrics = BlockManagerMetrics::new(
config.compression_level,
rc.rc_table.clone(),
compression_level,
rc.rc.clone(),
resync.queue.clone(),
resync.errors.clone(),
buffer_kb_semaphore.clone(),
);

let scrub_persister = PersisterShared::new(&system.metadata_dir, "scrub_info");

@ -170,9 +166,8 @@ impl BlockManager {
replication,
data_layout: ArcSwap::new(Arc::new(data_layout)),
data_layout_persister,
data_fsync: config.data_fsync,
disable_scrub: config.disable_scrub,
compression_level: config.compression_level,
data_fsync,
compression_level,
mutation_lock: vec![(); MUTEX_COUNT]
.iter()
.map(|_| Mutex::new(BlockManagerLocked()))

@ -181,7 +176,6 @@ impl BlockManager {
resync,
system,
endpoint,
buffer_kb_semaphore,
metrics,
scrub_persister,
tx_scrub_command: ArcSwapOption::new(None),

@ -200,7 +194,6 @@ impl BlockManager {
}

// Spawn scrub worker
if !self.disable_scrub {
let (scrub_tx, scrub_rx) = mpsc::channel(1);
self.tx_scrub_command.store(Some(Arc::new(scrub_tx)));
bg.spawn_worker(ScrubWorker::new(

@ -209,12 +202,10 @@ impl BlockManager {
self.scrub_persister.clone(),
));
}
}

pub fn register_bg_vars(&self, vars: &mut vars::BgVars) {
self.resync.register_bg_vars(vars);

if !self.disable_scrub {
vars.register_rw(
&self.scrub_persister,
"scrub-tranquility",

@ -231,28 +222,15 @@ impl BlockManager {
p.get_with(|x| x.corruptions_detected)
});
}
}

/// Initialization: set how block references are recalculated
/// for repair operations
pub fn set_recalc_rc(&self, recalc: Vec<CalculateRefcount>) {
self.rc.recalc_rc.store(Some(Arc::new(recalc)));
}

/// Ask nodes that might have a (possibly compressed) block for it
/// Return it as a stream with a header
async fn rpc_get_raw_block_streaming(
&self,
hash: &Hash,
priority: RequestPriority,
order_tag: Option<OrderTag>,
) -> Result<DataBlockStream, Error> {
self.rpc_get_raw_block_internal(
hash,
priority,
order_tag,
|stream| async move { Ok(stream) },
)
self.rpc_get_raw_block_internal(hash, order_tag, |stream| async move { Ok(stream) })
.await
}

@ -261,10 +239,9 @@ impl BlockManager {
pub(crate) async fn rpc_get_raw_block(
&self,
hash: &Hash,
priority: RequestPriority,
order_tag: Option<OrderTag>,
) -> Result<DataBlock, Error> {
self.rpc_get_raw_block_internal(hash, priority, order_tag, |block_stream| async move {
self.rpc_get_raw_block_internal(hash, order_tag, |block_stream| async move {
let (header, stream) = block_stream.into_parts();
read_stream_to_end(stream)
.await

@ -277,7 +254,6 @@ impl BlockManager {
async fn rpc_get_raw_block_internal<F, Fut, T>(
&self,
hash: &Hash,
priority: RequestPriority,
order_tag: Option<OrderTag>,
f: F,
) -> Result<T, Error>

@ -285,17 +261,15 @@ impl BlockManager {
F: Fn(DataBlockStream) -> Fut,
Fut: futures::Future<Output = Result<T, Error>>,
{
let who = self
.system
.rpc_helper()
.block_read_nodes_of(hash, self.system.rpc_helper());
let who = self.replication.read_nodes(hash);
let who = self.system.rpc.request_order(&who);

for node in who.iter() {
let node_id = NodeID::from(*node);
let rpc = self.endpoint.call_streaming(
&node_id,
BlockRpc::GetBlock(*hash, order_tag),
priority,
PRIO_NORMAL | PRIO_SECONDARY,
);
tokio::select! {
res = rpc => {

@ -328,15 +302,15 @@ impl BlockManager {
// if the first one doesn't succeed rapidly
// TODO: keep first request running when initiating a new one and take the
// one that finishes earlier
_ = tokio::time::sleep(self.system.rpc_helper().rpc_timeout()) => {
_ = tokio::time::sleep(self.system.rpc.rpc_timeout()) => {
debug!("Get block {:?}: node {:?} didn't return block in time, trying next.", hash, node);
}
};
}

let err = Error::MissingBlock(*hash);
debug!("{}", err);
Err(err)
let msg = format!("Get block {:?}: no node returned a valid block", hash);
debug!("{}", msg);
Err(Error::Message(msg))
}

// ---- Public interface ----

@ -347,9 +321,7 @@ impl BlockManager {
hash: &Hash,
order_tag: Option<OrderTag>,
) -> Result<ByteStream, Error> {
let block_stream = self
.rpc_get_raw_block_streaming(hash, PRIO_NORMAL | PRIO_SECONDARY, order_tag)
.await?;
let block_stream = self.rpc_get_raw_block_streaming(hash, order_tag).await?;
let (header, stream) = block_stream.into_parts();
match header {
DataBlockHeader::Plain => Ok(stream),

@ -363,44 +335,33 @@ impl BlockManager {
}
}

/// Send block to nodes that should have it
pub async fn rpc_put_block(
/// Ask nodes that might have a block for it, return it as one big Bytes
pub async fn rpc_get_block(
&self,
hash: Hash,
data: Bytes,
prevent_compression: bool,
hash: &Hash,
order_tag: Option<OrderTag>,
) -> Result<(), Error> {
let who = self.replication.write_sets(&hash);
) -> Result<Bytes, Error> {
let stream = self.rpc_get_block_streaming(hash, order_tag).await?;
Ok(read_stream_to_end(stream).await?.into_bytes())
}

let compression_level = self.compression_level.filter(|_| !prevent_compression);
let (header, bytes) = DataBlock::from_buffer(data, compression_level)
/// Send block to nodes that should have it
pub async fn rpc_put_block(&self, hash: Hash, data: Bytes) -> Result<(), Error> {
let who = self.replication.write_nodes(&hash);

let (header, bytes) = DataBlock::from_buffer(data, self.compression_level)
.await
.into_parts();

let permit = self
.buffer_kb_semaphore
.clone()
.acquire_many_owned((bytes.len() / 1024).try_into().unwrap())
.await
.ok_or_message("could not reserve space for buffer of data to send to remote nodes")?;

let put_block_rpc =
Req::new(BlockRpc::PutBlock { hash, header })?.with_stream_from_buffer(bytes);
let put_block_rpc = if let Some(tag) = order_tag {
put_block_rpc.with_order_tag(tag)
} else {
put_block_rpc
};

self.system
.rpc_helper()
.try_write_many_sets(
.rpc
.try_call_many(
&self.endpoint,
who.as_ref(),
&who[..],
put_block_rpc,
RequestStrategy::with_priority(PRIO_NORMAL | PRIO_SECONDARY)
.with_drop_on_completion(permit)
.with_quorum(self.replication.write_quorum()),
)
.await?;

@ -410,7 +371,12 @@ impl BlockManager {

/// Get number of items in the refcount table
pub fn rc_len(&self) -> Result<usize, Error> {
Ok(self.rc.rc_table.len()?)
Ok(self.rc.rc.len()?)
}

/// Get number of items in the refcount table
pub fn rc_fast_len(&self) -> Result<Option<usize>, Error> {
Ok(self.rc.rc.fast_len()?)
}

/// Send command to start/stop/manager scrub worker

@ -428,7 +394,7 @@ impl BlockManager {

/// List all resync errors
pub fn list_resync_errors(&self) -> Result<Vec<BlockResyncErrorInfo>, Error> {
let mut blocks = Vec::with_capacity(self.resync.errors.len()?);
let mut blocks = Vec::with_capacity(self.resync.errors.len());
for ent in self.resync.errors.iter()? {
let (hash, cnt) = ent?;
let cnt = ErrorCounter::decode(&cnt);

@ -466,7 +432,7 @@ impl BlockManager {
tokio::spawn(async move {
if let Err(e) = this
.resync
.put_to_resync(&hash, 2 * this.system.rpc_helper().rpc_timeout())
.put_to_resync(&hash, 2 * this.system.rpc.rpc_timeout())
{
error!("Block {:?} could not be put in resync queue: {}.", hash, e);
}

@ -560,7 +526,7 @@ impl BlockManager {
None => {
// Not found but maybe we should have had it ??
self.resync
.put_to_resync(hash, 2 * self.system.rpc_helper().rpc_timeout())?;
.put_to_resync(hash, 2 * self.system.rpc.rpc_timeout())?;
return Err(Error::Message(format!(
"block {:?} not found on node",
hash

@ -1,10 +1,7 @@
use std::sync::Arc;

use tokio::sync::Semaphore;

use opentelemetry::{global, metrics::*};

use garage_db as db;
use garage_db::counted_tree_hack::CountedTree;

/// TableMetrics reference all counter used for metrics
pub struct BlockManagerMetrics {

@ -12,7 +9,6 @@ pub struct BlockManagerMetrics {
pub(crate) _rc_size: ValueObserver<u64>,
pub(crate) _resync_queue_len: ValueObserver<u64>,
pub(crate) _resync_errored_blocks: ValueObserver<u64>,
pub(crate) _buffer_free_kb: ValueObserver<u64>,

pub(crate) resync_counter: BoundCounter<u64>,
pub(crate) resync_error_counter: BoundCounter<u64>,

@ -33,9 +29,8 @@ impl BlockManagerMetrics {
pub fn new(
compression_level: Option<i32>,
rc_tree: db::Tree,
resync_queue: db::Tree,
resync_errors: db::Tree,
buffer_semaphore: Arc<Semaphore>,
resync_queue: CountedTree,
resync_errors: CountedTree,
) -> Self {
let meter = global::meter("garage_model/block");
Self {

@ -50,17 +45,15 @@ impl BlockManagerMetrics {
.init(),
_rc_size: meter
.u64_value_observer("block.rc_size", move |observer| {
if let Ok(value) = rc_tree.len() {
observer.observe(value as u64, &[])
if let Ok(Some(v)) = rc_tree.fast_len() {
observer.observe(v as u64, &[])
}
})
.with_description("Number of blocks known to the reference counter")
.init(),
_resync_queue_len: meter
.u64_value_observer("block.resync_queue_length", move |observer| {
if let Ok(value) = resync_queue.len() {
observer.observe(value as u64, &[]);
}
observer.observe(resync_queue.len() as u64, &[])
})
.with_description(
"Number of block hashes queued for local check and possible resync",

@ -68,22 +61,11 @@ impl BlockManagerMetrics {
.init(),
_resync_errored_blocks: meter
.u64_value_observer("block.resync_errored_blocks", move |observer| {
if let Ok(value) = resync_errors.len() {
observer.observe(value as u64, &[]);
}
observer.observe(resync_errors.len() as u64, &[])
})
.with_description("Number of block hashes whose last resync resulted in an error")
.init(),

_buffer_free_kb: meter
.u64_value_observer("block.ram_buffer_free_kb", move |observer| {
observer.observe(buffer_semaphore.available_permits() as u64, &[])
})
.with_description(
"Available RAM in KiB to use for buffering data blocks to be written to remote nodes",
)
.init(),

resync_counter: meter
.u64_counter("block.resync_counter")
.with_description("Number of calls to resync_block")

@ -1,7 +1,5 @@
use std::convert::TryInto;

use arc_swap::ArcSwapOption;

use garage_db as db;

use garage_util::data::*;

@ -10,20 +8,13 @@ use garage_util::time::*;

use crate::manager::BLOCK_GC_DELAY;

pub type CalculateRefcount =
Box<dyn Fn(&db::Transaction, &Hash) -> db::TxResult<usize, Error> + Send + Sync>;

pub struct BlockRc {
pub rc_table: db::Tree,
pub(crate) recalc_rc: ArcSwapOption<Vec<CalculateRefcount>>,
pub(crate) rc: db::Tree,
}

impl BlockRc {
pub(crate) fn new(rc: db::Tree) -> Self {
Self {
rc_table: rc,
recalc_rc: ArcSwapOption::new(None),
}
Self { rc }
}

/// Increment the reference counter associated to a hash.

@ -33,9 +24,9 @@ impl BlockRc {
tx: &mut db::Transaction,
hash: &Hash,
) -> db::TxOpResult<bool> {
let old_rc = RcEntry::parse_opt(tx.get(&self.rc_table, hash)?);
let old_rc = RcEntry::parse_opt(tx.get(&self.rc, hash)?);
match old_rc.increment().serialize() {
Some(x) => tx.insert(&self.rc_table, hash, x)?,
Some(x) => tx.insert(&self.rc, hash, x)?,
None => unreachable!(),
};
Ok(old_rc.is_zero())

@ -48,28 +39,28 @@ impl BlockRc {
tx: &mut db::Transaction,
hash: &Hash,
) -> db::TxOpResult<bool> {
let new_rc = RcEntry::parse_opt(tx.get(&self.rc_table, hash)?).decrement();
let new_rc = RcEntry::parse_opt(tx.get(&self.rc, hash)?).decrement();
match new_rc.serialize() {
Some(x) => tx.insert(&self.rc_table, hash, x)?,
None => tx.remove(&self.rc_table, hash)?,
Some(x) => tx.insert(&self.rc, hash, x)?,
None => tx.remove(&self.rc, hash)?,
};
Ok(matches!(new_rc, RcEntry::Deletable { .. }))
}

/// Read a block's reference count
pub(crate) fn get_block_rc(&self, hash: &Hash) -> Result<RcEntry, Error> {
Ok(RcEntry::parse_opt(self.rc_table.get(hash.as_ref())?))
Ok(RcEntry::parse_opt(self.rc.get(hash.as_ref())?))
}

/// Delete an entry in the RC table if it is deletable and the
/// deletion time has passed
pub(crate) fn clear_deleted_block_rc(&self, hash: &Hash) -> Result<(), Error> {
let now = now_msec();
self.rc_table.db().transaction(|tx| {
let rcval = RcEntry::parse_opt(tx.get(&self.rc_table, hash)?);
self.rc.db().transaction(|tx| {
let rcval = RcEntry::parse_opt(tx.get(&self.rc, hash)?);
match rcval {
RcEntry::Deletable { at_time } if now > at_time => {
tx.remove(&self.rc_table, hash)?;
tx.remove(&self.rc, hash)?;
}
_ => (),
};

@ -77,58 +68,6 @@ impl BlockRc {
})?;
Ok(())
}

/// Recalculate the reference counter of a block
/// to fix potential inconsistencies
pub fn recalculate_rc(&self, hash: &Hash) -> Result<(usize, bool), Error> {
if let Some(recalc_fns) = self.recalc_rc.load().as_ref() {
trace!("Repair block RC for {:?}", hash);
let res = self
.rc_table
.db()
.transaction(|tx| {
let mut cnt = 0;
for f in recalc_fns.iter() {
cnt += f(&tx, hash)?;
}
let old_rc = RcEntry::parse_opt(tx.get(&self.rc_table, hash)?);
trace!(
"Block RC for {:?}: stored={}, calculated={}",
hash,
old_rc.as_u64(),
cnt
);
if cnt as u64 != old_rc.as_u64() {
warn!(
"Fixing inconsistent block RC for {:?}: was {}, should be {}",
hash,
old_rc.as_u64(),
cnt
);
let new_rc = if cnt > 0 {
RcEntry::Present { count: cnt as u64 }
} else {
RcEntry::Deletable {
at_time: now_msec() + BLOCK_GC_DELAY.as_millis() as u64,
}
};
tx.insert(&self.rc_table, hash, new_rc.serialize().unwrap())?;
Ok((cnt, true))
} else {
Ok((cnt, false))
}
})
.map_err(Error::from);
if let Err(e) = &res {
error!("Failed to fix RC for block {:?}: {}", hash, e);
}
res
} else {
Err(Error::Message(
"Block RC recalculation is not available at this point".into(),
))
}
}
}
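
For reference: a hypothetical sketch (not part of the diff) of how a caller wires recalculate_rc up through set_recalc_rc, shown in manager.rs above. The actual table scan depends on the calling crate and is elided.

// Each registered closure re-counts, inside the same db transaction, how many
// entries of one table still reference the given block hash.
let recalc: CalculateRefcount = Box::new(|tx, hash| {
    let count: usize = 0;
    // ... scan the hypothetical referencing table via `tx` and count
    // entries whose data blocks include `hash` ...
    let _ = (tx, hash);
    Ok(count)
});
// block_manager.set_recalc_rc(vec![recalc]);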

/// Describes the state of the reference counter for a block

@ -107,7 +107,7 @@ impl Worker for RepairWorker {
for entry in self
.manager
.rc
.rc_table
.rc
.range::<&[u8], _>((start_bound, Bound::Unbounded))?
{
let (hash, _) = entry?;

@ -15,6 +15,7 @@ use opentelemetry::{
};

use garage_db as db;
use garage_db::counted_tree_hack::CountedTree;

use garage_util::background::*;
use garage_util::data::*;

@ -46,9 +47,9 @@ pub(crate) const MAX_RESYNC_WORKERS: usize = 8;
const INITIAL_RESYNC_TRANQUILITY: u32 = 2;

pub struct BlockResyncManager {
pub(crate) queue: db::Tree,
pub(crate) queue: CountedTree,
pub(crate) notify: Arc<Notify>,
pub(crate) errors: db::Tree,
pub(crate) errors: CountedTree,

busy_set: BusySet,

@ -89,10 +90,12 @@ impl BlockResyncManager {
let queue = db
.open_tree("block_local_resync_queue")
.expect("Unable to open block_local_resync_queue tree");
let queue = CountedTree::new(queue).expect("Could not count block_local_resync_queue");

let errors = db
.open_tree("block_local_resync_errors")
.expect("Unable to open block_local_resync_errors tree");
let errors = CountedTree::new(errors).expect("Could not count block_local_resync_errors");

let persister = PersisterShared::new(&system.metadata_dir, "resync_cfg");

@ -107,12 +110,16 @@ impl BlockResyncManager {

/// Get length of resync queue
pub fn queue_len(&self) -> Result<usize, Error> {
Ok(self.queue.len()?)
// This currently can't return an error because the CountedTree hack
// doesn't error on .len(), but this will change when we remove the hack
// (hopefully someday!)
Ok(self.queue.len())
}

/// Get number of blocks that have an error
pub fn errors_len(&self) -> Result<usize, Error> {
Ok(self.errors.len()?)
// (see queue_len comment)
Ok(self.errors.len())
}

/// Clear the error counter for a block and put it in queue immediately

@ -173,7 +180,7 @@ impl BlockResyncManager {
// deleted once the garbage collection delay has passed.
//
// Here are some explanations on how the resync queue works.
// There are two db trees that are used to have information
// There are two Sled trees that are used to have information
// about the status of blocks that need to be resynchronized:
//
// - resync.queue: a tree that is ordered first by a timestamp

@ -367,17 +374,10 @@ impl BlockResyncManager {
}

if exists && rc.is_deletable() {
if manager.rc.recalculate_rc(hash)?.0 > 0 {
return Err(Error::Message(format!(
"Refcount for block {:?} was inconsistent, retrying later",
hash
)));
}

info!("Resync block {:?}: offloading and deleting", hash);
let existing_path = existing_path.unwrap();

let mut who = manager.replication.storage_nodes(hash);
let mut who = manager.replication.write_nodes(hash);
if who.len() < manager.replication.write_quorum() {
return Err(Error::Message("Not trying to offload block because we don't have a quorum of nodes to write to".to_string()));
}

@ -385,7 +385,7 @@ impl BlockResyncManager {

let who_needs_resps = manager
.system
.rpc_helper()
.rpc
.call_many(
&manager.endpoint,
&who,

@ -431,12 +431,12 @@ impl BlockResyncManager {
.with_stream_from_buffer(bytes);
manager
.system
.rpc_helper()
.rpc
.try_call_many(
&manager.endpoint,
&need_nodes,
&need_nodes[..],
put_block_message,
RequestStrategy::with_priority(PRIO_BACKGROUND | PRIO_SECONDARY)
RequestStrategy::with_priority(PRIO_BACKGROUND)
.with_quorum(need_nodes.len()),
)
.await

@ -460,17 +460,7 @@ impl BlockResyncManager {
hash
);

let block_data = manager
.rpc_get_raw_block(hash, PRIO_BACKGROUND | PRIO_SECONDARY, None)
.await;
if matches!(block_data, Err(Error::MissingBlock(_))) {
warn!(
"Could not fetch needed block {:?}, no node returned valid data. Checking that refcount is correct.",
hash
);
manager.rc.recalculate_rc(hash)?;
}
let block_data = block_data?;
let block_data = manager.rpc_get_raw_block(hash, None).await?;

manager.metrics.resync_recv_counter.add(1);

@ -551,9 +541,9 @@ impl Worker for ResyncWorker {
Ok(WorkerState::Idle)
}
Err(e) => {
// The errors that we have here are only db errors
// The errors that we have here are only Sled errors
// We don't really know how to handle them so just ¯\_(ツ)_/¯
// (there is kind of an assumption that the db won't error on us,
// (there is kind of an assumption that Sled won't error on us,
// if it does there is not much we can do -- TODO should we just panic?)
// Here we just give the error to the worker manager,
// it will print it to the logs and increment a counter

@ -1,6 +1,6 @@
[package]
name = "garage_db"
version = "1.0.1"
version = "0.9.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"

@ -17,15 +17,14 @@ hexdump.workspace = true
tracing.workspace = true

heed = { workspace = true, optional = true }
rusqlite = { workspace = true, optional = true, features = ["backup"] }
r2d2 = { workspace = true, optional = true }
r2d2_sqlite = { workspace = true, optional = true }
rusqlite = { workspace = true, optional = true }
sled = { workspace = true, optional = true }

[dev-dependencies]
mktemp.workspace = true

[features]
default = [ "lmdb", "sqlite" ]
default = [ "sled", "lmdb", "sqlite" ]
bundled-libs = [ "rusqlite?/bundled" ]
lmdb = [ "heed" ]
sqlite = [ "rusqlite", "r2d2", "r2d2_sqlite" ]
sqlite = [ "rusqlite" ]

127
src/db/counted_tree_hack.rs
Normal file
@ -0,0 +1,127 @@
//! This hack allows a db tree to keep in RAM a counter of the number of entries
//! it contains, which is used to call .len() on it. This is useful only for
//! the sled backend, where .len() would otherwise have to traverse the whole
//! tree to count items. For sqlite and lmdb, this is mostly useless (but
//! hopefully not harmful!). Note that a CountedTree cannot be part of a
//! transaction.

use std::sync::{
atomic::{AtomicUsize, Ordering},
Arc,
};

use crate::{Result, Tree, TxError, Value, ValueIter};

#[derive(Clone)]
pub struct CountedTree(Arc<CountedTreeInternal>);

struct CountedTreeInternal {
tree: Tree,
len: AtomicUsize,
}

impl CountedTree {
pub fn new(tree: Tree) -> Result<Self> {
let len = tree.len()?;
Ok(Self(Arc::new(CountedTreeInternal {
tree,
len: AtomicUsize::new(len),
})))
}

pub fn len(&self) -> usize {
self.0.len.load(Ordering::SeqCst)
}

pub fn is_empty(&self) -> bool {
self.len() == 0
}

pub fn get<K: AsRef<[u8]>>(&self, key: K) -> Result<Option<Value>> {
self.0.tree.get(key)
}

pub fn first(&self) -> Result<Option<(Value, Value)>> {
self.0.tree.first()
}

pub fn iter(&self) -> Result<ValueIter<'_>> {
self.0.tree.iter()
}

// ---- writing functions ----

pub fn insert<K, V>(&self, key: K, value: V) -> Result<Option<Value>>
where
K: AsRef<[u8]>,
V: AsRef<[u8]>,
{
let old_val = self.0.tree.insert(key, value)?;
if old_val.is_none() {
self.0.len.fetch_add(1, Ordering::SeqCst);
}
Ok(old_val)
}

pub fn remove<K: AsRef<[u8]>>(&self, key: K) -> Result<Option<Value>> {
let old_val = self.0.tree.remove(key)?;
if old_val.is_some() {
self.0.len.fetch_sub(1, Ordering::SeqCst);
}
Ok(old_val)
}

pub fn compare_and_swap<K, OV, NV>(
&self,
key: K,
expected_old: Option<OV>,
new: Option<NV>,
) -> Result<bool>
where
K: AsRef<[u8]>,
OV: AsRef<[u8]>,
NV: AsRef<[u8]>,
{
let old_some = expected_old.is_some();
let new_some = new.is_some();

let tx_res = self.0.tree.db().transaction(|tx| {
let old_val = tx.get(&self.0.tree, &key)?;
let is_same = match (&old_val, &expected_old) {
(None, None) => true,
(Some(x), Some(y)) if x == y.as_ref() => true,
_ => false,
};
if is_same {
match &new {
Some(v) => {
tx.insert(&self.0.tree, &key, v)?;
}
None => {
tx.remove(&self.0.tree, &key)?;
}
}
Ok(())
} else {
Err(TxError::Abort(()))
}
});

match tx_res {
Ok(()) => {
match (old_some, new_some) {
(false, true) => {
self.0.len.fetch_add(1, Ordering::SeqCst);
}
(true, false) => {
self.0.len.fetch_sub(1, Ordering::SeqCst);
}
_ => (),
}
Ok(true)
}
Err(TxError::Abort(())) => Ok(false),
Err(TxError::Db(e)) => Err(e),
}
}
}
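
For reference: a minimal usage sketch for the new file above, assuming an already-opened garage_db::Db named `db` and a surrounding function returning garage_db::Result<()>. The counter is seeded by one full traversal in CountedTree::new, then kept up to date on every insert/remove, so len() is O(1) afterwards.

use garage_db::counted_tree_hack::CountedTree;

let tree = db.open_tree("example")?;
let counted = CountedTree::new(tree)?;
counted.insert(b"key", b"value")?;
assert_eq!(counted.len(), 1);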

@ -1,12 +1,15 @@
#[macro_use]
#[cfg(feature = "sqlite")]
extern crate tracing;

#[cfg(feature = "lmdb")]
pub mod lmdb_adapter;
#[cfg(feature = "sled")]
pub mod sled_adapter;
#[cfg(feature = "sqlite")]
pub mod sqlite_adapter;

pub mod open;
pub mod counted_tree_hack;

#[cfg(test)]
pub mod test;

@ -15,13 +18,10 @@ use core::ops::{Bound, RangeBounds};

use std::borrow::Cow;
use std::cell::Cell;
use std::path::PathBuf;
use std::sync::Arc;

use err_derive::Error;

pub use open::*;

pub(crate) type OnCommit = Vec<Box<dyn FnOnce()>>;

#[derive(Clone)]

@ -45,12 +45,6 @@ pub type TxValueIter<'a> = Box<dyn std::iter::Iterator<Item = TxOpResult<(Value,
#[error(display = "{}", _0)]
pub struct Error(pub Cow<'static, str>);

impl From<std::io::Error> for Error {
fn from(e: std::io::Error) -> Error {
Error(format!("IO: {}", e).into())
}
}

pub type Result<T> = std::result::Result<T, Error>;

#[derive(Debug, Error)]

@ -58,7 +52,6 @@ pub type Result<T> = std::result::Result<T, Error>;
pub struct TxOpError(pub(crate) Error);
pub type TxOpResult<T> = std::result::Result<T, TxOpError>;

#[derive(Debug)]
pub enum TxError<E> {
Abort(E),
Db(Error),

@ -133,10 +126,6 @@ impl Db {
}
}

pub fn snapshot(&self, path: &PathBuf) -> Result<()> {
self.0.snapshot(path)
}

pub fn import(&self, other: &Db) -> Result<()> {
let existing_trees = self.list_trees()?;
if !existing_trees.is_empty() {

@ -182,6 +171,48 @@ impl Db {
}
}

/// List of supported database engine types
///
/// The `enum` holds the list of *all* database engines that can be supported by this crate,
/// whether or not the relevant feature is enabled. It allows us to distinguish between an
/// invalid engine and a valid engine whose support is not enabled via a feature flag.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Engine {
Lmdb,
Sqlite,
Sled,
}

impl Engine {
/// Return variant name as static `&str`
pub fn as_str(&self) -> &'static str {
match self {
Self::Lmdb => "lmdb",
Self::Sqlite => "sqlite",
Self::Sled => "sled",
}
}
}

impl std::fmt::Display for Engine {
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
self.as_str().fmt(fmt)
}
}

impl std::str::FromStr for Engine {
type Err = Error;

fn from_str(text: &str) -> Result<Engine> {
match text {
"lmdb" | "heed" => Ok(Self::Lmdb),
"sqlite" | "sqlite3" | "rusqlite" => Ok(Self::Sqlite),
"sled" => Ok(Self::Sled),
kind => Err(Error(format!("Invalid DB engine: {}", kind).into())),
}
}
}
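
For reference: a usage sketch for the Engine enum introduced above; note the aliases accepted by FromStr.

use std::str::FromStr;

assert_eq!(Engine::from_str("lmdb").unwrap(), Engine::Lmdb);
assert_eq!(Engine::from_str("sqlite3").unwrap(), Engine::Sqlite);
assert_eq!(Engine::Sled.to_string(), "sled");
assert!(Engine::from_str("tokyocabinet").is_err());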

#[allow(clippy::len_without_is_empty)]
impl Tree {
#[inline]

@ -197,6 +228,10 @@ impl Tree {
pub fn len(&self) -> Result<usize> {
self.0.len(self.1)
}
#[inline]
pub fn fast_len(&self) -> Result<Option<usize>> {
self.0.fast_len(self.1)
}
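
For reference: a sketch of the intended calling pattern for the new fast_len method. Engines that track counts cheaply (e.g. the lmdb adapter at the end of this diff) return Some; the trait default returns None, in which case callers fall back to a full count.

// Prefer the O(1) count when the engine provides one.
let n = match tree.fast_len()? {
    Some(n) => n,
    None => tree.len()?,
};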
|
||||
|
||||
#[inline]
|
||||
pub fn first(&self) -> Result<Option<(Value, Value)>> {
|
||||
|
@ -211,12 +246,16 @@ impl Tree {
|
|||
|
||||
/// Returns the old value if there was one
|
||||
#[inline]
|
||||
pub fn insert<T: AsRef<[u8]>, U: AsRef<[u8]>>(&self, key: T, value: U) -> Result<()> {
|
||||
pub fn insert<T: AsRef<[u8]>, U: AsRef<[u8]>>(
|
||||
&self,
|
||||
key: T,
|
||||
value: U,
|
||||
) -> Result<Option<Value>> {
|
||||
self.0.insert(self.1, key.as_ref(), value.as_ref())
|
||||
}
|
||||
/// Returns the old value if there was one
|
||||
#[inline]
|
||||
pub fn remove<T: AsRef<[u8]>>(&self, key: T) -> Result<()> {
|
||||
pub fn remove<T: AsRef<[u8]>>(&self, key: T) -> Result<Option<Value>> {
|
||||
self.0.remove(self.1, key.as_ref())
|
||||
}
|
||||
/// Clears all values from the tree
|
||||
|
@@ -274,19 +313,14 @@ impl<'a> Transaction<'a> {
		tree: &Tree,
		key: T,
		value: U,
	) -> TxOpResult<()> {
	) -> TxOpResult<Option<Value>> {
		self.tx.insert(tree.1, key.as_ref(), value.as_ref())
	}
	/// Returns the old value if there was one
	#[inline]
	pub fn remove<T: AsRef<[u8]>>(&mut self, tree: &Tree, key: T) -> TxOpResult<()> {
	pub fn remove<T: AsRef<[u8]>>(&mut self, tree: &Tree, key: T) -> TxOpResult<Option<Value>> {
		self.tx.remove(tree.1, key.as_ref())
	}
	/// Clears all values in a tree
	#[inline]
	pub fn clear(&mut self, tree: &Tree) -> TxOpResult<()> {
		self.tx.clear(tree.1)
	}

	#[inline]
	pub fn iter(&self, tree: &Tree) -> TxOpResult<TxValueIter<'_>> {
@@ -330,13 +364,15 @@ pub(crate) trait IDb: Send + Sync {
	fn engine(&self) -> String;
	fn open_tree(&self, name: &str) -> Result<usize>;
	fn list_trees(&self) -> Result<Vec<String>>;
	fn snapshot(&self, path: &PathBuf) -> Result<()>;

	fn get(&self, tree: usize, key: &[u8]) -> Result<Option<Value>>;
	fn len(&self, tree: usize) -> Result<usize>;
	fn fast_len(&self, _tree: usize) -> Result<Option<usize>> {
		Ok(None)
	}

	fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<()>;
	fn remove(&self, tree: usize, key: &[u8]) -> Result<()>;
	fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<Option<Value>>;
	fn remove(&self, tree: usize, key: &[u8]) -> Result<Option<Value>>;
	fn clear(&self, tree: usize) -> Result<()>;

	fn iter(&self, tree: usize) -> Result<ValueIter<'_>>;
@@ -362,9 +398,8 @@ pub(crate) trait ITx {
	fn get(&self, tree: usize, key: &[u8]) -> TxOpResult<Option<Value>>;
	fn len(&self, tree: usize) -> TxOpResult<usize>;

	fn insert(&mut self, tree: usize, key: &[u8], value: &[u8]) -> TxOpResult<()>;
	fn remove(&mut self, tree: usize, key: &[u8]) -> TxOpResult<()>;
	fn clear(&mut self, tree: usize) -> TxOpResult<()>;
	fn insert(&mut self, tree: usize, key: &[u8], value: &[u8]) -> TxOpResult<Option<Value>>;
	fn remove(&mut self, tree: usize, key: &[u8]) -> TxOpResult<Option<Value>>;

	fn iter(&self, tree: usize) -> TxOpResult<TxValueIter<'_>>;
	fn iter_rev(&self, tree: usize) -> TxOpResult<TxValueIter<'_>>;

@@ -3,8 +3,6 @@ use core::ptr::NonNull;

use std::collections::HashMap;
use std::convert::TryInto;
use std::path::PathBuf;
use std::pin::Pin;
use std::sync::{Arc, RwLock};

use heed::types::ByteSlice;
@@ -104,15 +102,6 @@ impl IDb for LmdbDb {
		Ok(ret2)
	}

	fn snapshot(&self, to: &PathBuf) -> Result<()> {
		std::fs::create_dir_all(to)?;
		let mut path = to.clone();
		path.push("data.mdb");
		self.db
			.copy_to_path(path, heed::CompactionOption::Disabled)?;
		Ok(())
	}

	// ----

	fn get(&self, tree: usize, key: &[u8]) -> Result<Option<Value>> {
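The `snapshot` implementation above leans on LMDB's single-file layout: it simply copies `data.mdb` into the target directory, without compaction. A sketch of what a caller sees (hypothetical path; assumes a `db: Db` opened with the LMDB engine and `?` in a function returning the crate's `Result`):

	use std::path::PathBuf;

	let target = PathBuf::from("/var/backups/garage-meta"); // hypothetical location
	db.snapshot(&target)?; // ends up as /var/backups/garage-meta/data.mdb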
@@ -132,20 +121,26 @@ impl IDb for LmdbDb {
		Ok(tree.len(&tx)?.try_into().unwrap())
	}

	fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<()> {
		let tree = self.get_tree(tree)?;
		let mut tx = self.db.write_txn()?;
		tree.put(&mut tx, key, value)?;
		tx.commit()?;
		Ok(())
	fn fast_len(&self, tree: usize) -> Result<Option<usize>> {
		Ok(Some(self.len(tree)?))
	}

	fn remove(&self, tree: usize, key: &[u8]) -> Result<()> {
	fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<Option<Value>> {
		let tree = self.get_tree(tree)?;
		let mut tx = self.db.write_txn()?;
		let old_val = tree.get(&tx, key)?.map(Vec::from);
		tree.put(&mut tx, key, value)?;
		tx.commit()?;
		Ok(old_val)
	}

	fn remove(&self, tree: usize, key: &[u8]) -> Result<Option<Value>> {
		let tree = self.get_tree(tree)?;
		let mut tx = self.db.write_txn()?;
		let old_val = tree.get(&tx, key)?.map(Vec::from);
		tree.delete(&mut tx, key)?;
		tx.commit()?;
		Ok(())
		Ok(old_val)
	}

	fn clear(&self, tree: usize) -> Result<()> {
@@ -247,63 +242,49 @@ impl<'a> ITx for LmdbTx<'a> {
			None => Ok(None),
		}
	}
	fn len(&self, tree: usize) -> TxOpResult<usize> {
		let tree = self.get_tree(tree)?;
		Ok(tree.len(&self.tx)? as usize)
	fn len(&self, _tree: usize) -> TxOpResult<usize> {
		unimplemented!(".len() in transaction not supported with LMDB backend")
	}

	fn insert(&mut self, tree: usize, key: &[u8], value: &[u8]) -> TxOpResult<()> {
	fn insert(&mut self, tree: usize, key: &[u8], value: &[u8]) -> TxOpResult<Option<Value>> {
		let tree = *self.get_tree(tree)?;
		let old_val = tree.get(&self.tx, key)?.map(Vec::from);
		tree.put(&mut self.tx, key, value)?;
		Ok(())
		Ok(old_val)
	}
	fn remove(&mut self, tree: usize, key: &[u8]) -> TxOpResult<()> {
	fn remove(&mut self, tree: usize, key: &[u8]) -> TxOpResult<Option<Value>> {
		let tree = *self.get_tree(tree)?;
		let old_val = tree.get(&self.tx, key)?.map(Vec::from);
		tree.delete(&mut self.tx, key)?;
		Ok(())
	}
	fn clear(&mut self, tree: usize) -> TxOpResult<()> {
		let tree = *self.get_tree(tree)?;
		tree.clear(&mut self.tx)?;
		Ok(())
		Ok(old_val)
	}

	fn iter(&self, tree: usize) -> TxOpResult<TxValueIter<'_>> {
		let tree = *self.get_tree(tree)?;
		Ok(Box::new(tree.iter(&self.tx)?.map(tx_iter_item)))
	fn iter(&self, _tree: usize) -> TxOpResult<TxValueIter<'_>> {
		unimplemented!("Iterators in transactions not supported with LMDB backend");
	}
	fn iter_rev(&self, tree: usize) -> TxOpResult<TxValueIter<'_>> {
		let tree = *self.get_tree(tree)?;
		Ok(Box::new(tree.rev_iter(&self.tx)?.map(tx_iter_item)))
	fn iter_rev(&self, _tree: usize) -> TxOpResult<TxValueIter<'_>> {
		unimplemented!("Iterators in transactions not supported with LMDB backend");
	}

	fn range<'r>(
		&self,
		tree: usize,
		low: Bound<&'r [u8]>,
		high: Bound<&'r [u8]>,
		_tree: usize,
		_low: Bound<&'r [u8]>,
		_high: Bound<&'r [u8]>,
	) -> TxOpResult<TxValueIter<'_>> {
		let tree = *self.get_tree(tree)?;
		Ok(Box::new(
			tree.range(&self.tx, &(low, high))?.map(tx_iter_item),
		))
		unimplemented!("Iterators in transactions not supported with LMDB backend");
	}
	fn range_rev<'r>(
		&self,
		tree: usize,
		low: Bound<&'r [u8]>,
		high: Bound<&'r [u8]>,
		_tree: usize,
		_low: Bound<&'r [u8]>,
		_high: Bound<&'r [u8]>,
	) -> TxOpResult<TxValueIter<'_>> {
		let tree = *self.get_tree(tree)?;
		Ok(Box::new(
			tree.rev_range(&self.tx, &(low, high))?.map(tx_iter_item),
		))
		unimplemented!("Iterators in transactions not supported with LMDB backend");
	}
}

// ---- iterators outside transactions ----
// complicated, they must hold the transaction object
// therefore a bit of unsafe code (it is a self-referential struct)
// ----

type IteratorItem<'a> = heed::Result<(
	<ByteSlice as BytesDecode<'a>>::DItem,
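The comment block just above names the real difficulty in this file: the iterator borrows from the read transaction, yet both have to be returned together. A minimal editorial sketch of the shape involved (names are hypothetical; this is not the diff's code):

	// `iter` borrows from `tx`, so two invariants must be upheld by hand:
	// `tx` must not move while `iter` is alive, and `iter` must be dropped
	// before `tx` (the `Drop` impl in the next hunk handles the second).
	// The borrow itself is laundered through a raw pointer, hence `unsafe`.
	struct TxAndIterSketch<Tx, I> {
		tx: Tx,          // the owning read transaction
		iter: Option<I>, // borrows from `tx` via an erased lifetime
	}

One side of the next hunk additionally pins the struct in a `Box::pin` before taking the raw pointer, keeping the address of `tx` stable; the other side boxes it only on return.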
@@ -326,20 +307,12 @@ where
	where
		F: FnOnce(&'a RoTxn<'a>) -> Result<I>,
	{
		let res = TxAndIterator { tx, iter: None };
		let mut boxed = Box::pin(res);
		let mut res = TxAndIterator { tx, iter: None };

		// This unsafe allows us to bypass lifetime checks
		let tx = unsafe { NonNull::from(&boxed.tx).as_ref() };
		let iter = iterfun(tx)?;
		let tx = unsafe { NonNull::from(&res.tx).as_ref() };
		res.iter = Some(iterfun(tx)?);

		let mut_ref = Pin::as_mut(&mut boxed);
		// This unsafe allows us to write in a field of the pinned struct
		unsafe {
			Pin::get_unchecked_mut(mut_ref).iter = Some(iter);
		}

		Ok(Box::new(TxAndIteratorPin(boxed)))
		Ok(Box::new(res))
	}
}

@@ -348,26 +321,18 @@ where
	I: Iterator<Item = IteratorItem<'a>> + 'a,
{
	fn drop(&mut self) {
		// ensure the iterator is dropped before the RoTxn it references
		drop(self.iter.take());
	}
}

struct TxAndIteratorPin<'a, I>(Pin<Box<TxAndIterator<'a, I>>>)
where
	I: Iterator<Item = IteratorItem<'a>> + 'a;

impl<'a, I> Iterator for TxAndIteratorPin<'a, I>
impl<'a, I> Iterator for TxAndIterator<'a, I>
where
	I: Iterator<Item = IteratorItem<'a>> + 'a,
{
	type Item = Result<(Value, Value)>;

	fn next(&mut self) -> Option<Self::Item> {
		let mut_ref = Pin::as_mut(&mut self.0);
		// This unsafe allows us to mutably access the iterator field
		let next = unsafe { Pin::get_unchecked_mut(mut_ref).iter.as_mut()?.next() };
		match next {
		match self.iter.as_mut().unwrap().next() {
			None => None,
			Some(Err(e)) => Some(Err(e.into())),
			Some(Ok((k, v))) => Some(Ok((k.to_vec(), v.to_vec()))),
@@ -375,16 +340,7 @@ where
	}
}

// ---- iterators within transactions ----

fn tx_iter_item<'a>(
	item: std::result::Result<(&'a [u8], &'a [u8]), heed::Error>,
) -> TxOpResult<(Vec<u8>, Vec<u8>)> {
	item.map(|(k, v)| (k.to_vec(), v.to_vec()))
		.map_err(|e| TxOpError(Error::from(e)))
}

// ---- utility ----
// ----

#[cfg(target_pointer_width = "64")]
pub fn recommended_map_size() -> usize {