Compare commits: main...db-no-unsa

1 commit: a46c3d2502

146 changed files with 1987 additions and 3955 deletions
@@ -16,7 +16,7 @@ steps:
   - name: build
     image: nixpkgs/nix:nixos-22.05
     commands:
-      - nix-build --no-build-output --attr pkgs.amd64.debug --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
+      - nix-build --no-build-output --attr clippy.amd64 --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}

   - name: unit + func tests
     image: nixpkgs/nix:nixos-22.05
@@ -24,7 +24,7 @@ steps:
       GARAGE_TEST_INTEGRATION_EXE: result-bin/bin/garage
       GARAGE_TEST_INTEGRATION_PATH: tmp-garage-integration
     commands:
-      - nix-build --no-build-output --attr pkgs.amd64.debug --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
+      - nix-build --no-build-output --attr clippy.amd64 --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
       - nix-build --no-build-output --attr test.amd64
       - ./result/bin/garage_db-*
       - ./result/bin/garage_api-*
@@ -34,14 +34,12 @@ steps:
       - ./result/bin/garage_util-*
       - ./result/bin/garage_web-*
       - ./result/bin/garage-*
-      - GARAGE_TEST_INTEGRATION_DB_ENGINE=lmdb ./result/bin/integration-* || (cat tmp-garage-integration/stderr.log; false)
-      - nix-shell --attr ci --run "killall -9 garage" || true
-      - GARAGE_TEST_INTEGRATION_DB_ENGINE=sqlite ./result/bin/integration-* || (cat tmp-garage-integration/stderr.log; false)
+      - ./result/bin/integration-* || (cat tmp-garage-integration/stderr.log; false)
       - rm result
       - rm -rv tmp-garage-integration

   - name: integration tests
     image: nixpkgs/nix:nixos-22.05
     commands:
-      - nix-build --no-build-output --attr pkgs.amd64.debug --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
+      - nix-build --no-build-output --attr clippy.amd64 --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
       - nix-shell --attr ci --run ./script/test-smoke.sh || (cat /tmp/garage.log; false)
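On the main side, the unit + func tests step runs the integration binary once per database engine, selected through the GARAGE_TEST_INTEGRATION_DB_ENGINE variable (killing stray garage processes in between); the branch predates this and runs the suite a single time. A minimal sketch of how a test harness can branch on such a variable, with a hypothetical helper name, not garage's actual code:

    use std::env;

    // Hypothetical helper: read the engine the CI step selects via
    // GARAGE_TEST_INTEGRATION_DB_ENGINE; garage's real harness may
    // read and default this differently.
    fn integration_db_engine() -> String {
        env::var("GARAGE_TEST_INTEGRATION_DB_ENGINE").unwrap_or_else(|_| "lmdb".to_string())
    }

    fn main() {
        let engine = integration_db_engine();
        match engine.as_str() {
            "lmdb" | "sqlite" => println!("running integration tests against {engine}"),
            other => panic!("unsupported db engine: {other}"),
        }
    }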
@@ -9,11 +9,11 @@ depends_on:
 steps:
   - name: refresh-index
     image: nixpkgs/nix:nixos-22.05
-    environment:
-      AWS_ACCESS_KEY_ID:
-        from_secret: garagehq_aws_access_key_id
-      AWS_SECRET_ACCESS_KEY:
-        from_secret: garagehq_aws_secret_access_key
+    secrets:
+      - source: garagehq_aws_access_key_id
+        target: AWS_ACCESS_KEY_ID
+      - source: garagehq_aws_secret_access_key
+        target: AWS_SECRET_ACCESS_KEY
     commands:
       - mkdir -p /etc/nix && cp nix/nix.conf /etc/nix/nix.conf
       - nix-shell --attr ci --run "refresh_index"
@@ -48,10 +48,11 @@ steps:
     image: nixpkgs/nix:nixos-22.05
     environment:
       TARGET: "${TARGET}"
-      AWS_ACCESS_KEY_ID:
-        from_secret: garagehq_aws_access_key_id
-      AWS_SECRET_ACCESS_KEY:
-        from_secret: garagehq_aws_secret_access_key
+    secrets:
+      - source: garagehq_aws_access_key_id
+        target: AWS_ACCESS_KEY_ID
+      - source: garagehq_aws_secret_access_key
+        target: AWS_SECRET_ACCESS_KEY
     commands:
       - nix-shell --attr ci --run "to_s3"

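Both hunks above express the same two credentials in different Woodpecker syntaxes: main maps them as environment entries populated with from_secret, while the branch uses the older top-level secrets list with source/target pairs. Either way the step should end up with AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY set in its environment.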
Cargo.lock (generated), 153 lines changed
@@ -74,6 +74,12 @@ dependencies = [
  "memchr",
 ]

+[[package]]
+name = "aliasable"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "250f629c0161ad8107cf89319e990051fae62832fd343083bea452d93e2205fd"
+
 [[package]]
 name = "allocator-api2"
 version = "0.2.16"
@@ -905,9 +911,9 @@ dependencies = [

 [[package]]
 name = "crc32fast"
-version = "1.4.0"
+version = "1.3.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa"
+checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d"
 dependencies = [
  "cfg-if",
 ]
@@ -1304,7 +1310,7 @@ dependencies = [

 [[package]]
 name = "garage"
-version = "1.0.1"
+version = "0.10.0"
 dependencies = [
  "assert-json-diff",
  "async-trait",
@@ -1346,11 +1352,9 @@ dependencies = [
  "serde",
  "serde_bytes",
  "serde_json",
- "sha1",
  "sha2",
  "static_init",
  "structopt",
- "syslog-tracing",
  "timeago",
  "tokio",
  "toml",
@@ -1360,7 +1364,7 @@ dependencies = [

 [[package]]
 name = "garage_api"
-version = "1.0.1"
+version = "0.10.0"
 dependencies = [
  "aes-gcm",
  "argon2",
@@ -1369,8 +1373,6 @@ dependencies = [
  "base64 0.21.7",
  "bytes",
  "chrono",
- "crc32c",
- "crc32fast",
  "crypto-common",
  "err-derive",
  "form_urlencoded",
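main's garage_api additionally depends on sha1, crc32c and crc32fast, presumably for S3-style object checksums; the branch predates them. A hedged illustration of what the two CRC crates compute, using their public APIs rather than any garage code:

    // crc32fast implements the standard CRC32 (IEEE) polynomial,
    // crc32c the Castagnoli variant.
    fn checksums(body: &[u8]) -> (u32, u32) {
        let mut hasher = crc32fast::Hasher::new();
        hasher.update(body);
        (hasher.finalize(), crc32c::crc32c(body))
    }

    fn main() {
        let (crc32, crc32c) = checksums(b"hello");
        println!("crc32 = {crc32:08x}, crc32c = {crc32c:08x}");
    }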
@@ -1404,7 +1406,6 @@ dependencies = [
  "serde",
  "serde_bytes",
  "serde_json",
- "sha1",
  "sha2",
  "tokio",
  "tokio-stream",
@@ -1415,7 +1416,7 @@ dependencies = [

 [[package]]
 name = "garage_block"
-version = "1.0.1"
+version = "0.10.0"
 dependencies = [
  "arc-swap",
  "async-compression",
@@ -1442,21 +1443,20 @@ dependencies = [

 [[package]]
 name = "garage_db"
-version = "1.0.1"
+version = "0.10.0"
 dependencies = [
  "err-derive",
  "heed",
  "hexdump",
  "mktemp",
- "r2d2",
- "r2d2_sqlite",
+ "ouroboros",
  "rusqlite",
  "tracing",
 ]

 [[package]]
 name = "garage_model"
-version = "1.0.1"
+version = "0.10.0"
 dependencies = [
  "arc-swap",
  "async-trait",
@@ -1475,7 +1475,6 @@ dependencies = [
  "hex",
  "http 1.0.0",
  "opentelemetry",
- "parse_duration",
  "rand",
  "serde",
  "serde_bytes",
@@ -1486,7 +1485,7 @@ dependencies = [

 [[package]]
 name = "garage_net"
-version = "1.0.1"
+version = "0.10.0"
 dependencies = [
  "arc-swap",
  "async-trait",
@@ -1512,7 +1511,7 @@ dependencies = [

 [[package]]
 name = "garage_rpc"
-version = "1.0.1"
+version = "0.10.0"
 dependencies = [
  "arc-swap",
  "async-trait",
@@ -1527,7 +1526,6 @@ dependencies = [
  "garage_util",
  "gethostname",
  "hex",
- "ipnet",
  "itertools 0.12.1",
  "k8s-openapi",
  "kube",
@@ -1548,7 +1546,7 @@ dependencies = [

 [[package]]
 name = "garage_table"
-version = "1.0.1"
+version = "0.10.0"
 dependencies = [
  "arc-swap",
  "async-trait",
@@ -1570,7 +1568,7 @@ dependencies = [

 [[package]]
 name = "garage_util"
-version = "1.0.1"
+version = "0.10.0"
 dependencies = [
  "arc-swap",
  "async-trait",
@@ -1604,7 +1602,7 @@ dependencies = [

 [[package]]
 name = "garage_web"
-version = "1.0.1"
+version = "0.10.0"
 dependencies = [
  "err-derive",
  "futures",
@@ -1756,9 +1754,9 @@ dependencies = [

 [[package]]
 name = "hashlink"
-version = "0.9.0"
+version = "0.8.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "692eaaf7f7607518dd3cef090f1474b61edc5301d8012f09579920df68b725ee"
+checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7"
 dependencies = [
  "hashbrown 0.14.3",
 ]
@@ -2424,9 +2422,9 @@ dependencies = [

 [[package]]
 name = "libsqlite3-sys"
-version = "0.28.0"
+version = "0.27.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0c10584274047cb335c23d3e61bcef8e323adae7c5c8c760540f73610177fc3f"
+checksum = "cf4e226dcd58b4be396f7bd3c20da8fdee2911400705297ba7d2d7cc2c30f716"
 dependencies = [
  "cc",
  "pkg-config",
@@ -2784,6 +2782,31 @@ dependencies = [
  "num-traits",
 ]

+[[package]]
+name = "ouroboros"
+version = "0.18.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "97b7be5a8a3462b752f4be3ff2b2bf2f7f1d00834902e46be2a4d68b87b0573c"
+dependencies = [
+ "aliasable",
+ "ouroboros_macro",
+ "static_assertions",
+]
+
+[[package]]
+name = "ouroboros_macro"
+version = "0.18.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b645dcde5f119c2c454a92d0dfa271a2a3b205da92e4292a68ead4bdbfde1f33"
+dependencies = [
+ "heck 0.4.1",
+ "itertools 0.12.1",
+ "proc-macro2",
+ "proc-macro2-diagnostics",
+ "quote",
+ "syn 2.0.48",
+]
+
 [[package]]
 name = "outref"
 version = "0.5.1"
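ouroboros, added here along with its macro crate and the aliasable/static_assertions support crates, generates self-referential structs in safe Rust: a field may borrow from another field the same struct owns. That fits the branch name's apparent goal of removing unsafe code from garage_db, though how garage_db uses it is not visible in this extract. A minimal sketch of the crate's API, unrelated to garage's own types:

    use ouroboros::self_referencing;

    // `slice` borrows from `owner`, which lives in the same struct.
    // The macro generates HeldBuilder and borrow_* accessors.
    #[self_referencing]
    struct Held {
        owner: String,
        #[borrows(owner)]
        slice: &'this str,
    }

    fn main() {
        let held = HeldBuilder {
            owner: "hello world".to_string(),
            slice_builder: |owner| &owner[..5],
        }
        .build();
        assert_eq!(*held.borrow_slice(), "hello");
    }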
@@ -3111,6 +3134,19 @@ dependencies = [
  "unicode-ident",
 ]

+[[package]]
+name = "proc-macro2-diagnostics"
+version = "0.10.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.48",
+ "version_check",
+ "yansi",
+]
+
 [[package]]
 name = "prometheus"
 version = "0.13.3"
@@ -3204,28 +3240,6 @@ dependencies = [
  "proc-macro2",
 ]

-[[package]]
-name = "r2d2"
-version = "0.8.10"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "51de85fb3fb6524929c8a2eb85e6b6d363de4e8c48f9e2c2eac4944abc181c93"
-dependencies = [
- "log",
- "parking_lot 0.12.1",
- "scheduled-thread-pool",
-]
-
-[[package]]
-name = "r2d2_sqlite"
-version = "0.24.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6a982edf65c129796dba72f8775b292ef482b40d035e827a9825b3bc07ccc5f2"
-dependencies = [
- "r2d2",
- "rusqlite",
- "uuid",
-]
-
 [[package]]
 name = "rand"
 version = "0.8.5"
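Conversely, r2d2 and r2d2_sqlite exist only on the main side: together with the rusqlite dependency kept by both sides, they give garage_db a pool of SQLite connections rather than one shared connection. A hedged sketch of the pooling pattern these crates provide (file name and pool size are illustrative, not garage's configuration):

    use r2d2_sqlite::SqliteConnectionManager;

    fn main() -> Result<(), Box<dyn std::error::Error>> {
        // A manager that opens connections to one SQLite file,
        // wrapped in a fixed-size pool.
        let manager = SqliteConnectionManager::file("db.sqlite");
        let pool = r2d2::Pool::builder().max_size(8).build(manager)?;

        // get() borrows one pooled connection; it derefs to rusqlite::Connection.
        let conn = pool.get()?;
        conn.execute_batch("CREATE TABLE IF NOT EXISTS kv (k BLOB PRIMARY KEY, v BLOB);")?;
        Ok(())
    }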
@@ -3419,9 +3433,9 @@ checksum = "3cd14fd5e3b777a7422cca79358c57a8f6e3a703d9ac187448d0daf220c2407f"

 [[package]]
 name = "rusqlite"
-version = "0.31.0"
+version = "0.30.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b838eba278d213a8beaf485bd313fd580ca4505a00d5871caeb1457c55322cae"
+checksum = "a78046161564f5e7cd9008aff3b2990b3850dc8e0349119b98e8f251e099f24d"
 dependencies = [
  "bitflags 2.4.2",
  "fallible-iterator",
@@ -3586,15 +3600,6 @@ dependencies = [
  "windows-sys 0.52.0",
 ]

-[[package]]
-name = "scheduled-thread-pool"
-version = "0.2.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19"
-dependencies = [
- "parking_lot 0.12.1",
-]
-
 [[package]]
 name = "schemars"
 version = "0.8.16"
@@ -3874,6 +3879,12 @@ dependencies = [
  "der",
 ]

+[[package]]
+name = "static_assertions"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
+
 [[package]]
 name = "static_init"
 version = "1.0.3"
@@ -3987,17 +3998,6 @@ dependencies = [
  "unicode-xid",
 ]

-[[package]]
-name = "syslog-tracing"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "340b1540dcdb6b066bc2966e7974f977ab1a38f21b2be189014ffb0cc2405768"
-dependencies = [
- "libc",
- "tracing-core",
- "tracing-subscriber",
-]
-
 [[package]]
 name = "system-configuration"
 version = "0.5.1"
@@ -4082,9 +4082,9 @@ dependencies = [

 [[package]]
 name = "time"
-version = "0.3.36"
+version = "0.3.34"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885"
+checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749"
 dependencies = [
  "deranged",
  "num-conv",
@@ -4102,9 +4102,9 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3"

 [[package]]
 name = "time-macros"
-version = "0.2.18"
+version = "0.2.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf"
+checksum = "7ba3a3ef41e6672a2f0f001392bb5dcd3ff0a9992d618ca761a11c3121547774"
 dependencies = [
  "num-conv",
  "time-core",
@@ -4553,7 +4553,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "79daa5ed5740825c40b389c5e50312b9c86df53fccd33f281df655642b43869d"
 dependencies = [
  "getrandom",
- "rand",
 ]

 [[package]]
@@ -4896,6 +4895,12 @@ version = "0.8.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "53be06678ed9e83edb1745eb72efc0bbcd7b5c3c35711a860906aed827a13d61"

+[[package]]
+name = "yansi"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6c2861d76f58ec8fc95708b9b1e417f7b12fd72ad33c01fa6886707092dea0d3"
+
 [[package]]
 name = "zerocopy"
 version = "0.7.32"
Cargo.nix, 448 lines changed
@@ -25,7 +25,6 @@ args@{
   target ? null,
   codegenOpts ? null,
   profileOpts ? null,
-  cargoUnstableFlags ? null,
   rustcLinkFlags ? null,
   rustcBuildFlags ? null,
   mkRustCrate,
@@ -35,7 +34,7 @@ args@{
   ignoreLockHash,
 }:
 let
-  nixifiedLockHash = "466643eea782cd68c6f205858bb9e053aecdb18e2e58427b0527022aad596130";
+  nixifiedLockHash = "17e620fad07e725e301488a3746ca3512d8785ce3077f634adde82e9e4eab2bb";
   workspaceSrc = if args.workspaceSrc == null then ./. else args.workspaceSrc;
   currentLockHash = builtins.hashFile "sha256" (workspaceSrc + /Cargo.lock);
   lockHashIgnored = if ignoreLockHash
@@ -52,24 +51,24 @@ else let
   rootFeatures' = expandFeatures rootFeatures;
   overridableMkRustCrate = f:
     let
-      drvs = genDrvsByProfile profilesByName ({ profile, profileName }: mkRustCrate ({ inherit release profile hostPlatformCpu hostPlatformFeatures target profileOpts codegenOpts cargoUnstableFlags rustcLinkFlags rustcBuildFlags; } // (f profileName)));
+      drvs = genDrvsByProfile profilesByName ({ profile, profileName }: mkRustCrate ({ inherit release profile hostPlatformCpu hostPlatformFeatures target profileOpts codegenOpts rustcLinkFlags rustcBuildFlags; } // (f profileName)));
     in { compileMode ? null, profileName ? decideProfile compileMode release }:
       let drv = drvs.${profileName}; in if compileMode == null then drv else drv.override { inherit compileMode; };
 in
 {
   cargo2nixVersion = "0.11.0";
   workspace = {
-    garage_db = rustPackages.unknown.garage_db."1.0.1";
-    garage_util = rustPackages.unknown.garage_util."1.0.1";
-    garage_net = rustPackages.unknown.garage_net."1.0.1";
-    garage_rpc = rustPackages.unknown.garage_rpc."1.0.1";
+    garage_db = rustPackages.unknown.garage_db."0.10.0";
+    garage_util = rustPackages.unknown.garage_util."0.10.0";
+    garage_net = rustPackages.unknown.garage_net."0.10.0";
+    garage_rpc = rustPackages.unknown.garage_rpc."0.10.0";
     format_table = rustPackages.unknown.format_table."0.1.1";
-    garage_table = rustPackages.unknown.garage_table."1.0.1";
-    garage_block = rustPackages.unknown.garage_block."1.0.1";
-    garage_model = rustPackages.unknown.garage_model."1.0.1";
-    garage_api = rustPackages.unknown.garage_api."1.0.1";
-    garage_web = rustPackages.unknown.garage_web."1.0.1";
-    garage = rustPackages.unknown.garage."1.0.1";
+    garage_table = rustPackages.unknown.garage_table."0.10.0";
+    garage_block = rustPackages.unknown.garage_block."0.10.0";
+    garage_model = rustPackages.unknown.garage_model."0.10.0";
+    garage_api = rustPackages.unknown.garage_api."0.10.0";
+    garage_web = rustPackages.unknown.garage_web."0.10.0";
+    garage = rustPackages.unknown.garage."0.10.0";
     k2v-client = rustPackages.unknown.k2v-client."0.0.4";
   };
   "registry+https://github.com/rust-lang/crates.io-index".addr2line."0.21.0" = overridableMkRustCrate (profileName: rec {
@@ -153,13 +152,13 @@ in
       (lib.optional (rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "std")
     ];
     dependencies = {
-      ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "cfg_if" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".cfg-if."1.0.0" { inherit profileName; }).out;
+      ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "cfg_if" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".cfg-if."1.0.0" { inherit profileName; }).out;
       ${ if rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "getrandom" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".getrandom."0.2.12" { inherit profileName; }).out;
-      ${ if (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") && !((hostPlatform.parsed.cpu.name == "armv6l" || hostPlatform.parsed.cpu.name == "armv7l") && hostPlatform.parsed.kernel.name == "none") then "once_cell" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".once_cell."1.19.0" { inherit profileName; }).out;
-      ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "zerocopy" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".zerocopy."0.7.32" { inherit profileName; }).out;
+      ${ if (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") && !((hostPlatform.parsed.cpu.name == "armv6l" || hostPlatform.parsed.cpu.name == "armv7l") && hostPlatform.parsed.kernel.name == "none") then "once_cell" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".once_cell."1.19.0" { inherit profileName; }).out;
+      ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "zerocopy" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".zerocopy."0.7.32" { inherit profileName; }).out;
     };
     buildDependencies = {
-      ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "version_check" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".version_check."0.9.4" { profileName = "__noProfile"; }).out;
+      ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "version_check" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".version_check."0.9.4" { profileName = "__noProfile"; }).out;
     };
   });

@@ -177,13 +176,24 @@ in
     };
   });

+  "registry+https://github.com/rust-lang/crates.io-index".aliasable."0.1.3" = overridableMkRustCrate (profileName: rec {
+    name = "aliasable";
+    version = "0.1.3";
+    registry = "registry+https://github.com/rust-lang/crates.io-index";
+    src = fetchCratesIo { inherit name version; sha256 = "250f629c0161ad8107cf89319e990051fae62832fd343083bea452d93e2205fd"; };
+    features = builtins.concatLists [
+      [ "alloc" ]
+      [ "default" ]
+    ];
+  });
+
   "registry+https://github.com/rust-lang/crates.io-index".allocator-api2."0.2.16" = overridableMkRustCrate (profileName: rec {
     name = "allocator-api2";
     version = "0.2.16";
     registry = "registry+https://github.com/rust-lang/crates.io-index";
     src = fetchCratesIo { inherit name version; sha256 = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5"; };
     features = builtins.concatLists [
-      (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "alloc")
+      (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "alloc")
     ];
   });

@@ -425,7 +435,7 @@ in
       http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.11" { inherit profileName; }).out;
       hyper = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."0.14.28" { inherit profileName; }).out;
       ring = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".ring."0.17.7" { inherit profileName; }).out;
-      time = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".time."0.3.36" { inherit profileName; }).out;
+      time = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".time."0.3.34" { inherit profileName; }).out;
       tokio = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.36.0" { inherit profileName; }).out;
       tracing = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tracing."0.1.40" { inherit profileName; }).out;
       zeroize = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".zeroize."1.7.0" { inherit profileName; }).out;
@@ -644,7 +654,7 @@ in
       ring = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".ring."0.17.7" { inherit profileName; }).out;
       sha2 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".sha2."0.10.8" { inherit profileName; }).out;
       subtle = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".subtle."2.5.0" { inherit profileName; }).out;
-      time = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".time."0.3.36" { inherit profileName; }).out;
+      time = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".time."0.3.34" { inherit profileName; }).out;
       tracing = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tracing."0.1.40" { inherit profileName; }).out;
       zeroize = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".zeroize."1.7.0" { inherit profileName; }).out;
     };
@@ -675,7 +685,7 @@ in
       aws_smithy_types = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".aws-smithy-types."1.1.4" { inherit profileName; }).out;
       bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.5.0" { inherit profileName; }).out;
       crc32c = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crc32c."0.6.4" { inherit profileName; }).out;
-      crc32fast = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crc32fast."1.4.0" { inherit profileName; }).out;
+      crc32fast = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crc32fast."1.3.2" { inherit profileName; }).out;
       hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
       http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.11" { inherit profileName; }).out;
       http_body = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http-body."0.4.6" { inherit profileName; }).out;
@@ -695,7 +705,7 @@ in
     dependencies = {
       aws_smithy_types = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".aws-smithy-types."1.1.4" { inherit profileName; }).out;
       bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.5.0" { inherit profileName; }).out;
-      crc32fast = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crc32fast."1.4.0" { inherit profileName; }).out;
+      crc32fast = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crc32fast."1.3.2" { inherit profileName; }).out;
     };
   });

@@ -824,7 +834,7 @@ in
       pin_utils = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".pin-utils."0.1.0" { inherit profileName; }).out;
       ryu = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".ryu."1.0.16" { inherit profileName; }).out;
       ${ if false then "serde" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.196" { inherit profileName; }).out;
-      time = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".time."0.3.36" { inherit profileName; }).out;
+      time = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".time."0.3.34" { inherit profileName; }).out;
       tokio = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.36.0" { inherit profileName; }).out;
       tokio_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio-util."0.7.10" { inherit profileName; }).out;
     };
@@ -1288,11 +1298,11 @@ in
     };
   });

-  "registry+https://github.com/rust-lang/crates.io-index".crc32fast."1.4.0" = overridableMkRustCrate (profileName: rec {
+  "registry+https://github.com/rust-lang/crates.io-index".crc32fast."1.3.2" = overridableMkRustCrate (profileName: rec {
     name = "crc32fast";
-    version = "1.4.0";
+    version = "1.3.2";
     registry = "registry+https://github.com/rust-lang/crates.io-index";
-    src = fetchCratesIo { inherit name version; sha256 = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa"; };
+    src = fetchCratesIo { inherit name version; sha256 = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d"; };
     features = builtins.concatLists [
       [ "default" ]
       [ "std" ]
@@ -1685,8 +1695,8 @@ in
     registry = "registry+https://github.com/rust-lang/crates.io-index";
     src = fetchCratesIo { inherit name version; sha256 = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649"; };
     features = builtins.concatLists [
-      (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "alloc")
-      (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "default")
+      (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "alloc")
+      (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "default")
     ];
   });

@@ -1757,7 +1767,7 @@ in
     name = "format_table";
     version = "0.1.1";
     registry = "unknown";
-    src = fetchCrateLocal workspaceSrc;
+    src = fetchCrateLocal (workspaceSrc + "/src/format-table");
   });

   "registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" = overridableMkRustCrate (profileName: rec {
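The src change above repeats for every workspace crate below: main fetches each local crate from the workspace root (fetchCrateLocal workspaceSrc), while the branch points fetchCrateLocal at the crate's subdirectory (workspaceSrc + "/src/format-table", "/src/garage", and so on). Presumably this narrows each derivation's source so that editing one crate does not invalidate the derivations of the others.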
@@ -1911,11 +1921,11 @@ in
     };
   });

-  "unknown".garage."1.0.1" = overridableMkRustCrate (profileName: rec {
+  "unknown".garage."0.10.0" = overridableMkRustCrate (profileName: rec {
     name = "garage";
-    version = "1.0.1";
+    version = "0.10.0";
     registry = "unknown";
-    src = fetchCrateLocal workspaceSrc;
+    src = fetchCrateLocal (workspaceSrc + "/src/garage");
     features = builtins.concatLists [
       (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default") "bundled-libs")
       (lib.optional (rootFeatures' ? "garage/consul-discovery") "consul-discovery")
@@ -1928,8 +1938,6 @@ in
       (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-prometheus") "opentelemetry-prometheus")
       (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/prometheus") "prometheus")
       (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite") "sqlite")
-      (lib.optional (rootFeatures' ? "garage/syslog") "syslog")
-      (lib.optional (rootFeatures' ? "garage/syslog" || rootFeatures' ? "garage/syslog-tracing") "syslog-tracing")
       (lib.optional (rootFeatures' ? "garage/system-libs") "system-libs")
       (lib.optional (rootFeatures' ? "garage/telemetry-otlp") "telemetry-otlp")
     ];
@@ -1941,15 +1949,15 @@ in
       format_table = (rustPackages."unknown".format_table."0.1.1" { inherit profileName; }).out;
       futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
       futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.30" { inherit profileName; }).out;
-      garage_api = (rustPackages."unknown".garage_api."1.0.1" { inherit profileName; }).out;
-      garage_block = (rustPackages."unknown".garage_block."1.0.1" { inherit profileName; }).out;
-      garage_db = (rustPackages."unknown".garage_db."1.0.1" { inherit profileName; }).out;
-      garage_model = (rustPackages."unknown".garage_model."1.0.1" { inherit profileName; }).out;
-      garage_net = (rustPackages."unknown".garage_net."1.0.1" { inherit profileName; }).out;
-      garage_rpc = (rustPackages."unknown".garage_rpc."1.0.1" { inherit profileName; }).out;
-      garage_table = (rustPackages."unknown".garage_table."1.0.1" { inherit profileName; }).out;
-      garage_util = (rustPackages."unknown".garage_util."1.0.1" { inherit profileName; }).out;
-      garage_web = (rustPackages."unknown".garage_web."1.0.1" { inherit profileName; }).out;
+      garage_api = (rustPackages."unknown".garage_api."0.10.0" { inherit profileName; }).out;
+      garage_block = (rustPackages."unknown".garage_block."0.10.0" { inherit profileName; }).out;
+      garage_db = (rustPackages."unknown".garage_db."0.10.0" { inherit profileName; }).out;
+      garage_model = (rustPackages."unknown".garage_model."0.10.0" { inherit profileName; }).out;
+      garage_net = (rustPackages."unknown".garage_net."0.10.0" { inherit profileName; }).out;
+      garage_rpc = (rustPackages."unknown".garage_rpc."0.10.0" { inherit profileName; }).out;
+      garage_table = (rustPackages."unknown".garage_table."0.10.0" { inherit profileName; }).out;
+      garage_util = (rustPackages."unknown".garage_util."0.10.0" { inherit profileName; }).out;
+      garage_web = (rustPackages."unknown".garage_web."0.10.0" { inherit profileName; }).out;
       git_version = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".git-version."0.3.9" { inherit profileName; }).out;
       hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
       sodiumoxide = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".kuska-sodiumoxide."0.2.5-0" { inherit profileName; }).out;
@@ -1961,9 +1969,7 @@ in
       rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
       serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.196" { inherit profileName; }).out;
       serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.14" { inherit profileName; }).out;
-      sha1 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".sha1."0.10.6" { inherit profileName; }).out;
       structopt = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".structopt."0.3.26" { inherit profileName; }).out;
-      ${ if rootFeatures' ? "garage/syslog" || rootFeatures' ? "garage/syslog-tracing" then "syslog_tracing" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".syslog-tracing."0.3.0" { inherit profileName; }).out;
       timeago = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".timeago."0.4.2" { inherit profileName; }).out;
       tokio = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.36.0" { inherit profileName; }).out;
       toml = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".toml."0.8.10" { inherit profileName; }).out;
@@ -1989,11 +1995,11 @@ in
     };
   });

-  "unknown".garage_api."1.0.1" = overridableMkRustCrate (profileName: rec {
+  "unknown".garage_api."0.10.0" = overridableMkRustCrate (profileName: rec {
     name = "garage_api";
-    version = "1.0.1";
+    version = "0.10.0";
     registry = "unknown";
-    src = fetchCrateLocal workspaceSrc;
+    src = fetchCrateLocal (workspaceSrc + "/src/api");
     features = builtins.concatLists [
       (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/k2v" || rootFeatures' ? "garage_api/k2v") "k2v")
       (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage_api/metrics") "metrics")
@@ -2008,19 +2014,17 @@ in
       base64 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".base64."0.21.7" { inherit profileName; }).out;
       bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.5.0" { inherit profileName; }).out;
       chrono = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".chrono."0.4.33" { inherit profileName; }).out;
-      crc32c = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crc32c."0.6.4" { inherit profileName; }).out;
-      crc32fast = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crc32fast."1.4.0" { inherit profileName; }).out;
       crypto_common = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crypto-common."0.1.6" { inherit profileName; }).out;
       err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
       form_urlencoded = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".form_urlencoded."1.2.1" { inherit profileName; }).out;
       futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
       futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.30" { inherit profileName; }).out;
-      garage_block = (rustPackages."unknown".garage_block."1.0.1" { inherit profileName; }).out;
-      garage_model = (rustPackages."unknown".garage_model."1.0.1" { inherit profileName; }).out;
-      garage_net = (rustPackages."unknown".garage_net."1.0.1" { inherit profileName; }).out;
-      garage_rpc = (rustPackages."unknown".garage_rpc."1.0.1" { inherit profileName; }).out;
-      garage_table = (rustPackages."unknown".garage_table."1.0.1" { inherit profileName; }).out;
-      garage_util = (rustPackages."unknown".garage_util."1.0.1" { inherit profileName; }).out;
+      garage_block = (rustPackages."unknown".garage_block."0.10.0" { inherit profileName; }).out;
+      garage_model = (rustPackages."unknown".garage_model."0.10.0" { inherit profileName; }).out;
+      garage_net = (rustPackages."unknown".garage_net."0.10.0" { inherit profileName; }).out;
+      garage_rpc = (rustPackages."unknown".garage_rpc."0.10.0" { inherit profileName; }).out;
+      garage_table = (rustPackages."unknown".garage_table."0.10.0" { inherit profileName; }).out;
+      garage_util = (rustPackages."unknown".garage_util."0.10.0" { inherit profileName; }).out;
       hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
       hmac = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hmac."0.12.1" { inherit profileName; }).out;
       http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."1.0.0" { inherit profileName; }).out;
@@ -2043,7 +2047,6 @@ in
       serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.196" { inherit profileName; }).out;
       serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.14" { inherit profileName; }).out;
       serde_json = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_json."1.0.113" { inherit profileName; }).out;
-      sha1 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".sha1."0.10.6" { inherit profileName; }).out;
       sha2 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".sha2."0.10.8" { inherit profileName; }).out;
       tokio = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.36.0" { inherit profileName; }).out;
       tokio_stream = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio-stream."0.1.14" { inherit profileName; }).out;
@@ -2053,11 +2056,11 @@ in
     };
   });

-  "unknown".garage_block."1.0.1" = overridableMkRustCrate (profileName: rec {
+  "unknown".garage_block."0.10.0" = overridableMkRustCrate (profileName: rec {
     name = "garage_block";
-    version = "1.0.1";
+    version = "0.10.0";
     registry = "unknown";
-    src = fetchCrateLocal workspaceSrc;
+    src = fetchCrateLocal (workspaceSrc + "/src/block");
     features = builtins.concatLists [
       (lib.optional (rootFeatures' ? "garage/system-libs" || rootFeatures' ? "garage_block/system-libs") "system-libs")
|
||||||
];
|
];
|
||||||
|
@ -2069,11 +2072,11 @@ in
|
||||||
bytesize = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytesize."1.3.0" { inherit profileName; }).out;
|
bytesize = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytesize."1.3.0" { inherit profileName; }).out;
|
||||||
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
|
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
|
||||||
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.30" { inherit profileName; }).out;
|
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.30" { inherit profileName; }).out;
|
||||||
garage_db = (rustPackages."unknown".garage_db."1.0.1" { inherit profileName; }).out;
|
garage_db = (rustPackages."unknown".garage_db."0.10.0" { inherit profileName; }).out;
|
||||||
garage_net = (rustPackages."unknown".garage_net."1.0.1" { inherit profileName; }).out;
|
garage_net = (rustPackages."unknown".garage_net."0.10.0" { inherit profileName; }).out;
|
||||||
garage_rpc = (rustPackages."unknown".garage_rpc."1.0.1" { inherit profileName; }).out;
|
garage_rpc = (rustPackages."unknown".garage_rpc."0.10.0" { inherit profileName; }).out;
|
||||||
garage_table = (rustPackages."unknown".garage_table."1.0.1" { inherit profileName; }).out;
|
garage_table = (rustPackages."unknown".garage_table."0.10.0" { inherit profileName; }).out;
|
||||||
garage_util = (rustPackages."unknown".garage_util."1.0.1" { inherit profileName; }).out;
|
garage_util = (rustPackages."unknown".garage_util."0.10.0" { inherit profileName; }).out;
|
||||||
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
|
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
|
||||||
opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
|
opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
|
||||||
rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
|
rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
|
||||||
|
@ -2086,18 +2089,16 @@ in
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
|
|
||||||
"unknown".garage_db."1.0.1" = overridableMkRustCrate (profileName: rec {
|
"unknown".garage_db."0.10.0" = overridableMkRustCrate (profileName: rec {
|
||||||
name = "garage_db";
|
name = "garage_db";
|
||||||
version = "1.0.1";
|
version = "0.10.0";
|
||||||
registry = "unknown";
|
registry = "unknown";
|
||||||
src = fetchCrateLocal workspaceSrc;
|
src = fetchCrateLocal (workspaceSrc + "/src/db");
|
||||||
features = builtins.concatLists [
|
features = builtins.concatLists [
|
||||||
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "bundled-libs")
|
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "bundled-libs")
|
||||||
(lib.optional (rootFeatures' ? "garage_db/default") "default")
|
(lib.optional (rootFeatures' ? "garage_db/default") "default")
|
||||||
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "heed")
|
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "heed")
|
||||||
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "lmdb")
|
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "lmdb")
|
||||||
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "r2d2")
|
|
||||||
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "r2d2_sqlite")
|
|
||||||
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "rusqlite")
|
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "rusqlite")
|
||||||
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "sqlite")
|
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "sqlite")
|
||||||
];
|
];
|
||||||
|
@ -2105,9 +2106,8 @@ in
|
||||||
err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
|
err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
|
||||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "heed" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".heed."0.11.0" { inherit profileName; }).out;
|
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "heed" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".heed."0.11.0" { inherit profileName; }).out;
|
||||||
hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out;
|
hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out;
|
||||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "r2d2" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".r2d2."0.8.10" { inherit profileName; }).out;
|
ouroboros = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".ouroboros."0.18.3" { inherit profileName; }).out;
|
||||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "r2d2_sqlite" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".r2d2_sqlite."0.24.0" { inherit profileName; }).out;
|
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "rusqlite" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rusqlite."0.30.0" { inherit profileName; }).out;
|
||||||
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "rusqlite" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rusqlite."0.31.0" { inherit profileName; }).out;
|
|
||||||
tracing = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tracing."0.1.40" { inherit profileName; }).out;
|
tracing = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tracing."0.1.40" { inherit profileName; }).out;
|
||||||
};
|
};
|
||||||
devDependencies = {
|
devDependencies = {
|
||||||
|
@ -2115,11 +2115,11 @@ in
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
|
|
||||||
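The r2d2 and r2d2_sqlite entries dropped above follow the feature-gating idiom used throughout Cargo.nix: rootFeatures' is the set of features enabled on the root crates, the `?` operator tests membership, and lib.optional yields either [] or a one-element list, which builtins.concatLists then flattens. A minimal sketch of how it evaluates (the rootFeatures' contents below are illustrative, not taken from this file):

let
  lib = (import <nixpkgs> {}).lib;
  rootFeatures' = { "garage/default" = true; "garage_db/sqlite" = true; };
in
  builtins.concatLists [
    (lib.optional (rootFeatures' ? "garage_db/sqlite") "sqlite")  # [ "sqlite" ]
    (lib.optional (rootFeatures' ? "garage_db/lmdb") "lmdb")      # []
  ]
# evaluates to [ "sqlite" ]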
"unknown".garage_model."1.0.1" = overridableMkRustCrate (profileName: rec {
|
"unknown".garage_model."0.10.0" = overridableMkRustCrate (profileName: rec {
|
||||||
name = "garage_model";
|
name = "garage_model";
|
||||||
version = "1.0.1";
|
version = "0.10.0";
|
||||||
registry = "unknown";
|
registry = "unknown";
|
||||||
src = fetchCrateLocal workspaceSrc;
|
src = fetchCrateLocal (workspaceSrc + "/src/model");
|
||||||
features = builtins.concatLists [
|
features = builtins.concatLists [
|
||||||
(lib.optional (rootFeatures' ? "garage_model/default") "default")
|
(lib.optional (rootFeatures' ? "garage_model/default") "default")
|
||||||
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/k2v" || rootFeatures' ? "garage_api/k2v" || rootFeatures' ? "garage_model/k2v") "k2v")
|
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/k2v" || rootFeatures' ? "garage_api/k2v" || rootFeatures' ? "garage_model/k2v") "k2v")
|
||||||
|
@ -2135,16 +2135,15 @@ in
|
||||||
err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
|
err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
|
||||||
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
|
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
|
||||||
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.30" { inherit profileName; }).out;
|
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.30" { inherit profileName; }).out;
|
||||||
garage_block = (rustPackages."unknown".garage_block."1.0.1" { inherit profileName; }).out;
|
garage_block = (rustPackages."unknown".garage_block."0.10.0" { inherit profileName; }).out;
|
||||||
garage_db = (rustPackages."unknown".garage_db."1.0.1" { inherit profileName; }).out;
|
garage_db = (rustPackages."unknown".garage_db."0.10.0" { inherit profileName; }).out;
|
||||||
garage_net = (rustPackages."unknown".garage_net."1.0.1" { inherit profileName; }).out;
|
garage_net = (rustPackages."unknown".garage_net."0.10.0" { inherit profileName; }).out;
|
||||||
garage_rpc = (rustPackages."unknown".garage_rpc."1.0.1" { inherit profileName; }).out;
|
garage_rpc = (rustPackages."unknown".garage_rpc."0.10.0" { inherit profileName; }).out;
|
||||||
garage_table = (rustPackages."unknown".garage_table."1.0.1" { inherit profileName; }).out;
|
garage_table = (rustPackages."unknown".garage_table."0.10.0" { inherit profileName; }).out;
|
||||||
garage_util = (rustPackages."unknown".garage_util."1.0.1" { inherit profileName; }).out;
|
garage_util = (rustPackages."unknown".garage_util."0.10.0" { inherit profileName; }).out;
|
||||||
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
|
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
|
||||||
http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."1.0.0" { inherit profileName; }).out;
|
http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."1.0.0" { inherit profileName; }).out;
|
||||||
opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
|
opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
|
||||||
parse_duration = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".parse_duration."2.1.1" { inherit profileName; }).out;
|
|
||||||
rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
|
rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
|
||||||
serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.196" { inherit profileName; }).out;
|
serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.196" { inherit profileName; }).out;
|
||||||
serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.14" { inherit profileName; }).out;
|
serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.14" { inherit profileName; }).out;
|
||||||
|
@ -2154,11 +2153,11 @@ in
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
|
|
||||||
"unknown".garage_net."1.0.1" = overridableMkRustCrate (profileName: rec {
|
"unknown".garage_net."0.10.0" = overridableMkRustCrate (profileName: rec {
|
||||||
name = "garage_net";
|
name = "garage_net";
|
||||||
version = "1.0.1";
|
version = "0.10.0";
|
||||||
registry = "unknown";
|
registry = "unknown";
|
||||||
src = fetchCrateLocal workspaceSrc;
|
src = fetchCrateLocal (workspaceSrc + "/src/net");
|
||||||
features = builtins.concatLists [
|
features = builtins.concatLists [
|
||||||
[ "default" ]
|
[ "default" ]
|
||||||
(lib.optional (rootFeatures' ? "garage_net/opentelemetry" || rootFeatures' ? "garage_net/telemetry") "opentelemetry")
|
(lib.optional (rootFeatures' ? "garage_net/opentelemetry" || rootFeatures' ? "garage_net/telemetry") "opentelemetry")
|
||||||
|
@ -2191,11 +2190,11 @@ in
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
|
|
||||||
"unknown".garage_rpc."1.0.1" = overridableMkRustCrate (profileName: rec {
|
"unknown".garage_rpc."0.10.0" = overridableMkRustCrate (profileName: rec {
|
||||||
name = "garage_rpc";
|
name = "garage_rpc";
|
||||||
version = "1.0.1";
|
version = "0.10.0";
|
||||||
registry = "unknown";
|
registry = "unknown";
|
||||||
src = fetchCrateLocal workspaceSrc;
|
src = fetchCrateLocal (workspaceSrc + "/src/rpc");
|
||||||
features = builtins.concatLists [
|
features = builtins.concatLists [
|
||||||
(lib.optional (rootFeatures' ? "garage/consul-discovery" || rootFeatures' ? "garage_rpc/consul-discovery") "consul-discovery")
|
(lib.optional (rootFeatures' ? "garage/consul-discovery" || rootFeatures' ? "garage_rpc/consul-discovery") "consul-discovery")
|
||||||
(lib.optional (rootFeatures' ? "garage/consul-discovery" || rootFeatures' ? "garage_rpc/consul-discovery" || rootFeatures' ? "garage_rpc/err-derive") "err-derive")
|
(lib.optional (rootFeatures' ? "garage/consul-discovery" || rootFeatures' ? "garage_rpc/consul-discovery" || rootFeatures' ? "garage_rpc/err-derive") "err-derive")
|
||||||
|
@ -2215,12 +2214,11 @@ in
|
||||||
format_table = (rustPackages."unknown".format_table."0.1.1" { inherit profileName; }).out;
|
format_table = (rustPackages."unknown".format_table."0.1.1" { inherit profileName; }).out;
|
||||||
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
|
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
|
||||||
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.30" { inherit profileName; }).out;
|
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.30" { inherit profileName; }).out;
|
||||||
garage_db = (rustPackages."unknown".garage_db."1.0.1" { inherit profileName; }).out;
|
garage_db = (rustPackages."unknown".garage_db."0.10.0" { inherit profileName; }).out;
|
||||||
garage_net = (rustPackages."unknown".garage_net."1.0.1" { inherit profileName; }).out;
|
garage_net = (rustPackages."unknown".garage_net."0.10.0" { inherit profileName; }).out;
|
||||||
garage_util = (rustPackages."unknown".garage_util."1.0.1" { inherit profileName; }).out;
|
garage_util = (rustPackages."unknown".garage_util."0.10.0" { inherit profileName; }).out;
|
||||||
gethostname = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".gethostname."0.4.3" { inherit profileName; }).out;
|
gethostname = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".gethostname."0.4.3" { inherit profileName; }).out;
|
||||||
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
|
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
|
||||||
ipnet = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".ipnet."2.9.0" { inherit profileName; }).out;
|
|
||||||
itertools = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".itertools."0.12.1" { inherit profileName; }).out;
|
itertools = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".itertools."0.12.1" { inherit profileName; }).out;
|
||||||
${ if rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/k8s-openapi" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "k8s_openapi" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".k8s-openapi."0.21.0" { inherit profileName; }).out;
|
${ if rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/k8s-openapi" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "k8s_openapi" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".k8s-openapi."0.21.0" { inherit profileName; }).out;
|
||||||
${ if rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "kube" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".kube."0.88.1" { inherit profileName; }).out;
|
${ if rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "kube" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".kube."0.88.1" { inherit profileName; }).out;
|
||||||
|
@ -2240,20 +2238,20 @@ in
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
|
|
||||||
"unknown".garage_table."1.0.1" = overridableMkRustCrate (profileName: rec {
|
"unknown".garage_table."0.10.0" = overridableMkRustCrate (profileName: rec {
|
||||||
name = "garage_table";
|
name = "garage_table";
|
||||||
version = "1.0.1";
|
version = "0.10.0";
|
||||||
registry = "unknown";
|
registry = "unknown";
|
||||||
src = fetchCrateLocal workspaceSrc;
|
src = fetchCrateLocal (workspaceSrc + "/src/table");
|
||||||
dependencies = {
|
dependencies = {
|
||||||
arc_swap = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".arc-swap."1.6.0" { inherit profileName; }).out;
|
arc_swap = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".arc-swap."1.6.0" { inherit profileName; }).out;
|
||||||
async_trait = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".async-trait."0.1.77" { profileName = "__noProfile"; }).out;
|
async_trait = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".async-trait."0.1.77" { profileName = "__noProfile"; }).out;
|
||||||
bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.5.0" { inherit profileName; }).out;
|
bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.5.0" { inherit profileName; }).out;
|
||||||
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
|
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
|
||||||
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.30" { inherit profileName; }).out;
|
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.30" { inherit profileName; }).out;
|
||||||
garage_db = (rustPackages."unknown".garage_db."1.0.1" { inherit profileName; }).out;
|
garage_db = (rustPackages."unknown".garage_db."0.10.0" { inherit profileName; }).out;
|
||||||
garage_rpc = (rustPackages."unknown".garage_rpc."1.0.1" { inherit profileName; }).out;
|
garage_rpc = (rustPackages."unknown".garage_rpc."0.10.0" { inherit profileName; }).out;
|
||||||
garage_util = (rustPackages."unknown".garage_util."1.0.1" { inherit profileName; }).out;
|
garage_util = (rustPackages."unknown".garage_util."0.10.0" { inherit profileName; }).out;
|
||||||
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
|
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
|
||||||
hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out;
|
hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out;
|
||||||
opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
|
opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
|
||||||
|
@ -2265,11 +2263,11 @@ in
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
|
|
||||||
"unknown".garage_util."1.0.1" = overridableMkRustCrate (profileName: rec {
|
"unknown".garage_util."0.10.0" = overridableMkRustCrate (profileName: rec {
|
||||||
name = "garage_util";
|
name = "garage_util";
|
||||||
version = "1.0.1";
|
version = "0.10.0";
|
||||||
registry = "unknown";
|
registry = "unknown";
|
||||||
src = fetchCrateLocal workspaceSrc;
|
src = fetchCrateLocal (workspaceSrc + "/src/util");
|
||||||
features = builtins.concatLists [
|
features = builtins.concatLists [
|
||||||
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/k2v" || rootFeatures' ? "garage_api/k2v" || rootFeatures' ? "garage_model/k2v" || rootFeatures' ? "garage_util/k2v") "k2v")
|
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/k2v" || rootFeatures' ? "garage_api/k2v" || rootFeatures' ? "garage_model/k2v" || rootFeatures' ? "garage_util/k2v") "k2v")
|
||||||
];
|
];
|
||||||
|
@ -2283,8 +2281,8 @@ in
|
||||||
digest = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".digest."0.10.7" { inherit profileName; }).out;
|
digest = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".digest."0.10.7" { inherit profileName; }).out;
|
||||||
err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
|
err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
|
||||||
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
|
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
|
||||||
garage_db = (rustPackages."unknown".garage_db."1.0.1" { inherit profileName; }).out;
|
garage_db = (rustPackages."unknown".garage_db."0.10.0" { inherit profileName; }).out;
|
||||||
garage_net = (rustPackages."unknown".garage_net."1.0.1" { inherit profileName; }).out;
|
garage_net = (rustPackages."unknown".garage_net."0.10.0" { inherit profileName; }).out;
|
||||||
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
|
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
|
||||||
hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out;
|
hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out;
|
||||||
http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."1.0.0" { inherit profileName; }).out;
|
http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."1.0.0" { inherit profileName; }).out;
|
||||||
|
@ -2309,18 +2307,18 @@ in
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
|
|
||||||
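Every workspace crate in this diff receives the same src change: instead of building each crate from the root of the workspace, the branch points each crate at its own subdirectory, so the derivation input of one crate only changes when that crate's subtree changes. A sketch of the before/after (fetchCrateLocal is the helper defined earlier in Cargo.nix; the identity function below merely stands in for it):

let
  workspaceSrc = ./.;
  fetchCrateLocal = p: p;  # placeholder for the real helper
in {
  srcOnMain   = fetchCrateLocal workspaceSrc;                  # whole workspace
  srcOnBranch = fetchCrateLocal (workspaceSrc + "/src/util");  # crate subtree only
}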
"unknown".garage_web."1.0.1" = overridableMkRustCrate (profileName: rec {
|
"unknown".garage_web."0.10.0" = overridableMkRustCrate (profileName: rec {
|
||||||
name = "garage_web";
|
name = "garage_web";
|
||||||
version = "1.0.1";
|
version = "0.10.0";
|
||||||
registry = "unknown";
|
registry = "unknown";
|
||||||
src = fetchCrateLocal workspaceSrc;
|
src = fetchCrateLocal (workspaceSrc + "/src/web");
|
||||||
dependencies = {
|
dependencies = {
|
||||||
err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
|
err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
|
||||||
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
|
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
|
||||||
garage_api = (rustPackages."unknown".garage_api."1.0.1" { inherit profileName; }).out;
|
garage_api = (rustPackages."unknown".garage_api."0.10.0" { inherit profileName; }).out;
|
||||||
garage_model = (rustPackages."unknown".garage_model."1.0.1" { inherit profileName; }).out;
|
garage_model = (rustPackages."unknown".garage_model."0.10.0" { inherit profileName; }).out;
|
||||||
garage_table = (rustPackages."unknown".garage_table."1.0.1" { inherit profileName; }).out;
|
garage_table = (rustPackages."unknown".garage_table."0.10.0" { inherit profileName; }).out;
|
||||||
garage_util = (rustPackages."unknown".garage_util."1.0.1" { inherit profileName; }).out;
|
garage_util = (rustPackages."unknown".garage_util."0.10.0" { inherit profileName; }).out;
|
||||||
http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."1.0.0" { inherit profileName; }).out;
|
http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."1.0.0" { inherit profileName; }).out;
|
||||||
http_body_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http-body-util."0.1.0" { inherit profileName; }).out;
|
http_body_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http-body-util."0.1.0" { inherit profileName; }).out;
|
||||||
hyper = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."1.1.0" { inherit profileName; }).out;
|
hyper = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."1.1.0" { inherit profileName; }).out;
|
||||||
|
@ -2486,25 +2484,25 @@ in
|
||||||
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
||||||
src = fetchCratesIo { inherit name version; sha256 = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604"; };
|
src = fetchCratesIo { inherit name version; sha256 = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604"; };
|
||||||
features = builtins.concatLists [
|
features = builtins.concatLists [
|
||||||
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "ahash")
|
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "ahash")
|
||||||
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "allocator-api2")
|
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "allocator-api2")
|
||||||
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "default")
|
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "default")
|
||||||
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "inline-more")
|
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "inline-more")
|
||||||
[ "raw" ]
|
[ "raw" ]
|
||||||
];
|
];
|
||||||
dependencies = {
|
dependencies = {
|
||||||
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "ahash" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".ahash."0.8.7" { inherit profileName; }).out;
|
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "ahash" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".ahash."0.8.7" { inherit profileName; }).out;
|
||||||
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "allocator_api2" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".allocator-api2."0.2.16" { inherit profileName; }).out;
|
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "allocator_api2" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".allocator-api2."0.2.16" { inherit profileName; }).out;
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
|
|
||||||
"registry+https://github.com/rust-lang/crates.io-index".hashlink."0.9.0" = overridableMkRustCrate (profileName: rec {
|
"registry+https://github.com/rust-lang/crates.io-index".hashlink."0.8.4" = overridableMkRustCrate (profileName: rec {
|
||||||
name = "hashlink";
|
name = "hashlink";
|
||||||
version = "0.9.0";
|
version = "0.8.4";
|
||||||
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
||||||
src = fetchCratesIo { inherit name version; sha256 = "692eaaf7f7607518dd3cef090f1474b61edc5301d8012f09579920df68b725ee"; };
|
src = fetchCratesIo { inherit name version; sha256 = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7"; };
|
||||||
dependencies = {
|
dependencies = {
|
||||||
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "hashbrown" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hashbrown."0.14.3" { inherit profileName; }).out;
|
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "hashbrown" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hashbrown."0.14.3" { inherit profileName; }).out;
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
|
|
||||||
|
@ -2524,7 +2522,7 @@ in
|
||||||
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
||||||
src = fetchCratesIo { inherit name version; sha256 = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"; };
|
src = fetchCratesIo { inherit name version; sha256 = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"; };
|
||||||
features = builtins.concatLists [
|
features = builtins.concatLists [
|
||||||
(lib.optional (rootFeatures' ? "k2v-client/clap" || rootFeatures' ? "k2v-client/cli") "default")
|
[ "default" ]
|
||||||
];
|
];
|
||||||
});
|
});
|
||||||
|
|
||||||
|
@ -3018,8 +3016,8 @@ in
|
||||||
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
||||||
src = fetchCratesIo { inherit name version; sha256 = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3"; };
|
src = fetchCratesIo { inherit name version; sha256 = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3"; };
|
||||||
features = builtins.concatLists [
|
features = builtins.concatLists [
|
||||||
[ "default" ]
|
(lib.optional (rootFeatures' ? "garage/consul-discovery" || rootFeatures' ? "garage_rpc/consul-discovery" || rootFeatures' ? "garage_rpc/reqwest") "default")
|
||||||
[ "std" ]
|
(lib.optional (rootFeatures' ? "garage/consul-discovery" || rootFeatures' ? "garage_rpc/consul-discovery" || rootFeatures' ? "garage_rpc/reqwest") "std")
|
||||||
];
|
];
|
||||||
});
|
});
|
||||||
|
|
||||||
|
@ -3149,7 +3147,7 @@ in
|
||||||
name = "k2v-client";
|
name = "k2v-client";
|
||||||
version = "0.0.4";
|
version = "0.0.4";
|
||||||
registry = "unknown";
|
registry = "unknown";
|
||||||
src = fetchCrateLocal workspaceSrc;
|
src = fetchCrateLocal (workspaceSrc + "/src/k2v-client");
|
||||||
features = builtins.concatLists [
|
features = builtins.concatLists [
|
||||||
(lib.optional (rootFeatures' ? "k2v-client/clap" || rootFeatures' ? "k2v-client/cli") "clap")
|
(lib.optional (rootFeatures' ? "k2v-client/clap" || rootFeatures' ? "k2v-client/cli") "clap")
|
||||||
(lib.optional (rootFeatures' ? "k2v-client/cli") "cli")
|
(lib.optional (rootFeatures' ? "k2v-client/cli") "cli")
|
||||||
|
@ -3428,24 +3426,24 @@ in
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
|
|
||||||
"registry+https://github.com/rust-lang/crates.io-index".libsqlite3-sys."0.28.0" = overridableMkRustCrate (profileName: rec {
|
"registry+https://github.com/rust-lang/crates.io-index".libsqlite3-sys."0.27.0" = overridableMkRustCrate (profileName: rec {
|
||||||
name = "libsqlite3-sys";
|
name = "libsqlite3-sys";
|
||||||
version = "0.28.0";
|
version = "0.27.0";
|
||||||
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
||||||
src = fetchCratesIo { inherit name version; sha256 = "0c10584274047cb335c23d3e61bcef8e323adae7c5c8c760540f73610177fc3f"; };
|
src = fetchCratesIo { inherit name version; sha256 = "cf4e226dcd58b4be396f7bd3c20da8fdee2911400705297ba7d2d7cc2c30f716"; };
|
||||||
features = builtins.concatLists [
|
features = builtins.concatLists [
|
||||||
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "bundled")
|
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "bundled")
|
||||||
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "bundled_bindings")
|
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "bundled_bindings")
|
||||||
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "cc")
|
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "cc")
|
||||||
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "default")
|
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "default")
|
||||||
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "min_sqlite_version_3_14_0")
|
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "min_sqlite_version_3_14_0")
|
||||||
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "pkg-config")
|
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "pkg-config")
|
||||||
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "vcpkg")
|
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "vcpkg")
|
||||||
];
|
];
|
||||||
buildDependencies = {
|
buildDependencies = {
|
||||||
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs" then "cc" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".cc."1.0.83" { profileName = "__noProfile"; }).out;
|
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs" then "cc" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".cc."1.0.83" { profileName = "__noProfile"; }).out;
|
||||||
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "pkg_config" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".pkg-config."0.3.29" { profileName = "__noProfile"; }).out;
|
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "pkg_config" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".pkg-config."0.3.29" { profileName = "__noProfile"; }).out;
|
||||||
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "vcpkg" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".vcpkg."0.2.15" { profileName = "__noProfile"; }).out;
|
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "vcpkg" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".vcpkg."0.2.15" { profileName = "__noProfile"; }).out;
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
|
|
||||||
|
@ -3972,6 +3970,40 @@ in
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
|
|
||||||
|
"registry+https://github.com/rust-lang/crates.io-index".ouroboros."0.18.3" = overridableMkRustCrate (profileName: rec {
|
||||||
|
name = "ouroboros";
|
||||||
|
version = "0.18.3";
|
||||||
|
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
||||||
|
src = fetchCratesIo { inherit name version; sha256 = "97b7be5a8a3462b752f4be3ff2b2bf2f7f1d00834902e46be2a4d68b87b0573c"; };
|
||||||
|
features = builtins.concatLists [
|
||||||
|
[ "default" ]
|
||||||
|
[ "std" ]
|
||||||
|
];
|
||||||
|
dependencies = {
|
||||||
|
aliasable = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".aliasable."0.1.3" { inherit profileName; }).out;
|
||||||
|
ouroboros_macro = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".ouroboros_macro."0.18.3" { profileName = "__noProfile"; }).out;
|
||||||
|
static_assertions = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".static_assertions."1.1.0" { inherit profileName; }).out;
|
||||||
|
};
|
||||||
|
});
|
||||||
|
|
||||||
|
"registry+https://github.com/rust-lang/crates.io-index".ouroboros_macro."0.18.3" = overridableMkRustCrate (profileName: rec {
|
||||||
|
name = "ouroboros_macro";
|
||||||
|
version = "0.18.3";
|
||||||
|
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
||||||
|
src = fetchCratesIo { inherit name version; sha256 = "b645dcde5f119c2c454a92d0dfa271a2a3b205da92e4292a68ead4bdbfde1f33"; };
|
||||||
|
features = builtins.concatLists [
|
||||||
|
[ "std" ]
|
||||||
|
];
|
||||||
|
dependencies = {
|
||||||
|
heck = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".heck."0.4.1" { inherit profileName; }).out;
|
||||||
|
itertools = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".itertools."0.12.1" { inherit profileName; }).out;
|
||||||
|
proc_macro2 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".proc-macro2."1.0.78" { inherit profileName; }).out;
|
||||||
|
proc_macro2_diagnostics = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".proc-macro2-diagnostics."0.10.1" { inherit profileName; }).out;
|
||||||
|
quote = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".quote."1.0.35" { inherit profileName; }).out;
|
||||||
|
syn = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".syn."2.0.48" { inherit profileName; }).out;
|
||||||
|
};
|
||||||
|
});
|
||||||
|
|
||||||
"registry+https://github.com/rust-lang/crates.io-index".outref."0.5.1" = overridableMkRustCrate (profileName: rec {
|
"registry+https://github.com/rust-lang/crates.io-index".outref."0.5.1" = overridableMkRustCrate (profileName: rec {
|
||||||
name = "outref";
|
name = "outref";
|
||||||
version = "0.5.1";
|
version = "0.5.1";
|
||||||
|
@@ -4041,11 +4073,11 @@ in
 registry = "registry+https://github.com/rust-lang/crates.io-index";
 src = fetchCratesIo { inherit name version; sha256 = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f"; };
 features = builtins.concatLists [
-(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "default")
+(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "default")
 ];
 dependencies = {
-${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "lock_api" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".lock_api."0.4.11" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "lock_api" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".lock_api."0.4.11" { inherit profileName; }).out;
-${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "parking_lot_core" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".parking_lot_core."0.9.9" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "parking_lot_core" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".parking_lot_core."0.9.9" { inherit profileName; }).out;
 };
 });

@@ -4070,11 +4102,11 @@ in
 registry = "registry+https://github.com/rust-lang/crates.io-index";
 src = fetchCratesIo { inherit name version; sha256 = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e"; };
 dependencies = {
-${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "cfg_if" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".cfg-if."1.0.0" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "cfg_if" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".cfg-if."1.0.0" { inherit profileName; }).out;
-${ if (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") && hostPlatform.isUnix then "libc" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.153" { inherit profileName; }).out;
+${ if (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") && hostPlatform.isUnix then "libc" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.153" { inherit profileName; }).out;
-${ if (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") && hostPlatform.parsed.kernel.name == "redox" then "syscall" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".redox_syscall."0.4.1" { inherit profileName; }).out;
+${ if (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") && hostPlatform.parsed.kernel.name == "redox" then "syscall" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".redox_syscall."0.4.1" { inherit profileName; }).out;
-${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "smallvec" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".smallvec."1.13.1" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "smallvec" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".smallvec."1.13.1" { inherit profileName; }).out;
-${ if (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") && hostPlatform.isWindows then "windows_targets" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".windows-targets."0.48.5" { inherit profileName; }).out;
+${ if (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") && hostPlatform.isWindows then "windows_targets" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".windows-targets."0.48.5" { inherit profileName; }).out;
 };
 });

@@ -4408,6 +4440,27 @@ in
 };
 });

+"registry+https://github.com/rust-lang/crates.io-index".proc-macro2-diagnostics."0.10.1" = overridableMkRustCrate (profileName: rec {
+name = "proc-macro2-diagnostics";
+version = "0.10.1";
+registry = "registry+https://github.com/rust-lang/crates.io-index";
+src = fetchCratesIo { inherit name version; sha256 = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8"; };
+features = builtins.concatLists [
+[ "colors" ]
+[ "default" ]
+[ "yansi" ]
+];
+dependencies = {
+proc_macro2 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".proc-macro2."1.0.78" { inherit profileName; }).out;
+quote = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".quote."1.0.35" { inherit profileName; }).out;
+syn = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".syn."2.0.48" { inherit profileName; }).out;
+yansi = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".yansi."1.0.0" { inherit profileName; }).out;
+};
+buildDependencies = {
+version_check = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".version_check."0.9.4" { profileName = "__noProfile"; }).out;
+};
+});
+
 "registry+https://github.com/rust-lang/crates.io-index".prometheus."0.13.3" = overridableMkRustCrate (profileName: rec {
 name = "prometheus";
 version = "0.13.3";
@@ -4529,30 +4582,6 @@ in
 };
 });

-"registry+https://github.com/rust-lang/crates.io-index".r2d2."0.8.10" = overridableMkRustCrate (profileName: rec {
-name = "r2d2";
-version = "0.8.10";
-registry = "registry+https://github.com/rust-lang/crates.io-index";
-src = fetchCratesIo { inherit name version; sha256 = "51de85fb3fb6524929c8a2eb85e6b6d363de4e8c48f9e2c2eac4944abc181c93"; };
-dependencies = {
-${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "log" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".log."0.4.20" { inherit profileName; }).out;
-${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "parking_lot" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".parking_lot."0.12.1" { inherit profileName; }).out;
-${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "scheduled_thread_pool" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".scheduled-thread-pool."0.2.7" { inherit profileName; }).out;
-};
-});
-
-"registry+https://github.com/rust-lang/crates.io-index".r2d2_sqlite."0.24.0" = overridableMkRustCrate (profileName: rec {
-name = "r2d2_sqlite";
-version = "0.24.0";
-registry = "registry+https://github.com/rust-lang/crates.io-index";
-src = fetchCratesIo { inherit name version; sha256 = "6a982edf65c129796dba72f8775b292ef482b40d035e827a9825b3bc07ccc5f2"; };
-dependencies = {
-${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "r2d2" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".r2d2."0.8.10" { inherit profileName; }).out;
-${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "rusqlite" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rusqlite."0.31.0" { inherit profileName; }).out;
-${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "uuid" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".uuid."1.4.1" { inherit profileName; }).out;
-};
-});
-
 "registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" = overridableMkRustCrate (profileName: rec {
 name = "rand";
 version = "0.8.5";
@@ -4620,7 +4649,7 @@ in
 registry = "registry+https://github.com/rust-lang/crates.io-index";
 src = fetchCratesIo { inherit name version; sha256 = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa"; };
 dependencies = {
-${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "bitflags" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bitflags."1.3.2" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "bitflags" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bitflags."1.3.2" { inherit profileName; }).out;
 };
 });

@@ -4881,23 +4910,22 @@ in
 ];
 });

-"registry+https://github.com/rust-lang/crates.io-index".rusqlite."0.31.0" = overridableMkRustCrate (profileName: rec {
+"registry+https://github.com/rust-lang/crates.io-index".rusqlite."0.30.0" = overridableMkRustCrate (profileName: rec {
 name = "rusqlite";
-version = "0.31.0";
+version = "0.30.0";
 registry = "registry+https://github.com/rust-lang/crates.io-index";
-src = fetchCratesIo { inherit name version; sha256 = "b838eba278d213a8beaf485bd313fd580ca4505a00d5871caeb1457c55322cae"; };
+src = fetchCratesIo { inherit name version; sha256 = "a78046161564f5e7cd9008aff3b2990b3850dc8e0349119b98e8f251e099f24d"; };
 features = builtins.concatLists [
-(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "backup")
 (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "bundled")
 (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "modern_sqlite")
 ];
 dependencies = {
-${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "bitflags" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bitflags."2.4.2" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "bitflags" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bitflags."2.4.2" { inherit profileName; }).out;
-${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "fallible_iterator" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".fallible-iterator."0.3.0" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "fallible_iterator" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".fallible-iterator."0.3.0" { inherit profileName; }).out;
-${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "fallible_streaming_iterator" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".fallible-streaming-iterator."0.1.9" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "fallible_streaming_iterator" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".fallible-streaming-iterator."0.1.9" { inherit profileName; }).out;
-${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "hashlink" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hashlink."0.9.0" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "hashlink" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hashlink."0.8.4" { inherit profileName; }).out;
-${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "libsqlite3_sys" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libsqlite3-sys."0.28.0" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "libsqlite3_sys" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libsqlite3-sys."0.27.0" { inherit profileName; }).out;
-${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "smallvec" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".smallvec."1.13.1" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "smallvec" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".smallvec."1.13.1" { inherit profileName; }).out;
 };
 });

@@ -5111,16 +5139,6 @@ in
 };
 });

-"registry+https://github.com/rust-lang/crates.io-index".scheduled-thread-pool."0.2.7" = overridableMkRustCrate (profileName: rec {
-name = "scheduled-thread-pool";
-version = "0.2.7";
-registry = "registry+https://github.com/rust-lang/crates.io-index";
-src = fetchCratesIo { inherit name version; sha256 = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19"; };
-dependencies = {
-${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "parking_lot" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".parking_lot."0.12.1" { inherit profileName; }).out;
-};
-});
-
 "registry+https://github.com/rust-lang/crates.io-index".schemars."0.8.16" = overridableMkRustCrate (profileName: rec {
 name = "schemars";
 version = "0.8.16";
@@ -5515,6 +5533,13 @@ in
 };
 });

+"registry+https://github.com/rust-lang/crates.io-index".static_assertions."1.1.0" = overridableMkRustCrate (profileName: rec {
+name = "static_assertions";
+version = "1.1.0";
+registry = "registry+https://github.com/rust-lang/crates.io-index";
+src = fetchCratesIo { inherit name version; sha256 = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"; };
+});
+
 "registry+https://github.com/rust-lang/crates.io-index".static_init."1.0.3" = overridableMkRustCrate (profileName: rec {
 name = "static_init";
 version = "1.0.3";
@@ -5680,18 +5705,6 @@ in
 };
 });

-"registry+https://github.com/rust-lang/crates.io-index".syslog-tracing."0.3.0" = overridableMkRustCrate (profileName: rec {
-name = "syslog-tracing";
-version = "0.3.0";
-registry = "registry+https://github.com/rust-lang/crates.io-index";
-src = fetchCratesIo { inherit name version; sha256 = "340b1540dcdb6b066bc2966e7974f977ab1a38f21b2be189014ffb0cc2405768"; };
-dependencies = {
-${ if rootFeatures' ? "garage/syslog" || rootFeatures' ? "garage/syslog-tracing" then "libc" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.153" { inherit profileName; }).out;
-${ if rootFeatures' ? "garage/syslog" || rootFeatures' ? "garage/syslog-tracing" then "tracing_core" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tracing-core."0.1.32" { inherit profileName; }).out;
-${ if rootFeatures' ? "garage/syslog" || rootFeatures' ? "garage/syslog-tracing" then "tracing_subscriber" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tracing-subscriber."0.3.18" { inherit profileName; }).out;
-};
-});
-
 "registry+https://github.com/rust-lang/crates.io-index".system-configuration."0.5.1" = overridableMkRustCrate (profileName: rec {
 name = "system-configuration";
 version = "0.5.1";
@@ -5782,11 +5795,11 @@ in
 };
 });

-"registry+https://github.com/rust-lang/crates.io-index".time."0.3.36" = overridableMkRustCrate (profileName: rec {
+"registry+https://github.com/rust-lang/crates.io-index".time."0.3.34" = overridableMkRustCrate (profileName: rec {
 name = "time";
-version = "0.3.36";
+version = "0.3.34";
 registry = "registry+https://github.com/rust-lang/crates.io-index";
-src = fetchCratesIo { inherit name version; sha256 = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885"; };
+src = fetchCratesIo { inherit name version; sha256 = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749"; };
 features = builtins.concatLists [
 [ "alloc" ]
 [ "default" ]
@@ -5799,7 +5812,7 @@ in
 powerfmt = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".powerfmt."0.2.0" { inherit profileName; }).out;
 serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.196" { inherit profileName; }).out;
 time_core = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".time-core."0.1.2" { inherit profileName; }).out;
-time_macros = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".time-macros."0.2.18" { profileName = "__noProfile"; }).out;
+time_macros = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".time-macros."0.2.17" { profileName = "__noProfile"; }).out;
 };
 });

@@ -5810,11 +5823,11 @@ in
 src = fetchCratesIo { inherit name version; sha256 = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3"; };
 });

-"registry+https://github.com/rust-lang/crates.io-index".time-macros."0.2.18" = overridableMkRustCrate (profileName: rec {
+"registry+https://github.com/rust-lang/crates.io-index".time-macros."0.2.17" = overridableMkRustCrate (profileName: rec {
 name = "time-macros";
-version = "0.2.18";
+version = "0.2.17";
 registry = "registry+https://github.com/rust-lang/crates.io-index";
-src = fetchCratesIo { inherit name version; sha256 = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf"; };
+src = fetchCratesIo { inherit name version; sha256 = "7ba3a3ef41e6672a2f0f001392bb5dcd3ff0a9992d618ca761a11c3121547774"; };
 features = builtins.concatLists [
 [ "parsing" ]
 ];
@@ -6498,16 +6511,13 @@ in
 src = fetchCratesIo { inherit name version; sha256 = "79daa5ed5740825c40b389c5e50312b9c86df53fccd33f281df655642b43869d"; };
 features = builtins.concatLists [
 [ "default" ]
-(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "fast-rng")
 [ "getrandom" ]
-(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "rand")
 [ "rng" ]
 [ "std" ]
 [ "v4" ]
 ];
 dependencies = {
 getrandom = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".getrandom."0.2.12" { inherit profileName; }).out;
-${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "rand" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
 };
 });

@@ -7021,13 +7031,25 @@ in
 ];
 });

+"registry+https://github.com/rust-lang/crates.io-index".yansi."1.0.0" = overridableMkRustCrate (profileName: rec {
+name = "yansi";
+version = "1.0.0";
+registry = "registry+https://github.com/rust-lang/crates.io-index";
+src = fetchCratesIo { inherit name version; sha256 = "6c2861d76f58ec8fc95708b9b1e417f7b12fd72ad33c01fa6886707092dea0d3"; };
+features = builtins.concatLists [
+[ "alloc" ]
+[ "default" ]
+[ "std" ]
+];
+});
+
 "registry+https://github.com/rust-lang/crates.io-index".zerocopy."0.7.32" = overridableMkRustCrate (profileName: rec {
 name = "zerocopy";
 version = "0.7.32";
 registry = "registry+https://github.com/rust-lang/crates.io-index";
 src = fetchCratesIo { inherit name version; sha256 = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be"; };
 features = builtins.concatLists [
-(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "simd")
+(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "simd")
 ];
 dependencies = {
 ${ if false then "zerocopy_derive" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".zerocopy-derive."0.7.32" { profileName = "__noProfile"; }).out;
@@ -7040,9 +7062,9 @@ in
 registry = "registry+https://github.com/rust-lang/crates.io-index";
 src = fetchCratesIo { inherit name version; sha256 = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6"; };
 dependencies = {
-${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "proc_macro2" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".proc-macro2."1.0.78" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "proc_macro2" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".proc-macro2."1.0.78" { inherit profileName; }).out;
-${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "quote" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".quote."1.0.35" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "quote" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".quote."1.0.35" { inherit profileName; }).out;
-${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "syn" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".syn."2.0.48" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "syn" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".syn."2.0.48" { inherit profileName; }).out;
 };
 });

Cargo.toml (28 changes)
@@ -21,15 +21,15 @@ default-members = ["src/garage"]

 # Internal Garage crates
 format_table = { version = "0.1.1", path = "src/format-table" }
-garage_api = { version = "1.0.1", path = "src/api" }
+garage_api = { version = "0.10.0", path = "src/api" }
-garage_block = { version = "1.0.1", path = "src/block" }
+garage_block = { version = "0.10.0", path = "src/block" }
-garage_db = { version = "1.0.1", path = "src/db", default-features = false }
+garage_db = { version = "0.10.0", path = "src/db", default-features = false }
-garage_model = { version = "1.0.1", path = "src/model", default-features = false }
+garage_model = { version = "0.10.0", path = "src/model", default-features = false }
-garage_net = { version = "1.0.1", path = "src/net" }
+garage_net = { version = "0.10.0", path = "src/net" }
-garage_rpc = { version = "1.0.1", path = "src/rpc" }
+garage_rpc = { version = "0.10.0", path = "src/rpc" }
-garage_table = { version = "1.0.1", path = "src/table" }
+garage_table = { version = "0.10.0", path = "src/table" }
-garage_util = { version = "1.0.1", path = "src/util" }
+garage_util = { version = "0.10.0", path = "src/util" }
-garage_web = { version = "1.0.1", path = "src/web" }
+garage_web = { version = "0.10.0", path = "src/web" }
 k2v-client = { version = "0.0.4", path = "src/k2v-client" }

 # External crates from crates.io
@@ -43,8 +43,6 @@ bytes = "1.0"
 bytesize = "1.1"
 cfg-if = "1.0"
 chrono = "0.4"
-crc32fast = "1.4"
-crc32c = "0.6"
 crypto-common = "0.1"
 digest = "0.10"
 err-derive = "0.3"
@@ -55,17 +53,16 @@ hexdump = "0.1"
 hmac = "0.12"
 idna = "0.5"
 itertools = "0.12"
-ipnet = "2.9.0"
 lazy_static = "1.4"
 md-5 = "0.10"
 mktemp = "0.5"
 nix = { version = "0.27", default-features = false, features = ["fs"] }
 nom = "7.1"
+ouroboros = "0.18"
 parse_duration = "2.1"
 pin-project = "1.0.12"
 pnet_datalink = "0.34"
 rand = "0.8"
-sha1 = "0.10"
 sha2 = "0.10"
 timeago = { version = "0.4", default-features = false }
 xxhash-rust = { version = "0.8", default-features = false, features = ["xxh3"] }
@@ -77,14 +74,11 @@ kuska-handshake = { version = "0.2.0", features = ["default", "async_std"] }
 clap = { version = "4.1", features = ["derive", "env"] }
 pretty_env_logger = "0.5"
 structopt = { version = "0.3", default-features = false }
-syslog-tracing = "0.3"
 tracing = "0.1"
 tracing-subscriber = { version = "0.3", features = ["env-filter"] }

 heed = { version = "0.11", default-features = false, features = ["lmdb"] }
-rusqlite = "0.31.0"
+rusqlite = "0.30.0"
-r2d2 = "0.8"
-r2d2_sqlite = "0.24"

 async-compression = { version = "0.4", features = ["tokio", "zstd"] }
 zstd = { version = "0.13", default-features = false }
@@ -45,4 +45,11 @@ in {
 ];
 });
 };
+clippy = {
+amd64 = (compile {
+inherit system git_version pkgsSrc cargo2nixOverlay;
+target = "x86_64-unknown-linux-musl";
+compiler = "clippy";
+}).workspace.garage { compileMode = "build"; };
+};
 }
@@ -23,7 +23,7 @@ client = minio.Minio(
 "GKyourapikey",
 "abcd[...]1234",
 # Force the region, this is specific to garage
-region="garage",
+region="region",
 )
 ```
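A note on the snippet changed above: whichever spelling is used, the region passed to `minio.Minio` must match the region the cluster was configured with, after which the client behaves like any other S3 endpoint. A minimal usage sketch, assuming the `client` object from the snippet and a hypothetical pre-existing bucket named `my-files`:

```python
from io import BytesIO

# Upload a small object to the hypothetical "my-files" bucket.
data = b"hello garage"
client.put_object("my-files", "hello.txt", BytesIO(data), length=len(data))

# List objects back out to confirm the upload.
for obj in client.list_objects("my-files"):
    print(obj.object_name, obj.size)
```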
@@ -335,7 +335,6 @@ From the [official Mastodon documentation](https://docs.joinmastodon.org/admin/t

 ```bash
 $ RAILS_ENV=production bin/tootctl media remove --days 3
-$ RAILS_ENV=production bin/tootctl media remove --days 15 --prune-profiles
 $ RAILS_ENV=production bin/tootctl media remove-orphans
 $ RAILS_ENV=production bin/tootctl preview_cards remove --days 15
 ```

@@ -354,6 +353,8 @@ Imports: 1.7 KB
 Settings: 0 Bytes
 ```

+Unfortunately, [old avatars and headers cannot currently be cleaned up](https://github.com/mastodon/mastodon/issues/9567).
+
 ### Migrating your data

 Data migration should be done with an efficient S3 client.
@@ -259,7 +259,7 @@ duck --delete garage:/my-files/an-object.txt

 ## WinSCP (libs3) {#winscp}

-*You can find instructions on how to use the GUI in french [in our wiki](https://guide.deuxfleurs.fr/prise_en_main/winscp/).*
+*You can find instructions on how to use the GUI in french [in our wiki](https://wiki.deuxfleurs.fr/fr/Guide/Garage/WinSCP).*

 How to use `winscp.com`, the CLI interface of WinSCP:

@ -53,43 +53,20 @@ and that's also why your nodes have super long identifiers.
|
||||||
|
|
||||||
Adding TLS support built into Garage is not currently planned.
|
Adding TLS support built into Garage is not currently planned.
|
||||||
|
|
||||||
## Garage stores data in plain text on the filesystem or encrypted using customer keys (SSE-C)
|
## Garage stores data in plain text on the filesystem
|
||||||
|
|
||||||
For standard S3 API requests, Garage does not encrypt data at rest by itself.
|
Garage does not handle data encryption at rest by itself, and instead delegates
|
||||||
For the most generic at rest encryption of data, we recommend setting up your
|
to the user to add encryption, either at the storage layer (LUKS, etc) or on
|
||||||
storage partitions on encrypted LUKS devices.
|
the client side (or both). There are no current plans to add data encryption
|
||||||
|
directly in Garage.
|
||||||
|
|
||||||
If you are developping your own client software that makes use of S3 storage,
|
Implementing data encryption directly in Garage might make things simpler for
|
||||||
we recommend implementing data encryption directly on the client side and never
|
end users, but also raises many more questions, especially around key
|
||||||
transmitting plaintext data to Garage. This makes it easy to use an external
|
management: for encryption of data, where could Garage get the encryption keys
|
||||||
untrusted storage provider if necessary.
|
from ? If we encrypt data but keep the keys in a plaintext file next to them,
|
||||||
|
it's useless. We probably don't want to have to manage secrets in garage as it
|
||||||
Garage does support [SSE-C
|
would be very hard to do in a secure way. Maybe integrate with an external
|
||||||
encryption](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html),
|
system such as Hashicorp Vault?
|
||||||
an encryption mode of Amazon S3 where data is encrypted at rest using
|
|
||||||
encryption keys given by the client. The encryption keys are passed to the
|
|
||||||
server in a header in each request, to encrypt or decrypt data at the moment of
|
|
||||||
reading or writing. The server discards the key as soon as it has finished
|
|
||||||
using it for the request. This mode allows the data to be encrypted at rest by
|
|
||||||
Garage itself, but it requires support in the client software. It is also not
|
|
||||||
adapted to a model where the server is not trusted or assumed to be
|
|
||||||
compromised, as the server can easily know the encryption keys. Note however
|
|
||||||
that when using SSE-C encryption, the only Garage node that knows the
|
|
||||||
encryption key passed in a given request is the node to which the request is
|
|
||||||
directed (which can be a gateway node), so it is easy to have untrusted nodes
|
|
||||||
in the cluster as long as S3 API requests containing SSE-C encryption keys are
|
|
||||||
not directed to them.
|
|
||||||
|
|
||||||
Implementing automatic data encryption directly in Garage without client-side
|
|
||||||
management of keys (something like
|
|
||||||
[SSE-S3](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingServerSideEncryption.html))
|
|
||||||
could make things simpler for end users that don't want to setup LUKS, but also
|
|
||||||
raises many more questions, especially around key management: for encryption of
|
|
||||||
data, where could Garage get the encryption keys from? If we encrypt data but
|
|
||||||
keep the keys in a plaintext file next to them, it's useless. We probably don't
|
|
||||||
want to have to manage secrets in Garage as it would be very hard to do in a
|
|
||||||
secure way. At the time of writing, there are no plans to implement this in
|
|
||||||
Garage.
|
|
||||||
|
|
||||||
|
|
||||||
# Adding data encryption using external tools
|
# Adding data encryption using external tools
|
||||||
|
|
|
@ -90,6 +90,5 @@ The following feature flags are available in v0.8.0:
|
||||||
| `kubernetes-discovery` | optional | Enable automatic registration and discovery<br>of cluster nodes through the Kubernetes API |
|
| `kubernetes-discovery` | optional | Enable automatic registration and discovery<br>of cluster nodes through the Kubernetes API |
|
||||||
| `metrics` | *by default* | Enable collection of metrics in Prometheus format on the admin API |
|
| `metrics` | *by default* | Enable collection of metrics in Prometheus format on the admin API |
|
||||||
| `telemetry-otlp` | optional | Enable collection of execution traces using OpenTelemetry |
|
| `telemetry-otlp` | optional | Enable collection of execution traces using OpenTelemetry |
|
||||||
| `syslog` | optional | Enable logging to Syslog |
|
|
||||||
| `lmdb` | *by default* | Enable using LMDB to store Garage's metadata |
|
| `lmdb` | *by default* | Enable using LMDB to store Garage's metadata |
|
||||||
| `sqlite` | *by default* | Enable using Sqlite3 to store Garage's metadata |
|
| `sqlite` | optional | Enable using Sqlite3 to store Garage's metadata |
|
||||||
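For instance, a from-source build enabling only a subset of these flags could look as follows (a sketch; the feature names come from the table above, but the exact cargo invocation should be checked against the repository's build documentation):

```bash
cargo build --release --no-default-features --features "lmdb,metrics"
```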
|
|
|
@ -27,7 +27,7 @@ To run a real-world deployment, make sure the following conditions are met:
|
||||||
[Yggdrasil](https://yggdrasil-network.github.io/) are approaches to consider
|
[Yggdrasil](https://yggdrasil-network.github.io/) are approaches to consider
|
||||||
in addition to building out your own VPN tunneling.
|
in addition to building out your own VPN tunneling.
|
||||||
|
|
||||||
- This guide will assume you are using Docker containers to deploy Garage on each node.
|
- This guide will assume you are using Docker containers to deploy Garage on each node.
|
||||||
Garage can also be run independently, for instance as a [Systemd service](@/documentation/cookbook/systemd.md).
|
Garage can also be run independently, for instance as a [Systemd service](@/documentation/cookbook/systemd.md).
|
||||||
You can also use an orchestrator such as Nomad or Kubernetes to automatically manage
|
You can also use an orchestrator such as Nomad or Kubernetes to automatically manage
|
||||||
Docker containers on a fleet of nodes.
|
Docker containers on a fleet of nodes.
|
||||||
|
@ -53,9 +53,9 @@ to store 2 TB of data in total.
|
||||||
|
|
||||||
### Best practices
|
### Best practices
|
||||||
|
|
||||||
- If you have reasonably fast networking between all your nodes, and are planning to store
|
- If you have fast dedicated networking between all your nodes, and are planning to store
|
||||||
mostly large files, bump the `block_size` configuration parameter to 10 MB
|
very large files, bump the `block_size` configuration parameter to 10 MB
|
||||||
(`block_size = "10M"`).
|
(`block_size = 10485760`; see the configuration sketch after this list).
|
||||||
|
|
||||||
- Garage stores its files in two locations: it uses a metadata directory to store frequently-accessed
|
- Garage stores its files in two locations: it uses a metadata directory to store frequently-accessed
|
||||||
small metadata items, and a data directory to store data blocks of uploaded objects.
|
small metadata items, and a data directory to store data blocks of uploaded objects.
|
||||||
|
@ -68,42 +68,30 @@ to store 2 TB of data in total.
|
||||||
EXT4 is not recommended as it has stricter limitations on the number of inodes,
|
EXT4 is not recommended as it has stricter limitations on the number of inodes,
|
||||||
which might cause issues with Garage when large numbers of objects are stored.
|
which might cause issues with Garage when large numbers of objects are stored.
|
||||||
|
|
||||||
- Servers with multiple HDDs are supported natively by Garage without resorting
|
- If you only have an HDD and no SSD, it's fine to put your metadata alongside the data
|
||||||
to RAID, see [our dedicated documentation page](@/documentation/operations/multi-hdd.md).
|
on the same drive. Having lots of RAM for your kernel to cache the metadata will
|
||||||
|
help a lot with performance. The default LMDB database engine is the most tested
|
||||||
|
and has good performance.
|
||||||
|
|
||||||
- For the metadata storage, Garage does not do checksumming and integrity
|
- For the metadata storage, Garage does not do checksumming and integrity
|
||||||
verification on its own, so it is better to use a robust filesystem such as
|
verification on its own. If you are afraid of bitrot/data corruption,
|
||||||
BTRFS or ZFS. Users have reported that when using the LMDB database engine
|
put your metadata directory on a ZFS or BTRFS partition. Otherwise, just use regular
|
||||||
(the default), database files have a tendency of becoming corrupted after an
|
EXT4 or XFS.
|
||||||
unclean shutdown (e.g. a power outage), so you should take regular snapshots
|
|
||||||
to be able to recover from such a situation. This can be done using Garage's
|
|
||||||
built-in automatic snapshotting (since v0.9.4), or by using filesystem-level
|
|
||||||
snapshots. If you cannot do so, you might want to switch to Sqlite which is
|
|
||||||
more robust.
|
|
||||||
|
|
||||||
- LMDB is the fastest and most tested database engine, but it has the following
|
- Servers with multiple HDDs are supported natively by Garage without resorting
|
||||||
weaknesses: 1/ data files are not architecture-independent, you cannot simply
|
to RAID, see [our dedicated documentation page](@/documentation/operations/multi-hdd.md).
|
||||||
move a Garage metadata directory between nodes running different architectures,
|
|
||||||
and 2/ LMDB is not suited for 32-bit platforms. Sqlite is a viable alternative
|
|
||||||
if any of these are of concern.
|
|
||||||
|
|
||||||
- If you only have an HDD and no SSD, it's fine to put your metadata alongside
|
|
||||||
the data on the same drive, but then consider your filesystem choice wisely
|
|
||||||
(see above). Having lots of RAM for your kernel to cache the metadata will
|
|
||||||
help a lot with performance. The default LMDB database engine is the most
|
|
||||||
tested and has good performance.
|
|
||||||
|
|
||||||
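To make these recommendations concrete, the sketch below shows how they could translate into `/etc/garage.toml` (illustrative values only, to be adapted to your own hardware and workload):

```bash
cat >> /etc/garage.toml <<EOF
db_engine = "lmdb"                       # most tested engine, good performance
block_size = "10M"                       # only with fast links and mostly large files
metadata_auto_snapshot_interval = "6h"   # regular snapshots of the metadata DB
EOF
```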
## Get a Docker image
|
## Get a Docker image
|
||||||
|
|
||||||
Our docker image is currently named `dxflrs/garage` and is stored on the [Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).
|
Our docker image is currently named `dxflrs/garage` and is stored on the [Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).
|
||||||
We encourage you to use a fixed tag (e.g. `v1.0.1`) and not the `latest` tag.
|
We encourage you to use a fixed tag (e.g. `v0.9.3`) and not the `latest` tag.
|
||||||
For this example, we will use the latest published version at the time of writing, which is `v1.0.1`, but it's up to you
|
For this example, we will use the latest published version at the time of writing, which is `v0.9.3`, but it's up to you
|
||||||
to check [the most recent versions on the Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).
|
to check [the most recent versions on the Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).
|
||||||
|
|
||||||
For example:
|
For example:
|
||||||
|
|
||||||
```
|
```
|
||||||
sudo docker pull dxflrs/garage:v1.0.1
|
sudo docker pull dxflrs/garage:v0.9.3
|
||||||
```
|
```
|
||||||
|
|
||||||
## Deploying and configuring Garage
|
## Deploying and configuring Garage
|
||||||
|
@ -126,7 +114,6 @@ A valid `/etc/garage.toml` for our cluster would look as follows:
|
||||||
metadata_dir = "/var/lib/garage/meta"
|
metadata_dir = "/var/lib/garage/meta"
|
||||||
data_dir = "/var/lib/garage/data"
|
data_dir = "/var/lib/garage/data"
|
||||||
db_engine = "lmdb"
|
db_engine = "lmdb"
|
||||||
metadata_auto_snapshot_interval = "6h"
|
|
||||||
|
|
||||||
replication_factor = 3
|
replication_factor = 3
|
||||||
|
|
||||||
|
@ -152,8 +139,6 @@ Check the following for your configuration files:
|
||||||
- Make sure `rpc_public_addr` contains the public IP address of the node you are configuring.
|
- Make sure `rpc_public_addr` contains the public IP address of the node you are configuring.
|
||||||
This parameter is optional but recommended: if your nodes have trouble communicating with
|
This parameter is optional but recommended: if your nodes have trouble communicating with
|
||||||
one another, consider adding it.
|
one another, consider adding it.
|
||||||
Alternatively, you can also set `rpc_public_addr_subnet`, which can filter
|
|
||||||
the addresses announced to other peers to a specific subnet.
|
|
||||||
|
|
||||||
- Make sure `rpc_secret` is the same value on all nodes. It should be a 32-byte hex-encoded secret key.
|
- Make sure `rpc_secret` is the same value on all nodes. It should be a 32-byte hex-encoded secret key.
|
||||||
You can generate such a key with `openssl rand -hex 32`.
|
You can generate such a key with `openssl rand -hex 32`.
|
||||||
|
@ -171,7 +156,7 @@ docker run \
|
||||||
-v /etc/garage.toml:/etc/garage.toml \
|
-v /etc/garage.toml:/etc/garage.toml \
|
||||||
-v /var/lib/garage/meta:/var/lib/garage/meta \
|
-v /var/lib/garage/meta:/var/lib/garage/meta \
|
||||||
-v /var/lib/garage/data:/var/lib/garage/data \
|
-v /var/lib/garage/data:/var/lib/garage/data \
|
||||||
dxflrs/garage:v1.0.1
|
dxflrs/garage:v0.9.3
|
||||||
```
|
```
|
||||||
|
|
||||||
With this command line, Garage should be started automatically at each boot.
|
With this command line, Garage should be started automatically at each boot.
|
||||||
|
@ -185,7 +170,7 @@ If you want to use `docker-compose`, you may use the following `docker-compose.y
|
||||||
version: "3"
|
version: "3"
|
||||||
services:
|
services:
|
||||||
garage:
|
garage:
|
||||||
image: dxflrs/garage:v1.0.1
|
image: dxflrs/garage:v0.9.3
|
||||||
network_mode: "host"
|
network_mode: "host"
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
volumes:
|
volumes:
|
||||||
|
@ -201,7 +186,7 @@ upgrades. With the containerized setup proposed here, the upgrade process
|
||||||
will require stopping and removing the existing container, and re-creating it
|
will require stopping and removing the existing container, and re-creating it
|
||||||
with the upgraded version.
|
with the upgraded version.
|
||||||
|
|
||||||
## Controlling the daemon
|
## Controlling the daemon
|
||||||
|
|
||||||
The `garage` binary has two purposes:
|
The `garage` binary has two purposes:
|
||||||
- it acts as a daemon when launched with `garage server`
|
- it acts as a daemon when launched with `garage server`
|
||||||
|
@ -259,7 +244,7 @@ You can then instruct nodes to connect to one another as follows:
|
||||||
Venus$ garage node connect 563e1ac825ee3323aa441e72c26d1030d6d4414aeb3dd25287c531e7fc2bc95d@[fc00:1::1]:3901
|
Venus$ garage node connect 563e1ac825ee3323aa441e72c26d1030d6d4414aeb3dd25287c531e7fc2bc95d@[fc00:1::1]:3901
|
||||||
```
|
```
|
||||||
|
|
||||||
You don't need to instruct all nodes to connect to all other nodes:
|
You don't need to instruct all nodes to connect to all other nodes:
|
||||||
nodes will discover one another transitively.
|
nodes will discover one another transitively.
|
||||||
|
|
||||||
Now if you run `garage status` on any node, you should have an output that looks as follows:
|
Now if you run `garage status` on any node, you should have an output that looks as follows:
|
||||||
|
@ -342,8 +327,8 @@ Given the information above, we will configure our cluster as follow:
|
||||||
```bash
|
```bash
|
||||||
garage layout assign 563e -z par1 -c 1T -t mercury
|
garage layout assign 563e -z par1 -c 1T -t mercury
|
||||||
garage layout assign 86f0 -z par1 -c 2T -t venus
|
garage layout assign 86f0 -z par1 -c 2T -t venus
|
||||||
garage layout assign 6814 -z lon1 -c 2T -t earth
|
garage layout assign 6814 -z lon1 -c 2T -t earth
|
||||||
garage layout assign 212f -z bru1 -c 1.5T -t mars
|
garage layout assign 212f -z bru1 -c 1.5T -t mars
|
||||||
```
|
```
|
||||||
|
|
||||||
At this point, the changes in the cluster layout have not yet been applied.
|
At this point, the changes in the cluster layout have not yet been applied.
|
||||||
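From here, you would typically review the staged layout and then apply it, along these lines (the version number is an example; it increases by one with each applied layout):

```bash
garage layout show               # review the staged changes
garage layout apply --version 1  # apply them to the cluster
```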
|
|
|
@ -50,20 +50,3 @@ locations. They use Garage themselves for the following tasks:
|
||||||
|
|
||||||
The Deuxfleurs Garage cluster is a multi-site cluster currently composed of
|
The Deuxfleurs Garage cluster is a multi-site cluster currently composed of
|
||||||
9 nodes in 3 physical locations.
|
9 nodes in 3 physical locations.
|
||||||
|
|
||||||
### Triplebit
|
|
||||||
|
|
||||||
[Triplebit](https://www.triplebit.org) is a non-profit hosting provider and
|
|
||||||
ISP focused on improving access to privacy-related services. They use
|
|
||||||
Garage themselves for the following tasks:
|
|
||||||
|
|
||||||
- Hosting of their homepage, [privacyguides.org](https://www.privacyguides.org/), and various other static sites
|
|
||||||
|
|
||||||
- As a Mastodon object storage backend for [mstdn.party](https://mstdn.party/) and [mstdn.plus](https://mstdn.plus/)
|
|
||||||
|
|
||||||
- As a PeerTube storage backend for [neat.tube](https://neat.tube/)
|
|
||||||
|
|
||||||
- As a [Matrix media backend](https://github.com/matrix-org/synapse-s3-storage-provider)
|
|
||||||
|
|
||||||
Triplebit's Garage cluster is a multi-site cluster currently composed of
|
|
||||||
10 nodes in 3 physical locations.
|
|
||||||
|
|
|
@ -67,7 +67,7 @@ Pithos has been abandoned and should probably not be used; in the following we
|
||||||
Pithos acted as an S3 proxy in front of Cassandra (and also worked with Scylla DB).
|
Pithos acted as an S3 proxy in front of Cassandra (and also worked with Scylla DB).
|
||||||
According to its designers, storing data in Cassandra showed its limitations, justifying the project's abandonment.
|
According to its designers, storing data in Cassandra showed its limitations, justifying the project's abandonment.
|
||||||
They built a closed-source version 2 that does not store blobs in the database (only metadata) but did not communicate further on it.
|
They built a closed-source version 2 that does not store blobs in the database (only metadata) but did not communicate further on it.
|
||||||
We considered their v2's design but concluded that it does not fit both our *Self-contained & lightweight* and *Simple* properties. It makes the development, the deployment and the operations more complicated while reducing the flexibility.
|
We considered their v2's design but concluded that it does not fit both our *Self-contained & lightweight* and *Simple* properties. It makes the development, the deployment and the operations more complicated while reducing the flexibility.
|
||||||
|
|
||||||
**[Riak CS](https://docs.riak.com/riak/cs/2.1.1/index.html):**
|
**[Riak CS](https://docs.riak.com/riak/cs/2.1.1/index.html):**
|
||||||
*Not written yet*
|
*Not written yet*
|
||||||
|
|
|
@ -36,7 +36,7 @@ sudo killall nix-daemon
|
||||||
Now you can enter our nix-shell, all the required packages will be downloaded but they will not pollute your environment outside of the shell:
|
Now you can enter our nix-shell, all the required packages will be downloaded but they will not pollute your environment outside of the shell:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
nix-shell -A devShell
|
nix-shell
|
||||||
```
|
```
|
||||||
|
|
||||||
You can use the traditional Rust development workflow:
|
You can use the traditional Rust development workflow:
|
||||||
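For example, from inside the nix-shell:

```bash
cargo build   # compile the project
cargo test    # run the test suite
```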
|
@ -65,8 +65,8 @@ nix-build -j $(nproc) --max-jobs auto
|
||||||
```
|
```
|
||||||
|
|
||||||
Our build has multiple parameters you might want to set:
|
Our build has multiple parameters you might want to set:
|
||||||
- `release` to build with release optimisations instead of debug
|
- `release` to build with release optimisations instead of debug
|
||||||
- `target` allows for cross compilation
|
- `target` allows for cross compilation
|
||||||
- `compileMode` can be set to test or bench to build a unit test runner
|
- `compileMode` can be set to test or bench to build a unit test runner
|
||||||
- `git_version` to inject the hash to display when running `garage stats`
|
- `git_version` to inject the hash to display when running `garage stats`
|
||||||
|
|
||||||
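A build setting several of these at once could look like the following (the parameter names come from the list above; whether each one takes `--arg` or `--argstr` is an assumption to verify against the Nix expressions):

```bash
nix-build \
  --arg release true \
  --argstr target x86_64-unknown-linux-musl \
  --argstr git_version "$(git rev-parse HEAD)"
```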
|
|
|
@ -19,7 +19,7 @@ connecting to. To run on all nodes, add the `-a` flag as follows:
|
||||||
|
|
||||||
# Data block operations
|
# Data block operations
|
||||||
|
|
||||||
## Data store scrub {#scrub}
|
## Data store scrub
|
||||||
|
|
||||||
Scrubbing the data store means examining each individual data block to check that
|
Scrubbing the data store means examining each individual data block to check that
|
||||||
its content is correct, by verifying its hash. Any block found to be corrupted
|
its content is correct, by verifying its hash. Any block found to be corrupted
|
||||||
|
@ -104,24 +104,6 @@ operation will also move out all data from locations marked as read-only.
|
||||||
|
|
||||||
# Metadata operations
|
# Metadata operations
|
||||||
|
|
||||||
## Metadata snapshotting
|
|
||||||
|
|
||||||
It is good practice to set up automatic snapshotting of your metadata database
|
|
||||||
file, to recover from situations where it becomes corrupted on disk. This can
|
|
||||||
be done at the filesystem level if you are using ZFS or BTRFS.
|
|
||||||
|
|
||||||
Since Garage v0.9.4, Garage is able to take snapshots of the metadata database
|
|
||||||
itself. This basically amounts to copying the database file, except that it can
|
|
||||||
be run live while Garage is running without the risk of corruption or
|
|
||||||
inconsistencies. This can be set up to run automatically on a schedule using
|
|
||||||
[`metadata_auto_snapshot_interval`](@/documentation/reference-manual/configuration.md#metadata_auto_snapshot_interval).
|
|
||||||
A snapshot can also be triggered manually using the `garage meta snapshot`
|
|
||||||
command. Note that taking a snapshot using this method is very intensive as it
|
|
||||||
requires making a full copy of the database file, so you might prefer using
|
|
||||||
filesystem-level snapshots if possible. To recover a corrupted node from such a
|
|
||||||
snapshot, read the instructions
|
|
||||||
[here](@/documentation/operations/recovering.md#corrupted_meta).
|
|
||||||
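For example:

```bash
garage meta snapshot          # snapshot the metadata DB of the local node
garage meta snapshot --all    # snapshot all nodes of the cluster at once
```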
|
|
||||||
## Metadata table resync
|
## Metadata table resync
|
||||||
|
|
||||||
Garage automatically resyncs all entries stored in the metadata tables every hour,
|
Garage automatically resyncs all entries stored in the metadata tables every hour,
|
||||||
|
@ -141,7 +123,4 @@ blocks may still be held by Garage. If you suspect that such corruption has occu
|
||||||
in your cluster, you can run one of the following repair procedures:
|
in your cluster, you can run one of the following repair procedures:
|
||||||
|
|
||||||
- `garage repair versions`: checks that all versions belong to a non-deleted object, and purges any orphan version
|
- `garage repair versions`: checks that all versions belong to a non-deleted object, and purges any orphan version
|
||||||
|
- `garage repair block_refs`: checks that all block references belong to a non-deleted object version, and purges any orphan block reference (this will then allow the blocks to be garbage-collected)
|
||||||
- `garage repair block-refs`: checks that all block references belong to a non-deleted object version, and purges any orphan block reference (this will then allow the blocks to be garbage-collected)
|
|
||||||
|
|
||||||
- `garage repair block-rc`: checks that the reference counters for blocks are in sync with the actual number of non-deleted entries in the block reference table
|
|
||||||
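Each of these procedures can be run on the whole cluster at once, for example (use `block_refs` or `block-refs` depending on your Garage version):

```bash
garage repair --all-nodes --yes block_refs
```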
|
|
|
@ -5,7 +5,7 @@ weight = 40
|
||||||
|
|
||||||
Garage is meant to work on old, second-hand hardware.
|
Garage is meant to work on old, second-hand hardware.
|
||||||
In particular, this makes it likely that some of your drives will fail, and some manual intervention will be needed.
|
In particular, this makes it likely that some of your drives will fail, and some manual intervention will be needed.
|
||||||
Fear not! Garage is fully equipped to handle drive failures, in most common cases.
|
Fear not! For Garage is fully equipped to handle drive failures, in most common cases.
|
||||||
|
|
||||||
## A note on availability of Garage
|
## A note on availability of Garage
|
||||||
|
|
||||||
|
@ -61,7 +61,7 @@ garage repair -a --yes blocks
|
||||||
|
|
||||||
This will re-synchronize the missing blocks of data onto the new HDD, reading them from copies located on other nodes.
|
This will re-synchronize the missing blocks of data onto the new HDD, reading them from copies located on other nodes.
|
||||||
|
|
||||||
You can check on the progress of this process with the following command:
|
You can check on the progress of this process with the following command:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
garage stats -a
|
garage stats -a
|
||||||
|
@ -108,57 +108,3 @@ garage layout apply # once satisfied, apply the changes
|
||||||
|
|
||||||
Garage will then start synchronizing all required data on the new node.
|
Garage will then start synchronizing all required data on the new node.
|
||||||
This process can be monitored using the `garage stats -a` command.
|
This process can be monitored using the `garage stats -a` command.
|
||||||
|
|
||||||
## Replacement scenario 3: corrupted metadata {#corrupted_meta}
|
|
||||||
|
|
||||||
In some cases, your metadata DB file might become corrupted, for instance if
|
|
||||||
your node suffered a power outage and did not shut down properly. In this case,
|
|
||||||
you can recover without having to change the node ID or rebuild the cluster
|
|
||||||
layout. This means that data blocks will not need to be shuffled around; you
|
|
||||||
must simply find a way to repair the metadata file. The best way is generally
|
|
||||||
to discard the corrupted file and recover it from another source.
|
|
||||||
|
|
||||||
Start by locating the database file in your metadata directory,
|
|
||||||
which [depends on your `db_engine`
|
|
||||||
choice](@/documentation/reference-manual/configuration.md#db_engine). Then,
|
|
||||||
your recovery options are as follows:
|
|
||||||
|
|
||||||
- **Option 1: resyncing from other nodes.** In case your cluster is replicated
|
|
||||||
with two or three copies, you can simply delete the database file, and Garage
|
|
||||||
will resync from other nodes. To do so, stop Garage, delete the database file
|
|
||||||
or directory, and restart Garage. Then, do a full table repair by calling
|
|
||||||
`garage repair -a --yes tables`. This will take a bit of time to complete as
|
|
||||||
the new node will need to receive copies of the metadata tables from the
|
|
||||||
network.
|
|
||||||
|
|
||||||
- **Option 2: restoring a snapshot taken by Garage.** Since v0.9.4, Garage can
|
|
||||||
[automatically take regular
|
|
||||||
snapshots](@/documentation/reference-manual/configuration.md#metadata_auto_snapshot_interval)
|
|
||||||
of your metadata DB file. This file or directory should be located under
|
|
||||||
`<metadata_dir>/snapshots`, and is named according to the UTC time at which it
|
|
||||||
was taken. Stop Garage, discard the database file/directory and replace it by the
|
|
||||||
snapshot you want to use. For instance, in the case of LMDB:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
cd $METADATA_DIR
|
|
||||||
mv db.lmdb db.lmdb.bak
|
|
||||||
cp -r snapshots/2024-03-15T12:13:52Z db.lmdb
|
|
||||||
```
|
|
||||||
|
|
||||||
And for Sqlite:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
cd $METADATA_DIR
|
|
||||||
mv db.sqlite db.sqlite.bak
|
|
||||||
cp snapshots/2024-03-15T12:13:52Z db.sqlite
|
|
||||||
```
|
|
||||||
|
|
||||||
Then, restart Garage and run a full table repair by calling `garage repair -a
|
|
||||||
--yes tables`. This should run relatively fast as only the changes that
|
|
||||||
occurred since the snapshot was taken will need to be resynchronized. Of
|
|
||||||
course, if your cluster is not replicated, you will lose all changes that
|
|
||||||
occurred since the snapshot was taken.
|
|
||||||
|
|
||||||
- **Option 3: restoring a filesystem-level snapshot.** If you are using ZFS or
|
|
||||||
BTRFS to snapshot your metadata partition, refer to their specific
|
|
||||||
documentation on rolling back or copying files from an old snapshot.
|
|
||||||
|
|
|
@ -73,18 +73,6 @@ The entire procedure would look something like this:
|
||||||
You can do all of the nodes in a single zone at once as that won't impact global cluster availability.
|
You can do all of the nodes in a single zone at once as that won't impact global cluster availability.
|
||||||
Do not try to make a backup of the metadata folder of a running node.
|
Do not try to make a backup of the metadata folder of a running node.
|
||||||
|
|
||||||
**Since Garage v0.9.4,** you can use the `garage meta snapshot --all` command
|
|
||||||
to take a simultaneous snapshot of the metadata database files of all your
|
|
||||||
nodes. This avoids the tedious process of having to take them down one by
|
|
||||||
one before upgrading. Be aware that if automatic snapshotting is enabled,
|
|
||||||
Garage only keeps the last two snapshots and deletes older ones, so you might
|
|
||||||
want to disable automatic snapshotting in your upgraded configuration file
|
|
||||||
until you have confirmed that the upgrade ran successfully. In addition to
|
|
||||||
snapshotting the metadata databases of your nodes, you should back up at
|
|
||||||
least the `cluster_layout` file of one of your Garage instances (this file
|
|
||||||
should be the same on all nodes and you can copy it safely while Garage is
|
|
||||||
running).
|
|
||||||
|
|
||||||
3. Prepare your binaries and configuration files for the new Garage version
|
3. Prepare your binaries and configuration files for the new Garage version
|
||||||
|
|
||||||
4. Restart all nodes simultaneously in the new version
|
4. Restart all nodes simultaneously in the new version
|
||||||
|
|
|
@ -42,13 +42,6 @@ If a binary of the last version is not available for your architecture,
|
||||||
or if you want a build customized for your system,
|
or if you want a build customized for your system,
|
||||||
you can [build Garage from source](@/documentation/cookbook/from-source.md).
|
you can [build Garage from source](@/documentation/cookbook/from-source.md).
|
||||||
|
|
||||||
If none of these option work for you, you can also run Garage in a Docker
|
|
||||||
container. When using Docker, the commands used in this guide will not work
|
|
||||||
anymore. We recommend reading the tutorial on [configuring a
|
|
||||||
multi-node cluster](@/documentation/cookbook/real-world.md) to learn about
|
|
||||||
using Garage as a Docker container. For simplicity, a minimal command to launch
|
|
||||||
Garage using Docker is provided in this quick start guide as well.
|
|
||||||
|
|
||||||
|
|
||||||
## Configuring and starting Garage
|
## Configuring and starting Garage
|
||||||
|
|
||||||
|
@ -64,7 +57,7 @@ to generate unique and private secrets for security reasons:
|
||||||
cat > garage.toml <<EOF
|
cat > garage.toml <<EOF
|
||||||
metadata_dir = "/tmp/meta"
|
metadata_dir = "/tmp/meta"
|
||||||
data_dir = "/tmp/data"
|
data_dir = "/tmp/data"
|
||||||
db_engine = "sqlite"
|
db_engine = "lmdb"
|
||||||
|
|
||||||
replication_factor = 1
|
replication_factor = 1
|
||||||
|
|
||||||
|
@ -92,9 +85,6 @@ metrics_token = "$(openssl rand -base64 32)"
|
||||||
EOF
|
EOF
|
||||||
```
|
```
|
||||||
|
|
||||||
See the [Configuration file format](https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/)
|
|
||||||
for complete options and values.
|
|
||||||
|
|
||||||
Now that your configuration file has been created, you may save it to the directory of your choice.
|
Now that your configuration file has been created, you may save it to the directory of your choice.
|
||||||
By default, Garage looks for **`/etc/garage.toml`.**
|
By default, Garage looks for **`/etc/garage.toml`.**
|
||||||
You can also store it somewhere else, but you will have to specify `-c path/to/garage.toml`
|
You can also store it somewhere else, but you will have to specify `-c path/to/garage.toml`
|
||||||
|
@ -121,26 +111,6 @@ garage -c path/to/garage.toml server
|
||||||
|
|
||||||
If you have placed the `garage.toml` file in `/etc` (its default location), you can simply run `garage server`.
|
If you have placed the `garage.toml` file in `/etc` (its default location), you can simply run `garage server`.
|
||||||
|
|
||||||
Alternatively, if you cannot or do not wish to run the Garage binary directly,
|
|
||||||
you may use Docker to run Garage in a container using the following command:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
docker run \
|
|
||||||
-d \
|
|
||||||
--name garaged \
|
|
||||||
-p 3900:3900 -p 3901:3901 -p 3902:3902 -p 3903:3903 \
|
|
||||||
-v /path/to/garage.toml:/etc/garage.toml \
|
|
||||||
-v /path/to/garage/meta:/var/lib/garage/meta \
|
|
||||||
-v /path/to/garage/data:/var/lib/garage/data \
|
|
||||||
dxflrs/garage:v0.9.4
|
|
||||||
```
|
|
||||||
|
|
||||||
Under Linux, you can substitute `--network host` for `-p 3900:3900 -p 3901:3901 -p 3902:3902 -p 3903:3903`
|
|
||||||
|
|
||||||
#### Troubleshooting
|
|
||||||
|
|
||||||
Ensure your configuration file is readable, and that `metadata_dir` and `data_dir` are writable, by the user running the `garage` server or Docker.
|
|
||||||
|
|
||||||
You can tune Garage's verbosity by setting the `RUST_LOG=` environment variable. \
|
You can tune Garage's verbosity by setting the `RUST_LOG=` environment variable. \
|
||||||
Available log levels are (from less verbose to more verbose): `error`, `warn`, `info` *(default)*, `debug` and `trace`.
|
Available log levels are (from less verbose to more verbose): `error`, `warn`, `info` *(default)*, `debug` and `trace`.
|
||||||
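For instance:

```bash
RUST_LOG=debug garage -c path/to/garage.toml server
```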
|
|
||||||
|
@ -161,9 +131,6 @@ It uses values from the TOML configuration file to find the Garage daemon runnin
|
||||||
local node, therefore if your configuration file is not at `/etc/garage.toml` you will
|
local node, therefore if your configuration file is not at `/etc/garage.toml` you will
|
||||||
again have to specify `-c path/to/garage.toml` at each invocation.
|
again have to specify `-c path/to/garage.toml` at each invocation.
|
||||||
|
|
||||||
If you are running Garage in a Docker container, you can set `alias garage="docker exec -ti <container name> /garage"`
|
|
||||||
to use the Garage binary inside your container.
|
|
||||||
|
|
||||||
If the `garage` CLI is able to correctly detect the parameters of your local Garage node,
|
If the `garage` CLI is able to correctly detect the parameters of your local Garage node,
|
||||||
the following command should be enough to show the status of your cluster:
|
the following command should be enough to show the status of your cluster:
|
||||||
|
|
||||||
|
@ -199,7 +166,7 @@ For instance here you could write just `garage layout assign -z dc1 -c 1G 563e`.
|
||||||
The layout then has to be applied to the cluster, using:
|
The layout then has to be applied to the cluster, using:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
garage layout apply --version 1
|
garage layout apply
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
|
@ -349,7 +316,7 @@ Check [our s3 compatibility list](@/documentation/reference-manual/s3-compatibil
|
||||||
|
|
||||||
### Other tools for interacting with Garage
|
### Other tools for interacting with Garage
|
||||||
|
|
||||||
The following tools can also be used to send and receive files from/to Garage:
|
The following tools can also be used to send and receive files from/to Garage:
|
||||||
|
|
||||||
- [minio-client](@/documentation/connect/cli.md#minio-client)
|
- [minio-client](@/documentation/connect/cli.md#minio-client)
|
||||||
- [s3cmd](@/documentation/connect/cli.md#s3cmd)
|
- [s3cmd](@/documentation/connect/cli.md#s3cmd)
|
||||||
|
|
|
@ -15,14 +15,10 @@ metadata_dir = "/var/lib/garage/meta"
|
||||||
data_dir = "/var/lib/garage/data"
|
data_dir = "/var/lib/garage/data"
|
||||||
metadata_fsync = true
|
metadata_fsync = true
|
||||||
data_fsync = false
|
data_fsync = false
|
||||||
disable_scrub = false
|
|
||||||
use_local_tz = false
|
|
||||||
metadata_auto_snapshot_interval = "6h"
|
|
||||||
|
|
||||||
db_engine = "lmdb"
|
db_engine = "lmdb"
|
||||||
|
|
||||||
block_size = "1M"
|
block_size = "1M"
|
||||||
block_ram_buffer_max = "256MiB"
|
|
||||||
|
|
||||||
lmdb_map_size = "1T"
|
lmdb_map_size = "1T"
|
||||||
|
|
||||||
|
@ -32,11 +28,6 @@ rpc_secret = "4425f5c26c5e11581d3223904324dcb5b5d5dfb14e5e7f35e38c595424f5f1e6"
|
||||||
rpc_bind_addr = "[::]:3901"
|
rpc_bind_addr = "[::]:3901"
|
||||||
rpc_bind_outgoing = false
|
rpc_bind_outgoing = false
|
||||||
rpc_public_addr = "[fc00:1::1]:3901"
|
rpc_public_addr = "[fc00:1::1]:3901"
|
||||||
# or set rpc_public_addr_subnet to filter down autodiscovery to a subnet:
|
|
||||||
# rpc_public_addr_subnet = "2001:0db8:f00:b00:/64"
|
|
||||||
|
|
||||||
|
|
||||||
allow_world_readable_secrets = false
|
|
||||||
|
|
||||||
bootstrap_peers = [
|
bootstrap_peers = [
|
||||||
"563e1ac825ee3323aa441e72c26d1030d6d4414aeb3dd25287c531e7fc2bc95d@[fc00:1::1]:3901",
|
"563e1ac825ee3323aa441e72c26d1030d6d4414aeb3dd25287c531e7fc2bc95d@[fc00:1::1]:3901",
|
||||||
|
@ -88,21 +79,14 @@ The following gives details about each available configuration option.
|
||||||
|
|
||||||
### Index
|
### Index
|
||||||
|
|
||||||
[Environment variables](#env_variables).
|
|
||||||
|
|
||||||
Top-level configuration options:
|
Top-level configuration options:
|
||||||
[`allow_world_readable_secrets`](#allow_world_readable_secrets),
|
|
||||||
[`block_ram_buffer_max`](#block_ram_buffer_max),
|
|
||||||
[`block_size`](#block_size),
|
[`block_size`](#block_size),
|
||||||
[`bootstrap_peers`](#bootstrap_peers),
|
[`bootstrap_peers`](#bootstrap_peers),
|
||||||
[`compression_level`](#compression_level),
|
[`compression_level`](#compression_level),
|
||||||
[`data_dir`](#data_dir),
|
[`data_dir`](#data_dir),
|
||||||
[`data_fsync`](#data_fsync),
|
[`data_fsync`](#data_fsync),
|
||||||
[`db_engine`](#db_engine),
|
[`db_engine`](#db_engine),
|
||||||
[`disable_scrub`](#disable_scrub),
|
|
||||||
[`use_local_tz`](#use_local_tz),
|
|
||||||
[`lmdb_map_size`](#lmdb_map_size),
|
[`lmdb_map_size`](#lmdb_map_size),
|
||||||
[`metadata_auto_snapshot_interval`](#metadata_auto_snapshot_interval),
|
|
||||||
[`metadata_dir`](#metadata_dir),
|
[`metadata_dir`](#metadata_dir),
|
||||||
[`metadata_fsync`](#metadata_fsync),
|
[`metadata_fsync`](#metadata_fsync),
|
||||||
[`replication_factor`](#replication_factor),
|
[`replication_factor`](#replication_factor),
|
||||||
|
@ -110,7 +94,6 @@ Top-level configuration options:
|
||||||
[`rpc_bind_addr`](#rpc_bind_addr),
|
[`rpc_bind_addr`](#rpc_bind_addr),
|
||||||
[`rpc_bind_outgoing`](#rpc_bind_outgoing),
|
[`rpc_bind_outgoing`](#rpc_bind_outgoing),
|
||||||
[`rpc_public_addr`](#rpc_public_addr),
|
[`rpc_public_addr`](#rpc_public_addr),
|
||||||
[`rpc_public_addr_subnet`](#rpc_public_addr_subnet),
|
|
||||||
[`rpc_secret`/`rpc_secret_file`](#rpc_secret).
|
[`rpc_secret`/`rpc_secret_file`](#rpc_secret).
|
||||||
|
|
||||||
The `[consul_discovery]` section:
|
The `[consul_discovery]` section:
|
||||||
|
@ -145,23 +128,6 @@ The `[admin]` section:
|
||||||
[`admin_token`/`admin_token_file`](#admin_token),
|
[`admin_token`/`admin_token_file`](#admin_token),
|
||||||
[`trace_sink`](#admin_trace_sink),
|
[`trace_sink`](#admin_trace_sink),
|
||||||
|
|
||||||
### Environment variables {#env_variables}
|
|
||||||
|
|
||||||
The following configuration parameter must be specified as an environment
|
|
||||||
variable; it does not exist in the configuration file:
|
|
||||||
|
|
||||||
- `GARAGE_LOG_TO_SYSLOG` (since v0.9.4): set this to `1` or `true` to make the
|
|
||||||
Garage daemon send its logs to `syslog` (using the libc `syslog` function)
|
|
||||||
instead of printing to stderr.
|
|
||||||
|
|
||||||
The following environment variables can be used to override the corresponding
|
|
||||||
values in the configuration file:
|
|
||||||
|
|
||||||
- [`GARAGE_ALLOW_WORLD_READABLE_SECRETS`](#allow_world_readable_secrets)
|
|
||||||
- [`GARAGE_RPC_SECRET` and `GARAGE_RPC_SECRET_FILE`](#rpc_secret)
|
|
||||||
- [`GARAGE_ADMIN_TOKEN` and `GARAGE_ADMIN_TOKEN_FILE`](#admin_token)
|
|
||||||
- [`GARAGE_METRICS_TOKEN` and `GARAGE_METRICS_TOKEN_FILE`](#admin_metrics_token)
|
|
||||||
|
|
||||||
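For example, to provide the secrets through the environment instead of the configuration file (the generated values here are placeholders):

```bash
export GARAGE_RPC_SECRET="$(openssl rand -hex 32)"
export GARAGE_ADMIN_TOKEN="$(openssl rand -base64 32)"
GARAGE_LOG_TO_SYSLOG=1 garage server
```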
|
|
||||||
### Top-level configuration options
|
### Top-level configuration options
|
||||||
|
|
||||||
|
@ -301,43 +267,28 @@ Since `v0.8.0`, Garage can use alternative storage backends as follows:
|
||||||
|
|
||||||
| DB engine | `db_engine` value | Database path |
|
| DB engine | `db_engine` value | Database path |
|
||||||
| --------- | ----------------- | ------------- |
|
| --------- | ----------------- | ------------- |
|
||||||
| [LMDB](https://www.symas.com/lmdb) (since `v0.8.0`, default since `v0.9.0`) | `"lmdb"` | `<metadata_dir>/db.lmdb/` |
|
| [LMDB](https://www.lmdb.tech) (since `v0.8.0`, default since `v0.9.0`) | `"lmdb"` | `<metadata_dir>/db.lmdb/` |
|
||||||
| [Sqlite](https://sqlite.org) (since `v0.8.0`) | `"sqlite"` | `<metadata_dir>/db.sqlite` |
|
| [Sqlite](https://sqlite.org) (since `v0.8.0`) | `"sqlite"` | `<metadata_dir>/db.sqlite` |
|
||||||
| [Sled](https://sled.rs) (old default, removed since `v1.0`) | `"sled"` | `<metadata_dir>/db/` |
|
| [Sled](https://sled.rs) (old default, removed since `v1.0`) | `"sled"` | `<metadata_dir>/db/` |
|
||||||
|
|
||||||
Sled was supported until Garage v0.9.x, and was removed in Garage v1.0.
|
Sled was supported until Garage v0.9.x, and was removed in Garage v1.0.
|
||||||
You can still use an older binary of Garage (e.g. v0.9.4) to migrate
|
You can still use an older binary of Garage (e.g. v0.9.3) to migrate
|
||||||
old Sled metadata databases to another engine.
|
old Sled metadata databases to another engine.
|
||||||
|
|
||||||
Performance characteristics of the different DB engines are as follows:
|
Performance characteristics of the different DB engines are as follows:
|
||||||
|
|
||||||
- LMDB: the recommended database engine for high-performance distributed clusters.
|
- LMDB: the recommended database engine on 64-bit systems, much more
|
||||||
LMDB works very well, but is known to have the following limitations:
|
space-efficient and slightly faster. Note that the data format of LMDB is not
|
||||||
|
portable between architectures, so for instance the Garage database of an
|
||||||
- The data format of LMDB is not portable between architectures, so for
|
x86-64 node cannot be moved to an ARM64 node. Also note that, while LMDB can
|
||||||
instance the Garage database of an x86-64 node cannot be moved to an ARM64
|
technically be used on 32-bit systems, this will limit your node to very
|
||||||
node.
|
small database sizes due to how LMDB works; it is therefore not recommended.
|
||||||
|
|
||||||
- While LMDB can technically be used on 32-bit systems, this will limit your
|
|
||||||
node to very small database sizes due to how LMDB works; it is therefore
|
|
||||||
not recommended.
|
|
||||||
|
|
||||||
- Several users have reported corrupted LMDB database files after an unclean
|
|
||||||
shutdown (e.g. a power outage). This situation can generally be recovered
|
|
||||||
from if your cluster is geo-replicated (by rebuilding your metadata db from
|
|
||||||
other nodes), or if you have saved regular snapshots at the filesystem
|
|
||||||
level.
|
|
||||||
|
|
||||||
- Keys in LMDB are limited to 511 bytes. This limit translates to limits on
|
|
||||||
object keys in S3 and sort keys in K2V that are limted to 479 bytes.
|
|
||||||
|
|
||||||
- Sqlite: Garage supports Sqlite as an alternative storage backend for
|
- Sqlite: Garage supports Sqlite as an alternative storage backend for
|
||||||
metadata, which does not have the issues listed above for LMDB.
|
metadata, and although it has not been tested as much, it is expected to work
|
||||||
On versions 0.8.x and earlier, Sqlite should be avoided due to abysmal
|
satisfactorily. Since Garage v0.9.0, performance issues have largely been
|
||||||
performance, which was fixed with the addition of `metadata_fsync`.
|
fixed by allowing for a no-fsync mode (see `metadata_fsync`). Sqlite does not
|
||||||
Sqlite is still probably slower than LMDB due to the way we use it,
|
have the database size limitation of LMDB on 32-bit systems.
|
||||||
so it is not the best choice for high-performance storage clusters,
|
|
||||||
but it should work fine in many cases.
|
|
||||||
|
|
||||||
It is possible to convert Garage's metadata directory from one format to another
|
It is possible to convert Garage's metadata directory from one format to another
|
||||||
using the `garage convert-db` command, which should be used as follows:
|
using the `garage convert-db` command, which should be used as follows:
|
||||||
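A typical invocation converts between the two supported engines along these lines (the flag names are an assumption; check `garage convert-db --help` for the exact syntax):

```bash
garage convert-db -a lmdb   -i /var/lib/garage/meta/db.lmdb \
                  -b sqlite -o /var/lib/garage/meta/db.sqlite
```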
|
@ -364,7 +315,7 @@ Using this option reduces the risk of simultaneous metadata corruption on severa
|
||||||
cluster nodes, which could lead to data loss.
|
cluster nodes, which could lead to data loss.
|
||||||
|
|
||||||
If multi-site replication is used, this option is most likely not necessary, as
|
If multi-site replication is used, this option is most likely not necessary, as
|
||||||
it is extremely unlikely that two nodes in different locations will have a
|
it is extremely unlikely that two nodes in different locations will have a
|
||||||
power failure at the exact same time.
|
power failure at the exact same time.
|
||||||
|
|
||||||
(Metadata corruption on a single node is not an issue, the corrupted data file
|
(Metadata corruption on a single node is not an issue, the corrupted data file
|
||||||
|
@ -392,50 +343,6 @@ at the cost of a moderate drop in write performance.
|
||||||
Similarly to `metadata_fsync`, this is likely not necessary
|
Similarly to `metadata_fsync`, this is likely not necessary
|
||||||
if geographical replication is used.
|
if geographical replication is used.
|
||||||
|
|
||||||
#### `metadata_auto_snapshot_interval` (since Garage v0.9.4) {#metadata_auto_snapshot_interval}
|
|
||||||
|
|
||||||
If this value is set, Garage will automatically take a snapshot of the metadata
|
|
||||||
DB file at a regular interval and save it in the metadata directory.
|
|
||||||
This parameter can take any duration string that can be parsed by
|
|
||||||
the [`parse_duration`](https://docs.rs/parse_duration/latest/parse_duration/#syntax) crate.
|
|
||||||
|
|
||||||
Snapshots make it possible to recover from situations where the metadata DB file is
|
|
||||||
corrupted, for instance after an unclean shutdown. See [this
|
|
||||||
page](@/documentation/operations/recovering.md#corrupted_meta) for details.
|
|
||||||
Garage keeps only the two most recent snapshots of the metadata DB and deletes
|
|
||||||
older ones automatically.
|
|
||||||
|
|
||||||
Note that taking a metadata snapshot is a relatively intensive operation as the
|
|
||||||
entire data file is copied. A snapshot being taken might have performance
|
|
||||||
impacts on the Garage node while it is running. If the cluster is under heavy
|
|
||||||
write load when a snapshot operation is running, this might also cause the
|
|
||||||
database file to grow in size significantly as pages cannot be recycled easily.
|
|
||||||
For this reason, it might be better to use filesystem-level snapshots instead
|
|
||||||
if possible.
|
|
||||||
|
|
||||||
#### `disable_scrub` {#disable_scrub}
|
|
||||||
|
|
||||||
By default, Garage runs a scrub of the data directory approximately once per
|
|
||||||
month, with a random delay to avoid all nodes running at the same time. When
|
|
||||||
it scrubs the data directory, Garage will read all of the data files stored on
|
|
||||||
disk to check their integrity, and will rebuild any data files that it finds
|
|
||||||
corrupted, using the remaining valid copies stored on other nodes.
|
|
||||||
See [this page](@/documentation/operations/durability-repairs.md#scrub) for details.
|
|
||||||
|
|
||||||
Set the `disable_scrub` configuration value to `true` if you don't need Garage
|
|
||||||
to scrub the data directory, for instance if you are already scrubbing at the
|
|
||||||
filesystem level. Note that in this case, if you find a corrupted data file,
|
|
||||||
you should delete it from the data directory and then call `garage repair
|
|
||||||
blocks` on the node to ensure that it re-obtains a copy from another node on
|
|
||||||
the network.
|
|
||||||
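For example, after removing a corrupted block file by hand (the path below is a hypothetical placeholder):

```bash
rm /var/lib/garage/data/<prefix>/<hash>   # the corrupted block file
garage repair blocks                      # re-fetch a valid copy from another node
```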
|
|
||||||
#### `use_local_tz` {#use_local_tz}
|
|
||||||
|
|
||||||
By default, Garage runs the lifecycle worker every day at midnight in UTC. Set the
|
|
||||||
`use_local_tz` configuration value to `true` if you want Garage to run the
|
|
||||||
lifecycle worker at midnight in your local timezone. If you have multiple nodes,
|
|
||||||
you should also ensure that each node has the same timezone configuration.
|
|
||||||
|
|
||||||
#### `block_size` {#block_size}
|
#### `block_size` {#block_size}
|
||||||
|
|
||||||
Garage splits stored objects into consecutive chunks of size `block_size`
|
Garage splits stored objects into consecutive chunks of size `block_size`
|
||||||
|
@ -451,37 +358,6 @@ files will remain available. This however means that chunks from existing files
|
||||||
will not be deduplicated with chunks from newly uploaded files, meaning you
|
will not be deduplicated with chunks from newly uploaded files, meaning you
|
||||||
might use more storage space than is optimally possible.
|
might use more storage space than is optimally possible.
|
||||||
|
|
||||||
#### `block_ram_buffer_max` (since v0.9.4) {#block_ram_buffer_max}
|
|
||||||
|
|
||||||
A limit on the total size of data blocks kept in RAM by S3 API nodes awaiting
|
|
||||||
to be sent to storage nodes asynchronously.
|
|
||||||
|
|
||||||
Explanation: since Garage wants to tolerate node failures, it uses quorum
|
|
||||||
writes to send data blocks to storage nodes: try to write the block to three
|
|
||||||
nodes, and return ok as soon as two writes complete. So even if all three nodes
|
|
||||||
are online, the third write always completes asynchronously. In general, there
|
|
||||||
are not many writes to a cluster, and the third asynchronous write can
|
|
||||||
terminate early enough so as to not cause unbounded RAM growth. However, if
|
|
||||||
the S3 API node is continuously receiving large quantities of data and the
|
|
||||||
third node is never able to catch up, many data blocks will be kept buffered in
|
|
||||||
RAM as they are awaiting transfer to the third node.
|
|
||||||
|
|
||||||
The `block_ram_buffer_max` parameter sets a limit on the size of buffers that can be kept
|
|
||||||
in RAM in this process. When the limit is reached, backpressure is applied
|
|
||||||
back to the S3 client.
|
|
||||||
|
|
||||||
Note that this only counts buffers that have arrived at a certain stage of
|
|
||||||
processing (received from the client + encrypted and/or compressed as
|
|
||||||
necessary) and are ready to send to the storage nodes. Many other buffers will
|
|
||||||
not be counted and this is not a hard limit on RAM consumption. In particular,
|
|
||||||
if many clients send requests simultaneously with large objects, the RAM
|
|
||||||
consumption will always grow linearly with the number of concurrent requests,
|
|
||||||
as each request will use a few buffers of size `block_size` for receiving and
|
|
||||||
intermediate processing before even trying to send the data to the storage
|
|
||||||
node.
|
|
||||||
|
|
||||||
The default value is 256MiB.
|
|
||||||
|
|
||||||
#### `lmdb_map_size` {#lmdb_map_size}
|
#### `lmdb_map_size` {#lmdb_map_size}
|
||||||
|
|
||||||
This parameter can be used to set the map size used by LMDB,
|
This parameter can be used to set the map size used by LMDB,
|
||||||
|
@ -556,14 +432,6 @@ RPC calls. **This parameter is optional but recommended.** In case you have
|
||||||
a NAT that binds the RPC port to a port that is different on your public IP,
|
a NAT that binds the RPC port to a port that is different on your public IP,
|
||||||
this field might help making it work.
|
this field might help making it work.
|
||||||
|
|
||||||
#### `rpc_public_addr_subnet` {#rpc_public_addr_subnet}
|
|
||||||
In case `rpc_public_addr` is not set, but autodiscovery is used, this allows
|
|
||||||
filtering the list of automatically discovered IPs to a specific subnet.
|
|
||||||
|
|
||||||
For example, if nodes should pick *their* IP inside a specific subnet, but you
|
|
||||||
don't want to explicitly write the IP down (as it's dynamic, or you want to
|
|
||||||
share configs across nodes), you can use this option.
|
|
||||||
|
|
||||||
#### `bootstrap_peers` {#bootstrap_peers}
|
#### `bootstrap_peers` {#bootstrap_peers}
|
||||||
|
|
||||||
A list of peer identifiers used to contact other Garage peers of this cluster.
|
A list of peer identifiers used to contact other Garage peers of this cluster.
|
||||||
|
@ -580,7 +448,7 @@ be obtained by running `garage node id` and then included directly in the
|
||||||
key will be returned by `garage node id` and you will have to add the IP
|
key will be returned by `garage node id` and you will have to add the IP
|
||||||
yourself.
|
yourself.
|
||||||
|
|
||||||
### `allow_world_readable_secrets` or `GARAGE_ALLOW_WORLD_READABLE_SECRETS` (env) {#allow_world_readable_secrets}
|
### `allow_world_readable_secrets`
|
||||||
|
|
||||||
Garage checks the permissions of your secret files to make sure they're not
|
Garage checks the permissions of your secret files to make sure they're not
|
||||||
world-readable. In some cases, the check might fail and consider your files as
|
world-readable. In some cases, the check might fail and consider your files as
|
||||||
|
|
|
@ -61,7 +61,7 @@ directed to a Garage cluster can be handled independently of one another instead
|
||||||
of going through a central bottleneck (the leader node).
|
of going through a central bottleneck (the leader node).
|
||||||
As a consequence, requests can be handled much faster, even in cases where latency
|
As a consequence, requests can be handled much faster, even in cases where latency
|
||||||
between cluster nodes is important (see our [benchmarks](@/documentation/design/benchmarks/index.md) for data on this).
|
between cluster nodes is important (see our [benchmarks](@/documentation/design/benchmarks/index.md) for data on this).
|
||||||
This is particularly useful when nodes are far from one another and talk to one another through standard Internet connections.
|
This is particularly useful when nodes are far from one another and talk to one another through standard Internet connections.
|
||||||
|
|
||||||
### Web server for static websites
|
### Web server for static websites
|
||||||
|
|
||||||
|
|
|
@ -225,17 +225,6 @@ block_bytes_read 120586322022
|
||||||
block_bytes_written 3386618077
|
block_bytes_written 3386618077
|
||||||
```
|
```
|
||||||
|
|
||||||
#### `block_ram_buffer_free_kb` (gauge)
|
|
||||||
|
|
||||||
Kibibytes available for buffering blocks that have to be sent to remote nodes.
|
|
||||||
When clients send too much data to this node and a storage node is not receiving
|
|
||||||
data fast enough due to slower network conditions, this will decrease to
|
|
||||||
zero and backpressure will be applied.
|
|
||||||
|
|
||||||
```
|
|
||||||
block_ram_buffer_free_kb 219829
|
|
||||||
```
|
|
||||||
|
|
||||||
#### `block_compression_level` (counter)
|
#### `block_compression_level` (counter)
|
||||||
|
|
||||||
Exposes the block compression level configured for the Garage node.
|
Exposes the block compression level configured for the Garage node.
|
||||||
|
@ -392,7 +381,7 @@ table_merkle_updater_todo_queue_length{table_name="block_ref"} 0
|
||||||
|
|
||||||
#### `table_sync_items_received`, `table_sync_items_sent` (counters)
|
#### `table_sync_items_received`, `table_sync_items_sent` (counters)
|
||||||
|
|
||||||
Number of data items sent to/received from other nodes during resync procedures
|
Number of data items sent to/received from other nodes during resync procedures
|
||||||
|
|
||||||
```
|
```
|
||||||
table_sync_items_received{from="<remote node>",table_name="bucket_v2"} 3
|
table_sync_items_received{from="<remote node>",table_name="bucket_v2"} 3
|
||||||
|
|
|
@ -42,7 +42,7 @@ The general principle are similar, but details have not been updated.**
|
||||||
A version is defined by the existence of at least one entry in the blocks table for a certain version UUID.
|
A version is defined by the existence of at least one entry in the blocks table for a certain version UUID.
|
||||||
We must keep the following invariant: if a version exists in the blocks table, it has to be referenced in the objects table.
|
We must keep the following invariant: if a version exists in the blocks table, it has to be referenced in the objects table.
|
||||||
We explicitly manage concurrent versions of an object: the version timestamp and version UUID columns are index columns, thus we may have several concurrent versions of an object.
|
We explicitly manage concurrent versions of an object: the version timestamp and version UUID columns are index columns, thus we may have several concurrent versions of an object.
|
||||||
Important: before deleting an older version from the objects table, we must make sure that we did a successful delete of the blocks of that version from the blocks table.
|
Important: before deleting an older version from the objects table, we must make sure that we did a successful delete of the blocks of that version from the blocks table.
|
||||||
|
|
||||||
Thus, the workflow for reading an object is as follows:
|
Thus, the workflow for reading an object is as follows:
|
||||||
|
|
||||||
|
@ -95,7 +95,7 @@ Known issue: if someone is reading from a version that we want to delete and the
|
||||||
Useful metadata:
|
Useful metadata:
|
||||||
|
|
||||||
- list of versions that reference this block in the Cassandra table, so that we can do GC by checking in Cassandra that the lines still exist
|
- list of versions that reference this block in the Cassandra table, so that we can do GC by checking in Cassandra that the lines still exist
|
||||||
- list of other nodes that we know have acknowledged a write of this block, useful in the rebalancing algorithm
|
- list of other nodes that we know have acknowledged a write of this block, useful in the rebalancing algorithm
|
||||||
|
|
||||||
Write strategy: have a single thread that does all write IO so that it is serialized (or have several threads that manage independent parts of the hash space). When writing a blob, write it to a temporary file, close, then rename so that a concurrent read gets a consistent result (either not found or found with whole content).
|
Write strategy: have a single thread that does all write IO so that it is serialized (or have several threads that manage independent parts of the hash space). When writing a blob, write it to a temporary file, close, then rename so that a concurrent read gets a consistent result (either not found or found with whole content).
|
||||||
|
|
||||||
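In shell terms, the write-then-rename trick amounts to the following (an illustrative sketch of the idea, not Garage's actual implementation):

```bash
tmp=$(mktemp /data/blocks/.tmp.XXXXXX)   # temporary file on the same filesystem
cat > "$tmp"                             # write the whole blob
mv "$tmp" "/data/blocks/$hash"           # atomic rename: readers see nothing or the full block
```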
|
|
|
@ -68,7 +68,7 @@ The migration steps are as follows:
|
||||||
5. Turn off Garage 0.3
|
5. Turn off Garage 0.3
|
||||||
|
|
||||||
6. Backup metadata folders if you can (i.e. if you have space to do it
|
6. Backup metadata folders if you can (i.e. if you have space to do it
|
||||||
somewhere). Backing up data folders could also be useful but that's much
|
somewhere). Backing up data folders could also be useful but that's much
|
||||||
harder to do. If your filesystem supports snapshots, this could be a good
|
harder to do. If your filesystem supports snapshots, this could be a good
|
||||||
time to use them.
|
time to use them.
|
||||||
|
|
||||||
|
|
|
@@ -1,77 +0,0 @@

+++
title = "Migrating from 0.9 to 1.0"
weight = 11
+++

**This guide explains how to migrate to 1.0 if you have an existing 0.9 cluster.
We don't recommend trying to migrate to 1.0 directly from 0.8 or older.**

This migration procedure has been tested on several clusters without issues.
However, it is still a *critical procedure* that might cause issues.
**Make sure to back up all your data before attempting it!**

You might also want to read our [general documentation on upgrading Garage](@/documentation/operations/upgrading.md).

## Changes introduced in v1.0

The following are **breaking changes** in Garage v1.0 that require your attention when migrating:

- The Sled metadata db engine has been **removed**. If your cluster was still
  using Sled, you will need to **use a Garage v0.9.x binary** to convert the
  database using the `garage convert-db` subcommand. See
  [here](@/documentation/reference-manual/configuration.md#db_engine) for the
  details of the procedure.

The following syntax changes have been made to the configuration file:

- The `replication_mode` parameter has been split into two parameters:
  [`replication_factor`](@/documentation/reference-manual/configuration.md#replication_factor)
  and
  [`consistency_mode`](@/documentation/reference-manual/configuration.md#consistency_mode).
  The old syntax using `replication_mode` is still supported for legacy
  reasons and can still be used; a sketch of the new syntax is shown below.

- The parameters `sled_cache_capacity` and `sled_flush_every_ms` have been removed.
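For illustration, a configuration that previously said `replication_mode = "3"` would look like this under the new syntax (a minimal sketch, other settings omitted):

```toml
# Old syntax (still accepted for legacy reasons):
# replication_mode = "3"

# New syntax, split into two parameters:
replication_factor = 3
consistency_mode = "consistent"
```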
## Migration procedure

The migration to Garage v1.0 can be done with almost no downtime,
by restarting all nodes at once in the new version.

The migration steps are as follows:

1. Do a `garage repair --all-nodes --yes tables`, check the logs and check that
   all data seems to be synced correctly between nodes. If you have time, do
   additional `garage repair` procedures (`blocks`, `versions`, `block_refs`,
   etc.)

2. Ensure you have a snapshot of your Garage installation that you can restore
   to in case the upgrade goes wrong:

   - If you are running Garage v0.9.4 or later, use the `garage meta snapshot
     --all` command to make a backup snapshot of the metadata directories of
     your nodes for backup purposes, and save a copy of the following files in
     the metadata directories of your nodes: `cluster_layout`, `data_layout`,
     `node_key`, `node_key.pub` (see the command sketch after this list).

   - If you are running a filesystem such as ZFS or BTRFS that supports
     snapshotting, you can create a filesystem-level snapshot to be used as a
     restoration point if needed.

   - In other cases, make a backup using the old procedure: turn off each node
     individually; back up its metadata folder (for instance, use the following
     command if your metadata directory is `/var/lib/garage/meta`: `cd
     /var/lib/garage ; tar -acf meta-v0.9.tar.zst meta/`); turn it back on
     again. This will allow you to take a backup of all nodes without
     impacting global cluster availability. You can do all nodes of a single
     zone at once as this does not impact the availability of Garage.

3. Prepare your updated binaries and configuration files for Garage v1.0

4. Shut down all v0.9 nodes simultaneously, and restart them all simultaneously
   in v1.0. Use your favorite deployment tool (Ansible, Kubernetes, Nomad) to
   achieve this as fast as possible. Garage v1.0 should be in a working state
   as soon as enough nodes have started.

5. Monitor your cluster in the following hours to see if it works well under
   your production load.
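As referenced in step 2, a minimal command sketch of the metadata backup on Garage v0.9.4 or later (the paths are examples, adjust them to your deployment):

```bash
# Snapshot the metadata directories of all nodes at once:
garage meta snapshot --all

# Then keep a copy of the layout and key files from each node's
# metadata directory (here assumed to be /var/lib/garage/meta):
cd /var/lib/garage/meta
cp cluster_layout data_layout node_key node_key.pub /root/garage-v0.9-backup/
```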
@@ -37,7 +37,7 @@ There are two reasons for this:

Reminder: rules of simplicity, concerning changes to Garage's source code.
Always question what we are doing.
Never do anything just because it looks nice or because we "think" it might be useful at some later point but without knowing precisely why/when.
Only do things that make perfect sense in the context of what we currently know.

## References
@@ -70,7 +70,7 @@ Example response body:

```json
{
  "node": "b10c110e4e854e5aa3f4637681befac755154b20059ec163254ddbfae86b09df",
  "garageVersion": "v1.0.1",
  "garageVersion": "v0.10.0",
  "garageFeatures": [
    "k2v",
    "lmdb",
@@ -562,7 +562,7 @@ token>", v: ["<value1>", ...] }`, with the following fields:

- in case of concurrent update and deletion, a `null` is added to the list of concurrent values

- if the `tombstones` query parameter is set to `true`, tombstones are returned
  for items that have been deleted (this can be useful for inserting after an
  item that has been deleted, so that the insert is not considered
  concurrent with the delete). Tombstones are returned as tuples in the
  same format with only `null` values; an example query is sketched below.
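For illustration, a ReadBatch request body that asks for tombstones looks like this (the partition key is a made-up example):

```json
[
  { "partitionKey": "mailbox", "tombstones": true }
]
```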
114 flake.lock
@@ -12,27 +12,27 @@

      "rust-overlay": "rust-overlay"
    },
    "locked": {
      "lastModified": 1705129117,
      "lastModified": 1666087781,
      "narHash": "sha256-LgdDHibvimzYhxBK3kxCk2gAL7k4Hyigl5KI0X9cijA=",
      "narHash": "sha256-trKVdjMZ8mNkGfLcY5LsJJGtdV3xJDZnMVrkFjErlcs=",
      "owner": "cargo2nix",
      "owner": "Alexis211",
      "repo": "cargo2nix",
      "rev": "ae19a9e1f8f0880c088ea155ab66cee1fa001f59",
      "rev": "a7a61179b66054904ef6a195d8da736eaaa06c36",
      "type": "github"
    },
    "original": {
      "owner": "cargo2nix",
      "owner": "Alexis211",
      "repo": "cargo2nix",
      "rev": "ae19a9e1f8f0880c088ea155ab66cee1fa001f59",
      "rev": "a7a61179b66054904ef6a195d8da736eaaa06c36",
      "type": "github"
    }
  },
  "flake-compat": {
    "locked": {
      "lastModified": 1717312683,
      "lastModified": 1688025799,
      "narHash": "sha256-FrlieJH50AuvagamEvWMIE6D2OAnERuDboFDYAED/dE=",
      "narHash": "sha256-ktpB4dRtnksm9F5WawoIkEneh1nrEvuxb5lJFt1iOyw=",
      "owner": "nix-community",
      "repo": "flake-compat",
      "rev": "38fd3954cf65ce6faf3d0d45cd26059e059f07ea",
      "rev": "8bf105319d44f6b9f0d764efa4fdef9f1cc9ba1c",
      "type": "github"
    },
    "original": {

@@ -42,12 +42,33 @@

    }
  },
  "flake-utils": {
    "inputs": {
      "systems": "systems"
    },
    "locked": {
      "lastModified": 1659877975,
      "lastModified": 1681202837,
      "narHash": "sha256-zllb8aq3YO3h8B/U0/J1WBgAL8EX5yWf5pMj3G0NAmc=",
      "narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
      "owner": "numtide",
      "repo": "flake-utils",
      "rev": "c0e246b9b83f637f4681389ecabcb2681b4f3af0",
      "rev": "cfacdce06f30d2b68473a46042957675eebb3401",
      "type": "github"
    },
    "original": {
      "owner": "numtide",
      "repo": "flake-utils",
      "type": "github"
    }
  },
  "flake-utils_2": {
    "inputs": {
      "systems": "systems_2"
    },
    "locked": {
      "lastModified": 1681202837,
      "narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
      "owner": "numtide",
      "repo": "flake-utils",
      "rev": "cfacdce06f30d2b68473a46042957675eebb3401",
      "type": "github"
    },
    "original": {

@@ -58,17 +79,33 @@

    },
  "nixpkgs": {
    "locked": {
      "lastModified": 1736692550,
      "lastModified": 1682109806,
      "narHash": "sha256-7tk8xH+g0sJkKLTJFOxphJxxOjMDFMWv24nXslaU2ro=",
      "narHash": "sha256-d9g7RKNShMLboTWwukM+RObDWWpHKaqTYXB48clBWXI=",
      "owner": "NixOS",
      "repo": "nixpkgs",
      "rev": "7c4869c47090dd7f9f1bdfb49a22aea026996815",
      "rev": "2362848adf8def2866fabbffc50462e929d7fffb",
      "type": "github"
    },
    "original": {
      "owner": "NixOS",
      "ref": "nixpkgs-unstable",
      "repo": "nixpkgs",
      "type": "github"
    }
  },
  "nixpkgs_2": {
    "locked": {
      "lastModified": 1707091808,
      "narHash": "sha256-LahKBAfGbY836gtpVNnWwBTIzN7yf/uYM/S0g393r0Y=",
      "owner": "NixOS",
      "repo": "nixpkgs",
      "rev": "9f2ee8c91ac42da3ae6c6a1d21555f283458247e",
      "type": "github"
    },
    "original": {
      "owner": "NixOS",
      "repo": "nixpkgs",
      "rev": "7c4869c47090dd7f9f1bdfb49a22aea026996815",
      "rev": "9f2ee8c91ac42da3ae6c6a1d21555f283458247e",
      "type": "github"
    }
  },

@@ -80,28 +117,55 @@

      "cargo2nix",
      "flake-utils"
    ],
    "nixpkgs": "nixpkgs"
    "nixpkgs": "nixpkgs_2"
    }
  },
  "rust-overlay": {
    "inputs": {
      "nixpkgs": [
      "flake-utils": "flake-utils_2",
        "cargo2nix",
      "nixpkgs": "nixpkgs"
        "nixpkgs"
      ]
    },
    "locked": {
      "lastModified": 1736649126,
      "lastModified": 1707271822,
      "narHash": "sha256-XCw5sv/ePsroqiF3lJM6Y2X9EhPdHeE47gr3Q8b0UQw=",
      "narHash": "sha256-/DZsoPH5GBzOpVEGz5PgJ7vh8Q6TcrJq5u8FcBjqAfI=",
      "owner": "oxalica",
      "repo": "rust-overlay",
      "rev": "162ab0edc2936508470199b2e8e6c444a2535019",
      "rev": "7a94fe7690d2bdfe1aab475382a505e14dc114a6",
      "type": "github"
    },
    "original": {
      "owner": "oxalica",
      "repo": "rust-overlay",
      "rev": "162ab0edc2936508470199b2e8e6c444a2535019",
      "type": "github"
    }
  },
  "systems": {
    "locked": {
      "lastModified": 1681028828,
      "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
      "owner": "nix-systems",
      "repo": "default",
      "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
      "type": "github"
    },
    "original": {
      "owner": "nix-systems",
      "repo": "default",
      "type": "github"
    }
  },
  "systems_2": {
    "locked": {
      "lastModified": 1681028828,
      "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
      "owner": "nix-systems",
      "repo": "default",
      "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
      "type": "github"
    },
    "original": {
      "owner": "nix-systems",
      "repo": "default",
      "type": "github"
    }
  }
14 flake.nix
@@ -2,27 +2,24 @@

  description =
    "Garage, an S3-compatible distributed object store for self-hosted deployments";

  # Nixpkgs 24.11 as of 2025-01-12 has rustc v1.82
  # Nixpkgs 23.11 as of 2024-02-07, has rustc v1.73
  inputs.nixpkgs.url =
    "github:NixOS/nixpkgs/7c4869c47090dd7f9f1bdfb49a22aea026996815";
    "github:NixOS/nixpkgs/9f2ee8c91ac42da3ae6c6a1d21555f283458247e";

  inputs.flake-compat.url = "github:nix-community/flake-compat";

  inputs.cargo2nix = {
    # As of 2022-10-18: two small patches over unstable branch, one for clippy and one to fix feature detection
    #url = "github:Alexis211/cargo2nix/a7a61179b66054904ef6a195d8da736eaaa06c36";
    url = "github:Alexis211/cargo2nix/a7a61179b66054904ef6a195d8da736eaaa06c36";

    # As of 2023-04-25:
    # - my two patches were merged into unstable (one for clippy and one to "fix" feature detection)
    # - rustc v1.66
    # url = "github:cargo2nix/cargo2nix/8fb57a670f7993bfc24099c33eb9c5abb51f29a2";

    # Mainline cargo2nix as of 2025-01-12 (branch release-0.11.0)
    # Rust overlay as of 2024-02-07
    url = "github:cargo2nix/cargo2nix/ae19a9e1f8f0880c088ea155ab66cee1fa001f59";

  # Rust overlay as of 2025-01-12
  inputs.rust-overlay.url =
    "github:oxalica/rust-overlay/162ab0edc2936508470199b2e8e6c444a2535019";
    "github:oxalica/rust-overlay/7a94fe7690d2bdfe1aab475382a505e14dc114a6";

  inputs.nixpkgs.follows = "nixpkgs";
  inputs.flake-compat.follows = "flake-compat";

@@ -79,7 +76,6 @@

      # import the full shell using `nix develop .#full`
      full = shellWithPackages (with pkgs; [
        rustfmt
        rust-analyzer
        clang
        mold
        # ---- extra packages for dev tasks ----
158 k2v_test.py (Executable file)

@@ -0,0 +1,158 @@
#!/usr/bin/env python

import os
import requests
from datetime import datetime

# let's talk to our AWS Elasticsearch cluster
#from requests_aws4auth import AWS4Auth
#auth = AWS4Auth('GK31c2f218a2e44f485b94239e',
#                'b892c0665f0ada8a4755dae98baa3b133590e11dae3bcc1f9d769d67f16c3835',
#                'us-east-1',
#                's3')

from aws_requests_auth.aws_auth import AWSRequestsAuth
auth = AWSRequestsAuth(aws_access_key='GK31c2f218a2e44f485b94239e',
                       aws_secret_access_key='b892c0665f0ada8a4755dae98baa3b133590e11dae3bcc1f9d769d67f16c3835',
                       aws_host='localhost:3812',
                       aws_region='us-east-1',
                       aws_service='k2v')


print("-- ReadIndex")
response = requests.get('http://localhost:3812/alex',
                        auth=auth)
print(response.headers)
print(response.text)


sort_keys = ["a", "b", "c", "d"]

for sk in sort_keys:
    print("-- (%s) Put initial (no CT)"%sk)
    response = requests.put('http://localhost:3812/alex/root?sort_key=%s'%sk,
                            auth=auth,
                            data='{}: Hello, world!'.format(datetime.timestamp(datetime.now())))
    print(response.headers)
    print(response.text)

    print("-- Get")
    response = requests.get('http://localhost:3812/alex/root?sort_key=%s'%sk,
                            auth=auth)
    print(response.headers)
    print(response.text)
    ct = response.headers["x-garage-causality-token"]

    print("-- ReadIndex")
    response = requests.get('http://localhost:3812/alex',
                            auth=auth)
    print(response.headers)
    print(response.text)

    print("-- Put with CT")
    response = requests.put('http://localhost:3812/alex/root?sort_key=%s'%sk,
                            auth=auth,
                            headers={'x-garage-causality-token': ct},
                            data='{}: Good bye, world!'.format(datetime.timestamp(datetime.now())))
    print(response.headers)
    print(response.text)

    print("-- Get")
    response = requests.get('http://localhost:3812/alex/root?sort_key=%s'%sk,
                            auth=auth)
    print(response.headers)
    print(response.text)

    print("-- Put again with same CT (concurrent)")
    response = requests.put('http://localhost:3812/alex/root?sort_key=%s'%sk,
                            auth=auth,
                            headers={'x-garage-causality-token': ct},
                            data='{}: Concurrent value, oops'.format(datetime.timestamp(datetime.now())))
    print(response.headers)
    print(response.text)

for sk in sort_keys:
    print("-- (%s) Get"%sk)
    response = requests.get('http://localhost:3812/alex/root?sort_key=%s'%sk,
                            auth=auth)
    print(response.headers)
    print(response.text)
    ct = response.headers["x-garage-causality-token"]

    print("-- Delete")
    response = requests.delete('http://localhost:3812/alex/root?sort_key=%s'%sk,
                               headers={'x-garage-causality-token': ct},
                               auth=auth)
    print(response.headers)
    print(response.text)

print("-- ReadIndex")
response = requests.get('http://localhost:3812/alex',
                        auth=auth)
print(response.headers)
print(response.text)

print("-- InsertBatch")
response = requests.post('http://localhost:3812/alex',
                         auth=auth,
                         data='''
[
  {"pk": "root", "sk": "a", "ct": null, "v": "aW5pdGlhbCB0ZXN0Cg=="},
  {"pk": "root", "sk": "b", "ct": null, "v": "aW5pdGlhbCB0ZXN1Cg=="},
  {"pk": "root", "sk": "c", "ct": null, "v": "aW5pdGlhbCB0ZXN2Cg=="}
]
''')
print(response.headers)
print(response.text)

print("-- ReadIndex")
response = requests.get('http://localhost:3812/alex',
                        auth=auth)
print(response.headers)
print(response.text)

for sk in sort_keys:
    print("-- (%s) Get"%sk)
    response = requests.get('http://localhost:3812/alex/root?sort_key=%s'%sk,
                            auth=auth)
    print(response.headers)
    print(response.text)
    ct = response.headers["x-garage-causality-token"]

print("-- ReadBatch")
response = requests.post('http://localhost:3812/alex?search',
                         auth=auth,
                         data='''
[
  {"partitionKey": "root"},
  {"partitionKey": "root", "tombstones": true},
  {"partitionKey": "root", "tombstones": true, "limit": 2},
  {"partitionKey": "root", "start": "c", "singleItem": true},
  {"partitionKey": "root", "start": "b", "end": "d", "tombstones": true}
]
''')
print(response.headers)
print(response.text)


print("-- DeleteBatch")
response = requests.post('http://localhost:3812/alex?delete',
                         auth=auth,
                         data='''
[
  {"partitionKey": "root", "start": "b", "end": "c"}
]
''')
print(response.headers)
print(response.text)

print("-- ReadBatch")
response = requests.post('http://localhost:3812/alex?search',
                         auth=auth,
                         data='''
[
  {"partitionKey": "root"}
]
''')
print(response.headers)
print(response.text)
102 nix/compile.nix
@@ -1,4 +1,4 @@

{ system, target ? null, pkgsSrc, cargo2nixOverlay
{ system, target ? null, pkgsSrc, cargo2nixOverlay, compiler ? "rustc"
, release ? false, git_version ? null, features ? null, }:

let

@@ -20,10 +20,24 @@ let

  };

  toolchainOptions = {
    rustVersion = "1.78.0";
    rustVersion = "1.73.0";
    extraRustComponents = [ "clippy" ];
  };

  buildEnv = (drv:
    {
      rustc = drv.setBuildEnv;
      clippy = ''
        ${drv.setBuildEnv or ""}
        echo
        echo --- BUILDING WITH CLIPPY ---
        echo
        export NIX_RUST_BUILD_FLAGS="''${NIX_RUST_BUILD_FLAGS} --deny warnings"
        export RUSTC="''${CLIPPY_DRIVER}"
      '';
    }.${compiler});
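The `clippy` branch of `buildEnv` above works by substituting clippy's driver for `rustc` and denying warnings. Outside of Nix, roughly the same effect can be approximated like this (a sketch; it assumes `clippy-driver` is on the PATH and that the build honors `RUSTC`/`RUSTFLAGS`):

```bash
# Build with clippy standing in for rustc, failing on any warning.
export RUSTC="$(command -v clippy-driver)"
export RUSTFLAGS="--deny warnings"
cargo build
```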
/* Cargo2nix provides many overrides by default, you can take inspiration from them:
   https://github.com/cargo2nix/cargo2nix/blob/master/overlay/overrides.nix

@@ -32,7 +46,9 @@ let

  */
  packageOverrides = pkgs:
    pkgs.rustBuilder.overrides.all ++ [
      /* [1] We need to alter Nix hardening to make static binaries: PIE,
      /* [1] We add some logic to compile our crates with clippy, it provides us many additional lints

         [2] We need to alter Nix hardening to make static binaries: PIE,
         Position Independent Executables seems to be supported only on amd64. Having
         this flag set either 1. make our executables crash or 2. compile as dynamic on some platforms.
         Here, we deactivate it. Later (find `codegenOpts`), we reactivate it for supported targets

@@ -40,11 +56,11 @@ let

         PIE is a feature used by ASLR, which helps mitigate security issues.
         Learn more about Nix Hardening at: https://github.com/NixOS/nixpkgs/blob/master/pkgs/build-support/cc-wrapper/add-hardening.sh

         [2] We want to inject the git version while keeping the build deterministic.
         [3] We want to inject the git version while keeping the build deterministic.
         As we do not want to consider the .git folder as part of the input source,
         we ask the user (the CI often) to pass the value to Nix.

         [3] We don't want libsodium-sys and zstd-sys to try to use pkgconfig to build against a system library.
         [4] We don't want libsodium-sys and zstd-sys to try to use pkgconfig to build against a system library.
         However the features to do so get activated for some reason (due to a bug in cargo2nix?),
         so disable them manually here.
      */

@@ -52,7 +68,7 @@ let

        name = "garage";
        overrideAttrs = drv:
          (if git_version != null then {
            # [2]
            # [3]
            preConfigure = ''
              ${drv.preConfigure or ""}
              export GIT_VERSION="${git_version}"

@@ -60,21 +76,86 @@

          } else
            { }) // {
              # [1]
              setBuildEnv = (buildEnv drv);
              # [2]
              hardeningDisable = [ "pie" ];
            };
      })

      (pkgs.rustBuilder.rustLib.makeOverride {
        name = "garage_rpc";
        overrideAttrs = drv: { # [1]
          setBuildEnv = (buildEnv drv);
        };
      })

      (pkgs.rustBuilder.rustLib.makeOverride {
        name = "garage_db";
        overrideAttrs = drv: { # [1]
          setBuildEnv = (buildEnv drv);
        };
      })

      (pkgs.rustBuilder.rustLib.makeOverride {
        name = "garage_util";
        overrideAttrs = drv: { # [1]
          setBuildEnv = (buildEnv drv);
        };
      })

      (pkgs.rustBuilder.rustLib.makeOverride {
        name = "garage_table";
        overrideAttrs = drv: { # [1]
          setBuildEnv = (buildEnv drv);
        };
      })

      (pkgs.rustBuilder.rustLib.makeOverride {
        name = "garage_block";
        overrideAttrs = drv: { # [1]
          setBuildEnv = (buildEnv drv);
        };
      })

      (pkgs.rustBuilder.rustLib.makeOverride {
        name = "garage_model";
        overrideAttrs = drv: { # [1]
          setBuildEnv = (buildEnv drv);
        };
      })

      (pkgs.rustBuilder.rustLib.makeOverride {
        name = "garage_api";
        overrideAttrs = drv: { # [1]
          setBuildEnv = (buildEnv drv);
        };
      })

      (pkgs.rustBuilder.rustLib.makeOverride {
        name = "garage_web";
        overrideAttrs = drv: { # [1]
          setBuildEnv = (buildEnv drv);
        };
      })

      (pkgs.rustBuilder.rustLib.makeOverride {
        name = "k2v-client";
        overrideAttrs = drv: { # [1]
          setBuildEnv = (buildEnv drv);
        };
      })

      (pkgs.rustBuilder.rustLib.makeOverride {
        name = "libsodium-sys";
        overrideArgs = old: {
          features = [ ]; # [3]
          features = [ ]; # [4]
        };
      })

      (pkgs.rustBuilder.rustLib.makeOverride {
        name = "zstd-sys";
        overrideArgs = old: {
          features = [ ]; # [3]
          features = [ ]; # [4]
        };
      })
    ];

@@ -87,12 +168,13 @@ let

  rootFeatures = if features != null then
    features
  else
    ([ "garage/bundled-libs" "garage/lmdb" "garage/sqlite" "garage/k2v" ] ++ (if release then [
    ([ "garage/bundled-libs" "garage/lmdb" "garage/k2v" ] ++ (if release then [
      "garage/consul-discovery"
      "garage/kubernetes-discovery"
      "garage/metrics"
      "garage/telemetry-otlp"
      "garage/syslog"
      "garage/lmdb"
      "garage/sqlite"
    ] else
      [ ]));
@@ -15,10 +15,10 @@ type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.6.0
version: 0.4.1

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "v1.0.1"
appVersion: "v0.9.3"
@@ -1,86 +0,0 @@

# garage

![Version: 0.6.0](https://img.shields.io/badge/Version-0.6.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.0.1](https://img.shields.io/badge/AppVersion-v1.0.1-informational?style=flat-square)

S3-compatible object store for small self-hosted geo-distributed deployments

## Values

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| affinity | object | `{}` | |
| deployment.kind | string | `"StatefulSet"` | Switchable to DaemonSet |
| deployment.podManagementPolicy | string | `"OrderedReady"` | If using statefulset, allow Parallel or OrderedReady (default) |
| deployment.replicaCount | int | `3` | Number of StatefulSet replicas/garage nodes to start |
| environment | object | `{}` | |
| extraVolumeMounts | object | `{}` | |
| extraVolumes | object | `{}` | |
| fullnameOverride | string | `""` | |
| garage.blockSize | string | `"1048576"` | Default is 1MB. An increase can result in better performance in certain scenarios https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#block-size |
| garage.bootstrapPeers | list | `[]` | This is not required if you use the integrated kubernetes discovery |
| garage.compressionLevel | string | `"1"` | zstd compression level of stored blocks https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#compression-level |
| garage.dbEngine | string | `"lmdb"` | Can be changed for better performance on certain systems https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db-engine-since-v0-8-0 |
| garage.existingConfigMap | string | `""` | if not empty string, allow using an existing ConfigMap for the garage.toml, if set, ignores garage.toml |
| garage.garageTomlString | string | `""` | String Template for the garage configuration if set, ignores above values. Values can be templated, see https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/ |
| garage.kubernetesSkipCrd | bool | `false` | Set to true if you want to use k8s discovery but install the CRDs manually outside of the helm chart, for example if you operate at namespace level without cluster resources |
| garage.replicationMode | string | `"3"` | Default to 3 replicas, see the replication_mode section at https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication-mode |
| garage.rpcBindAddr | string | `"[::]:3901"` | |
| garage.rpcSecret | string | `""` | If not given, a random secret will be generated and stored in a Secret object |
| garage.s3.api.region | string | `"garage"` | |
| garage.s3.api.rootDomain | string | `".s3.garage.tld"` | |
| garage.s3.web.index | string | `"index.html"` | |
| garage.s3.web.rootDomain | string | `".web.garage.tld"` | |
| image.pullPolicy | string | `"IfNotPresent"` | |
| image.repository | string | `"dxflrs/amd64_garage"` | default to amd64 docker image |
| image.tag | string | `""` | set the image tag, please prefer using the chart version and not this to avoid compatibility issues |
| imagePullSecrets | list | `[]` | set if you need credentials to pull your custom image |
| ingress.s3.api.annotations | object | `{}` | Rely _either_ on the className or the annotation below but not both! If you want to use the className, set className: "nginx" and replace "nginx" by an Ingress controller name, examples [here](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers). |
| ingress.s3.api.enabled | bool | `false` | |
| ingress.s3.api.hosts[0] | object | `{"host":"s3.garage.tld","paths":[{"path":"/","pathType":"Prefix"}]}` | garage S3 API endpoint, to be used with awscli for example |
| ingress.s3.api.hosts[1] | object | `{"host":"*.s3.garage.tld","paths":[{"path":"/","pathType":"Prefix"}]}` | garage S3 API endpoint, DNS style bucket access |
| ingress.s3.api.labels | object | `{}` | |
| ingress.s3.api.tls | list | `[]` | |
| ingress.s3.web.annotations | object | `{}` | Rely _either_ on the className or the annotation below but not both! If you want to use the className, set className: "nginx" and replace "nginx" by an Ingress controller name, examples [here](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers). |
| ingress.s3.web.enabled | bool | `false` | |
| ingress.s3.web.hosts[0] | object | `{"host":"*.web.garage.tld","paths":[{"path":"/","pathType":"Prefix"}]}` | wildcard website access with bucket name prefix |
| ingress.s3.web.hosts[1] | object | `{"host":"mywebpage.example.com","paths":[{"path":"/","pathType":"Prefix"}]}` | specific bucket access with FQDN bucket |
| ingress.s3.web.labels | object | `{}` | |
| ingress.s3.web.tls | list | `[]` | |
| initImage.pullPolicy | string | `"IfNotPresent"` | |
| initImage.repository | string | `"busybox"` | |
| initImage.tag | string | `"stable"` | |
| monitoring.metrics.enabled | bool | `false` | If true, a service for monitoring is created with a prometheus.io/scrape annotation |
| monitoring.metrics.serviceMonitor.enabled | bool | `false` | If true, a ServiceMonitor CRD is created for a prometheus operator https://github.com/coreos/prometheus-operator |
| monitoring.metrics.serviceMonitor.interval | string | `"15s"` | |
| monitoring.metrics.serviceMonitor.labels | object | `{}` | |
| monitoring.metrics.serviceMonitor.path | string | `"/metrics"` | |
| monitoring.metrics.serviceMonitor.relabelings | list | `[]` | |
| monitoring.metrics.serviceMonitor.scheme | string | `"http"` | |
| monitoring.metrics.serviceMonitor.scrapeTimeout | string | `"10s"` | |
| monitoring.metrics.serviceMonitor.tlsConfig | object | `{}` | |
| monitoring.tracing.sink | string | `""` | specify a sink endpoint for OpenTelemetry Traces, eg. `http://localhost:4317` |
| nameOverride | string | `""` | |
| nodeSelector | object | `{}` | |
| persistence.data.hostPath | string | `"/var/lib/garage/data"` | |
| persistence.data.size | string | `"100Mi"` | |
| persistence.enabled | bool | `true` | |
| persistence.meta.hostPath | string | `"/var/lib/garage/meta"` | |
| persistence.meta.size | string | `"100Mi"` | |
| podAnnotations | object | `{}` | additional pod annotations |
| podSecurityContext.fsGroup | int | `1000` | |
| podSecurityContext.runAsGroup | int | `1000` | |
| podSecurityContext.runAsNonRoot | bool | `true` | |
| podSecurityContext.runAsUser | int | `1000` | |
| resources | object | `{}` | |
| securityContext.capabilities | object | `{"drop":["ALL"]}` | The default security context is heavily restricted, feel free to tune it to your requirements |
| securityContext.readOnlyRootFilesystem | bool | `true` | |
| service.s3.api.port | int | `3900` | |
| service.s3.web.port | int | `3902` | |
| service.type | string | `"ClusterIP"` | You can rely on any service to expose your cluster - ClusterIP (+ Ingress) - NodePort (+ Ingress) - LoadBalancer |
| serviceAccount.annotations | object | `{}` | Annotations to add to the service account |
| serviceAccount.create | bool | `true` | Specifies whether a service account should be created |
| serviceAccount.name | string | `""` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template |
| tolerations | list | `[]` | |

----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2)
@@ -1,49 +1,7 @@

{{- if not .Values.garage.existingConfigMap }}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ include "garage.fullname" . }}-config
data:
  garage.toml: |-
    {{- if .Values.garage.garageTomlString }}
    {{- tpl (index (index .Values.garage) "garage.toml") $ | nindent 4 }}
    {{- tpl (index (index .Values.garage) "garageTomlString") $ | nindent 4 }}
    {{- else }}
    metadata_dir = "/mnt/meta"
    data_dir = "/mnt/data"

    db_engine = "{{ .Values.garage.dbEngine }}"

    block_size = {{ .Values.garage.blockSize }}

    replication_mode = "{{ .Values.garage.replicationMode }}"

    compression_level = {{ .Values.garage.compressionLevel }}

    rpc_bind_addr = "{{ .Values.garage.rpcBindAddr }}"
    # rpc_secret will be populated by the init container from a k8s secret object
    rpc_secret = "__RPC_SECRET_REPLACE__"

    bootstrap_peers = {{ .Values.garage.bootstrapPeers }}

    [kubernetes_discovery]
    namespace = "{{ .Release.Namespace }}"
    service_name = "{{ include "garage.fullname" . }}"
    skip_crd = {{ .Values.garage.kubernetesSkipCrd }}

    [s3_api]
    s3_region = "{{ .Values.garage.s3.api.region }}"
    api_bind_addr = "[::]:3900"
    root_domain = "{{ .Values.garage.s3.api.rootDomain }}"

    [s3_web]
    bind_addr = "[::]:3902"
    root_domain = "{{ .Values.garage.s3.web.rootDomain }}"
    index = "{{ .Values.garage.s3.web.index }}"

    [admin]
    api_bind_addr = "[::]:3903"
    {{- if .Values.monitoring.tracing.sink }}
    trace_sink = "{{ .Values.monitoring.tracing.sink }}"
    {{- end }}
    {{- end }}
{{- end }}
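For context, a hypothetical values.yaml override that feeds this template through `garageTomlString` instead of the generated configuration (content abbreviated; any valid garage.toml can be templated here):

```yaml
garage:
  garageTomlString: |-
    metadata_dir = "/mnt/meta"
    data_dir = "/mnt/data"
    db_engine = "lmdb"
    replication_mode = "3"
```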
@@ -11,7 +11,6 @@ spec:

  {{- if eq .Values.deployment.kind "StatefulSet" }}
  replicas: {{ .Values.deployment.replicaCount }}
  serviceName: {{ include "garage.fullname" . }}
  podManagementPolicy: {{ .Values.deployment.podManagementPolicy }}
  {{- end }}
  template:
    metadata:

@@ -64,10 +63,6 @@ spec:

            name: web-api
          - containerPort: 3903
            name: admin
          {{- with .Values.environment }}
          env:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          volumeMounts:
            - name: meta
              mountPath: /mnt/meta

@@ -76,9 +71,6 @@ spec:

            - name: etc
              mountPath: /etc/garage.toml
              subPath: garage.toml
          {{- with .Values.extraVolumeMounts }}
            {{- toYaml . | nindent 12 }}
          {{- end }}
          # TODO
          # livenessProbe:
          #   httpGet:

@@ -113,9 +105,6 @@ spec:

        - name: data
          emptyDir: {}
      {{- end }}
      {{- with .Values.extraVolumes }}
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
@@ -4,30 +4,28 @@

# Garage configuration. These values go to garage.toml
garage:
  # -- Can be changed for better performance on certain systems
  # Can be changed for better performance on certain systems
  # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db-engine-since-v0-8-0
  dbEngine: "lmdb"

  # -- Default is 1MB
  # Default is 1MB
  # An increase can result in better performance in certain scenarios
  # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#block-size
  blockSize: "1048576"

  # -- Default to 3 replicas, see the replication_mode section at
  # Default to 3 replicas, see the replication_mode section at
  # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication-mode
  replicationMode: "3"

  # -- zstd compression level of stored blocks
  # zstd compression level of stored blocks
  # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#compression-level
  compressionLevel: "1"

  rpcBindAddr: "[::]:3901"
  # -- If not given, a random secret will be generated and stored in a Secret object
  # If not given, a random secret will be generated and stored in a Secret object
  rpcSecret: ""
  # -- This is not required if you use the integrated kubernetes discovery
  # This is not required if you use the integrated kubernetes discovery
  bootstrapPeers: []
  # -- Set to true if you want to use k8s discovery but install the CRDs manually outside
  # of the helm chart, for example if you operate at namespace level without cluster resources
  kubernetesSkipCrd: false
  s3:
    api:

@@ -36,16 +34,47 @@ garage:

    web:
      rootDomain: ".web.garage.tld"
      index: "index.html"

  # -- if not empty string, allow using an existing ConfigMap for the garage.toml,
  # if set, ignores garage.toml
  existingConfigMap: ""

  # -- String Template for the garage configuration
  # if set, ignores above values.
  # Values can be templated,
  # see https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/
  garageTomlString: ""

  # Template for the garage configuration
  # Values can be templated
  # ref: https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/
  garage.toml: |-
    metadata_dir = "/mnt/meta"
    data_dir = "/mnt/data"

    db_engine = "{{ .Values.garage.dbEngine }}"

    block_size = {{ .Values.garage.blockSize }}

    replication_mode = "{{ .Values.garage.replicationMode }}"

    compression_level = {{ .Values.garage.compressionLevel }}

    rpc_bind_addr = "{{ .Values.garage.rpcBindAddr }}"
    # rpc_secret will be populated by the init container from a k8s secret object
    rpc_secret = "__RPC_SECRET_REPLACE__"

    bootstrap_peers = {{ .Values.garage.bootstrapPeers }}

    [kubernetes_discovery]
    namespace = "{{ .Release.Namespace }}"
    service_name = "{{ include "garage.fullname" . }}"
    skip_crd = {{ .Values.garage.kubernetesSkipCrd }}

    [s3_api]
    s3_region = "{{ .Values.garage.s3.api.region }}"
    api_bind_addr = "[::]:3900"
    root_domain = "{{ .Values.garage.s3.api.rootDomain }}"

    [s3_web]
    bind_addr = "[::]:3902"
    root_domain = "{{ .Values.garage.s3.web.rootDomain }}"
    index = "{{ .Values.garage.s3.web.index }}"

    [admin]
    api_bind_addr = "[::]:3903"
    {{- if .Values.monitoring.tracing.sink }}
    trace_sink = "{{ .Values.monitoring.tracing.sink }}"
    {{- end }}

# Data persistence
persistence:

@@ -63,18 +92,14 @@ persistence:

# Deployment configuration
deployment:
  # -- Switchable to DaemonSet
  # Switchable to DaemonSet
  kind: StatefulSet
  # -- Number of StatefulSet replicas/garage nodes to start
  # Number of StatefulSet replicas/garage nodes to start
  replicaCount: 3
  # -- If using statefulset, allow Parallel or OrderedReady (default)
  podManagementPolicy: OrderedReady

image:
  # -- default to amd64 docker image
  repository: dxflrs/amd64_garage
  # -- set the image tag, please prefer using the chart version and not this
  # please prefer using the chart version and not this tag
  # to avoid compatibility issues
  tag: ""
  pullPolicy: IfNotPresent

@@ -83,21 +108,19 @@ initImage:

  tag: stable
  pullPolicy: IfNotPresent

# -- set if you need credentials to pull your custom image
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

serviceAccount:
  # -- Specifies whether a service account should be created
  # Specifies whether a service account should be created
  create: true
  # -- Annotations to add to the service account
  # Annotations to add to the service account
  annotations: {}
  # -- The name of the service account to use.
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""

# -- additional pod annotations
podAnnotations: {}

podSecurityContext:

@@ -107,7 +130,7 @@ podSecurityContext:

  runAsNonRoot: true

securityContext:
  # -- The default security context is heavily restricted,
  # The default security context is heavily restricted
  # feel free to tune it to your requirements
  capabilities:
    drop:

@@ -115,7 +138,7 @@ securityContext:

  readOnlyRootFilesystem: true

service:
  # -- You can rely on any service to expose your cluster
  # You can rely on any service to expose your cluster
  # - ClusterIP (+ Ingress)
  # - NodePort (+ Ingress)
  # - LoadBalancer

@@ -131,23 +154,20 @@ ingress:

  s3:
    api:
      enabled: false
      # -- Rely _either_ on the className or the annotation below but not both!
      # Rely either on the className or the annotation below but not both
      # If you want to use the className, set
      # replace "nginx" by an Ingress controller
      # you can find examples here https://kubernetes.io/docs/concepts/services-networking/ingress-controllers
      # className: "nginx"
      # and replace "nginx" by an Ingress controller name,
      # examples [here](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers).
      annotations: {}
        # kubernetes.io/ingress.class: "nginx"
        # kubernetes.io/tls-acme: "true"
      labels: {}
      hosts:
        # -- garage S3 API endpoint, to be used with awscli for example
        - host: "s3.garage.tld" # garage S3 API endpoint
        - host: "s3.garage.tld"
          paths:
            - path: /
              pathType: Prefix
        # -- garage S3 API endpoint, DNS style bucket access
        - host: "*.s3.garage.tld" # garage S3 API endpoint, DNS style bucket access
        - host: "*.s3.garage.tld"
          paths:
            - path: /
              pathType: Prefix

@@ -157,23 +177,20 @@ ingress:

      # - kubernetes.docker.internal
    web:
      enabled: false
      # -- Rely _either_ on the className or the annotation below but not both!
      # Rely either on the className or the annotation below but not both
      # If you want to use the className, set
      # replace "nginx" by an Ingress controller
      # you can find examples here https://kubernetes.io/docs/concepts/services-networking/ingress-controllers
      # className: "nginx"
      # and replace "nginx" by an Ingress controller name,
      # examples [here](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers).
      annotations: {}
        # kubernetes.io/ingress.class: nginx
        # kubernetes.io/tls-acme: "true"
      labels: {}
      hosts:
        # -- wildcard website access with bucket name prefix
        - host: "*.web.garage.tld" # wildcard website access with bucket name prefix
        - host: "*.web.garage.tld"
          paths:
            - path: /
              pathType: Prefix
        # -- specific bucket access with FQDN bucket
        - host: "mywebpage.example.com" # specific bucket access with FQDN bucket
        - host: "mywebpage.example.com"
          paths:
            - path: /
              pathType: Prefix

@@ -197,18 +214,12 @@ tolerations: []

affinity: {}

environment: {}

extraVolumes: {}

extraVolumeMounts: {}

monitoring:
  metrics:
    # -- If true, a service for monitoring is created with a prometheus.io/scrape annotation
    # If true, a service for monitoring is created with a prometheus.io/scrape annotation
    enabled: false
    serviceMonitor:
      # -- If true, a ServiceMonitor CRD is created for a prometheus operator
      # If true, a ServiceMonitor CRD is created for a prometheus operator
      # https://github.com/coreos/prometheus-operator
      enabled: false
      path: /metrics

@@ -220,5 +231,4 @@ monitoring:

      scrapeTimeout: 10s
      relabelings: []
  tracing:
    # -- specify a sink endpoint for OpenTelemetry Traces, eg. `http://localhost:4317`
    sink: ""
14 script/jepsen.garage/Vagrantfile (vendored)
@@ -30,11 +30,11 @@ Vagrant.configure("2") do |config|

  config.vm.define "n6" do |config| vm(config, "n6", "192.168.56.26") end
  config.vm.define "n7" do |config| vm(config, "n7", "192.168.56.27") end

  #config.vm.define "n8" do |config| vm(config, "n8", "192.168.56.28") end
  config.vm.define "n8" do |config| vm(config, "n8", "192.168.56.28") end
  #config.vm.define "n9" do |config| vm(config, "n9", "192.168.56.29") end
  config.vm.define "n9" do |config| vm(config, "n9", "192.168.56.29") end
  #config.vm.define "n10" do |config| vm(config, "n10", "192.168.56.30") end
  config.vm.define "n10" do |config| vm(config, "n10", "192.168.56.30") end
  #config.vm.define "n11" do |config| vm(config, "n11", "192.168.56.31") end
  config.vm.define "n11" do |config| vm(config, "n11", "192.168.56.31") end
  #config.vm.define "n12" do |config| vm(config, "n12", "192.168.56.32") end
  config.vm.define "n12" do |config| vm(config, "n12", "192.168.56.32") end
  #config.vm.define "n13" do |config| vm(config, "n13", "192.168.56.33") end
  config.vm.define "n13" do |config| vm(config, "n13", "192.168.56.33") end
  #config.vm.define "n14" do |config| vm(config, "n14", "192.168.56.34") end
  config.vm.define "n14" do |config| vm(config, "n14", "192.168.56.34") end
end
@@ -3,10 +3,11 @@

set -x

#for ppatch in task3c task3a tsfix2; do
for ppatch in v093 v1rc1; do
for ppatch in tsfix2; do
  #for psc in c cp cdp r pr cpr dpr; do
  for ptsk in reg2 set2; do
  for psc in cdp r pr cpr dpr; do
    for psc in c cp cdp r pr cpr dpr; do
    #for ptsk in reg2 set1 set2; do
    for ptsk in set1; do
      for irun in $(seq 10); do
        lein run test --nodes-file nodes.vagrant \
          --time-limit 60 --rate 100 --concurrency 100 --ops-per-key 100 \
@@ -38,9 +38,7 @@
   "tsfix2" "c82d91c6bccf307186332b6c5c6fc0b128b1b2b1"
   "task3a" "707442f5de416fdbed4681a33b739f0a787b7834"
   "task3b" "431b28e0cfdc9cac6c649193cf602108a8b02997"
-  "task3c" "0041b013a473e3ae72f50209d8f79db75a72848b"
-  "v093" "v0.9.3"
-  "v1rc1" "v1.0.0-rc1"})
+  "task3c" "0041b013a473e3ae72f50209d8f79db75a72848b"})

 (def cli-opts
   "Additional command line options."
@@ -43,7 +43,7 @@
     "rpc_bind_addr = \"0.0.0.0:3901\"\n"
     "rpc_public_addr = \"" node ":3901\"\n"
     "db_engine = \"lmdb\"\n"
-    "replication_mode = \"3\"\n"
+    "replication_mode = \"2\"\n"
     "data_dir = \"" data-dir "\"\n"
     "metadata_dir = \"" meta-dir "\"\n"
     "[s3_api]\n"
@@ -11,7 +11,6 @@ in
 {
   # --- Dev shell inherited from flake.nix ---
   devShell = devShells.default;
-  devShellFull = devShells.full;

   # --- Continuous integration shell ---
   # The shell used for all CI jobs (along with devShell)

@@ -115,7 +114,7 @@ in
   shellHook = ''
     function refresh_cache {
       pass show deuxfleurs/nix_priv_key > /tmp/nix-signing-key.sec
-      for attr in pkgs.amd64.debug test.amd64 pkgs.{amd64,i386,arm,arm64}.release; do
+      for attr in clippy.amd64 test.amd64 pkgs.{amd64,i386,arm,arm64}.release; do
        echo "Updating cache for ''${attr}"
        nix copy -j8 \
          --to 's3://nix?endpoint=garage.deuxfleurs.fr&region=garage&secret-key=/tmp/nix-signing-key.sec' \
@@ -1,6 +1,6 @@
 [package]
 name = "garage_api"
-version = "1.0.1"
+version = "0.10.0"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"

@@ -28,8 +28,6 @@ async-trait.workspace = true
 base64.workspace = true
 bytes.workspace = true
 chrono.workspace = true
-crc32fast.workspace = true
-crc32c.workspace = true
 crypto-common.workspace = true
 err-derive.workspace = true
 hex.workspace = true

@@ -39,7 +37,6 @@ tracing.workspace = true
 md-5.workspace = true
 nom.workspace = true
 pin-project.workspace = true
-sha1.workspace = true
 sha2.workspace = true

 futures.workspace = true
@@ -27,7 +27,7 @@ pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<
 			i.id,
 			NodeResp {
 				id: hex::encode(i.id),
-				addr: i.addr,
+				addr: Some(i.addr),
 				hostname: i.status.hostname,
 				is_up: i.is_up,
 				last_seen_secs_ago: i.last_seen_secs_ago,

@@ -70,30 +70,26 @@ pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<
 					);
 				}
 				Some(n) => {
-					n.role = Some(role);
+					if n.role.is_none() {
+						n.role = Some(role);
+					}
 				}
 			}
 		}
 	}

-	for ver in layout.versions().iter().rev().skip(1) {
+	for ver in layout.versions.iter().rev().skip(1) {
 		for (id, _, role) in ver.roles.items().iter() {
 			if let layout::NodeRoleV(Some(r)) = role {
-				if r.capacity.is_some() {
-					if let Some(n) = nodes.get_mut(id) {
-						if n.role.is_none() {
-							n.draining = true;
-						}
-					} else {
-						nodes.insert(
-							*id,
-							NodeResp {
-								id: hex::encode(id),
-								draining: true,
-								..Default::default()
-							},
-						);
-					}
+				if !nodes.contains_key(id) && r.capacity.is_some() {
+					nodes.insert(
+						*id,
+						NodeResp {
+							id: hex::encode(id),
+							draining: true,
+							..Default::default()
+						},
+					);
 				}
 			}
 		}
 	}
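
The left-hand side of the hunk above distinguishes two cases when flagging draining nodes: update an entry that is already in the map, or insert a fresh one. A self-contained toy model of that logic, with NodeResp reduced to a simplified stand-in struct rather than the real API type:

use std::collections::HashMap;

#[derive(Default)]
struct NodeResp {
	id: String,
	role: Option<String>,
	draining: bool,
}

fn mark_draining(nodes: &mut HashMap<u64, NodeResp>, id: u64) {
	if let Some(n) = nodes.get_mut(&id) {
		// Node already listed: only flag it if it holds no current role.
		if n.role.is_none() {
			n.draining = true;
		}
	} else {
		// Node known only from an older layout version: insert a draining entry.
		nodes.insert(
			id,
			NodeResp {
				id: format!("{:x}", id),
				draining: true,
				..Default::default()
			},
		);
	}
}

fn main() {
	let mut nodes = HashMap::new();
	mark_draining(&mut nodes, 0xa46c);
	assert!(nodes[&0xa46c].draining);
}
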
@@ -160,7 +156,7 @@ pub async fn handle_connect_cluster_nodes(
 }

 pub async fn handle_get_cluster_layout(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
-	let res = format_cluster_layout(garage.system.cluster_layout().inner());
+	let res = format_cluster_layout(&garage.system.cluster_layout());

 	Ok(json_ok_response(&res)?)
 }

@@ -299,7 +295,7 @@ pub async fn handle_update_cluster_layout(
 ) -> Result<Response<ResBody>, Error> {
 	let updates = parse_json_body::<UpdateClusterLayoutRequest, _, Error>(req).await?;

-	let mut layout = garage.system.cluster_layout().inner().clone();
+	let mut layout = garage.system.cluster_layout().clone();

 	let mut roles = layout.current().roles.clone();
 	roles.merge(&layout.staging.get().roles);

@@ -345,7 +341,7 @@ pub async fn handle_apply_cluster_layout(
 ) -> Result<Response<ResBody>, Error> {
 	let param = parse_json_body::<ApplyLayoutRequest, _, Error>(req).await?;

-	let layout = garage.system.cluster_layout().inner().clone();
+	let layout = garage.system.cluster_layout().clone();
 	let (layout, msg) = layout.apply_staged_changes(Some(param.version))?;

 	garage

@@ -364,7 +360,7 @@ pub async fn handle_apply_cluster_layout(
 pub async fn handle_revert_cluster_layout(
 	garage: &Arc<Garage>,
 ) -> Result<Response<ResBody>, Error> {
-	let layout = garage.system.cluster_layout().inner().clone();
+	let layout = garage.system.cluster_layout().clone();
 	let layout = layout.revert_staged_changes()?;
 	garage
 		.system
@@ -2,7 +2,6 @@ use std::convert::Infallible;
 use std::fs::{self, Permissions};
 use std::os::unix::fs::PermissionsExt;
 use std::sync::Arc;
-use std::time::Duration;

 use async_trait::async_trait;

@@ -20,7 +19,6 @@ use hyper_util::rt::TokioIo;
 use tokio::io::{AsyncRead, AsyncWrite};
 use tokio::net::{TcpListener, TcpStream, UnixListener, UnixStream};
 use tokio::sync::watch;
-use tokio::time::{sleep_until, Instant};

 use opentelemetry::{
 	global,

@@ -293,7 +291,7 @@ where
 	let connection_collector = tokio::spawn({
 		let server_name = server_name.clone();
 		async move {
-			let mut connections = FuturesUnordered::<tokio::task::JoinHandle<()>>::new();
+			let mut connections = FuturesUnordered::new();
 			loop {
 				let collect_next = async {
 					if connections.is_empty() {

@@ -314,34 +312,23 @@ where
 					}
 				}
 			}
-			let deadline = Instant::now() + Duration::from_secs(10);
-			while !connections.is_empty() {
-				info!(
-					"{} server: {} connections still open, deadline in {:.2}s",
-					server_name,
-					connections.len(),
-					(deadline - Instant::now()).as_secs_f32(),
-				);
-				tokio::select! {
-					conn_res = connections.next() => {
-						trace!(
-							"{} server: HTTP connection finished: {:?}",
-							server_name,
-							conn_res.unwrap(),
-						);
-					}
-					_ = sleep_until(deadline) => {
-						warn!("{} server: exit deadline reached with {} connections still open, killing them now",
-							server_name,
-							connections.len());
-						for conn in connections.iter() {
-							conn.abort();
-						}
-						for conn in connections {
-							assert!(conn.await.unwrap_err().is_cancelled());
-						}
-						break;
-					}
-				}
+			if !connections.is_empty() {
+				info!(
+					"{} server: {} connections still open",
+					server_name,
+					connections.len()
+				);
+				while let Some(conn_res) = connections.next().await {
+					trace!(
+						"{} server: HTTP connection finished: {:?}",
+						server_name,
+						conn_res
+					);
+					info!(
+						"{} server: {} connections still open",
+						server_name,
+						connections.len()
+					);
+				}
 			}
 		}
 	}
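
The right-hand side of the hunk above waits for open connections indefinitely at shutdown, while the left-hand side adds a 10-second deadline after which remaining connections are aborted. A minimal runnable sketch of that deadline pattern; the spawned timed sleeps are made up for the demo and stand in for real HTTP connection tasks:

use std::time::Duration;

use futures::stream::{FuturesUnordered, StreamExt};
use tokio::time::{sleep, sleep_until, Instant};

#[tokio::main]
async fn main() {
	let mut connections = FuturesUnordered::<tokio::task::JoinHandle<()>>::new();
	// Pretend three connections are still open when shutdown starts;
	// the last one outlives the deadline on purpose.
	for secs in [1u64, 2, 60] {
		connections.push(tokio::spawn(async move {
			sleep(Duration::from_secs(secs)).await;
		}));
	}

	let deadline = Instant::now() + Duration::from_secs(10);
	while !connections.is_empty() {
		println!("{} connections still open", connections.len());
		tokio::select! {
			// One connection finished before the deadline: keep draining.
			_conn_res = connections.next() => {}
			// Deadline reached: abort whatever is left.
			_ = sleep_until(deadline) => {
				for conn in connections.iter() {
					conn.abort();
				}
				for conn in connections {
					// An aborted JoinHandle resolves to a "cancelled" JoinError.
					assert!(conn.await.unwrap_err().is_cancelled());
				}
				break;
			}
		}
	}
}
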
@@ -77,7 +77,7 @@ impl ApiHandler for K2VApiServer {
 		} = endpoint;
 		let garage = self.garage.clone();

-		// The OPTIONS method is processed early, before we even check for an API key
+		// The OPTIONS method is procesed early, before we even check for an API key
 		if let Endpoint::Options = endpoint {
 			let options_res = handle_options_api(garage, &req, Some(bucket_name))
 				.await
@@ -204,7 +204,7 @@ macro_rules! generateQueryParameters {
 		}

 		/// Get an error message in case not all parameters where used when extracting them to
-		/// build an Endpoint variant
+		/// build an Enpoint variant
 		fn nonempty_message(&self) -> Option<&str> {
 			if self.keyword.is_some() {
 				Some("Keyword not used")
@@ -325,7 +325,7 @@ impl ApiHandler for S3ApiServer {
 				part_number_marker: part_number_marker.map(|p| p.min(10000)),
 				max_parts: max_parts.unwrap_or(1000).clamp(1, 1000),
 			};
-			handle_list_parts(ctx, req, &query).await
+			handle_list_parts(ctx, &query).await
 		}
 		Endpoint::DeleteObjects {} => handle_delete_objects(ctx, req, content_sha256).await,
 		Endpoint::GetBucketWebsite {} => handle_get_website(ctx).await,
@@ -1,406 +0,0 @@
-use std::convert::{TryFrom, TryInto};
-use std::hash::Hasher;
-
-use base64::prelude::*;
-use crc32c::Crc32cHasher as Crc32c;
-use crc32fast::Hasher as Crc32;
-use md5::{Digest, Md5};
-use sha1::Sha1;
-use sha2::Sha256;
-
-use http::{HeaderMap, HeaderName, HeaderValue};
-
-use garage_util::data::*;
-use garage_util::error::OkOrMessage;
-
-use garage_model::s3::object_table::*;
-
-use crate::s3::error::*;
-
-pub const X_AMZ_CHECKSUM_ALGORITHM: HeaderName =
-	HeaderName::from_static("x-amz-checksum-algorithm");
-pub const X_AMZ_CHECKSUM_MODE: HeaderName = HeaderName::from_static("x-amz-checksum-mode");
-pub const X_AMZ_CHECKSUM_CRC32: HeaderName = HeaderName::from_static("x-amz-checksum-crc32");
-pub const X_AMZ_CHECKSUM_CRC32C: HeaderName = HeaderName::from_static("x-amz-checksum-crc32c");
-pub const X_AMZ_CHECKSUM_SHA1: HeaderName = HeaderName::from_static("x-amz-checksum-sha1");
-pub const X_AMZ_CHECKSUM_SHA256: HeaderName = HeaderName::from_static("x-amz-checksum-sha256");
-
-pub type Crc32Checksum = [u8; 4];
-pub type Crc32cChecksum = [u8; 4];
-pub type Md5Checksum = [u8; 16];
-pub type Sha1Checksum = [u8; 20];
-pub type Sha256Checksum = [u8; 32];
-
-#[derive(Debug, Default)]
-pub(crate) struct ExpectedChecksums {
-	// base64-encoded md5 (content-md5 header)
-	pub md5: Option<String>,
-	// content_sha256 (as a Hash / FixedBytes32)
-	pub sha256: Option<Hash>,
-	// extra x-amz-checksum-* header
-	pub extra: Option<ChecksumValue>,
-}
-
-pub(crate) struct Checksummer {
-	pub crc32: Option<Crc32>,
-	pub crc32c: Option<Crc32c>,
-	pub md5: Option<Md5>,
-	pub sha1: Option<Sha1>,
-	pub sha256: Option<Sha256>,
-}
-
-#[derive(Default)]
-pub(crate) struct Checksums {
-	pub crc32: Option<Crc32Checksum>,
-	pub crc32c: Option<Crc32cChecksum>,
-	pub md5: Option<Md5Checksum>,
-	pub sha1: Option<Sha1Checksum>,
-	pub sha256: Option<Sha256Checksum>,
-}
-
-impl Checksummer {
-	pub(crate) fn init(expected: &ExpectedChecksums, require_md5: bool) -> Self {
-		let mut ret = Self {
-			crc32: None,
-			crc32c: None,
-			md5: None,
-			sha1: None,
-			sha256: None,
-		};
-
-		if expected.md5.is_some() || require_md5 {
-			ret.md5 = Some(Md5::new());
-		}
-		if expected.sha256.is_some() || matches!(&expected.extra, Some(ChecksumValue::Sha256(_))) {
-			ret.sha256 = Some(Sha256::new());
-		}
-		if matches!(&expected.extra, Some(ChecksumValue::Crc32(_))) {
-			ret.crc32 = Some(Crc32::new());
-		}
-		if matches!(&expected.extra, Some(ChecksumValue::Crc32c(_))) {
-			ret.crc32c = Some(Crc32c::default());
-		}
-		if matches!(&expected.extra, Some(ChecksumValue::Sha1(_))) {
-			ret.sha1 = Some(Sha1::new());
-		}
-		ret
-	}
-
-	pub(crate) fn add(mut self, algo: Option<ChecksumAlgorithm>) -> Self {
-		match algo {
-			Some(ChecksumAlgorithm::Crc32) => {
-				self.crc32 = Some(Crc32::new());
-			}
-			Some(ChecksumAlgorithm::Crc32c) => {
-				self.crc32c = Some(Crc32c::default());
-			}
-			Some(ChecksumAlgorithm::Sha1) => {
-				self.sha1 = Some(Sha1::new());
-			}
-			Some(ChecksumAlgorithm::Sha256) => {
-				self.sha256 = Some(Sha256::new());
-			}
-			None => (),
-		}
-		self
-	}
-
-	pub(crate) fn update(&mut self, bytes: &[u8]) {
-		if let Some(crc32) = &mut self.crc32 {
-			crc32.update(bytes);
-		}
-		if let Some(crc32c) = &mut self.crc32c {
-			crc32c.write(bytes);
-		}
-		if let Some(md5) = &mut self.md5 {
-			md5.update(bytes);
-		}
-		if let Some(sha1) = &mut self.sha1 {
-			sha1.update(bytes);
-		}
-		if let Some(sha256) = &mut self.sha256 {
-			sha256.update(bytes);
-		}
-	}
-
-	pub(crate) fn finalize(self) -> Checksums {
-		Checksums {
-			crc32: self.crc32.map(|x| u32::to_be_bytes(x.finalize())),
-			crc32c: self
-				.crc32c
-				.map(|x| u32::to_be_bytes(u32::try_from(x.finish()).unwrap())),
-			md5: self.md5.map(|x| x.finalize()[..].try_into().unwrap()),
-			sha1: self.sha1.map(|x| x.finalize()[..].try_into().unwrap()),
-			sha256: self.sha256.map(|x| x.finalize()[..].try_into().unwrap()),
-		}
-	}
-}
-
-impl Checksums {
-	pub fn verify(&self, expected: &ExpectedChecksums) -> Result<(), Error> {
-		if let Some(expected_md5) = &expected.md5 {
-			match self.md5 {
-				Some(md5) if BASE64_STANDARD.encode(&md5) == expected_md5.trim_matches('"') => (),
-				_ => {
-					return Err(Error::InvalidDigest(
-						"MD5 checksum verification failed (from content-md5)".into(),
-					))
-				}
-			}
-		}
-		if let Some(expected_sha256) = &expected.sha256 {
-			match self.sha256 {
-				Some(sha256) if &sha256[..] == expected_sha256.as_slice() => (),
-				_ => {
-					return Err(Error::InvalidDigest(
-						"SHA256 checksum verification failed (from x-amz-content-sha256)".into(),
-					))
-				}
-			}
-		}
-		if let Some(extra) = expected.extra {
-			let algo = extra.algorithm();
-			if self.extract(Some(algo)) != Some(extra) {
-				return Err(Error::InvalidDigest(format!(
-					"Failed to validate checksum for algorithm {:?}",
-					algo
-				)));
-			}
-		}
-		Ok(())
-	}
-
-	pub fn extract(&self, algo: Option<ChecksumAlgorithm>) -> Option<ChecksumValue> {
-		match algo {
-			None => None,
-			Some(ChecksumAlgorithm::Crc32) => Some(ChecksumValue::Crc32(self.crc32.unwrap())),
-			Some(ChecksumAlgorithm::Crc32c) => Some(ChecksumValue::Crc32c(self.crc32c.unwrap())),
-			Some(ChecksumAlgorithm::Sha1) => Some(ChecksumValue::Sha1(self.sha1.unwrap())),
-			Some(ChecksumAlgorithm::Sha256) => Some(ChecksumValue::Sha256(self.sha256.unwrap())),
-		}
-	}
-}
-
-// ----
-
-#[derive(Default)]
-pub(crate) struct MultipartChecksummer {
-	pub md5: Md5,
-	pub extra: Option<MultipartExtraChecksummer>,
-}
-
-pub(crate) enum MultipartExtraChecksummer {
-	Crc32(Crc32),
-	Crc32c(Crc32c),
-	Sha1(Sha1),
-	Sha256(Sha256),
-}
-
-impl MultipartChecksummer {
-	pub(crate) fn init(algo: Option<ChecksumAlgorithm>) -> Self {
-		Self {
-			md5: Md5::new(),
-			extra: match algo {
-				None => None,
-				Some(ChecksumAlgorithm::Crc32) => {
-					Some(MultipartExtraChecksummer::Crc32(Crc32::new()))
-				}
-				Some(ChecksumAlgorithm::Crc32c) => {
-					Some(MultipartExtraChecksummer::Crc32c(Crc32c::default()))
-				}
-				Some(ChecksumAlgorithm::Sha1) => Some(MultipartExtraChecksummer::Sha1(Sha1::new())),
-				Some(ChecksumAlgorithm::Sha256) => {
-					Some(MultipartExtraChecksummer::Sha256(Sha256::new()))
-				}
-			},
-		}
-	}
-
-	pub(crate) fn update(
-		&mut self,
-		etag: &str,
-		checksum: Option<ChecksumValue>,
-	) -> Result<(), Error> {
-		self.md5
-			.update(&hex::decode(&etag).ok_or_message("invalid etag hex")?);
-		match (&mut self.extra, checksum) {
-			(None, _) => (),
-			(
-				Some(MultipartExtraChecksummer::Crc32(ref mut crc32)),
-				Some(ChecksumValue::Crc32(x)),
-			) => {
-				crc32.update(&x);
-			}
-			(
-				Some(MultipartExtraChecksummer::Crc32c(ref mut crc32c)),
-				Some(ChecksumValue::Crc32c(x)),
-			) => {
-				crc32c.write(&x);
-			}
-			(Some(MultipartExtraChecksummer::Sha1(ref mut sha1)), Some(ChecksumValue::Sha1(x))) => {
-				sha1.update(&x);
-			}
-			(
-				Some(MultipartExtraChecksummer::Sha256(ref mut sha256)),
-				Some(ChecksumValue::Sha256(x)),
-			) => {
-				sha256.update(&x);
-			}
-			(Some(_), b) => {
-				return Err(Error::internal_error(format!(
-					"part checksum was not computed correctly, got: {:?}",
-					b
-				)))
-			}
-		}
-		Ok(())
-	}
-
-	pub(crate) fn finalize(self) -> (Md5Checksum, Option<ChecksumValue>) {
-		let md5 = self.md5.finalize()[..].try_into().unwrap();
-		let extra = match self.extra {
-			None => None,
-			Some(MultipartExtraChecksummer::Crc32(crc32)) => {
-				Some(ChecksumValue::Crc32(u32::to_be_bytes(crc32.finalize())))
-			}
-			Some(MultipartExtraChecksummer::Crc32c(crc32c)) => Some(ChecksumValue::Crc32c(
-				u32::to_be_bytes(u32::try_from(crc32c.finish()).unwrap()),
-			)),
-			Some(MultipartExtraChecksummer::Sha1(sha1)) => {
-				Some(ChecksumValue::Sha1(sha1.finalize()[..].try_into().unwrap()))
-			}
-			Some(MultipartExtraChecksummer::Sha256(sha256)) => Some(ChecksumValue::Sha256(
-				sha256.finalize()[..].try_into().unwrap(),
-			)),
-		};
-		(md5, extra)
-	}
-}
-
-// ----
-
-/// Extract the value of the x-amz-checksum-algorithm header
-pub(crate) fn request_checksum_algorithm(
-	headers: &HeaderMap<HeaderValue>,
-) -> Result<Option<ChecksumAlgorithm>, Error> {
-	match headers.get(X_AMZ_CHECKSUM_ALGORITHM) {
-		None => Ok(None),
-		Some(x) if x == "CRC32" => Ok(Some(ChecksumAlgorithm::Crc32)),
-		Some(x) if x == "CRC32C" => Ok(Some(ChecksumAlgorithm::Crc32c)),
-		Some(x) if x == "SHA1" => Ok(Some(ChecksumAlgorithm::Sha1)),
-		Some(x) if x == "SHA256" => Ok(Some(ChecksumAlgorithm::Sha256)),
-		_ => Err(Error::bad_request("invalid checksum algorithm")),
-	}
-}
-
-/// Extract the value of any of the x-amz-checksum-* headers
-pub(crate) fn request_checksum_value(
-	headers: &HeaderMap<HeaderValue>,
-) -> Result<Option<ChecksumValue>, Error> {
-	let mut ret = vec![];
-
-	if let Some(crc32_str) = headers.get(X_AMZ_CHECKSUM_CRC32) {
-		let crc32 = BASE64_STANDARD
-			.decode(&crc32_str)
-			.ok()
-			.and_then(|x| x.try_into().ok())
-			.ok_or_bad_request("invalid x-amz-checksum-crc32 header")?;
-		ret.push(ChecksumValue::Crc32(crc32))
-	}
-	if let Some(crc32c_str) = headers.get(X_AMZ_CHECKSUM_CRC32C) {
-		let crc32c = BASE64_STANDARD
-			.decode(&crc32c_str)
-			.ok()
-			.and_then(|x| x.try_into().ok())
-			.ok_or_bad_request("invalid x-amz-checksum-crc32c header")?;
-		ret.push(ChecksumValue::Crc32c(crc32c))
-	}
-	if let Some(sha1_str) = headers.get(X_AMZ_CHECKSUM_SHA1) {
-		let sha1 = BASE64_STANDARD
-			.decode(&sha1_str)
-			.ok()
-			.and_then(|x| x.try_into().ok())
-			.ok_or_bad_request("invalid x-amz-checksum-sha1 header")?;
-		ret.push(ChecksumValue::Sha1(sha1))
-	}
-	if let Some(sha256_str) = headers.get(X_AMZ_CHECKSUM_SHA256) {
-		let sha256 = BASE64_STANDARD
-			.decode(&sha256_str)
-			.ok()
-			.and_then(|x| x.try_into().ok())
-			.ok_or_bad_request("invalid x-amz-checksum-sha256 header")?;
-		ret.push(ChecksumValue::Sha256(sha256))
-	}
-
-	if ret.len() > 1 {
-		return Err(Error::bad_request(
-			"multiple x-amz-checksum-* headers given",
-		));
-	}
-	Ok(ret.pop())
-}
-
-/// Checks for the presence of x-amz-checksum-algorithm
-/// if so extract the corresponding x-amz-checksum-* value
-pub(crate) fn request_checksum_algorithm_value(
-	headers: &HeaderMap<HeaderValue>,
-) -> Result<Option<ChecksumValue>, Error> {
-	match headers.get(X_AMZ_CHECKSUM_ALGORITHM) {
-		Some(x) if x == "CRC32" => {
-			let crc32 = headers
-				.get(X_AMZ_CHECKSUM_CRC32)
-				.and_then(|x| BASE64_STANDARD.decode(&x).ok())
-				.and_then(|x| x.try_into().ok())
-				.ok_or_bad_request("invalid x-amz-checksum-crc32 header")?;
-			Ok(Some(ChecksumValue::Crc32(crc32)))
-		}
-		Some(x) if x == "CRC32C" => {
-			let crc32c = headers
-				.get(X_AMZ_CHECKSUM_CRC32C)
-				.and_then(|x| BASE64_STANDARD.decode(&x).ok())
-				.and_then(|x| x.try_into().ok())
-				.ok_or_bad_request("invalid x-amz-checksum-crc32c header")?;
-			Ok(Some(ChecksumValue::Crc32c(crc32c)))
-		}
-		Some(x) if x == "SHA1" => {
-			let sha1 = headers
-				.get(X_AMZ_CHECKSUM_SHA1)
-				.and_then(|x| BASE64_STANDARD.decode(&x).ok())
-				.and_then(|x| x.try_into().ok())
-				.ok_or_bad_request("invalid x-amz-checksum-sha1 header")?;
-			Ok(Some(ChecksumValue::Sha1(sha1)))
-		}
-		Some(x) if x == "SHA256" => {
-			let sha256 = headers
-				.get(X_AMZ_CHECKSUM_SHA256)
-				.and_then(|x| BASE64_STANDARD.decode(&x).ok())
-				.and_then(|x| x.try_into().ok())
-				.ok_or_bad_request("invalid x-amz-checksum-sha256 header")?;
-			Ok(Some(ChecksumValue::Sha256(sha256)))
-		}
-		Some(_) => Err(Error::bad_request("invalid x-amz-checksum-algorithm")),
-		None => Ok(None),
-	}
-}
-
-pub(crate) fn add_checksum_response_headers(
-	checksum: &Option<ChecksumValue>,
-	mut resp: http::response::Builder,
-) -> http::response::Builder {
-	match checksum {
-		Some(ChecksumValue::Crc32(crc32)) => {
-			resp = resp.header(X_AMZ_CHECKSUM_CRC32, BASE64_STANDARD.encode(&crc32));
-		}
-		Some(ChecksumValue::Crc32c(crc32c)) => {
-			resp = resp.header(X_AMZ_CHECKSUM_CRC32C, BASE64_STANDARD.encode(&crc32c));
-		}
-		Some(ChecksumValue::Sha1(sha1)) => {
-			resp = resp.header(X_AMZ_CHECKSUM_SHA1, BASE64_STANDARD.encode(&sha1));
-		}
-		Some(ChecksumValue::Sha256(sha256)) => {
-			resp = resp.header(X_AMZ_CHECKSUM_SHA256, BASE64_STANDARD.encode(&sha256));
-		}
-		None => (),
-	}
-	resp
-}
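
The deleted module drives several hashers over the same byte stream and exposes the results in the fixed-size forms that the x-amz-checksum-* headers carry. A standalone sketch of that single-pass, multi-digest idea, using the same crates the Cargo.toml hunk above removes (crc32fast, md-5, sha2, plus base64); this is an illustration, not the garage_api code itself:

use base64::prelude::*;
use crc32fast::Hasher as Crc32;
use md5::{Digest, Md5};
use sha2::Sha256;

fn main() {
	let body = b"hello world";

	let mut crc32 = Crc32::new();
	let mut md5 = Md5::new();
	let mut sha256 = Sha256::new();

	// A single pass over the data updates every digest, as Checksummer::update did.
	for chunk in body.chunks(4) {
		crc32.update(chunk);
		md5.update(chunk);
		sha256.update(chunk);
	}

	// Finalize to the fixed-size byte arrays the module aliased as
	// Crc32Checksum ([u8; 4]), Md5Checksum ([u8; 16]), Sha256Checksum ([u8; 32]).
	let crc32: [u8; 4] = u32::to_be_bytes(crc32.finalize());
	let md5: [u8; 16] = md5.finalize().into();
	let sha256: [u8; 32] = sha256.finalize().into();

	println!("x-amz-checksum-crc32: {}", BASE64_STANDARD.encode(crc32));
	println!("content-md5: {}", BASE64_STANDARD.encode(md5));
	println!("x-amz-checksum-sha256: {}", BASE64_STANDARD.encode(sha256));
}
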
@@ -2,6 +2,7 @@ use std::pin::Pin;
 use std::time::{Duration, SystemTime, UNIX_EPOCH};

 use futures::{stream, stream::Stream, StreamExt, TryStreamExt};
+use md5::{Digest as Md5Digest, Md5};

 use bytes::Bytes;
 use hyper::{Request, Response};

@@ -22,12 +23,11 @@ use garage_model::s3::version_table::*;

 use crate::helpers::*;
 use crate::s3::api_server::{ReqBody, ResBody};
-use crate::s3::checksum::*;
 use crate::s3::encryption::EncryptionParams;
 use crate::s3::error::*;
 use crate::s3::get::full_object_byte_stream;
 use crate::s3::multipart;
-use crate::s3::put::{get_headers, save_stream, ChecksumMode, SaveStreamResult};
+use crate::s3::put::{get_headers, save_stream, SaveStreamResult};
 use crate::s3::xml::{self as s3_xml, xmlns_tag};

 // -------- CopyObject ---------
@@ -39,8 +39,6 @@ pub async fn handle_copy(
 ) -> Result<Response<ResBody>, Error> {
 	let copy_precondition = CopyPreconditionHeaders::parse(req)?;

-	let checksum_algorithm = request_checksum_algorithm(req.headers())?;
-
 	let source_object = get_copy_source(&ctx, req).await?;

 	let (source_version, source_version_data, source_version_meta) =

@@ -50,7 +48,7 @@ pub async fn handle_copy(
 	copy_precondition.check(source_version, &source_version_meta.etag)?;

 	// Determine encryption parameters
-	let (source_encryption, source_object_meta_inner) =
+	let (source_encryption, source_object_headers) =
 		EncryptionParams::check_decrypt_for_copy_source(
 			&ctx.garage,
 			req.headers(),

@@ -58,54 +56,23 @@ pub async fn handle_copy(
 	)?;
 	let dest_encryption = EncryptionParams::new_from_headers(&ctx.garage, req.headers())?;

-	// Extract source checksum info before source_object_meta_inner is consumed
-	let source_checksum = source_object_meta_inner.checksum;
-	let source_checksum_algorithm = source_checksum.map(|x| x.algorithm());
-
-	// If source object has a checksum, the destination object must as well.
-	// The x-amz-checksum-algorithm header allows to change that algorithm,
-	// but if it is absent, we must use the same as before
-	let checksum_algorithm = checksum_algorithm.or(source_checksum_algorithm);
-
-	// Determine metadata of destination object
-	let was_multipart = source_version_meta.etag.contains('-');
-	let dest_object_meta = ObjectVersionMetaInner {
-		headers: match req.headers().get("x-amz-metadata-directive") {
-			Some(v) if v == hyper::header::HeaderValue::from_static("REPLACE") => {
-				get_headers(req.headers())?
-			}
-			_ => source_object_meta_inner.into_owned().headers,
-		},
-		checksum: source_checksum,
-	};
+	// Determine headers of destination object
+	let dest_object_headers = match req.headers().get("x-amz-metadata-directive") {
+		Some(v) if v == hyper::header::HeaderValue::from_static("REPLACE") => {
+			get_headers(req.headers())?
+		}
+		_ => source_object_headers.into_owned(),
+	};

 	// Do actual object copying
-	//
-	// In any of the following scenarios, we need to read the whole object
-	// data and re-write it again:
-	//
-	// - the data needs to be decrypted or encrypted
-	// - the requested checksum algorithm requires us to recompute a checksum
-	// - the original object was a multipart upload and a checksum algorithm
-	//   is defined (AWS specifies that in this case, we must recompute the
-	//   checksum from scratch as if this was a single big object and not
-	//   a multipart object, as the checksums are not computed in the same way)
-	//
-	// In other cases, we can just copy the metadata and reference the same blocks.
-	//
-	// See: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
-
-	let must_recopy = !EncryptionParams::is_same(&source_encryption, &dest_encryption)
-		|| source_checksum_algorithm != checksum_algorithm
-		|| (was_multipart && checksum_algorithm.is_some());
-
-	let res = if !must_recopy {
-		// In most cases, we can just copy the metadata and link blocks of the
+	let res = if EncryptionParams::is_same(&source_encryption, &dest_encryption) {
+		// If source and dest are both unencrypted, or if the encryption keys
+		// are the same, we can just copy the metadata and link blocks of the
 		// old object from the new object.
 		handle_copy_metaonly(
 			ctx,
 			dest_key,
-			dest_object_meta,
+			dest_object_headers,
 			dest_encryption,
 			source_version,
 			source_version_data,

@@ -113,27 +80,16 @@ pub async fn handle_copy(
 		)
 		.await?
 	} else {
-		let expected_checksum = ExpectedChecksums {
-			md5: None,
-			sha256: None,
-			extra: source_checksum,
-		};
-		let checksum_mode = if was_multipart || source_checksum_algorithm != checksum_algorithm {
-			ChecksumMode::Calculate(checksum_algorithm)
-		} else {
-			ChecksumMode::Verify(&expected_checksum)
-		};
 		// If source and dest encryption use different keys,
 		// we must decrypt content and re-encrypt, so rewrite all data blocks.
 		handle_copy_reencrypt(
 			ctx,
 			dest_key,
-			dest_object_meta,
+			dest_object_headers,
 			dest_encryption,
 			source_version,
 			source_version_data,
 			source_encryption,
-			checksum_mode,
 		)
 		.await?
 	};
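
The left-hand comment block spells out exactly when CopyObject must re-read the data instead of relinking blocks. The same decision, condensed into a small function with simplified stand-in types (this is a restatement for clarity, not the garage_api code):

#[derive(PartialEq, Clone, Copy)]
enum Algo {
	Crc32,
	Sha256,
}

fn must_recopy(
	same_encryption: bool,
	source_algo: Option<Algo>,
	dest_algo: Option<Algo>,
	was_multipart: bool,
) -> bool {
	// Re-read the bytes when encryption keys differ, the checksum algorithm
	// changes, or a multipart source needs its checksum recomputed whole.
	!same_encryption
		|| source_algo != dest_algo
		|| (was_multipart && dest_algo.is_some())
}

fn main() {
	// Same keys, same checksum algorithm, single-part object: metadata-only copy.
	assert!(!must_recopy(true, Some(Algo::Sha256), Some(Algo::Sha256), false));
	// Multipart source with a checksum requested: AWS requires recomputing the
	// checksum over the whole object, so the data must be re-read.
	assert!(must_recopy(true, Some(Algo::Crc32), Some(Algo::Crc32), true));
}
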
@@ -159,7 +115,7 @@ pub async fn handle_copy(
 async fn handle_copy_metaonly(
 	ctx: ReqCtx,
 	dest_key: &str,
-	dest_object_meta: ObjectVersionMetaInner,
+	dest_object_headers: ObjectVersionHeaders,
 	dest_encryption: EncryptionParams,
 	source_version: &ObjectVersion,
 	source_version_data: &ObjectVersionData,

@@ -176,7 +132,7 @@ async fn handle_copy_metaonly(
 	let new_timestamp = now_msec();

 	let new_meta = ObjectVersionMeta {
-		encryption: dest_encryption.encrypt_meta(dest_object_meta)?,
+		encryption: dest_encryption.encrypt_headers(dest_object_headers)?,
 		size: source_version_meta.size,
 		etag: source_version_meta.etag.clone(),
 	};

@@ -224,7 +180,6 @@ async fn handle_copy_metaonly(
 		timestamp: new_timestamp,
 		state: ObjectVersionState::Uploading {
 			encryption: new_meta.encryption.clone(),
-			checksum_algorithm: None,
 			multipart: false,
 		},
 	};

@@ -297,12 +252,11 @@ async fn handle_copy_metaonly(
 async fn handle_copy_reencrypt(
 	ctx: ReqCtx,
 	dest_key: &str,
-	dest_object_meta: ObjectVersionMetaInner,
+	dest_object_headers: ObjectVersionHeaders,
 	dest_encryption: EncryptionParams,
 	source_version: &ObjectVersion,
 	source_version_data: &ObjectVersionData,
 	source_encryption: EncryptionParams,
-	checksum_mode: ChecksumMode<'_>,
 ) -> Result<SaveStreamResult, Error> {
 	// basically we will read the source data (decrypt if necessary)
 	// and save that in a new object (encrypt if necessary),

@@ -316,11 +270,12 @@ async fn handle_copy_reencrypt(

 	save_stream(
 		&ctx,
-		dest_object_meta,
+		dest_object_headers,
 		dest_encryption,
 		source_stream.map_err(|e| Error::from(GarageError::from(e))),
 		&dest_key.to_string(),
-		checksum_mode,
+		None,
+		None,
 	)
 	.await
 }

@@ -358,12 +313,8 @@ pub async fn handle_upload_part_copy(
 		req.headers(),
 		&source_version_meta.encryption,
 	)?;
-	let (dest_object_encryption, dest_object_checksum_algorithm) = match dest_version.state {
-		ObjectVersionState::Uploading {
-			encryption,
-			checksum_algorithm,
-			..
-		} => (encryption, checksum_algorithm),
+	let dest_object_encryption = match dest_version.state {
+		ObjectVersionState::Uploading { encryption, .. } => encryption,
 		_ => unreachable!(),
 	};
 	let (dest_encryption, _) =

@@ -461,9 +412,7 @@ pub async fn handle_upload_part_copy(
 		dest_mpu_part_key,
 		MpuPart {
 			version: dest_version_id,
-			// These are all filled in later (bottom of this function)
 			etag: None,
-			checksum: None,
 			size: None,
 		},
 	);

@@ -480,8 +429,7 @@ pub async fn handle_upload_part_copy(
 	garage.version_table.insert(&dest_version).await?;

 	// Now, actually copy the blocks
-	let mut checksummer = Checksummer::init(&Default::default(), !dest_encryption.is_encrypted())
-		.add(dest_object_checksum_algorithm);
+	let mut md5hasher = Md5::new();

 	// First, create a stream that is able to read the source blocks
 	// and extract the subrange if necessary.

@@ -547,24 +495,18 @@ pub async fn handle_upload_part_copy(
 		}

 		let data_len = data.len() as u64;
+		md5hasher.update(&data[..]);

-		let (checksummer_updated, (data_to_upload, final_hash)) =
-			tokio::task::spawn_blocking(move || {
-				checksummer.update(&data[..]);
-
-				let tup = match existing_block_hash {
-					Some(hash) if same_encryption => (None, hash),
-					_ => {
-						let data_enc = dest_encryption.encrypt_block(data)?;
-						let hash = blake2sum(&data_enc);
-						(Some(data_enc), hash)
-					}
-				};
-				Ok::<_, Error>((checksummer, tup))
+		let (final_data, must_upload, final_hash) = match existing_block_hash {
+			Some(hash) if same_encryption => (data, false, hash),
+			_ => tokio::task::spawn_blocking(move || {
+				let data_enc = dest_encryption.encrypt_block(data)?;
+				let hash = blake2sum(&data_enc);
+				Ok::<_, Error>((data_enc, true, hash))
 			})
 			.await
-			.unwrap()?;
-		checksummer = checksummer_updated;
+			.unwrap()?,
+		};

 		dest_version.blocks.clear();
 		dest_version.blocks.put(

@@ -589,7 +531,7 @@ pub async fn handle_upload_part_copy(
 		// Thing 1: if the block is not exactly a block that existed before,
 		// we need to insert that data as a new block.
 		async {
-			if let Some(final_data) = data_to_upload {
+			if must_upload {
 				garage
 					.block_manager
 					.rpc_put_block(final_hash, final_data, dest_encryption.is_encrypted(), None)

@@ -610,9 +552,8 @@ pub async fn handle_upload_part_copy(

 	assert_eq!(current_offset, source_range.length);

-	let checksums = checksummer.finalize();
-	let etag = dest_encryption.etag_from_md5(&checksums.md5);
-	let checksum = checksums.extract(dest_object_checksum_algorithm);
+	let data_md5sum = md5hasher.finalize();
+	let etag = dest_encryption.etag_from_md5(&data_md5sum);

 	// Put the part's ETag in the Versiontable
 	dest_mpu.parts.put(

@@ -620,7 +561,6 @@ pub async fn handle_upload_part_copy(
 		MpuPart {
 			version: dest_version_id,
 			etag: Some(etag.clone()),
-			checksum,
 			size: Some(current_offset),
 		},
 	);
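
In the rewritten block-copy loop above, the left-hand side moves the hasher into spawn_blocking for the CPU-heavy update and hands it back to the caller for the next block. A minimal sketch of that ownership round-trip, with Sha256 standing in for garage's Checksummer:

use sha2::{Digest, Sha256};

#[tokio::main]
async fn main() {
	let mut hasher = Sha256::new();
	for block in [vec![0u8; 1 << 20], vec![1u8; 1 << 20]] {
		// The closure takes ownership, so the hasher must be returned
		// alongside the result, just like (checksummer, tup) above.
		let (updated, len) = tokio::task::spawn_blocking(move || {
			hasher.update(&block);
			(hasher, block.len())
		})
		.await
		.unwrap();
		hasher = updated;
		println!("hashed {} bytes", len);
	}
	println!("digest: {:x?}", hasher.finalize());
}
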
@@ -26,10 +26,9 @@ use garage_util::error::Error as GarageError;
 use garage_util::migrate::Migrate;

 use garage_model::garage::Garage;
-use garage_model::s3::object_table::{ObjectVersionEncryption, ObjectVersionMetaInner};
+use garage_model::s3::object_table::{ObjectVersionEncryption, ObjectVersionHeaders};

 use crate::common_error::*;
-use crate::s3::checksum::Md5Checksum;
 use crate::s3::error::Error;

 const X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM: HeaderName =

@@ -125,7 +124,7 @@ impl EncryptionParams {
 		garage: &Garage,
 		headers: &HeaderMap,
 		obj_enc: &'a ObjectVersionEncryption,
-	) -> Result<(Self, Cow<'a, ObjectVersionMetaInner>), Error> {
+	) -> Result<(Self, Cow<'a, ObjectVersionHeaders>), Error> {
 		let key = parse_request_headers(
 			headers,
 			&X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM,

@@ -139,7 +138,7 @@ impl EncryptionParams {
 		garage: &Garage,
 		headers: &HeaderMap,
 		obj_enc: &'a ObjectVersionEncryption,
-	) -> Result<(Self, Cow<'a, ObjectVersionMetaInner>), Error> {
+	) -> Result<(Self, Cow<'a, ObjectVersionHeaders>), Error> {
 		let key = parse_request_headers(
 			headers,
 			&X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM,

@@ -153,11 +152,14 @@ impl EncryptionParams {
 		garage: &Garage,
 		key: Option<(Key<Aes256Gcm>, Md5Output)>,
 		obj_enc: &'a ObjectVersionEncryption,
-	) -> Result<(Self, Cow<'a, ObjectVersionMetaInner>), Error> {
+	) -> Result<(Self, Cow<'a, ObjectVersionHeaders>), Error> {
 		match (key, &obj_enc) {
 			(
 				Some((client_key, client_key_md5)),
-				ObjectVersionEncryption::SseC { inner, compressed },
+				ObjectVersionEncryption::SseC {
+					headers,
+					compressed,
+				},
 			) => {
 				let enc = Self::SseC {
 					client_key,

@@ -168,13 +170,13 @@ impl EncryptionParams {
 						None
 					},
 				};
-				let plaintext = enc.decrypt_blob(&inner)?;
-				let inner = ObjectVersionMetaInner::decode(&plaintext)
-					.ok_or_internal_error("Could not decode encrypted metadata")?;
-				Ok((enc, Cow::Owned(inner)))
+				let plaintext = enc.decrypt_blob(&headers)?;
+				let headers = ObjectVersionHeaders::decode(&plaintext)
+					.ok_or_internal_error("Could not decode encrypted headers")?;
+				Ok((enc, Cow::Owned(headers)))
 			}
-			(None, ObjectVersionEncryption::Plaintext { inner }) => {
-				Ok((Self::Plaintext, Cow::Borrowed(inner)))
+			(None, ObjectVersionEncryption::Plaintext { headers }) => {
+				Ok((Self::Plaintext, Cow::Borrowed(headers)))
 			}
 			(_, ObjectVersionEncryption::SseC { .. }) => {
 				Err(Error::bad_request("Object is encrypted"))

@@ -186,31 +188,29 @@ impl EncryptionParams {
 		}
 	}

-	pub fn encrypt_meta(
+	pub fn encrypt_headers(
 		&self,
-		meta: ObjectVersionMetaInner,
+		h: ObjectVersionHeaders,
 	) -> Result<ObjectVersionEncryption, Error> {
 		match self {
 			Self::SseC {
 				compression_level, ..
 			} => {
-				let plaintext = meta.encode().map_err(GarageError::from)?;
+				let plaintext = h.encode().map_err(GarageError::from)?;
 				let ciphertext = self.encrypt_blob(&plaintext)?;
 				Ok(ObjectVersionEncryption::SseC {
-					inner: ciphertext.into_owned(),
+					headers: ciphertext.into_owned(),
 					compressed: compression_level.is_some(),
 				})
 			}
-			Self::Plaintext => Ok(ObjectVersionEncryption::Plaintext { inner: meta }),
+			Self::Plaintext => Ok(ObjectVersionEncryption::Plaintext { headers: h }),
 		}
 	}

 	// ---- generating object Etag values ----
-	pub fn etag_from_md5(&self, md5sum: &Option<Md5Checksum>) -> String {
+	pub fn etag_from_md5(&self, md5sum: &[u8]) -> String {
 		match self {
-			Self::Plaintext => md5sum
-				.map(|x| hex::encode(&x[..]))
-				.expect("md5 digest should have been computed"),
+			Self::Plaintext => hex::encode(md5sum),
 			Self::SseC { .. } => {
 				// AWS specifies that for encrypted objects, the Etag is not
 				// the md5sum of the data, but doesn't say what it is.

@@ -224,7 +224,7 @@ impl EncryptionParams {

 	// ---- generic function for encrypting / decrypting blobs ----
 	// Prepends a randomly-generated nonce to the encrypted value.
-	// This is used for encrypting object metadata and inlined data for small objects.
+	// This is used for encrypting object headers and inlined data for small objects.
 	// This does not compress anything.

 	pub fn encrypt_blob<'a>(&self, blob: &'a [u8]) -> Result<Cow<'a, [u8]>, Error> {
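
The comment above describes encrypt_blob as prepending a randomly generated nonce to the ciphertext. A hedged sketch of that shape using the aes-gcm crate; the real function also handles compression, Cow return types, and garage's error mapping, all omitted here, and the sketch assumes well-formed input of at least 12 bytes when decrypting:

use aes_gcm::aead::{Aead, AeadCore, KeyInit, OsRng};
use aes_gcm::{Aes256Gcm, Key, Nonce};

fn encrypt_blob(key: &Key<Aes256Gcm>, blob: &[u8]) -> Result<Vec<u8>, aes_gcm::Error> {
	let cipher = Aes256Gcm::new(key);
	// Fresh random 96-bit nonce for every blob, prepended to the output.
	let nonce = Aes256Gcm::generate_nonce(&mut OsRng);
	let mut out = nonce.to_vec();
	out.extend_from_slice(&cipher.encrypt(&nonce, blob)?);
	Ok(out)
}

fn decrypt_blob(key: &Key<Aes256Gcm>, data: &[u8]) -> Result<Vec<u8>, aes_gcm::Error> {
	let cipher = Aes256Gcm::new(key);
	// The nonce is the first 12 bytes, the ciphertext is the rest.
	let (nonce, ciphertext) = data.split_at(12);
	cipher.decrypt(Nonce::from_slice(nonce), ciphertext)
}

fn main() {
	let key = Aes256Gcm::generate_key(OsRng);
	let enc = encrypt_blob(&key, b"attack at dawn").unwrap();
	assert_eq!(decrypt_blob(&key, &enc).unwrap(), b"attack at dawn");
}
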
@@ -69,10 +69,6 @@ pub enum Error {
 	#[error(display = "Invalid encryption algorithm: {:?}, should be AES256", _0)]
 	InvalidEncryptionAlgorithm(String),

-	/// The client sent invalid XML data
-	#[error(display = "Invalid digest: {}", _0)]
-	InvalidDigest(String),
-
 	/// The client sent a request for an action not supported by garage
 	#[error(display = "Unimplemented action: {}", _0)]
 	NotImplemented(String),

@@ -133,7 +129,6 @@ impl Error {
 			Error::NotImplemented(_) => "NotImplemented",
 			Error::InvalidXml(_) => "MalformedXML",
 			Error::InvalidRange(_) => "InvalidRange",
-			Error::InvalidDigest(_) => "InvalidDigest",
 			Error::InvalidUtf8Str(_) | Error::InvalidUtf8String(_) => "InvalidRequest",
 			Error::InvalidEncryptionAlgorithm(_) => "InvalidEncryptionAlgorithmError",
 		}

@@ -153,7 +148,6 @@ impl ApiError for Error {
 			| Error::InvalidPart
 			| Error::InvalidPartOrder
 			| Error::EntityTooSmall
-			| Error::InvalidDigest(_)
 			| Error::InvalidEncryptionAlgorithm(_)
 			| Error::InvalidXml(_)
 			| Error::InvalidUtf8Str(_)
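
Note that the removed InvalidDigest variant touches three places at once: the enum declaration, the S3 error-code mapping, and the HTTP status match. A condensed sketch of that three-part pattern with simplified stand-in types (the status codes here are illustrative, not taken from the garage source):

enum ApiError {
	InvalidPart,
	InvalidDigest(String),
}

impl ApiError {
	// Each variant maps to an S3 error-code string...
	fn aws_code(&self) -> &'static str {
		match self {
			ApiError::InvalidPart => "InvalidPart",
			ApiError::InvalidDigest(_) => "InvalidDigest",
		}
	}

	// ...and to an HTTP status; both are client errors in this sketch.
	fn http_status(&self) -> u16 {
		match self {
			ApiError::InvalidPart | ApiError::InvalidDigest(_) => 400,
		}
	}
}

fn main() {
	let e = ApiError::InvalidDigest("MD5 checksum verification failed".into());
	assert_eq!(e.aws_code(), "InvalidDigest");
	assert_eq!(e.http_status(), 400);
}
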
@@ -27,7 +27,6 @@ use garage_model::s3::version_table::*;

 use crate::helpers::*;
 use crate::s3::api_server::ResBody;
-use crate::s3::checksum::{add_checksum_response_headers, X_AMZ_CHECKSUM_MODE};
 use crate::s3::encryption::EncryptionParams;
 use crate::s3::error::*;

@@ -46,9 +45,8 @@ pub struct GetObjectOverrides {
 fn object_headers(
 	version: &ObjectVersion,
 	version_meta: &ObjectVersionMeta,
-	meta_inner: &ObjectVersionMetaInner,
+	headers: &ObjectVersionHeaders,
 	encryption: EncryptionParams,
-	checksum_mode: ChecksumMode,
 ) -> http::response::Builder {
 	debug!("Version meta: {:?}", version_meta);

@@ -67,7 +65,7 @@ fn object_headers(
 	// have the same name (ignoring case) into a comma-delimited list.
 	// See: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingMetadata.html
 	let mut headers_by_name = BTreeMap::new();
-	for (name, value) in meta_inner.headers.iter() {
+	for (name, value) in headers.0.iter() {
 		match headers_by_name.get_mut(name) {
 			None => {
 				headers_by_name.insert(name, vec![value.as_str()]);

@@ -82,10 +80,6 @@ fn object_headers(
 		resp = resp.header(name, values.join(","));
 	}

-	if checksum_mode.enabled {
-		resp = add_checksum_response_headers(&meta_inner.checksum, resp);
-	}
-
 	encryption.add_response_headers(&mut resp);

 	resp

@@ -205,8 +199,6 @@ pub async fn handle_head_without_ctx(
 	let (encryption, headers) =
 		EncryptionParams::check_decrypt(&garage, req.headers(), &version_meta.encryption)?;

-	let checksum_mode = checksum_mode(&req);
-
 	if let Some(pn) = part_number {
 		match version_data {
 			ObjectVersionData::Inline(_, _) => {

@@ -214,21 +206,17 @@ pub async fn handle_head_without_ctx(
 					return Err(Error::InvalidPart);
 				}
 				let bytes_len = version_meta.size;
-				Ok(object_headers(
-					object_version,
-					version_meta,
-					&headers,
-					encryption,
-					checksum_mode,
-				)
-				.header(CONTENT_LENGTH, format!("{}", bytes_len))
-				.header(
-					CONTENT_RANGE,
-					format!("bytes 0-{}/{}", bytes_len - 1, bytes_len),
-				)
-				.header(X_AMZ_MP_PARTS_COUNT, "1")
-				.status(StatusCode::PARTIAL_CONTENT)
-				.body(empty_body())?)
+				Ok(
+					object_headers(object_version, version_meta, &headers, encryption)
+						.header(CONTENT_LENGTH, format!("{}", bytes_len))
+						.header(
+							CONTENT_RANGE,
+							format!("bytes 0-{}/{}", bytes_len - 1, bytes_len),
+						)
+						.header(X_AMZ_MP_PARTS_COUNT, "1")
+						.status(StatusCode::PARTIAL_CONTENT)
+						.body(empty_body())?,
+				)
 			}
 			ObjectVersionData::FirstBlock(_, _) => {
 				let version = garage

@@ -240,40 +228,32 @@ pub async fn handle_head_without_ctx(
 				let (part_offset, part_end) =
 					calculate_part_bounds(&version, pn).ok_or(Error::InvalidPart)?;

-				Ok(object_headers(
-					object_version,
-					version_meta,
-					&headers,
-					encryption,
-					checksum_mode,
-				)
-				.header(CONTENT_LENGTH, format!("{}", part_end - part_offset))
-				.header(
-					CONTENT_RANGE,
-					format!(
-						"bytes {}-{}/{}",
-						part_offset,
-						part_end - 1,
-						version_meta.size
-					),
-				)
-				.header(X_AMZ_MP_PARTS_COUNT, format!("{}", version.n_parts()?))
-				.status(StatusCode::PARTIAL_CONTENT)
-				.body(empty_body())?)
+				Ok(
+					object_headers(object_version, version_meta, &headers, encryption)
+						.header(CONTENT_LENGTH, format!("{}", part_end - part_offset))
+						.header(
+							CONTENT_RANGE,
+							format!(
+								"bytes {}-{}/{}",
+								part_offset,
+								part_end - 1,
+								version_meta.size
+							),
+						)
+						.header(X_AMZ_MP_PARTS_COUNT, format!("{}", version.n_parts()?))
+						.status(StatusCode::PARTIAL_CONTENT)
+						.body(empty_body())?,
+				)
 			}
 			_ => unreachable!(),
 		}
 	} else {
-		Ok(object_headers(
-			object_version,
-			version_meta,
-			&headers,
-			encryption,
-			checksum_mode,
-		)
-		.header(CONTENT_LENGTH, format!("{}", version_meta.size))
-		.status(StatusCode::OK)
-		.body(empty_body())?)
+		Ok(
+			object_headers(object_version, version_meta, &headers, encryption)
+				.header(CONTENT_LENGTH, format!("{}", version_meta.size))
+				.status(StatusCode::OK)
+				.body(empty_body())?,
+		)
 	}
 }

@@ -327,24 +307,12 @@ pub async fn handle_get_without_ctx(
 	let (enc, headers) =
 		EncryptionParams::check_decrypt(&garage, req.headers(), &last_v_meta.encryption)?;

-	let checksum_mode = checksum_mode(&req);
-
 	match (part_number, parse_range_header(req, last_v_meta.size)?) {
 		(Some(_), Some(_)) => Err(Error::bad_request(
 			"Cannot specify both partNumber and Range header",
 		)),
 		(Some(pn), None) => {
-			handle_get_part(
-				garage,
-				last_v,
-				last_v_data,
-				last_v_meta,
-				enc,
-				&headers,
-				pn,
-				checksum_mode,
-			)
-			.await
+			handle_get_part(garage, last_v, last_v_data, last_v_meta, enc, &headers, pn).await
 		}
		(None, Some(range)) => {
 			handle_get_range(

@@ -356,7 +324,6 @@
 				&headers,
 				range.start,
 				range.start + range.length,
|
range.start + range.length,
|
||||||
checksum_mode,
|
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
}
|
}
|
||||||
|
@ -369,7 +336,6 @@ pub async fn handle_get_without_ctx(
|
||||||
enc,
|
enc,
|
||||||
&headers,
|
&headers,
|
||||||
overrides,
|
overrides,
|
||||||
checksum_mode,
|
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
}
|
}
|
||||||
|
@ -382,19 +348,12 @@ async fn handle_get_full(
|
||||||
version_data: &ObjectVersionData,
|
version_data: &ObjectVersionData,
|
||||||
version_meta: &ObjectVersionMeta,
|
version_meta: &ObjectVersionMeta,
|
||||||
encryption: EncryptionParams,
|
encryption: EncryptionParams,
|
||||||
meta_inner: &ObjectVersionMetaInner,
|
headers: &ObjectVersionHeaders,
|
||||||
overrides: GetObjectOverrides,
|
overrides: GetObjectOverrides,
|
||||||
checksum_mode: ChecksumMode,
|
|
||||||
) -> Result<Response<ResBody>, Error> {
|
) -> Result<Response<ResBody>, Error> {
|
||||||
let mut resp_builder = object_headers(
|
let mut resp_builder = object_headers(version, version_meta, &headers, encryption)
|
||||||
version,
|
.header(CONTENT_LENGTH, format!("{}", version_meta.size))
|
||||||
version_meta,
|
.status(StatusCode::OK);
|
||||||
&meta_inner,
|
|
||||||
encryption,
|
|
||||||
checksum_mode,
|
|
||||||
)
|
|
||||||
.header(CONTENT_LENGTH, format!("{}", version_meta.size))
|
|
||||||
.status(StatusCode::OK);
|
|
||||||
getobject_override_headers(overrides, &mut resp_builder)?;
|
getobject_override_headers(overrides, &mut resp_builder)?;
|
||||||
|
|
||||||
let stream = full_object_byte_stream(garage, version, version_data, encryption);
|
let stream = full_object_byte_stream(garage, version, version_data, encryption);
|
||||||
|
@ -473,15 +432,14 @@ async fn handle_get_range(
|
||||||
version_data: &ObjectVersionData,
|
version_data: &ObjectVersionData,
|
||||||
version_meta: &ObjectVersionMeta,
|
version_meta: &ObjectVersionMeta,
|
||||||
encryption: EncryptionParams,
|
encryption: EncryptionParams,
|
||||||
meta_inner: &ObjectVersionMetaInner,
|
headers: &ObjectVersionHeaders,
|
||||||
begin: u64,
|
begin: u64,
|
||||||
end: u64,
|
end: u64,
|
||||||
checksum_mode: ChecksumMode,
|
|
||||||
) -> Result<Response<ResBody>, Error> {
|
) -> Result<Response<ResBody>, Error> {
|
||||||
// Here we do not use getobject_override_headers because we don't
|
// Here we do not use getobject_override_headers because we don't
|
||||||
// want to add any overridden headers (those should not be added
|
// want to add any overridden headers (those should not be added
|
||||||
// when returning PARTIAL_CONTENT)
|
// when returning PARTIAL_CONTENT)
|
||||||
let resp_builder = object_headers(version, version_meta, meta_inner, encryption, checksum_mode)
|
let resp_builder = object_headers(version, version_meta, headers, encryption)
|
||||||
.header(CONTENT_LENGTH, format!("{}", end - begin))
|
.header(CONTENT_LENGTH, format!("{}", end - begin))
|
||||||
.header(
|
.header(
|
||||||
CONTENT_RANGE,
|
CONTENT_RANGE,
|
||||||
|
@ -522,19 +480,12 @@ async fn handle_get_part(
|
||||||
version_data: &ObjectVersionData,
|
version_data: &ObjectVersionData,
|
||||||
version_meta: &ObjectVersionMeta,
|
version_meta: &ObjectVersionMeta,
|
||||||
encryption: EncryptionParams,
|
encryption: EncryptionParams,
|
||||||
meta_inner: &ObjectVersionMetaInner,
|
headers: &ObjectVersionHeaders,
|
||||||
part_number: u64,
|
part_number: u64,
|
||||||
checksum_mode: ChecksumMode,
|
|
||||||
) -> Result<Response<ResBody>, Error> {
|
) -> Result<Response<ResBody>, Error> {
|
||||||
// Same as for get_range, no getobject_override_headers
|
// Same as for get_range, no getobject_override_headers
|
||||||
let resp_builder = object_headers(
|
let resp_builder = object_headers(object_version, version_meta, headers, encryption)
|
||||||
object_version,
|
.status(StatusCode::PARTIAL_CONTENT);
|
||||||
version_meta,
|
|
||||||
meta_inner,
|
|
||||||
encryption,
|
|
||||||
checksum_mode,
|
|
||||||
)
|
|
||||||
.status(StatusCode::PARTIAL_CONTENT);
|
|
||||||
|
|
||||||
match version_data {
|
match version_data {
|
||||||
ObjectVersionData::Inline(_, bytes) => {
|
ObjectVersionData::Inline(_, bytes) => {
|
||||||
|
@ -616,20 +567,6 @@ fn calculate_part_bounds(v: &Version, part_number: u64) -> Option<(u64, u64)> {
|
||||||
None
|
None
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ChecksumMode {
|
|
||||||
enabled: bool,
|
|
||||||
}
|
|
||||||
|
|
||||||
fn checksum_mode(req: &Request<impl Body>) -> ChecksumMode {
|
|
||||||
ChecksumMode {
|
|
||||||
enabled: req
|
|
||||||
.headers()
|
|
||||||
.get(X_AMZ_CHECKSUM_MODE)
|
|
||||||
.map(|x| x == "ENABLED")
|
|
||||||
.unwrap_or(false),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn body_from_blocks_range(
|
fn body_from_blocks_range(
|
||||||
garage: Arc<Garage>,
|
garage: Arc<Garage>,
|
||||||
encryption: EncryptionParams,
|
encryption: EncryptionParams,
|
||||||
|
|
|
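For reference, the duplicate-header merge that object_headers performs above (per the linked AWS metadata rules) boils down to the following standalone sketch; merge_headers and the sample values are illustrative, not code from this diff:

use std::collections::BTreeMap;

// Duplicate metadata headers collapse into one comma-delimited value,
// mirroring the BTreeMap merge in object_headers().
fn merge_headers(headers: &[(String, String)]) -> BTreeMap<&str, String> {
    let mut by_name: BTreeMap<&str, Vec<&str>> = BTreeMap::new();
    for (name, value) in headers {
        by_name.entry(name.as_str()).or_default().push(value.as_str());
    }
    by_name
        .into_iter()
        .map(|(name, values)| (name, values.join(",")))
        .collect()
}

fn main() {
    let headers = vec![
        ("x-amz-meta-color".to_string(), "blue".to_string()),
        ("x-amz-meta-color".to_string(), "red".to_string()),
    ];
    // Prints: x-amz-meta-color: blue,red
    for (name, value) in merge_headers(&headers) {
        println!("{}: {}", name, value);
    }
}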
@@ -2,7 +2,7 @@ use std::collections::{BTreeMap, BTreeSet};
 use std::iter::{Iterator, Peekable};
 
 use base64::prelude::*;
-use hyper::{Request, Response};
+use hyper::Response;
 
 use garage_util::data::*;
 use garage_util::error::Error as GarageError;
@@ -15,8 +15,7 @@ use garage_table::EnumerationOrder;
 
 use crate::encoding::*;
 use crate::helpers::*;
-use crate::s3::api_server::{ReqBody, ResBody};
-use crate::s3::encryption::EncryptionParams;
+use crate::s3::api_server::ResBody;
 use crate::s3::error::*;
 use crate::s3::multipart as s3_multipart;
 use crate::s3::xml as s3_xml;
@@ -272,21 +271,13 @@ pub async fn handle_list_multipart_upload(
 
 pub async fn handle_list_parts(
     ctx: ReqCtx,
-    req: Request<ReqBody>,
     query: &ListPartsQuery,
 ) -> Result<Response<ResBody>, Error> {
     debug!("ListParts {:?}", query);
 
     let upload_id = s3_multipart::decode_upload_id(&query.upload_id)?;
 
-    let (_, object_version, mpu) = s3_multipart::get_upload(&ctx, &query.key, &upload_id).await?;
-
-    let object_encryption = match object_version.state {
-        ObjectVersionState::Uploading { encryption, .. } => encryption,
-        _ => unreachable!(),
-    };
-    let encryption_res =
-        EncryptionParams::check_decrypt(&ctx.garage, req.headers(), &object_encryption);
+    let (_, _, mpu) = s3_multipart::get_upload(&ctx, &query.key, &upload_id).await?;
 
     let (info, next) = fetch_part_info(query, &mpu)?;
 
@@ -305,40 +296,11 @@ pub async fn handle_list_parts(
         is_truncated: s3_xml::Value(format!("{}", next.is_some())),
         parts: info
             .iter()
-            .map(|part| {
-                // hide checksum if object is encrypted and the decryption
-                // keys are not provided
-                let checksum = part.checksum.filter(|_| encryption_res.is_ok());
-                s3_xml::PartItem {
-                    etag: s3_xml::Value(format!("\"{}\"", part.etag)),
-                    last_modified: s3_xml::Value(msec_to_rfc3339(part.timestamp)),
-                    part_number: s3_xml::IntValue(part.part_number as i64),
-                    size: s3_xml::IntValue(part.size as i64),
-                    checksum_crc32: match &checksum {
-                        Some(ChecksumValue::Crc32(x)) => {
-                            Some(s3_xml::Value(BASE64_STANDARD.encode(&x)))
-                        }
-                        _ => None,
-                    },
-                    checksum_crc32c: match &checksum {
-                        Some(ChecksumValue::Crc32c(x)) => {
-                            Some(s3_xml::Value(BASE64_STANDARD.encode(&x)))
-                        }
-                        _ => None,
-                    },
-                    checksum_sha1: match &checksum {
-                        Some(ChecksumValue::Sha1(x)) => {
-                            Some(s3_xml::Value(BASE64_STANDARD.encode(&x)))
-                        }
-                        _ => None,
-                    },
-                    checksum_sha256: match &checksum {
-                        Some(ChecksumValue::Sha256(x)) => {
-                            Some(s3_xml::Value(BASE64_STANDARD.encode(&x)))
-                        }
-                        _ => None,
-                    },
-                }
-            })
+            .map(|part| s3_xml::PartItem {
+                etag: s3_xml::Value(format!("\"{}\"", part.etag)),
+                last_modified: s3_xml::Value(msec_to_rfc3339(part.timestamp)),
+                part_number: s3_xml::IntValue(part.part_number as i64),
+                size: s3_xml::IntValue(part.size as i64),
+            })
             .collect(),
 
@@ -384,7 +346,6 @@ struct PartInfo<'a> {
     timestamp: u64,
     part_number: u64,
     size: u64,
-    checksum: Option<ChecksumValue>,
 }
 
 enum ExtractionResult {
@@ -398,7 +359,7 @@ enum ExtractionResult {
         key: String,
     },
     // Fallback key is used for legacy APIs that only support
-    // exclusive pagination (and not inclusive one).
+    // exlusive pagination (and not inclusive one).
     SkipTo {
         key: String,
         fallback_key: Option<String>,
@@ -408,7 +369,7 @@ enum ExtractionResult {
 #[derive(PartialEq, Clone, Debug)]
 enum RangeBegin {
     // Fallback key is used for legacy APIs that only support
-    // exclusive pagination (and not inclusive one).
+    // exlusive pagination (and not inclusive one).
     IncludingKey {
         key: String,
         fallback_key: Option<String>,
@@ -525,7 +486,6 @@ fn fetch_part_info<'a>(
             timestamp: pk.timestamp,
             etag,
             size,
-            checksum: p.checksum,
         };
         match parts.last_mut() {
             Some(lastpart) if lastpart.part_number == pk.part_number => {
@@ -985,12 +945,8 @@ mod tests {
             state: ObjectVersionState::Uploading {
                 multipart: true,
                 encryption: ObjectVersionEncryption::Plaintext {
-                    inner: ObjectVersionMetaInner {
-                        headers: vec![],
-                        checksum: None,
-                    },
+                    headers: ObjectVersionHeaders(vec![]),
                 },
-                checksum_algorithm: None,
             },
         }
     }
@@ -1179,7 +1135,6 @@ mod tests {
                     version: uuid,
                     size: Some(3),
                     etag: Some("etag1".into()),
-                    checksum: None,
                 },
             ),
             (
@@ -1191,7 +1146,6 @@ mod tests {
                     version: uuid,
                     size: None,
                    etag: None,
-                    checksum: None,
                 },
             ),
             (
@@ -1203,7 +1157,6 @@ mod tests {
                     version: uuid,
                     size: Some(10),
                     etag: Some("etag2".into()),
-                    checksum: None,
                 },
             ),
             (
@@ -1215,7 +1168,6 @@ mod tests {
                     version: uuid,
                     size: Some(7),
                     etag: Some("etag3".into()),
-                    checksum: None,
                 },
             ),
             (
@@ -1227,7 +1179,6 @@ mod tests {
                     version: uuid,
                     size: Some(5),
                     etag: Some("etag4".into()),
-                    checksum: None,
                 },
             ),
         ];
@@ -1266,14 +1217,12 @@ mod tests {
                 etag: "etag1",
                 timestamp: TS,
                 part_number: 1,
-                size: 3,
-                checksum: None,
+                size: 3
             },
             PartInfo {
                 etag: "etag2",
                 timestamp: TS,
                 part_number: 3,
-                checksum: None,
                 size: 10
             },
         ]
@@ -1289,14 +1238,12 @@ mod tests {
             PartInfo {
                 etag: "etag3",
                 timestamp: TS,
-                checksum: None,
                 part_number: 5,
                 size: 7
             },
             PartInfo {
                 etag: "etag4",
                 timestamp: TS,
-                checksum: None,
                 part_number: 8,
                 size: 5
             },
@@ -1320,28 +1267,24 @@ mod tests {
             PartInfo {
                 etag: "etag1",
                 timestamp: TS,
-                checksum: None,
                 part_number: 1,
                 size: 3
             },
             PartInfo {
                 etag: "etag2",
                 timestamp: TS,
-                checksum: None,
                 part_number: 3,
                 size: 10
             },
             PartInfo {
                 etag: "etag3",
                 timestamp: TS,
-                checksum: None,
                 part_number: 5,
                 size: 7
             },
             PartInfo {
                 etag: "etag4",
                 timestamp: TS,
-                checksum: None,
                 part_number: 8,
                 size: 5
             },
@@ -13,7 +13,6 @@ mod post_object;
 mod put;
 mod website;
 
-mod checksum;
 mod encryption;
 mod router;
 pub mod xml;
@@ -1,10 +1,9 @@
 use std::collections::HashMap;
-use std::convert::TryInto;
 use std::sync::Arc;
 
-use base64::prelude::*;
 use futures::prelude::*;
 use hyper::{Request, Response};
+use md5::{Digest as Md5Digest, Md5};
 
 use garage_table::*;
 use garage_util::data::*;
@@ -17,7 +16,6 @@ use garage_model::s3::version_table::*;
 
 use crate::helpers::*;
 use crate::s3::api_server::{ReqBody, ResBody};
-use crate::s3::checksum::*;
 use crate::s3::encryption::EncryptionParams;
 use crate::s3::error::*;
 use crate::s3::put::*;
@@ -43,16 +41,10 @@ pub async fn handle_create_multipart_upload(
     let timestamp = next_timestamp(existing_object.as_ref());
 
     let headers = get_headers(req.headers())?;
-    let meta = ObjectVersionMetaInner {
-        headers,
-        checksum: None,
-    };
 
     // Determine whether object should be encrypted, and if so the key
     let encryption = EncryptionParams::new_from_headers(&garage, req.headers())?;
-    let object_encryption = encryption.encrypt_meta(meta)?;
-
-    let checksum_algorithm = request_checksum_algorithm(req.headers())?;
+    let object_encryption = encryption.encrypt_headers(headers)?;
 
     // Create object in object table
     let object_version = ObjectVersion {
@@ -61,7 +53,6 @@ pub async fn handle_create_multipart_upload(
         state: ObjectVersionState::Uploading {
             multipart: true,
             encryption: object_encryption,
-            checksum_algorithm,
         },
     };
     let object = Object::new(*bucket_id, key.to_string(), vec![object_version]);
@@ -99,13 +90,9 @@ pub async fn handle_put_part(
 
     let upload_id = decode_upload_id(upload_id)?;
 
-    let expected_checksums = ExpectedChecksums {
-        md5: match req.headers().get("content-md5") {
-            Some(x) => Some(x.to_str()?.to_string()),
-            None => None,
-        },
-        sha256: content_sha256,
-        extra: request_checksum_value(req.headers())?,
+    let content_md5 = match req.headers().get("content-md5") {
+        Some(x) => Some(x.to_str()?.to_string()),
+        None => None,
     };
 
     // Read first chuck, and at the same time try to get object to see if it exists
@@ -119,12 +106,8 @@ pub async fn handle_put_part(
         futures::try_join!(get_upload(&ctx, &key, &upload_id), chunker.next(),)?;
 
     // Check encryption params
-    let (object_encryption, checksum_algorithm) = match object_version.state {
-        ObjectVersionState::Uploading {
-            encryption,
-            checksum_algorithm,
-            ..
-        } => (encryption, checksum_algorithm),
+    let object_encryption = match object_version.state {
+        ObjectVersionState::Uploading { encryption, .. } => encryption,
         _ => unreachable!(),
     };
     let (encryption, _) =
@@ -155,9 +138,7 @@ pub async fn handle_put_part(
         mpu_part_key,
         MpuPart {
             version: version_uuid,
-            // all these are filled in later, at the end of this function
             etag: None,
-            checksum: None,
             size: None,
         },
     );
@@ -171,31 +152,32 @@ pub async fn handle_put_part(
     garage.version_table.insert(&version).await?;
 
     // Copy data to version
-    let checksummer =
-        Checksummer::init(&expected_checksums, !encryption.is_encrypted()).add(checksum_algorithm);
-    let (total_size, checksums, _) = read_and_put_blocks(
+    let (total_size, data_md5sum, data_sha256sum, _) = read_and_put_blocks(
         &ctx,
         &version,
         encryption,
         part_number,
         first_block,
         &mut chunker,
-        checksummer,
     )
     .await?;
 
     // Verify that checksums map
-    checksums.verify(&expected_checksums)?;
+    ensure_checksum_matches(
+        &data_md5sum,
+        data_sha256sum,
+        content_md5.as_deref(),
+        content_sha256,
+    )?;
 
     // Store part etag in version
-    let etag = encryption.etag_from_md5(&checksums.md5);
+    let etag = encryption.etag_from_md5(&data_md5sum);
 
     mpu.parts.put(
         mpu_part_key,
         MpuPart {
             version: version_uuid,
             etag: Some(etag.clone()),
-            checksum: checksums.extract(checksum_algorithm),
             size: Some(total_size),
         },
     );
@@ -207,7 +189,6 @@ pub async fn handle_put_part(
 
     let mut resp = Response::builder().header("ETag", format!("\"{}\"", etag));
     encryption.add_response_headers(&mut resp);
-    let resp = add_checksum_response_headers(&expected_checksums.extra, resp);
     Ok(resp.body(empty_body())?)
 }
 
@@ -255,11 +236,10 @@ pub async fn handle_complete_multipart_upload(
     bucket_name,
     ..
 } = &ctx;
-    let (req_head, req_body) = req.into_parts();
 
-    let expected_checksum = request_checksum_value(&req_head.headers)?;
-
-    let body = http_body_util::BodyExt::collect(req_body).await?.to_bytes();
+    let body = http_body_util::BodyExt::collect(req.into_body())
+        .await?
+        .to_bytes();
 
     if let Some(content_sha256) = content_sha256 {
         verify_signed_content(content_sha256, &body[..])?;
@@ -283,12 +263,8 @@ pub async fn handle_complete_multipart_upload(
         return Err(Error::bad_request("No data was uploaded"));
     }
 
-    let (object_encryption, checksum_algorithm) = match object_version.state {
-        ObjectVersionState::Uploading {
-            encryption,
-            checksum_algorithm,
-            ..
-        } => (encryption, checksum_algorithm),
+    let object_encryption = match object_version.state {
+        ObjectVersionState::Uploading { encryption, .. } => encryption,
         _ => unreachable!(),
     };
 
@@ -316,13 +292,6 @@ pub async fn handle_complete_multipart_upload(
     for req_part in body_list_of_parts.iter() {
         match have_parts.get(&req_part.part_number) {
             Some(part) if part.etag.as_ref() == Some(&req_part.etag) && part.size.is_some() => {
-                // alternative version: if req_part.checksum.is_some() && part.checksum != req_part.checksum {
-                if part.checksum != req_part.checksum {
-                    return Err(Error::InvalidDigest(format!(
-                        "Invalid checksum for part {}: in request = {:?}, uploaded part = {:?}",
-                        req_part.part_number, req_part.checksum, part.checksum
-                    )));
-                }
                 parts.push(*part)
             }
             _ => return Err(Error::InvalidPart),
@@ -370,23 +339,18 @@ pub async fn handle_complete_multipart_upload(
     });
     garage.block_ref_table.insert_many(block_refs).await?;
 
-    // Calculate checksum and etag of final object
+    // Calculate etag of final object
     // To understand how etags are calculated, read more here:
-    // https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
     // https://teppen.io/2018/06/23/aws_s3_etags/
-    let mut checksummer = MultipartChecksummer::init(checksum_algorithm);
+    let mut etag_md5_hasher = Md5::new();
     for part in parts.iter() {
-        checksummer.update(part.etag.as_ref().unwrap(), part.checksum)?;
+        etag_md5_hasher.update(part.etag.as_ref().unwrap().as_bytes());
     }
-    let (checksum_md5, checksum_extra) = checksummer.finalize();
-
-    if expected_checksum.is_some() && checksum_extra != expected_checksum {
-        return Err(Error::InvalidDigest(
-            "Failed to validate x-amz-checksum-*".into(),
-        ));
-    }
-
-    let etag = format!("{}-{}", hex::encode(&checksum_md5[..]), parts.len());
+    let etag = format!(
+        "{}-{}",
+        hex::encode(etag_md5_hasher.finalize()),
+        parts.len()
+    );
 
     // Calculate total size of final object
     let total_size = parts.iter().map(|x| x.size.unwrap()).sum();
@@ -399,20 +363,6 @@ pub async fn handle_complete_multipart_upload(
         return Err(e);
     }
 
-    // If there is a checksum algorithm, update metadata with checksum
-    let object_encryption = match checksum_algorithm {
-        None => object_encryption,
-        Some(_) => {
-            let (encryption, meta) =
-                EncryptionParams::check_decrypt(&garage, &req_head.headers, &object_encryption)?;
-            let new_meta = ObjectVersionMetaInner {
-                headers: meta.into_owned().headers,
-                checksum: checksum_extra,
-            };
-            encryption.encrypt_meta(new_meta)?
-        }
-    };
-
     // Write final object version
     object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock(
         ObjectVersionMeta {
@@ -433,28 +383,10 @@ pub async fn handle_complete_multipart_upload(
         bucket: s3_xml::Value(bucket_name.to_string()),
         key: s3_xml::Value(key),
         etag: s3_xml::Value(format!("\"{}\"", etag)),
-        checksum_crc32: match &checksum_extra {
-            Some(ChecksumValue::Crc32(x)) => Some(s3_xml::Value(BASE64_STANDARD.encode(&x))),
-            _ => None,
-        },
-        checksum_crc32c: match &checksum_extra {
-            Some(ChecksumValue::Crc32c(x)) => Some(s3_xml::Value(BASE64_STANDARD.encode(&x))),
-            _ => None,
-        },
-        checksum_sha1: match &checksum_extra {
-            Some(ChecksumValue::Sha1(x)) => Some(s3_xml::Value(BASE64_STANDARD.encode(&x))),
-            _ => None,
-        },
-        checksum_sha256: match &checksum_extra {
-            Some(ChecksumValue::Sha256(x)) => Some(s3_xml::Value(BASE64_STANDARD.encode(&x))),
-            _ => None,
-        },
     };
     let xml = s3_xml::to_xml_with_header(&result)?;
 
-    let resp = Response::builder();
-    let resp = add_checksum_response_headers(&expected_checksum, resp);
-    Ok(resp.body(string_body(xml))?)
+    Ok(Response::new(string_body(xml)))
 }
 
 pub async fn handle_abort_multipart_upload(
@@ -523,7 +455,6 @@ pub fn decode_upload_id(id: &str) -> Result<Uuid, Error> {
 struct CompleteMultipartUploadPart {
     etag: String,
     part_number: u64,
-    checksum: Option<ChecksumValue>,
 }
 
 fn parse_complete_multipart_upload_body(
@@ -549,41 +480,9 @@ fn parse_complete_multipart_upload_body(
                 .children()
                 .find(|e| e.has_tag_name("PartNumber"))?
                 .text()?;
-            let checksum = if let Some(crc32) =
-                item.children().find(|e| e.has_tag_name("ChecksumCRC32"))
-            {
-                Some(ChecksumValue::Crc32(
-                    BASE64_STANDARD.decode(crc32.text()?).ok()?[..]
-                        .try_into()
-                        .ok()?,
-                ))
-            } else if let Some(crc32c) = item.children().find(|e| e.has_tag_name("ChecksumCRC32C"))
-            {
-                Some(ChecksumValue::Crc32c(
-                    BASE64_STANDARD.decode(crc32c.text()?).ok()?[..]
-                        .try_into()
-                        .ok()?,
-                ))
-            } else if let Some(sha1) = item.children().find(|e| e.has_tag_name("ChecksumSHA1")) {
-                Some(ChecksumValue::Sha1(
-                    BASE64_STANDARD.decode(sha1.text()?).ok()?[..]
-                        .try_into()
-                        .ok()?,
-                ))
-            } else if let Some(sha256) = item.children().find(|e| e.has_tag_name("ChecksumSHA256"))
-            {
-                Some(ChecksumValue::Sha256(
-                    BASE64_STANDARD.decode(sha256.text()?).ok()?[..]
-                        .try_into()
-                        .ok()?,
-                ))
-            } else {
-                None
-            };
             parts.push(CompleteMultipartUploadPart {
                 etag: etag.trim_matches('"').to_string(),
                 part_number: part_number.parse().ok()?,
-                checksum,
             });
         } else {
             return None;
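For reference, the multipart ETag recipe described at the teppen.io link above is: MD5 over the concatenation of each part's raw MD5 digest, then a dash and the part count. (The code on the new side of this diff feeds the hex ETag strings themselves to the hasher, so its value can differ from clients that follow the byte-level recipe.) A standalone sketch of the byte-level recipe, assuming the same md-5 and hex crates imported in this diff; multipart_etag is an illustrative helper, not code from the repository:

use md5::{Digest, Md5};

// AWS-style multipart ETag: MD5 of the concatenated raw per-part MD5
// digests, followed by "-<number of parts>".
fn multipart_etag(part_etags_hex: &[&str]) -> String {
    let mut hasher = Md5::new();
    for etag in part_etags_hex {
        // each part's ETag is the hex-encoded MD5 of that part's bytes
        hasher.update(hex::decode(etag).expect("part etag should be hex"));
    }
    format!("{}-{}", hex::encode(hasher.finalize()), part_etags_hex.len())
}

fn main() {
    // nine identical parts, each with ETag md5("hello")
    let parts = ["5d41402abc4b2a76b9719d911017c592"; 9];
    println!("{}", multipart_etag(&parts)); // prints a hex digest ending in "-9"
}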
@@ -14,15 +14,13 @@ use multer::{Constraints, Multipart, SizeLimit};
 use serde::Deserialize;
 
 use garage_model::garage::Garage;
-use garage_model::s3::object_table::*;
 
 use crate::helpers::*;
 use crate::s3::api_server::ResBody;
-use crate::s3::checksum::*;
 use crate::s3::cors::*;
 use crate::s3::encryption::EncryptionParams;
 use crate::s3::error::*;
-use crate::s3::put::{get_headers, save_stream, ChecksumMode};
+use crate::s3::put::{get_headers, save_stream};
 use crate::s3::xml as s3_xml;
 use crate::signature::payload::{verify_v4, Authorization};
 
@@ -71,11 +69,21 @@ pub async fn handle_post_object(
         }
 
         if let Ok(content) = HeaderValue::from_str(&field.text().await?) {
-            if params.insert(&name, content).is_some() {
-                return Err(Error::bad_request(format!(
-                    "Field '{}' provided more than once",
-                    name
-                )));
+            match name.as_str() {
+                "tag" => (/* tag need to be reencoded, but we don't support them yet anyway */),
+                "acl" => {
+                    if params.insert("x-amz-acl", content).is_some() {
+                        return Err(Error::bad_request("Field 'acl' provided more than once"));
+                    }
+                }
+                _ => {
+                    if params.insert(&name, content).is_some() {
+                        return Err(Error::bad_request(format!(
+                            "Field '{}' provided more than once",
+                            name
+                        )));
+                    }
+                }
             }
         }
     };
@@ -90,6 +98,10 @@ pub async fn handle_post_object(
         .ok_or_bad_request("No policy was provided")?
         .to_str()?;
     let authorization = Authorization::parse_form(&params)?;
+    let content_md5 = params
+        .get("content-md5")
+        .map(HeaderValue::to_str)
+        .transpose()?;
 
     let key = if key.contains("${filename}") {
         // if no filename is provided, don't replace. This matches the behavior of AWS.
@@ -212,25 +224,8 @@ pub async fn handle_post_object(
         )));
     }
 
-    // if we ever start supporting ACLs, we likely want to map "acl" to x-amz-acl" somewhere
-    // around here to make sure the rest of the machinery takes our acl into account.
     let headers = get_headers(&params)?;
 
-    let expected_checksums = ExpectedChecksums {
-        md5: params
-            .get("content-md5")
-            .map(HeaderValue::to_str)
-            .transpose()?
-            .map(str::to_string),
-        sha256: None,
-        extra: request_checksum_algorithm_value(&params)?,
-    };
-
-    let meta = ObjectVersionMetaInner {
-        headers,
-        checksum: expected_checksums.extra,
-    };
-
     let encryption = EncryptionParams::new_from_headers(&garage, &params)?;
 
     let stream = file_field.map(|r| r.map_err(Into::into));
@@ -244,11 +239,12 @@ pub async fn handle_post_object(
 
     let res = save_stream(
         &ctx,
-        meta,
+        headers,
         encryption,
         StreamLimiter::new(stream, conditions.content_length),
        &key,
-        ChecksumMode::Verify(&expected_checksums),
+        content_md5.map(str::to_string),
+        None,
     )
     .await?;
 
@ -1,9 +1,12 @@
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use base64::prelude::*;
|
||||||
use futures::prelude::*;
|
use futures::prelude::*;
|
||||||
use futures::stream::FuturesOrdered;
|
use futures::stream::FuturesOrdered;
|
||||||
use futures::try_join;
|
use futures::try_join;
|
||||||
|
use md5::{digest::generic_array::*, Digest as Md5Digest, Md5};
|
||||||
|
use sha2::Sha256;
|
||||||
|
|
||||||
use tokio::sync::mpsc;
|
use tokio::sync::mpsc;
|
||||||
|
|
||||||
|
@ -19,6 +22,7 @@ use opentelemetry::{
|
||||||
use garage_net::bytes_buf::BytesBuf;
|
use garage_net::bytes_buf::BytesBuf;
|
||||||
use garage_rpc::rpc_helper::OrderTag;
|
use garage_rpc::rpc_helper::OrderTag;
|
||||||
use garage_table::*;
|
use garage_table::*;
|
||||||
|
use garage_util::async_hash::*;
|
||||||
use garage_util::data::*;
|
use garage_util::data::*;
|
||||||
use garage_util::error::Error as GarageError;
|
use garage_util::error::Error as GarageError;
|
||||||
use garage_util::time::*;
|
use garage_util::time::*;
|
||||||
|
@ -32,22 +36,16 @@ use garage_model::s3::version_table::*;
|
||||||
|
|
||||||
use crate::helpers::*;
|
use crate::helpers::*;
|
||||||
use crate::s3::api_server::{ReqBody, ResBody};
|
use crate::s3::api_server::{ReqBody, ResBody};
|
||||||
use crate::s3::checksum::*;
|
|
||||||
use crate::s3::encryption::EncryptionParams;
|
use crate::s3::encryption::EncryptionParams;
|
||||||
use crate::s3::error::*;
|
use crate::s3::error::*;
|
||||||
|
|
||||||
const PUT_BLOCKS_MAX_PARALLEL: usize = 3;
|
const PUT_BLOCKS_MAX_PARALLEL: usize = 3;
|
||||||
|
|
||||||
pub(crate) struct SaveStreamResult {
|
pub struct SaveStreamResult {
|
||||||
pub(crate) version_uuid: Uuid,
|
pub version_uuid: Uuid,
|
||||||
pub(crate) version_timestamp: u64,
|
pub version_timestamp: u64,
|
||||||
/// Etag WITHOUT THE QUOTES (just the hex value)
|
/// Etag WITHOUT THE QUOTES (just the hex value)
|
||||||
pub(crate) etag: String,
|
pub etag: String,
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) enum ChecksumMode<'a> {
|
|
||||||
Verify(&'a ExpectedChecksums),
|
|
||||||
Calculate(Option<ChecksumAlgorithm>),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn handle_put(
|
pub async fn handle_put(
|
||||||
|
@ -60,32 +58,24 @@ pub async fn handle_put(
|
||||||
let headers = get_headers(req.headers())?;
|
let headers = get_headers(req.headers())?;
|
||||||
debug!("Object headers: {:?}", headers);
|
debug!("Object headers: {:?}", headers);
|
||||||
|
|
||||||
let expected_checksums = ExpectedChecksums {
|
|
||||||
md5: match req.headers().get("content-md5") {
|
|
||||||
Some(x) => Some(x.to_str()?.to_string()),
|
|
||||||
None => None,
|
|
||||||
},
|
|
||||||
sha256: content_sha256,
|
|
||||||
extra: request_checksum_value(req.headers())?,
|
|
||||||
};
|
|
||||||
|
|
||||||
let meta = ObjectVersionMetaInner {
|
|
||||||
headers,
|
|
||||||
checksum: expected_checksums.extra,
|
|
||||||
};
|
|
||||||
|
|
||||||
// Determine whether object should be encrypted, and if so the key
|
// Determine whether object should be encrypted, and if so the key
|
||||||
let encryption = EncryptionParams::new_from_headers(&ctx.garage, req.headers())?;
|
let encryption = EncryptionParams::new_from_headers(&ctx.garage, req.headers())?;
|
||||||
|
|
||||||
|
let content_md5 = match req.headers().get("content-md5") {
|
||||||
|
Some(x) => Some(x.to_str()?.to_string()),
|
||||||
|
None => None,
|
||||||
|
};
|
||||||
|
|
||||||
let stream = body_stream(req.into_body());
|
let stream = body_stream(req.into_body());
|
||||||
|
|
||||||
let res = save_stream(
|
let res = save_stream(
|
||||||
&ctx,
|
&ctx,
|
||||||
meta,
|
headers,
|
||||||
encryption,
|
encryption,
|
||||||
stream,
|
stream,
|
||||||
key,
|
key,
|
||||||
ChecksumMode::Verify(&expected_checksums),
|
content_md5,
|
||||||
|
content_sha256,
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
|
@ -93,17 +83,17 @@ pub async fn handle_put(
|
||||||
.header("x-amz-version-id", hex::encode(res.version_uuid))
|
.header("x-amz-version-id", hex::encode(res.version_uuid))
|
||||||
.header("ETag", format!("\"{}\"", res.etag));
|
.header("ETag", format!("\"{}\"", res.etag));
|
||||||
encryption.add_response_headers(&mut resp);
|
encryption.add_response_headers(&mut resp);
|
||||||
let resp = add_checksum_response_headers(&expected_checksums.extra, resp);
|
|
||||||
Ok(resp.body(empty_body())?)
|
Ok(resp.body(empty_body())?)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
|
pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
|
||||||
ctx: &ReqCtx,
|
ctx: &ReqCtx,
|
||||||
mut meta: ObjectVersionMetaInner,
|
headers: ObjectVersionHeaders,
|
||||||
encryption: EncryptionParams,
|
encryption: EncryptionParams,
|
||||||
body: S,
|
body: S,
|
||||||
key: &String,
|
key: &String,
|
||||||
checksum_mode: ChecksumMode<'_>,
|
content_md5: Option<String>,
|
||||||
|
content_sha256: Option<FixedBytes32>,
|
||||||
) -> Result<SaveStreamResult, Error> {
|
) -> Result<SaveStreamResult, Error> {
|
||||||
let ReqCtx {
|
let ReqCtx {
|
||||||
garage, bucket_id, ..
|
garage, bucket_id, ..
|
||||||
|
@ -117,36 +107,32 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
|
||||||
|
|
||||||
let first_block = first_block_opt.unwrap_or_default();
|
let first_block = first_block_opt.unwrap_or_default();
|
||||||
|
|
||||||
|
let object_encryption = encryption.encrypt_headers(headers)?;
|
||||||
|
|
||||||
// Generate identity of new version
|
// Generate identity of new version
|
||||||
let version_uuid = gen_uuid();
|
let version_uuid = gen_uuid();
|
||||||
let version_timestamp = next_timestamp(existing_object.as_ref());
|
let version_timestamp = next_timestamp(existing_object.as_ref());
|
||||||
|
|
||||||
let mut checksummer = match checksum_mode {
|
|
||||||
ChecksumMode::Verify(expected) => Checksummer::init(expected, !encryption.is_encrypted()),
|
|
||||||
ChecksumMode::Calculate(algo) => {
|
|
||||||
Checksummer::init(&Default::default(), !encryption.is_encrypted()).add(algo)
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// If body is small enough, store it directly in the object table
|
// If body is small enough, store it directly in the object table
|
||||||
// as "inline data". We can then return immediately.
|
// as "inline data". We can then return immediately.
|
||||||
if first_block.len() < INLINE_THRESHOLD {
|
if first_block.len() < INLINE_THRESHOLD {
|
||||||
checksummer.update(&first_block);
|
let mut md5sum = Md5::new();
|
||||||
let checksums = checksummer.finalize();
|
md5sum.update(&first_block[..]);
|
||||||
|
let data_md5sum = md5sum.finalize();
|
||||||
|
|
||||||
match checksum_mode {
|
let data_sha256sum = sha256sum(&first_block[..]);
|
||||||
ChecksumMode::Verify(expected) => {
|
|
||||||
checksums.verify(&expected)?;
|
ensure_checksum_matches(
|
||||||
}
|
&data_md5sum,
|
||||||
ChecksumMode::Calculate(algo) => {
|
data_sha256sum,
|
||||||
meta.checksum = checksums.extract(algo);
|
content_md5.as_deref(),
|
||||||
}
|
content_sha256,
|
||||||
};
|
)?;
|
||||||
|
|
||||||
let size = first_block.len() as u64;
|
let size = first_block.len() as u64;
|
||||||
check_quotas(ctx, size, existing_object.as_ref()).await?;
|
check_quotas(ctx, size, existing_object.as_ref()).await?;
|
||||||
|
|
||||||
let etag = encryption.etag_from_md5(&checksums.md5);
|
let etag = encryption.etag_from_md5(&data_md5sum);
|
||||||
let inline_data = encryption.encrypt_blob(&first_block)?.to_vec();
|
let inline_data = encryption.encrypt_blob(&first_block)?.to_vec();
|
||||||
|
|
||||||
let object_version = ObjectVersion {
|
let object_version = ObjectVersion {
|
||||||
|
@ -154,7 +140,7 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
|
||||||
timestamp: version_timestamp,
|
timestamp: version_timestamp,
|
||||||
state: ObjectVersionState::Complete(ObjectVersionData::Inline(
|
state: ObjectVersionState::Complete(ObjectVersionData::Inline(
|
||||||
ObjectVersionMeta {
|
ObjectVersionMeta {
|
||||||
encryption: encryption.encrypt_meta(meta)?,
|
encryption: object_encryption,
|
||||||
size,
|
size,
|
||||||
etag: etag.clone(),
|
etag: etag.clone(),
|
||||||
},
|
},
|
||||||
|
@ -189,8 +175,7 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
|
||||||
uuid: version_uuid,
|
uuid: version_uuid,
|
||||||
timestamp: version_timestamp,
|
timestamp: version_timestamp,
|
||||||
state: ObjectVersionState::Uploading {
|
state: ObjectVersionState::Uploading {
|
||||||
encryption: encryption.encrypt_meta(meta.clone())?,
|
encryption: object_encryption.clone(),
|
||||||
checksum_algorithm: None, // don't care; overwritten later
|
|
||||||
multipart: false,
|
multipart: false,
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
@ -211,37 +196,25 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
|
||||||
);
|
);
|
||||||
garage.version_table.insert(&version).await?;
|
garage.version_table.insert(&version).await?;
|
||||||
|
|
||||||
// Transfer data
|
// Transfer data and verify checksum
|
||||||
let (total_size, checksums, first_block_hash) = read_and_put_blocks(
|
let (total_size, data_md5sum, data_sha256sum, first_block_hash) =
|
||||||
ctx,
|
read_and_put_blocks(ctx, &version, encryption, 1, first_block, &mut chunker).await?;
|
||||||
&version,
|
|
||||||
encryption,
|
|
||||||
1,
|
|
||||||
first_block,
|
|
||||||
&mut chunker,
|
|
||||||
checksummer,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
// Verify checksums are ok / add calculated checksum to metadata
|
ensure_checksum_matches(
|
||||||
match checksum_mode {
|
&data_md5sum,
|
||||||
ChecksumMode::Verify(expected) => {
|
data_sha256sum,
|
||||||
checksums.verify(&expected)?;
|
content_md5.as_deref(),
|
||||||
}
|
content_sha256,
|
||||||
ChecksumMode::Calculate(algo) => {
|
)?;
|
||||||
meta.checksum = checksums.extract(algo);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// Verify quotas are respsected
|
|
||||||
check_quotas(ctx, total_size, existing_object.as_ref()).await?;
|
check_quotas(ctx, total_size, existing_object.as_ref()).await?;
|
||||||
|
|
||||||
// Save final object state, marked as Complete
|
// Save final object state, marked as Complete
|
||||||
let etag = encryption.etag_from_md5(&checksums.md5);
|
let etag = encryption.etag_from_md5(&data_md5sum);
|
||||||
|
|
||||||
object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock(
|
object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock(
|
||||||
ObjectVersionMeta {
|
ObjectVersionMeta {
|
||||||
encryption: encryption.encrypt_meta(meta)?,
|
encryption: object_encryption,
|
||||||
size: total_size,
|
size: total_size,
|
||||||
etag: etag.clone(),
|
etag: etag.clone(),
|
||||||
},
|
},
|
||||||
|
@ -261,6 +234,33 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Validate MD5 sum against content-md5 header
|
||||||
|
/// and sha256sum against signed content-sha256
|
||||||
|
pub(crate) fn ensure_checksum_matches(
|
||||||
|
data_md5sum: &[u8],
|
||||||
|
data_sha256sum: garage_util::data::FixedBytes32,
|
||||||
|
content_md5: Option<&str>,
|
||||||
|
content_sha256: Option<garage_util::data::FixedBytes32>,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
if let Some(expected_sha256) = content_sha256 {
|
||||||
|
if expected_sha256 != data_sha256sum {
|
||||||
|
return Err(Error::bad_request(
|
||||||
|
"Unable to validate x-amz-content-sha256",
|
||||||
|
));
|
||||||
|
} else {
|
||||||
|
trace!("Successfully validated x-amz-content-sha256");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if let Some(expected_md5) = content_md5 {
|
||||||
|
if expected_md5.trim_matches('"') != BASE64_STANDARD.encode(data_md5sum) {
|
||||||
|
return Err(Error::bad_request("Unable to validate content-md5"));
|
||||||
|
} else {
|
||||||
|
trace!("Successfully validated content-md5");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
/// Check that inserting this object with this size doesn't exceed bucket quotas
|
/// Check that inserting this object with this size doesn't exceed bucket quotas
|
||||||
pub(crate) async fn check_quotas(
|
pub(crate) async fn check_quotas(
|
||||||
ctx: &ReqCtx,
|
ctx: &ReqCtx,
|
||||||
|
@ -332,8 +332,7 @@ pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> +
|
||||||
part_number: u64,
|
part_number: u64,
|
||||||
first_block: Bytes,
|
first_block: Bytes,
|
||||||
chunker: &mut StreamChunker<S>,
|
chunker: &mut StreamChunker<S>,
|
||||||
checksummer: Checksummer,
|
) -> Result<(u64, GenericArray<u8, typenum::U16>, Hash, Hash), Error> {
|
||||||
) -> Result<(u64, Checksums, Hash), Error> {
|
|
||||||
let tracer = opentelemetry::global::tracer("garage");
|
let tracer = opentelemetry::global::tracer("garage");
|
||||||
|
|
||||||
let (block_tx, mut block_rx) = mpsc::channel::<Result<Bytes, Error>>(2);
|
let (block_tx, mut block_rx) = mpsc::channel::<Result<Bytes, Error>>(2);
|
||||||
|
@ -361,20 +360,20 @@ pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> +
|
||||||
|
|
||||||
let (block_tx2, mut block_rx2) = mpsc::channel::<Result<Bytes, Error>>(1);
|
let (block_tx2, mut block_rx2) = mpsc::channel::<Result<Bytes, Error>>(1);
|
||||||
let hash_stream = async {
|
let hash_stream = async {
|
||||||
let mut checksummer = checksummer;
|
let md5hasher = AsyncHasher::<Md5>::new();
|
||||||
|
let sha256hasher = AsyncHasher::<Sha256>::new();
|
||||||
while let Some(next) = block_rx.recv().await {
|
while let Some(next) = block_rx.recv().await {
|
||||||
match next {
|
match next {
|
||||||
Ok(block) => {
|
Ok(block) => {
|
||||||
block_tx2.send(Ok(block.clone())).await?;
|
block_tx2.send(Ok(block.clone())).await?;
|
||||||
checksummer = tokio::task::spawn_blocking(move || {
|
futures::future::join(
|
||||||
checksummer.update(&block);
|
md5hasher.update(block.clone()),
|
||||||
checksummer
|
sha256hasher.update(block.clone()),
|
||||||
})
|
)
|
||||||
.with_context(Context::current_with_span(
|
.with_context(Context::current_with_span(
|
||||||
tracer.start("Hash block (md5, sha256)"),
|
tracer.start("Hash block (md5, sha256)"),
|
||||||
))
|
))
|
||||||
.await
|
.await;
|
||||||
.unwrap()
|
|
||||||
}
|
}
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
block_tx2.send(Err(e)).await?;
|
block_tx2.send(Err(e)).await?;
|
||||||
|
@ -383,7 +382,10 @@ pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> +
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
drop(block_tx2);
|
drop(block_tx2);
|
||||||
Ok::<_, mpsc::error::SendError<_>>(checksummer)
|
Ok::<_, mpsc::error::SendError<_>>(futures::join!(
|
||||||
|
+						md5hasher.finalize(),
+						sha256hasher.finalize()
+					))
 				};

 				let (block_tx3, mut block_rx3) = mpsc::channel::<Result<(Bytes, u64, Hash), Error>>(1);

@@ -393,28 +395,33 @@ pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> +
 			match next {
 				Ok(block) => {
 					let unencrypted_len = block.len() as u64;
-					let res = tokio::task::spawn_blocking(move || {
-						let block = encryption.encrypt_block(block)?;
-						let hash = blake2sum(&block);
-						Ok((block, hash))
-					})
-					.with_context(Context::current_with_span(
-						tracer.start("Encrypt and hash (blake2) block"),
-					))
-					.await
-					.unwrap();
-					match res {
-						Ok((block, hash)) => {
-							if first_block_hash.is_none() {
-								first_block_hash = Some(hash);
+					let block = if encryption.is_encrypted() {
+						let res =
+							tokio::task::spawn_blocking(move || encryption.encrypt_block(block))
+								.with_context(Context::current_with_span(
+									tracer.start("Encrypt block"),
+								))
+								.await
+								.unwrap();
+						match res {
+							Ok(b) => b,
+							Err(e) => {
+								block_tx3.send(Err(e)).await?;
+								break;
 							}
-							block_tx3.send(Ok((block, unencrypted_len, hash))).await?;
-						}
-						Err(e) => {
-							block_tx3.send(Err(e)).await?;
-							break;
 						}
+					} else {
+						block
+					};
+					let hash = async_blake2sum(block.clone())
+						.with_context(Context::current_with_span(
+							tracer.start("Hash block (blake2)"),
+						))
+						.await;
+					if first_block_hash.is_none() {
+						first_block_hash = Some(hash);
 					}
+					block_tx3.send(Ok((block, unencrypted_len, hash))).await?;
 				}
 				Err(e) => {
 					block_tx3.send(Err(e)).await?;

@@ -486,10 +493,12 @@ pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> +
 	let total_size = final_result?;
 	// unwrap here is ok, because if hasher failed, it is because something failed
 	// later in the pipeline which already caused a return at the ? on previous line
+	let (data_md5sum, data_sha256sum) = stream_hash_result.unwrap();
 	let first_block_hash = block_hash_result.unwrap();
-	let checksums = stream_hash_result.unwrap().finalize();

-	Ok((total_size, checksums, first_block_hash))
+	let data_sha256sum = Hash::try_from(&data_sha256sum[..]).unwrap();

+	Ok((total_size, data_md5sum, data_sha256sum, first_block_hash))
 }

 async fn put_block_and_meta(

@@ -600,7 +609,7 @@ impl Drop for InterruptedCleanup {

 // ============ helpers ============

-pub(crate) fn get_headers(headers: &HeaderMap<HeaderValue>) -> Result<HeaderList, Error> {
+pub(crate) fn get_headers(headers: &HeaderMap<HeaderValue>) -> Result<ObjectVersionHeaders, Error> {
 	let mut ret = Vec::new();

 	// Preserve standard headers

@@ -628,7 +637,7 @@ pub(crate) fn get_headers(headers: &HeaderMap<HeaderValue>) -> Result<HeaderList
 		}
 	}

-	Ok(ret)
+	Ok(ObjectVersionHeaders(ret))
 }

 pub(crate) fn next_timestamp(existing_object: Option<&Object>) -> u64 {
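Note on the hunks above: both sides hash the object data while it streams through (MD5 plus SHA-256 on this branch, a combined `Checksummer` on main), and both push the CPU-bound hashing off the async executor. A minimal sketch of that pattern, assuming the `md-5` and `sha2` crates; `chunk_rx` and the channel wiring are hypothetical, not Garage's actual plumbing:

```rust
use md5::{Digest as _, Md5};
use sha2::Sha256;

async fn hash_stream(
    mut chunk_rx: tokio::sync::mpsc::Receiver<bytes::Bytes>,
) -> ([u8; 16], [u8; 32]) {
    // Hashing is CPU-bound, so it is moved to a blocking task,
    // mirroring the spawn_blocking calls in the diff above.
    tokio::task::spawn_blocking(move || {
        let (mut md5, mut sha256) = (Md5::new(), Sha256::new());
        while let Some(chunk) = chunk_rx.blocking_recv() {
            md5.update(&chunk);
            sha256.update(&chunk);
        }
        (md5.finalize().into(), sha256.finalize().into())
    })
    .await
    .expect("hasher task panicked")
}
```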
@@ -276,7 +276,7 @@ impl Redirect {
 				return Err(Error::bad_request("Bad XML: invalid protocol"));
 			}
 		}
-		// TODO there are probably more invalid cases, but which ones?
+		// TODO there are probably more invalide cases, but which ones?
 		Ok(())
 	}
 }
@@ -131,14 +131,6 @@ pub struct CompleteMultipartUploadResult {
 	pub key: Value,
 	#[serde(rename = "ETag")]
 	pub etag: Value,
-	#[serde(rename = "ChecksumCRC32")]
-	pub checksum_crc32: Option<Value>,
-	#[serde(rename = "ChecksumCRC32C")]
-	pub checksum_crc32c: Option<Value>,
-	#[serde(rename = "ChecksumSHA1")]
-	pub checksum_sha1: Option<Value>,
-	#[serde(rename = "ChecksumSHA256")]
-	pub checksum_sha256: Option<Value>,
 }

 #[derive(Debug, Serialize, PartialEq, Eq)]

@@ -205,14 +197,6 @@ pub struct PartItem {
 	pub part_number: IntValue,
 	#[serde(rename = "Size")]
 	pub size: IntValue,
-	#[serde(rename = "ChecksumCRC32")]
-	pub checksum_crc32: Option<Value>,
-	#[serde(rename = "ChecksumCRC32C")]
-	pub checksum_crc32c: Option<Value>,
-	#[serde(rename = "ChecksumSHA1")]
-	pub checksum_sha1: Option<Value>,
-	#[serde(rename = "ChecksumSHA256")]
-	pub checksum_sha256: Option<Value>,
 }

 #[derive(Debug, Serialize, PartialEq, Eq)]

@@ -516,10 +500,6 @@ mod tests {
 			bucket: Value("mybucket".to_string()),
 			key: Value("a/plop".to_string()),
 			etag: Value("\"3858f62230ac3c915f300c664312c11f-9\"".to_string()),
-			checksum_crc32: None,
-			checksum_crc32c: None,
-			checksum_sha1: Some(Value("ZJAnHyG8PeKz9tI8UTcHrJos39A=".into())),
-			checksum_sha256: None,
 		};
 		assert_eq!(
 			to_xml_with_header(&result)?,

@@ -529,7 +509,6 @@ mod tests {
 			<Bucket>mybucket</Bucket>\
 			<Key>a/plop</Key>\
 			<ETag>"3858f62230ac3c915f300c664312c11f-9"</ETag>\
-			<ChecksumSHA1>ZJAnHyG8PeKz9tI8UTcHrJos39A=</ChecksumSHA1>\
 			</CompleteMultipartUploadResult>"
 		);
 		Ok(())

@@ -801,22 +780,12 @@ mod tests {
 				last_modified: Value("2010-11-10T20:48:34.000Z".to_string()),
 				part_number: IntValue(2),
 				size: IntValue(10485760),
-				checksum_crc32: None,
-				checksum_crc32c: None,
-				checksum_sha256: Some(Value(
-					"5RQ3A5uk0w7ojNjvegohch4JRBBGN/cLhsNrPzfv/hA=".into(),
-				)),
-				checksum_sha1: None,
 			},
 			PartItem {
 				etag: Value("\"aaaa18db4cc2f85cedef654fccc4a4x8\"".to_string()),
 				last_modified: Value("2010-11-10T20:48:33.000Z".to_string()),
 				part_number: IntValue(3),
 				size: IntValue(10485760),
-				checksum_sha256: None,
-				checksum_crc32c: None,
-				checksum_crc32: Some(Value("ZJAnHyG8=".into())),
-				checksum_sha1: None,
 			},
 		],
 		initiator: Initiator {

@@ -851,14 +820,12 @@ mod tests {
 		<LastModified>2010-11-10T20:48:34.000Z</LastModified>\
 		<PartNumber>2</PartNumber>\
 		<Size>10485760</Size>\
-		<ChecksumSHA256>5RQ3A5uk0w7ojNjvegohch4JRBBGN/cLhsNrPzfv/hA=</ChecksumSHA256>\
 		</Part>\
 		<Part>\
 		<ETag>"aaaa18db4cc2f85cedef654fccc4a4x8"</ETag>\
 		<LastModified>2010-11-10T20:48:33.000Z</LastModified>\
 		<PartNumber>3</PartNumber>\
 		<Size>10485760</Size>\
-		<ChecksumCRC32>ZJAnHyG8=</ChecksumCRC32>\
 		</Part>\
 		<Initiator>\
 		<DisplayName>umat-user-11116a31-17b5-4fb7-9df5-b288870f11xx</DisplayName>\
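Note: the removed `Checksum*` fields follow this module's convention for optional XML elements: a `None` field emits no element at all, which is why the expected XML in the tests above contains only the one checksum that is `Some(..)`. A minimal sketch of the pattern, assuming serde with quick-xml serialization as these structs use (the struct name here is illustrative):

```rust
use serde::Serialize;

// Text content wrapper, as in the structs above.
#[derive(Debug, Serialize, PartialEq, Eq)]
pub struct Value(#[serde(rename = "$value")] pub String);

#[derive(Debug, Serialize)]
pub struct PartItemChecksums {
    // Serializes to <ChecksumCRC32>...</ChecksumCRC32> when Some,
    // and to nothing when None.
    #[serde(rename = "ChecksumCRC32")]
    pub checksum_crc32: Option<Value>,
    #[serde(rename = "ChecksumSHA256")]
    pub checksum_sha256: Option<Value>,
}
```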
@@ -47,8 +47,8 @@ pub async fn check_payload_signature(
 	let query = parse_query_map(request.uri())?;

 	if query.contains_key(&X_AMZ_ALGORITHM) {
-		// We check for presigned-URL-style authentication first, because
-		// the browser or something else could inject an Authorization header
+		// We check for presigned-URL-style authentification first, because
+		// the browser or someting else could inject an Authorization header
 		// that is totally unrelated to AWS signatures.
 		check_presigned_signature(garage, service, request, query).await
 	} else if request.headers().contains_key(AUTHORIZATION) {

@@ -132,7 +132,7 @@ async fn check_presigned_signature(
 	let authorization = Authorization::parse_presigned(&algorithm.value, &query)?;

 	// Verify that all necessary request headers are included in signed_headers
-	// For AWSv4 pre-signed URLs, the following must be included:
+	// For AWSv4 pre-signed URLs, the following must be incldued:
 	// - the Host header (mandatory)
 	// - all x-amz-* headers used in the request
 	let signed_headers = split_signed_headers(&authorization)?;

@@ -306,7 +306,7 @@ pub fn canonical_request(
 	// Note that there is also the issue of path normalization, which I hope is unrelated to the
 	// one of URI-encoding. At least in aws-sigv4 both parameters can be set independently,
 	// and rusoto_signature does not seem to do any effective path normalization, even though
-	// it mentions it in the comments (same link to the source code as above).
+	// it mentions it in the comments (same link to the souce code as above).
 	// We make the explicit choice of NOT normalizing paths in the K2V API because doing so
 	// would make non-normalized paths invalid K2V partition keys, and we don't want that.
 	let canonical_uri: std::borrow::Cow<str> = if service != "s3" {
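Note: the signed-headers rule the comments describe (Host must be signed, and every x-amz-* header used in the request must be signed) can be checked mechanically. A hedged sketch using the `http` crate; the function and error strings are illustrative, not Garage's actual implementation:

```rust
fn check_signed_headers(
    request_headers: &http::HeaderMap,
    signed_headers: &[String], // lowercase names from the presigned URL
) -> Result<(), String> {
    // The Host header is mandatory in the signed set.
    if !signed_headers.iter().any(|h| h == "host") {
        return Err("presigned URL must include Host in its signed headers".into());
    }
    // Every x-amz-* header actually present must also have been signed.
    for name in request_headers.keys() {
        let name = name.as_str(); // http header names are already lowercase
        if name.starts_with("x-amz-") && !signed_headers.iter().any(|h| h == name) {
            return Err(format!("header {} must be included in signed headers", name));
        }
    }
    Ok(())
}
```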
@@ -1,6 +1,6 @@
 [package]
 name = "garage_block"
-version = "1.0.1"
+version = "0.10.0"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -1,4 +1,3 @@
-use std::collections::HashMap;
 use std::path::PathBuf;

 use serde::{Deserialize, Serialize};

@@ -14,12 +13,9 @@ const DRIVE_NPART: usize = 1024;

 const HASH_DRIVE_BYTES: (usize, usize) = (2, 3);

-const MARKER_FILE_NAME: &str = "garage-marker";
-
 #[derive(Serialize, Deserialize, Debug, Clone)]
 pub(crate) struct DataLayout {
 	pub(crate) data_dirs: Vec<DataDir>,
-	markers: HashMap<PathBuf, String>,

 	/// Primary storage location (index in data_dirs) for each partition
 	/// = the location where the data is supposed to be, blocks are always

@@ -79,17 +75,16 @@ impl DataLayout {

 		Ok(Self {
 			data_dirs,
-			markers: HashMap::new(),
 			part_prim,
 			part_sec,
 		})
 	}

-	pub(crate) fn update(self, dirs: &DataDirEnum) -> Result<Self, Error> {
+	pub(crate) fn update(&mut self, dirs: &DataDirEnum) -> Result<(), Error> {
 		// Make list of new data directories, exit if nothing changed
 		let data_dirs = make_data_dirs(dirs)?;
 		if data_dirs == self.data_dirs {
-			return Ok(self);
+			return Ok(());
 		}

 		let total_cap = data_dirs.iter().filter_map(|x| x.capacity()).sum::<u64>();

@@ -219,43 +214,11 @@ impl DataLayout {
 		}

 		// Apply newly generated config
-		Ok(Self {
+		*self = Self {
 			data_dirs,
-			markers: self.markers,
 			part_prim,
 			part_sec,
-		})
-	}
+		};

-	pub(crate) fn check_markers(&mut self) -> Result<(), Error> {
-		let data_dirs = &self.data_dirs;
-		self.markers
-			.retain(|k, _| data_dirs.iter().any(|x| x.path == *k));
-
-		for dir in self.data_dirs.iter() {
-			let mut marker_path = dir.path.clone();
-			marker_path.push(MARKER_FILE_NAME);
-			let existing_marker = std::fs::read_to_string(&marker_path).ok();
-			match (existing_marker, self.markers.get(&dir.path)) {
-				(Some(m1), Some(m2)) => {
-					if m1 != *m2 {
-						return Err(Error::Message(format!("Mismatched content for marker file `{}` in data directory `{}`. If you moved data directories or changed their mountpoints, you should remove the `data_layout` file in Garage's metadata directory and restart Garage.", MARKER_FILE_NAME, dir.path.display())));
-					}
-				}
-				(None, Some(_)) => {
-					return Err(Error::Message(format!("Could not find expected marker file `{}` in data directory `{}`, make sure this data directory is mounted correctly.", MARKER_FILE_NAME, dir.path.display())));
-				}
-				(Some(mkr), None) => {
-					self.markers.insert(dir.path.clone(), mkr);
-				}
-				(None, None) => {
-					let mkr = hex::encode(garage_util::data::gen_uuid().as_slice());
-					std::fs::write(&marker_path, &mkr)?;
-					self.markers.insert(dir.path.clone(), mkr);
-				}
-			}
-		}
-
 		Ok(())
 	}

@@ -279,8 +242,7 @@ impl DataLayout {
 		u16::from_be_bytes([
 			hash.as_slice()[HASH_DRIVE_BYTES.0],
 			hash.as_slice()[HASH_DRIVE_BYTES.1],
-		]) as usize
-			% DRIVE_NPART
+		]) as usize % DRIVE_NPART
 	}

 	fn block_dir_from(&self, hash: &Hash, dir: &PathBuf) -> PathBuf {

@@ -293,7 +255,6 @@ impl DataLayout {
 	pub(crate) fn without_secondary_locations(&self) -> Self {
 		Self {
 			data_dirs: self.data_dirs.clone(),
-			markers: self.markers.clone(),
 			part_prim: self.part_prim.clone(),
 			part_sec: self.part_sec.iter().map(|_| vec![]).collect::<Vec<_>>(),
 		}

@@ -361,12 +322,14 @@ fn make_data_dirs(dirs: &DataDirEnum) -> Result<Vec<DataDir>, Error> {
 fn dir_not_empty(path: &PathBuf) -> Result<bool, Error> {
 	for entry in std::fs::read_dir(&path)? {
 		let dir = entry?;
-		let ft = dir.file_type()?;
-		let name = dir.file_name().into_string().ok();
-		if ft.is_file() && name.as_deref() == Some(MARKER_FILE_NAME) {
-			return Ok(true);
-		}
-		if ft.is_dir() && name.and_then(|hex| hex::decode(&hex).ok()).is_some() {
+		if dir.file_type()?.is_dir()
+			&& dir
+				.file_name()
+				.into_string()
+				.ok()
+				.and_then(|hex| hex::decode(&hex).ok())
+				.is_some()
+		{
 			return Ok(true);
 		}
 	}
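Note: the partition computation in the `@@ -279` hunk is identical on both sides (only reformatted): bytes 2 and 3 of a block hash (per `HASH_DRIVE_BYTES`) select one of `DRIVE_NPART = 1024` partitions. A self-contained worked example:

```rust
fn partition_from(hash: &[u8; 32]) -> usize {
    const DRIVE_NPART: usize = 1024;
    // Bytes 2 and 3, big-endian, modulo the partition count.
    u16::from_be_bytes([hash[2], hash[3]]) as usize % DRIVE_NPART
}

#[test]
fn partition_example() {
    let mut hash = [0u8; 32];
    hash[2] = 0x12; // high byte
    hash[3] = 0x34; // low byte
    // 0x1234 = 4660, and 4660 % 1024 = 564
    assert_eq!(partition_from(&hash), 564);
}
```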
@@ -11,4 +11,3 @@ mod metrics;
 mod rc;

 pub use block::zstd_encode;
-pub use rc::CalculateRefcount;
@@ -1,4 +1,3 @@
-use std::convert::TryInto;
 use std::path::PathBuf;
 use std::sync::Arc;
 use std::time::Duration;

@@ -11,7 +10,7 @@ use serde::{Deserialize, Serialize};

 use tokio::fs;
 use tokio::io::{AsyncReadExt, AsyncWriteExt, BufReader};
-use tokio::sync::{mpsc, Mutex, MutexGuard, Semaphore};
+use tokio::sync::{mpsc, Mutex, MutexGuard};

 use opentelemetry::{
 	trace::{FutureExt as OtelFutureExt, TraceContextExt, Tracer},

@@ -23,7 +22,7 @@ use garage_net::stream::{read_stream_to_end, stream_asyncread, ByteStream};
 use garage_db as db;

 use garage_util::background::{vars, BackgroundRunner};
-use garage_util::config::Config;
+use garage_util::config::DataDirEnum;
 use garage_util::data::*;
 use garage_util::error::*;
 use garage_util::metrics::RecordDuration;

@@ -85,16 +84,14 @@ pub struct BlockManager {

 	data_fsync: bool,
 	compression_level: Option<i32>,
-	disable_scrub: bool,

 	mutation_lock: Vec<Mutex<BlockManagerLocked>>,

-	pub rc: BlockRc,
+	pub(crate) rc: BlockRc,
 	pub resync: BlockResyncManager,

 	pub(crate) system: Arc<System>,
 	pub(crate) endpoint: Arc<Endpoint<BlockRpc, Self>>,
-	buffer_kb_semaphore: Arc<Semaphore>,

 	pub(crate) metrics: BlockManagerMetrics,

@@ -122,22 +119,24 @@ struct BlockManagerLocked();
 impl BlockManager {
 	pub fn new(
 		db: &db::Db,
-		config: &Config,
+		data_dir: DataDirEnum,
+		data_fsync: bool,
+		compression_level: Option<i32>,
 		replication: TableShardedReplication,
 		system: Arc<System>,
 	) -> Result<Arc<Self>, Error> {
 		// Load or compute layout, i.e. assignment of data blocks to the different data directories
 		let data_layout_persister: Persister<DataLayout> =
 			Persister::new(&system.metadata_dir, "data_layout");
-		let mut data_layout = match data_layout_persister.load() {
-			Ok(layout) => layout
-				.update(&config.data_dir)
-				.ok_or_message("invalid data_dir config")?,
-			Err(_) => {
-				DataLayout::initialize(&config.data_dir).ok_or_message("invalid data_dir config")?
+		let data_layout = match data_layout_persister.load() {
+			Ok(mut layout) => {
+				layout
+					.update(&data_dir)
+					.ok_or_message("invalid data_dir config")?;
+				layout
 			}
+			Err(_) => DataLayout::initialize(&data_dir).ok_or_message("invalid data_dir config")?,
 		};
-		data_layout.check_markers()?;
 		data_layout_persister
 			.save(&data_layout)
 			.expect("cannot save data_layout");

@@ -154,14 +153,11 @@ impl BlockManager {
 			.netapp
 			.endpoint("garage_block/manager.rs/Rpc".to_string());

-		let buffer_kb_semaphore = Arc::new(Semaphore::new(config.block_ram_buffer_max / 1024));
-
 		let metrics = BlockManagerMetrics::new(
-			config.compression_level,
-			rc.rc_table.clone(),
+			compression_level,
+			rc.rc.clone(),
 			resync.queue.clone(),
 			resync.errors.clone(),
-			buffer_kb_semaphore.clone(),
 		);

 		let scrub_persister = PersisterShared::new(&system.metadata_dir, "scrub_info");

@@ -170,9 +166,8 @@ impl BlockManager {
 			replication,
 			data_layout: ArcSwap::new(Arc::new(data_layout)),
 			data_layout_persister,
-			data_fsync: config.data_fsync,
-			disable_scrub: config.disable_scrub,
-			compression_level: config.compression_level,
+			data_fsync,
+			compression_level,
 			mutation_lock: vec![(); MUTEX_COUNT]
 				.iter()
 				.map(|_| Mutex::new(BlockManagerLocked()))

@@ -181,7 +176,6 @@ impl BlockManager {
 			resync,
 			system,
 			endpoint,
-			buffer_kb_semaphore,
 			metrics,
 			scrub_persister,
 			tx_scrub_command: ArcSwapOption::new(None),

@@ -200,43 +194,33 @@ impl BlockManager {
 		}

 		// Spawn scrub worker
-		if !self.disable_scrub {
-			let (scrub_tx, scrub_rx) = mpsc::channel(1);
-			self.tx_scrub_command.store(Some(Arc::new(scrub_tx)));
-			bg.spawn_worker(ScrubWorker::new(
-				self.clone(),
-				scrub_rx,
-				self.scrub_persister.clone(),
-			));
-		}
+		let (scrub_tx, scrub_rx) = mpsc::channel(1);
+		self.tx_scrub_command.store(Some(Arc::new(scrub_tx)));
+		bg.spawn_worker(ScrubWorker::new(
+			self.clone(),
+			scrub_rx,
+			self.scrub_persister.clone(),
+		));
 	}

 	pub fn register_bg_vars(&self, vars: &mut vars::BgVars) {
 		self.resync.register_bg_vars(vars);

-		if !self.disable_scrub {
-			vars.register_rw(
-				&self.scrub_persister,
-				"scrub-tranquility",
-				|p| p.get_with(|x| x.tranquility),
-				|p, tranquility| p.set_with(|x| x.tranquility = tranquility),
-			);
-			vars.register_ro(&self.scrub_persister, "scrub-last-completed", |p| {
-				p.get_with(|x| msec_to_rfc3339(x.time_last_complete_scrub))
-			});
-			vars.register_ro(&self.scrub_persister, "scrub-next-run", |p| {
-				p.get_with(|x| msec_to_rfc3339(x.time_next_run_scrub))
-			});
-			vars.register_ro(&self.scrub_persister, "scrub-corruptions_detected", |p| {
-				p.get_with(|x| x.corruptions_detected)
-			});
-		}
-	}
+		vars.register_rw(
+			&self.scrub_persister,
+			"scrub-tranquility",
+			|p| p.get_with(|x| x.tranquility),
+			|p, tranquility| p.set_with(|x| x.tranquility = tranquility),
+		);
+		vars.register_ro(&self.scrub_persister, "scrub-last-completed", |p| {
+			p.get_with(|x| msec_to_rfc3339(x.time_last_complete_scrub))
+		});
+		vars.register_ro(&self.scrub_persister, "scrub-next-run", |p| {
+			p.get_with(|x| msec_to_rfc3339(x.time_next_run_scrub))
+		});
+		vars.register_ro(&self.scrub_persister, "scrub-corruptions_detected", |p| {
+			p.get_with(|x| x.corruptions_detected)
+		});
+	}

-	/// Initialization: set how block references are recalculated
-	/// for repair operations
-	pub fn set_recalc_rc(&self, recalc: Vec<CalculateRefcount>) {
-		self.rc.recalc_rc.store(Some(Arc::new(recalc)));
-	}

 	/// Ask nodes that might have a (possibly compressed) block for it

@@ -244,16 +228,10 @@ impl BlockManager {
 	async fn rpc_get_raw_block_streaming(
 		&self,
 		hash: &Hash,
-		priority: RequestPriority,
 		order_tag: Option<OrderTag>,
 	) -> Result<DataBlockStream, Error> {
-		self.rpc_get_raw_block_internal(
-			hash,
-			priority,
-			order_tag,
-			|stream| async move { Ok(stream) },
-		)
-		.await
+		self.rpc_get_raw_block_internal(hash, order_tag, |stream| async move { Ok(stream) })
+			.await
 	}

 	/// Ask nodes that might have a (possibly compressed) block for it

@@ -261,10 +239,9 @@ impl BlockManager {
 	pub(crate) async fn rpc_get_raw_block(
 		&self,
 		hash: &Hash,
-		priority: RequestPriority,
 		order_tag: Option<OrderTag>,
 	) -> Result<DataBlock, Error> {
-		self.rpc_get_raw_block_internal(hash, priority, order_tag, |block_stream| async move {
+		self.rpc_get_raw_block_internal(hash, order_tag, |block_stream| async move {
 			let (header, stream) = block_stream.into_parts();
 			read_stream_to_end(stream)
 				.await

@@ -277,7 +254,6 @@ impl BlockManager {
 	async fn rpc_get_raw_block_internal<F, Fut, T>(
 		&self,
 		hash: &Hash,
-		priority: RequestPriority,
 		order_tag: Option<OrderTag>,
 		f: F,
 	) -> Result<T, Error>

@@ -295,7 +271,7 @@ impl BlockManager {
 			let rpc = self.endpoint.call_streaming(
 				&node_id,
 				BlockRpc::GetBlock(*hash, order_tag),
-				priority,
+				PRIO_NORMAL | PRIO_SECONDARY,
 			);
 			tokio::select! {
 				res = rpc => {

@@ -334,9 +310,9 @@ impl BlockManager {
 			};
 		}

-		let err = Error::MissingBlock(*hash);
-		debug!("{}", err);
-		Err(err)
+		let msg = format!("Get block {:?}: no node returned a valid block", hash);
+		debug!("{}", msg);
+		Err(Error::Message(msg))
 	}

 	// ---- Public interface ----

@@ -347,9 +323,7 @@ impl BlockManager {
 		hash: &Hash,
 		order_tag: Option<OrderTag>,
 	) -> Result<ByteStream, Error> {
-		let block_stream = self
-			.rpc_get_raw_block_streaming(hash, PRIO_NORMAL | PRIO_SECONDARY, order_tag)
-			.await?;
+		let block_stream = self.rpc_get_raw_block_streaming(hash, order_tag).await?;
 		let (header, stream) = block_stream.into_parts();
 		match header {
 			DataBlockHeader::Plain => Ok(stream),

@@ -377,14 +351,6 @@ impl BlockManager {
 		let (header, bytes) = DataBlock::from_buffer(data, compression_level)
 			.await
 			.into_parts();

-		let permit = self
-			.buffer_kb_semaphore
-			.clone()
-			.acquire_many_owned((bytes.len() / 1024).try_into().unwrap())
-			.await
-			.ok_or_message("could not reserve space for buffer of data to send to remote nodes")?;
-
 		let put_block_rpc =
 			Req::new(BlockRpc::PutBlock { hash, header })?.with_stream_from_buffer(bytes);
 		let put_block_rpc = if let Some(tag) = order_tag {

@@ -400,7 +366,6 @@ impl BlockManager {
 			who.as_ref(),
 			put_block_rpc,
 			RequestStrategy::with_priority(PRIO_NORMAL | PRIO_SECONDARY)
-				.with_drop_on_completion(permit)
 				.with_quorum(self.replication.write_quorum()),
 		)
 		.await?;

@@ -410,7 +375,7 @@ impl BlockManager {

 	/// Get number of items in the refcount table
 	pub fn rc_len(&self) -> Result<usize, Error> {
-		Ok(self.rc.rc_table.len()?)
+		Ok(self.rc.rc.len()?)
 	}

 	/// Send command to start/stop/manager scrub worker
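Note: the `-` lines here (and the matching metrics below) implement a RAM budget for blocks buffered on their way to remote nodes: a `tokio` semaphore holds one permit per KiB of budget, and each transfer holds permits proportional to its size until the RPC completes. A minimal sketch of that pattern; the RPC call itself is elided:

```rust
use std::sync::Arc;
use tokio::sync::Semaphore;

async fn send_with_budget(buffer_kb_semaphore: Arc<Semaphore>, bytes: Vec<u8>) {
    // Block here until enough KiB of buffer budget is free.
    let permit = buffer_kb_semaphore
        .acquire_many_owned((bytes.len() / 1024) as u32)
        .await
        .expect("semaphore closed");
    // ... send `bytes` to the remote nodes here ...
    drop(permit); // releases the budget once the transfer is done
}
```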
@@ -1,7 +1,3 @@
-use std::sync::Arc;
-
-use tokio::sync::Semaphore;
-
 use opentelemetry::{global, metrics::*};

 use garage_db as db;

@@ -12,7 +8,6 @@ pub struct BlockManagerMetrics {
 	pub(crate) _rc_size: ValueObserver<u64>,
 	pub(crate) _resync_queue_len: ValueObserver<u64>,
 	pub(crate) _resync_errored_blocks: ValueObserver<u64>,
-	pub(crate) _buffer_free_kb: ValueObserver<u64>,

 	pub(crate) resync_counter: BoundCounter<u64>,
 	pub(crate) resync_error_counter: BoundCounter<u64>,

@@ -35,7 +30,6 @@ impl BlockManagerMetrics {
 		rc_tree: db::Tree,
 		resync_queue: db::Tree,
 		resync_errors: db::Tree,
-		buffer_semaphore: Arc<Semaphore>,
 	) -> Self {
 		let meter = global::meter("garage_model/block");
 		Self {

@@ -75,15 +69,6 @@ impl BlockManagerMetrics {
 				.with_description("Number of block hashes whose last resync resulted in an error")
 				.init(),

-			_buffer_free_kb: meter
-				.u64_value_observer("block.ram_buffer_free_kb", move |observer| {
-					observer.observe(buffer_semaphore.available_permits() as u64, &[])
-				})
-				.with_description(
-					"Available RAM in KiB to use for buffering data blocks to be written to remote nodes",
-				)
-				.init(),
-
 			resync_counter: meter
 				.u64_counter("block.resync_counter")
 				.with_description("Number of calls to resync_block")
@@ -1,7 +1,5 @@
 use std::convert::TryInto;

-use arc_swap::ArcSwapOption;
-
 use garage_db as db;

 use garage_util::data::*;

@@ -10,20 +8,13 @@ use garage_util::time::*;

 use crate::manager::BLOCK_GC_DELAY;

-pub type CalculateRefcount =
-	Box<dyn Fn(&db::Transaction, &Hash) -> db::TxResult<usize, Error> + Send + Sync>;
-
 pub struct BlockRc {
-	pub rc_table: db::Tree,
-	pub(crate) recalc_rc: ArcSwapOption<Vec<CalculateRefcount>>,
+	pub(crate) rc: db::Tree,
 }

 impl BlockRc {
 	pub(crate) fn new(rc: db::Tree) -> Self {
-		Self {
-			rc_table: rc,
-			recalc_rc: ArcSwapOption::new(None),
-		}
+		Self { rc }
 	}

 	/// Increment the reference counter associated to a hash.

@@ -33,9 +24,9 @@ impl BlockRc {
 		tx: &mut db::Transaction,
 		hash: &Hash,
 	) -> db::TxOpResult<bool> {
-		let old_rc = RcEntry::parse_opt(tx.get(&self.rc_table, hash)?);
+		let old_rc = RcEntry::parse_opt(tx.get(&self.rc, hash)?);
 		match old_rc.increment().serialize() {
-			Some(x) => tx.insert(&self.rc_table, hash, x)?,
+			Some(x) => tx.insert(&self.rc, hash, x)?,
 			None => unreachable!(),
 		};
 		Ok(old_rc.is_zero())

@@ -48,28 +39,28 @@ impl BlockRc {
 		tx: &mut db::Transaction,
 		hash: &Hash,
 	) -> db::TxOpResult<bool> {
-		let new_rc = RcEntry::parse_opt(tx.get(&self.rc_table, hash)?).decrement();
+		let new_rc = RcEntry::parse_opt(tx.get(&self.rc, hash)?).decrement();
 		match new_rc.serialize() {
-			Some(x) => tx.insert(&self.rc_table, hash, x)?,
-			None => tx.remove(&self.rc_table, hash)?,
+			Some(x) => tx.insert(&self.rc, hash, x)?,
+			None => tx.remove(&self.rc, hash)?,
 		};
 		Ok(matches!(new_rc, RcEntry::Deletable { .. }))
 	}

 	/// Read a block's reference count
 	pub(crate) fn get_block_rc(&self, hash: &Hash) -> Result<RcEntry, Error> {
-		Ok(RcEntry::parse_opt(self.rc_table.get(hash.as_ref())?))
+		Ok(RcEntry::parse_opt(self.rc.get(hash.as_ref())?))
 	}

 	/// Delete an entry in the RC table if it is deletable and the
 	/// deletion time has passed
 	pub(crate) fn clear_deleted_block_rc(&self, hash: &Hash) -> Result<(), Error> {
 		let now = now_msec();
-		self.rc_table.db().transaction(|tx| {
-			let rcval = RcEntry::parse_opt(tx.get(&self.rc_table, hash)?);
+		self.rc.db().transaction(|tx| {
+			let rcval = RcEntry::parse_opt(tx.get(&self.rc, hash)?);
 			match rcval {
 				RcEntry::Deletable { at_time } if now > at_time => {
-					tx.remove(&self.rc_table, hash)?;
+					tx.remove(&self.rc, hash)?;
 				}
 				_ => (),
 			};

@@ -77,58 +68,6 @@ impl BlockRc {
 		})?;
 		Ok(())
 	}
-
-	/// Recalculate the reference counter of a block
-	/// to fix potential inconsistencies
-	pub fn recalculate_rc(&self, hash: &Hash) -> Result<(usize, bool), Error> {
-		if let Some(recalc_fns) = self.recalc_rc.load().as_ref() {
-			trace!("Repair block RC for {:?}", hash);
-			let res = self
-				.rc_table
-				.db()
-				.transaction(|tx| {
-					let mut cnt = 0;
-					for f in recalc_fns.iter() {
-						cnt += f(&tx, hash)?;
-					}
-					let old_rc = RcEntry::parse_opt(tx.get(&self.rc_table, hash)?);
-					trace!(
-						"Block RC for {:?}: stored={}, calculated={}",
-						hash,
-						old_rc.as_u64(),
-						cnt
-					);
-					if cnt as u64 != old_rc.as_u64() {
-						warn!(
-							"Fixing inconsistent block RC for {:?}: was {}, should be {}",
-							hash,
-							old_rc.as_u64(),
-							cnt
-						);
-						let new_rc = if cnt > 0 {
-							RcEntry::Present { count: cnt as u64 }
-						} else {
-							RcEntry::Deletable {
-								at_time: now_msec() + BLOCK_GC_DELAY.as_millis() as u64,
-							}
-						};
-						tx.insert(&self.rc_table, hash, new_rc.serialize().unwrap())?;
-						Ok((cnt, true))
-					} else {
-						Ok((cnt, false))
-					}
-				})
-				.map_err(Error::from);
-			if let Err(e) = &res {
-				error!("Failed to fix RC for block {:?}: {}", hash, e);
-			}
-			res
-		} else {
-			Err(Error::Message(
-				"Block RC recalculation is not available at this point".into(),
-			))
-		}
-	}
 }

 /// Describes the state of the reference counter for a block
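Note: the `-` side's `recalc_rc` hook takes a list of `CalculateRefcount` closures (the type alias shown above) that recount references to a block inside a db transaction. A hedged sketch of how a caller would wire it up; `count_block_refs` is a hypothetical helper, not something in this diff:

```rust
fn wire_rc_recalc(manager: &BlockManager) {
    // Each source recounts, inside the transaction, how many entries
    // still reference the given block hash.
    let sources: Vec<CalculateRefcount> =
        vec![Box::new(|tx, hash| count_block_refs(tx, hash))];
    manager.set_recalc_rc(sources);
}
```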
@@ -107,7 +107,7 @@ impl Worker for RepairWorker {
 			for entry in self
 				.manager
 				.rc
-				.rc_table
+				.rc
 				.range::<&[u8], _>((start_bound, Bound::Unbounded))?
 			{
 				let (hash, _) = entry?;
@@ -105,7 +105,7 @@ impl BlockResyncManager {
 		}
 	}

-	/// Get length of resync queue
+	/// Get lenght of resync queue
 	pub fn queue_len(&self) -> Result<usize, Error> {
 		Ok(self.queue.len()?)
 	}

@@ -185,10 +185,10 @@ impl BlockResyncManager {
 	//
 	// - resync.errors: a tree that indicates for each block
 	//   if the last resync resulted in an error, and if so,
-	//   the following two information (see the ErrorCounter struct):
+	//   the following two informations (see the ErrorCounter struct):
 	//   - how many consecutive resync errors for this block?
 	//   - when was the last try?
-	//   These two information are used to implement an
+	//   These two informations are used to implement an
 	//   exponential backoff retry strategy.
 	//   The key in this tree is the 32-byte hash of the block,
 	//   and the value is the encoded ErrorCounter value.

@@ -367,13 +367,6 @@ impl BlockResyncManager {
 		}

 		if exists && rc.is_deletable() {
-			if manager.rc.recalculate_rc(hash)?.0 > 0 {
-				return Err(Error::Message(format!(
-					"Refcount for block {:?} was inconsistent, retrying later",
-					hash
-				)));
-			}
-
 			info!("Resync block {:?}: offloading and deleting", hash);
 			let existing_path = existing_path.unwrap();

@@ -436,7 +429,7 @@ impl BlockResyncManager {
 				&manager.endpoint,
 				&need_nodes,
 				put_block_message,
-				RequestStrategy::with_priority(PRIO_BACKGROUND | PRIO_SECONDARY)
+				RequestStrategy::with_priority(PRIO_BACKGROUND)
 					.with_quorum(need_nodes.len()),
 			)
 			.await

@@ -460,17 +453,7 @@ impl BlockResyncManager {
 				hash
 			);

-			let block_data = manager
-				.rpc_get_raw_block(hash, PRIO_BACKGROUND | PRIO_SECONDARY, None)
-				.await;
-			if matches!(block_data, Err(Error::MissingBlock(_))) {
-				warn!(
-					"Could not fetch needed block {:?}, no node returned valid data. Checking that refcount is correct.",
-					hash
-				);
-				manager.rc.recalculate_rc(hash)?;
-			}
-			let block_data = block_data?;
+			let block_data = manager.rpc_get_raw_block(hash, None).await?;

 			manager.metrics.resync_recv_counter.add(1);
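Note: the comment in the `@@ -185` hunk describes `ErrorCounter` as storing the consecutive error count and the time of the last attempt, from which an exponential backoff is derived. A sketch under those assumptions; the base delay and cap are illustrative, not Garage's actual values:

```rust
struct ErrorCounter {
    errors: u64,        // consecutive resync errors for this block
    last_try_msec: u64, // when was the last try? (unix time, ms)
}

impl ErrorCounter {
    /// Earliest time (ms) at which this block should be retried.
    fn next_try_msec(&self) -> u64 {
        let base_delay_msec: u64 = 60_000; // 1 minute
        // Delay doubles with each consecutive error, capped at ~1 day.
        let delay = base_delay_msec
            .saturating_mul(1 << self.errors.min(10))
            .min(24 * 3600 * 1000);
        self.last_try_msec + delay
    }
}
```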
@@ -1,6 +1,6 @@
 [package]
 name = "garage_db"
-version = "1.0.1"
+version = "0.10.0"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"

@@ -14,12 +14,11 @@ path = "lib.rs"
 [dependencies]
 err-derive.workspace = true
 hexdump.workspace = true
+ouroboros.workspace = true
 tracing.workspace = true

 heed = { workspace = true, optional = true }
-rusqlite = { workspace = true, optional = true, features = ["backup"] }
-r2d2 = { workspace = true, optional = true }
-r2d2_sqlite = { workspace = true, optional = true }
+rusqlite = { workspace = true, optional = true }

 [dev-dependencies]
 mktemp.workspace = true

@@ -28,4 +27,4 @@ mktemp.workspace = true
 default = [ "lmdb", "sqlite" ]
 bundled-libs = [ "rusqlite?/bundled" ]
 lmdb = [ "heed" ]
-sqlite = [ "rusqlite", "r2d2", "r2d2_sqlite" ]
+sqlite = [ "rusqlite" ]
@@ -15,7 +15,6 @@ use core::ops::{Bound, RangeBounds};

 use std::borrow::Cow;
 use std::cell::Cell;
-use std::path::PathBuf;
 use std::sync::Arc;

 use err_derive::Error;

@@ -45,12 +44,6 @@ pub type TxValueIter<'a> = Box<dyn std::iter::Iterator<Item = TxOpResult<(Value,
 #[error(display = "{}", _0)]
 pub struct Error(pub Cow<'static, str>);

-impl From<std::io::Error> for Error {
-	fn from(e: std::io::Error) -> Error {
-		Error(format!("IO: {}", e).into())
-	}
-}
-
 pub type Result<T> = std::result::Result<T, Error>;

 #[derive(Debug, Error)]

@@ -122,7 +115,7 @@ impl Db {
 				_ => unreachable!(),
 			},
 			Err(TxError::Db(e2)) => match ret {
-				// Ok was stored -> the error occurred when finalizing
+				// Ok was stored -> the error occured when finalizing
 				// transaction
 				Ok(_) => Err(TxError::Db(e2)),
 				// An error was already stored: that's the one we want to

@@ -133,10 +126,6 @@ impl Db {
 		}
 	}

-	pub fn snapshot(&self, path: &PathBuf) -> Result<()> {
-		self.0.snapshot(path)
-	}
-
 	pub fn import(&self, other: &Db) -> Result<()> {
 		let existing_trees = self.list_trees()?;
 		if !existing_trees.is_empty() {

@@ -211,12 +200,16 @@ impl Tree {

 	/// Returns the old value if there was one
 	#[inline]
-	pub fn insert<T: AsRef<[u8]>, U: AsRef<[u8]>>(&self, key: T, value: U) -> Result<()> {
+	pub fn insert<T: AsRef<[u8]>, U: AsRef<[u8]>>(
+		&self,
+		key: T,
+		value: U,
+	) -> Result<Option<Value>> {
 		self.0.insert(self.1, key.as_ref(), value.as_ref())
 	}
 	/// Returns the old value if there was one
 	#[inline]
-	pub fn remove<T: AsRef<[u8]>>(&self, key: T) -> Result<()> {
+	pub fn remove<T: AsRef<[u8]>>(&self, key: T) -> Result<Option<Value>> {
 		self.0.remove(self.1, key.as_ref())
 	}
 	/// Clears all values from the tree

@@ -274,12 +267,12 @@ impl<'a> Transaction<'a> {
 		tree: &Tree,
 		key: T,
 		value: U,
-	) -> TxOpResult<()> {
+	) -> TxOpResult<Option<Value>> {
 		self.tx.insert(tree.1, key.as_ref(), value.as_ref())
 	}
 	/// Returns the old value if there was one
 	#[inline]
-	pub fn remove<T: AsRef<[u8]>>(&mut self, tree: &Tree, key: T) -> TxOpResult<()> {
+	pub fn remove<T: AsRef<[u8]>>(&mut self, tree: &Tree, key: T) -> TxOpResult<Option<Value>> {
 		self.tx.remove(tree.1, key.as_ref())
 	}
 	/// Clears all values in a tree

@@ -330,13 +323,12 @@ pub(crate) trait IDb: Send + Sync {
 	fn engine(&self) -> String;
 	fn open_tree(&self, name: &str) -> Result<usize>;
 	fn list_trees(&self) -> Result<Vec<String>>;
-	fn snapshot(&self, path: &PathBuf) -> Result<()>;

 	fn get(&self, tree: usize, key: &[u8]) -> Result<Option<Value>>;
 	fn len(&self, tree: usize) -> Result<usize>;

-	fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<()>;
-	fn remove(&self, tree: usize, key: &[u8]) -> Result<()>;
+	fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<Option<Value>>;
+	fn remove(&self, tree: usize, key: &[u8]) -> Result<Option<Value>>;
 	fn clear(&self, tree: usize) -> Result<()>;

 	fn iter(&self, tree: usize) -> Result<ValueIter<'_>>;

@@ -362,8 +354,8 @@ pub(crate) trait ITx {
 	fn get(&self, tree: usize, key: &[u8]) -> TxOpResult<Option<Value>>;
 	fn len(&self, tree: usize) -> TxOpResult<usize>;

-	fn insert(&mut self, tree: usize, key: &[u8], value: &[u8]) -> TxOpResult<()>;
-	fn remove(&mut self, tree: usize, key: &[u8]) -> TxOpResult<()>;
+	fn insert(&mut self, tree: usize, key: &[u8], value: &[u8]) -> TxOpResult<Option<Value>>;
+	fn remove(&mut self, tree: usize, key: &[u8]) -> TxOpResult<Option<Value>>;
 	fn clear(&mut self, tree: usize) -> TxOpResult<()>;

 	fn iter(&self, tree: usize) -> TxOpResult<TxValueIter<'_>>;
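Note: the visible effect of the signature change above: on this branch, `insert` and `remove` report the previous value, so read-modify-write call sites need no separate `get`. A minimal sketch against the `Tree` API shown above:

```rust
/// Insert `value` only counts as a fresh write if nothing was stored before.
fn first_insert_wins(tree: &Tree, key: &[u8], value: &[u8]) -> Result<bool> {
    // `None` means no value was previously stored under `key`.
    Ok(tree.insert(key, value)?.is_none())
}
```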
@@ -3,7 +3,6 @@ use core::ptr::NonNull;

 use std::collections::HashMap;
 use std::convert::TryInto;
-use std::path::PathBuf;
 use std::pin::Pin;
 use std::sync::{Arc, RwLock};

@@ -104,15 +103,6 @@ impl IDb for LmdbDb {
 		Ok(ret2)
 	}

-	fn snapshot(&self, to: &PathBuf) -> Result<()> {
-		std::fs::create_dir_all(to)?;
-		let mut path = to.clone();
-		path.push("data.mdb");
-		self.db
-			.copy_to_path(path, heed::CompactionOption::Disabled)?;
-		Ok(())
-	}
-
 	// ----

 	fn get(&self, tree: usize, key: &[u8]) -> Result<Option<Value>> {

@@ -132,20 +122,22 @@ impl IDb for LmdbDb {
 		Ok(tree.len(&tx)?.try_into().unwrap())
 	}

-	fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<()> {
+	fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<Option<Value>> {
 		let tree = self.get_tree(tree)?;
 		let mut tx = self.db.write_txn()?;
+		let old_val = tree.get(&tx, key)?.map(Vec::from);
 		tree.put(&mut tx, key, value)?;
 		tx.commit()?;
-		Ok(())
+		Ok(old_val)
 	}

-	fn remove(&self, tree: usize, key: &[u8]) -> Result<()> {
+	fn remove(&self, tree: usize, key: &[u8]) -> Result<Option<Value>> {
 		let tree = self.get_tree(tree)?;
 		let mut tx = self.db.write_txn()?;
+		let old_val = tree.get(&tx, key)?.map(Vec::from);
 		tree.delete(&mut tx, key)?;
 		tx.commit()?;
-		Ok(())
+		Ok(old_val)
 	}

 	fn clear(&self, tree: usize) -> Result<()> {

@@ -233,7 +225,7 @@ impl<'a> LmdbTx<'a> {
 	fn get_tree(&self, i: usize) -> TxOpResult<&Database> {
 		self.trees.get(i).ok_or_else(|| {
 			TxOpError(Error(
-				"invalid tree id (it might have been opened after the transaction started)".into(),
+				"invalid tree id (it might have been openned after the transaction started)".into(),
 			))
 		})
 	}

@@ -252,15 +244,17 @@ impl<'a> ITx for LmdbTx<'a> {
 		Ok(tree.len(&self.tx)? as usize)
 	}

-	fn insert(&mut self, tree: usize, key: &[u8], value: &[u8]) -> TxOpResult<()> {
+	fn insert(&mut self, tree: usize, key: &[u8], value: &[u8]) -> TxOpResult<Option<Value>> {
 		let tree = *self.get_tree(tree)?;
+		let old_val = tree.get(&self.tx, key)?.map(Vec::from);
 		tree.put(&mut self.tx, key, value)?;
-		Ok(())
+		Ok(old_val)
 	}
-	fn remove(&mut self, tree: usize, key: &[u8]) -> TxOpResult<()> {
+	fn remove(&mut self, tree: usize, key: &[u8]) -> TxOpResult<Option<Value>> {
 		let tree = *self.get_tree(tree)?;
+		let old_val = tree.get(&self.tx, key)?.map(Vec::from);
 		tree.delete(&mut self.tx, key)?;
-		Ok(())
+		Ok(old_val)
 	}
 	fn clear(&mut self, tree: usize) -> TxOpResult<()> {
 		let tree = *self.get_tree(tree)?;
@@ -36,7 +36,7 @@ impl std::str::FromStr for Engine {
 		match text {
 			"lmdb" | "heed" => Ok(Self::Lmdb),
 			"sqlite" | "sqlite3" | "rusqlite" => Ok(Self::Sqlite),
-			"sled" => Err(Error("Sled is no longer supported as a database engine. Converting your old metadata db can be done using an older Garage binary (e.g. v0.9.4).".into())),
+			"sled" => Err(Error("Sled is no longer supported as a database engine. Converting your old metadata db can be done using an older Garage binary (e.g. v0.9.3).".into())),
 			kind => Err(Error(
 				format!(
 					"Invalid DB engine: {} (options are: lmdb, sqlite)",

@@ -68,8 +68,14 @@ pub fn open_db(path: &PathBuf, engine: Engine, opt: &OpenOpt) -> Result<Db> {
 		#[cfg(feature = "sqlite")]
 		Engine::Sqlite => {
 			info!("Opening Sqlite database at: {}", path.display());
-			let manager = r2d2_sqlite::SqliteConnectionManager::file(path);
-			Ok(crate::sqlite_adapter::SqliteDb::new(manager, opt.fsync)?)
+			let db = crate::sqlite_adapter::rusqlite::Connection::open(&path)?;
+			db.pragma_update(None, "journal_mode", "WAL")?;
+			if opt.fsync {
+				db.pragma_update(None, "synchronous", "NORMAL")?;
+			} else {
+				db.pragma_update(None, "synchronous", "OFF")?;
+			}
+			Ok(crate::sqlite_adapter::SqliteDb::init(db))
 		}

 		// ---- LMDB DB ----

@@ -92,7 +98,6 @@ pub fn open_db(path: &PathBuf, engine: Engine, opt: &OpenOpt) -> Result<Db> {
 		env_builder.map_size(map_size);
 		env_builder.max_readers(2048);
 		unsafe {
-			env_builder.flag(crate::lmdb_adapter::heed::flags::Flags::MdbNoRdAhead);
 			env_builder.flag(crate::lmdb_adapter::heed::flags::Flags::MdbNoMetaSync);
 			if !opt.fsync {
 				env_builder.flag(heed::flags::Flags::MdbNoSync);
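Note: both sides apply the same SQLite tuning, just in different places (at open time on this branch, inside a pool initializer on main). A standalone sketch of that tuning with `rusqlite`:

```rust
fn tune_sqlite(db: &rusqlite::Connection, fsync: bool) -> rusqlite::Result<()> {
    // WAL avoids writers blocking readers.
    db.pragma_update(None, "journal_mode", "WAL")?;
    // Under WAL, NORMAL is durable enough when fsync is requested;
    // OFF trades durability for speed when it is not.
    db.pragma_update(None, "synchronous", if fsync { "NORMAL" } else { "OFF" })?;
    Ok(())
}
```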
@@ -1,14 +1,10 @@
 use core::ops::Bound;
 
-use std::marker::PhantomPinned;
-use std::path::PathBuf;
-use std::pin::Pin;
-use std::ptr::NonNull;
-use std::sync::{Arc, Mutex, RwLock};
+use std::borrow::BorrowMut;
+use std::sync::{Arc, Mutex, MutexGuard};
 
-use r2d2::Pool;
-use r2d2_sqlite::SqliteConnectionManager;
-use rusqlite::{params, Rows, Statement, Transaction};
+use ouroboros::self_referencing;
+use rusqlite::{params, Connection, Rows, Statement, Transaction};
 
 use crate::{
 	Db, Error, IDb, ITx, ITxFn, OnCommit, Result, TxError, TxFnResult, TxOpError, TxOpResult,
@@ -17,8 +13,6 @@ use crate::{
 
 pub use rusqlite;
 
-type Connection = r2d2::PooledConnection<SqliteConnectionManager>;
-
 // --- err
 
 impl From<rusqlite::Error> for Error {
@@ -27,12 +21,6 @@ impl From<rusqlite::Error> for Error {
 	}
 }
 
-impl From<r2d2::Error> for Error {
-	fn from(e: r2d2::Error) -> Error {
-		Error(format!("Sqlite: {}", e).into())
-	}
-}
-
 impl From<rusqlite::Error> for TxOpError {
 	fn from(e: rusqlite::Error) -> TxOpError {
 		TxOpError(e.into())
@@ -41,47 +29,35 @@ impl From<rusqlite::Error> for TxOpError {
 
 // -- db
 
-pub struct SqliteDb {
-	db: Pool<SqliteConnectionManager>,
-	trees: RwLock<Vec<Arc<str>>>,
-	// All operations that might write on the DB must take this lock first.
-	// This emulates LMDB's approach where a single writer can be
-	// active at once.
-	write_lock: Mutex<()>,
-}
+pub struct SqliteDb(Mutex<SqliteDbInner>);
+
+struct SqliteDbInner {
+	db: Connection,
+	trees: Vec<String>,
+}
 
 impl SqliteDb {
-	pub fn new(manager: SqliteConnectionManager, sync_mode: bool) -> Result<Db> {
-		let manager = manager.with_init(move |db| {
-			db.pragma_update(None, "journal_mode", "WAL")?;
-			if sync_mode {
-				db.pragma_update(None, "synchronous", "NORMAL")?;
-			} else {
-				db.pragma_update(None, "synchronous", "OFF")?;
-			}
-			Ok(())
-		});
-		let s = Self {
-			db: Pool::builder().build(manager)?,
-			trees: RwLock::new(vec![]),
-			write_lock: Mutex::new(()),
-		};
-		Ok(Db(Arc::new(s)))
+	pub fn init(db: rusqlite::Connection) -> Db {
+		let s = Self(Mutex::new(SqliteDbInner {
+			db,
+			trees: Vec::new(),
+		}));
+		Db(Arc::new(s))
 	}
 }
 
-impl SqliteDb {
-	fn get_tree(&self, i: usize) -> Result<Arc<str>> {
+impl SqliteDbInner {
+	fn get_tree(&self, i: usize) -> Result<&'_ str> {
 		self.trees
-			.read()
-			.unwrap()
 			.get(i)
-			.cloned()
+			.map(String::as_str)
 			.ok_or_else(|| Error("invalid tree id".into()))
 	}
 
-	fn internal_get(&self, db: &Connection, tree: &str, key: &[u8]) -> Result<Option<Value>> {
-		let mut stmt = db.prepare(&format!("SELECT v FROM {} WHERE k = ?1", tree))?;
+	fn internal_get(&self, tree: &str, key: &[u8]) -> Result<Option<Value>> {
+		let mut stmt = self
+			.db
+			.prepare(&format!("SELECT v FROM {} WHERE k = ?1", tree))?;
 		let mut res_iter = stmt.query([key])?;
 		match res_iter.next()? {
 			None => Ok(None),
@@ -97,14 +73,13 @@ impl IDb for SqliteDb {
 
 	fn open_tree(&self, name: &str) -> Result<usize> {
 		let name = format!("tree_{}", name.replace(':', "_COLON_"));
-		let mut trees = self.trees.write().unwrap();
+		let mut this = self.0.lock().unwrap();
 
-		if let Some(i) = trees.iter().position(|x| x.as_ref() == &name) {
+		if let Some(i) = this.trees.iter().position(|x| x == &name) {
 			Ok(i)
 		} else {
-			let db = self.db.get()?;
 			trace!("create table {}", name);
-			db.execute(
+			this.db.execute(
 				&format!(
 					"CREATE TABLE IF NOT EXISTS {} (
 						k BLOB PRIMARY KEY,
@@ -116,8 +91,8 @@ impl IDb for SqliteDb {
 			)?;
 			trace!("table created: {}, unlocking", name);
 
-			let i = trees.len();
-			trees.push(name.to_string().into_boxed_str().into());
+			let i = this.trees.len();
+			this.trees.push(name.to_string());
 			Ok(i)
 		}
 	}
@@ -125,8 +100,11 @@ impl IDb for SqliteDb {
 	fn list_trees(&self) -> Result<Vec<String>> {
 		let mut trees = vec![];
 
-		let db = self.db.get()?;
-		let mut stmt = db.prepare(
+		trace!("list_trees: lock db");
+		let this = self.0.lock().unwrap();
+		trace!("list_trees: lock acquired");
+
+		let mut stmt = this.db.prepare(
 			"SELECT name FROM sqlite_schema WHERE type = 'table' AND name LIKE 'tree_%'",
 		)?;
 		let mut rows = stmt.query([])?;
@@ -139,29 +117,24 @@ impl IDb for SqliteDb {
 		Ok(trees)
 	}
 
-	fn snapshot(&self, to: &PathBuf) -> Result<()> {
-		fn progress(p: rusqlite::backup::Progress) {
-			let percent = (p.pagecount - p.remaining) * 100 / p.pagecount;
-			info!("Sqlite snapshot progress: {}%", percent);
-		}
-		self.db
-			.get()?
-			.backup(rusqlite::DatabaseName::Main, to, Some(progress))?;
-		Ok(())
-	}
-
 	// ----
 
 	fn get(&self, tree: usize, key: &[u8]) -> Result<Option<Value>> {
-		let tree = self.get_tree(tree)?;
-		self.internal_get(&self.db.get()?, &tree, key)
+		trace!("get {}: lock db", tree);
+		let this = self.0.lock().unwrap();
+		trace!("get {}: lock acquired", tree);
+
+		let tree = this.get_tree(tree)?;
+		this.internal_get(tree, key)
 	}
 
	fn len(&self, tree: usize) -> Result<usize> {
-		let tree = self.get_tree(tree)?;
-		let db = self.db.get()?;
+		trace!("len {}: lock db", tree);
+		let this = self.0.lock().unwrap();
+		trace!("len {}: lock acquired", tree);
 
-		let mut stmt = db.prepare(&format!("SELECT COUNT(*) FROM {}", tree))?;
+		let tree = this.get_tree(tree)?;
+		let mut stmt = this.db.prepare(&format!("SELECT COUNT(*) FROM {}", tree))?;
 		let mut res_iter = stmt.query([])?;
 		match res_iter.next()? {
 			None => Ok(0),
@@ -169,56 +142,70 @@ impl IDb for SqliteDb {
 		}
 	}
 
-	fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<()> {
-		let tree = self.get_tree(tree)?;
-		let db = self.db.get()?;
-		let lock = self.write_lock.lock();
+	fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<Option<Value>> {
+		trace!("insert {}: lock db", tree);
+		let this = self.0.lock().unwrap();
+		trace!("insert {}: lock acquired", tree);
 
-		let old_val = self.internal_get(&db, &tree, key)?;
+		let tree = this.get_tree(tree)?;
+		let old_val = this.internal_get(tree, key)?;
 
 		let sql = match &old_val {
 			Some(_) => format!("UPDATE {} SET v = ?2 WHERE k = ?1", tree),
 			None => format!("INSERT INTO {} (k, v) VALUES (?1, ?2)", tree),
 		};
-		let n = db.execute(&sql, params![key, value])?;
+		let n = this.db.execute(&sql, params![key, value])?;
 		assert_eq!(n, 1);
 
-		drop(lock);
-		Ok(())
+		Ok(old_val)
 	}
 
-	fn remove(&self, tree: usize, key: &[u8]) -> Result<()> {
-		let tree = self.get_tree(tree)?;
-		let db = self.db.get()?;
-		let lock = self.write_lock.lock();
+	fn remove(&self, tree: usize, key: &[u8]) -> Result<Option<Value>> {
+		trace!("remove {}: lock db", tree);
+		let this = self.0.lock().unwrap();
+		trace!("remove {}: lock acquired", tree);
 
-		db.execute(&format!("DELETE FROM {} WHERE k = ?1", tree), params![key])?;
+		let tree = this.get_tree(tree)?;
+		let old_val = this.internal_get(tree, key)?;
 
-		drop(lock);
-		Ok(())
+		if old_val.is_some() {
+			let n = this
+				.db
+				.execute(&format!("DELETE FROM {} WHERE k = ?1", tree), params![key])?;
+			assert_eq!(n, 1);
+		}
+
+		Ok(old_val)
 	}
 
 	fn clear(&self, tree: usize) -> Result<()> {
-		let tree = self.get_tree(tree)?;
-		let db = self.db.get()?;
-		let lock = self.write_lock.lock();
+		trace!("clear {}: lock db", tree);
+		let this = self.0.lock().unwrap();
+		trace!("clear {}: lock acquired", tree);
 
-		db.execute(&format!("DELETE FROM {}", tree), [])?;
-		drop(lock);
+		let tree = this.get_tree(tree)?;
+		this.db.execute(&format!("DELETE FROM {}", tree), [])?;
 		Ok(())
 	}
 
 	fn iter(&self, tree: usize) -> Result<ValueIter<'_>> {
-		let tree = self.get_tree(tree)?;
+		trace!("iter {}: lock db", tree);
+		let this = self.0.lock().unwrap();
+		trace!("iter {}: lock acquired", tree);
+
+		let tree = this.get_tree(tree)?;
 		let sql = format!("SELECT k, v FROM {} ORDER BY k ASC", tree);
-		DbValueIterator::make(self.db.get()?, &sql, [])
+		make_iterator(this, &sql, [])
 	}
 
 	fn iter_rev(&self, tree: usize) -> Result<ValueIter<'_>> {
-		let tree = self.get_tree(tree)?;
+		trace!("iter_rev {}: lock db", tree);
+		let this = self.0.lock().unwrap();
+		trace!("iter_rev {}: lock acquired", tree);
+
+		let tree = this.get_tree(tree)?;
 		let sql = format!("SELECT k, v FROM {} ORDER BY k DESC", tree);
-		DbValueIterator::make(self.db.get()?, &sql, [])
+		make_iterator(this, &sql, [])
 	}
 
 	fn range<'r>(
@@ -227,7 +214,11 @@ impl IDb for SqliteDb {
 		low: Bound<&'r [u8]>,
 		high: Bound<&'r [u8]>,
 	) -> Result<ValueIter<'_>> {
-		let tree = self.get_tree(tree)?;
+		trace!("range {}: lock db", tree);
+		let this = self.0.lock().unwrap();
+		trace!("range {}: lock acquired", tree);
+
+		let tree = this.get_tree(tree)?;
 
 		let (bounds_sql, params) = bounds_sql(low, high);
 		let sql = format!("SELECT k, v FROM {} {} ORDER BY k ASC", tree, bounds_sql);
@@ -237,7 +228,7 @@ impl IDb for SqliteDb {
 			.map(|x| x as &dyn rusqlite::ToSql)
 			.collect::<Vec<_>>();
 
-		DbValueIterator::make::<&[&dyn rusqlite::ToSql]>(self.db.get()?, &sql, params.as_ref())
+		make_iterator::<&[&dyn rusqlite::ToSql]>(this, &sql, params.as_ref())
 	}
 	fn range_rev<'r>(
 		&self,
@@ -245,7 +236,11 @@ impl IDb for SqliteDb {
 		low: Bound<&'r [u8]>,
 		high: Bound<&'r [u8]>,
 	) -> Result<ValueIter<'_>> {
-		let tree = self.get_tree(tree)?;
+		trace!("range_rev {}: lock db", tree);
+		let this = self.0.lock().unwrap();
+		trace!("range_rev {}: lock acquired", tree);
+
+		let tree = this.get_tree(tree)?;
 
 		let (bounds_sql, params) = bounds_sql(low, high);
 		let sql = format!("SELECT k, v FROM {} {} ORDER BY k DESC", tree, bounds_sql);
@@ -255,20 +250,25 @@ impl IDb for SqliteDb {
 			.map(|x| x as &dyn rusqlite::ToSql)
 			.collect::<Vec<_>>();
 
-		DbValueIterator::make::<&[&dyn rusqlite::ToSql]>(self.db.get()?, &sql, params.as_ref())
+		make_iterator::<&[&dyn rusqlite::ToSql]>(this, &sql, params.as_ref())
 	}
 
 	// ----
 
 	fn transaction(&self, f: &dyn ITxFn) -> TxResult<OnCommit, ()> {
-		let mut db = self.db.get().map_err(Error::from).map_err(TxError::Db)?;
-		let trees = self.trees.read().unwrap();
-		let lock = self.write_lock.lock();
+		trace!("transaction: lock db");
+		let mut this = self.0.lock().unwrap();
+		trace!("transaction: lock acquired");
 
-		trace!("trying transaction");
+		let this_mut_ref: &mut SqliteDbInner = this.borrow_mut();
+
 		let mut tx = SqliteTx {
-			tx: db.transaction().map_err(Error::from).map_err(TxError::Db)?,
-			trees: &trees,
+			tx: this_mut_ref
+				.db
+				.transaction()
+				.map_err(Error::from)
+				.map_err(TxError::Db)?,
+			trees: &this_mut_ref.trees,
 		};
 		let res = match f.try_on(&mut tx) {
 			TxFnResult::Ok(on_commit) => {
@@ -288,8 +288,7 @@ impl IDb for SqliteDb {
 		};
 
 		trace!("transaction done");
-		drop(lock);
-		return res;
+		res
 	}
 }
 
@@ -297,14 +296,14 @@ impl IDb for SqliteDb {
 
 struct SqliteTx<'a> {
 	tx: Transaction<'a>,
-	trees: &'a [Arc<str>],
+	trees: &'a [String],
 }
 
 impl<'a> SqliteTx<'a> {
 	fn get_tree(&self, i: usize) -> TxOpResult<&'_ str> {
-		self.trees.get(i).map(Arc::as_ref).ok_or_else(|| {
+		self.trees.get(i).map(String::as_ref).ok_or_else(|| {
 			TxOpError(Error(
-				"invalid tree id (it might have been opened after the transaction started)".into(),
+				"invalid tree id (it might have been openned after the transaction started)".into(),
 			))
 		})
 	}
@@ -336,17 +335,31 @@ impl<'a> ITx for SqliteTx<'a> {
 		}
 	}
 
-	fn insert(&mut self, tree: usize, key: &[u8], value: &[u8]) -> TxOpResult<()> {
+	fn insert(&mut self, tree: usize, key: &[u8], value: &[u8]) -> TxOpResult<Option<Value>> {
 		let tree = self.get_tree(tree)?;
-		let sql = format!("INSERT OR REPLACE INTO {} (k, v) VALUES (?1, ?2)", tree);
-		self.tx.execute(&sql, params![key, value])?;
-		Ok(())
+		let old_val = self.internal_get(tree, key)?;
+
+		let sql = match &old_val {
+			Some(_) => format!("UPDATE {} SET v = ?2 WHERE k = ?1", tree),
+			None => format!("INSERT INTO {} (k, v) VALUES (?1, ?2)", tree),
+		};
+		let n = self.tx.execute(&sql, params![key, value])?;
+		assert_eq!(n, 1);
+
+		Ok(old_val)
 	}
-	fn remove(&mut self, tree: usize, key: &[u8]) -> TxOpResult<()> {
+	fn remove(&mut self, tree: usize, key: &[u8]) -> TxOpResult<Option<Value>> {
 		let tree = self.get_tree(tree)?;
-		self.tx
-			.execute(&format!("DELETE FROM {} WHERE k = ?1", tree), params![key])?;
-		Ok(())
+		let old_val = self.internal_get(tree, key)?;
+
+		if old_val.is_some() {
+			let n = self
+				.tx
+				.execute(&format!("DELETE FROM {} WHERE k = ?1", tree), params![key])?;
+			assert_eq!(n, 1);
+		}
+
+		Ok(old_val)
 	}
 	fn clear(&mut self, tree: usize) -> TxOpResult<()> {
 		let tree = self.get_tree(tree)?;
@@ -357,12 +370,12 @@ impl<'a> ITx for SqliteTx<'a> {
 	fn iter(&self, tree: usize) -> TxOpResult<TxValueIter<'_>> {
 		let tree = self.get_tree(tree)?;
 		let sql = format!("SELECT k, v FROM {} ORDER BY k ASC", tree);
-		TxValueIterator::make(self, &sql, [])
+		make_tx_iterator(self, &sql, [])
 	}
 	fn iter_rev(&self, tree: usize) -> TxOpResult<TxValueIter<'_>> {
 		let tree = self.get_tree(tree)?;
 		let sql = format!("SELECT k, v FROM {} ORDER BY k DESC", tree);
-		TxValueIterator::make(self, &sql, [])
+		make_tx_iterator(self, &sql, [])
 	}
 
 	fn range<'r>(
@@ -381,7 +394,7 @@ impl<'a> ITx for SqliteTx<'a> {
 			.map(|x| x as &dyn rusqlite::ToSql)
 			.collect::<Vec<_>>();
 
-		TxValueIterator::make::<&[&dyn rusqlite::ToSql]>(self, &sql, params.as_ref())
+		make_tx_iterator::<&[&dyn rusqlite::ToSql]>(self, &sql, params.as_ref())
 	}
 	fn range_rev<'r>(
 		&self,
@@ -399,73 +412,47 @@ impl<'a> ITx for SqliteTx<'a> {
 			.map(|x| x as &dyn rusqlite::ToSql)
 			.collect::<Vec<_>>();
 
-		TxValueIterator::make::<&[&dyn rusqlite::ToSql]>(self, &sql, params.as_ref())
+		make_tx_iterator::<&[&dyn rusqlite::ToSql]>(self, &sql, params.as_ref())
 	}
 }
 
 // ---- iterators outside transactions ----
 // complicated, they must hold the Statement and Row objects
-// therefore quite some unsafe code (it is a self-referential struct)
+// so we need a self_referencing struct
 
+// need to split in two because sequential mutable borrows are broken,
+// see https://github.com/someguynamedjosh/ouroboros/issues/100
+#[self_referencing]
+struct DbValueIterator1<'a> {
+	db: MutexGuard<'a, SqliteDbInner>,
+	#[borrows(mut db)]
+	#[covariant]
+	stmt: Statement<'this>,
+}
+
+#[self_referencing]
 struct DbValueIterator<'a> {
-	db: Connection,
-	stmt: Option<Statement<'a>>,
-	iter: Option<Rows<'a>>,
-	_pin: PhantomPinned,
+	aux: DbValueIterator1<'a>,
	#[borrows(mut aux)]
+	#[covariant]
+	iter: Rows<'this>,
 }
 
-impl<'a> DbValueIterator<'a> {
-	fn make<P: rusqlite::Params>(db: Connection, sql: &str, args: P) -> Result<ValueIter<'a>> {
-		let res = DbValueIterator {
-			db,
-			stmt: None,
-			iter: None,
-			_pin: PhantomPinned,
-		};
-		let mut boxed = Box::pin(res);
-		trace!("make iterator with sql: {}", sql);
-
-		// This unsafe allows us to bypass lifetime checks
-		let db = unsafe { NonNull::from(&boxed.db).as_ref() };
-		let stmt = db.prepare(sql)?;
-
-		let mut_ref = Pin::as_mut(&mut boxed);
-		// This unsafe allows us to write in a field of the pinned struct
-		unsafe {
-			Pin::get_unchecked_mut(mut_ref).stmt = Some(stmt);
-		}
-
-		// This unsafe allows us to bypass lifetime checks
-		let stmt = unsafe { NonNull::from(&boxed.stmt).as_mut() };
-		let iter = stmt.as_mut().unwrap().query(args)?;
-
-		let mut_ref = Pin::as_mut(&mut boxed);
-		// This unsafe allows us to write in a field of the pinned struct
-		unsafe {
-			Pin::get_unchecked_mut(mut_ref).iter = Some(iter);
-		}
-
-		Ok(Box::new(DbValueIteratorPin(boxed)))
-	}
+fn make_iterator<'a, P: rusqlite::Params>(
+	db: MutexGuard<'a, SqliteDbInner>,
+	sql: &str,
+	args: P,
+) -> Result<ValueIter<'a>> {
+	let aux = DbValueIterator1::try_new(db, |db| db.db.prepare(sql))?;
+	let res = DbValueIterator::try_new(aux, |aux| aux.with_stmt_mut(|stmt| stmt.query(args)))?;
+	Ok(Box::new(res))
 }
 
-impl<'a> Drop for DbValueIterator<'a> {
-	fn drop(&mut self) {
-		trace!("drop iter");
-		drop(self.iter.take());
-		drop(self.stmt.take());
-	}
-}
-
-struct DbValueIteratorPin<'a>(Pin<Box<DbValueIterator<'a>>>);
-
-impl<'a> Iterator for DbValueIteratorPin<'a> {
+impl<'a> Iterator for DbValueIterator<'a> {
 	type Item = Result<(Value, Value)>;
 
 	fn next(&mut self) -> Option<Self::Item> {
-		let mut_ref = Pin::as_mut(&mut self.0);
-		// This unsafe allows us to mutably access the iterator field
-		let next = unsafe { Pin::get_unchecked_mut(mut_ref).iter.as_mut()?.next() };
+		let next = self.with_iter_mut(|iter| iter.next());
 		iter_next_row(next)
 	}
 }
@@ -474,57 +461,29 @@ impl<'a> Iterator for DbValueIteratorPin<'a> {
 // it's the same except we don't hold a mutex guard,
 // only a Statement and a Rows object
 
+#[self_referencing]
 struct TxValueIterator<'a> {
 	stmt: Statement<'a>,
-	iter: Option<Rows<'a>>,
-	_pin: PhantomPinned,
+	#[borrows(mut stmt)]
+	#[covariant]
+	iter: Rows<'this>,
 }
 
-impl<'a> TxValueIterator<'a> {
-	fn make<P: rusqlite::Params>(
-		tx: &'a SqliteTx<'a>,
-		sql: &str,
-		args: P,
-	) -> TxOpResult<TxValueIter<'a>> {
-		let stmt = tx.tx.prepare(sql)?;
-		let res = TxValueIterator {
-			stmt,
-			iter: None,
-			_pin: PhantomPinned,
-		};
-		let mut boxed = Box::pin(res);
-		trace!("make iterator with sql: {}", sql);
-
-		// This unsafe allows us to bypass lifetime checks
-		let stmt = unsafe { NonNull::from(&boxed.stmt).as_mut() };
-		let iter = stmt.query(args)?;
-
-		let mut_ref = Pin::as_mut(&mut boxed);
-		// This unsafe allows us to write in a field of the pinned struct
-		unsafe {
-			Pin::get_unchecked_mut(mut_ref).iter = Some(iter);
-		}
-
-		Ok(Box::new(TxValueIteratorPin(boxed)))
-	}
+fn make_tx_iterator<'a, P: rusqlite::Params>(
	tx: &'a SqliteTx<'a>,
+	sql: &str,
+	args: P,
+) -> TxOpResult<TxValueIter<'a>> {
+	let stmt = tx.tx.prepare(sql)?;
+	let res = TxValueIterator::try_new(stmt, |stmt| stmt.query(args))?;
+	Ok(Box::new(res))
 }
 
-impl<'a> Drop for TxValueIterator<'a> {
-	fn drop(&mut self) {
-		trace!("drop iter");
-		drop(self.iter.take());
-	}
-}
-
-struct TxValueIteratorPin<'a>(Pin<Box<TxValueIterator<'a>>>);
-
-impl<'a> Iterator for TxValueIteratorPin<'a> {
+impl<'a> Iterator for TxValueIterator<'a> {
 	type Item = TxOpResult<(Value, Value)>;
 
 	fn next(&mut self) -> Option<Self::Item> {
-		let mut_ref = Pin::as_mut(&mut self.0);
-		// This unsafe allows us to mutably access the iterator field
-		let next = unsafe { Pin::get_unchecked_mut(mut_ref).iter.as_mut()?.next() };
+		let next = self.with_iter_mut(|iter| iter.next());
 		iter_next_row(next)
 	}
 }
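The `ouroboros` crate does the heavy lifting in the rewritten iterators: it generates the pinning, the guarded accessors, and the correct drop order that the deleted unsafe code managed by hand. A stripped-down, self-contained sketch of the same pattern, with `String`/`Lines` standing in for `Statement`/`Rows` (the type and function names here are invented; assumes the `ouroboros` crate):

```rust
use ouroboros::self_referencing;

// Like TxValueIterator above: own a value and also hold an iterator
// that borrows from it, something plain lifetimes cannot express.
#[self_referencing]
struct OwnedLines {
    text: String,
    #[borrows(text)]
    #[covariant]
    lines: std::str::Lines<'this>,
}

fn main() {
    let mut it = OwnedLinesBuilder {
        text: "first\nsecond".to_string(),
        // The builder closure receives a borrow of `text` pinned in place.
        lines_builder: |text| text.lines(),
    }
    .build();

    // with_lines_mut is the generated accessor, analogous to
    // with_iter_mut in the adapter code.
    while let Some(line) = it.with_lines_mut(|lines| lines.next()) {
        println!("{}", line);
    }
}
```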
@@ -12,7 +12,7 @@ fn test_suite(db: Db) {
 
 	// ---- test simple insert/delete ----
 
-	assert!(tree.insert(ka, va).is_ok());
+	assert!(tree.insert(ka, va).unwrap().is_none());
 	assert_eq!(tree.get(ka).unwrap().unwrap(), va);
 	assert_eq!(tree.len().unwrap(), 1);
 
@@ -21,7 +21,7 @@ fn test_suite(db: Db) {
 	let res = db.transaction::<_, (), _>(|tx| {
 		assert_eq!(tx.get(&tree, ka).unwrap().unwrap(), va);
 
-		assert_eq!(tx.insert(&tree, ka, vb).unwrap(), ());
+		assert_eq!(tx.insert(&tree, ka, vb).unwrap().unwrap(), va);
 
 		assert_eq!(tx.get(&tree, ka).unwrap().unwrap(), vb);
 
@@ -33,7 +33,7 @@ fn test_suite(db: Db) {
 	let res = db.transaction::<(), _, _>(|tx| {
 		assert_eq!(tx.get(&tree, ka).unwrap().unwrap(), vb);
 
-		assert_eq!(tx.insert(&tree, ka, vc).unwrap(), ());
+		assert_eq!(tx.insert(&tree, ka, vc).unwrap().unwrap(), vb);
 
 		assert_eq!(tx.get(&tree, ka).unwrap().unwrap(), vc);
 
@@ -50,7 +50,7 @@ fn test_suite(db: Db) {
 	assert!(iter.next().is_none());
 	drop(iter);
 
-	assert!(tree.insert(kb, vc).is_ok());
+	assert!(tree.insert(kb, vc).unwrap().is_none());
 	assert_eq!(tree.get(kb).unwrap().unwrap(), vc);
 
 	let mut iter = tree.iter().unwrap();
@@ -144,7 +144,6 @@ fn test_lmdb_db() {
 fn test_sqlite_db() {
 	use crate::sqlite_adapter::SqliteDb;
 
-	let manager = r2d2_sqlite::SqliteConnectionManager::memory();
-	let db = SqliteDb::new(manager, false).unwrap();
+	let db = SqliteDb::init(rusqlite::Connection::open_in_memory().unwrap());
 	test_suite(db);
 }
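The updated assertions encode the new contract: `insert` and `remove` now report the previously stored value, the same convention as the standard library's maps. A tiny self-contained illustration of that contract using `HashMap` (not Garage code):

```rust
use std::collections::HashMap;

fn main() {
    let mut map: HashMap<&str, &str> = HashMap::new();
    // Fresh key: nothing was there before.
    assert!(map.insert("ka", "va").is_none());
    // Overwrite: the old value comes back, like tx.insert above.
    assert_eq!(map.insert("ka", "vb"), Some("va"));
    // Remove: the last stored value is returned.
    assert_eq!(map.remove("ka"), Some("vb"));
}
```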
@@ -1,6 +1,6 @@
 [package]
 name = "garage"
-version = "1.0.1"
+version = "0.10.0"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -42,7 +42,6 @@ tracing.workspace = true
 tracing-subscriber.workspace = true
 rand.workspace = true
 async-trait.workspace = true
-sha1.workspace = true
 sodiumoxide.workspace = true
 structopt.workspace = true
 git-version.workspace = true
@@ -59,7 +58,6 @@ opentelemetry.workspace = true
 opentelemetry-prometheus = { workspace = true, optional = true }
 opentelemetry-otlp = { workspace = true, optional = true }
 prometheus = { workspace = true, optional = true }
-syslog-tracing = { workspace = true, optional = true }
 
 [dev-dependencies]
 aws-config.workspace = true
@@ -98,8 +96,6 @@ kubernetes-discovery = [ "garage_rpc/kubernetes-discovery" ]
 metrics = [ "garage_api/metrics", "opentelemetry-prometheus", "prometheus" ]
 # Exporter for the OpenTelemetry Collector.
 telemetry-otlp = [ "opentelemetry-otlp" ]
-# Logging to syslog
-syslog = [ "syslog-tracing" ]
 
 # NOTE: bundled-libs and system-libs should be treated as mutually exclusive;
 # exactly one of them should be enabled.
@@ -54,8 +54,9 @@ impl AdminRpcHandler {
 		let bucket_id = self
 			.garage
 			.bucket_helper()
-			.admin_get_existing_matching_bucket(&query.name)
-			.await?;
+			.resolve_global_bucket_name(&query.name)
+			.await?
+			.ok_or_bad_request("Bucket not found")?;
 
 		let bucket = self
 			.garage
@@ -156,8 +157,9 @@ impl AdminRpcHandler {
 
 		let bucket_id = helper
 			.bucket()
-			.admin_get_existing_matching_bucket(&query.name)
-			.await?;
+			.resolve_global_bucket_name(&query.name)
+			.await?
+			.ok_or_bad_request("Bucket not found")?;
 
 		// Get the alias, but keep in mind here the bucket name
 		// given in parameter can also be directly the bucket's ID.
@@ -233,8 +235,9 @@ impl AdminRpcHandler {
 
 		let bucket_id = helper
 			.bucket()
-			.admin_get_existing_matching_bucket(&query.existing_bucket)
-			.await?;
+			.resolve_global_bucket_name(&query.existing_bucket)
+			.await?
+			.ok_or_bad_request("Bucket not found")?;
 
 		if let Some(key_pattern) = &query.local {
 			let key = helper.key().get_existing_matching_key(key_pattern).await?;
@@ -304,8 +307,9 @@ impl AdminRpcHandler {
 
 		let bucket_id = helper
 			.bucket()
-			.admin_get_existing_matching_bucket(&query.bucket)
-			.await?;
+			.resolve_global_bucket_name(&query.bucket)
+			.await?
+			.ok_or_bad_request("Bucket not found")?;
 		let key = helper
 			.key()
 			.get_existing_matching_key(&query.key_pattern)
@@ -339,8 +343,9 @@ impl AdminRpcHandler {
 
 		let bucket_id = helper
 			.bucket()
-			.admin_get_existing_matching_bucket(&query.bucket)
-			.await?;
+			.resolve_global_bucket_name(&query.bucket)
+			.await?
+			.ok_or_bad_request("Bucket not found")?;
 		let key = helper
 			.key()
 			.get_existing_matching_key(&query.key_pattern)
@@ -373,8 +378,9 @@ impl AdminRpcHandler {
 		let bucket_id = self
 			.garage
 			.bucket_helper()
-			.admin_get_existing_matching_bucket(&query.bucket)
-			.await?;
+			.resolve_global_bucket_name(&query.bucket)
+			.await?
+			.ok_or_bad_request("Bucket not found")?;
 
 		let mut bucket = self
 			.garage
@@ -414,8 +420,9 @@ impl AdminRpcHandler {
 		let bucket_id = self
 			.garage
 			.bucket_helper()
-			.admin_get_existing_matching_bucket(&query.bucket)
-			.await?;
+			.resolve_global_bucket_name(&query.bucket)
+			.await?
+			.ok_or_bad_request("Bucket not found")?;
 
 		let mut bucket = self
 			.garage
@@ -472,8 +479,9 @@ impl AdminRpcHandler {
 			bucket_ids.push(
 				self.garage
 					.bucket_helper()
-					.admin_get_existing_matching_bucket(b)
-					.await?,
+					.resolve_global_bucket_name(b)
+					.await?
+					.ok_or_bad_request(format!("Bucket not found: {}", b))?,
 			);
 		}
 
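Every call site above follows the same lookup-then-reject shape: resolve to an `Option`, then convert `None` into a "bad request" error. A hedged sketch of that pattern with stand-in types (`ok_or_bad_request` exists in Garage as a helper; everything else here is invented for illustration):

```rust
#[derive(Debug)]
struct BadRequest(String);

// Extension trait turning Option::None into a user-facing error,
// mirroring the ok_or_bad_request helper used in the diff.
trait OkOrBadRequest<T> {
    fn ok_or_bad_request(self, msg: &str) -> Result<T, BadRequest>;
}

impl<T> OkOrBadRequest<T> for Option<T> {
    fn ok_or_bad_request(self, msg: &str) -> Result<T, BadRequest> {
        self.ok_or_else(|| BadRequest(msg.to_string()))
    }
}

// Stand-in for resolve_global_bucket_name: name -> Option<bucket id>.
fn resolve(name: &str, buckets: &[(&str, u64)]) -> Option<u64> {
    buckets.iter().find(|(n, _)| *n == name).map(|(_, id)| *id)
}

fn main() -> Result<(), BadRequest> {
    let buckets = [("docs", 1u64), ("media", 2)];
    let id = resolve("docs", &buckets).ok_or_bad_request("Bucket not found")?;
    assert_eq!(id, 1);
    Ok(())
}
```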
@@ -44,7 +44,6 @@ pub enum AdminRpc {
 	Stats(StatsOpt),
 	Worker(WorkerOperation),
 	BlockOperation(BlockOperation),
-	MetaOperation(MetaOperation),
 
 	// Replies
 	Ok(String),
@@ -466,43 +465,6 @@ impl AdminRpcHandler {
 			)]))
 		}
 	}
-
-	// ================ META DB COMMANDS ====================
-
-	async fn handle_meta_cmd(self: &Arc<Self>, mo: &MetaOperation) -> Result<AdminRpc, Error> {
-		match mo {
-			MetaOperation::Snapshot { all: true } => {
-				let to = self.garage.system.cluster_layout().all_nodes().to_vec();
-
-				let resps = futures::future::join_all(to.iter().map(|to| async move {
-					let to = (*to).into();
-					self.endpoint
-						.call(
-							&to,
-							AdminRpc::MetaOperation(MetaOperation::Snapshot { all: false }),
-							PRIO_NORMAL,
-						)
-						.await
-				}))
-				.await;
-
-				let mut ret = vec![];
-				for (to, resp) in to.iter().zip(resps.iter()) {
-					let res_str = match resp {
-						Ok(_) => "ok".to_string(),
-						Err(e) => format!("error: {}", e),
-					};
-					ret.push(format!("{:?}\t{}", to, res_str));
-				}
-
-				Ok(AdminRpc::Ok(format_table_to_string(ret)))
-			}
-			MetaOperation::Snapshot { all: false } => {
-				garage_model::snapshot::async_snapshot_metadata(&self.garage).await?;
-				Ok(AdminRpc::Ok("Snapshot has been saved.".into()))
-			}
-		}
-	}
 }
 
 #[async_trait]
@@ -519,7 +481,6 @@ impl EndpointHandler<AdminRpc> for AdminRpcHandler {
 			AdminRpc::Stats(opt) => self.handle_stats(opt.clone()).await,
 			AdminRpc::Worker(wo) => self.handle_worker_cmd(wo).await,
 			AdminRpc::BlockOperation(bo) => self.handle_block_cmd(bo).await,
-			AdminRpc::MetaOperation(mo) => self.handle_meta_cmd(mo).await,
 			m => Err(GarageError::unexpected_rpc_message(m).into()),
 		}
 	}
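The deleted `handle_meta_cmd` shows a common fan-out shape: fire the same RPC at every node concurrently with `join_all`, then zip the replies back to their targets, reporting per-node success or failure. A runnable miniature of just that shape (the async "RPC" is faked; assumes the `futures` and `tokio` crates):

```rust
use futures::future::join_all;

// Fake RPC: pretend node 2 is down.
async fn call_node(node: u32) -> Result<(), String> {
    if node == 2 {
        Err("timeout".to_string())
    } else {
        Ok(())
    }
}

#[tokio::main]
async fn main() {
    let nodes = vec![1u32, 2, 3];
    // Launch all calls concurrently; join_all preserves input order,
    // so zip() re-associates each reply with its node.
    let resps = join_all(nodes.iter().map(|n| call_node(*n))).await;
    for (node, resp) in nodes.iter().zip(resps.iter()) {
        let res_str = match resp {
            Ok(_) => "ok".to_string(),
            Err(e) => format!("error: {}", e),
        };
        println!("{}\t{}", node, res_str);
    }
}
```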
@@ -41,9 +41,6 @@ pub async fn cli_command_dispatch(
 		Command::Block(bo) => {
 			cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::BlockOperation(bo)).await
 		}
-		Command::Meta(mo) => {
-			cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::MetaOperation(mo)).await
-		}
 		_ => unreachable!(),
 	}
 }
@@ -57,10 +54,6 @@ pub async fn cmd_status(rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID) ->
 		vec!["ID\tHostname\tAddress\tTags\tZone\tCapacity\tDataAvail".to_string()];
 	for adv in status.iter().filter(|adv| adv.is_up) {
 		let host = adv.status.hostname.as_deref().unwrap_or("?");
-		let addr = match adv.addr {
-			Some(addr) => addr.to_string(),
-			None => "N/A".to_string(),
-		};
 		if let Some(NodeRoleV(Some(cfg))) = layout.current().roles.get(&adv.id) {
 			let data_avail = match &adv.status.data_disk_avail {
 				_ if cfg.capacity.is_none() => "N/A".into(),
@@ -75,7 +68,7 @@ pub async fn cmd_status(rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID) ->
 				"{id:?}\t{host}\t{addr}\t[{tags}]\t{zone}\t{capacity}\t{data_avail}",
 				id = adv.id,
 				host = host,
-				addr = addr,
+				addr = adv.addr,
 				tags = cfg.tags.join(","),
 				zone = cfg.zone,
 				capacity = cfg.capacity_string(),
@@ -95,7 +88,7 @@ pub async fn cmd_status(rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID) ->
 				"{id:?}\t{host}\t{addr}\t[{tags}]\t{zone}\tdraining metadata...",
 				id = adv.id,
 				host = host,
-				addr = addr,
+				addr = adv.addr,
 				tags = cfg.tags.join(","),
 				zone = cfg.zone,
 			));
@@ -108,7 +101,7 @@ pub async fn cmd_status(rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID) ->
 				"{id:?}\t{h}\t{addr}\t\t\t{new_role}",
 				id = adv.id,
 				h = host,
-				addr = addr,
+				addr = adv.addr,
 				new_role = new_role,
 			));
 		}
@@ -124,7 +117,8 @@ pub async fn cmd_status(rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID) ->
 
 	let tf = timeago::Formatter::new();
 	let mut drain_msg = false;
-	let mut failed_nodes = vec!["ID\tHostname\tTags\tZone\tCapacity\tLast seen".to_string()];
+	let mut failed_nodes =
+		vec!["ID\tHostname\tAddress\tTags\tZone\tCapacity\tLast seen".to_string()];
 	let mut listed = HashSet::new();
 	for ver in layout.versions.iter().rev() {
 		for (node, _, role) in ver.roles.items().iter() {
@@ -145,14 +139,15 @@ pub async fn cmd_status(rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID) ->
 
 			// Node is in a layout version, is not a gateway node, and is not up:
 			// it is in a failed state, add proper line to the output
-			let (host, last_seen) = match adv {
+			let (host, addr, last_seen) = match adv {
 				Some(adv) => (
 					adv.status.hostname.as_deref().unwrap_or("?"),
+					adv.addr.to_string(),
 					adv.last_seen_secs_ago
 						.map(|s| tf.convert(Duration::from_secs(s)))
 						.unwrap_or_else(|| "never seen".into()),
 				),
-				None => ("??", "never seen".into()),
+				None => ("??", "??".into(), "never seen".into()),
 			};
 			let capacity = if ver.version == layout.current().version {
 				cfg.capacity_string()
@@ -161,9 +156,10 @@ pub async fn cmd_status(rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID) ->
 				"draining metadata...".to_string()
 			};
 			failed_nodes.push(format!(
-				"{id:?}\t{host}\t[{tags}]\t{zone}\t{capacity}\t{last_seen}",
+				"{id:?}\t{host}\t{addr}\t[{tags}]\t{zone}\t{capacity}\t{last_seen}",
 				id = node,
 				host = host,
+				addr = addr,
 				tags = cfg.tags.join(","),
 				zone = cfg.zone,
 				capacity = capacity,
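The status tables above are built as tab-separated strings and then aligned for display; Garage's `format_table` does this. The version below is a simplified stand-in, not the real implementation, showing how the tab-separated convention turns into an aligned table:

```rust
// Align tab-separated rows into columns: column width = longest cell.
fn format_table(rows: Vec<String>) {
    let split: Vec<Vec<&str>> = rows.iter().map(|r| r.split('\t').collect()).collect();
    let ncols = split.iter().map(|r| r.len()).max().unwrap_or(0);
    let widths: Vec<usize> = (0..ncols)
        .map(|c| split.iter().map(|r| r.get(c).map_or(0, |s| s.len())).max().unwrap_or(0))
        .collect();
    for row in &split {
        let cells: Vec<String> = row
            .iter()
            .enumerate()
            .map(|(c, s)| format!("{:<width$}", s, width = widths[c]))
            .collect();
        println!("{}", cells.join("  ").trim_end());
    }
}

fn main() {
    format_table(vec![
        "ID\tHostname\tZone".to_string(),
        "aabbccdd\tnode1\tdc1".to_string(),
    ]);
}
```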
@@ -24,7 +24,6 @@ pub struct ConvertDbOpt {
 	output_engine: Engine,
 
 	#[structopt(flatten)]
-	#[allow(dead_code)]
 	db_open: OpenDbOpt,
 }
 
@@ -53,7 +52,6 @@ pub(crate) fn do_conversion(args: ConvertDbOpt) -> Result<()> {
 	}
 
 	let opt = OpenOpt {
-		#[cfg(feature = "lmdb")]
 		lmdb_map_size: args.db_open.lmdb.map_size.map(|x| x.as_u64() as usize),
 		..Default::default()
 	};
 
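A detail worth noting in this hunk: a field initializer inside a struct literal can itself sit behind `#[cfg(...)]`, with the remaining fields coming from `Default`. A minimal sketch of that pattern (the names are illustrative, not the Garage types):

```rust
#[derive(Default)]
struct OpenOpt {
    // Field only exists when the lmdb feature is compiled in.
    #[cfg(feature = "lmdb")]
    lmdb_map_size: Option<usize>,
    fsync: bool,
}

fn main() {
    let opt = OpenOpt {
        // The initializer disappears together with the field when
        // the feature is off; ..Default fills whatever remains.
        #[cfg(feature = "lmdb")]
        lmdb_map_size: Some(1 << 30), // 1 GiB, only with lmdb enabled
        ..Default::default()
    };
    println!("fsync: {}", opt.fsync);
}
```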
@@ -129,7 +129,7 @@ pub async fn cmd_assign_role(
 				zone: args
 					.zone
 					.clone()
-					.ok_or("Please specify a zone with the -z flag")?,
+					.ok_or("Please specifiy a zone with the -z flag")?,
 				capacity,
 				tags: args.tags.clone(),
 			}
@@ -145,7 +145,7 @@ pub async fn cmd_assign_role(
 
 	send_layout(rpc_cli, rpc_host, layout).await?;
 
-	println!("Role changes are staged but not yet committed.");
+	println!("Role changes are staged but not yet commited.");
 	println!("Use `garage layout show` to view staged role changes,");
 	println!("and `garage layout apply` to enact staged changes.");
 	Ok(())
@@ -172,7 +172,7 @@ pub async fn cmd_remove_role(
 
 	send_layout(rpc_cli, rpc_host, layout).await?;
 
-	println!("Role removal is staged but not yet committed.");
+	println!("Role removal is staged but not yet commited.");
 	println!("Use `garage layout show` to view staged role changes,");
 	println!("and `garage layout apply` to enact staged changes.");
 	Ok(())
@@ -358,7 +358,7 @@ pub async fn cmd_layout_history(
 
 	if layout.versions.len() > 1 {
 		println!("==== UPDATE TRACKERS ====");
-		println!("Several layout versions are currently live in the cluster, and data is being migrated.");
+		println!("Several layout versions are currently live in the version, and data is being migrated.");
 		println!(
 			"This is the internal data that Garage stores to know which nodes have what data."
 		);
@@ -377,27 +377,15 @@ pub async fn cmd_layout_history(
 		table[1..].sort();
 		format_table(table);
 
-		let min_ack = layout
-			.update_trackers
-			.ack_map
-			.min_among(&all_nodes, layout.min_stored());
-
 		println!();
 		println!(
 			"If some nodes are not catching up to the latest layout version in the update trackers,"
 		);
 		println!("it might be because they are offline or unable to complete a sync successfully.");
-		if min_ack < layout.current().version {
-			println!(
-				"You may force progress using `garage layout skip-dead-nodes --version {}`",
-				layout.current().version
-			);
-		} else {
-			println!(
-				"You may force progress using `garage layout skip-dead-nodes --version {} --allow-missing-data`.",
-				layout.current().version
-			);
-		}
+		println!(
+			"You may force progress using `garage layout skip-dead-nodes --version {}`",
+			layout.current().version
+		);
 	} else {
 		println!("Your cluster is currently in a stable state with a single live layout version.");
 		println!("No metadata migration is in progress. Note that the migration of data blocks is not tracked,");
@@ -438,15 +426,15 @@ pub async fn cmd_layout_skip_dead_nodes(
 	let all_nodes = layout.get_all_nodes();
 	let mut did_something = false;
 	for node in all_nodes.iter() {
-		// Update ACK tracker for dead nodes or for all nodes if --allow-missing-data
-		if opt.allow_missing_data || !status.iter().any(|x| x.id == *node && x.is_up) {
-			if layout.update_trackers.ack_map.set_max(*node, opt.version) {
-				println!("Increased the ACK tracker for node {:?}", node);
-				did_something = true;
-			}
+		if status.iter().any(|x| x.id == *node && x.is_up) {
+			continue;
+		}
+
+		if layout.update_trackers.ack_map.set_max(*node, opt.version) {
+			println!("Increased the ACK tracker for node {:?}", node);
+			did_something = true;
 		}
 
-		// If --allow-missing-data, update SYNC tracker for all nodes.
 		if opt.allow_missing_data {
 			if layout.update_trackers.sync_map.set_max(*node, opt.version) {
 				println!("Increased the SYNC tracker for node {:?}", node);
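The dropped `min_ack` computation gated which `skip-dead-nodes` hint was printed: the cluster-wide ACK value is the lowest layout version any node has acknowledged, floored at `min_stored`. A simplified sketch of how such a minimum can be computed from a per-node tracker (types and defaulting are simplified, not Garage's actual `min_among`):

```rust
use std::collections::HashMap;

// Cluster-wide ACK = min over nodes of their acknowledged version,
// using min_stored when a node has no recorded value.
fn min_among(ack_map: &HashMap<u64, u64>, nodes: &[u64], min_stored: u64) -> u64 {
    nodes
        .iter()
        .map(|n| *ack_map.get(n).unwrap_or(&min_stored))
        .min()
        .unwrap_or(min_stored)
}

fn main() {
    let mut ack = HashMap::new();
    ack.insert(1u64, 12u64);
    ack.insert(2, 11); // node 2 lags one version behind
    // The whole cluster can only be considered at version 11.
    assert_eq!(min_among(&ack, &[1, 2], 10), 11);
}
```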
@@ -48,14 +48,10 @@ pub enum Command {
 	#[structopt(name = "worker", version = garage_version())]
 	Worker(WorkerOperation),
 
-	/// Low-level node-local debug operations on data blocks
+	/// Low-level debug operations on data blocks
 	#[structopt(name = "block", version = garage_version())]
 	Block(BlockOperation),
 
-	/// Operations on the metadata db
-	#[structopt(name = "meta", version = garage_version())]
-	Meta(MetaOperation),
-
 	/// Convert metadata db between database engine formats
 	#[structopt(name = "convert-db", version = garage_version())]
 	ConvertDb(convert_db::ConvertDbOpt),
@@ -184,7 +180,7 @@ pub struct SkipDeadNodesOpt {
 	/// This will generally be the current layout version.
 	#[structopt(long = "version")]
 	pub(crate) version: u64,
-	/// Allow the skip even if a quorum of nodes could not be found for
+	/// Allow the skip even if a quorum of ndoes could not be found for
 	/// the data among the remaining nodes
 	#[structopt(long = "allow-missing-data")]
 	pub(crate) allow_missing_data: bool,
@@ -473,11 +469,8 @@ pub enum RepairWhat {
 	#[structopt(name = "mpu", version = garage_version())]
 	MultipartUploads,
 	/// Repropagate version deletions to the block ref table
-	#[structopt(name = "block-refs", version = garage_version())]
+	#[structopt(name = "block_refs", version = garage_version())]
 	BlockRefs,
-	/// Recalculate block reference counters
-	#[structopt(name = "block-rc", version = garage_version())]
-	BlockRc,
 	/// Verify integrity of all blocks on disc
 	#[structopt(name = "scrub", version = garage_version())]
 	Scrub {
@@ -618,14 +611,3 @@ pub enum BlockOperation {
 		blocks: Vec<String>,
 	},
 }
-
-#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone, Copy)]
-pub enum MetaOperation {
-	/// Save a snapshot of the metadata db file
-	#[structopt(name = "snapshot", version = garage_version())]
-	Snapshot {
-		/// Run on all nodes instead of only local node
-		#[structopt(long = "all")]
-		all: bool,
-	},
-}
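For context on the removed `MetaOperation` enum: with structopt, each enum variant is a subcommand and its fields become flags, so `garage meta snapshot --all` parses straight into `Snapshot { all: true }`. A minimal runnable model of just that mapping (only the names from the diff are real; the surrounding program is invented; assumes the structopt crate):

```rust
use structopt::StructOpt;

#[derive(StructOpt, Debug)]
enum MetaOperation {
    /// Save a snapshot of the metadata db file
    #[structopt(name = "snapshot")]
    Snapshot {
        /// Run on all nodes instead of only local node
        #[structopt(long = "all")]
        all: bool,
    },
}

fn main() {
    // Equivalent to invoking: prog snapshot --all
    let op = MetaOperation::from_iter(vec!["prog", "snapshot", "--all"]);
    println!("{:?}", op); // Snapshot { all: true }
}
```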
@@ -451,7 +451,7 @@ pub fn print_block_info(
 	if refcount != nondeleted_count {
 		println!();
 		println!(
-			"Warning: refcount does not match number of non-deleted versions, you should try `garage repair block-rc`."
+			"Warning: refcount does not match number of non-deleted versions (see issue #644)."
 		);
 	}
 }
@@ -107,7 +107,7 @@ async fn main() {
 	);
 
 	// Initialize panic handler that aborts on panic and shows a nice message.
-	// By default, Tokio continues running normally when a task panics. We want
+	// By default, Tokio continues runing normally when a task panics. We want
 	// to avoid this behavior in Garage as this would risk putting the process in an
 	// unknown/uncontrollable state. We prefer to exit the process and restart it
 	// from scratch, so that it boots back into a fresh, known state.
@@ -138,8 +138,17 @@ async fn main() {
 	let opt = Opt::from_clap(&Opt::clap().version(version.as_str()).get_matches());
 
 	// Initialize logging as well as other libraries used in Garage
-	init_logging(&opt);
+	if std::env::var("RUST_LOG").is_err() {
+		let default_log = match &opt.cmd {
+			Command::Server => "netapp=info,garage=info",
+			_ => "netapp=warn,garage=warn",
+		};
+		std::env::set_var("RUST_LOG", default_log)
+	}
+	tracing_subscriber::fmt()
+		.with_writer(std::io::stderr)
+		.with_env_filter(tracing_subscriber::filter::EnvFilter::from_default_env())
+		.init();
 	sodiumoxide::init().expect("Unable to init sodiumoxide");
 
 	let res = match opt.cmd {
@@ -162,58 +171,6 @@ async fn main() {
 	}
 }
 
-fn init_logging(opt: &Opt) {
-	if std::env::var("RUST_LOG").is_err() {
-		let default_log = match &opt.cmd {
-			Command::Server => "netapp=info,garage=info",
-			_ => "netapp=warn,garage=warn",
-		};
-		std::env::set_var("RUST_LOG", default_log)
-	}
-
-	let env_filter = tracing_subscriber::filter::EnvFilter::from_default_env();
-
-	if std::env::var("GARAGE_LOG_TO_SYSLOG")
-		.map(|x| x == "1" || x == "true")
-		.unwrap_or(false)
-	{
-		#[cfg(feature = "syslog")]
-		{
-			use std::ffi::CStr;
-			use syslog_tracing::{Facility, Options, Syslog};
-
-			let syslog = Syslog::new(
-				CStr::from_bytes_with_nul(b"garage\0").unwrap(),
-				Options::LOG_PID | Options::LOG_PERROR,
-				Facility::Daemon,
-			)
-			.expect("Unable to init syslog");
-
-			tracing_subscriber::fmt()
-				.with_writer(syslog)
-				.with_env_filter(env_filter)
-				.with_ansi(false) // disable ANSI escape sequences (colours)
-				.with_file(false)
-				.with_level(false)
-				.without_time()
-				.compact()
-				.init();
-
-			return;
-		}
-		#[cfg(not(feature = "syslog"))]
-		{
-			eprintln!("Syslog support is not enabled in this build.");
-			std::process::exit(1);
-		}
-	}
-
-	tracing_subscriber::fmt()
-		.with_writer(std::io::stderr)
-		.with_env_filter(env_filter)
-		.init();
-}
-
 async fn cli_command(opt: Opt) -> Result<(), Error> {
 	let config = if (opt.secrets.rpc_secret.is_none() && opt.secrets.rpc_secret_file.is_none())
 		|| opt.rpc_host.is_none()
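The logging change boils down to: honor an explicit `RUST_LOG`, otherwise install a per-command default, then build the subscriber from the environment. A self-contained sketch of that defaulting, detached from Garage's `Opt` type (assumes the tracing and tracing-subscriber crates, the latter with its env-filter feature):

```rust
use tracing_subscriber::filter::EnvFilter;

// server_mode stands in for matching on Command::Server above.
fn init_logging(server_mode: bool) {
    if std::env::var("RUST_LOG").is_err() {
        let default_log = if server_mode {
            "netapp=info,garage=info"
        } else {
            "netapp=warn,garage=warn"
        };
        // Only applied when the user did not set RUST_LOG themselves.
        std::env::set_var("RUST_LOG", default_log);
    }
    tracing_subscriber::fmt()
        .with_writer(std::io::stderr)
        .with_env_filter(EnvFilter::from_default_env())
        .init();
}

fn main() {
    init_logging(true);
    tracing::info!("logging ready");
}
```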
@@ -4,7 +4,6 @@ use std::time::Duration;
 use async_trait::async_trait;
 use tokio::sync::watch;

-use garage_block::manager::BlockManager;
 use garage_block::repair::ScrubWorkerCommand;

 use garage_model::garage::Garage;
@@ -17,14 +16,11 @@ use garage_table::replication::*;
 use garage_table::*;

 use garage_util::background::*;
-use garage_util::data::*;
 use garage_util::error::Error;
 use garage_util::migrate::Migrate;

 use crate::*;

-const RC_REPAIR_ITER_COUNT: usize = 64;
-
 pub async fn launch_online_repair(
 	garage: &Arc<Garage>,
 	bg: &BackgroundRunner,
@@ -51,13 +47,6 @@ pub async fn launch_online_repair(
 			info!("Repairing the block refs table");
 			bg.spawn_worker(TableRepairWorker::new(garage.clone(), RepairBlockRefs));
 		}
-		RepairWhat::BlockRc => {
-			info!("Repairing the block reference counters");
-			bg.spawn_worker(BlockRcRepair::new(
-				garage.block_manager.clone(),
-				garage.block_ref_table.clone(),
-			));
-		}
 		RepairWhat::Blocks => {
 			info!("Repairing the stored blocks");
 			bg.spawn_worker(garage_block::repair::RepairWorker::new(
@@ -293,98 +282,3 @@ impl TableRepair for RepairMpu {
 		Ok(false)
 	}
 }
-
-// ===== block reference counter repair =====
-
-pub struct BlockRcRepair {
-	block_manager: Arc<BlockManager>,
-	block_ref_table: Arc<Table<BlockRefTable, TableShardedReplication>>,
-	cursor: Hash,
-	counter: u64,
-	repairs: u64,
-}
-
-impl BlockRcRepair {
-	fn new(
-		block_manager: Arc<BlockManager>,
-		block_ref_table: Arc<Table<BlockRefTable, TableShardedReplication>>,
-	) -> Self {
-		Self {
-			block_manager,
-			block_ref_table,
-			cursor: [0u8; 32].into(),
-			counter: 0,
-			repairs: 0,
-		}
-	}
-}
-
-#[async_trait]
-impl Worker for BlockRcRepair {
-	fn name(&self) -> String {
-		format!("Block refcount repair worker")
-	}
-
-	fn status(&self) -> WorkerStatus {
-		WorkerStatus {
-			progress: Some(format!("{} ({})", self.counter, self.repairs)),
-			..Default::default()
-		}
-	}
-
-	async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
-		for _i in 0..RC_REPAIR_ITER_COUNT {
-			let next1 = self
-				.block_manager
-				.rc
-				.rc_table
-				.range(self.cursor.as_slice()..)?
-				.next()
-				.transpose()?
-				.map(|(k, _)| Hash::try_from(k.as_slice()).unwrap());
-			let next2 = self
-				.block_ref_table
-				.data
-				.store
-				.range(self.cursor.as_slice()..)?
-				.next()
-				.transpose()?
-				.map(|(k, _)| Hash::try_from(&k[..32]).unwrap());
-			let next = match (next1, next2) {
-				(Some(k1), Some(k2)) => std::cmp::min(k1, k2),
-				(Some(k), None) | (None, Some(k)) => k,
-				(None, None) => {
-					info!(
-						"{}: finished, done {}, fixed {}",
-						self.name(),
-						self.counter,
-						self.repairs
-					);
-					return Ok(WorkerState::Done);
-				}
-			};
-
-			if self.block_manager.rc.recalculate_rc(&next)?.1 {
-				self.repairs += 1;
-			}
-			self.counter += 1;
-			if let Some(next_incr) = next.increment() {
-				self.cursor = next_incr;
-			} else {
-				info!(
-					"{}: finished, done {}, fixed {}",
-					self.name(),
-					self.counter,
-					self.repairs
-				);
-				return Ok(WorkerState::Done);
-			}
-		}
-
-		Ok(WorkerState::Busy)
-	}
-
-	async fn wait_for_work(&mut self) -> WorkerState {
-		unreachable!()
-	}
-}

@@ -104,7 +104,7 @@ pub(crate) fn fill_secret(

 	if let Some(val) = cli_value {
 		if config_secret.is_some() || config_secret_file.is_some() {
-			debug!("Overriding secret `{}` using value specified using CLI argument or environment variable.", name);
+			debug!("Overriding secret `{}` using value specified using CLI argument or environnement variable.", name);
 		}

 		*config_secret = Some(val);

@@ -51,7 +51,7 @@ pub async fn run_server(config_file: PathBuf, secrets: Secrets) -> Result<(), Er
 	let (background, await_background_done) = BackgroundRunner::new(watch_cancel.clone());

 	info!("Spawning Garage workers...");
-	garage.spawn_workers(&background)?;
+	garage.spawn_workers(&background);

 	if config.admin.trace_sink.is_some() {
 		info!("Initialize tracing...");

@@ -153,7 +153,7 @@ impl<'a> RequestBuilder<'a> {

 	pub async fn send(&mut self) -> Result<Response<Body>, String> {
 		// TODO this is a bit incorrect in that path and query params should be url-encoded and
-		// aren't, but this is good enough for now.
+		// aren't, but this is good enought for now.

 		let query = query_param_to_string(&self.query_params);
 		let (host, path) = if self.vhost_style {
@@ -210,9 +210,9 @@ impl<'a> RequestBuilder<'a> {
 				HeaderName::from_static("x-amz-decoded-content-length"),
 				HeaderValue::from_str(&self.body.len().to_string()).unwrap(),
 			);
-			// Get length of body by doing the conversion to a streaming body with an
+			// Get lenght of body by doing the conversion to a streaming body with an
 			// invalid signature (we don't know the seed) just to get its length. This
-			// is a pretty lazy and inefficient way to do it, but it's enough for test
+			// is a pretty lazy and inefficient way to do it, but it's enought for test
 			// code.
 			all_headers.insert(
 				CONTENT_LENGTH,

@@ -42,10 +42,6 @@ impl Instance {
 			.ok()
 			.unwrap_or_else(|| env::temp_dir().join(format!("garage-integ-test-{}", port)));

-		let db_engine = env::var("GARAGE_TEST_INTEGRATION_DB_ENGINE")
-			.ok()
-			.unwrap_or_else(|| "lmdb".into());
-
 		// Clean test runtime directory
 		if path.exists() {
 			fs::remove_dir_all(&path).expect("Could not clean test runtime directory");
@@ -56,7 +52,7 @@ impl Instance {
 			r#"
 metadata_dir = "{path}/meta"
 data_dir = "{path}/data"
-db_engine = "{db_engine}"
+db_engine = "lmdb"

 replication_factor = 1

@@ -10,7 +10,6 @@ use crate::common;
 use crate::json_body;

 #[tokio::test]
-#[ignore = "currently broken"]
 async fn test_poll_item() {
 	let ctx = common::context();
 	let bucket = ctx.create_bucket("test-k2v-poll-item");
@@ -99,7 +98,6 @@ async fn test_poll_item() {
 }

 #[tokio::test]
-#[ignore = "currently broken"]
 async fn test_poll_range() {
 	let ctx = common::context();
 	let bucket = ctx.create_bucket("test-k2v-poll-range");

@@ -1,7 +1,6 @@
 use crate::common;
 use aws_sdk_s3::primitives::ByteStream;
-use aws_sdk_s3::types::{ChecksumAlgorithm, CompletedMultipartUpload, CompletedPart};
-use base64::prelude::*;
+use aws_sdk_s3::types::{CompletedMultipartUpload, CompletedPart};

 const SZ_5MB: usize = 5 * 1024 * 1024;
 const SZ_10MB: usize = 10 * 1024 * 1024;
@@ -190,153 +189,6 @@ async fn test_multipart_upload() {
 	}
 }

-#[tokio::test]
-async fn test_multipart_with_checksum() {
-	let ctx = common::context();
-	let bucket = ctx.create_bucket("testmpu-cksum");
-
-	let u1 = vec![0x11; SZ_5MB];
-	let u2 = vec![0x22; SZ_5MB];
-	let u3 = vec![0x33; SZ_5MB];
-
-	let ck1 = calculate_sha1(&u1);
-	let ck2 = calculate_sha1(&u2);
-	let ck3 = calculate_sha1(&u3);
-
-	let up = ctx
-		.client
-		.create_multipart_upload()
-		.bucket(&bucket)
-		.checksum_algorithm(ChecksumAlgorithm::Sha1)
-		.key("a")
-		.send()
-		.await
-		.unwrap();
-	assert!(up.upload_id.is_some());
-
-	let uid = up.upload_id.as_ref().unwrap();
-
-	let p1 = ctx
-		.client
-		.upload_part()
-		.bucket(&bucket)
-		.key("a")
-		.upload_id(uid)
-		.part_number(1)
-		.checksum_sha1(&ck1)
-		.body(ByteStream::from(u1.clone()))
-		.send()
-		.await
-		.unwrap();
-
-	// wrong checksum value should return an error
-	let err1 = ctx
-		.client
-		.upload_part()
-		.bucket(&bucket)
-		.key("a")
-		.upload_id(uid)
-		.part_number(2)
-		.checksum_sha1(&ck1)
-		.body(ByteStream::from(u2.clone()))
-		.send()
-		.await;
-	assert!(err1.is_err());
-
-	let p2 = ctx
-		.client
-		.upload_part()
-		.bucket(&bucket)
-		.key("a")
-		.upload_id(uid)
-		.part_number(2)
-		.checksum_sha1(&ck2)
-		.body(ByteStream::from(u2))
-		.send()
-		.await
-		.unwrap();
-
-	let p3 = ctx
-		.client
-		.upload_part()
-		.bucket(&bucket)
-		.key("a")
-		.upload_id(uid)
-		.part_number(3)
-		.checksum_sha1(&ck3)
-		.body(ByteStream::from(u3.clone()))
-		.send()
-		.await
-		.unwrap();
-
-	{
-		let r = ctx
-			.client
-			.list_parts()
-			.bucket(&bucket)
-			.key("a")
-			.upload_id(uid)
-			.send()
-			.await
-			.unwrap();
-		let parts = r.parts.unwrap();
-		assert_eq!(parts.len(), 3);
-		assert!(parts[0].checksum_crc32.is_none());
-		assert!(parts[0].checksum_crc32_c.is_none());
-		assert!(parts[0].checksum_sha256.is_none());
-		assert_eq!(parts[0].checksum_sha1.as_deref().unwrap(), ck1);
-		assert_eq!(parts[1].checksum_sha1.as_deref().unwrap(), ck2);
-		assert_eq!(parts[2].checksum_sha1.as_deref().unwrap(), ck3);
-	}
-
-	let cmp = CompletedMultipartUpload::builder()
-		.parts(
-			CompletedPart::builder()
-				.part_number(1)
-				.checksum_sha1(&ck1)
-				.e_tag(p1.e_tag.unwrap())
-				.build(),
-		)
-		.parts(
-			CompletedPart::builder()
-				.part_number(2)
-				.checksum_sha1(&ck2)
-				.e_tag(p2.e_tag.unwrap())
-				.build(),
-		)
-		.parts(
-			CompletedPart::builder()
-				.part_number(3)
-				.checksum_sha1(&ck3)
-				.e_tag(p3.e_tag.unwrap())
-				.build(),
-		)
-		.build();
-
-	let expected_checksum = calculate_sha1(
-		&vec![
-			BASE64_STANDARD.decode(&ck1).unwrap(),
-			BASE64_STANDARD.decode(&ck2).unwrap(),
-			BASE64_STANDARD.decode(&ck3).unwrap(),
-		]
-		.concat(),
-	);
-
-	let res = ctx
-		.client
-		.complete_multipart_upload()
-		.bucket(&bucket)
-		.key("a")
-		.upload_id(uid)
-		.checksum_sha1(expected_checksum.clone())
-		.multipart_upload(cmp)
-		.send()
-		.await
-		.unwrap();
-
-	assert_eq!(res.checksum_sha1, Some(expected_checksum));
-}

 #[tokio::test]
 async fn test_uploadlistpart() {
 	let ctx = common::context();
@@ -772,11 +624,3 @@ async fn test_uploadpartcopy() {
 	assert_eq!(real_obj.len(), exp_obj.len());
 	assert_eq!(real_obj, exp_obj);
 }
-
-fn calculate_sha1(bytes: &[u8]) -> String {
-	use sha1::{Digest, Sha1};
-
-	let mut hasher = Sha1::new();
-	hasher.update(bytes);
-	BASE64_STANDARD.encode(&hasher.finalize()[..])
-}

@@ -54,7 +54,7 @@ enum Command {
 		partition_key: String,
 		/// Sort key to read from
 		sort_key: String,
-		/// Output formatting
+		/// Output formating
 		#[clap(flatten)]
 		output_kind: ReadOutputKind,
 	},
@@ -70,7 +70,7 @@ enum Command {
 		/// Timeout, in seconds
 		#[clap(short = 'T', long)]
 		timeout: Option<u64>,
-		/// Output formatting
+		/// Output formating
 		#[clap(flatten)]
 		output_kind: ReadOutputKind,
 	},
@@ -87,7 +87,7 @@ enum Command {
 		/// Timeout, in seconds
 		#[clap(short = 'T', long)]
 		timeout: Option<u64>,
-		/// Output formatting
+		/// Output formating
 		#[clap(flatten)]
 		output_kind: BatchOutputKind,
 	},
@@ -103,7 +103,7 @@ enum Command {
 	},
 	/// List partition keys
 	ReadIndex {
-		/// Output formatting
+		/// Output formating
 		#[clap(flatten)]
 		output_kind: BatchOutputKind,
 		/// Output only partition keys matching this filter
@@ -114,7 +114,7 @@ enum Command {
 	ReadRange {
 		/// Partition key to read from
 		partition_key: String,
-		/// Output formatting
+		/// Output formating
 		#[clap(flatten)]
 		output_kind: BatchOutputKind,
 		/// Output only sort keys matching this filter
@@ -125,7 +125,7 @@ enum Command {
 	DeleteRange {
 		/// Partition key to delete from
 		partition_key: String,
-		/// Output formatting
+		/// Output formating
 		#[clap(flatten)]
 		output_kind: BatchOutputKind,
 		/// Delete only sort keys matching this filter
@@ -185,10 +185,10 @@ struct ReadOutputKind {
 	/// Raw output. Conflicts generate error, causality token is not returned
 	#[clap(short, long, group = "output-kind")]
 	raw: bool,
-	/// Human formatted output
+	/// Human formated output
 	#[clap(short = 'H', long, group = "output-kind")]
 	human: bool,
-	/// JSON formatted output
+	/// JSON formated output
 	#[clap(short, long, group = "output-kind")]
 	json: bool,
 }
@@ -207,7 +207,7 @@ impl ReadOutputKind {
 		let mut val = val.value;
 		if val.len() != 1 {
 			eprintln!(
-				"Raw mode can only read non-concurrent values, found {} values, expected 1",
+				"Raw mode can only read non-concurent values, found {} values, expected 1",
 				val.len()
 			);
 			exit(1);
@@ -265,10 +265,10 @@ impl ReadOutputKind {
 #[derive(Parser, Debug)]
 #[clap(group = clap::ArgGroup::new("output-kind").multiple(false).required(false))]
 struct BatchOutputKind {
-	/// Human formatted output
+	/// Human formated output
 	#[clap(short = 'H', long, group = "output-kind")]
 	human: bool,
-	/// JSON formatted output
+	/// JSON formated output
 	#[clap(short, long, group = "output-kind")]
 	json: bool,
 }
@@ -336,7 +336,7 @@ impl K2vClient {
 		.collect())
 	}

-	/// Perform a DeleteBatch request, deleting multiple values or range of values at once, without
+	/// Perform a DeleteBatch request, deleting mutiple values or range of values at once, without
 	/// providing causality information.
 	pub async fn delete_batch(&self, operations: &[BatchDeleteOp<'_>]) -> Result<Vec<u64>, Error> {
 		let url = self.build_url(None, &[("delete", "")]);

@@ -1,6 +1,6 @@
 [package]
 name = "garage_model"
-version = "1.0.1"
+version = "0.10.0"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -29,7 +29,6 @@ err-derive.workspace = true
 hex.workspace = true
 http.workspace = true
 base64.workspace = true
-parse_duration.workspace = true
 tracing.workspace = true
 rand.workspace = true
 zstd.workspace = true

@@ -89,9 +89,9 @@ pub fn is_valid_bucket_name(n: &str) -> bool {
 		// Bucket names must start and end with a letter or a number
 		&& !n.starts_with(&['-', '.'][..])
 		&& !n.ends_with(&['-', '.'][..])
-		// Bucket names must not be formatted as an IP address
+		// Bucket names must not be formated as an IP address
 		&& n.parse::<std::net::IpAddr>().is_err()
-		// Bucket names must not start with "xn--"
+		// Bucket names must not start wih "xn--"
 		&& !n.starts_with("xn--")
 		// Bucket names must not end with "-s3alias"
 		&& !n.ends_with("-s3alias")

@@ -14,7 +14,7 @@ mod v08 {
 	/// A bucket is a collection of objects
 	///
 	/// Its parameters are not directly accessible as:
-	///  - It must be possible to merge parameters, hence the use of a LWW CRDT.
+	///  - It must be possible to merge paramaters, hence the use of a LWW CRDT.
 	///  - A bucket has 2 states, Present or Deleted and parameters make sense only if present.
 	#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
 	pub struct Bucket {
@@ -126,7 +126,7 @@ impl AutoCrdt for BucketQuotas {
 }

 impl BucketParams {
-	/// Create an empty BucketParams with no authorized keys and no website access
+	/// Create an empty BucketParams with no authorized keys and no website accesss
 	fn new() -> Self {
 		BucketParams {
 			creation_date: now_msec(),

@@ -141,7 +141,7 @@ impl Garage {
 		)?)
 		.ok()
 		.and_then(|x| NetworkKey::from_slice(&x))
-		.ok_or_message("Invalid RPC secret key: expected 32 bytes of random hex, please check the documentation for requirements")?;
+		.ok_or_message("Invalid RPC secret key")?;

 		let (replication_factor, consistency_mode) = parse_replication_mode(&config)?;

@@ -170,7 +170,14 @@ impl Garage {
 		};

 		info!("Initialize block manager...");
-		let block_manager = BlockManager::new(&db, &config, data_rep_param, system.clone())?;
+		let block_manager = BlockManager::new(
+			&db,
+			config.data_dir.clone(),
+			config.data_fsync,
+			config.compression_level,
+			data_rep_param,
+			system.clone(),
+		)?;
 		block_manager.register_bg_vars(&mut bg_vars);

 		// ---- admin tables ----
@@ -247,14 +254,6 @@ impl Garage {
 		#[cfg(feature = "k2v")]
 		let k2v = GarageK2V::new(system.clone(), &db, meta_rep_param);

-		// ---- setup block refcount recalculation ----
-		// this function can be used to fix inconsistencies in the RC table
-		block_manager.set_recalc_rc(vec![
-			block_ref_recount_fn(&block_ref_table),
-			// other functions could be added here if we had other tables
-			// that hold references to data blocks
-		]);
-
 		// -- done --
 		Ok(Arc::new(Self {
 			config,
@@ -279,7 +278,7 @@ impl Garage {
 		}))
 	}

-	pub fn spawn_workers(self: &Arc<Self>, bg: &BackgroundRunner) -> Result<(), Error> {
+	pub fn spawn_workers(self: &Arc<Self>, bg: &BackgroundRunner) {
 		self.block_manager.spawn_workers(bg);

 		self.bucket_table.spawn_workers(bg);
@@ -300,23 +299,6 @@ impl Garage {

 		#[cfg(feature = "k2v")]
 		self.k2v.spawn_workers(bg);
-
-		if let Some(itv) = self.config.metadata_auto_snapshot_interval.as_deref() {
-			let interval = parse_duration::parse(itv)
-				.ok_or_message("Invalid `metadata_auto_snapshot_interval`")?;
-			if interval < std::time::Duration::from_secs(600) {
-				return Err(Error::Message(
-					"metadata_auto_snapshot_interval too small or negative".into(),
-				));
-			}
-
-			bg.spawn_worker(crate::snapshot::AutoSnapshotWorker::new(
-				self.clone(),
-				interval,
-			));
-		}
-
-		Ok(())
 	}

 	pub fn bucket_helper(&self) -> helper::bucket::BucketHelper {

|
@ -67,49 +67,6 @@ impl<'a> BucketHelper<'a> {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Find a bucket by its global alias or a prefix of its uuid
|
|
||||||
pub async fn admin_get_existing_matching_bucket(
|
|
||||||
&self,
|
|
||||||
pattern: &String,
|
|
||||||
) -> Result<Uuid, Error> {
|
|
||||||
if let Some(uuid) = self.resolve_global_bucket_name(pattern).await? {
|
|
||||||
return Ok(uuid);
|
|
||||||
} else if pattern.len() >= 2 {
|
|
||||||
let hexdec = pattern
|
|
||||||
.get(..pattern.len() & !1)
|
|
||||||
.and_then(|x| hex::decode(x).ok());
|
|
||||||
if let Some(hex) = hexdec {
|
|
||||||
let mut start = [0u8; 32];
|
|
||||||
start
|
|
||||||
.as_mut_slice()
|
|
||||||
.get_mut(..hex.len())
|
|
||||||
.ok_or_bad_request("invalid length")?
|
|
||||||
.copy_from_slice(&hex);
|
|
||||||
let mut candidates = self
|
|
||||||
.0
|
|
||||||
.bucket_table
|
|
||||||
.get_range(
|
|
||||||
&EmptyKey,
|
|
||||||
Some(start.into()),
|
|
||||||
Some(DeletedFilter::NotDeleted),
|
|
||||||
10,
|
|
||||||
EnumerationOrder::Forward,
|
|
||||||
)
|
|
||||||
.await?
|
|
||||||
.into_iter()
|
|
||||||
.collect::<Vec<_>>();
|
|
||||||
candidates.retain(|x| hex::encode(x.id).starts_with(pattern));
|
|
||||||
if candidates.len() == 1 {
|
|
||||||
return Ok(candidates.into_iter().next().unwrap().id);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Err(Error::BadRequest(format!(
|
|
||||||
"Bucket not found / several matching buckets: {}",
|
|
||||||
pattern
|
|
||||||
)))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns a Bucket if it is present in bucket table,
|
/// Returns a Bucket if it is present in bucket table,
|
||||||
/// even if it is in deleted state. Querying a non-existing
|
/// even if it is in deleted state. Querying a non-existing
|
||||||
/// bucket ID returns an internal error.
|
/// bucket ID returns an internal error.
|
||||||
|
|
|
@@ -231,7 +231,7 @@ impl<'a> LockedHelper<'a> {
 		let bucket_p_local_alias_key = (key.key_id.clone(), alias_name.clone());

 		// Calculate the timestamp to assign to this aliasing in the two local_aliases maps
-		// (the one from key to bucket, and the reverse one stored in the bucket itself)
+		// (the one from key to bucket, and the reverse one stored in the bucket iself)
 		// so that merges on both maps in case of a concurrent operation resolve
 		// to the same alias being set
 		let alias_ts = increment_logical_clock_2(
@@ -279,8 +279,7 @@ impl<'a> LockedHelper<'a> {
 			.local_aliases
 			.get(alias_name)
 			.cloned()
-			.flatten()
-			!= Some(bucket_id)
+			.flatten() != Some(bucket_id)
 		{
 			return Err(GarageError::Message(format!(
 				"Bucket {:?} does not have alias {} in namespace of key {}",