Compare commits


55 commits

Author SHA1 Message Date
d601f31186 Merge pull request 'split garage_api in garage_api_{common,s3,k2v,admin}' (#947) from split-garage-api into main
Reviewed-on: Deuxfleurs/garage#947
2025-02-01 17:48:25 +00:00
e4de7bdfd5 fix ci for more test crates 2025-01-31 19:21:36 +01:00
d18c5ad0ff fix tests 2025-01-31 19:12:51 +01:00
3d5e9a027e cargo defs: simplify and fix descriptions 2025-01-31 18:54:29 +01:00
f4ca7758b4 update cargo.nix 2025-01-31 18:48:07 +01:00
4563313f87 use cargo-shear to remove many unused dependencies between crates 2025-01-31 18:47:30 +01:00
afa28706e5 split s3/cors.rs into also common/cors.rs 2025-01-31 18:42:14 +01:00
84f1db91c4 fix things up 2025-01-31 18:34:57 +01:00
9fa20d45be wip: split garage_api into garage_api_{common,s3,k2v,admin} 2025-01-31 18:18:29 +01:00
9330fd79d3 Merge pull request 'table::insert_many: avoid failure with zero items (fix #915)' (#946) from fix-915 into main
Reviewed-on: Deuxfleurs/garage#946
2025-01-31 13:10:54 +00:00
83f6928ff7 table::insert_many: avoid failure with zero items (fix #915) 2025-01-30 18:06:47 +01:00
ab71544499 Merge pull request 'api: better handling of helper errors to distinguish error codes' (#942) from fix-getkeyinfo-404 into main
Reviewed-on: Deuxfleurs/garage#942
2025-01-29 18:25:44 +00:00
991edbe02c Merge pull request 'Update doc/book/connect/repositories.md' (#941) from yatesco/garage:main into main
Reviewed-on: Deuxfleurs/garage#941
2025-01-29 18:18:59 +00:00
9f3c7c3720 api: better handling of helper errors to distinguish error codes 2025-01-29 19:14:34 +01:00
bfde9152b8 Update doc/book/operations/multi-hdd.md
trivial spelling mistake
2025-01-29 13:40:41 +00:00
7bb042f0b7 Update doc/book/connect/repositories.md
trivial spelling mistake
2025-01-29 13:34:35 +00:00
a1d081ee84 Merge pull request 's3 api: make x-amz-meta-* headers lowercase (fix #844)' (#938) from fix-844 into main
Reviewed-on: Deuxfleurs/garage#938
2025-01-27 19:32:19 +00:00
e8fa89e834 s3 api: make x-amz-meta-* headers lowercase (fix #844) 2025-01-27 19:58:06 +01:00
beedc9fd11 Merge pull request 'snapshot: sqlite: use a subdirectory for consistency with LMDB' (#932) from baptiste/garage:snapshot_consistency_sqlite into main
Reviewed-on: Deuxfleurs/garage#932
2025-01-27 18:50:11 +00:00
d4e3e60920 Merge pull request 'update nix crate to 0.29 and libc to 0.2.169' (#931) from neuschaefer/garage:nix into main
Reviewed-on: Deuxfleurs/garage#931
2025-01-27 18:09:51 +00:00
Baptiste Jonglez
43402c9619 snapshot: sqlite: use a subdirectory for consistency with LMDB
Currently, taking a snapshot of the metadata database with sqlite creates
a sqlite file without extension with the following format:

    snapshots/2025-01-26T15:29:17Z

This makes it hard to understand what kind of data this is, and is not
consistent with LMDB:

    snapshots/2025-01-26T15:29:17Z/data.mdb

With this change, we now get a directory with a single db.sqlite file:

    snapshots/2025-01-26T15:29:17Z/db.sqlite
2025-01-27 19:06:52 +01:00
efa6f3d85e Merge pull request 'db-snapshot: allow to set directory where snapshots are stored' (#933) from baptiste/garage:configure_metadata_snapshots_dir into main
Reviewed-on: Deuxfleurs/garage#933
2025-01-27 18:04:05 +00:00
74a1b49b13 Update Cargo.nix 2025-01-27 18:37:05 +01:00
23d57b89dc update nix crate to 0.29 and libc to 0.2.169 2025-01-27 18:37:05 +01:00
5e3e1f4453 Merge pull request 'fix problems with CI doing work multiple times' (#936) from woodpecker-simplify into main
Reviewed-on: Deuxfleurs/garage#936
2025-01-27 17:36:27 +00:00
Baptiste Jonglez
59c153d280 db-snapshot: allow to set directory where snapshots are stored
Fix #926
2025-01-27 18:33:55 +01:00
bb3e0f7d22 nix CI: reduce redundant work 2025-01-27 18:09:51 +01:00
0156e40c9d Merge pull request 'ci: fix woodpecker definitions to comply with woodpecker 3' (#935) from woodpecker3 into main
Reviewed-on: Deuxfleurs/garage#935
Reviewed-by: maximilien <me@mricher.fr>
2025-01-27 12:03:46 +00:00
f6f88065ad ci: fix woodpecker definitions to comply with woodpecker 3 2025-01-27 12:06:31 +01:00
591bd808ec Merge pull request 'doc: Fix Nix devenv setup' (#927) from fix_devenv into main
Reviewed-on: Deuxfleurs/garage#927
2025-01-23 10:20:04 +00:00
294cb99409 Merge pull request 'Fix all typos' (#928) from majst01/garage:fix-typos into main
Reviewed-on: Deuxfleurs/garage#928
Reviewed-by: maximilien <me@mricher.fr>
2025-01-17 06:06:14 +00:00
Stefan Majer
2eb9fcae20 Fix all typos 2025-01-16 13:22:00 +01:00
Baptiste Jonglez
58b9eb46fc doc: Fix Nix devenv setup
This is a hotfix to fix the doc for the current setup, see #868 for
possible future directions.
2025-01-16 10:00:12 +01:00
255b01b626 Merge pull request 'Helm chart: Add garage.existingConfigmap and replace garage.garage.toml with garage.garageTomlString' (#923) from jessebot/garage:allow-existing-configmap into main
Reviewed-on: Deuxfleurs/garage#923
Reviewed-by: maximilien <me@mricher.fr>
2025-01-15 23:53:25 +00:00
58a765c51f Minor rewording, add some more hints 2025-01-15 23:51:07 +00:00
1c431b8457 Add garage.existingConfigmap and replace garage.garage.toml with garage.garageTomlString
also moves all gotemplating back to configmap

and adds autogenerated docs via helm-docs

Signed-off-by: jessebot <jessebot@linux.com>
2025-01-15 23:51:07 +00:00
39ac034de5 Merge pull request 'update toolchain' (#924) from nix-update into main
Reviewed-on: Deuxfleurs/garage#924
2025-01-13 10:19:53 +00:00
8ddb0dd485 nix build: switch to upstream cargo2nix (branch release-0.11.0) 2025-01-12 18:16:23 +01:00
83887a8519 nix build: remove clippy build env that doesn't work 2025-01-12 17:51:33 +01:00
0a15db6960 nix build: update rustc to v1.78 2025-01-12 17:37:36 +01:00
295237476e fix formatting to comply with latest rustfmt 2025-01-12 17:36:25 +01:00
9d83605736 flake: update versions of nixpkgs and rust-overlay 2025-01-12 17:34:04 +01:00
4b1a7fb5e3 Merge pull request 'The version flag is required when applying a layout' (#921) from update-quickstart-docs-layout-apply into main
Reviewed-on: Deuxfleurs/garage#921
2025-01-09 00:41:35 +00:00
b6aaebaf4c The version flag is required when applying a layout
I followed the documentation and got an error if the layout's version was not specified 

```
garage layout apply

Error: Internal error:
Please pass the new layout version number to ensure that you are writing the correct version of the cluster layout.
To know the correct value of the new layout version, invoke `garage layout show` and review the proposed changes.
```

This fixes that
2025-01-08 20:30:09 +00:00
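For reference, the corrected sequence (as documented in the quick-start diff further down) is a show followed by an apply with the explicit version number; a minimal sketch:

```bash
# Review the staged layout changes; the output includes the new version number.
garage layout show

# Apply the staged layout, passing that version number explicitly.
garage layout apply --version 1
```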
7bbc8fec50 Merge pull request 'Fix #907' (#917) from vk/garage:fix_907 into main
Reviewed-on: Deuxfleurs/garage#917
2025-01-04 16:07:40 +00:00
6689800986 Formatting with 2025-01-04 16:52:23 +01:00
d2246baab7 Merge pull request 'update flake.lock' (#918) from update-flake into main
Reviewed-on: Deuxfleurs/garage#918
2025-01-04 15:43:41 +00:00
afac1d4d4a update flake.lock 2025-01-04 16:29:42 +01:00
6ca99fd02c formatting 2025-01-04 14:46:42 +01:00
b568bb863d Fix #907 2025-01-04 12:50:10 +01:00
b8f301a61d Merge pull request 'woodpecker: use modern syntax for secrets (removes warning)' (#912) from woodpecker-fix-warnings into main
Reviewed-on: Deuxfleurs/garage#912
2024-12-23 17:41:15 +00:00
428ad2075d woodpecker: use modern syntax for secrets (removes warning) 2024-12-23 18:00:22 +01:00
3661a597fa Merge pull request 'feat: add use_local_tz configuration' (#908) from ragazenta/garage:feat/local-timezone into main
Reviewed-on: Deuxfleurs/garage#908
Reviewed-by: maximilien <me@mricher.fr>
2024-12-01 13:23:24 +00:00
0fd3c0e794 doc: add use_local_tz configuration 2024-11-25 10:35:00 +07:00
4c1bf42192 feat: add use_local_tz configuration
Used in lifecycle_worker to determine midnight time
2024-11-23 05:51:12 +07:00
136 changed files with 1450 additions and 1246 deletions


@ -16,7 +16,7 @@ steps:
- name: build
image: nixpkgs/nix:nixos-22.05
commands:
- nix-build --no-build-output --attr clippy.amd64 --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
- nix-build --no-build-output --attr pkgs.amd64.debug --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
- name: unit + func tests
image: nixpkgs/nix:nixos-22.05
@ -24,10 +24,12 @@ steps:
GARAGE_TEST_INTEGRATION_EXE: result-bin/bin/garage
GARAGE_TEST_INTEGRATION_PATH: tmp-garage-integration
commands:
- nix-build --no-build-output --attr clippy.amd64 --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
- nix-build --no-build-output --attr test.amd64
- nix-build --no-build-output --attr test.amd64 --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
- ./result/bin/garage_db-*
- ./result/bin/garage_api-*
- ./result/bin/garage_api_common-*
- ./result/bin/garage_api_s3-*
- ./result/bin/garage_api_k2v-*
- ./result/bin/garage_api_admin-*
- ./result/bin/garage_model-*
- ./result/bin/garage_rpc-*
- ./result/bin/garage_table-*
@ -43,5 +45,5 @@ steps:
- name: integration tests
image: nixpkgs/nix:nixos-22.05
commands:
- nix-build --no-build-output --attr clippy.amd64 --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
- nix-build --no-build-output --attr pkgs.amd64.debug --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
- nix-shell --attr ci --run ./script/test-smoke.sh || (cat /tmp/garage.log; false)


@ -9,19 +9,20 @@ depends_on:
steps:
- name: refresh-index
image: nixpkgs/nix:nixos-22.05
secrets:
- source: garagehq_aws_access_key_id
target: AWS_ACCESS_KEY_ID
- source: garagehq_aws_secret_access_key
target: AWS_SECRET_ACCESS_KEY
environment:
AWS_ACCESS_KEY_ID:
from_secret: garagehq_aws_access_key_id
AWS_SECRET_ACCESS_KEY:
from_secret: garagehq_aws_secret_access_key
commands:
- mkdir -p /etc/nix && cp nix/nix.conf /etc/nix/nix.conf
- nix-shell --attr ci --run "refresh_index"
- name: multiarch-docker
image: nixpkgs/nix:nixos-22.05
secrets:
- docker_auth
environment:
DOCKER_AUTH:
from_secret: docker_auth
commands:
- mkdir -p /root/.docker
- echo $DOCKER_AUTH > /root/.docker/config.json


@ -23,7 +23,6 @@ steps:
- name: check is static binary
image: nixpkgs/nix:nixos-22.05
commands:
- nix-build --no-build-output --attr pkgs.${ARCH}.release --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
- nix-shell --attr ci --run "./script/not-dynamic.sh result-bin/bin/garage"
- name: integration tests
@ -48,11 +47,10 @@ steps:
image: nixpkgs/nix:nixos-22.05
environment:
TARGET: "${TARGET}"
secrets:
- source: garagehq_aws_access_key_id
target: AWS_ACCESS_KEY_ID
- source: garagehq_aws_secret_access_key
target: AWS_SECRET_ACCESS_KEY
AWS_ACCESS_KEY_ID:
from_secret: garagehq_aws_access_key_id
AWS_SECRET_ACCESS_KEY:
from_secret: garagehq_aws_secret_access_key
commands:
- nix-shell --attr ci --run "to_s3"
@ -61,8 +59,8 @@ steps:
environment:
DOCKER_PLATFORM: "linux/${ARCH}"
CONTAINER_NAME: "dxflrs/${ARCH}_garage"
secrets:
- docker_auth
DOCKER_AUTH:
from_secret: docker_auth
commands:
- mkdir -p /root/.docker
- echo $DOCKER_AUTH > /root/.docker/config.json

Cargo.lock (generated; 257 lines changed)

@ -236,36 +236,6 @@ version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "aws-config"
version = "1.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b30c39ebe61f75d1b3785362b1586b41991873c9ab3e317a9181c246fb71d82"
dependencies = [
"aws-credential-types",
"aws-runtime",
"aws-sdk-sso",
"aws-sdk-ssooidc",
"aws-sdk-sts",
"aws-smithy-async",
"aws-smithy-http",
"aws-smithy-json",
"aws-smithy-runtime",
"aws-smithy-runtime-api",
"aws-smithy-types",
"aws-types",
"bytes",
"fastrand",
"hex",
"http 0.2.11",
"hyper 0.14.28",
"ring",
"time",
"tokio",
"tracing",
"zeroize",
]
[[package]]
name = "aws-credential-types"
version = "1.1.4"
@ -353,73 +323,6 @@ dependencies = [
"url",
]
[[package]]
name = "aws-sdk-sso"
version = "1.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f486420a66caad72635bc2ce0ff6581646e0d32df02aa39dc983bfe794955a5b"
dependencies = [
"aws-credential-types",
"aws-runtime",
"aws-smithy-async",
"aws-smithy-http",
"aws-smithy-json",
"aws-smithy-runtime",
"aws-smithy-runtime-api",
"aws-smithy-types",
"aws-types",
"bytes",
"http 0.2.11",
"once_cell",
"regex-lite",
"tracing",
]
[[package]]
name = "aws-sdk-ssooidc"
version = "1.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "39ddccf01d82fce9b4a15c8ae8608211ee7db8ed13a70b514bbfe41df3d24841"
dependencies = [
"aws-credential-types",
"aws-runtime",
"aws-smithy-async",
"aws-smithy-http",
"aws-smithy-json",
"aws-smithy-runtime",
"aws-smithy-runtime-api",
"aws-smithy-types",
"aws-types",
"bytes",
"http 0.2.11",
"once_cell",
"regex-lite",
"tracing",
]
[[package]]
name = "aws-sdk-sts"
version = "1.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a591f8c7e6a621a501b2b5d2e88e1697fcb6274264523a6ad4d5959889a41ce"
dependencies = [
"aws-credential-types",
"aws-runtime",
"aws-smithy-async",
"aws-smithy-http",
"aws-smithy-json",
"aws-smithy-query",
"aws-smithy-runtime",
"aws-smithy-runtime-api",
"aws-smithy-types",
"aws-smithy-xml",
"aws-types",
"http 0.2.11",
"once_cell",
"regex-lite",
"tracing",
]
[[package]]
name = "aws-sigv4"
version = "1.1.4"
@ -522,16 +425,6 @@ dependencies = [
"aws-smithy-types",
]
[[package]]
name = "aws-smithy-query"
version = "0.60.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bda4b1dfc9810e35fba8a620e900522cd1bd4f9578c446e82f49d1ce41d2e9f9"
dependencies = [
"aws-smithy-types",
"urlencoding",
]
[[package]]
name = "aws-smithy-runtime"
version = "1.1.4"
@ -570,7 +463,6 @@ dependencies = [
"pin-project-lite",
"tokio",
"tracing",
"zeroize",
]
[[package]]
@ -781,6 +673,12 @@ version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e"
[[package]]
name = "cfg_aliases"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724"
[[package]]
name = "chrono"
version = "0.4.33"
@ -1308,7 +1206,6 @@ version = "1.0.1"
dependencies = [
"assert-json-diff",
"async-trait",
"aws-config",
"aws-sdk-s3",
"backtrace",
"base64 0.21.7",
@ -1317,8 +1214,10 @@ dependencies = [
"chrono",
"format_table",
"futures",
"futures-util",
"garage_api",
"garage_api_admin",
"garage_api_common",
"garage_api_k2v",
"garage_api_s3",
"garage_block",
"garage_db",
"garage_model",
@ -1341,10 +1240,7 @@ dependencies = [
"opentelemetry-otlp",
"opentelemetry-prometheus",
"parse_duration",
"prometheus",
"rand",
"serde",
"serde_bytes",
"serde_json",
"sha1",
"sha2",
@ -1353,17 +1249,96 @@ dependencies = [
"syslog-tracing",
"timeago",
"tokio",
"toml",
"tracing",
"tracing-subscriber",
]
[[package]]
name = "garage_api"
name = "garage_api_admin"
version = "1.0.1"
dependencies = [
"argon2",
"async-trait",
"err-derive",
"futures",
"garage_api_common",
"garage_model",
"garage_rpc",
"garage_table",
"garage_util",
"hex",
"http 1.0.0",
"hyper 1.1.0",
"opentelemetry",
"opentelemetry-prometheus",
"prometheus",
"serde",
"serde_json",
"tokio",
"tracing",
"url",
]
[[package]]
name = "garage_api_common"
version = "1.0.1"
dependencies = [
"async-trait",
"bytes",
"chrono",
"crypto-common",
"err-derive",
"futures",
"garage_model",
"garage_table",
"garage_util",
"hex",
"hmac",
"http 1.0.0",
"http-body-util",
"hyper 1.1.0",
"hyper-util",
"idna",
"nom",
"opentelemetry",
"pin-project",
"serde",
"serde_json",
"sha2",
"tokio",
"tracing",
"url",
]
[[package]]
name = "garage_api_k2v"
version = "1.0.1"
dependencies = [
"async-trait",
"base64 0.21.7",
"err-derive",
"futures",
"garage_api_common",
"garage_model",
"garage_table",
"garage_util",
"http 1.0.0",
"http-body-util",
"hyper 1.1.0",
"opentelemetry",
"percent-encoding",
"serde",
"serde_json",
"tokio",
"tracing",
"url",
]
[[package]]
name = "garage_api_s3"
version = "1.0.1"
dependencies = [
"aes-gcm",
"argon2",
"async-compression",
"async-trait",
"base64 0.21.7",
@ -1371,11 +1346,10 @@ dependencies = [
"chrono",
"crc32c",
"crc32fast",
"crypto-common",
"err-derive",
"form_urlencoded",
"futures",
"futures-util",
"garage_api_common",
"garage_block",
"garage_model",
"garage_net",
@ -1383,26 +1357,19 @@ dependencies = [
"garage_table",
"garage_util",
"hex",
"hmac",
"http 1.0.0",
"http-body-util",
"http-range",
"httpdate",
"hyper 1.1.0",
"hyper-util",
"idna",
"md-5",
"multer",
"nom",
"opentelemetry",
"opentelemetry-prometheus",
"percent-encoding",
"pin-project",
"prometheus",
"quick-xml",
"roxmltree",
"serde",
"serde_bytes",
"serde_json",
"sha1",
"sha2",
@ -1423,7 +1390,6 @@ dependencies = [
"bytes",
"bytesize",
"futures",
"futures-util",
"garage_db",
"garage_net",
"garage_rpc",
@ -1433,7 +1399,6 @@ dependencies = [
"opentelemetry",
"rand",
"serde",
"serde_bytes",
"tokio",
"tokio-util 0.7.10",
"tracing",
@ -1446,7 +1411,6 @@ version = "1.0.1"
dependencies = [
"err-derive",
"heed",
"hexdump",
"mktemp",
"r2d2",
"r2d2_sqlite",
@ -1458,14 +1422,12 @@ dependencies = [
name = "garage_model"
version = "1.0.1"
dependencies = [
"arc-swap",
"async-trait",
"base64 0.21.7",
"blake2",
"chrono",
"err-derive",
"futures",
"futures-util",
"garage_block",
"garage_db",
"garage_net",
@ -1474,7 +1436,6 @@ dependencies = [
"garage_util",
"hex",
"http 1.0.0",
"opentelemetry",
"parse_duration",
"rand",
"serde",
@ -1516,13 +1477,10 @@ version = "1.0.1"
dependencies = [
"arc-swap",
"async-trait",
"bytes",
"bytesize",
"err-derive",
"format_table",
"futures",
"futures-util",
"garage_db",
"garage_net",
"garage_util",
"gethostname",
@ -1542,7 +1500,6 @@ dependencies = [
"serde_bytes",
"serde_json",
"tokio",
"tokio-stream",
"tracing",
]
@ -1552,7 +1509,6 @@ version = "1.0.1"
dependencies = [
"arc-swap",
"async-trait",
"bytes",
"futures",
"futures-util",
"garage_db",
@ -1575,10 +1531,8 @@ dependencies = [
"arc-swap",
"async-trait",
"blake2",
"bytes",
"bytesize",
"chrono",
"digest",
"err-derive",
"futures",
"garage_db",
@ -1607,15 +1561,14 @@ name = "garage_web"
version = "1.0.1"
dependencies = [
"err-derive",
"futures",
"garage_api",
"garage_api_common",
"garage_api_s3",
"garage_model",
"garage_table",
"garage_util",
"http 1.0.0",
"http-body-util",
"hyper 1.1.0",
"hyper-util",
"opentelemetry",
"percent-encoding",
"tokio",
@ -2251,7 +2204,6 @@ dependencies = [
"sha2",
"thiserror",
"tokio",
"tracing",
"tracing-subscriber",
]
@ -2406,9 +2358,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
version = "0.2.153"
version = "0.2.169"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd"
checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a"
[[package]]
name = "libsodium-sys"
@ -2558,12 +2510,13 @@ checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a"
[[package]]
name = "nix"
version = "0.27.1"
version = "0.29.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053"
checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46"
dependencies = [
"bitflags 2.4.2",
"cfg-if",
"cfg_aliases 0.2.1",
"libc",
]
@ -3881,7 +3834,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a2a1c578e98c1c16fc3b8ec1328f7659a500737d7a0c6d625e73e830ff9c1f6"
dependencies = [
"bitflags 1.3.2",
"cfg_aliases",
"cfg_aliases 0.1.1",
"libc",
"parking_lot 0.11.2",
"parking_lot_core 0.8.6",
@ -3895,7 +3848,7 @@ version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "70a2595fc3aa78f2d0e45dd425b22282dd863273761cc77780914b2cf3003acf"
dependencies = [
"cfg_aliases",
"cfg_aliases 0.1.1",
"memchr",
"proc-macro2",
"quote",
@ -4534,12 +4487,6 @@ dependencies = [
"percent-encoding",
]
[[package]]
name = "urlencoding"
version = "2.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da"
[[package]]
name = "utf8parse"
version = "0.2.1"

Cargo.nix (455 lines changed): file diff suppressed because it is too large.

@ -8,7 +8,10 @@ members = [
"src/table",
"src/block",
"src/model",
"src/api",
"src/api/common",
"src/api/s3",
"src/api/k2v",
"src/api/admin",
"src/web",
"src/garage",
"src/k2v-client",
@ -21,7 +24,10 @@ default-members = ["src/garage"]
# Internal Garage crates
format_table = { version = "0.1.1", path = "src/format-table" }
garage_api = { version = "1.0.1", path = "src/api" }
garage_api_common = { version = "1.0.1", path = "src/api/common" }
garage_api_admin = { version = "1.0.1", path = "src/api/admin" }
garage_api_s3 = { version = "1.0.1", path = "src/api/s3" }
garage_api_k2v = { version = "1.0.1", path = "src/api/k2v" }
garage_block = { version = "1.0.1", path = "src/block" }
garage_db = { version = "1.0.1", path = "src/db", default-features = false }
garage_model = { version = "1.0.1", path = "src/model", default-features = false }
@ -46,7 +52,6 @@ chrono = "0.4"
crc32fast = "1.4"
crc32c = "0.6"
crypto-common = "0.1"
digest = "0.10"
err-derive = "0.3"
gethostname = "0.4"
git-version = "0.3.4"
@ -59,7 +64,7 @@ ipnet = "2.9.0"
lazy_static = "1.4"
md-5 = "0.10"
mktemp = "0.5"
nix = { version = "0.27", default-features = false, features = ["fs"] }
nix = { version = "0.29", default-features = false, features = ["fs"] }
nom = "7.1"
parse_duration = "2.1"
pin-project = "1.0.12"
@ -136,8 +141,6 @@ thiserror = "1.0"
assert-json-diff = "2.0"
rustc_version = "0.4.0"
static_init = "1.0"
aws-config = "1.1.4"
aws-sdk-config = "1.13"
aws-sdk-s3 = "1.14"
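With the workspace now split into several `garage_api_*` crates, individual crates can be built or tested in isolation; a small sketch using the crate names declared above:

```bash
# Build only the S3 API crate (and its dependencies).
cargo build -p garage_api_s3

# Run the unit tests of a single split crate.
cargo test -p garage_api_k2v
```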


@ -45,11 +45,4 @@ in {
];
});
};
clippy = {
amd64 = (compile {
inherit system git_version pkgsSrc cargo2nixOverlay;
target = "x86_64-unknown-linux-musl";
compiler = "clippy";
}).workspace.garage { compileMode = "build"; };
};
}


@ -17,7 +17,7 @@ Garage can also help you serve this content.
## Gitea
You can use Garage with Gitea to store your [git LFS](https://git-lfs.github.com/) data, your users' avatar, and their attachements.
You can use Garage with Gitea to store your [git LFS](https://git-lfs.github.com/) data, your users' avatar, and their attachments.
You can configure a different target for each data type (check `[lfs]` and `[attachment]` sections of the Gitea documentation) and you can provide a default one through the `[storage]` section.
Let's start by creating a key and a bucket (your key id and secret will be needed later, keep them somewhere):


@ -36,7 +36,7 @@ sudo killall nix-daemon
Now you can enter our nix-shell, all the required packages will be downloaded but they will not pollute your environment outside of the shell:
```bash
nix-shell
nix-shell -A devShell
```
You can use the traditional Rust development workflow:
@ -65,8 +65,8 @@ nix-build -j $(nproc) --max-jobs auto
```
Our build has multiple parameters you might want to set:
- `release` build with release optimisations instead of debug
- `target allows` for cross compilation
- `release` to build with release optimisations instead of debug
- `target` allows for cross compilation
- `compileMode` can be set to test or bench to build a unit test runner
- `git_version` to inject the hash to display when running `garage stats`


@ -21,14 +21,14 @@ data_dir = [
```
Garage will automatically balance all blocks stored by the node
among the different specified directories, proportionnally to the
among the different specified directories, proportionally to the
specified capacities.
## Updating the list of storage locations
If you add new storage locations to your `data_dir`,
Garage will not rebalance existing data between storage locations.
Newly written blocks will be balanced proportionnally to the specified capacities,
Newly written blocks will be balanced proportionally to the specified capacities,
and existing data may be moved between drives to improve balancing,
but only opportunistically when a data block is re-written (e.g. an object
is re-uploaded, or an object with a duplicate block is uploaded).
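As a reminder of the feature being documented here, a multi-directory `data_dir` with per-drive capacities looks roughly like this (a sketch; paths and capacities are placeholders):

```bash
# Hypothetical excerpt of /etc/garage.toml for a node with two data drives.
cat >> /etc/garage.toml <<'EOF'
data_dir = [
    { path = "/mnt/hdd1", capacity = "2T" },
    { path = "/mnt/hdd2", capacity = "4T" },
]
EOF
```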


@ -199,7 +199,7 @@ For instance here you could write just `garage layout assign -z dc1 -c 1G 563e`.
The layout then has to be applied to the cluster, using:
```bash
garage layout apply
garage layout apply --version 1
```
@ -349,7 +349,7 @@ Check [our s3 compatibility list](@/documentation/reference-manual/s3-compatibil
### Other tools for interacting with Garage
The following tools can also be used to send and recieve files from/to Garage:
The following tools can also be used to send and receive files from/to Garage:
- [minio-client](@/documentation/connect/cli.md#minio-client)
- [s3cmd](@/documentation/connect/cli.md#s3cmd)


@ -13,9 +13,11 @@ consistency_mode = "consistent"
metadata_dir = "/var/lib/garage/meta"
data_dir = "/var/lib/garage/data"
metadata_snapshots_dir = "/var/lib/garage/snapshots"
metadata_fsync = true
data_fsync = false
disable_scrub = false
use_local_tz = false
metadata_auto_snapshot_interval = "6h"
db_engine = "lmdb"
@ -99,10 +101,12 @@ Top-level configuration options:
[`data_fsync`](#data_fsync),
[`db_engine`](#db_engine),
[`disable_scrub`](#disable_scrub),
[`use_local_tz`](#use_local_tz),
[`lmdb_map_size`](#lmdb_map_size),
[`metadata_auto_snapshot_interval`](#metadata_auto_snapshot_interval),
[`metadata_dir`](#metadata_dir),
[`metadata_fsync`](#metadata_fsync),
[`metadata_snapshots_dir`](#metadata_snapshots_dir),
[`replication_factor`](#replication_factor),
[`consistency_mode`](#consistency_mode),
[`rpc_bind_addr`](#rpc_bind_addr),
@ -273,6 +277,7 @@ as the index of all objects, object version and object blocks.
Store this folder on a fast SSD drive if possible to maximize Garage's performance.
#### `data_dir` {#data_dir}
The directory in which Garage will store the data blocks of objects.
@ -293,6 +298,25 @@ data_dir = [
See [the dedicated documentation page](@/documentation/operations/multi-hdd.md)
on how to operate Garage in such a setup.
#### `metadata_snapshots_dir` (since Garage `v1.0.2`) {#metadata_snapshots_dir}
The directory in which Garage will store metadata snapshots when it
performs a snapshot of the metadata database, either when instructed to do
so from a RPC call or regularly through
[`metadata_auto_snapshot_interval`](#metadata_auto_snapshot_interval).
By default, Garage will store snapshots into a `snapshots/` subdirectory
of [`metadata_dir`](#metadata_dir). This might quickly fill up your
metadata storage space if you use snapshots, because Garage will need up
to 4x the space of the existing metadata database: each snapshot requires
roughly as much space as the original database, and Garage temporarily
needs to store up to three different snapshots before it cleans up the oldest
snapshot to go back to two stored snapshots.
To prevent filling your disk, you might want to change this setting to a
directory with ample available space, e.g. on the same storage space as
[`data_dir`](#data_dir).
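A minimal configuration sketch for the option introduced above (the directory path is a placeholder; the other key mirrors the sample configuration earlier in this diff):

```bash
# Hypothetical excerpt: keep metadata on a fast SSD, but store the bulky
# snapshots on the larger data volume.
cat >> /etc/garage.toml <<'EOF'
metadata_snapshots_dir = "/var/lib/garage/data/snapshots"
metadata_auto_snapshot_interval = "6h"
EOF
```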
#### `db_engine` (since `v0.8.0`) {#db_engine}
Since `v0.8.0`, Garage can use alternative storage backends as follows:
@ -427,6 +451,13 @@ you should delete it from the data directory and then call `garage repair
blocks` on the node to ensure that it re-obtains a copy from another node on
the network.
#### `use_local_tz` {#use_local_tz}
By default, Garage runs the lifecycle worker every day at midnight in UTC. Set the
`use_local_tz` configuration value to `true` if you want Garage to run the
lifecycle worker at midnight in your local timezone. If you have multiple nodes,
you should also ensure that each node has the same timezone configuration.
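Correspondingly, enabling the timezone option is a one-line change (this assumes the node's system timezone, e.g. /etc/localtime, is configured consistently across nodes, as the paragraph above recommends):

```bash
# Hypothetical excerpt: run the lifecycle worker at local midnight.
cat >> /etc/garage.toml <<'EOF'
use_local_tz = true
EOF
```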
#### `block_size` {#block_size}
Garage splits stored objects in consecutive chunks of size `block_size`


@ -61,7 +61,7 @@ directed to a Garage cluster can be handled independently of one another instead
of going through a central bottleneck (the leader node).
As a consequence, requests can be handled much faster, even in cases where latency
between cluster nodes is important (see our [benchmarks](@/documentation/design/benchmarks/index.md) for data on this).
This is particularly usefull when nodes are far from one another and talk to one other through standard Internet connections.
This is particularly useful when nodes are far from one another and talk to one other through standard Internet connections.
### Web server for static websites


@ -392,7 +392,7 @@ table_merkle_updater_todo_queue_length{table_name="block_ref"} 0
#### `table_sync_items_received`, `table_sync_items_sent` (counters)
Number of data items sent to/recieved from other nodes during resync procedures
Number of data items sent to/received from other nodes during resync procedures
```
table_sync_items_received{from="<remote node>",table_name="bucket_v2"} 3


@ -42,7 +42,7 @@ The general principle are similar, but details have not been updated.**
A version is defined by the existence of at least one entry in the blocks table for a certain version UUID.
We must keep the following invariant: if a version exists in the blocks table, it has to be referenced in the objects table.
We explicitly manage concurrent versions of an object: the version timestamp and version UUID columns are index columns, thus we may have several concurrent versions of an object.
Important: before deleting an older version from the objects table, we must make sure that we did a successfull delete of the blocks of that version from the blocks table.
Important: before deleting an older version from the objects table, we must make sure that we did a successful delete of the blocks of that version from the blocks table.
Thus, the workflow for reading an object is as follows:
@ -95,7 +95,7 @@ Known issue: if someone is reading from a version that we want to delete and the
Usefull metadata:
- list of versions that reference this block in the Casandra table, so that we can do GC by checking in Cassandra that the lines still exist
- list of other nodes that we know have acknowledged a write of this block, usefull in the rebalancing algorithm
- list of other nodes that we know have acknowledged a write of this block, useful in the rebalancing algorithm
Write strategy: have a single thread that does all write IO so that it is serialized (or have several threads that manage independent parts of the hash space). When writing a blob, write it to a temporary file, close, then rename so that a concurrent read gets a consistent result (either not found or found with whole content).
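The temporary-file-then-rename trick described here is the standard way to get atomic visibility on POSIX filesystems, since rename(2) within one filesystem is atomic; a minimal shell sketch with illustrative paths:

```bash
# Write the blob to a temp file on the SAME filesystem as the target,
# then rename it into place: a concurrent reader sees either the old
# state (not found) or the complete new file, never a partial write.
tmp=$(mktemp /var/lib/garage/data/.blob.XXXXXX)
cat blob-content > "$tmp"
sync "$tmp"   # optionally flush data to disk before publishing the file
mv "$tmp" /var/lib/garage/data/blob
```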


@ -68,7 +68,7 @@ The migration steps are as follows:
5. Turn off Garage 0.3
6. Backup metadata folders if you can (i.e. if you have space to do it
somewhere). Backuping data folders could also be usefull but that's much
somewhere). Backuping data folders could also be useful but that's much
harder to do. If your filesystem supports snapshots, this could be a good
time to use them.


@ -37,7 +37,7 @@ There are two reasons for this:
Reminder: rules of simplicity, concerning changes to Garage's source code.
Always question what we are doing.
Never do anything just because it looks nice or because we "think" it might be usefull at some later point but without knowing precisely why/when.
Never do anything just because it looks nice or because we "think" it might be useful at some later point but without knowing precisely why/when.
Only do things that make perfect sense in the context of what we currently know.
## References


@ -562,7 +562,7 @@ token>", v: ["<value1>", ...] }`, with the following fields:
- in case of concurrent update and deletion, a `null` is added to the list of concurrent values
- if the `tombstones` query parameter is set to `true`, tombstones are returned
for items that have been deleted (this can be usefull for inserting after an
for items that have been deleted (this can be useful for inserting after an
item that has been deleted, so that the insert is not considered
concurrent with the delete). Tombstones are returned as tuples in the
same format with only `null` values

flake.lock (generated; 50 lines changed)

@ -12,17 +12,17 @@
"rust-overlay": "rust-overlay"
},
"locked": {
"lastModified": 1666087781,
"narHash": "sha256-trKVdjMZ8mNkGfLcY5LsJJGtdV3xJDZnMVrkFjErlcs=",
"owner": "Alexis211",
"lastModified": 1705129117,
"narHash": "sha256-LgdDHibvimzYhxBK3kxCk2gAL7k4Hyigl5KI0X9cijA=",
"owner": "cargo2nix",
"repo": "cargo2nix",
"rev": "a7a61179b66054904ef6a195d8da736eaaa06c36",
"rev": "ae19a9e1f8f0880c088ea155ab66cee1fa001f59",
"type": "github"
},
"original": {
"owner": "Alexis211",
"owner": "cargo2nix",
"repo": "cargo2nix",
"rev": "a7a61179b66054904ef6a195d8da736eaaa06c36",
"rev": "ae19a9e1f8f0880c088ea155ab66cee1fa001f59",
"type": "github"
}
},
@ -58,33 +58,17 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1724395761,
"narHash": "sha256-zRkDV/nbrnp3Y8oCADf5ETl1sDrdmAW6/bBVJ8EbIdQ=",
"lastModified": 1736692550,
"narHash": "sha256-7tk8xH+g0sJkKLTJFOxphJxxOjMDFMWv24nXslaU2ro=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "ae815cee91b417be55d43781eb4b73ae1ecc396c",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1724681257,
"narHash": "sha256-EJRuc5Qp7yfXko5ZNeEMYAs4DzAvkCyALuJ/tGllhN4=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "0239aeb2f82ea27ccd6b61582b8f7fb8750eeada",
"rev": "7c4869c47090dd7f9f1bdfb49a22aea026996815",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "0239aeb2f82ea27ccd6b61582b8f7fb8750eeada",
"rev": "7c4869c47090dd7f9f1bdfb49a22aea026996815",
"type": "github"
}
},
@ -96,24 +80,28 @@
"cargo2nix",
"flake-utils"
],
"nixpkgs": "nixpkgs_2"
"nixpkgs": "nixpkgs"
}
},
"rust-overlay": {
"inputs": {
"nixpkgs": "nixpkgs"
"nixpkgs": [
"cargo2nix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1724638882,
"narHash": "sha256-ap2jIQi/FuUHR6HCht6ASWhoz8EiB99XmI8Esot38VE=",
"lastModified": 1736649126,
"narHash": "sha256-XCw5sv/ePsroqiF3lJM6Y2X9EhPdHeE47gr3Q8b0UQw=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "19b70f147b9c67a759e35824b241f1ed92e46694",
"rev": "162ab0edc2936508470199b2e8e6c444a2535019",
"type": "github"
},
"original": {
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "162ab0edc2936508470199b2e8e6c444a2535019",
"type": "github"
}
}


@ -2,24 +2,27 @@
description =
"Garage, an S3-compatible distributed object store for self-hosted deployments";
# Nixpkgs 24.05 as of 2024-08-26 has rustc v1.77
# Nixpkgs 24.11 as of 2025-01-12 has rustc v1.82
inputs.nixpkgs.url =
"github:NixOS/nixpkgs/0239aeb2f82ea27ccd6b61582b8f7fb8750eeada";
"github:NixOS/nixpkgs/7c4869c47090dd7f9f1bdfb49a22aea026996815";
inputs.flake-compat.url = "github:nix-community/flake-compat";
inputs.cargo2nix = {
# As of 2022-10-18: two small patches over unstable branch, one for clippy and one to fix feature detection
url = "github:Alexis211/cargo2nix/a7a61179b66054904ef6a195d8da736eaaa06c36";
#url = "github:Alexis211/cargo2nix/a7a61179b66054904ef6a195d8da736eaaa06c36";
# As of 2023-04-25:
# - my two patches were merged into unstable (one for clippy and one to "fix" feature detection)
# - rustc v1.66
# url = "github:cargo2nix/cargo2nix/8fb57a670f7993bfc24099c33eb9c5abb51f29a2";
# Rust overlay as of 2024-08-26
# Mainline cargo2nix as of of 2025-01-12 (branch release-0.11.0)
url = "github:cargo2nix/cargo2nix/ae19a9e1f8f0880c088ea155ab66cee1fa001f59";
# Rust overlay as of 2025-01-12
inputs.rust-overlay.url =
"github:oxalica/rust-overlay/19b70f147b9c67a759e35824b241f1ed92e46694";
"github:oxalica/rust-overlay/162ab0edc2936508470199b2e8e6c444a2535019";
inputs.nixpkgs.follows = "nixpkgs";
inputs.flake-compat.follows = "flake-compat";


@ -1,4 +1,4 @@
{ system, target ? null, pkgsSrc, cargo2nixOverlay, compiler ? "rustc"
{ system, target ? null, pkgsSrc, cargo2nixOverlay
, release ? false, git_version ? null, features ? null, }:
let
@ -20,24 +20,10 @@ let
};
toolchainOptions = {
rustVersion = "1.77.0";
rustVersion = "1.78.0";
extraRustComponents = [ "clippy" ];
};
buildEnv = (drv:
{
rustc = drv.setBuildEnv;
clippy = ''
${drv.setBuildEnv or ""}
echo
echo --- BUILDING WITH CLIPPY ---
echo
export NIX_RUST_BUILD_FLAGS="''${NIX_RUST_BUILD_FLAGS} --deny warnings"
export RUSTC="''${CLIPPY_DRIVER}"
'';
}.${compiler});
/* Cargo2nix provides many overrides by default, you can take inspiration from them:
https://github.com/cargo2nix/cargo2nix/blob/master/overlay/overrides.nix
@ -46,9 +32,7 @@ let
*/
packageOverrides = pkgs:
pkgs.rustBuilder.overrides.all ++ [
/* [1] We add some logic to compile our crates with clippy, it provides us many additional lints
[2] We need to alter Nix hardening to make static binaries: PIE,
/* [1] We need to alter Nix hardening to make static binaries: PIE,
Position Independent Executables seems to be supported only on amd64. Having
this flag set either 1. make our executables crash or 2. compile as dynamic on some platforms.
Here, we deactivate it. Later (find `codegenOpts`), we reactivate it for supported targets
@ -56,11 +40,11 @@ let
PIE is a feature used by ASLR, which helps mitigate security issues.
Learn more about Nix Hardening at: https://github.com/NixOS/nixpkgs/blob/master/pkgs/build-support/cc-wrapper/add-hardening.sh
[3] We want to inject the git version while keeping the build deterministic.
[2] We want to inject the git version while keeping the build deterministic.
As we do not want to consider the .git folder as part of the input source,
we ask the user (the CI often) to pass the value to Nix.
[4] We don't want libsodium-sys and zstd-sys to try to use pkgconfig to build against a system library.
[3] We don't want libsodium-sys and zstd-sys to try to use pkgconfig to build against a system library.
However the features to do so get activated for some reason (due to a bug in cargo2nix?),
so disable them manually here.
*/
@ -68,7 +52,7 @@ let
name = "garage";
overrideAttrs = drv:
(if git_version != null then {
# [3]
# [2]
preConfigure = ''
${drv.preConfigure or ""}
export GIT_VERSION="${git_version}"
@ -76,86 +60,21 @@ let
} else
{ }) // {
# [1]
setBuildEnv = (buildEnv drv);
# [2]
hardeningDisable = [ "pie" ];
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_rpc";
overrideAttrs = drv: { # [1]
setBuildEnv = (buildEnv drv);
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_db";
overrideAttrs = drv: { # [1]
setBuildEnv = (buildEnv drv);
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_util";
overrideAttrs = drv: { # [1]
setBuildEnv = (buildEnv drv);
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_table";
overrideAttrs = drv: { # [1]
setBuildEnv = (buildEnv drv);
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_block";
overrideAttrs = drv: { # [1]
setBuildEnv = (buildEnv drv);
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_model";
overrideAttrs = drv: { # [1]
setBuildEnv = (buildEnv drv);
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_api";
overrideAttrs = drv: { # [1]
setBuildEnv = (buildEnv drv);
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_web";
overrideAttrs = drv: { # [1]
setBuildEnv = (buildEnv drv);
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "k2v-client";
overrideAttrs = drv: { # [1]
setBuildEnv = (buildEnv drv);
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "libsodium-sys";
overrideArgs = old: {
features = [ ]; # [4]
features = [ ]; # [3]
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "zstd-sys";
overrideArgs = old: {
features = [ ]; # [4]
features = [ ]; # [3]
};
})
];
@ -215,4 +134,5 @@ let
in pkgs.rustBuilder.makePackageSet ({
inherit release packageFun packageOverrides codegenOpts rootFeatures;
target = rustTarget;
workspaceSrc = pkgs.lib.cleanSource ../.;
} // toolchainOptions)
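With the dedicated clippy wiring removed from the Nix build, lints can still be run the ordinary cargo way from the dev shell; a sketch of the assumed workflow (the diff itself does not prescribe one):

```bash
nix-shell -A devShell --run 'cargo clippy --workspace --all-targets -- --deny warnings'
```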


@ -15,7 +15,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.5.1
version: 0.6.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to


@ -0,0 +1,86 @@
# garage
![Version: 0.6.0](https://img.shields.io/badge/Version-0.6.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.0.1](https://img.shields.io/badge/AppVersion-v1.0.1-informational?style=flat-square)
S3-compatible object store for small self-hosted geo-distributed deployments
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| affinity | object | `{}` | |
| deployment.kind | string | `"StatefulSet"` | Switchable to DaemonSet |
| deployment.podManagementPolicy | string | `"OrderedReady"` | If using statefulset, allow Parallel or OrderedReady (default) |
| deployment.replicaCount | int | `3` | Number of StatefulSet replicas/garage nodes to start |
| environment | object | `{}` | |
| extraVolumeMounts | object | `{}` | |
| extraVolumes | object | `{}` | |
| fullnameOverride | string | `""` | |
| garage.blockSize | string | `"1048576"` | Defaults is 1MB An increase can result in better performance in certain scenarios https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#block-size |
| garage.bootstrapPeers | list | `[]` | This is not required if you use the integrated kubernetes discovery |
| garage.compressionLevel | string | `"1"` | zstd compression level of stored blocks https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#compression-level |
| garage.dbEngine | string | `"lmdb"` | Can be changed for better performance on certain systems https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db-engine-since-v0-8-0 |
| garage.existingConfigMap | string | `""` | if not empty string, allow using an existing ConfigMap for the garage.toml, if set, ignores garage.toml |
| garage.garageTomlString | string | `""` | String Template for the garage configuration if set, ignores above values. Values can be templated, see https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/ |
| garage.kubernetesSkipCrd | bool | `false` | Set to true if you want to use k8s discovery but install the CRDs manually outside of the helm chart, for example if you operate at namespace level without cluster ressources |
| garage.replicationMode | string | `"3"` | Default to 3 replicas, see the replication_mode section at https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication-mode |
| garage.rpcBindAddr | string | `"[::]:3901"` | |
| garage.rpcSecret | string | `""` | If not given, a random secret will be generated and stored in a Secret object |
| garage.s3.api.region | string | `"garage"` | |
| garage.s3.api.rootDomain | string | `".s3.garage.tld"` | |
| garage.s3.web.index | string | `"index.html"` | |
| garage.s3.web.rootDomain | string | `".web.garage.tld"` | |
| image.pullPolicy | string | `"IfNotPresent"` | |
| image.repository | string | `"dxflrs/amd64_garage"` | default to amd64 docker image |
| image.tag | string | `""` | set the image tag, please prefer using the chart version and not this to avoid compatibility issues |
| imagePullSecrets | list | `[]` | set if you need credentials to pull your custom image |
| ingress.s3.api.annotations | object | `{}` | Rely _either_ on the className or the annotation below but not both! If you want to use the className, set className: "nginx" and replace "nginx" by an Ingress controller name, examples [here](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers). |
| ingress.s3.api.enabled | bool | `false` | |
| ingress.s3.api.hosts[0] | object | `{"host":"s3.garage.tld","paths":[{"path":"/","pathType":"Prefix"}]}` | garage S3 API endpoint, to be used with awscli for example |
| ingress.s3.api.hosts[1] | object | `{"host":"*.s3.garage.tld","paths":[{"path":"/","pathType":"Prefix"}]}` | garage S3 API endpoint, DNS style bucket access |
| ingress.s3.api.labels | object | `{}` | |
| ingress.s3.api.tls | list | `[]` | |
| ingress.s3.web.annotations | object | `{}` | Rely _either_ on the className or the annotation below but not both! If you want to use the className, set className: "nginx" and replace "nginx" by an Ingress controller name, examples [here](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers). |
| ingress.s3.web.enabled | bool | `false` | |
| ingress.s3.web.hosts[0] | object | `{"host":"*.web.garage.tld","paths":[{"path":"/","pathType":"Prefix"}]}` | wildcard website access with bucket name prefix |
| ingress.s3.web.hosts[1] | object | `{"host":"mywebpage.example.com","paths":[{"path":"/","pathType":"Prefix"}]}` | specific bucket access with FQDN bucket |
| ingress.s3.web.labels | object | `{}` | |
| ingress.s3.web.tls | list | `[]` | |
| initImage.pullPolicy | string | `"IfNotPresent"` | |
| initImage.repository | string | `"busybox"` | |
| initImage.tag | string | `"stable"` | |
| monitoring.metrics.enabled | bool | `false` | If true, a service for monitoring is created with a prometheus.io/scrape annotation |
| monitoring.metrics.serviceMonitor.enabled | bool | `false` | If true, a ServiceMonitor CRD is created for a prometheus operator https://github.com/coreos/prometheus-operator |
| monitoring.metrics.serviceMonitor.interval | string | `"15s"` | |
| monitoring.metrics.serviceMonitor.labels | object | `{}` | |
| monitoring.metrics.serviceMonitor.path | string | `"/metrics"` | |
| monitoring.metrics.serviceMonitor.relabelings | list | `[]` | |
| monitoring.metrics.serviceMonitor.scheme | string | `"http"` | |
| monitoring.metrics.serviceMonitor.scrapeTimeout | string | `"10s"` | |
| monitoring.metrics.serviceMonitor.tlsConfig | object | `{}` | |
| monitoring.tracing.sink | string | `""` | specify a sink endpoint for OpenTelemetry Traces, eg. `http://localhost:4317` |
| nameOverride | string | `""` | |
| nodeSelector | object | `{}` | |
| persistence.data.hostPath | string | `"/var/lib/garage/data"` | |
| persistence.data.size | string | `"100Mi"` | |
| persistence.enabled | bool | `true` | |
| persistence.meta.hostPath | string | `"/var/lib/garage/meta"` | |
| persistence.meta.size | string | `"100Mi"` | |
| podAnnotations | object | `{}` | additonal pod annotations |
| podSecurityContext.fsGroup | int | `1000` | |
| podSecurityContext.runAsGroup | int | `1000` | |
| podSecurityContext.runAsNonRoot | bool | `true` | |
| podSecurityContext.runAsUser | int | `1000` | |
| resources | object | `{}` | |
| securityContext.capabilities | object | `{"drop":["ALL"]}` | The default security context is heavily restricted, feel free to tune it to your requirements |
| securityContext.readOnlyRootFilesystem | bool | `true` | |
| service.s3.api.port | int | `3900` | |
| service.s3.web.port | int | `3902` | |
| service.type | string | `"ClusterIP"` | You can rely on any service to expose your cluster - ClusterIP (+ Ingress) - NodePort (+ Ingress) - LoadBalancer |
| serviceAccount.annotations | object | `{}` | Annotations to add to the service account |
| serviceAccount.create | bool | `true` | Specifies whether a service account should be created |
| serviceAccount.name | string | `""` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template |
| tolerations | list | `[]` | |
----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2)
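Since the table above is autogenerated, it should not be edited by hand; after changing the `# --` comments in values.yaml, it can be regenerated from the chart directory (assuming helm-docs is installed):

```bash
# Re-render README.md from Chart.yaml and the `# --` comments in values.yaml.
helm-docs
```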


@ -1,7 +1,49 @@
{{- if not .Values.garage.existingConfigMap }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "garage.fullname" . }}-config
data:
garage.toml: |-
{{- tpl (index (index .Values.garage) "garage.toml") $ | nindent 4 }}
{{- if .Values.garage.garageTomlString }}
{{- tpl (index (index .Values.garage) "garageTomlString") $ | nindent 4 }}
{{- else }}
metadata_dir = "/mnt/meta"
data_dir = "/mnt/data"
db_engine = "{{ .Values.garage.dbEngine }}"
block_size = {{ .Values.garage.blockSize }}
replication_mode = "{{ .Values.garage.replicationMode }}"
compression_level = {{ .Values.garage.compressionLevel }}
rpc_bind_addr = "{{ .Values.garage.rpcBindAddr }}"
# rpc_secret will be populated by the init container from a k8s secret object
rpc_secret = "__RPC_SECRET_REPLACE__"
bootstrap_peers = {{ .Values.garage.bootstrapPeers }}
[kubernetes_discovery]
namespace = "{{ .Release.Namespace }}"
service_name = "{{ include "garage.fullname" . }}"
skip_crd = {{ .Values.garage.kubernetesSkipCrd }}
[s3_api]
s3_region = "{{ .Values.garage.s3.api.region }}"
api_bind_addr = "[::]:3900"
root_domain = "{{ .Values.garage.s3.api.rootDomain }}"
[s3_web]
bind_addr = "[::]:3902"
root_domain = "{{ .Values.garage.s3.web.rootDomain }}"
index = "{{ .Values.garage.s3.web.index }}"
[admin]
api_bind_addr = "[::]:3903"
{{- if .Values.monitoring.tracing.sink }}
trace_sink = "{{ .Values.monitoring.tracing.sink }}"
{{- end }}
{{- end }}
{{- end }}
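To use the new escape hatch, a cluster operator can point the chart at a ConfigMap managed outside Helm; a hypothetical sketch (the ConfigMap name and release name are placeholders):

```bash
# Create a ConfigMap holding a complete, hand-written garage.toml...
kubectl create configmap my-garage-config --from-file=garage.toml

# ...and tell the chart to use it instead of rendering its own.
helm upgrade --install garage ./script/helm/garage \
  --set garage.existingConfigMap=my-garage-config
```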


@ -4,28 +4,30 @@
# Garage configuration. These values go to garage.toml
garage:
# Can be changed for better performance on certain systems
# -- Can be changed for better performance on certain systems
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db-engine-since-v0-8-0
dbEngine: "lmdb"
# Defaults is 1MB
# -- Defaults is 1MB
# An increase can result in better performance in certain scenarios
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#block-size
blockSize: "1048576"
# Default to 3 replicas, see the replication_mode section at
# -- Default to 3 replicas, see the replication_mode section at
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication-mode
replicationMode: "3"
# zstd compression level of stored blocks
# -- zstd compression level of stored blocks
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#compression-level
compressionLevel: "1"
rpcBindAddr: "[::]:3901"
# If not given, a random secret will be generated and stored in a Secret object
# -- If not given, a random secret will be generated and stored in a Secret object
rpcSecret: ""
# This is not required if you use the integrated kubernetes discovery
# -- This is not required if you use the integrated kubernetes discovery
bootstrapPeers: []
# -- Set to true if you want to use k8s discovery but install the CRDs manually outside
# of the helm chart, for example if you operate at namespace level without cluster ressources
kubernetesSkipCrd: false
s3:
api:
@ -34,47 +36,16 @@ garage:
web:
rootDomain: ".web.garage.tld"
index: "index.html"
# Template for the garage configuration
# Values can be templated
# ref: https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/
garage.toml: |-
metadata_dir = "/mnt/meta"
data_dir = "/mnt/data"
db_engine = "{{ .Values.garage.dbEngine }}"
# -- if not empty string, allow using an existing ConfigMap for the garage.toml,
# if set, ignores garage.toml
existingConfigMap: ""
block_size = {{ .Values.garage.blockSize }}
replication_mode = "{{ .Values.garage.replicationMode }}"
compression_level = {{ .Values.garage.compressionLevel }}
rpc_bind_addr = "{{ .Values.garage.rpcBindAddr }}"
# rpc_secret will be populated by the init container from a k8s secret object
rpc_secret = "__RPC_SECRET_REPLACE__"
bootstrap_peers = {{ .Values.garage.bootstrapPeers }}
[kubernetes_discovery]
namespace = "{{ .Release.Namespace }}"
service_name = "{{ include "garage.fullname" . }}"
skip_crd = {{ .Values.garage.kubernetesSkipCrd }}
[s3_api]
s3_region = "{{ .Values.garage.s3.api.region }}"
api_bind_addr = "[::]:3900"
root_domain = "{{ .Values.garage.s3.api.rootDomain }}"
[s3_web]
bind_addr = "[::]:3902"
root_domain = "{{ .Values.garage.s3.web.rootDomain }}"
index = "{{ .Values.garage.s3.web.index }}"
[admin]
api_bind_addr = "[::]:3903"
{{- if .Values.monitoring.tracing.sink }}
trace_sink = "{{ .Values.monitoring.tracing.sink }}"
{{- end }}
# -- String Template for the garage configuration
# if set, ignores above values.
# Values can be templated,
# see https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/
garageTomlString: ""
# Data persistence
persistence:
@ -92,16 +63,18 @@ persistence:
# Deployment configuration
deployment:
# Switchable to DaemonSet
# -- Switchable to DaemonSet
kind: StatefulSet
# Number of StatefulSet replicas/garage nodes to start
# -- Number of StatefulSet replicas/garage nodes to start
replicaCount: 3
# If using statefulset, allow Parallel or OrderedReady (default)
# -- If using statefulset, allow Parallel or OrderedReady (default)
podManagementPolicy: OrderedReady
image:
# -- default to amd64 docker image
repository: dxflrs/amd64_garage
# please prefer using the chart version and not this tag
# -- set the image tag, please prefer using the chart version and not this
# to avoid compatibility issues
tag: ""
pullPolicy: IfNotPresent
@ -110,19 +83,21 @@ initImage:
tag: stable
pullPolicy: IfNotPresent
# -- set if you need credentials to pull your custom image
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
# -- Specifies whether a service account should be created
create: true
# Annotations to add to the service account
# -- Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# -- The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
# -- additonal pod annotations
podAnnotations: {}
podSecurityContext:
@ -132,7 +107,7 @@ podSecurityContext:
runAsNonRoot: true
securityContext:
# The default security context is heavily restricted
# -- The default security context is heavily restricted,
# feel free to tune it to your requirements
capabilities:
drop:
@ -140,7 +115,7 @@ securityContext:
readOnlyRootFilesystem: true
service:
# You can rely on any service to expose your cluster
# -- You can rely on any service to expose your cluster
# - ClusterIP (+ Ingress)
# - NodePort (+ Ingress)
# - LoadBalancer
@ -156,20 +131,23 @@ ingress:
s3:
api:
enabled: false
# Rely either on the className or the annotation below but not both
# replace "nginx" by an Ingress controller
# you can find examples here https://kubernetes.io/docs/concepts/services-networking/ingress-controllers
# -- Rely _either_ on the className or the annotation below but not both!
# If you want to use the className, set
# className: "nginx"
# and replace "nginx" by an Ingress controller name,
# examples [here](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers).
annotations: {}
# kubernetes.io/ingress.class: "nginx"
# kubernetes.io/tls-acme: "true"
labels: {}
hosts:
- host: "s3.garage.tld" # garage S3 API endpoint
# -- garage S3 API endpoint, to be used with awscli for example
- host: "s3.garage.tld"
paths:
- path: /
pathType: Prefix
- host: "*.s3.garage.tld" # garage S3 API endpoint, DNS style bucket access
# -- garage S3 API endpoint, DNS style bucket access
- host: "*.s3.garage.tld"
paths:
- path: /
pathType: Prefix
@ -179,20 +157,23 @@ ingress:
# - kubernetes.docker.internal
web:
enabled: false
# Rely either on the className or the annotation below but not both
# replace "nginx" by an Ingress controller
# you can find examples here https://kubernetes.io/docs/concepts/services-networking/ingress-controllers
# -- Rely _either_ on the className or the annotation below but not both!
# If you want to use the className, set
# className: "nginx"
# and replace "nginx" by an Ingress controller name,
# examples [here](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers).
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
labels: {}
hosts:
- host: "*.web.garage.tld" # wildcard website access with bucket name prefix
# -- wildcard website access with bucket name prefix
- host: "*.web.garage.tld"
paths:
- path: /
pathType: Prefix
- host: "mywebpage.example.com" # specific bucket access with FQDN bucket
# -- specific bucket access with FQDN bucket
- host: "mywebpage.example.com"
paths:
- path: /
pathType: Prefix
@ -224,10 +205,10 @@ extraVolumeMounts: {}
monitoring:
metrics:
# If true, a service for monitoring is created with a prometheus.io/scrape annotation
# -- If true, a service for monitoring is created with a prometheus.io/scrape annotation
enabled: false
serviceMonitor:
# If true, a ServiceMonitor CRD is created for a prometheus operator
# -- If true, a ServiceMonitor CRD is created for a prometheus operator
# https://github.com/coreos/prometheus-operator
enabled: false
path: /metrics
@ -239,4 +220,5 @@ monitoring:
scrapeTimeout: 10s
relabelings: []
tracing:
# -- specify a sink endpoint for OpenTelemetry Traces, e.g. `http://localhost:4317`
sink: ""


@ -115,7 +115,7 @@ in
shellHook = ''
function refresh_cache {
pass show deuxfleurs/nix_priv_key > /tmp/nix-signing-key.sec
for attr in clippy.amd64 test.amd64 pkgs.{amd64,i386,arm,arm64}.release; do
for attr in pkgs.amd64.debug test.amd64 pkgs.{amd64,i386,arm,arm64}.release; do
echo "Updating cache for ''${attr}"
nix copy -j8 \
--to 's3://nix?endpoint=garage.deuxfleurs.fr&region=garage&secret-key=/tmp/nix-signing-key.sec' \

src/api/admin/Cargo.toml Normal file

@ -0,0 +1,43 @@
[package]
name = "garage_api_admin"
version = "1.0.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
description = "Admin API server crate for the Garage object store"
repository = "https://git.deuxfleurs.fr/Deuxfleurs/garage"
readme = "../../README.md"
[lib]
path = "lib.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
garage_model.workspace = true
garage_table.workspace = true
garage_util.workspace = true
garage_rpc.workspace = true
garage_api_common.workspace = true
argon2.workspace = true
async-trait.workspace = true
err-derive.workspace = true
hex.workspace = true
tracing.workspace = true
futures.workspace = true
tokio.workspace = true
http.workspace = true
hyper = { workspace = true, default-features = false, features = ["server", "http1"] }
url.workspace = true
serde.workspace = true
serde_json.workspace = true
opentelemetry.workspace = true
opentelemetry-prometheus = { workspace = true, optional = true }
prometheus = { workspace = true, optional = true }
[features]
metrics = [ "opentelemetry-prometheus", "prometheus" ]


@ -20,15 +20,15 @@ use garage_rpc::system::ClusterHealthStatus;
use garage_util::error::Error as GarageError;
use garage_util::socket_address::UnixOrTCPSocketAddress;
use crate::generic_server::*;
use garage_api_common::generic_server::*;
use garage_api_common::helpers::*;
use crate::admin::bucket::*;
use crate::admin::cluster::*;
use crate::admin::error::*;
use crate::admin::key::*;
use crate::admin::router_v0;
use crate::admin::router_v1::{Authorization, Endpoint};
use crate::helpers::*;
use crate::bucket::*;
use crate::cluster::*;
use crate::error::*;
use crate::key::*;
use crate::router_v0;
use crate::router_v1::{Authorization, Endpoint};
pub type ResBody = BoxBody<Error>;


@ -17,11 +17,12 @@ use garage_model::permission::*;
use garage_model::s3::mpu_table;
use garage_model::s3::object_table::*;
use crate::admin::api_server::ResBody;
use crate::admin::error::*;
use crate::admin::key::ApiBucketKeyPerm;
use crate::common_error::CommonError;
use crate::helpers::*;
use garage_api_common::common_error::CommonError;
use garage_api_common::helpers::*;
use crate::api_server::ResBody;
use crate::error::*;
use crate::key::ApiBucketKeyPerm;
pub async fn handle_list_buckets(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
let buckets = garage


@ -12,9 +12,10 @@ use garage_rpc::layout;
use garage_model::garage::Garage;
use crate::admin::api_server::ResBody;
use crate::admin::error::*;
use crate::helpers::{json_ok_response, parse_json_body};
use garage_api_common::helpers::{json_ok_response, parse_json_body};
use crate::api_server::ResBody;
use crate::error::*;
pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
let layout = garage.system.cluster_layout();


@ -1,20 +1,24 @@
use std::convert::TryFrom;
use err_derive::Error;
use hyper::header::HeaderValue;
use hyper::{HeaderMap, StatusCode};
pub use garage_model::helper::error::Error as HelperError;
use crate::common_error::CommonError;
pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInternalError};
use crate::generic_server::ApiError;
use crate::helpers::*;
use garage_api_common::common_error::{commonErrorDerivative, CommonError};
pub use garage_api_common::common_error::{
CommonErrorDerivative, OkOrBadRequest, OkOrInternalError,
};
use garage_api_common::generic_server::ApiError;
use garage_api_common::helpers::*;
/// Errors of this crate
#[derive(Debug, Error)]
pub enum Error {
#[error(display = "{}", _0)]
/// Error from common error
Common(CommonError),
Common(#[error(source)] CommonError),
// Category: cannot process
/// The API access key does not exist
@ -29,17 +33,21 @@ pub enum Error {
KeyAlreadyExists(String),
}
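
The `Common` variant now carries an `#[error(source)]` attribute, wiring the wrapped `CommonError` into `std::error::Error::source()`. A minimal sketch of the pattern, assuming the err-derive crate used throughout these crates (toy variants, not the full enums):

use err_derive::Error;

#[derive(Debug, Error)]
enum CommonError {
    #[error(display = "internal error: {}", _0)]
    InternalError(String),
}

#[derive(Debug, Error)]
enum Error {
    // The source attribute exposes the inner error via Error::source().
    #[error(display = "{}", _0)]
    Common(#[error(source)] CommonError),
    #[error(display = "Access key not found: {}", _0)]
    NoSuchAccessKey(String),
}

fn main() {
    let err = Error::Common(CommonError::InternalError("oops".into()));
    assert!(std::error::Error::source(&err).is_some());
}
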
impl<T> From<T> for Error
where
CommonError: From<T>,
{
fn from(err: T) -> Self {
Error::Common(CommonError::from(err))
commonErrorDerivative!(Error);
/// FIXME: helper errors are transformed into their corresponding variants
/// in the Error struct, but in many cases a helper error should be considered
/// an internal error.
impl From<HelperError> for Error {
fn from(err: HelperError) -> Error {
match CommonError::try_from(err) {
Ok(ce) => Self::Common(ce),
Err(HelperError::NoSuchAccessKey(k)) => Self::NoSuchAccessKey(k),
Err(_) => unreachable!(),
}
}
}
impl CommonErrorDerivative for Error {}
impl Error {
fn code(&self) -> &'static str {
match self {


@ -9,9 +9,10 @@ use garage_table::*;
use garage_model::garage::Garage;
use garage_model::key_table::*;
use crate::admin::api_server::ResBody;
use crate::admin::error::*;
use crate::helpers::*;
use garage_api_common::helpers::*;
use crate::api_server::ResBody;
use crate::error::*;
pub async fn handle_list_keys(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
let res = garage


@ -1,3 +1,6 @@
#[macro_use]
extern crate tracing;
pub mod api_server;
mod error;
mod router_v0;


@ -2,8 +2,9 @@ use std::borrow::Cow;
use hyper::{Method, Request};
use crate::admin::error::*;
use crate::router_macros::*;
use garage_api_common::router_macros::*;
use crate::error::*;
router_match! {@func


@ -2,9 +2,10 @@ use std::borrow::Cow;
use hyper::{Method, Request};
use crate::admin::error::*;
use crate::admin::router_v0;
use crate::router_macros::*;
use garage_api_common::router_macros::*;
use crate::error::*;
use crate::router_v0;
pub enum Authorization {
None,

src/api/common/Cargo.toml Normal file

@ -0,0 +1,45 @@
[package]
name = "garage_api_common"
version = "1.0.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
description = "Common functions for the API server crates for the Garage object store"
repository = "https://git.deuxfleurs.fr/Deuxfleurs/garage"
readme = "../../README.md"
[lib]
path = "lib.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
garage_model.workspace = true
garage_table.workspace = true
garage_util.workspace = true
async-trait.workspace = true
bytes.workspace = true
chrono.workspace = true
crypto-common.workspace = true
err-derive.workspace = true
hex.workspace = true
hmac.workspace = true
idna.workspace = true
tracing.workspace = true
nom.workspace = true
pin-project.workspace = true
sha2.workspace = true
futures.workspace = true
tokio.workspace = true
http.workspace = true
http-body-util.workspace = true
hyper = { workspace = true, default-features = false, features = ["server", "http1"] }
hyper-util.workspace = true
url.workspace = true
serde.workspace = true
serde_json.workspace = true
opentelemetry.workspace = true


@ -1,3 +1,5 @@
use std::convert::TryFrom;
use err_derive::Error;
use hyper::StatusCode;
@ -55,6 +57,35 @@ pub enum CommonError {
InvalidBucketName(String),
}
#[macro_export]
macro_rules! commonErrorDerivative {
( $error_struct: ident ) => {
impl From<garage_util::error::Error> for $error_struct {
fn from(err: garage_util::error::Error) -> Self {
Self::Common(CommonError::InternalError(err))
}
}
impl From<http::Error> for $error_struct {
fn from(err: http::Error) -> Self {
Self::Common(CommonError::Http(err))
}
}
impl From<hyper::Error> for $error_struct {
fn from(err: hyper::Error) -> Self {
Self::Common(CommonError::Hyper(err))
}
}
impl From<hyper::header::ToStrError> for $error_struct {
fn from(err: hyper::header::ToStrError) -> Self {
Self::Common(CommonError::InvalidHeader(err))
}
}
impl CommonErrorDerivative for $error_struct {}
};
}
pub use commonErrorDerivative;
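
This macro replaces the blanket `impl<T> From<T> for Error where CommonError: From<T>` that each API error enum previously carried; a blanket impl would conflict with the new crate-specific `From<HelperError>` impls. A self-contained sketch of the same pattern, with toy error types standing in for Garage's:

#[derive(Debug)]
enum CommonError {
    Internal(String),
}

trait CommonErrorDerivative: From<CommonError> {
    fn internal_error<M: ToString>(msg: M) -> Self {
        Self::from(CommonError::Internal(msg.to_string()))
    }
}

// One invocation per API crate generates the From impls that the old
// blanket impl used to provide (the real macro also covers http::Error,
// hyper::Error, etc.).
macro_rules! common_error_derivative {
    ($error_struct:ident) => {
        impl From<CommonError> for $error_struct {
            fn from(err: CommonError) -> Self {
                Self::Common(err)
            }
        }
        impl CommonErrorDerivative for $error_struct {}
    };
}

#[derive(Debug)]
enum Error {
    Common(CommonError),
}

common_error_derivative!(Error);

fn main() {
    let e: Error = Error::internal_error("db unavailable");
    println!("{:?}", e);
}
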
impl CommonError {
pub fn http_status_code(&self) -> StatusCode {
match self {
@ -97,18 +128,39 @@ impl CommonError {
}
}
impl From<HelperError> for CommonError {
fn from(err: HelperError) -> Self {
impl TryFrom<HelperError> for CommonError {
type Error = HelperError;
fn try_from(err: HelperError) -> Result<Self, HelperError> {
match err {
HelperError::Internal(i) => Self::InternalError(i),
HelperError::BadRequest(b) => Self::BadRequest(b),
HelperError::InvalidBucketName(n) => Self::InvalidBucketName(n),
HelperError::NoSuchBucket(n) => Self::NoSuchBucket(n),
e => Self::bad_request(format!("{}", e)),
HelperError::Internal(i) => Ok(Self::InternalError(i)),
HelperError::BadRequest(b) => Ok(Self::BadRequest(b)),
HelperError::InvalidBucketName(n) => Ok(Self::InvalidBucketName(n)),
HelperError::NoSuchBucket(n) => Ok(Self::NoSuchBucket(n)),
e => Err(e),
}
}
}
/// This function converts HelperErrors into CommonErrors,
/// for variants that exist in CommonError.
/// This is used for helper functions that might return InvalidBucketName
/// or NoSuchBucket for instance, and we want to pass that error
/// up to our caller.
pub fn pass_helper_error(err: HelperError) -> CommonError {
match CommonError::try_from(err) {
Ok(e) => e,
Err(e) => panic!("Helper error `{}` should not have happened here", e),
}
}
pub fn helper_error_as_internal(err: HelperError) -> CommonError {
match err {
HelperError::Internal(e) => CommonError::InternalError(e),
e => CommonError::InternalError(GarageError::Message(e.to_string())),
}
}
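
Taken together, callers now choose between two conversion strategies for `HelperError`: `pass_helper_error` preserves the specific error code when only common variants can occur, while `helper_error_as_internal` degrades everything to an internal error so that no misleading status code leaks to the client. A toy sketch of the two call-site behaviors (simplified enums, not Garage's):

#[derive(Debug)]
enum HelperError {
    Internal(String),
    NoSuchBucket(String),
    NoSuchAccessKey(String),
}

#[derive(Debug)]
enum CommonError {
    InternalError(String),
    NoSuchBucket(String),
}

// Keep the specific code; panics if a variant reaches a call site that
// the caller claimed could only produce "common" errors.
fn pass_helper_error(err: HelperError) -> CommonError {
    match err {
        HelperError::Internal(e) => CommonError::InternalError(e),
        HelperError::NoSuchBucket(b) => CommonError::NoSuchBucket(b),
        e => panic!("helper error {:?} should not have happened here", e),
    }
}

// Degrade everything to an internal error; used where a specific code
// (e.g. a 404) would be misleading for the client.
fn helper_error_as_internal(err: HelperError) -> CommonError {
    match err {
        HelperError::Internal(e) => CommonError::InternalError(e),
        e => CommonError::InternalError(format!("{:?}", e)),
    }
}

fn main() {
    // A bucket lookup may legitimately surface NoSuchBucket to the client:
    println!("{:?}", pass_helper_error(HelperError::NoSuchBucket("img".into())));
    // Deep inside request handling, a missing key must not leak as a 404:
    println!("{:?}", helper_error_as_internal(HelperError::NoSuchAccessKey("GK1".into())));
}
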
pub trait CommonErrorDerivative: From<CommonError> {
fn internal_error<M: ToString>(msg: M) -> Self {
Self::from(CommonError::InternalError(GarageError::Message(

src/api/common/cors.rs Normal file

@ -0,0 +1,170 @@
use std::sync::Arc;
use http::header::{
ACCESS_CONTROL_ALLOW_HEADERS, ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN,
ACCESS_CONTROL_EXPOSE_HEADERS, ACCESS_CONTROL_REQUEST_HEADERS, ACCESS_CONTROL_REQUEST_METHOD,
};
use hyper::{body::Body, body::Incoming as IncomingBody, Request, Response, StatusCode};
use garage_model::bucket_table::{BucketParams, CorsRule as GarageCorsRule};
use garage_model::garage::Garage;
use crate::common_error::{
helper_error_as_internal, CommonError, OkOrBadRequest, OkOrInternalError,
};
use crate::helpers::*;
pub fn find_matching_cors_rule<'a>(
bucket_params: &'a BucketParams,
req: &Request<impl Body>,
) -> Result<Option<&'a GarageCorsRule>, CommonError> {
if let Some(cors_config) = bucket_params.cors_config.get() {
if let Some(origin) = req.headers().get("Origin") {
let origin = origin.to_str()?;
let request_headers = match req.headers().get(ACCESS_CONTROL_REQUEST_HEADERS) {
Some(h) => h.to_str()?.split(',').map(|h| h.trim()).collect::<Vec<_>>(),
None => vec![],
};
return Ok(cors_config.iter().find(|rule| {
cors_rule_matches(rule, origin, req.method().as_ref(), request_headers.iter())
}));
}
}
Ok(None)
}
pub fn cors_rule_matches<'a, HI, S>(
rule: &GarageCorsRule,
origin: &'a str,
method: &'a str,
mut request_headers: HI,
) -> bool
where
HI: Iterator<Item = S>,
S: AsRef<str>,
{
rule.allow_origins.iter().any(|x| x == "*" || x == origin)
&& rule.allow_methods.iter().any(|x| x == "*" || x == method)
&& request_headers.all(|h| {
rule.allow_headers
.iter()
.any(|x| x == "*" || x == h.as_ref())
})
}
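
A rule matches only if the origin and the method are explicitly allowed (or wildcarded) and every requested header is covered. A self-contained usage sketch, with a toy CorsRule standing in for garage_model's bucket_table::CorsRule:

struct CorsRule {
    allow_origins: Vec<String>,
    allow_methods: Vec<String>,
    allow_headers: Vec<String>,
}

// Same matching logic as above, minus the Garage-specific types.
fn cors_rule_matches<'a, HI, S>(
    rule: &CorsRule,
    origin: &'a str,
    method: &'a str,
    mut request_headers: HI,
) -> bool
where
    HI: Iterator<Item = S>,
    S: AsRef<str>,
{
    rule.allow_origins.iter().any(|x| x == "*" || x == origin)
        && rule.allow_methods.iter().any(|x| x == "*" || x == method)
        && request_headers.all(|h| {
            rule.allow_headers
                .iter()
                .any(|x| x == "*" || x == h.as_ref())
        })
}

fn main() {
    let rule = CorsRule {
        allow_origins: vec!["https://app.example.com".into()],
        allow_methods: vec!["GET".into(), "PUT".into()],
        allow_headers: vec!["*".into()],
    };
    // Allowed: listed origin, listed method, wildcard headers.
    assert!(cors_rule_matches(&rule, "https://app.example.com", "PUT", ["content-type"].iter()));
    // Rejected: DELETE is not among the allowed methods.
    assert!(!cors_rule_matches(&rule, "https://app.example.com", "DELETE", std::iter::empty::<&str>()));
}
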
pub fn add_cors_headers(
resp: &mut Response<impl Body>,
rule: &GarageCorsRule,
) -> Result<(), http::header::InvalidHeaderValue> {
let h = resp.headers_mut();
h.insert(
ACCESS_CONTROL_ALLOW_ORIGIN,
rule.allow_origins.join(", ").parse()?,
);
h.insert(
ACCESS_CONTROL_ALLOW_METHODS,
rule.allow_methods.join(", ").parse()?,
);
h.insert(
ACCESS_CONTROL_ALLOW_HEADERS,
rule.allow_headers.join(", ").parse()?,
);
h.insert(
ACCESS_CONTROL_EXPOSE_HEADERS,
rule.expose_headers.join(", ").parse()?,
);
Ok(())
}
pub async fn handle_options_api(
garage: Arc<Garage>,
req: &Request<IncomingBody>,
bucket_name: Option<String>,
) -> Result<Response<EmptyBody>, CommonError> {
// FIXME: CORS rules of buckets with local aliases are
// not taken into account.
// If the bucket name is a global bucket name,
// we try to apply the CORS rules of that bucket.
// If a user has a local bucket name that has
// the same name, its CORS rules won't be applied
// and will be shadowed by the rules of the globally
// existing bucket (but this is inevitable because
// OPTIONS calls are not authenticated).
if let Some(bn) = bucket_name {
let helper = garage.bucket_helper();
let bucket_id = helper
.resolve_global_bucket_name(&bn)
.await
.map_err(helper_error_as_internal)?;
if let Some(id) = bucket_id {
let bucket = garage
.bucket_helper()
.get_existing_bucket(id)
.await
.map_err(helper_error_as_internal)?;
let bucket_params = bucket.state.into_option().unwrap();
handle_options_for_bucket(req, &bucket_params)
} else {
// If there is a bucket name in the request, but that name
// does not correspond to a global alias for a bucket,
// then it's either a non-existing bucket or a local bucket.
// We have no way of knowing, because the request is not
// authenticated and thus we can't resolve local aliases.
// We take the permissive approach of allowing everything,
// because we don't want to prevent web apps that use
// local bucket names from making API calls.
Ok(Response::builder()
.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
.header(ACCESS_CONTROL_ALLOW_METHODS, "*")
.status(StatusCode::OK)
.body(EmptyBody::new())?)
}
} else {
// If there is no bucket name in the request,
// we are doing a ListBuckets call, which we want to allow
// for all origins.
Ok(Response::builder()
.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
.header(ACCESS_CONTROL_ALLOW_METHODS, "GET")
.status(StatusCode::OK)
.body(EmptyBody::new())?)
}
}
pub fn handle_options_for_bucket(
req: &Request<IncomingBody>,
bucket_params: &BucketParams,
) -> Result<Response<EmptyBody>, CommonError> {
let origin = req
.headers()
.get("Origin")
.ok_or_bad_request("Missing Origin header")?
.to_str()?;
let request_method = req
.headers()
.get(ACCESS_CONTROL_REQUEST_METHOD)
.ok_or_bad_request("Missing Access-Control-Request-Method header")?
.to_str()?;
let request_headers = match req.headers().get(ACCESS_CONTROL_REQUEST_HEADERS) {
Some(h) => h.to_str()?.split(',').map(|h| h.trim()).collect::<Vec<_>>(),
None => vec![],
};
if let Some(cors_config) = bucket_params.cors_config.get() {
let matching_rule = cors_config
.iter()
.find(|rule| cors_rule_matches(rule, origin, request_method, request_headers.iter()));
if let Some(rule) = matching_rule {
let mut resp = Response::builder()
.status(StatusCode::OK)
.body(EmptyBody::new())?;
add_cors_headers(&mut resp, rule).ok_or_internal_error("Invalid CORS configuration")?;
return Ok(resp);
}
}
Err(CommonError::Forbidden(
"This CORS request is not allowed.".into(),
))
}


@ -36,7 +36,7 @@ use garage_util::socket_address::UnixOrTCPSocketAddress;
use crate::helpers::{BoxBody, ErrorBody};
pub(crate) trait ApiEndpoint: Send + Sync + 'static {
pub trait ApiEndpoint: Send + Sync + 'static {
fn name(&self) -> &'static str;
fn add_span_attributes(&self, span: SpanRef<'_>);
}
@ -48,7 +48,7 @@ pub trait ApiError: std::error::Error + Send + Sync + 'static {
}
#[async_trait]
pub(crate) trait ApiHandler: Send + Sync + 'static {
pub trait ApiHandler: Send + Sync + 'static {
const API_NAME: &'static str;
const API_NAME_DISPLAY: &'static str;
@ -63,7 +63,7 @@ pub(crate) trait ApiHandler: Send + Sync + 'static {
) -> Result<Response<BoxBody<Self::Error>>, Self::Error>;
}
pub(crate) struct ApiServer<A: ApiHandler> {
pub struct ApiServer<A: ApiHandler> {
region: String,
api_handler: A,


@ -363,9 +363,9 @@ mod tests {
}
#[derive(Serialize)]
pub(crate) struct CustomApiErrorBody {
pub(crate) code: String,
pub(crate) message: String,
pub(crate) region: String,
pub(crate) path: String,
pub struct CustomApiErrorBody {
pub code: String,
pub message: String,
pub region: String,
pub path: String,
}

src/api/common/lib.rs Normal file

@ -0,0 +1,12 @@
//! Crate for serving a S3 compatible API
#[macro_use]
extern crate tracing;
pub mod common_error;
pub mod cors;
pub mod encoding;
pub mod generic_server;
pub mod helpers;
pub mod router_macros;
pub mod signature;


@ -1,5 +1,6 @@
/// This macro is used to generate very repetitive match {} blocks in this module
/// It is _not_ made to be used anywhere else
#[macro_export]
macro_rules! router_match {
(@match $enum:expr , [ $($endpoint:ident,)* ]) => {{
// usage: router_match {@match my_enum, [ VariantWithField1, VariantWithField2 ..] }
@ -133,6 +134,7 @@ macro_rules! router_match {
/// This macro is used to generate part of the code in this module. It must be called only once, and
/// is useless outside of this module.
#[macro_export]
macro_rules! generateQueryParameters {
(
keywords: [ $($kw_param:expr => $kw_name: ident),* ],
@ -204,7 +206,7 @@ macro_rules! generateQueryParameters {
}
/// Get an error message in case not all parameters were used when extracting them to
/// build an Enpoint variant
/// build an Endpoint variant
fn nonempty_message(&self) -> Option<&str> {
if self.keyword.is_some() {
Some("Keyword not used")
@ -220,5 +222,5 @@ macro_rules! generateQueryParameters {
}
}
pub(crate) use generateQueryParameters;
pub(crate) use router_match;
pub use generateQueryParameters;
pub use router_match;
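
Within a single crate, `pub(crate) use` was enough to let sibling modules call these macros; now that they are consumed from the S3, K2V and admin crates, they need `#[macro_export]` plus a `pub use` re-export so callers can import them path-style. A minimal sketch of the mechanism with a hypothetical `answer` macro:

// Stand-in for garage_api_common, which defines the real macros:
pub mod router_macros {
    // #[macro_export] hoists the macro to the crate root...
    #[macro_export]
    macro_rules! answer {
        () => {
            42
        };
    }
    // ...and the `pub use` additionally re-exports it under this module's
    // path, so downstream crates can `use the_crate::router_macros::answer;`.
    pub use answer;
}

fn main() {
    assert_eq!(router_macros::answer!(), 42);
}
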


@ -47,8 +47,8 @@ pub async fn check_payload_signature(
let query = parse_query_map(request.uri())?;
if query.contains_key(&X_AMZ_ALGORITHM) {
// We check for presigned-URL-style authentification first, because
// the browser or someting else could inject an Authorization header
// We check for presigned-URL-style authentication first, because
// the browser or something else could inject an Authorization header
// that is totally unrelated to AWS signatures.
check_presigned_signature(garage, service, request, query).await
} else if request.headers().contains_key(AUTHORIZATION) {
@ -132,7 +132,7 @@ async fn check_presigned_signature(
let authorization = Authorization::parse_presigned(&algorithm.value, &query)?;
// Verify that all necessary request headers are included in signed_headers
// For AWSv4 pre-signed URLs, the following must be incldued:
// For AWSv4 pre-signed URLs, the following must be included:
// - the Host header (mandatory)
// - all x-amz-* headers used in the request
let signed_headers = split_signed_headers(&authorization)?;
@ -306,7 +306,7 @@ pub fn canonical_request(
// Note that there is also the issue of path normalization, which I hope is unrelated to the
// one of URI-encoding. At least in aws-sigv4 both parameters can be set independently,
// and rusoto_signature does not seem to do any effective path normalization, even though
// it mentions it in the comments (same link to the souce code as above).
// it mentions it in the comments (same link to the source code as above).
// We make the explicit choice of NOT normalizing paths in the K2V API because doing so
// would make non-normalized paths invalid K2V partition keys, and we don't want that.
let canonical_uri: std::borrow::Cow<str> = if service != "s3" {
@ -518,7 +518,7 @@ impl Authorization {
})
}
pub(crate) fn parse_form(params: &HeaderMap) -> Result<Self, Error> {
pub fn parse_form(params: &HeaderMap) -> Result<Self, Error> {
let algorithm = params
.get(X_AMZ_ALGORITHM)
.ok_or_bad_request("Missing X-Amz-Algorithm header")?

src/api/k2v/Cargo.toml Normal file

@ -0,0 +1,38 @@
[package]
name = "garage_api_k2v"
version = "1.0.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
description = "K2V API server crate for the Garage object store"
repository = "https://git.deuxfleurs.fr/Deuxfleurs/garage"
readme = "../../README.md"
[lib]
path = "lib.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
garage_model = { workspace = true, features = [ "k2v" ] }
garage_table.workspace = true
garage_util = { workspace = true, features = [ "k2v" ] }
garage_api_common.workspace = true
async-trait.workspace = true
base64.workspace = true
err-derive.workspace = true
tracing.workspace = true
futures.workspace = true
tokio.workspace = true
http.workspace = true
http-body-util.workspace = true
hyper = { workspace = true, default-features = false, features = ["server", "http1"] }
percent-encoding.workspace = true
url.workspace = true
serde.workspace = true
serde_json.workspace = true
opentelemetry.workspace = true


@ -12,26 +12,25 @@ use garage_util::socket_address::UnixOrTCPSocketAddress;
use garage_model::garage::Garage;
use crate::generic_server::*;
use crate::k2v::error::*;
use garage_api_common::cors::*;
use garage_api_common::generic_server::*;
use garage_api_common::helpers::*;
use garage_api_common::signature::verify_request;
use crate::signature::verify_request;
use crate::batch::*;
use crate::error::*;
use crate::index::*;
use crate::item::*;
use crate::router::Endpoint;
use crate::helpers::*;
use crate::k2v::batch::*;
use crate::k2v::index::*;
use crate::k2v::item::*;
use crate::k2v::router::Endpoint;
use crate::s3::cors::*;
pub use crate::signature::streaming::ReqBody;
pub use garage_api_common::signature::streaming::ReqBody;
pub type ResBody = BoxBody<Error>;
pub struct K2VApiServer {
garage: Arc<Garage>,
}
pub(crate) struct K2VApiEndpoint {
pub struct K2VApiEndpoint {
bucket_name: String,
endpoint: Endpoint,
}
@ -77,7 +76,7 @@ impl ApiHandler for K2VApiServer {
} = endpoint;
let garage = self.garage.clone();
// The OPTIONS method is procesed early, before we even check for an API key
// The OPTIONS method is processed early, before we even check for an API key
if let Endpoint::Options = endpoint {
let options_res = handle_options_api(garage, &req, Some(bucket_name))
.await
@ -90,11 +89,13 @@ impl ApiHandler for K2VApiServer {
let bucket_id = garage
.bucket_helper()
.resolve_bucket(&bucket_name, &api_key)
.await?;
.await
.map_err(pass_helper_error)?;
let bucket = garage
.bucket_helper()
.get_existing_bucket(bucket_id)
.await?;
.await
.map_err(helper_error_as_internal)?;
let bucket_params = bucket.state.into_option().unwrap();
let allowed = match endpoint.authorization_type() {


@ -4,13 +4,14 @@ use serde::{Deserialize, Serialize};
use garage_table::{EnumerationOrder, TableSchema};
use garage_model::k2v::causality::*;
use garage_model::k2v::item_table::*;
use crate::helpers::*;
use crate::k2v::api_server::{ReqBody, ResBody};
use crate::k2v::error::*;
use crate::k2v::range::read_range;
use garage_api_common::helpers::*;
use crate::api_server::{ReqBody, ResBody};
use crate::error::*;
use crate::item::parse_causality_token;
use crate::range::read_range;
pub async fn handle_insert_batch(
ctx: ReqCtx,
@ -23,7 +24,7 @@ pub async fn handle_insert_batch(
let mut items2 = vec![];
for it in items {
let ct = it.ct.map(|s| CausalContext::parse_helper(&s)).transpose()?;
let ct = it.ct.map(|s| parse_causality_token(&s)).transpose()?;
let v = match it.v {
Some(vs) => DvvsValue::Value(
BASE64_STANDARD
@ -281,7 +282,8 @@ pub(crate) async fn handle_poll_range(
query.seen_marker,
timeout_msec,
)
.await?;
.await
.map_err(pass_helper_error)?;
if let Some((items, seen_marker)) = resp {
let resp = PollRangeResponse {


@ -2,18 +2,21 @@ use err_derive::Error;
use hyper::header::HeaderValue;
use hyper::{HeaderMap, StatusCode};
use crate::common_error::CommonError;
pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInternalError};
use crate::generic_server::ApiError;
use crate::helpers::*;
use crate::signature::error::Error as SignatureError;
use garage_api_common::common_error::{commonErrorDerivative, CommonError};
pub(crate) use garage_api_common::common_error::{helper_error_as_internal, pass_helper_error};
pub use garage_api_common::common_error::{
CommonErrorDerivative, OkOrBadRequest, OkOrInternalError,
};
use garage_api_common::generic_server::ApiError;
use garage_api_common::helpers::*;
use garage_api_common::signature::error::Error as SignatureError;
/// Errors of this crate
#[derive(Debug, Error)]
pub enum Error {
#[error(display = "{}", _0)]
/// Error from common error
Common(CommonError),
Common(#[error(source)] CommonError),
// Category: cannot process
/// Authorization Header Malformed
@ -28,6 +31,10 @@ pub enum Error {
#[error(display = "Invalid base64: {}", _0)]
InvalidBase64(#[error(source)] base64::DecodeError),
/// Invalid causality token
#[error(display = "Invalid causality token")]
InvalidCausalityToken,
/// The client asked for an invalid return format (invalid Accept header)
#[error(display = "Not acceptable: {}", _0)]
NotAcceptable(String),
@ -37,16 +44,7 @@ pub enum Error {
InvalidUtf8Str(#[error(source)] std::str::Utf8Error),
}
impl<T> From<T> for Error
where
CommonError: From<T>,
{
fn from(err: T) -> Self {
Error::Common(CommonError::from(err))
}
}
impl CommonErrorDerivative for Error {}
commonErrorDerivative!(Error);
impl From<SignatureError> for Error {
fn from(err: SignatureError) -> Self {
@ -72,6 +70,7 @@ impl Error {
Error::AuthorizationHeaderMalformed(_) => "AuthorizationHeaderMalformed",
Error::InvalidBase64(_) => "InvalidBase64",
Error::InvalidUtf8Str(_) => "InvalidUtf8String",
Error::InvalidCausalityToken => "CausalityToken",
}
}
}
@ -85,7 +84,8 @@ impl ApiError for Error {
Error::NotAcceptable(_) => StatusCode::NOT_ACCEPTABLE,
Error::AuthorizationHeaderMalformed(_)
| Error::InvalidBase64(_)
| Error::InvalidUtf8Str(_) => StatusCode::BAD_REQUEST,
| Error::InvalidUtf8Str(_)
| Error::InvalidCausalityToken => StatusCode::BAD_REQUEST,
}
}


@ -5,10 +5,11 @@ use garage_table::util::*;
use garage_model::k2v::item_table::{BYTES, CONFLICTS, ENTRIES, VALUES};
use crate::helpers::*;
use crate::k2v::api_server::ResBody;
use crate::k2v::error::*;
use crate::k2v::range::read_range;
use garage_api_common::helpers::*;
use crate::api_server::ResBody;
use crate::error::*;
use crate::range::read_range;
pub async fn handle_read_index(
ctx: ReqCtx,


@ -6,9 +6,10 @@ use hyper::{Request, Response, StatusCode};
use garage_model::k2v::causality::*;
use garage_model::k2v::item_table::*;
use crate::helpers::*;
use crate::k2v::api_server::{ReqBody, ResBody};
use crate::k2v::error::*;
use garage_api_common::helpers::*;
use crate::api_server::{ReqBody, ResBody};
use crate::error::*;
pub const X_GARAGE_CAUSALITY_TOKEN: &str = "X-Garage-Causality-Token";
@ -18,6 +19,10 @@ pub enum ReturnFormat {
Either,
}
pub(crate) fn parse_causality_token(s: &str) -> Result<CausalContext, Error> {
CausalContext::parse(s).ok_or(Error::InvalidCausalityToken)
}
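
This small wrapper replaces the earlier CausalContext::parse_helper calls, so a malformed token now surfaces as the new Error::InvalidCausalityToken (mapped to HTTP 400 in the error module) instead of travelling through the helper-error path. A self-contained sketch with a toy CausalContext:

#[derive(Debug)]
struct CausalContext(Vec<u64>);

impl CausalContext {
    // Stand-in for garage_model's CausalContext::parse.
    fn parse(s: &str) -> Option<Self> {
        s.split(':')
            .map(|p| p.parse().ok())
            .collect::<Option<Vec<u64>>>()
            .map(CausalContext)
    }
}

#[derive(Debug)]
enum Error {
    InvalidCausalityToken,
}

fn parse_causality_token(s: &str) -> Result<CausalContext, Error> {
    CausalContext::parse(s).ok_or(Error::InvalidCausalityToken)
}

fn main() {
    assert!(parse_causality_token("1:2:3").is_ok());
    assert!(matches!(
        parse_causality_token("not-a-token"),
        Err(Error::InvalidCausalityToken)
    ));
}
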
impl ReturnFormat {
pub fn from(req: &Request<ReqBody>) -> Result<Self, Error> {
let accept = match req.headers().get(header::ACCEPT) {
@ -136,7 +141,7 @@ pub async fn handle_insert_item(
.get(X_GARAGE_CAUSALITY_TOKEN)
.map(|s| s.to_str())
.transpose()?
.map(CausalContext::parse_helper)
.map(parse_causality_token)
.transpose()?;
let body = http_body_util::BodyExt::collect(req.into_body())
@ -176,7 +181,7 @@ pub async fn handle_delete_item(
.get(X_GARAGE_CAUSALITY_TOKEN)
.map(|s| s.to_str())
.transpose()?
.map(CausalContext::parse_helper)
.map(parse_causality_token)
.transpose()?;
let value = DvvsValue::Deleted;


@ -1,3 +1,6 @@
#[macro_use]
extern crate tracing;
pub mod api_server;
mod error;
mod router;


@ -7,8 +7,9 @@ use std::sync::Arc;
use garage_table::replication::TableShardedReplication;
use garage_table::*;
use crate::helpers::key_after_prefix;
use crate::k2v::error::*;
use garage_api_common::helpers::key_after_prefix;
use crate::error::*;
/// Read range in a Garage table.
/// Returns (entries, more?, nextStart)


@ -1,11 +1,11 @@
use crate::k2v::error::*;
use crate::error::*;
use std::borrow::Cow;
use hyper::{Method, Request};
use crate::helpers::Authorization;
use crate::router_macros::{generateQueryParameters, router_match};
use garage_api_common::helpers::Authorization;
use garage_api_common::router_macros::{generateQueryParameters, router_match};
router_match! {@func


@ -1,17 +0,0 @@
//! Crate for serving a S3 compatible API
#[macro_use]
extern crate tracing;
pub mod common_error;
mod encoding;
pub mod generic_server;
pub mod helpers;
mod router_macros;
/// This mode is public only to help testing. Don't expect stability here
pub mod signature;
pub mod admin;
#[cfg(feature = "k2v")]
pub mod k2v;
pub mod s3;


@ -1,5 +1,5 @@
[package]
name = "garage_api"
name = "garage_api_s3"
version = "1.0.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
@ -20,9 +20,9 @@ garage_block.workspace = true
garage_net.workspace = true
garage_util.workspace = true
garage_rpc.workspace = true
garage_api_common.workspace = true
aes-gcm.workspace = true
argon2.workspace = true
async-compression.workspace = true
async-trait.workspace = true
base64.workspace = true
@ -30,20 +30,15 @@ bytes.workspace = true
chrono.workspace = true
crc32fast.workspace = true
crc32c.workspace = true
crypto-common.workspace = true
err-derive.workspace = true
hex.workspace = true
hmac.workspace = true
idna.workspace = true
tracing.workspace = true
md-5.workspace = true
nom.workspace = true
pin-project.workspace = true
sha1.workspace = true
sha2.workspace = true
futures.workspace = true
futures-util.workspace = true
tokio.workspace = true
tokio-stream.workspace = true
tokio-util.workspace = true
@ -54,21 +49,13 @@ httpdate.workspace = true
http-range.workspace = true
http-body-util.workspace = true
hyper = { workspace = true, default-features = false, features = ["server", "http1"] }
hyper-util.workspace = true
multer.workspace = true
percent-encoding.workspace = true
roxmltree.workspace = true
url.workspace = true
serde.workspace = true
serde_bytes.workspace = true
serde_json.workspace = true
quick-xml.workspace = true
opentelemetry.workspace = true
opentelemetry-prometheus = { workspace = true, optional = true }
prometheus = { workspace = true, optional = true }
[features]
k2v = [ "garage_util/k2v", "garage_model/k2v" ]
metrics = [ "opentelemetry-prometheus", "prometheus" ]


@ -14,33 +14,33 @@ use garage_util::socket_address::UnixOrTCPSocketAddress;
use garage_model::garage::Garage;
use garage_model::key_table::Key;
use crate::generic_server::*;
use crate::s3::error::*;
use garage_api_common::cors::*;
use garage_api_common::generic_server::*;
use garage_api_common::helpers::*;
use garage_api_common::signature::verify_request;
use crate::signature::verify_request;
use crate::bucket::*;
use crate::copy::*;
use crate::cors::*;
use crate::delete::*;
use crate::error::*;
use crate::get::*;
use crate::lifecycle::*;
use crate::list::*;
use crate::multipart::*;
use crate::post_object::handle_post_object;
use crate::put::*;
use crate::router::Endpoint;
use crate::website::*;
use crate::helpers::*;
use crate::s3::bucket::*;
use crate::s3::copy::*;
use crate::s3::cors::*;
use crate::s3::delete::*;
use crate::s3::get::*;
use crate::s3::lifecycle::*;
use crate::s3::list::*;
use crate::s3::multipart::*;
use crate::s3::post_object::handle_post_object;
use crate::s3::put::*;
use crate::s3::router::Endpoint;
use crate::s3::website::*;
pub use crate::signature::streaming::ReqBody;
pub use garage_api_common::signature::streaming::ReqBody;
pub type ResBody = BoxBody<Error>;
pub struct S3ApiServer {
garage: Arc<Garage>,
}
pub(crate) struct S3ApiEndpoint {
pub struct S3ApiEndpoint {
bucket_name: Option<String>,
endpoint: Endpoint,
}
@ -150,7 +150,8 @@ impl ApiHandler for S3ApiServer {
let bucket_id = garage
.bucket_helper()
.resolve_bucket(&bucket_name, &api_key)
.await?;
.await
.map_err(pass_helper_error)?;
let bucket = garage
.bucket_helper()
.get_existing_bucket(bucket_id)


@ -13,12 +13,13 @@ use garage_util::crdt::*;
use garage_util::data::*;
use garage_util::time::*;
use crate::common_error::CommonError;
use crate::helpers::*;
use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::error::*;
use crate::s3::xml as s3_xml;
use crate::signature::verify_signed_content;
use garage_api_common::common_error::CommonError;
use garage_api_common::helpers::*;
use garage_api_common::signature::verify_signed_content;
use crate::api_server::{ReqBody, ResBody};
use crate::error::*;
use crate::xml as s3_xml;
pub fn handle_get_bucket_location(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
let ReqCtx { garage, .. } = ctx;


@ -15,7 +15,7 @@ use garage_util::error::OkOrMessage;
use garage_model::s3::object_table::*;
use crate::s3::error::*;
use crate::error::*;
pub const X_AMZ_CHECKSUM_ALGORITHM: HeaderName =
HeaderName::from_static("x-amz-checksum-algorithm");
@ -340,8 +340,8 @@ pub(crate) fn request_checksum_value(
Ok(ret.pop())
}
/// Checks for the presense of x-amz-checksum-algorithm
/// if so extract the corrseponding x-amz-checksum-* value
/// Checks for the presence of x-amz-checksum-algorithm
/// if so extract the corresponding x-amz-checksum-* value
pub(crate) fn request_checksum_algorithm_value(
headers: &HeaderMap<HeaderValue>,
) -> Result<Option<ChecksumValue>, Error> {


@ -20,15 +20,16 @@ use garage_model::s3::mpu_table::*;
use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*;
use crate::helpers::*;
use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::checksum::*;
use crate::s3::encryption::EncryptionParams;
use crate::s3::error::*;
use crate::s3::get::full_object_byte_stream;
use crate::s3::multipart;
use crate::s3::put::{get_headers, save_stream, ChecksumMode, SaveStreamResult};
use crate::s3::xml::{self as s3_xml, xmlns_tag};
use garage_api_common::helpers::*;
use crate::api_server::{ReqBody, ResBody};
use crate::checksum::*;
use crate::encryption::EncryptionParams;
use crate::error::*;
use crate::get::full_object_byte_stream;
use crate::multipart;
use crate::put::{get_headers, save_stream, ChecksumMode, SaveStreamResult};
use crate::xml::{self as s3_xml, xmlns_tag};
// -------- CopyObject ---------
@ -63,7 +64,7 @@ pub async fn handle_copy(
let source_checksum_algorithm = source_checksum.map(|x| x.algorithm());
// If source object has a checksum, the destination object must as well.
// The x-amz-checksum-algorihtm header allows to change that algorithm,
// The x-amz-checksum-algorithm header allows to change that algorithm,
// but if it is absent, we must use the same as before
let checksum_algorithm = checksum_algorithm.or(source_checksum_algorithm);
@ -655,7 +656,8 @@ async fn get_copy_source(ctx: &ReqCtx, req: &Request<ReqBody>) -> Result<Object,
let source_bucket_id = garage
.bucket_helper()
.resolve_bucket(&source_bucket.to_string(), api_key)
.await?;
.await
.map_err(pass_helper_error)?;
if !api_key.allow_read(&source_bucket_id) {
return Err(Error::forbidden(format!(
@ -861,7 +863,7 @@ pub struct CopyPartResult {
#[cfg(test)]
mod tests {
use super::*;
use crate::s3::xml::to_xml_with_header;
use crate::xml::to_xml_with_header;
#[test]
fn copy_object_result() -> Result<(), Error> {


@ -1,30 +1,21 @@
use quick_xml::de::from_reader;
use std::sync::Arc;
use http::header::{
ACCESS_CONTROL_ALLOW_HEADERS, ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN,
ACCESS_CONTROL_EXPOSE_HEADERS, ACCESS_CONTROL_REQUEST_HEADERS, ACCESS_CONTROL_REQUEST_METHOD,
};
use hyper::{
body::Body, body::Incoming as IncomingBody, header::HeaderName, Method, Request, Response,
StatusCode,
};
use hyper::{header::HeaderName, Method, Request, Response, StatusCode};
use http_body_util::BodyExt;
use serde::{Deserialize, Serialize};
use crate::common_error::CommonError;
use crate::helpers::*;
use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::error::*;
use crate::s3::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
use crate::signature::verify_signed_content;
use garage_model::bucket_table::{Bucket, BucketParams, CorsRule as GarageCorsRule};
use garage_model::garage::Garage;
use garage_model::bucket_table::{Bucket, CorsRule as GarageCorsRule};
use garage_util::data::*;
use garage_api_common::helpers::*;
use garage_api_common::signature::verify_signed_content;
use crate::api_server::{ReqBody, ResBody};
use crate::error::*;
use crate::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
pub async fn handle_get_cors(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
let ReqCtx { bucket_params, .. } = ctx;
if let Some(cors) = bucket_params.cors_config.get() {
@ -99,154 +90,6 @@ pub async fn handle_put_cors(
.body(empty_body())?)
}
pub async fn handle_options_api(
garage: Arc<Garage>,
req: &Request<IncomingBody>,
bucket_name: Option<String>,
) -> Result<Response<EmptyBody>, CommonError> {
// FIXME: CORS rules of buckets with local aliases are
// not taken into account.
// If the bucket name is a global bucket name,
// we try to apply the CORS rules of that bucket.
// If a user has a local bucket name that has
// the same name, its CORS rules won't be applied
// and will be shadowed by the rules of the globally
// existing bucket (but this is inevitable because
// OPTIONS calls are not auhtenticated).
if let Some(bn) = bucket_name {
let helper = garage.bucket_helper();
let bucket_id = helper.resolve_global_bucket_name(&bn).await?;
if let Some(id) = bucket_id {
let bucket = garage.bucket_helper().get_existing_bucket(id).await?;
let bucket_params = bucket.state.into_option().unwrap();
handle_options_for_bucket(req, &bucket_params)
} else {
// If there is a bucket name in the request, but that name
// does not correspond to a global alias for a bucket,
// then it's either a non-existing bucket or a local bucket.
// We have no way of knowing, because the request is not
// authenticated and thus we can't resolve local aliases.
// We take the permissive approach of allowing everything,
// because we don't want to prevent web apps that use
// local bucket names from making API calls.
Ok(Response::builder()
.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
.header(ACCESS_CONTROL_ALLOW_METHODS, "*")
.status(StatusCode::OK)
.body(EmptyBody::new())?)
}
} else {
// If there is no bucket name in the request,
// we are doing a ListBuckets call, which we want to allow
// for all origins.
Ok(Response::builder()
.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
.header(ACCESS_CONTROL_ALLOW_METHODS, "GET")
.status(StatusCode::OK)
.body(EmptyBody::new())?)
}
}
pub fn handle_options_for_bucket(
req: &Request<IncomingBody>,
bucket_params: &BucketParams,
) -> Result<Response<EmptyBody>, CommonError> {
let origin = req
.headers()
.get("Origin")
.ok_or_bad_request("Missing Origin header")?
.to_str()?;
let request_method = req
.headers()
.get(ACCESS_CONTROL_REQUEST_METHOD)
.ok_or_bad_request("Missing Access-Control-Request-Method header")?
.to_str()?;
let request_headers = match req.headers().get(ACCESS_CONTROL_REQUEST_HEADERS) {
Some(h) => h.to_str()?.split(',').map(|h| h.trim()).collect::<Vec<_>>(),
None => vec![],
};
if let Some(cors_config) = bucket_params.cors_config.get() {
let matching_rule = cors_config
.iter()
.find(|rule| cors_rule_matches(rule, origin, request_method, request_headers.iter()));
if let Some(rule) = matching_rule {
let mut resp = Response::builder()
.status(StatusCode::OK)
.body(EmptyBody::new())?;
add_cors_headers(&mut resp, rule).ok_or_internal_error("Invalid CORS configuration")?;
return Ok(resp);
}
}
Err(CommonError::Forbidden(
"This CORS request is not allowed.".into(),
))
}
pub fn find_matching_cors_rule<'a>(
bucket_params: &'a BucketParams,
req: &Request<impl Body>,
) -> Result<Option<&'a GarageCorsRule>, Error> {
if let Some(cors_config) = bucket_params.cors_config.get() {
if let Some(origin) = req.headers().get("Origin") {
let origin = origin.to_str()?;
let request_headers = match req.headers().get(ACCESS_CONTROL_REQUEST_HEADERS) {
Some(h) => h.to_str()?.split(',').map(|h| h.trim()).collect::<Vec<_>>(),
None => vec![],
};
return Ok(cors_config.iter().find(|rule| {
cors_rule_matches(rule, origin, req.method().as_ref(), request_headers.iter())
}));
}
}
Ok(None)
}
fn cors_rule_matches<'a, HI, S>(
rule: &GarageCorsRule,
origin: &'a str,
method: &'a str,
mut request_headers: HI,
) -> bool
where
HI: Iterator<Item = S>,
S: AsRef<str>,
{
rule.allow_origins.iter().any(|x| x == "*" || x == origin)
&& rule.allow_methods.iter().any(|x| x == "*" || x == method)
&& request_headers.all(|h| {
rule.allow_headers
.iter()
.any(|x| x == "*" || x == h.as_ref())
})
}
pub fn add_cors_headers(
resp: &mut Response<impl Body>,
rule: &GarageCorsRule,
) -> Result<(), http::header::InvalidHeaderValue> {
let h = resp.headers_mut();
h.insert(
ACCESS_CONTROL_ALLOW_ORIGIN,
rule.allow_origins.join(", ").parse()?,
);
h.insert(
ACCESS_CONTROL_ALLOW_METHODS,
rule.allow_methods.join(", ").parse()?,
);
h.insert(
ACCESS_CONTROL_ALLOW_HEADERS,
rule.allow_headers.join(", ").parse()?,
);
h.insert(
ACCESS_CONTROL_EXPOSE_HEADERS,
rule.expose_headers.join(", ").parse()?,
);
Ok(())
}
// ---- SERIALIZATION AND DESERIALIZATION TO/FROM S3 XML ----
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]


@ -5,12 +5,13 @@ use garage_util::data::*;
use garage_model::s3::object_table::*;
use crate::helpers::*;
use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::error::*;
use crate::s3::put::next_timestamp;
use crate::s3::xml as s3_xml;
use crate::signature::verify_signed_content;
use garage_api_common::helpers::*;
use garage_api_common::signature::verify_signed_content;
use crate::api_server::{ReqBody, ResBody};
use crate::error::*;
use crate::put::next_timestamp;
use crate::xml as s3_xml;
async fn handle_delete_internal(ctx: &ReqCtx, key: &str) -> Result<(Uuid, Uuid), Error> {
let ReqCtx {


@ -28,9 +28,10 @@ use garage_util::migrate::Migrate;
use garage_model::garage::Garage;
use garage_model::s3::object_table::{ObjectVersionEncryption, ObjectVersionMetaInner};
use crate::common_error::*;
use crate::s3::checksum::Md5Checksum;
use crate::s3::error::Error;
use garage_api_common::common_error::*;
use crate::checksum::Md5Checksum;
use crate::error::Error;
const X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM: HeaderName =
HeaderName::from_static("x-amz-server-side-encryption-customer-algorithm");


@ -4,19 +4,30 @@ use err_derive::Error;
use hyper::header::HeaderValue;
use hyper::{HeaderMap, StatusCode};
use crate::common_error::CommonError;
pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInternalError};
use crate::generic_server::ApiError;
use crate::helpers::*;
use crate::s3::xml as s3_xml;
use crate::signature::error::Error as SignatureError;
use garage_model::helper::error::Error as HelperError;
pub(crate) use garage_api_common::common_error::pass_helper_error;
use garage_api_common::common_error::{
commonErrorDerivative, helper_error_as_internal, CommonError,
};
pub use garage_api_common::common_error::{
CommonErrorDerivative, OkOrBadRequest, OkOrInternalError,
};
use garage_api_common::generic_server::ApiError;
use garage_api_common::helpers::*;
use garage_api_common::signature::error::Error as SignatureError;
use crate::xml as s3_xml;
/// Errors of this crate
#[derive(Debug, Error)]
pub enum Error {
#[error(display = "{}", _0)]
/// Error from common error
Common(CommonError),
Common(#[error(source)] CommonError),
// Category: cannot process
/// Authorization Header Malformed
@ -78,17 +89,16 @@ pub enum Error {
NotImplemented(String),
}
impl<T> From<T> for Error
where
CommonError: From<T>,
{
fn from(err: T) -> Self {
Error::Common(CommonError::from(err))
commonErrorDerivative!(Error);
// Helper errors are always passed as internal errors by default.
// To pass the specific error code back to the client, use `pass_helper_error`.
impl From<HelperError> for Error {
fn from(err: HelperError) -> Error {
Error::Common(helper_error_as_internal(err))
}
}
impl CommonErrorDerivative for Error {}
impl From<roxmltree::Error> for Error {
fn from(err: roxmltree::Error) -> Self {
Self::InvalidXml(format!("{}", err))


@ -25,11 +25,12 @@ use garage_model::garage::Garage;
use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*;
use crate::helpers::*;
use crate::s3::api_server::ResBody;
use crate::s3::checksum::{add_checksum_response_headers, X_AMZ_CHECKSUM_MODE};
use crate::s3::encryption::EncryptionParams;
use crate::s3::error::*;
use garage_api_common::helpers::*;
use crate::api_server::ResBody;
use crate::checksum::{add_checksum_response_headers, X_AMZ_CHECKSUM_MODE};
use crate::encryption::EncryptionParams;
use crate::error::*;
const X_AMZ_MP_PARTS_COUNT: &str = "x-amz-mp-parts-count";
@ -68,14 +69,11 @@ fn object_headers(
// See: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingMetadata.html
let mut headers_by_name = BTreeMap::new();
for (name, value) in meta_inner.headers.iter() {
match headers_by_name.get_mut(name) {
None => {
headers_by_name.insert(name, vec![value.as_str()]);
}
Some(headers) => {
headers.push(value.as_str());
}
}
let name_lower = name.to_ascii_lowercase();
headers_by_name
.entry(name_lower)
.or_insert(vec![])
.push(value.as_str());
}
for (name, values) in headers_by_name {
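
The rewritten loop folds metadata header names to ASCII lowercase before grouping them with the BTreeMap entry API, so differently-cased x-amz-meta-* names merge into one entry. A self-contained sketch of the resulting behavior (the group_headers helper is hypothetical):

use std::collections::BTreeMap;

fn group_headers(meta: &[(String, String)]) -> BTreeMap<String, Vec<&str>> {
    let mut headers_by_name: BTreeMap<String, Vec<&str>> = BTreeMap::new();
    for (name, value) in meta {
        headers_by_name
            .entry(name.to_ascii_lowercase())
            .or_insert_with(Vec::new)
            .push(value.as_str());
    }
    headers_by_name
}

fn main() {
    let meta = vec![
        ("X-Amz-Meta-Foo".to_string(), "a".to_string()),
        ("x-amz-meta-foo".to_string(), "b".to_string()),
    ];
    // Both spellings collapse into a single lowercase entry:
    assert_eq!(group_headers(&meta)["x-amz-meta-foo"], vec!["a", "b"]);
}
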


@ -1,3 +1,6 @@
#[macro_use]
extern crate tracing;
pub mod api_server;
pub mod error;


@ -5,11 +5,12 @@ use hyper::{Request, Response, StatusCode};
use serde::{Deserialize, Serialize};
use crate::helpers::*;
use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::error::*;
use crate::s3::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
use crate::signature::verify_signed_content;
use garage_api_common::helpers::*;
use garage_api_common::signature::verify_signed_content;
use crate::api_server::{ReqBody, ResBody};
use crate::error::*;
use crate::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
use garage_model::bucket_table::{
parse_lifecycle_date, Bucket, LifecycleExpiration as GarageLifecycleExpiration,


@ -13,13 +13,14 @@ use garage_model::s3::object_table::*;
use garage_table::EnumerationOrder;
use crate::encoding::*;
use crate::helpers::*;
use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::encryption::EncryptionParams;
use crate::s3::error::*;
use crate::s3::multipart as s3_multipart;
use crate::s3::xml as s3_xml;
use garage_api_common::encoding::*;
use garage_api_common::helpers::*;
use crate::api_server::{ReqBody, ResBody};
use crate::encryption::EncryptionParams;
use crate::error::*;
use crate::multipart as s3_multipart;
use crate::xml as s3_xml;
const DUMMY_NAME: &str = "Dummy Key";
const DUMMY_KEY: &str = "GKDummyKey";
@ -398,7 +399,7 @@ enum ExtractionResult {
key: String,
},
// Fallback key is used for legacy APIs that only support
// exlusive pagination (and not inclusive one).
// exclusive pagination (and not inclusive one).
SkipTo {
key: String,
fallback_key: Option<String>,
@ -408,7 +409,7 @@ enum ExtractionResult {
#[derive(PartialEq, Clone, Debug)]
enum RangeBegin {
// Fallback key is used for legacy APIs that only support
// exlusive pagination (and not inclusive one).
// exclusive pagination (and not inclusive one).
IncludingKey {
key: String,
fallback_key: Option<String>,


@ -15,14 +15,15 @@ use garage_model::s3::mpu_table::*;
use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*;
use crate::helpers::*;
use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::checksum::*;
use crate::s3::encryption::EncryptionParams;
use crate::s3::error::*;
use crate::s3::put::*;
use crate::s3::xml as s3_xml;
use crate::signature::verify_signed_content;
use garage_api_common::helpers::*;
use garage_api_common::signature::verify_signed_content;
use crate::api_server::{ReqBody, ResBody};
use crate::checksum::*;
use crate::encryption::EncryptionParams;
use crate::error::*;
use crate::put::*;
use crate::xml as s3_xml;
// ----


@ -16,15 +16,16 @@ use serde::Deserialize;
use garage_model::garage::Garage;
use garage_model::s3::object_table::*;
use crate::helpers::*;
use crate::s3::api_server::ResBody;
use crate::s3::checksum::*;
use crate::s3::cors::*;
use crate::s3::encryption::EncryptionParams;
use crate::s3::error::*;
use crate::s3::put::{get_headers, save_stream, ChecksumMode};
use crate::s3::xml as s3_xml;
use crate::signature::payload::{verify_v4, Authorization};
use garage_api_common::cors::*;
use garage_api_common::helpers::*;
use garage_api_common::signature::payload::{verify_v4, Authorization};
use crate::api_server::ResBody;
use crate::checksum::*;
use crate::encryption::EncryptionParams;
use crate::error::*;
use crate::put::{get_headers, save_stream, ChecksumMode};
use crate::xml as s3_xml;
pub async fn handle_post_object(
garage: Arc<Garage>,
@ -107,7 +108,8 @@ pub async fn handle_post_object(
let bucket_id = garage
.bucket_helper()
.resolve_bucket(&bucket_name, &api_key)
.await?;
.await
.map_err(pass_helper_error)?;
if !api_key.allow_write(&bucket_id) {
return Err(Error::forbidden("Operation is not allowed for this key."));
@ -213,7 +215,7 @@ pub async fn handle_post_object(
}
// if we ever start supporting ACLs, we likely want to map "acl" to x-amz-acl" somewhere
// arround here to make sure the rest of the machinery takes our acl into account.
// around here to make sure the rest of the machinery takes our acl into account.
let headers = get_headers(&params)?;
let expected_checksums = ExpectedChecksums {


@ -30,11 +30,12 @@ use garage_model::s3::block_ref_table::*;
use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*;
use crate::helpers::*;
use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::checksum::*;
use crate::s3::encryption::EncryptionParams;
use crate::s3::error::*;
use garage_api_common::helpers::*;
use crate::api_server::{ReqBody, ResBody};
use crate::checksum::*;
use crate::encryption::EncryptionParams;
use crate::error::*;
const PUT_BLOCKS_MAX_PARALLEL: usize = 3;
@ -622,7 +623,7 @@ pub(crate) fn get_headers(headers: &HeaderMap<HeaderValue>) -> Result<HeaderList
for (name, value) in headers.iter() {
if name.as_str().starts_with("x-amz-meta-") {
ret.push((
name.to_string(),
name.as_str().to_ascii_lowercase(),
std::str::from_utf8(value.as_bytes())?.to_string(),
));
}


@ -3,9 +3,10 @@ use std::borrow::Cow;
use hyper::header::HeaderValue;
use hyper::{HeaderMap, Method, Request};
use crate::helpers::Authorization;
use crate::router_macros::{generateQueryParameters, router_match};
use crate::s3::error::*;
use garage_api_common::helpers::Authorization;
use garage_api_common::router_macros::{generateQueryParameters, router_match};
use crate::error::*;
router_match! {@func


@ -4,15 +4,16 @@ use http_body_util::BodyExt;
use hyper::{Request, Response, StatusCode};
use serde::{Deserialize, Serialize};
use crate::helpers::*;
use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::error::*;
use crate::s3::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
use crate::signature::verify_signed_content;
use garage_model::bucket_table::*;
use garage_util::data::*;
use garage_api_common::helpers::*;
use garage_api_common::signature::verify_signed_content;
use crate::api_server::{ReqBody, ResBody};
use crate::error::*;
use crate::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
pub async fn handle_get_website(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
let ReqCtx { bucket_params, .. } = ctx;
if let Some(website) = bucket_params.website_config.get() {
@ -276,7 +277,7 @@ impl Redirect {
return Err(Error::bad_request("Bad XML: invalid protocol"));
}
}
// TODO there are probably more invalide cases, but which ones?
// TODO there are probably more invalid cases, but which ones?
Ok(())
}
}


@ -1,7 +1,7 @@
use quick_xml::se::to_string;
use serde::{Deserialize, Serialize, Serializer};
use crate::s3::error::Error as ApiError;
use crate::error::Error as ApiError;
pub fn to_xml_with_header<T: Serialize>(x: &T) -> Result<String, ApiError> {
let mut xml = r#"<?xml version="1.0" encoding="UTF-8"?>"#.to_string();


@ -34,10 +34,8 @@ async-compression.workspace = true
zstd.workspace = true
serde.workspace = true
serde_bytes.workspace = true
futures.workspace = true
futures-util.workspace = true
tokio.workspace = true
tokio-util.workspace = true


@ -279,7 +279,8 @@ impl DataLayout {
u16::from_be_bytes([
hash.as_slice()[HASH_DRIVE_BYTES.0],
hash.as_slice()[HASH_DRIVE_BYTES.1],
]) as usize % DRIVE_NPART
]) as usize
% DRIVE_NPART
}
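
This hunk is only a formatting change, but the expression is worth unpacking: two fixed bytes of the block hash are read as a big-endian u16 and reduced modulo the number of partitions per drive. A standalone sketch, with the constant values assumed for illustration (the real constants live in the surrounding module):

const HASH_DRIVE_BYTES: (usize, usize) = (2, 3);
const DRIVE_NPART: usize = 1024;

fn partition_from(hash: &[u8; 32]) -> usize {
    u16::from_be_bytes([
        hash[HASH_DRIVE_BYTES.0],
        hash[HASH_DRIVE_BYTES.1],
    ]) as usize
        % DRIVE_NPART
}

fn main() {
    let mut hash = [0u8; 32];
    hash[2] = 0x01; // big-endian high byte
    hash[3] = 0x02;
    // 0x0102 = 258, and 258 % 1024 = 258
    assert_eq!(partition_from(&hash), 258);
}
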
fn block_dir_from(&self, hash: &Hash, dir: &PathBuf) -> PathBuf {


@ -105,7 +105,7 @@ impl BlockResyncManager {
}
}
/// Get lenght of resync queue
/// Get length of resync queue
pub fn queue_len(&self) -> Result<usize, Error> {
Ok(self.queue.len()?)
}
@ -185,10 +185,10 @@ impl BlockResyncManager {
//
// - resync.errors: a tree that indicates for each block
// if the last resync resulted in an error, and if so,
// the following two informations (see the ErrorCounter struct):
// the following two pieces of information (see the ErrorCounter struct):
// - how many consecutive resync errors for this block?
// - when was the last try?
// These two informations are used to implement an
// These two pieces of information are used to implement an
// exponential backoff retry strategy.
// The key in this tree is the 32-byte hash of the block,
// and the value is the encoded ErrorCounter value.


@ -13,7 +13,6 @@ path = "lib.rs"
[dependencies]
err-derive.workspace = true
hexdump.workspace = true
tracing.workspace = true
heed = { workspace = true, optional = true }


@ -122,7 +122,7 @@ impl Db {
_ => unreachable!(),
},
Err(TxError::Db(e2)) => match ret {
// Ok was stored -> the error occured when finalizing
// Ok was stored -> the error occurred when finalizing
// transaction
Ok(_) => Err(TxError::Db(e2)),
// An error was already stored: that's the one we want to


@ -233,7 +233,7 @@ impl<'a> LmdbTx<'a> {
fn get_tree(&self, i: usize) -> TxOpResult<&Database> {
self.trees.get(i).ok_or_else(|| {
TxOpError(Error(
"invalid tree id (it might have been openned after the transaction started)".into(),
"invalid tree id (it might have been opened after the transaction started)".into(),
))
})
}


@ -142,11 +142,14 @@ impl IDb for SqliteDb {
fn snapshot(&self, to: &PathBuf) -> Result<()> {
fn progress(p: rusqlite::backup::Progress) {
let percent = (p.pagecount - p.remaining) * 100 / p.pagecount;
info!("Sqlite snapshot progres: {}%", percent);
info!("Sqlite snapshot progress: {}%", percent);
}
std::fs::create_dir_all(to)?;
let mut path = to.clone();
path.push("db.sqlite");
self.db
.get()?
.backup(rusqlite::DatabaseName::Main, to, Some(progress))?;
.backup(rusqlite::DatabaseName::Main, path, Some(progress))?;
Ok(())
}
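
The snapshot now lands in <snapshot_dir>/db.sqlite instead of using the bare snapshot directory path as the backup target, matching LMDB's data.mdb layout. A sketch of the resulting call shape, assuming rusqlite with its "backup" feature (println! stands in for the info! tracing macro used here):

use std::path::Path;

fn snapshot(db: &rusqlite::Connection, to: &Path) -> Result<(), Box<dyn std::error::Error>> {
    fn progress(p: rusqlite::backup::Progress) {
        let percent = (p.pagecount - p.remaining) * 100 / p.pagecount;
        println!("Sqlite snapshot progress: {}%", percent);
    }
    // Create snapshots/<timestamp>/ first, then back up into db.sqlite inside it.
    std::fs::create_dir_all(to)?;
    let path = to.join("db.sqlite");
    db.backup(rusqlite::DatabaseName::Main, path, Some(progress))?;
    Ok(())
}
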
@ -304,7 +307,7 @@ impl<'a> SqliteTx<'a> {
fn get_tree(&self, i: usize) -> TxOpResult<&'_ str> {
self.trees.get(i).map(Arc::as_ref).ok_or_else(|| {
TxOpError(Error(
"invalid tree id (it might have been openned after the transaction started)".into(),
"invalid tree id (it might have been opened after the transaction started)".into(),
))
})
}


@ -23,7 +23,9 @@ path = "tests/lib.rs"
[dependencies]
format_table.workspace = true
garage_db.workspace = true
garage_api.workspace = true
garage_api_admin.workspace = true
garage_api_s3.workspace = true
garage_api_k2v = { workspace = true, optional = true }
garage_block.workspace = true
garage_model.workspace = true
garage_net.workspace = true
@ -40,7 +42,6 @@ parse_duration.workspace = true
hex.workspace = true
tracing.workspace = true
tracing-subscriber.workspace = true
rand.workspace = true
async-trait.workspace = true
sha1.workspace = true
sodiumoxide.workspace = true
@ -48,21 +49,18 @@ structopt.workspace = true
git-version.workspace = true
serde.workspace = true
serde_bytes.workspace = true
toml.workspace = true
futures.workspace = true
futures-util.workspace = true
tokio.workspace = true
opentelemetry.workspace = true
opentelemetry-prometheus = { workspace = true, optional = true }
opentelemetry-otlp = { workspace = true, optional = true }
prometheus = { workspace = true, optional = true }
syslog-tracing = { workspace = true, optional = true }
[dev-dependencies]
aws-config.workspace = true
garage_api_common.workspace = true
aws-sdk-s3.workspace = true
chrono.workspace = true
http.workspace = true
@ -84,7 +82,7 @@ k2v-client.workspace = true
[features]
default = [ "bundled-libs", "metrics", "lmdb", "sqlite", "k2v" ]
k2v = [ "garage_util/k2v", "garage_api/k2v" ]
k2v = [ "garage_util/k2v", "garage_api_k2v" ]
# Database engines
lmdb = [ "garage_model/lmdb" ]
@ -95,7 +93,7 @@ consul-discovery = [ "garage_rpc/consul-discovery" ]
# Automatic registration and discovery via Kubernetes API
kubernetes-discovery = [ "garage_rpc/kubernetes-discovery" ]
# Prometheus exporter (/metrics endpoint).
-metrics = [ "garage_api/metrics", "opentelemetry-prometheus", "prometheus" ]
+metrics = [ "garage_api_admin/metrics", "opentelemetry-prometheus" ]
# Exporter for the OpenTelemetry Collector.
telemetry-otlp = [ "opentelemetry-otlp" ]
# Logging to syslog


@ -129,7 +129,7 @@ pub async fn cmd_assign_role(
zone: args
.zone
.clone()
.ok_or("Please specifiy a zone with the -z flag")?,
.ok_or("Please specify a zone with the -z flag")?,
capacity,
tags: args.tags.clone(),
}
@ -145,7 +145,7 @@ pub async fn cmd_assign_role(
send_layout(rpc_cli, rpc_host, layout).await?;
println!("Role changes are staged but not yet commited.");
println!("Role changes are staged but not yet committed.");
println!("Use `garage layout show` to view staged role changes,");
println!("and `garage layout apply` to enact staged changes.");
Ok(())
@ -172,7 +172,7 @@ pub async fn cmd_remove_role(
send_layout(rpc_cli, rpc_host, layout).await?;
println!("Role removal is staged but not yet commited.");
println!("Role removal is staged but not yet committed.");
println!("Use `garage layout show` to view staged role changes,");
println!("and `garage layout apply` to enact staged changes.");
Ok(())


@ -184,7 +184,7 @@ pub struct SkipDeadNodesOpt {
/// This will generally be the current layout version.
#[structopt(long = "version")]
pub(crate) version: u64,
-/// Allow the skip even if a quorum of ndoes could not be found for
+/// Allow the skip even if a quorum of nodes could not be found for
/// the data among the remaining nodes
#[structopt(long = "allow-missing-data")]
pub(crate) allow_missing_data: bool,


@ -107,7 +107,7 @@ async fn main() {
);
// Initialize panic handler that aborts on panic and shows a nice message.
-// By default, Tokio continues runing normally when a task panics. We want
+// By default, Tokio continues running normally when a task panics. We want
// to avoid this behavior in Garage as this would risk putting the process in an
// unknown/uncontrollable state. We prefer to exit the process and restart it
// from scratch, so that it boots back into a fresh, known state.
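
The abort-on-panic behavior described here can be sketched with a standard panic hook (illustrative only; Garage's actual handler may differ):

    use std::{panic, process};

    fn install_abort_on_panic_hook() {
        let default_hook = panic::take_hook();
        panic::set_hook(Box::new(move |info| {
            // Print the usual panic report first...
            default_hook(info);
            // ...then kill the whole process instead of letting the
            // Tokio runtime keep running with one task dead.
            process::abort();
        }));
    }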


@ -104,7 +104,7 @@ pub(crate) fn fill_secret(
if let Some(val) = cli_value {
if config_secret.is_some() || config_secret_file.is_some() {
debug!("Overriding secret `{}` using value specified using CLI argument or environnement variable.", name);
debug!("Overriding secret `{}` using value specified using CLI argument or environment variable.", name);
}
*config_secret = Some(val);


@ -6,13 +6,13 @@ use garage_util::background::*;
use garage_util::config::*;
use garage_util::error::Error;
-use garage_api::admin::api_server::AdminApiServer;
-use garage_api::s3::api_server::S3ApiServer;
+use garage_api_admin::api_server::AdminApiServer;
+use garage_api_s3::api_server::S3ApiServer;
use garage_model::garage::Garage;
use garage_web::WebServer;
#[cfg(feature = "k2v")]
-use garage_api::k2v::api_server::K2VApiServer;
+use garage_api_k2v::api_server::K2VApiServer;
use crate::admin::*;
use crate::secrets::{fill_secrets, Secrets};


@ -15,7 +15,7 @@ use hyper_util::client::legacy::{connect::HttpConnector, Client};
use hyper_util::rt::TokioExecutor;
use super::garage::{Instance, Key};
-use garage_api::signature;
+use garage_api_common::signature;
pub type Body = FullBody<hyper::body::Bytes>;
@ -153,7 +153,7 @@ impl<'a> RequestBuilder<'a> {
pub async fn send(&mut self) -> Result<Response<Body>, String> {
// TODO this is a bit incorrect in that path and query params should be url-encoded and
-// aren't, but this is good enought for now.
+// aren't, but this is good enough for now.
let query = query_param_to_string(&self.query_params);
let (host, path) = if self.vhost_style {
@ -210,9 +210,9 @@ impl<'a> RequestBuilder<'a> {
HeaderName::from_static("x-amz-decoded-content-length"),
HeaderValue::from_str(&self.body.len().to_string()).unwrap(),
);
-// Get lenght of body by doing the conversion to a streaming body with an
+// Get length of body by doing the conversion to a streaming body with an
// invalid signature (we don't know the seed) just to get its length. This
-// is a pretty lazy and inefficient way to do it, but it's enought for test
+// is a pretty lazy and inefficient way to do it, but it's enough for test
// code.
all_headers.insert(
CONTENT_LENGTH,


@ -29,12 +29,11 @@ tokio.workspace = true
# cli deps
clap = { workspace = true, optional = true }
format_table = { workspace = true, optional = true }
-tracing = { workspace = true, optional = true }
tracing-subscriber = { workspace = true, optional = true }
[features]
-cli = ["clap", "tokio/fs", "tokio/io-std", "tracing", "tracing-subscriber", "format_table"]
+cli = ["clap", "tokio/fs", "tokio/io-std", "tracing-subscriber", "format_table"]
[lib]
path = "lib.rs"


@ -54,7 +54,7 @@ enum Command {
partition_key: String,
/// Sort key to read from
sort_key: String,
-/// Output formating
+/// Output formatting
#[clap(flatten)]
output_kind: ReadOutputKind,
},
@ -70,7 +70,7 @@ enum Command {
/// Timeout, in seconds
#[clap(short = 'T', long)]
timeout: Option<u64>,
-/// Output formating
+/// Output formatting
#[clap(flatten)]
output_kind: ReadOutputKind,
},
@ -87,7 +87,7 @@ enum Command {
/// Timeout, in seconds
#[clap(short = 'T', long)]
timeout: Option<u64>,
-/// Output formating
+/// Output formatting
#[clap(flatten)]
output_kind: BatchOutputKind,
},
@ -103,7 +103,7 @@ enum Command {
},
/// List partition keys
ReadIndex {
-/// Output formating
+/// Output formatting
#[clap(flatten)]
output_kind: BatchOutputKind,
/// Output only partition keys matching this filter
@ -114,7 +114,7 @@ enum Command {
ReadRange {
/// Partition key to read from
partition_key: String,
-/// Output formating
+/// Output formatting
#[clap(flatten)]
output_kind: BatchOutputKind,
/// Output only sort keys matching this filter
@ -125,7 +125,7 @@ enum Command {
DeleteRange {
/// Partition key to delete from
partition_key: String,
-/// Output formating
+/// Output formatting
#[clap(flatten)]
output_kind: BatchOutputKind,
/// Delete only sort keys matching this filter
@ -185,10 +185,10 @@ struct ReadOutputKind {
/// Raw output. Conflicts generate error, causality token is not returned
#[clap(short, long, group = "output-kind")]
raw: bool,
-/// Human formated output
+/// Human formatted output
#[clap(short = 'H', long, group = "output-kind")]
human: bool,
-/// JSON formated output
+/// JSON formatted output
#[clap(short, long, group = "output-kind")]
json: bool,
}
@ -207,7 +207,7 @@ impl ReadOutputKind {
let mut val = val.value;
if val.len() != 1 {
eprintln!(
"Raw mode can only read non-concurent values, found {} values, expected 1",
"Raw mode can only read non-concurrent values, found {} values, expected 1",
val.len()
);
exit(1);
@ -265,10 +265,10 @@ impl ReadOutputKind {
#[derive(Parser, Debug)]
#[clap(group = clap::ArgGroup::new("output-kind").multiple(false).required(false))]
struct BatchOutputKind {
-/// Human formated output
+/// Human formatted output
#[clap(short = 'H', long, group = "output-kind")]
human: bool,
-/// JSON formated output
+/// JSON formatted output
#[clap(short, long, group = "output-kind")]
json: bool,
}


@ -336,7 +336,7 @@ impl K2vClient {
.collect())
}
-/// Perform a DeleteBatch request, deleting mutiple values or range of values at once, without
+/// Perform a DeleteBatch request, deleting multiple values or range of values at once, without
/// providing causality information.
pub async fn delete_batch(&self, operations: &[BatchDeleteOp<'_>]) -> Result<Vec<u64>, Error> {
let url = self.build_url(None, &[("delete", "")]);
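
A hedged usage sketch of this method (client construction elided; the BatchDeleteOp constructor is assumed from the rest of the crate, not shown in this diff):

    // Delete every value under partition key "logs", without providing
    // causality tokens; one deletion count is returned per operation.
    let deleted: Vec<u64> = client
        .delete_batch(&[BatchDeleteOp::new("logs")])
        .await?;
    println!("deleted {} values", deleted.iter().sum::<u64>());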


@ -22,7 +22,6 @@ garage_util.workspace = true
garage_net.workspace = true
async-trait.workspace = true
arc-swap.workspace = true
blake2.workspace = true
chrono.workspace = true
err-derive.workspace = true
@ -38,9 +37,7 @@ serde.workspace = true
serde_bytes.workspace = true
futures.workspace = true
futures-util.workspace = true
tokio.workspace = true
opentelemetry.workspace = true
[features]
default = [ "lmdb", "sqlite" ]


@ -89,9 +89,9 @@ pub fn is_valid_bucket_name(n: &str) -> bool {
// Bucket names must start and end with a letter or a number
&& !n.starts_with(&['-', '.'][..])
&& !n.ends_with(&['-', '.'][..])
-// Bucket names must not be formated as an IP address
+// Bucket names must not be formatted as an IP address
&& n.parse::<std::net::IpAddr>().is_err()
-// Bucket names must not start wih "xn--"
+// Bucket names must not start with "xn--"
&& !n.starts_with("xn--")
// Bucket names must not end with "-s3alias"
&& !n.ends_with("-s3alias")


@ -14,7 +14,7 @@ mod v08 {
/// A bucket is a collection of objects
///
/// Its parameters are not directly accessible as:
-/// - It must be possible to merge paramaters, hence the use of a LWW CRDT.
+/// - It must be possible to merge parameters, hence the use of a LWW CRDT.
/// - A bucket has 2 states, Present or Deleted and parameters make sense only if present.
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct Bucket {
@ -126,7 +126,7 @@ impl AutoCrdt for BucketQuotas {
}
impl BucketParams {
-/// Create an empty BucketParams with no authorized keys and no website accesss
+/// Create an empty BucketParams with no authorized keys and no website access
fn new() -> Self {
BucketParams {
creation_date: now_msec(),


@ -231,7 +231,7 @@ impl<'a> LockedHelper<'a> {
let bucket_p_local_alias_key = (key.key_id.clone(), alias_name.clone());
// Calculate the timestamp to assign to this aliasing in the two local_aliases maps
-// (the one from key to bucket, and the reverse one stored in the bucket iself)
+// (the one from key to bucket, and the reverse one stored in the bucket itself)
// so that merges on both maps in case of a concurrent operation resolve
// to the same alias being set
let alias_ts = increment_logical_clock_2(
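
The idea of picking one timestamp that dominates both existing map entries can be sketched as a Lamport-style clock (hypothetical helper; Garage's actual signature may differ):

    fn increment_logical_clock_2(prev_a: u64, prev_b: u64) -> u64 {
        // Strictly newer than both previous timestamps, and never
        // behind the current wall clock (in ms).
        let now_ms = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .expect("clock before epoch")
            .as_millis() as u64;
        now_ms.max(prev_a + 1).max(prev_b + 1)
    }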
@ -279,7 +279,8 @@ impl<'a> LockedHelper<'a> {
.local_aliases
.get(alias_name)
.cloned()
-.flatten() != Some(bucket_id)
+.flatten()
+!= Some(bucket_id)
{
return Err(GarageError::Message(format!(
"Bucket {:?} does not have alias {} in namespace of key {}",


@ -16,8 +16,6 @@ use serde::{Deserialize, Serialize};
use garage_util::data::*;
-use crate::helper::error::{Error as HelperError, OkOrBadRequest};
/// Node IDs used in K2V are u64 integers that are the abbreviation
/// of full Garage node IDs which are 256-bit UUIDs.
pub type K2VNodeId = u64;
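
One plausible way to derive such an abbreviation (sketch; Garage's actual conversion is not shown in this diff):

    fn k2v_node_id(full_node_id: &[u8; 32]) -> u64 {
        // Keep the first 8 bytes of the 256-bit node UUID,
        // read as a big-endian u64.
        u64::from_be_bytes(full_node_id[..8].try_into().expect("8 bytes"))
    }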
@ -99,10 +97,6 @@ impl CausalContext {
Some(ret)
}
-pub fn parse_helper(s: &str) -> Result<Self, HelperError> {
-Self::parse(s).ok_or_bad_request("Invalid causality token")
-}
/// Check if this causal context contains newer items than another one
pub fn is_newer_than(&self, other: &Self) -> bool {
vclock_gt(&self.vector_clock, &other.vector_clock)


@ -310,7 +310,7 @@ impl K2VRpcHandler {
// - we have a response to a read quorum of requests (e.g. 2/3), and an extra delay
// has passed since the quorum was achieved
// - a global RPC timeout expired
-// The extra delay after a quorum was received is usefull if the third response was to
+// The extra delay after a quorum was received is useful if the third response was to
// arrive during this short interval: this would allow us to consider all the data seen
// by that last node in the response we produce, and would likely help reduce the
// size of the seen marker that we will return (because we would have an info of the
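
The "quorum plus a short grace period" pattern described here can be sketched like this (hypothetical helper built on futures/tokio, not Garage's RPC layer):

    use std::time::Duration;
    use futures::stream::{FuturesUnordered, StreamExt};

    async fn gather_with_grace<F, T>(
        requests: Vec<F>,
        quorum: usize,
        grace: Duration,
    ) -> Vec<T>
    where
        F: std::future::Future<Output = T>,
    {
        let mut pending: FuturesUnordered<F> = requests.into_iter().collect();
        let mut responses = Vec::new();
        // Phase 1: wait until a quorum of responses has arrived.
        while responses.len() < quorum {
            match pending.next().await {
                Some(r) => responses.push(r),
                None => return responses, // fewer nodes than the quorum
            }
        }
        // Phase 2: accept stragglers for a short grace period, so that
        // their data can still be folded into the final answer.
        let _ = tokio::time::timeout(grace, async {
            while let Some(r) = pending.next().await {
                responses.push(r);
            }
        })
        .await;
        responses
    }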
@ -500,7 +500,7 @@ impl K2VRpcHandler {
} else {
// If no seen marker was specified, we do not poll for anything.
// We return immediately with the set of known items (even if
-// it is empty), which will give the client an inital view of
+// it is empty), which will give the client an initial view of
// the dataset and an initial seen marker for further
// PollRange calls.
self.poll_range_read_range(range, &RangeSeenMarker::default())


@ -70,7 +70,7 @@ pub fn register_bg_vars(
impl LifecycleWorker {
pub fn new(garage: Arc<Garage>, persister: PersisterShared<LifecycleWorkerPersisted>) -> Self {
-let today = today();
+let today = today(garage.config.use_local_tz);
let last_completed = persister.get_with(|x| {
x.last_completed
.as_deref()
@ -205,8 +205,9 @@ impl Worker for LifecycleWorker {
async fn wait_for_work(&mut self) -> WorkerState {
match &self.state {
State::Completed(d) => {
+let use_local_tz = self.garage.config.use_local_tz;
let next_day = d.succ_opt().expect("no next day");
-let next_start = midnight_ts(next_day);
+let next_start = midnight_ts(next_day, use_local_tz);
loop {
let now = now_msec();
if now < next_start {
@ -218,7 +219,7 @@ impl Worker for LifecycleWorker {
break;
}
}
-self.state = State::start(std::cmp::max(next_day, today()));
+self.state = State::start(std::cmp::max(next_day, today(use_local_tz)));
}
State::Running { .. } => (),
}
@ -385,10 +386,16 @@ fn check_size_filter(version_data: &ObjectVersionData, filter: &LifecycleFilter)
true
}
-fn midnight_ts(date: NaiveDate) -> u64 {
-date.and_hms_opt(0, 0, 0)
-.expect("midnight does not exist")
-.timestamp_millis() as u64
+fn midnight_ts(date: NaiveDate, use_local_tz: bool) -> u64 {
+let midnight = date.and_hms_opt(0, 0, 0).expect("midnight does not exist");
+if use_local_tz {
+return midnight
+.and_local_timezone(Local)
+.single()
+.expect("bad local midnight")
+.timestamp_millis() as u64;
+}
+midnight.timestamp_millis() as u64
}
fn next_date(ts: u64) -> NaiveDate {
@ -399,6 +406,9 @@ fn next_date(ts: u64) -> NaiveDate {
.expect("no next day")
}
-fn today() -> NaiveDate {
+fn today(use_local_tz: bool) -> NaiveDate {
+if use_local_tz {
+return Local::now().naive_local().date();
+}
Utc::now().naive_utc().date()
}
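
For illustration, the difference between the two modes (sketch, reusing midnight_ts as defined above; chrono types assumed imported):

    use chrono::NaiveDate;

    fn example() {
        let day = NaiveDate::from_ymd_opt(2025, 1, 27).expect("valid date");
        // false: midnight UTC; true: local midnight converted to a UTC
        // timestamp, so the daily run follows the node's wall clock.
        let utc_ms = midnight_ts(day, false);
        let local_ms = midnight_ts(day, true);
        // Timezone offsets are under a day, so the two can differ by
        // at most 24 hours.
        assert!(utc_ms.abs_diff(local_ms) < 24 * 3600 * 1000);
    }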

Some files were not shown because too many files have changed in this diff.