Compare commits: typo ... main

163 commits

Author SHA1 Message Date
c5237c31e7 Merge pull request 'Implement all HTTP preconditions in GetObject/HeadObject' (#967) from fix-804 into main
Reviewed-on: Deuxfleurs/garage#967
2025-02-19 17:31:26 +00:00
f87943a39d tests: add test for http preconditions 2025-02-19 18:26:03 +01:00
c0846c56fe api: unify http precondition handling 2025-02-19 18:14:27 +01:00
1cb0ae10a8 Merge pull request 'fix crash in layout computation when changing all nodes of a zone to gateway mode' (#937) from baptiste/garage:fix_layout_crash into main
Reviewed-on: Deuxfleurs/garage#937
Reviewed-by: Alex <lx@deuxfleurs.fr>
2025-02-19 17:09:10 +00:00
1a8f74fc94 api: GetObject: implement if-match and if-unmodified-since 2025-02-19 17:26:29 +01:00
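The preconditions implemented above can be exercised from any S3 client. A minimal sketch using the AWS CLI against a local endpoint (bucket, key, endpoint, and output path are placeholders, not taken from this changeset):

```bash
# Hypothetical names/endpoint; fetch the object's current ETag first.
ETAG=$(aws --endpoint-url http://localhost:3900 s3api head-object \
  --bucket my-bucket --key my-key --query ETag --output text)

# If-Match: the GET succeeds only while the object still has this ETag.
aws --endpoint-url http://localhost:3900 s3api get-object \
  --bucket my-bucket --key my-key --if-match "$ETAG" /tmp/out

# If-None-Match: returns 304 Not Modified (a CLI error) while unchanged.
aws --endpoint-url http://localhost:3900 s3api get-object \
  --bucket my-bucket --key my-key --if-none-match "$ETAG" /tmp/out || true
```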
2191620af5 Merge pull request 'web: implement x-amz-website-redirect-location' (#966) from redirect-location-header into main
Reviewed-on: Deuxfleurs/garage#966
2025-02-19 16:10:04 +00:00
bf27a3ec98 web: implement x-amz-website-redirect-location 2025-02-19 17:04:10 +01:00
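A rough sketch of what this enables (bucket, key, and target URL are hypothetical): the redirect is attached at upload time, and the web endpoint is expected to answer with a redirect instead of the object body.

```bash
# Store a redirect marker object (names and endpoint are placeholders).
aws --endpoint-url http://localhost:3900 s3api put-object \
  --bucket my-site --key old-page.html \
  --website-redirect-location "https://example.com/new-page.html"
# Requests for /old-page.html on the website endpoint should then be
# redirected to the target URL.
```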
f64ec6e542 Merge pull request 'implement STREAMING-*-PAYLOAD-TRAILER' (#960) from fix-824 into main
Reviewed-on: Deuxfleurs/garage#960
2025-02-19 09:59:32 +00:00
6d38907dac test: verify saved checksums in streaming putobject tests 2025-02-18 22:02:04 +01:00
cfe8e8d45c api: PutObject: save trailer checksum in metadata 2025-02-18 21:56:32 +01:00
f6e805e7db api: various fixes 2025-02-18 21:47:53 +01:00
45e10e55f9 update aws-sdk-s3 in tests and fix wrong checksumming behavior in GetObject 2025-02-18 15:33:42 +01:00
730bfee753 api: validate trailing checksum + add test for unsigned-payload-trailer 2025-02-18 15:33:42 +01:00
ccab0e4ae5 api: fix optional \n after trailer checksum header 2025-02-18 15:33:42 +01:00
abb60dcf7e api: remove content-encoding: aws-chunked for streaming payload 2025-02-18 15:33:42 +01:00
f8b0817ddc api: streaming signature: fix trailer parsing 2025-02-18 12:00:41 +01:00
21c0dda16a api: refactor: move checksumming code around again 2025-02-17 20:11:06 +01:00
658541d812 api: use checksumming in api_common::signature for put/putpart 2025-02-17 19:54:25 +01:00
c5df820e2c api: start refactor of signature to calculate checksums earlier 2025-02-17 18:47:06 +01:00
a04d6cd5b8 api: streaming: parse unsigned streaming bodies and payload trailers 2025-02-17 16:23:24 +01:00
44a896f9b5 api: add logic to parse x-amz-content-sha256 2025-02-16 18:25:35 +01:00
cee7560fc1 api: refactor: move checksum algorithms to common 2025-02-16 17:25:55 +01:00
2f0c5ca220 signature: refactor: move constant defs to mod.rs 2025-02-16 16:34:18 +01:00
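Modern AWS SDKs and CLIs upload with STREAMING-UNSIGNED-PAYLOAD-TRAILER and send checksums in a trailing header, which is the code path the commits above add. A hedged sketch (endpoint and names are placeholders):

```bash
# Requesting a checksum makes recent clients send it as a payload trailer.
aws --endpoint-url http://localhost:3900 s3api put-object \
  --bucket my-bucket --key big-file --body ./big-file \
  --checksum-algorithm CRC32

# The checksum saved in the object metadata can be read back afterwards.
aws --endpoint-url http://localhost:3900 s3api head-object \
  --bucket my-bucket --key big-file --checksum-mode ENABLED
```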
859b38b0d2 Merge pull request 'fix compilation warnings' (#959) from fixes into main
Reviewed-on: Deuxfleurs/garage#959
2025-02-14 17:32:30 +00:00
2729a71d9d fix warning in garage test 2025-02-14 18:27:00 +01:00
c9d00f5f7b garage_api_s3: remove unused field in ListPartsQuery 2025-02-14 18:25:23 +01:00
89c944ebd6 Merge pull request 's3api: return Location in CompleteMultipartUpload (fix #852)' (#958) from fix-852 into main
Reviewed-on: Deuxfleurs/garage#958
2025-02-14 17:16:58 +00:00
24470377c9 garage_model: fix warning about dead code 2025-02-14 18:12:14 +01:00
5b26545abf fix deprecated uses of chrono in lifecycle worker 2025-02-14 18:08:23 +01:00
9c7e3c7bde remove cargo build options in makefile to avoid mistakes 2025-02-14 18:06:07 +01:00
165f9316e2 s3api: return Location in CompleteMultipartUpload (fix #852)
NB. The location returned is not guaranteed to work in all cases.
This already fixes the parse issue in #852.
2025-02-14 18:05:07 +01:00
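A sketch of where the new field appears (upload ID and part list come from earlier create-multipart-upload/upload-part calls; all names are placeholders):

```bash
# CompleteMultipartUpload now includes a Location field in its response.
aws --endpoint-url http://localhost:3900 s3api complete-multipart-upload \
  --bucket my-bucket --key my-key --upload-id "$UPLOAD_ID" \
  --multipart-upload file://parts.json \
  --query Location
```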
a94adf804f Merge pull request 'block manager: avoid deadlock in fix_block_location (fix #845)' (#957) from fix-845 into main
Reviewed-on: Deuxfleurs/garage#957
2025-02-14 16:53:01 +00:00
e4c9a8cd53 block manager: avoid deadlock in fix_block_location (fix #845) 2025-02-14 17:41:50 +01:00
9312c6bbcb Merge pull request 'Store data blocks only on nodes in the latest cluster layout version (fix #815)' (#956) from fix-815 into main
Reviewed-on: Deuxfleurs/garage#956
2025-02-14 15:53:16 +00:00
fdf4dad728 block resync: avoid saving blocks to draining nodes 2025-02-14 16:45:55 +01:00
6820b69f30 block manager: improve read strategy to find blocks faster 2025-02-14 16:45:55 +01:00
d0104b9f9b block manager: write blocks only to currently active layout version (fix #815)
avoid wastefully writing blocks to nodes that will discard them as soon
as the layout migration is finished
2025-02-14 16:45:55 +01:00
3fe8db9e52 Merge pull request 'web_server.rs: Added bucket domain to observability' (#608) from jpds/garage:domain-web-requests into main
Reviewed-on: Deuxfleurs/garage#608
2025-02-14 14:26:08 +00:00
627a37fe9f Merge pull request 's3 api: parse x-id query parameter and warn of any inconsistency (fix #822)' (#954) from fix-822 into main
Reviewed-on: Deuxfleurs/garage#954
2025-02-14 14:07:01 +00:00
2f55889835 add configuration option to enable/disable monitoring bucket in web metrics 2025-02-14 14:59:00 +01:00
Jonathan Davies
8b9cc5ca3f web_server.rs: Added bucket domain to observability. 2025-02-14 14:36:20 +01:00
a1533d2919 Merge pull request 'cli: return info of all nodes when doing garage stats -a (fix #814)' (#953) from fix-814 into main
Reviewed-on: Deuxfleurs/garage#953
2025-02-14 13:31:42 +00:00
c1b39d9ba1 s3 api: parse x-id query parameter and warn of any inconsistency (fix #822) 2025-02-14 14:30:58 +01:00
d84308c413 cli: return info of all nodes when doing garage stats -a (fix #814) 2025-02-14 14:11:41 +01:00
63f20bdeab Merge pull request 'db-snapshot: Add error handling to metadata snapshot creation' (#930) from handle_snapshot_errors into main
Reviewed-on: Deuxfleurs/garage#930
Reviewed-by: Armael <armael@noreply.localhost>
2025-02-14 11:52:58 +00:00
Baptiste Jonglez
a2e134f036 db-snapshot: propagate any node snapshot error through RPC call
In particular, it means that "garage meta snapshot --all" will get an exit
code of 1 if any node fails to snapshot.

This makes sure that any external tool trying to snapshot nodes (e.g. from
cron) will be aware of the failure.

Fix #920
2025-02-07 00:29:43 +01:00
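For example, a cron wrapper can now rely on the exit code; a minimal sketch (the alerting mechanism is up to the operator):

```bash
#!/bin/sh
# Exits non-zero if any node fails to snapshot, thanks to the RPC error
# propagation above, so cron's failure handling (e.g. MAILTO) kicks in.
if ! garage meta snapshot --all; then
  echo "garage metadata snapshot failed on at least one node" >&2
  exit 1
fi
```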
Baptiste Jonglez
06aa4b604f db-snapshot: Fix error reporting when using "garage meta snapshot --all"
Snapshot errors on remote nodes were not reported at all.

We now get proper error output such as:

    0fa0f35be69528ab  error: Internal error: DB error: LMDB: No space left on device (os error 28)
    88d92e2971d14bae  ok

Fix #920
2025-02-07 00:18:01 +01:00
d3226bfa91 Merge pull request 'remove uses of #[async_trait]' (#952) from remove-async-trait into main
Reviewed-on: Deuxfleurs/garage#952
2025-02-05 19:52:00 +00:00
af67626ab2 remove async_trait for TableRepair 2025-02-05 20:45:07 +01:00
5475da8ea8 remove async_trait used in generic_server.rs 2025-02-05 20:31:34 +01:00
620dc58560 remove async_trait for traits declared in garage_net 2025-02-05 20:22:16 +01:00
47e87c8739 Merge pull request 'upgrade Rust compiler and Cargo dependencies' (#951) from nix-crane into main
Reviewed-on: Deuxfleurs/garage#951
2025-02-03 17:49:00 +00:00
34599bff51 update all Cargo dependencies except AWS crates and their dependencies 2025-02-03 17:46:54 +01:00
ec1a475923 build with rust 1.82.0 2025-02-03 17:46:48 +01:00
b9df2d1ad1 Merge pull request 'compile with crane' (#950) from nix-crane into main
Reviewed-on: Deuxfleurs/garage#950
2025-02-03 15:54:54 +00:00
390a5d97fe nix, ci: build with Crane
This removes our dependency on cargo2nix, which was causing us some
issues. Whereas cargo2nix creates one Nix derivation per crate, Crane
uses only two derivations:

1. Build dependencies only
2. Build the final binary

This means that during the second step, no caching can be done. For
instance, if we do a change in garage_model, we need to recompile all of
the Garage crates including those that do not depend on garage_model.
On the upside, this allows all of the Garage crates to be built at once
using cargo build logic, which is optimized for high parallelism and
better pipelining between all of the steps of the build. All in all,
this makes most builds faster than cargo2nix.

A few other changes have been made to the build scripts and CI:

- Unit tests are now run within a Nix derivation. In fact, we have
  different derivations to run the tests using LMDB and Sqlite as
  metadata db engines.

- For debug builds, most CI steps now run in parallel (with the notable
  exception of the smoke test that runs after the build, which is
  inevitable).

- We no longer pass the GIT_VERSION argument when building debug builds
  and running the tests. This means that dev binaries and test
  binaries don't know the exact version of Garage they are from. That
  shouldn't be an issue in most cases.

- The not-dynamic.sh script has been fixed to fail if the file does not
  exist.
2025-02-03 16:39:50 +01:00
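The flake attributes referenced by the new CI steps (visible in the pipeline diff further below) sketch the resulting build entry points:

```bash
# Debug build of the whole workspace (Crane caches dependencies separately).
nix-build -j4 --attr flakePackages.dev
# Unit + functional tests, once per metadata db engine.
nix-build -j4 --attr flakePackages.tests-lmdb
nix-build -j4 --attr flakePackages.tests-sqlite
# Static release build for one architecture (version string is a placeholder).
nix-build --attr releasePackages.amd64 --argstr git_version v1.0.1
```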
4dc2bc337f Merge pull request 'woodpecker: use parallel nix-build in debug builds' (#949) from nix-parallel into main
Reviewed-on: Deuxfleurs/garage#949
2025-02-01 18:58:15 +00:00
5dd2791981 woodpecker: use parallel nix-build in debug builds 2025-02-01 19:48:01 +01:00
d601f31186 Merge pull request 'split garage_api in garage_api_{common,s3,k2v,admin}' (#947) from split-garage-api into main
Reviewed-on: Deuxfleurs/garage#947
2025-02-01 17:48:25 +00:00
e4de7bdfd5 fix ci for more test crates 2025-01-31 19:21:36 +01:00
d18c5ad0ff fix tests 2025-01-31 19:12:51 +01:00
3d5e9a027e cargo defs: simplify and fix descriptions 2025-01-31 18:54:29 +01:00
f4ca7758b4 update cargo.nix 2025-01-31 18:48:07 +01:00
4563313f87 use cargo-shear to remove many unused dependencies between crates 2025-01-31 18:47:30 +01:00
afa28706e5 split s3/cors.rs into also common/cors.rs 2025-01-31 18:42:14 +01:00
84f1db91c4 fix things up 2025-01-31 18:34:57 +01:00
9fa20d45be wip: split garage_api into garage_api_{common,s3,k2v,admin} 2025-01-31 18:18:29 +01:00
9330fd79d3 Merge pull request 'table::insert_many: avoid failure with zero items (fix #915)' (#946) from fix-915 into main
Reviewed-on: Deuxfleurs/garage#946
2025-01-31 13:10:54 +00:00
83f6928ff7 table::insert_many: avoid failure with zero items (fix #915) 2025-01-30 18:06:47 +01:00
ab71544499 Merge pull request 'api: better handling of helper errors to distinguish error codes' (#942) from fix-getkeyinfo-404 into main
Reviewed-on: Deuxfleurs/garage#942
2025-01-29 18:25:44 +00:00
991edbe02c Merge pull request 'Update doc/book/connect/repositories.md' (#941) from yatesco/garage:main into main
Reviewed-on: Deuxfleurs/garage#941
2025-01-29 18:18:59 +00:00
9f3c7c3720 api: better handling of helper errors to distinguish error codes 2025-01-29 19:14:34 +01:00
bfde9152b8 Update doc/book/operations/multi-hdd.md
trivial spelling mistake
2025-01-29 13:40:41 +00:00
7bb042f0b7 Update doc/book/connect/repositories.md
trivial spelling mistake
2025-01-29 13:34:35 +00:00
a1d081ee84 Merge pull request 's3 api: make x-amz-meta-* headers lowercase (fix #844)' (#938) from fix-844 into main
Reviewed-on: Deuxfleurs/garage#938
2025-01-27 19:32:19 +00:00
e8fa89e834 s3 api: make x-amz-meta-* headers lowercase (fix #844) 2025-01-27 19:58:06 +01:00
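A sketch of the normalized behavior (names and endpoint are placeholders): metadata keys sent with mixed case now come back lowercased.

```bash
aws --endpoint-url http://localhost:3900 s3api put-object \
  --bucket my-bucket --key obj --metadata "My-Key=some-value"

# The key is returned lowercase, e.g. {"my-key": "some-value"}.
aws --endpoint-url http://localhost:3900 s3api head-object \
  --bucket my-bucket --key obj --query Metadata
```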
beedc9fd11 Merge pull request 'snapshot: sqlite: use a subdirectory for consistency with LMDB' (#932) from baptiste/garage:snapshot_consistency_sqlite into main
Reviewed-on: Deuxfleurs/garage#932
2025-01-27 18:50:11 +00:00
Baptiste Jonglez
6d798c640f WIP: fix crash in layout computation when changing all nodes of a zone to gateway mode
This change is probably not a proper fix, somebody with more expertise on
this code should look at it.

Here is how to reproduce the crash:

- start with a layout with two zones
- move all nodes of a zone to gateway mode: `garage layout assign fea54bcc081f318 -g`
- `garage layout show` will panic with a backtrace

Fortunately, the crash is only on the RPC client side, not on the Garage
server itself, and `garage layout revert` still works to go back to the
previous state.

As far as I can tell, this bug is present since Garage 0.9.0 which
includes the new layout assignation algorithm:

  Deuxfleurs/garage#296
2025-01-27 19:33:57 +01:00
d4e3e60920 Merge pull request 'update nix crate to 0.29 and libc to 0.2.169' (#931) from neuschaefer/garage:nix into main
Reviewed-on: Deuxfleurs/garage#931
2025-01-27 18:09:51 +00:00
Baptiste Jonglez
43402c9619 snapshot: sqlite: use a subdirectory for consistency with LMDB
Currently, taking a snapshot of the metadata database with sqlite creates
a sqlite file without extension with the following format:

    snapshots/2025-01-26T15:29:17Z

This makes it hard to understand what kind of data this is, and is not
consistent with LMDB:

    snapshots/2025-01-26T15:29:17Z/data.mdb

With this change, we now get a directory with a single db.sqlite file:

    snapshots/2025-01-26T15:29:17Z/db.sqlite
2025-01-27 19:06:52 +01:00
efa6f3d85e Merge pull request 'db-snapshot: allow to set directory where snapshots are stored' (#933) from baptiste/garage:configure_metadata_snapshots_dir into main
Reviewed-on: Deuxfleurs/garage#933
2025-01-27 18:04:05 +00:00
74a1b49b13 Update Cargo.nix 2025-01-27 18:37:05 +01:00
23d57b89dc update nix crate to 0.29 and libc to 0.2.169 2025-01-27 18:37:05 +01:00
5e3e1f4453 Merge pull request 'fix problems with CI doing work multiple times' (#936) from woodpecker-simplify into main
Reviewed-on: Deuxfleurs/garage#936
2025-01-27 17:36:27 +00:00
Baptiste Jonglez
59c153d280 db-snapshot: allow to set directory where snapshots are stored
Fix #926
2025-01-27 18:33:55 +01:00
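A configuration sketch (the option name matches the documentation diff further below; the path is a placeholder):

```bash
# Store metadata snapshots on a roomier disk than the metadata SSD.
cat >> /etc/garage.toml <<'EOF'
metadata_snapshots_dir = "/var/lib/garage/snapshots"
EOF
```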
bb3e0f7d22 nix CI: reduce redundant work 2025-01-27 18:09:51 +01:00
0156e40c9d Merge pull request 'ci: fix woodpecker definitions to comply with woodpecker 3' (#935) from woodpecker3 into main
Reviewed-on: Deuxfleurs/garage#935
Reviewed-by: maximilien <me@mricher.fr>
2025-01-27 12:03:46 +00:00
f6f88065ad ci: fix woodpecker definitions to comply with woodpecker 3 2025-01-27 12:06:31 +01:00
591bd808ec Merge pull request 'doc: Fix Nix devenv setup' (#927) from fix_devenv into main
Reviewed-on: Deuxfleurs/garage#927
2025-01-23 10:20:04 +00:00
294cb99409 Merge pull request 'Fix all typos' (#928) from majst01/garage:fix-typos into main
Reviewed-on: Deuxfleurs/garage#928
Reviewed-by: maximilien <me@mricher.fr>
2025-01-17 06:06:14 +00:00
Stefan Majer
2eb9fcae20 Fix all typos 2025-01-16 13:22:00 +01:00
Baptiste Jonglez
58b9eb46fc doc: Fix Nix devenv setup
This is a hotfix to fix the doc for the current setup, see #868 for
possible future directions.
2025-01-16 10:00:12 +01:00
255b01b626 Merge pull request 'Helm chart: Add garage.existingConfigmap and replace garage.garage.toml with garage.garageTomlString' (#923) from jessebot/garage:allow-existing-configmap into main
Reviewed-on: Deuxfleurs/garage#923
Reviewed-by: maximilien <me@mricher.fr>
2025-01-15 23:53:25 +00:00
58a765c51f Minor rewording, add some more hints 2025-01-15 23:51:07 +00:00
1c431b8457 Add garage.existingConfigmap and replace garage.garage.toml with garage.garageTomlString
also moves all gotemplating back to configmap

and adds autogenerated docs via helm-docs

Signed-off-by: jessebot <jessebot@linux.com>
2025-01-15 23:51:07 +00:00
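A hedged sketch of using the new value (chart path and release name are assumptions; the value name comes from the commit message above):

```bash
# Reuse an existing ConfigMap instead of templating garage.toml in the chart.
helm upgrade --install garage ./script/helm/garage \
  --set garage.existingConfigmap=my-garage-config
```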
39ac034de5 Merge pull request 'update toolchain' (#924) from nix-update into main
Reviewed-on: Deuxfleurs/garage#924
2025-01-13 10:19:53 +00:00
8ddb0dd485 nix build: switch to upstream cargo2nix (branch release-0.11.0) 2025-01-12 18:16:23 +01:00
83887a8519 nix build: remove clippy build env that doesn't work 2025-01-12 17:51:33 +01:00
0a15db6960 nix build: update rustc to v1.78 2025-01-12 17:37:36 +01:00
295237476e fix formatting to comply with latest rustfmt 2025-01-12 17:36:25 +01:00
9d83605736 flake: update versions of nixpkgs and rust-overlay 2025-01-12 17:34:04 +01:00
4b1a7fb5e3 Merge pull request 'The version flag is required when applying a layout' (#921) from update-quickstart-docs-layout-apply into main
Reviewed-on: Deuxfleurs/garage#921
2025-01-09 00:41:35 +00:00
b6aaebaf4c The version flag is required when applying a layout
I followed the documentation and got an error if the layout's version was not specified 

```
garage layout apply

Error: Internal error:
Please pass the new layout version number to ensure that you are writing the correct version of the cluster layout.
To know the correct value of the new layout version, invoke `garage layout show` and review the proposed changes.
```

This fixes that
2025-01-08 20:30:09 +00:00
7bbc8fec50 Merge pull request 'Fix #907' (#917) from vk/garage:fix_907 into main
Reviewed-on: Deuxfleurs/garage#917
2025-01-04 16:07:40 +00:00
6689800986 Formatting with 2025-01-04 16:52:23 +01:00
d2246baab7 Merge pull request 'update flake.lock' (#918) from update-flake into main
Reviewed-on: Deuxfleurs/garage#918
2025-01-04 15:43:41 +00:00
afac1d4d4a update flake.lock 2025-01-04 16:29:42 +01:00
6ca99fd02c formatting 2025-01-04 14:46:42 +01:00
b568bb863d Fix #907 2025-01-04 12:50:10 +01:00
b8f301a61d Merge pull request 'woodpecker: use modern syntax for secrets (removes warning)' (#912) from woodpecker-fix-warnings into main
Reviewed-on: Deuxfleurs/garage#912
2024-12-23 17:41:15 +00:00
428ad2075d woodpecker: use modern syntax for secrets (removes warning) 2024-12-23 18:00:22 +01:00
3661a597fa Merge pull request 'feat: add use_local_tz configuration' (#908) from ragazenta/garage:feat/local-timezone into main
Reviewed-on: Deuxfleurs/garage#908
Reviewed-by: maximilien <me@mricher.fr>
2024-12-01 13:23:24 +00:00
0fd3c0e794 doc: add use_local_tz configuration 2024-11-25 10:35:00 +07:00
4c1bf42192 feat: add use_local_tz configuration
Used in lifecycle_worker to determine midnight time
2024-11-23 05:51:12 +07:00
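A minimal configuration sketch for the new option (documented in the reference-manual diff further below):

```bash
# Run the lifecycle worker at local midnight instead of midnight UTC.
cat >> /etc/garage.toml <<'EOF'
use_local_tz = true
EOF
```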
906c8708fd Merge pull request 'add extraVolumes and extraVolumeMounts to helm chart' (#896) from eugene-davis/garage:main into main
Reviewed-on: Deuxfleurs/garage#896
Reviewed-by: maximilien <me@mricher.fr>
2024-11-19 22:23:13 +00:00
747889a096 Merge pull request 'Update Python SDK documentation' (#887) from cryptolukas/garage:fix-python-sdk-docs into main
Reviewed-on: Deuxfleurs/garage#887
2024-11-19 09:15:03 +00:00
feb09a4bc6 Merge pull request 'doc: update mastodon media header pruning section' (#888) from teutat3s/garage:doc-update-mastodon-media into main
Reviewed-on: Deuxfleurs/garage#888
2024-11-19 09:14:34 +00:00
aa8bc6aa88 Merge pull request 'doc: add Triplebit's use-case' (#901) from jonah/garage:triplebit into main
Reviewed-on: Deuxfleurs/garage#901
Reviewed-by: maximilien <me@mricher.fr>
2024-11-17 13:43:49 +00:00
aba7902995 doc: add Triplebit's use-case 2024-11-15 16:27:46 -06:00
78de7b5bde Merge pull request 'fix bit/byte inversion in rpc secret error message' (#898) from trinity-1686a/garage:rpc-comment into main
Reviewed-on: Deuxfleurs/garage#898
2024-11-07 11:11:12 +00:00
9bd9e392ba fix bit/byte inversion in rpc secret error message 2024-11-07 00:29:26 +01:00
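For reference, the secret this message describes is 32 bytes, hex-encoded, i.e. 64 hex characters:

```bash
# Generate a valid rpc_secret value.
openssl rand -hex 32
```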
116ad479a8 add extraVolumes and extraVolumeMounts to helm chart 2024-10-26 21:14:08 +02:00
teutat3s
b6a58c5c16 doc: update mastodon media header pruning section
This is now possible since the upstream issue has been resolved.
https://github.com/mastodon/mastodon/issues/9567
2024-10-17 20:59:21 +02:00
2b0bfa9b18 the old value does not work out of the box 2024-10-14 17:20:26 +02:00
a18b3f0d1f Merge pull request 'Garage v1.0.1' (#881) from rel-v1.0.1 into main
Reviewed-on: Deuxfleurs/garage#881
2024-09-22 13:02:02 +00:00
7a143f46fc Bump to version 1.0.1 2024-09-22 14:25:32 +02:00
c731f0291a Merge pull request 'fix logic in garage layout skip-dead-nodes + fix typo (fix #879)' (#880) from fix-skip-dead-nodes into main
Reviewed-on: Deuxfleurs/garage#880
2024-09-22 12:01:49 +00:00
34453bc9c2 fix logic in garage layout skip-dead-nodes + fix typo (fix #879) 2024-09-22 13:47:27 +02:00
6da1353541 Merge pull request 'Don't fetch old values in cross-partition transactional inserts' (#877) from withings/garage:perf/kv/insert-no-return-cross-partition into main
Reviewed-on: Deuxfleurs/garage#877
2024-09-14 15:57:27 +00:00
bd71728874 Tests: don't expect old value after transactional insert 2024-09-12 10:50:53 +02:00
51ced60366 Don't fetch old values in cross-partition transactional inserts 2024-09-12 10:26:28 +02:00
586957b4b7 Merge pull request 'KV: don't retrieve values for write ops' (#873) from marvinj97/garage:perf/kv/insert-no-return into main
Reviewed-on: Deuxfleurs/garage#873
Reviewed-by: Alex <alex@adnab.me>
2024-09-10 09:06:29 +00:00
8d2bb4afeb Merge pull request 'Typo' (#875) from faust/garage:doc2 into main
Reviewed-on: Deuxfleurs/garage#875
2024-09-10 09:03:31 +00:00
c26f32b769 Typo
And remove trailing white space.
2024-09-10 09:34:59 +02:00
8062ec7b4b test: fix db tests 2024-09-04 19:24:36 +02:00
eb416a02fb dont assert deletion count in sqlite KV adapter 2024-09-04 18:51:51 +02:00
74363c9060 perf(kv): dont retrieve values for write ops
see Deuxfleurs/garage#851
2024-09-04 18:45:17 +02:00
615698df7d Merge pull request 'update compiler to Rust 1.77' (#866) from rust-1.77 into main
Reviewed-on: Deuxfleurs/garage#866
2024-08-26 19:08:00 +00:00
7061fa5a56 use rust 1.77 in nix/compile.nix 2024-08-26 19:19:16 +02:00
8881930cdd update nixpkgs and rust-overlay sources in flake.nix 2024-08-26 19:19:16 +02:00
52f6c0760b Merge pull request 'update crate time (fix #849)' (#865) from update-time into main
Reviewed-on: Deuxfleurs/garage#865
2024-08-26 16:20:04 +00:00
5b0602c7e9 update crate time (fix #849) 2024-08-26 18:11:21 +02:00
182b2af7e5 Merge pull request 'api servers: kill opened connections after SIGINT after 10s deadline (fix #806)' (#864) from exit-deadline into main
Reviewed-on: Deuxfleurs/garage#864
2024-08-25 18:34:55 +00:00
baf32c9575 api servers: kill opened connections after SIGINT after 10s deadline (fix #806) 2024-08-25 20:04:56 +02:00
3dda1ee4f6 Merge pull request 'fix build when lmdb feature is disabled (fix #800)' (#863) from fix-800 into main
Reviewed-on: Deuxfleurs/garage#863
2024-08-25 10:00:40 +00:00
aa7ce9e97c fix build when lmdb feature is disabled (fix #800) 2024-08-25 11:42:37 +02:00
8d62616ec0 Merge pull request 'layout: discard old info when it is completely out-of-date (fix #841)' (#861) from fix-841 into main
Reviewed-on: Deuxfleurs/garage#861
2024-08-24 11:12:39 +00:00
bd6fe72c06 Merge pull request 'Quick start: mention Docker (replace #803)' (#862) from dougreeder into main
Reviewed-on: Deuxfleurs/garage#862
2024-08-24 11:07:46 +00:00
4c9e8ef625 doc: clarify quick start on using docker 2024-08-24 13:07:02 +02:00
3e711bc110 Merge pull request 'don't modify postobject request before validating policy' (#850) from trinity-1686a/garage:fix-acl-postobject into main
Reviewed-on: Deuxfleurs/garage#850
2024-08-24 10:49:14 +00:00
7fb66b4944 layout: discard old info when it is completely out-of-date (fix #841) 2024-08-24 12:38:56 +02:00
679ae8bcbb Merge pull request 'Set "no read ahead" on LMDB to improve performances when "LMDB size" > "RAM size"' (#855) from fix-lmdb-no-read-ahead into main
Reviewed-on: Deuxfleurs/garage#855
Reviewed-by: Alex <alex@adnab.me>
2024-08-18 12:25:35 +00:00
2a93ad0c84 force flag "no read ahead" on LMDB 2024-08-17 21:17:15 +02:00
f190032589 don't modify postobject request before validating policy 2024-08-10 20:10:47 +02:00
3a87bd1370 Merge pull request 'Improve error message for malformed RPC secret key' (#846) from improve-secret-error-message into main
Reviewed-on: Deuxfleurs/garage#846
Reviewed-by: Quentin <quentin@dufour.io>
2024-08-09 06:47:11 +00:00
9302cd42f0 Improve error message for malformed RPC secret key 2024-08-08 23:05:24 +00:00
060ad0da32 docs: Update LMDB website 2024-08-06 21:47:14 +00:00
a5ed1161c6 Merge pull request 'Add environment variable dict to helm chart.' (#843) from Benjamin/garage:main into main
Reviewed-on: Deuxfleurs/garage#843
Reviewed-by: maximilien <me@mricher.fr>
2024-08-06 21:45:35 +00:00
Benjamin von Mossner
222674432b This commit adds an environment dict to the garage helm chart. Using it, environment variables can be set in the garage container environment, useful for setting e.g. GARAGE_ADMIN_TOKEN or GARAGE_METRICS_TOKEN 2024-07-25 11:42:13 +02:00
070a8ad110 Merge pull request 'doc: fix typo' (#831) from Armael/garage:typo into main
Reviewed-on: Deuxfleurs/garage#831
2024-06-18 12:40:32 +00:00
770384cae1 Merge pull request 'add rpc_public_addr_subnet config option' (#817) from flokli/garage:rpc_public_addr_subnet into main
Reviewed-on: Deuxfleurs/garage#817
Reviewed-by: Alex <alex@adnab.me>
2024-06-18 12:40:07 +00:00
a0f6bc5b7f add rpc_public_addr_subnet config option
In case `rpc_public_addr` is not set, but autodiscovery is used, this
allows filtering the list of automatically discovered IPs to a specific
subnet.

For example, if nodes should pick *their* IP inside a specific subnet,
but you don't want to explicitly write the IP down (as it's dynamic, or
you want to share configs across nodes), you can use this option.
2024-06-05 08:41:36 +02:00
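A hedged sketch of the option (the subnet value is a placeholder; see also the configuration-reference diff further below):

```bash
# Only announce autodiscovered RPC addresses that fall within this subnet.
cat >> /etc/garage.toml <<'EOF'
rpc_public_addr_subnet = "2001:db8:f00:b00::/64"
EOF
```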
0c3b198b22 Improves Quick Start for users not using Linux 2024-04-10 16:42:10 -04:00
171 changed files with 4832 additions and 10186 deletions

View file

@ -1,3 +0,0 @@
[target.x86_64-unknown-linux-gnu]
linker = "clang"
rustflags = ["-C", "link-arg=-fuse-ld=mold"]

View file

@ -16,32 +16,21 @@ steps:
- name: build
image: nixpkgs/nix:nixos-22.05
commands:
- nix-build --no-build-output --attr clippy.amd64 --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
- nix-build -j4 --attr flakePackages.dev
- name: unit + func tests
- name: unit + func tests (lmdb)
image: nixpkgs/nix:nixos-22.05
environment:
GARAGE_TEST_INTEGRATION_EXE: result-bin/bin/garage
GARAGE_TEST_INTEGRATION_PATH: tmp-garage-integration
commands:
- nix-build --no-build-output --attr clippy.amd64 --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
- nix-build --no-build-output --attr test.amd64
- ./result/bin/garage_db-*
- ./result/bin/garage_api-*
- ./result/bin/garage_model-*
- ./result/bin/garage_rpc-*
- ./result/bin/garage_table-*
- ./result/bin/garage_util-*
- ./result/bin/garage_web-*
- ./result/bin/garage-*
- GARAGE_TEST_INTEGRATION_DB_ENGINE=lmdb ./result/bin/integration-* || (cat tmp-garage-integration/stderr.log; false)
- nix-shell --attr ci --run "killall -9 garage" || true
- GARAGE_TEST_INTEGRATION_DB_ENGINE=sqlite ./result/bin/integration-* || (cat tmp-garage-integration/stderr.log; false)
- rm result
- rm -rv tmp-garage-integration
- nix-build -j4 --attr flakePackages.tests-lmdb
- name: unit + func tests (sqlite)
image: nixpkgs/nix:nixos-22.05
commands:
- nix-build -j4 --attr flakePackages.tests-sqlite
- name: integration tests
image: nixpkgs/nix:nixos-22.05
commands:
- nix-build --no-build-output --attr clippy.amd64 --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
- nix-build -j4 --attr flakePackages.dev
- nix-shell --attr ci --run ./script/test-smoke.sh || (cat /tmp/garage.log; false)
depends_on: [ build ]

View file

@ -9,19 +9,20 @@ depends_on:
steps:
- name: refresh-index
image: nixpkgs/nix:nixos-22.05
secrets:
- source: garagehq_aws_access_key_id
target: AWS_ACCESS_KEY_ID
- source: garagehq_aws_secret_access_key
target: AWS_SECRET_ACCESS_KEY
environment:
AWS_ACCESS_KEY_ID:
from_secret: garagehq_aws_access_key_id
AWS_SECRET_ACCESS_KEY:
from_secret: garagehq_aws_secret_access_key
commands:
- mkdir -p /etc/nix && cp nix/nix.conf /etc/nix/nix.conf
- nix-shell --attr ci --run "refresh_index"
- name: multiarch-docker
image: nixpkgs/nix:nixos-22.05
secrets:
- docker_auth
environment:
DOCKER_AUTH:
from_secret: docker_auth
commands:
- mkdir -p /root/.docker
- echo $DOCKER_AUTH > /root/.docker/config.json

View file

@ -18,13 +18,12 @@ steps:
- name: build
image: nixpkgs/nix:nixos-22.05
commands:
- nix-build --no-build-output --attr pkgs.${ARCH}.release --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
- nix-build --attr releasePackages.${ARCH} --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
- name: check is static binary
image: nixpkgs/nix:nixos-22.05
commands:
- nix-build --no-build-output --attr pkgs.${ARCH}.release --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
- nix-shell --attr ci --run "./script/not-dynamic.sh result-bin/bin/garage"
- nix-shell --attr ci --run "./script/not-dynamic.sh result/bin/garage"
- name: integration tests
image: nixpkgs/nix:nixos-22.05
@ -48,11 +47,10 @@ steps:
image: nixpkgs/nix:nixos-22.05
environment:
TARGET: "${TARGET}"
secrets:
- source: garagehq_aws_access_key_id
target: AWS_ACCESS_KEY_ID
- source: garagehq_aws_secret_access_key
target: AWS_SECRET_ACCESS_KEY
AWS_ACCESS_KEY_ID:
from_secret: garagehq_aws_access_key_id
AWS_SECRET_ACCESS_KEY:
from_secret: garagehq_aws_secret_access_key
commands:
- nix-shell --attr ci --run "to_s3"
@ -61,8 +59,8 @@ steps:
environment:
DOCKER_PLATFORM: "linux/${ARCH}"
CONTAINER_NAME: "dxflrs/${ARCH}_garage"
secrets:
- docker_auth
DOCKER_AUTH:
from_secret: docker_auth
commands:
- mkdir -p /root/.docker
- echo $DOCKER_AUTH > /root/.docker/config.json

Cargo.lock (generated): 1799 lines changed

File diff suppressed because it is too large

Cargo.nix: 7100 lines changed

File diff suppressed because it is too large

View file

@ -8,7 +8,10 @@ members = [
"src/table",
"src/block",
"src/model",
"src/api",
"src/api/common",
"src/api/s3",
"src/api/k2v",
"src/api/admin",
"src/web",
"src/garage",
"src/k2v-client",
@ -21,15 +24,18 @@ default-members = ["src/garage"]
# Internal Garage crates
format_table = { version = "0.1.1", path = "src/format-table" }
garage_api = { version = "1.0.0", path = "src/api" }
garage_block = { version = "1.0.0", path = "src/block" }
garage_db = { version = "1.0.0", path = "src/db", default-features = false }
garage_model = { version = "1.0.0", path = "src/model", default-features = false }
garage_net = { version = "1.0.0", path = "src/net" }
garage_rpc = { version = "1.0.0", path = "src/rpc" }
garage_table = { version = "1.0.0", path = "src/table" }
garage_util = { version = "1.0.0", path = "src/util" }
garage_web = { version = "1.0.0", path = "src/web" }
garage_api_common = { version = "1.0.1", path = "src/api/common" }
garage_api_admin = { version = "1.0.1", path = "src/api/admin" }
garage_api_s3 = { version = "1.0.1", path = "src/api/s3" }
garage_api_k2v = { version = "1.0.1", path = "src/api/k2v" }
garage_block = { version = "1.0.1", path = "src/block" }
garage_db = { version = "1.0.1", path = "src/db", default-features = false }
garage_model = { version = "1.0.1", path = "src/model", default-features = false }
garage_net = { version = "1.0.1", path = "src/net" }
garage_rpc = { version = "1.0.1", path = "src/rpc" }
garage_table = { version = "1.0.1", path = "src/table" }
garage_util = { version = "1.0.1", path = "src/util" }
garage_web = { version = "1.0.1", path = "src/web" }
k2v-client = { version = "0.0.4", path = "src/k2v-client" }
# External crates from crates.io
@ -46,7 +52,6 @@ chrono = "0.4"
crc32fast = "1.4"
crc32c = "0.6"
crypto-common = "0.1"
digest = "0.10"
err-derive = "0.3"
gethostname = "0.4"
git-version = "0.3.4"
@ -55,10 +60,11 @@ hexdump = "0.1"
hmac = "0.12"
idna = "0.5"
itertools = "0.12"
ipnet = "2.9.0"
lazy_static = "1.4"
md-5 = "0.10"
mktemp = "0.5"
nix = { version = "0.27", default-features = false, features = ["fs"] }
nix = { version = "0.29", default-features = false, features = ["fs"] }
nom = "7.1"
parse_duration = "2.1"
pin-project = "1.0.12"
@ -135,10 +141,8 @@ thiserror = "1.0"
assert-json-diff = "2.0"
rustc_version = "0.4.0"
static_init = "1.0"
aws-config = "1.1.4"
aws-sdk-config = "1.13"
aws-sdk-s3 = "1.14"
aws-sdk-config = "1.62"
aws-sdk-s3 = "=1.68"
[profile.dev]
#lto = "thin" # disabled for now, adds 2-4 min to each CI build

View file

@ -3,5 +3,5 @@ FROM scratch
ENV RUST_BACKTRACE=1
ENV RUST_LOG=garage=info
COPY result-bin/bin/garage /
COPY result/bin/garage /
CMD [ "/garage", "server"]

View file

@ -1,13 +1,8 @@
.PHONY: doc all release shell run1 run2 run3
.PHONY: doc all run1 run2 run3
all:
clear; cargo build
release:
nix-build --attr pkgs.amd64.release --no-build-output
shell:
nix-shell
clear
cargo build
# ----

View file

@ -3,53 +3,22 @@
with import ./nix/common.nix;
let
pkgs = import pkgsSrc { };
pkgs = import nixpkgs { };
compile = import ./nix/compile.nix;
build_debug_and_release = (target: {
debug = (compile {
inherit system target git_version pkgsSrc cargo2nixOverlay;
release = false;
}).workspace.garage { compileMode = "build"; };
release = (compile {
inherit system target git_version pkgsSrc cargo2nixOverlay;
release = true;
}).workspace.garage { compileMode = "build"; };
});
test = (rustPkgs:
pkgs.symlinkJoin {
name = "garage-tests";
paths =
builtins.map (key: rustPkgs.workspace.${key} { compileMode = "test"; })
(builtins.attrNames rustPkgs.workspace);
});
build_release = target: (compile {
inherit target system git_version nixpkgs;
crane = flake.inputs.crane;
rust-overlay = flake.inputs.rust-overlay;
release = true;
}).garage;
in {
pkgs = {
amd64 = build_debug_and_release "x86_64-unknown-linux-musl";
i386 = build_debug_and_release "i686-unknown-linux-musl";
arm64 = build_debug_and_release "aarch64-unknown-linux-musl";
arm = build_debug_and_release "armv6l-unknown-linux-musleabihf";
};
test = {
amd64 = test (compile {
inherit system git_version pkgsSrc cargo2nixOverlay;
target = "x86_64-unknown-linux-musl";
features = [
"garage/bundled-libs"
"garage/k2v"
"garage/lmdb"
"garage/sqlite"
];
});
};
clippy = {
amd64 = (compile {
inherit system git_version pkgsSrc cargo2nixOverlay;
target = "x86_64-unknown-linux-musl";
compiler = "clippy";
}).workspace.garage { compileMode = "build"; };
releasePackages = {
amd64 = build_release "x86_64-unknown-linux-musl";
i386 = build_release "i686-unknown-linux-musl";
arm64 = build_release "aarch64-unknown-linux-musl";
arm = build_release "armv6l-unknown-linux-musleabihf";
};
flakePackages = flake.packages.${system};
}

View file

@ -23,7 +23,7 @@ client = minio.Minio(
"GKyourapikey",
"abcd[...]1234",
# Force the region, this is specific to garage
region="region",
region="garage",
)
```

View file

@ -335,6 +335,7 @@ From the [official Mastodon documentation](https://docs.joinmastodon.org/admin/t
```bash
$ RAILS_ENV=production bin/tootctl media remove --days 3
$ RAILS_ENV=production bin/tootctl media remove --days 15 --prune-profiles
$ RAILS_ENV=production bin/tootctl media remove-orphans
$ RAILS_ENV=production bin/tootctl preview_cards remove --days 15
```
@ -353,8 +354,6 @@ Imports: 1.7 KB
Settings: 0 Bytes
```
Unfortunately, [old avatars and headers cannot currently be cleaned up](https://github.com/mastodon/mastodon/issues/9567).
### Migrating your data
Data migration should be done with an efficient S3 client.

View file

@ -17,7 +17,7 @@ Garage can also help you serve this content.
## Gitea
You can use Garage with Gitea to store your [git LFS](https://git-lfs.github.com/) data, your users' avatar, and their attachements.
You can use Garage with Gitea to store your [git LFS](https://git-lfs.github.com/) data, your users' avatar, and their attachments.
You can configure a different target for each data type (check `[lfs]` and `[attachment]` sections of the Gitea documentation) and you can provide a default one through the `[storage]` section.
Let's start by creating a key and a bucket (your key id and secret will be needed later, keep them somewhere):

View file

@ -96,14 +96,14 @@ to store 2 TB of data in total.
## Get a Docker image
Our docker image is currently named `dxflrs/garage` and is stored on the [Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).
We encourage you to use a fixed tag (eg. `v1.0.0`) and not the `latest` tag.
For this example, we will use the latest published version at the time of the writing which is `v1.0.0` but it's up to you
We encourage you to use a fixed tag (eg. `v1.0.1`) and not the `latest` tag.
For this example, we will use the latest published version at the time of the writing which is `v1.0.1` but it's up to you
to check [the most recent versions on the Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).
For example:
```
sudo docker pull dxflrs/garage:v1.0.0
sudo docker pull dxflrs/garage:v1.0.1
```
## Deploying and configuring Garage
@ -152,6 +152,8 @@ Check the following for your configuration files:
- Make sure `rpc_public_addr` contains the public IP address of the node you are configuring.
This parameter is optional but recommended: if your nodes have trouble communicating with
one another, consider adding it.
Alternatively, you can also set `rpc_public_addr_subnet`, which can filter
the addresses announced to other peers to a specific subnet.
- Make sure `rpc_secret` is the same value on all nodes. It should be a 32-bytes hex-encoded secret key.
You can generate such a key with `openssl rand -hex 32`.
@ -169,7 +171,7 @@ docker run \
-v /etc/garage.toml:/etc/garage.toml \
-v /var/lib/garage/meta:/var/lib/garage/meta \
-v /var/lib/garage/data:/var/lib/garage/data \
dxflrs/garage:v1.0.0
dxflrs/garage:v1.0.1
```
With this command line, Garage should be started automatically at each boot.
@ -183,7 +185,7 @@ If you want to use `docker-compose`, you may use the following `docker-compose.y
version: "3"
services:
garage:
image: dxflrs/garage:v1.0.0
image: dxflrs/garage:v1.0.1
network_mode: "host"
restart: unless-stopped
volumes:

View file

@ -50,3 +50,20 @@ locations. They use Garage themselves for the following tasks:
The Deuxfleurs Garage cluster is a multi-site cluster currently composed of
9 nodes in 3 physical locations.
### Triplebit
[Triplebit](https://www.triplebit.org) is a non-profit hosting provider and
ISP focused on improving access to privacy-related services. They use
Garage themselves for the following tasks:
- Hosting of their homepage, [privacyguides.org](https://www.privacyguides.org/), and various other static sites
- As a Mastodon object storage backend for [mstdn.party](https://mstdn.party/) and [mstdn.plus](https://mstdn.plus/)
- As a PeerTube storage backend for [neat.tube](https://neat.tube/)
- As a [Matrix media backend](https://github.com/matrix-org/synapse-s3-storage-provider)
Triplebit's Garage cluster is a multi-site cluster currently composed of
10 nodes in 3 physical locations.

View file

@ -36,7 +36,7 @@ sudo killall nix-daemon
Now you can enter our nix-shell, all the required packages will be downloaded but they will not pollute your environment outside of the shell:
```bash
nix-shell
nix-shell -A devShell
```
You can use the traditional Rust development workflow:
@ -65,8 +65,8 @@ nix-build -j $(nproc) --max-jobs auto
```
Our build has multiple parameters you might want to set:
- `release` build with release optimisations instead of debug
- `target allows` for cross compilation
- `release` to build with release optimisations instead of debug
- `target` allows for cross compilation
- `compileMode` can be set to test or bench to build a unit test runner
- `git_version` to inject the hash to display when running `garage stats`

View file

@ -21,14 +21,14 @@ data_dir = [
```
Garage will automatically balance all blocks stored by the node
among the different specified directories, proportionnally to the
among the different specified directories, proportionally to the
specified capacities.
## Updating the list of storage locations
If you add new storage locations to your `data_dir`,
Garage will not rebalance existing data between storage locations.
Newly written blocks will be balanced proportionnally to the specified capacities,
Newly written blocks will be balanced proportionally to the specified capacities,
and existing data may be moved between drives to improve balancing,
but only opportunistically when a data block is re-written (e.g. an object
is re-uploaded, or an object with a duplicate block is uploaded).

View file

@ -5,7 +5,7 @@ weight = 40
Garage is meant to work on old, second-hand hardware.
In particular, this makes it likely that some of your drives will fail, and some manual intervention will be needed.
Fear not! For Garage is fully equipped to handle drive failures, in most common cases.
Fear not! Garage is fully equipped to handle drive failures, in most common cases.
## A note on availability of Garage
@ -61,7 +61,7 @@ garage repair -a --yes blocks
This will re-synchronize blocks of data that are missing to the new HDD, reading them from copies located on other nodes.
You can check on the advancement of this process by doing the following command:
You can check on the advancement of this process by doing the following command:
```bash
garage stats -a

View file

@ -42,6 +42,13 @@ If a binary of the last version is not available for your architecture,
or if you want a build customized for your system,
you can [build Garage from source](@/documentation/cookbook/from-source.md).
If none of these options work for you, you can also run Garage in a Docker
container. When using Docker, the commands used in this guide will not work
anymore. We recommend reading the tutorial on [configuring a
multi-node cluster](@/documentation/cookbook/real-world.md) to learn about
using Garage as a Docker container. For simplicity, a minimal command to launch
Garage using Docker is provided in this quick start guide as well.
## Configuring and starting Garage
@ -85,6 +92,9 @@ metrics_token = "$(openssl rand -base64 32)"
EOF
```
See the [Configuration file format](https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/)
for complete options and values.
Now that your configuration file has been created, you may save it to the directory of your choice.
By default, Garage looks for **`/etc/garage.toml`.**
You can also store it somewhere else, but you will have to specify `-c path/to/garage.toml`
@ -111,6 +121,26 @@ garage -c path/to/garage.toml server
If you have placed the `garage.toml` file in `/etc` (its default location), you can simply run `garage server`.
Alternatively, if you cannot or do not wish to run the Garage binary directly,
you may use Docker to run Garage in a container using the following command:
```bash
docker run \
-d \
--name garaged \
-p 3900:3900 -p 3901:3901 -p 3902:3902 -p 3903:3903 \
-v /etc/garage.toml:/path/to/garage.toml \
-v /var/lib/garage/meta:/path/to/garage/meta \
-v /var/lib/garage/data:/path/to/garage/data \
dxflrs/garage:v0.9.4
```
Under Linux, you can substitute `--network host` for `-p 3900:3900 -p 3901:3901 -p 3902:3902 -p 3903:3903`
#### Troubleshooting
Ensure your configuration file, `metadata_dir` and `data_dir` are readable by the user running the `garage` server or Docker.
You can tune Garage's verbosity by setting the `RUST_LOG=` environment variable. \
Available log levels are (from less verbose to more verbose): `error`, `warn`, `info` *(default)*, `debug` and `trace`.
@ -131,6 +161,9 @@ It uses values from the TOML configuration file to find the Garage daemon runnin
local node, therefore if your configuration file is not at `/etc/garage.toml` you will
again have to specify `-c path/to/garage.toml` at each invocation.
If you are running Garage in a Docker container, you can set `alias garage="docker exec -ti <container name> /garage"`
to use the Garage binary inside your container.
If the `garage` CLI is able to correctly detect the parameters of your local Garage node,
the following command should be enough to show the status of your cluster:
@ -166,7 +199,7 @@ For instance here you could write just `garage layout assign -z dc1 -c 1G 563e`.
The layout then has to be applied to the cluster, using:
```bash
garage layout apply
garage layout apply --version 1
```
@ -316,7 +349,7 @@ Check [our s3 compatibility list](@/documentation/reference-manual/s3-compatibil
### Other tools for interacting with Garage
The following tools can also be used to send and recieve files from/to Garage:
The following tools can also be used to send and receive files from/to Garage:
- [minio-client](@/documentation/connect/cli.md#minio-client)
- [s3cmd](@/documentation/connect/cli.md#s3cmd)

View file

@ -13,9 +13,11 @@ consistency_mode = "consistent"
metadata_dir = "/var/lib/garage/meta"
data_dir = "/var/lib/garage/data"
metadata_snapshots_dir = "/var/lib/garage/snapshots"
metadata_fsync = true
data_fsync = false
disable_scrub = false
use_local_tz = false
metadata_auto_snapshot_interval = "6h"
db_engine = "lmdb"
@ -31,6 +33,9 @@ rpc_secret = "4425f5c26c5e11581d3223904324dcb5b5d5dfb14e5e7f35e38c595424f5f1e6"
rpc_bind_addr = "[::]:3901"
rpc_bind_outgoing = false
rpc_public_addr = "[fc00:1::1]:3901"
# or set rpc_public_addr_subnet to filter down autodiscovery to a subnet:
# rpc_public_addr_subnet = "2001:0db8:f00:b00::/64"
allow_world_readable_secrets = false
@ -70,6 +75,7 @@ root_domain = ".s3.garage"
[s3_web]
bind_addr = "[::]:3902"
root_domain = ".web.garage"
add_host_to_metrics = true
[admin]
api_bind_addr = "0.0.0.0:3903"
@ -96,15 +102,18 @@ Top-level configuration options:
[`data_fsync`](#data_fsync),
[`db_engine`](#db_engine),
[`disable_scrub`](#disable_scrub),
[`use_local_tz`](#use_local_tz),
[`lmdb_map_size`](#lmdb_map_size),
[`metadata_auto_snapshot_interval`](#metadata_auto_snapshot_interval),
[`metadata_dir`](#metadata_dir),
[`metadata_fsync`](#metadata_fsync),
[`metadata_snapshots_dir`](#metadata_snapshots_dir),
[`replication_factor`](#replication_factor),
[`consistency_mode`](#consistency_mode),
[`rpc_bind_addr`](#rpc_bind_addr),
[`rpc_bind_outgoing`](#rpc_bind_outgoing),
[`rpc_public_addr`](#rpc_public_addr),
[`rpc_public_addr_subnet`](#rpc_public_addr_subnet),
[`rpc_secret`/`rpc_secret_file`](#rpc_secret).
The `[consul_discovery]` section:
@ -130,6 +139,7 @@ The `[s3_api]` section:
[`s3_region`](#s3_region).
The `[s3_web]` section:
[`add_host_to_metrics`](#web_add_host_to_metrics),
[`bind_addr`](#web_bind_addr),
[`root_domain`](#web_root_domain).
@ -269,6 +279,7 @@ as the index of all objects, object version and object blocks.
Store this folder on a fast SSD drive if possible to maximize Garage's performance.
#### `data_dir` {#data_dir}
The directory in which Garage will store the data blocks of objects.
@ -289,13 +300,32 @@ data_dir = [
See [the dedicated documentation page](@/documentation/operations/multi-hdd.md)
on how to operate Garage in such a setup.
#### `metadata_snapshots_dir` (since Garage `v1.0.2`) {#metadata_snapshots_dir}
The directory in which Garage will store metadata snapshots when it
performs a snapshot of the metadata database, either when instructed to do
so via an RPC call or regularly through
[`metadata_auto_snapshot_interval`](#metadata_auto_snapshot_interval).
By default, Garage will store snapshots into a `snapshots/` subdirectory
of [`metadata_dir`](#metadata_dir). This might quickly fill up your
metadata storage space if you use snapshots, because Garage will need up
to 4x the space of the existing metadata database: each snapshot requires
roughly as much space as the original database, and Garage temporarily
needs to store up to three different snapshots before it cleans up the oldest
snapshot to go back to two stored snapshots.
To prevent filling your disk, you might want to change this setting to a
directory with ample available space, e.g. on the same storage space as
[`data_dir`](#data_dir).
#### `db_engine` (since `v0.8.0`) {#db_engine}
Since `v0.8.0`, Garage can use alternative storage backends as follows:
| DB engine | `db_engine` value | Database path |
| --------- | ----------------- | ------------- |
| [LMDB](https://www.lmdb.tech) (since `v0.8.0`, default since `v0.9.0`) | `"lmdb"` | `<metadata_dir>/db.lmdb/` |
| [LMDB](https://www.symas.com/lmdb) (since `v0.8.0`, default since `v0.9.0`) | `"lmdb"` | `<metadata_dir>/db.lmdb/` |
| [Sqlite](https://sqlite.org) (since `v0.8.0`) | `"sqlite"` | `<metadata_dir>/db.sqlite` |
| [Sled](https://sled.rs) (old default, removed since `v1.0`) | `"sled"` | `<metadata_dir>/db/` |
@ -423,6 +453,13 @@ you should delete it from the data directory and then call `garage repair
blocks` on the node to ensure that it re-obtains a copy from another node on
the network.
#### `use_local_tz` {#use_local_tz}
By default, Garage runs the lifecycle worker every day at midnight in UTC. Set the
`use_local_tz` configuration value to `true` if you want Garage to run the
lifecycle worker at midnight in your local timezone. If you have multiple nodes,
you should also ensure that each node has the same timezone configuration.
#### `block_size` {#block_size}
Garage splits stored objects in consecutive chunks of size `block_size`
@ -543,6 +580,14 @@ RPC calls. **This parameter is optional but recommended.** In case you have
a NAT that binds the RPC port to a port that is different on your public IP,
this field might help making it work.
#### `rpc_public_addr_subnet` {#rpc_public_addr_subnet}
In case `rpc_public_addr` is not set, but autodiscovery is used, this allows
filtering the list of automatically discovered IPs to a specific subnet.
For example, if nodes should pick *their* IP inside a specific subnet, but you
don't want to explicitly write the IP down (as it's dynamic, or you want to
share configs across nodes), you can use this option.
#### `bootstrap_peers` {#bootstrap_peers}
A list of peer identifiers on which to contact other Garage peers of this cluster.
@ -701,6 +746,13 @@ For instance, if `root_domain` is `web.garage.eu`, a bucket called `deuxfleurs.f
will be accessible either with hostname `deuxfleurs.fr.web.garage.eu`
or with hostname `deuxfleurs.fr`.
#### `add_host_to_metrics` {#web_add_host_to_metrics}
Whether to include the requested domain name (HTTP `Host` header) in the
Prometheus metrics of the web endpoint. This is disabled by default as the
number of possible values is not bounded and can be a source of cardinality
explosion in the exported metrics.
### The `[admin]` section

View file

@ -61,7 +61,7 @@ directed to a Garage cluster can be handled independently of one another instead
of going through a central bottleneck (the leader node).
As a consequence, requests can be handled much faster, even in cases where latency
between cluster nodes is important (see our [benchmarks](@/documentation/design/benchmarks/index.md) for data on this).
This is particularly usefull when nodes are far from one another and talk to one other through standard Internet connections.
This is particularly useful when nodes are far from one another and talk to one other through standard Internet connections.
### Web server for static websites

View file

@ -392,7 +392,7 @@ table_merkle_updater_todo_queue_length{table_name="block_ref"} 0
#### `table_sync_items_received`, `table_sync_items_sent` (counters)
Number of data items sent to/recieved from other nodes during resync procedures
Number of data items sent to/received from other nodes during resync procedures
```
table_sync_items_received{from="<remote node>",table_name="bucket_v2"} 3

View file

@ -42,7 +42,7 @@ The general principle are similar, but details have not been updated.**
A version is defined by the existence of at least one entry in the blocks table for a certain version UUID.
We must keep the following invariant: if a version exists in the blocks table, it has to be referenced in the objects table.
We explicitly manage concurrent versions of an object: the version timestamp and version UUID columns are index columns, thus we may have several concurrent versions of an object.
Important: before deleting an older version from the objects table, we must make sure that we did a successfull delete of the blocks of that version from the blocks table.
Important: before deleting an older version from the objects table, we must make sure that we did a successful delete of the blocks of that version from the blocks table.
Thus, the workflow for reading an object is as follows:
@ -95,7 +95,7 @@ Known issue: if someone is reading from a version that we want to delete and the
Usefull metadata:
- list of versions that reference this block in the Casandra table, so that we can do GC by checking in Cassandra that the lines still exist
- list of other nodes that we know have acknowledged a write of this block, usefull in the rebalancing algorithm
- list of other nodes that we know have acknowledged a write of this block, useful in the rebalancing algorithm
Write strategy: have a single thread that does all write IO so that it is serialized (or have several threads that manage independent parts of the hash space). When writing a blob, write it to a temporary file, close, then rename so that a concurrent read gets a consistent result (either not found or found with whole content).

View file

@ -68,7 +68,7 @@ The migration steps are as follows:
5. Turn off Garage 0.3
6. Backup metadata folders if you can (i.e. if you have space to do it
somewhere). Backuping data folders could also be usefull but that's much
somewhere). Backuping data folders could also be useful but that's much
harder to do. If your filesystem supports snapshots, this could be a good
time to use them.

View file

@ -37,7 +37,7 @@ There are two reasons for this:
Reminder: rules of simplicity, concerning changes to Garage's source code.
Always question what we are doing.
Never do anything just because it looks nice or because we "think" it might be usefull at some later point but without knowing precisely why/when.
Never do anything just because it looks nice or because we "think" it might be useful at some later point but without knowing precisely why/when.
Only do things that make perfect sense in the context of what we currently know.
## References

View file

@ -70,7 +70,7 @@ Example response body:
```json
{
"node": "b10c110e4e854e5aa3f4637681befac755154b20059ec163254ddbfae86b09df",
"garageVersion": "v1.0.0",
"garageVersion": "v1.0.1",
"garageFeatures": [
"k2v",
"lmdb",

View file

@ -562,7 +562,7 @@ token>", v: ["<value1>", ...] }`, with the following fields:
- in case of concurrent update and deletion, a `null` is added to the list of concurrent values
- if the `tombstones` query parameter is set to `true`, tombstones are returned
for items that have been deleted (this can be usefull for inserting after an
for items that have been deleted (this can be useful for inserting after an
item that has been deleted, so that the insert is not considered
concurrent with the delete). Tombstones are returned as tuples in the
same format with only `null` values

flake.lock (generated): 118 lines changed
View file

@ -1,38 +1,27 @@
{
"nodes": {
"cargo2nix": {
"inputs": {
"flake-compat": [
"flake-compat"
],
"flake-utils": "flake-utils",
"nixpkgs": [
"nixpkgs"
],
"rust-overlay": "rust-overlay"
},
"crane": {
"locked": {
"lastModified": 1666087781,
"narHash": "sha256-trKVdjMZ8mNkGfLcY5LsJJGtdV3xJDZnMVrkFjErlcs=",
"owner": "Alexis211",
"repo": "cargo2nix",
"rev": "a7a61179b66054904ef6a195d8da736eaaa06c36",
"lastModified": 1737689766,
"narHash": "sha256-ivVXYaYlShxYoKfSo5+y5930qMKKJ8CLcAoIBPQfJ6s=",
"owner": "ipetkov",
"repo": "crane",
"rev": "6fe74265bbb6d016d663b1091f015e2976c4a527",
"type": "github"
},
"original": {
"owner": "Alexis211",
"repo": "cargo2nix",
"rev": "a7a61179b66054904ef6a195d8da736eaaa06c36",
"owner": "ipetkov",
"repo": "crane",
"type": "github"
}
},
"flake-compat": {
"locked": {
"lastModified": 1688025799,
"narHash": "sha256-ktpB4dRtnksm9F5WawoIkEneh1nrEvuxb5lJFt1iOyw=",
"lastModified": 1717312683,
"narHash": "sha256-FrlieJH50AuvagamEvWMIE6D2OAnERuDboFDYAED/dE=",
"owner": "nix-community",
"repo": "flake-compat",
"rev": "8bf105319d44f6b9f0d764efa4fdef9f1cc9ba1c",
"rev": "38fd3954cf65ce6faf3d0d45cd26059e059f07ea",
"type": "github"
},
"original": {
@ -46,29 +35,11 @@
"systems": "systems"
},
"locked": {
"lastModified": 1681202837,
"narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "cfacdce06f30d2b68473a46042957675eebb3401",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_2": {
"inputs": {
"systems": "systems_2"
},
"locked": {
"lastModified": 1681202837,
"narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "cfacdce06f30d2b68473a46042957675eebb3401",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
@ -79,63 +50,47 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1682109806,
"narHash": "sha256-d9g7RKNShMLboTWwukM+RObDWWpHKaqTYXB48clBWXI=",
"lastModified": 1736692550,
"narHash": "sha256-7tk8xH+g0sJkKLTJFOxphJxxOjMDFMWv24nXslaU2ro=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "2362848adf8def2866fabbffc50462e929d7fffb",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1707091808,
"narHash": "sha256-LahKBAfGbY836gtpVNnWwBTIzN7yf/uYM/S0g393r0Y=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "9f2ee8c91ac42da3ae6c6a1d21555f283458247e",
"rev": "7c4869c47090dd7f9f1bdfb49a22aea026996815",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "9f2ee8c91ac42da3ae6c6a1d21555f283458247e",
"rev": "7c4869c47090dd7f9f1bdfb49a22aea026996815",
"type": "github"
}
},
"root": {
"inputs": {
"cargo2nix": "cargo2nix",
"crane": "crane",
"flake-compat": "flake-compat",
"flake-utils": [
"cargo2nix",
"flake-utils"
],
"nixpkgs": "nixpkgs_2"
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs",
"rust-overlay": "rust-overlay"
}
},
"rust-overlay": {
"inputs": {
"flake-utils": "flake-utils_2",
"nixpkgs": "nixpkgs"
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1707271822,
"narHash": "sha256-/DZsoPH5GBzOpVEGz5PgJ7vh8Q6TcrJq5u8FcBjqAfI=",
"lastModified": 1738549608,
"narHash": "sha256-GdyT9QEUSx5k/n8kILuNy83vxxdyUfJ8jL5mMpQZWfw=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "7a94fe7690d2bdfe1aab475382a505e14dc114a6",
"rev": "35c6f8c4352f995ecd53896200769f80a3e8f22d",
"type": "github"
},
"original": {
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "35c6f8c4352f995ecd53896200769f80a3e8f22d",
"type": "github"
}
},
@ -153,21 +108,6 @@
"repo": "default",
"type": "github"
}
},
"systems_2": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",

flake.nix

@ -2,89 +2,84 @@
description =
"Garage, an S3-compatible distributed object store for self-hosted deployments";
# Nixpkgs 23.11 as of 2024-02-07, has rustc v1.73
# Nixpkgs 24.11 as of 2025-01-12
inputs.nixpkgs.url =
"github:NixOS/nixpkgs/9f2ee8c91ac42da3ae6c6a1d21555f283458247e";
"github:NixOS/nixpkgs/7c4869c47090dd7f9f1bdfb49a22aea026996815";
# Rust overlay as of 2025-02-03
inputs.rust-overlay.url =
"github:oxalica/rust-overlay/35c6f8c4352f995ecd53896200769f80a3e8f22d";
inputs.rust-overlay.inputs.nixpkgs.follows = "nixpkgs";
inputs.crane.url = "github:ipetkov/crane";
inputs.flake-compat.url = "github:nix-community/flake-compat";
inputs.flake-utils.url = "github:numtide/flake-utils";
inputs.cargo2nix = {
# As of 2022-10-18: two small patches over unstable branch, one for clippy and one to fix feature detection
url = "github:Alexis211/cargo2nix/a7a61179b66054904ef6a195d8da736eaaa06c36";
# As of 2023-04-25:
# - my two patches were merged into unstable (one for clippy and one to "fix" feature detection)
# - rustc v1.66
# url = "github:cargo2nix/cargo2nix/8fb57a670f7993bfc24099c33eb9c5abb51f29a2";
# Rust overlay as of 2024-02-07
inputs.rust-overlay.url =
"github:oxalica/rust-overlay/7a94fe7690d2bdfe1aab475382a505e14dc114a6";
inputs.nixpkgs.follows = "nixpkgs";
inputs.flake-compat.follows = "flake-compat";
};
inputs.flake-utils.follows = "cargo2nix/flake-utils";
outputs = { self, nixpkgs, cargo2nix, flake-utils, ... }:
outputs = { self, nixpkgs, flake-utils, crane, rust-overlay, ... }:
let
git_version = self.lastModifiedDate;
compile = import ./nix/compile.nix;
in
flake-utils.lib.eachDefaultSystem (system:
let
pkgs = nixpkgs.legacyPackages.${system};
packageFor = target: release: (compile {
inherit system target nixpkgs crane rust-overlay release;
}).garage;
testWith = extraTestEnv: (compile {
inherit system nixpkgs crane rust-overlay extraTestEnv;
release = false;
}).garage-test;
in
{
packages =
let
packageFor = target: (compile {
inherit system git_version target;
pkgsSrc = nixpkgs;
cargo2nixOverlay = cargo2nix.overlays.default;
release = true;
}).workspace.garage { compileMode = "build"; };
in
{
# default = native release build
default = packageFor null;
# other = cross-compiled, statically-linked builds
amd64 = packageFor "x86_64-unknown-linux-musl";
i386 = packageFor "i686-unknown-linux-musl";
arm64 = packageFor "aarch64-unknown-linux-musl";
arm = packageFor "armv6l-unknown-linux-musl";
packages = {
# default = native release build
default = packageFor null true;
# <arch> = cross-compiled, statically-linked release builds
amd64 = packageFor "x86_64-unknown-linux-musl" true;
i386 = packageFor "i686-unknown-linux-musl" true;
arm64 = packageFor "aarch64-unknown-linux-musl" true;
arm = packageFor "armv6l-unknown-linux-musl" true;
# dev = native dev build
dev = packageFor null false;
# test = cargo test
tests = testWith {};
tests-lmdb = testWith {
GARAGE_TEST_INTEGRATION_DB_ENGINE = "lmdb";
};
tests-sqlite = testWith {
GARAGE_TEST_INTEGRATION_DB_ENGINE = "sqlite";
};
};
# ---- development shell, for making native builds only ----
devShells =
let
shellWithPackages = (packages: (compile {
inherit system git_version;
pkgsSrc = nixpkgs;
cargo2nixOverlay = cargo2nix.overlays.default;
}).workspaceShell { inherit packages; });
targets = compile {
inherit system nixpkgs crane rust-overlay;
};
in
{
default = shellWithPackages
(with pkgs; [
rustfmt
clang
mold
]);
default = targets.devShell;
# import the full shell using `nix develop .#full`
full = shellWithPackages (with pkgs; [
rustfmt
rust-analyzer
clang
mold
# ---- extra packages for dev tasks ----
cargo-audit
cargo-outdated
cargo-machete
nixpkgs-fmt
]);
full = pkgs.mkShell {
buildInputs = with pkgs; [
targets.toolchain
protobuf
clang
mold
# ---- extra packages for dev tasks ----
rust-analyzer
cargo-audit
cargo-outdated
cargo-machete
nixpkgs-fmt
];
};
};
});
}


@ -2,7 +2,7 @@
with import ./common.nix;
let
pkgs = import pkgsSrc { };
pkgs = import nixpkgs { };
lib = pkgs.lib;
/* Converts a key list and a value list to a set


@ -10,9 +10,9 @@ let
flake = (import flake-compat { system = builtins.currentSystem; src = ../.; });
in
rec {
pkgsSrc = flake.defaultNix.inputs.nixpkgs;
cargo2nix = flake.defaultNix.inputs.cargo2nix;
cargo2nixOverlay = cargo2nix.overlays.default;
devShells = builtins.getAttr builtins.currentSystem flake.defaultNix.devShells;
{
flake = flake.defaultNix;
nixpkgs = flake.defaultNix.inputs.nixpkgs;
devShells = flake.defaultNix.devShells.${builtins.currentSystem};
}


@ -1,164 +1,64 @@
{ system, target ? null, pkgsSrc, cargo2nixOverlay, compiler ? "rustc"
, release ? false, git_version ? null, features ? null, }:
{
/* build inputs */
nixpkgs,
crane,
rust-overlay,
/* parameters */
system,
git_version ? null,
target ? null,
release ? false,
features ? null,
extraTestEnv ? {}
}:
let
log = v: builtins.trace v v;
# NixOS and Rust/Cargo triples do not match for ARM, fix it here.
rustTarget = if target == "armv6l-unknown-linux-musleabihf" then
"arm-unknown-linux-musleabihf"
else
target;
rustTargetEnvMap = {
"x86_64-unknown-linux-musl" = "X86_64_UNKNOWN_LINUX_MUSL";
"aarch64-unknown-linux-musl" = "AARCH64_UNKNOWN_LINUX_MUSL";
"i686-unknown-linux-musl" = "I686_UNKNOWN_LINUX_MUSL";
"arm-unknown-linux-musleabihf" = "ARM_UNKNOWN_LINUX_MUSLEABIHF";
};
pkgsNative = import nixpkgs {
inherit system;
overlays = [ (import rust-overlay) ];
};
pkgs = if target != null then
import pkgsSrc {
import nixpkgs {
inherit system;
crossSystem = {
config = target;
isStatic = true;
};
overlays = [ cargo2nixOverlay ];
overlays = [ (import rust-overlay) ];
}
else
import pkgsSrc {
inherit system;
overlays = [ cargo2nixOverlay ];
};
pkgsNative;
toolchainOptions = {
rustVersion = "1.73.0";
extraRustComponents = [ "clippy" ];
};
inherit (pkgs) lib stdenv;
buildEnv = (drv:
{
rustc = drv.setBuildEnv;
clippy = ''
${drv.setBuildEnv or ""}
echo
echo --- BUILDING WITH CLIPPY ---
echo
export NIX_RUST_BUILD_FLAGS="''${NIX_RUST_BUILD_FLAGS} --deny warnings"
export RUSTC="''${CLIPPY_DRIVER}"
'';
}.${compiler});
/* Cargo2nix provides many overrides by default, you can take inspiration from them:
https://github.com/cargo2nix/cargo2nix/blob/master/overlay/overrides.nix
You can have a complete list of the available options by looking at the overriden object, mkcrate:
https://github.com/cargo2nix/cargo2nix/blob/master/overlay/mkcrate.nix
*/
packageOverrides = pkgs:
pkgs.rustBuilder.overrides.all ++ [
/* [1] We add some logic to compile our crates with clippy, it provides us many additional lints
[2] We need to alter Nix hardening to make static binaries: PIE,
Position Independent Executables seems to be supported only on amd64. Having
this flag set either 1. make our executables crash or 2. compile as dynamic on some platforms.
Here, we deactivate it. Later (find `codegenOpts`), we reactivate it for supported targets
(only amd64 curently) through the `-static-pie` flag.
PIE is a feature used by ASLR, which helps mitigate security issues.
Learn more about Nix Hardening at: https://github.com/NixOS/nixpkgs/blob/master/pkgs/build-support/cc-wrapper/add-hardening.sh
[3] We want to inject the git version while keeping the build deterministic.
As we do not want to consider the .git folder as part of the input source,
we ask the user (the CI often) to pass the value to Nix.
[4] We don't want libsodium-sys and zstd-sys to try to use pkgconfig to build against a system library.
However the features to do so get activated for some reason (due to a bug in cargo2nix?),
so disable them manually here.
*/
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage";
overrideAttrs = drv:
(if git_version != null then {
# [3]
preConfigure = ''
${drv.preConfigure or ""}
export GIT_VERSION="${git_version}"
'';
} else
{ }) // {
# [1]
setBuildEnv = (buildEnv drv);
# [2]
hardeningDisable = [ "pie" ];
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_rpc";
overrideAttrs = drv: { # [1]
setBuildEnv = (buildEnv drv);
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_db";
overrideAttrs = drv: { # [1]
setBuildEnv = (buildEnv drv);
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_util";
overrideAttrs = drv: { # [1]
setBuildEnv = (buildEnv drv);
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_table";
overrideAttrs = drv: { # [1]
setBuildEnv = (buildEnv drv);
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_block";
overrideAttrs = drv: { # [1]
setBuildEnv = (buildEnv drv);
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_model";
overrideAttrs = drv: { # [1]
setBuildEnv = (buildEnv drv);
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_api";
overrideAttrs = drv: { # [1]
setBuildEnv = (buildEnv drv);
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_web";
overrideAttrs = drv: { # [1]
setBuildEnv = (buildEnv drv);
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "k2v-client";
overrideAttrs = drv: { # [1]
setBuildEnv = (buildEnv drv);
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "libsodium-sys";
overrideArgs = old: {
features = [ ]; # [4]
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "zstd-sys";
overrideArgs = old: {
features = [ ]; # [4]
};
})
toolchainFn = (p: p.rust-bin.stable."1.82.0".default.override {
targets = lib.optionals (target != null) [ rustTarget ];
extensions = [
"rust-src"
"rustfmt"
];
});
craneLib = (crane.mkLib pkgs).overrideToolchain toolchainFn;
src = craneLib.cleanCargoSource ../.;
/* We ship some parts of the code disabled by default by putting them behind a flag.
It speeds up the compilation (when the feature is not required) and released crates have fewer dependencies by default (less attack surface, disk space, etc.).
@ -168,16 +68,15 @@ let
rootFeatures = if features != null then
features
else
([ "garage/bundled-libs" "garage/lmdb" "garage/sqlite" "garage/k2v" ] ++ (if release then [
"garage/consul-discovery"
"garage/kubernetes-discovery"
"garage/metrics"
"garage/telemetry-otlp"
"garage/syslog"
] else
[ ]));
([ "bundled-libs" "lmdb" "sqlite" "k2v" ] ++ (lib.optionals release [
"consul-discovery"
"kubernetes-discovery"
"metrics"
"telemetry-otlp"
"syslog"
]));
packageFun = import ../Cargo.nix;
featuresStr = lib.concatStringsSep "," rootFeatures;
/* We compile fully static binaries with musl to simplify deployment on most systems.
When possible, we reactivate PIE hardening (see above).
@ -188,12 +87,9 @@ let
For more information on static builds, please refer to Rust's RFC 1721.
https://rust-lang.github.io/rfcs/1721-crt-static.html#specifying-dynamicstatic-c-runtime-linkage
*/
codegenOpts = {
"armv6l-unknown-linux-musleabihf" = [
"target-feature=+crt-static"
"link-arg=-static"
]; # compile as dynamic with static-pie
codegenOptsMap = {
"x86_64-unknown-linux-musl" =
[ "target-feature=+crt-static" "link-arg=-static-pie" ];
"aarch64-unknown-linux-musl" = [
"target-feature=+crt-static"
"link-arg=-static"
@ -202,17 +98,95 @@ let
"target-feature=+crt-static"
"link-arg=-static"
]; # segfault with static-pie
"x86_64-unknown-linux-musl" =
[ "target-feature=+crt-static" "link-arg=-static-pie" ];
"armv6l-unknown-linux-musleabihf" = [
"target-feature=+crt-static"
"link-arg=-static"
]; # compile as dynamic with static-pie
};
# NixOS and Rust/Cargo triples do not match for ARM, fix it here.
rustTarget = if target == "armv6l-unknown-linux-musleabihf" then
"arm-unknown-linux-musleabihf"
else
target;
codegenOpts = if target != null then codegenOptsMap.${target} else [
"link-arg=-fuse-ld=mold"
];
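Note: whether the produced binary really linked the C runtime statically can also be observed from Rust itself, since crt-static is an ordinary target feature (see RFC 1721). A minimal check:

fn main() {
    // cfg!(target_feature = "crt-static") is resolved at compile time from
    // the -C target-feature=+crt-static flag set in codegenOptsMap above.
    if cfg!(target_feature = "crt-static") {
        println!("C runtime linked statically");
    } else {
        println!("C runtime linked dynamically");
    }
}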
in pkgs.rustBuilder.makePackageSet ({
inherit release packageFun packageOverrides codegenOpts rootFeatures;
target = rustTarget;
} // toolchainOptions)
commonArgs =
{
inherit src;
pname = "garage";
version = "dev";
strictDeps = true;
cargoExtraArgs = "--locked --features ${featuresStr}";
cargoTestExtraArgs = "--workspace";
nativeBuildInputs = [
pkgsNative.protobuf
pkgs.stdenv.cc
] ++ lib.optionals (target == null) [
pkgs.clang
pkgs.mold
];
CARGO_PROFILE = if release then "release" else "dev";
CARGO_BUILD_RUSTFLAGS =
lib.concatStringsSep
" "
(builtins.map (flag: "-C ${flag}") codegenOpts);
}
//
(if rustTarget != null then {
CARGO_BUILD_TARGET = rustTarget;
"CARGO_TARGET_${rustTargetEnvMap.${rustTarget}}_LINKER" = "${stdenv.cc.targetPrefix}cc";
HOST_CC = "${stdenv.cc.nativePrefix}cc";
TARGET_CC = "${stdenv.cc.targetPrefix}cc";
} else {
CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER = "clang";
});
in rec {
toolchain = toolchainFn pkgs;
devShell = pkgs.mkShell {
buildInputs = [
toolchain
] ++ (with pkgs; [
protobuf
clang
mold
]);
};
# ---- building garage ----
garage-deps = craneLib.buildDepsOnly commonArgs;
garage = craneLib.buildPackage (commonArgs // {
cargoArtifacts = garage-deps;
doCheck = false;
} //
(if git_version != null then {
version = git_version;
GIT_VERSION = git_version;
} else {}));
# ---- testing garage ----
garage-test-bin = craneLib.cargoBuild (commonArgs // {
cargoArtifacts = garage-deps;
pname = "garage-tests";
CARGO_PROFILE = "test";
cargoExtraArgs = "${commonArgs.cargoExtraArgs} --tests --workspace";
doCheck = false;
});
garage-test = craneLib.cargoTest (commonArgs // {
cargoArtifacts = garage-test-bin;
nativeBuildInputs = commonArgs.nativeBuildInputs ++ [
pkgs.cacert
];
} // extraTestEnv);
}


@ -11,7 +11,7 @@ PATH="${GARAGE_DEBUG}:${GARAGE_RELEASE}:${NIX_RELEASE}:$PATH"
FANCYCOLORS=("41m" "42m" "44m" "45m" "100m" "104m")
export RUST_BACKTRACE=1
export RUST_LOG=garage=info,garage_api=debug
export RUST_LOG=garage=info,garage_api_common=debug,garage_api_s3=debug
MAIN_LABEL="\e[${FANCYCOLORS[0]}[main]\e[49m"
if [ -z "$GARAGE_BIN" ]; then


@ -15,10 +15,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.5.0
version: 0.6.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "v1.0.0"
appVersion: "v1.0.1"


@ -0,0 +1,86 @@
# garage
![Version: 0.6.0](https://img.shields.io/badge/Version-0.6.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.0.1](https://img.shields.io/badge/AppVersion-v1.0.1-informational?style=flat-square)
S3-compatible object store for small self-hosted geo-distributed deployments
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| affinity | object | `{}` | |
| deployment.kind | string | `"StatefulSet"` | Switchable to DaemonSet |
| deployment.podManagementPolicy | string | `"OrderedReady"` | If using statefulset, allow Parallel or OrderedReady (default) |
| deployment.replicaCount | int | `3` | Number of StatefulSet replicas/garage nodes to start |
| environment | object | `{}` | |
| extraVolumeMounts | object | `{}` | |
| extraVolumes | object | `{}` | |
| fullnameOverride | string | `""` | |
| garage.blockSize | string | `"1048576"` | Default is 1MB. An increase can result in better performance in certain scenarios https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#block-size |
| garage.bootstrapPeers | list | `[]` | This is not required if you use the integrated kubernetes discovery |
| garage.compressionLevel | string | `"1"` | zstd compression level of stored blocks https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#compression-level |
| garage.dbEngine | string | `"lmdb"` | Can be changed for better performance on certain systems https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db-engine-since-v0-8-0 |
| garage.existingConfigMap | string | `""` | if not empty string, allow using an existing ConfigMap for the garage.toml, if set, ignores garage.toml |
| garage.garageTomlString | string | `""` | String Template for the garage configuration if set, ignores above values. Values can be templated, see https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/ |
| garage.kubernetesSkipCrd | bool | `false` | Set to true if you want to use k8s discovery but install the CRDs manually outside of the helm chart, for example if you operate at namespace level without cluster resources |
| garage.replicationMode | string | `"3"` | Defaults to 3 replicas, see the replication_mode section at https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication-mode |
| garage.rpcBindAddr | string | `"[::]:3901"` | |
| garage.rpcSecret | string | `""` | If not given, a random secret will be generated and stored in a Secret object |
| garage.s3.api.region | string | `"garage"` | |
| garage.s3.api.rootDomain | string | `".s3.garage.tld"` | |
| garage.s3.web.index | string | `"index.html"` | |
| garage.s3.web.rootDomain | string | `".web.garage.tld"` | |
| image.pullPolicy | string | `"IfNotPresent"` | |
| image.repository | string | `"dxflrs/amd64_garage"` | default to amd64 docker image |
| image.tag | string | `""` | set the image tag, please prefer using the chart version and not this to avoid compatibility issues |
| imagePullSecrets | list | `[]` | set if you need credentials to pull your custom image |
| ingress.s3.api.annotations | object | `{}` | Rely _either_ on the className or the annotation below but not both! If you want to use the className, set className: "nginx" and replace "nginx" by an Ingress controller name, examples [here](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers). |
| ingress.s3.api.enabled | bool | `false` | |
| ingress.s3.api.hosts[0] | object | `{"host":"s3.garage.tld","paths":[{"path":"/","pathType":"Prefix"}]}` | garage S3 API endpoint, to be used with awscli for example |
| ingress.s3.api.hosts[1] | object | `{"host":"*.s3.garage.tld","paths":[{"path":"/","pathType":"Prefix"}]}` | garage S3 API endpoint, DNS style bucket access |
| ingress.s3.api.labels | object | `{}` | |
| ingress.s3.api.tls | list | `[]` | |
| ingress.s3.web.annotations | object | `{}` | Rely _either_ on the className or the annotation below but not both! If you want to use the className, set className: "nginx" and replace "nginx" by an Ingress controller name, examples [here](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers). |
| ingress.s3.web.enabled | bool | `false` | |
| ingress.s3.web.hosts[0] | object | `{"host":"*.web.garage.tld","paths":[{"path":"/","pathType":"Prefix"}]}` | wildcard website access with bucket name prefix |
| ingress.s3.web.hosts[1] | object | `{"host":"mywebpage.example.com","paths":[{"path":"/","pathType":"Prefix"}]}` | specific bucket access with FQDN bucket |
| ingress.s3.web.labels | object | `{}` | |
| ingress.s3.web.tls | list | `[]` | |
| initImage.pullPolicy | string | `"IfNotPresent"` | |
| initImage.repository | string | `"busybox"` | |
| initImage.tag | string | `"stable"` | |
| monitoring.metrics.enabled | bool | `false` | If true, a service for monitoring is created with a prometheus.io/scrape annotation |
| monitoring.metrics.serviceMonitor.enabled | bool | `false` | If true, a ServiceMonitor CRD is created for a prometheus operator https://github.com/coreos/prometheus-operator |
| monitoring.metrics.serviceMonitor.interval | string | `"15s"` | |
| monitoring.metrics.serviceMonitor.labels | object | `{}` | |
| monitoring.metrics.serviceMonitor.path | string | `"/metrics"` | |
| monitoring.metrics.serviceMonitor.relabelings | list | `[]` | |
| monitoring.metrics.serviceMonitor.scheme | string | `"http"` | |
| monitoring.metrics.serviceMonitor.scrapeTimeout | string | `"10s"` | |
| monitoring.metrics.serviceMonitor.tlsConfig | object | `{}` | |
| monitoring.tracing.sink | string | `""` | specify a sink endpoint for OpenTelemetry Traces, e.g. `http://localhost:4317` |
| nameOverride | string | `""` | |
| nodeSelector | object | `{}` | |
| persistence.data.hostPath | string | `"/var/lib/garage/data"` | |
| persistence.data.size | string | `"100Mi"` | |
| persistence.enabled | bool | `true` | |
| persistence.meta.hostPath | string | `"/var/lib/garage/meta"` | |
| persistence.meta.size | string | `"100Mi"` | |
| podAnnotations | object | `{}` | additional pod annotations |
| podSecurityContext.fsGroup | int | `1000` | |
| podSecurityContext.runAsGroup | int | `1000` | |
| podSecurityContext.runAsNonRoot | bool | `true` | |
| podSecurityContext.runAsUser | int | `1000` | |
| resources | object | `{}` | |
| securityContext.capabilities | object | `{"drop":["ALL"]}` | The default security context is heavily restricted, feel free to tune it to your requirements |
| securityContext.readOnlyRootFilesystem | bool | `true` | |
| service.s3.api.port | int | `3900` | |
| service.s3.web.port | int | `3902` | |
| service.type | string | `"ClusterIP"` | You can rely on any service to expose your cluster - ClusterIP (+ Ingress) - NodePort (+ Ingress) - LoadBalancer |
| serviceAccount.annotations | object | `{}` | Annotations to add to the service account |
| serviceAccount.create | bool | `true` | Specifies whether a service account should be created |
| serviceAccount.name | string | `""` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template |
| tolerations | list | `[]` | |
----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2)


@ -1,7 +1,49 @@
{{- if not .Values.garage.existingConfigMap }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "garage.fullname" . }}-config
data:
garage.toml: |-
{{- tpl (index (index .Values.garage) "garage.toml") $ | nindent 4 }}
{{- if .Values.garage.garageTomlString }}
{{- tpl (index (index .Values.garage) "garageTomlString") $ | nindent 4 }}
{{- else }}
metadata_dir = "/mnt/meta"
data_dir = "/mnt/data"
db_engine = "{{ .Values.garage.dbEngine }}"
block_size = {{ .Values.garage.blockSize }}
replication_mode = "{{ .Values.garage.replicationMode }}"
compression_level = {{ .Values.garage.compressionLevel }}
rpc_bind_addr = "{{ .Values.garage.rpcBindAddr }}"
# rpc_secret will be populated by the init container from a k8s secret object
rpc_secret = "__RPC_SECRET_REPLACE__"
bootstrap_peers = {{ .Values.garage.bootstrapPeers }}
[kubernetes_discovery]
namespace = "{{ .Release.Namespace }}"
service_name = "{{ include "garage.fullname" . }}"
skip_crd = {{ .Values.garage.kubernetesSkipCrd }}
[s3_api]
s3_region = "{{ .Values.garage.s3.api.region }}"
api_bind_addr = "[::]:3900"
root_domain = "{{ .Values.garage.s3.api.rootDomain }}"
[s3_web]
bind_addr = "[::]:3902"
root_domain = "{{ .Values.garage.s3.web.rootDomain }}"
index = "{{ .Values.garage.s3.web.index }}"
[admin]
api_bind_addr = "[::]:3903"
{{- if .Values.monitoring.tracing.sink }}
trace_sink = "{{ .Values.monitoring.tracing.sink }}"
{{- end }}
{{- end }}
{{- end }}


@ -64,6 +64,10 @@ spec:
name: web-api
- containerPort: 3903
name: admin
{{- with .Values.environment }}
env:
{{- toYaml . | nindent 12 }}
{{- end }}
volumeMounts:
- name: meta
mountPath: /mnt/meta
@ -72,6 +76,9 @@ spec:
- name: etc
mountPath: /etc/garage.toml
subPath: garage.toml
{{- with .Values.extraVolumeMounts }}
{{- toYaml . | nindent 12 }}
{{- end }}
# TODO
# livenessProbe:
# httpGet:
@ -106,6 +113,9 @@ spec:
- name: data
emptyDir: {}
{{- end }}
{{- with .Values.extraVolumes }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}


@ -4,28 +4,30 @@
# Garage configuration. These values go to garage.toml
garage:
# Can be changed for better performance on certain systems
# -- Can be changed for better performance on certain systems
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db-engine-since-v0-8-0
dbEngine: "lmdb"
# Default is 1MB
# -- Default is 1MB
# An increase can result in better performance in certain scenarios
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#block-size
blockSize: "1048576"
# Defaults to 3 replicas, see the replication_mode section at
# -- Defaults to 3 replicas, see the replication_mode section at
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication-mode
replicationMode: "3"
# zstd compression level of stored blocks
# -- zstd compression level of stored blocks
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#compression-level
compressionLevel: "1"
rpcBindAddr: "[::]:3901"
# If not given, a random secret will be generated and stored in a Secret object
# -- If not given, a random secret will be generated and stored in a Secret object
rpcSecret: ""
# This is not required if you use the integrated kubernetes discovery
# -- This is not required if you use the integrated kubernetes discovery
bootstrapPeers: []
# -- Set to true if you want to use k8s discovery but install the CRDs manually outside
# of the helm chart, for example if you operate at namespace level without cluster resources
kubernetesSkipCrd: false
s3:
api:
@ -34,47 +36,16 @@ garage:
web:
rootDomain: ".web.garage.tld"
index: "index.html"
# Template for the garage configuration
# Values can be templated
# ref: https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/
garage.toml: |-
metadata_dir = "/mnt/meta"
data_dir = "/mnt/data"
db_engine = "{{ .Values.garage.dbEngine }}"
# -- if not empty string, allow using an existing ConfigMap for the garage.toml,
# if set, ignores garage.toml
existingConfigMap: ""
block_size = {{ .Values.garage.blockSize }}
replication_mode = "{{ .Values.garage.replicationMode }}"
compression_level = {{ .Values.garage.compressionLevel }}
rpc_bind_addr = "{{ .Values.garage.rpcBindAddr }}"
# rpc_secret will be populated by the init container from a k8s secret object
rpc_secret = "__RPC_SECRET_REPLACE__"
bootstrap_peers = {{ .Values.garage.bootstrapPeers }}
[kubernetes_discovery]
namespace = "{{ .Release.Namespace }}"
service_name = "{{ include "garage.fullname" . }}"
skip_crd = {{ .Values.garage.kubernetesSkipCrd }}
[s3_api]
s3_region = "{{ .Values.garage.s3.api.region }}"
api_bind_addr = "[::]:3900"
root_domain = "{{ .Values.garage.s3.api.rootDomain }}"
[s3_web]
bind_addr = "[::]:3902"
root_domain = "{{ .Values.garage.s3.web.rootDomain }}"
index = "{{ .Values.garage.s3.web.index }}"
[admin]
api_bind_addr = "[::]:3903"
{{- if .Values.monitoring.tracing.sink }}
trace_sink = "{{ .Values.monitoring.tracing.sink }}"
{{- end }}
# -- String Template for the garage configuration
# if set, ignores above values.
# Values can be templated,
# see https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/
garageTomlString: ""
# Data persistence
persistence:
@ -92,16 +63,18 @@ persistence:
# Deployment configuration
deployment:
# Switchable to DaemonSet
# -- Switchable to DaemonSet
kind: StatefulSet
# Number of StatefulSet replicas/garage nodes to start
# -- Number of StatefulSet replicas/garage nodes to start
replicaCount: 3
# If using statefulset, allow Parallel or OrderedReady (default)
# -- If using statefulset, allow Parallel or OrderedReady (default)
podManagementPolicy: OrderedReady
image:
# -- default to amd64 docker image
repository: dxflrs/amd64_garage
# please prefer using the chart version and not this tag
# -- set the image tag, please prefer using the chart version and not this
# to avoid compatibility issues
tag: ""
pullPolicy: IfNotPresent
@ -110,19 +83,21 @@ initImage:
tag: stable
pullPolicy: IfNotPresent
# -- set if you need credentials to pull your custom image
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
# -- Specifies whether a service account should be created
create: true
# Annotations to add to the service account
# -- Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# -- The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
# -- additional pod annotations
podAnnotations: {}
podSecurityContext:
@ -132,7 +107,7 @@ podSecurityContext:
runAsNonRoot: true
securityContext:
# The default security context is heavily restricted
# -- The default security context is heavily restricted,
# feel free to tune it to your requirements
capabilities:
drop:
@ -140,7 +115,7 @@ securityContext:
readOnlyRootFilesystem: true
service:
# You can rely on any service to expose your cluster
# -- You can rely on any service to expose your cluster
# - ClusterIP (+ Ingress)
# - NodePort (+ Ingress)
# - LoadBalancer
@ -156,20 +131,23 @@ ingress:
s3:
api:
enabled: false
# Rely either on the className or the annotation below but not both
# replace "nginx" by an Ingress controller
# you can find examples here https://kubernetes.io/docs/concepts/services-networking/ingress-controllers
# -- Rely _either_ on the className or the annotation below but not both!
# If you want to use the className, set
# className: "nginx"
# and replace "nginx" by an Ingress controller name,
# examples [here](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers).
annotations: {}
# kubernetes.io/ingress.class: "nginx"
# kubernetes.io/tls-acme: "true"
labels: {}
hosts:
- host: "s3.garage.tld" # garage S3 API endpoint
# -- garage S3 API endpoint, to be used with awscli for example
- host: "s3.garage.tld"
paths:
- path: /
pathType: Prefix
- host: "*.s3.garage.tld" # garage S3 API endpoint, DNS style bucket access
# -- garage S3 API endpoint, DNS style bucket access
- host: "*.s3.garage.tld"
paths:
- path: /
pathType: Prefix
@ -179,20 +157,23 @@ ingress:
# - kubernetes.docker.internal
web:
enabled: false
# Rely either on the className or the annotation below but not both
# replace "nginx" by an Ingress controller
# you can find examples here https://kubernetes.io/docs/concepts/services-networking/ingress-controllers
# -- Rely _either_ on the className or the annotation below but not both!
# If you want to use the className, set
# className: "nginx"
# and replace "nginx" by an Ingress controller name,
# examples [here](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers).
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
labels: {}
hosts:
- host: "*.web.garage.tld" # wildcard website access with bucket name prefix
# -- wildcard website access with bucket name prefix
- host: "*.web.garage.tld"
paths:
- path: /
pathType: Prefix
- host: "mywebpage.example.com" # specific bucket access with FQDN bucket
# -- specific bucket access with FQDN bucket
- host: "mywebpage.example.com"
paths:
- path: /
pathType: Prefix
@ -216,12 +197,18 @@ tolerations: []
affinity: {}
environment: {}
extraVolumes: {}
extraVolumeMounts: {}
monitoring:
metrics:
# If true, a service for monitoring is created with a prometheus.io/scrape annotation
# -- If true, a service for monitoring is created with a prometheus.io/scrape annotation
enabled: false
serviceMonitor:
# If true, a ServiceMonitor CRD is created for a prometheus operator
# -- If true, a ServiceMonitor CRD is created for a prometheus operator
# https://github.com/coreos/prometheus-operator
enabled: false
path: /metrics
@ -233,4 +220,5 @@ monitoring:
scrapeTimeout: 10s
relabelings: []
tracing:
# -- specify a sink endpoint for OpenTelemetry Traces, e.g. `http://localhost:4317`
sink: ""


@ -7,7 +7,12 @@ if [ "$#" -ne 1 ]; then
exit 2
fi
if file $1 | grep 'dynamically linked' 2>&1; then
if [ ! -x "$1" ]; then
echo "[fail] $1 does not exist or is not an executable"
exit 1
fi
if file "$1" | grep 'dynamically linked' 2>&1; then
echo "[fail] $1 is dynamic"
exit 1
fi


@ -3,7 +3,7 @@
with import ./nix/common.nix;
let
pkgs = import pkgsSrc {
pkgs = import nixpkgs {
inherit system;
};
winscp = (import ./nix/winscp.nix) pkgs;
@ -39,7 +39,7 @@ in
--endpoint-url https://garage.deuxfleurs.fr \
--region garage \
s3 cp \
./result-bin/bin/garage \
./result/bin/garage \
s3://garagehq.deuxfleurs.fr/_releases/''${CI_COMMIT_TAG:-$CI_COMMIT_SHA}/''${TARGET}/garage
}
@ -115,7 +115,7 @@ in
shellHook = ''
function refresh_cache {
pass show deuxfleurs/nix_priv_key > /tmp/nix-signing-key.sec
for attr in clippy.amd64 test.amd64 pkgs.{amd64,i386,arm,arm64}.release; do
for attr in pkgs.amd64.debug test.amd64 pkgs.{amd64,i386,arm,arm64}.release; do
echo "Updating cache for ''${attr}"
nix copy -j8 \
--to 's3://nix?endpoint=garage.deuxfleurs.fr&region=garage&secret-key=/tmp/nix-signing-key.sec' \

src/api/admin/Cargo.toml Normal file

@ -0,0 +1,43 @@
[package]
name = "garage_api_admin"
version = "1.0.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
description = "Admin API server crate for the Garage object store"
repository = "https://git.deuxfleurs.fr/Deuxfleurs/garage"
readme = "../../README.md"
[lib]
path = "lib.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
garage_model.workspace = true
garage_table.workspace = true
garage_util.workspace = true
garage_rpc.workspace = true
garage_api_common.workspace = true
argon2.workspace = true
async-trait.workspace = true
err-derive.workspace = true
hex.workspace = true
tracing.workspace = true
futures.workspace = true
tokio.workspace = true
http.workspace = true
hyper = { workspace = true, default-features = false, features = ["server", "http1"] }
url.workspace = true
serde.workspace = true
serde_json.workspace = true
opentelemetry.workspace = true
opentelemetry-prometheus = { workspace = true, optional = true }
prometheus = { workspace = true, optional = true }
[features]
metrics = [ "opentelemetry-prometheus", "prometheus" ]


@ -2,7 +2,6 @@ use std::collections::HashMap;
use std::sync::Arc;
use argon2::password_hash::PasswordHash;
use async_trait::async_trait;
use http::header::{ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN, ALLOW};
use hyper::{body::Incoming as IncomingBody, Request, Response, StatusCode};
@ -20,15 +19,15 @@ use garage_rpc::system::ClusterHealthStatus;
use garage_util::error::Error as GarageError;
use garage_util::socket_address::UnixOrTCPSocketAddress;
use crate::generic_server::*;
use garage_api_common::generic_server::*;
use garage_api_common::helpers::*;
use crate::admin::bucket::*;
use crate::admin::cluster::*;
use crate::admin::error::*;
use crate::admin::key::*;
use crate::admin::router_v0;
use crate::admin::router_v1::{Authorization, Endpoint};
use crate::helpers::*;
use crate::bucket::*;
use crate::cluster::*;
use crate::error::*;
use crate::key::*;
use crate::router_v0;
use crate::router_v1::{Authorization, Endpoint};
pub type ResBody = BoxBody<Error>;
@ -221,7 +220,6 @@ impl AdminApiServer {
}
}
#[async_trait]
impl ApiHandler for AdminApiServer {
const API_NAME: &'static str = "admin";
const API_NAME_DISPLAY: &'static str = "Admin";


@ -17,11 +17,12 @@ use garage_model::permission::*;
use garage_model::s3::mpu_table;
use garage_model::s3::object_table::*;
use crate::admin::api_server::ResBody;
use crate::admin::error::*;
use crate::admin::key::ApiBucketKeyPerm;
use crate::common_error::CommonError;
use crate::helpers::*;
use garage_api_common::common_error::CommonError;
use garage_api_common::helpers::*;
use crate::api_server::ResBody;
use crate::error::*;
use crate::key::ApiBucketKeyPerm;
pub async fn handle_list_buckets(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
let buckets = garage


@ -12,9 +12,10 @@ use garage_rpc::layout;
use garage_model::garage::Garage;
use crate::admin::api_server::ResBody;
use crate::admin::error::*;
use crate::helpers::{json_ok_response, parse_json_body};
use garage_api_common::helpers::{json_ok_response, parse_json_body};
use crate::api_server::ResBody;
use crate::error::*;
pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
let layout = garage.system.cluster_layout();


@ -1,20 +1,24 @@
use std::convert::TryFrom;
use err_derive::Error;
use hyper::header::HeaderValue;
use hyper::{HeaderMap, StatusCode};
pub use garage_model::helper::error::Error as HelperError;
use crate::common_error::CommonError;
pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInternalError};
use crate::generic_server::ApiError;
use crate::helpers::*;
use garage_api_common::common_error::{commonErrorDerivative, CommonError};
pub use garage_api_common::common_error::{
CommonErrorDerivative, OkOrBadRequest, OkOrInternalError,
};
use garage_api_common::generic_server::ApiError;
use garage_api_common::helpers::*;
/// Errors of this crate
#[derive(Debug, Error)]
pub enum Error {
#[error(display = "{}", _0)]
/// Error from common error
Common(CommonError),
Common(#[error(source)] CommonError),
// Category: cannot process
/// The API access key does not exist
@ -29,17 +33,21 @@ pub enum Error {
KeyAlreadyExists(String),
}
impl<T> From<T> for Error
where
CommonError: From<T>,
{
fn from(err: T) -> Self {
Error::Common(CommonError::from(err))
commonErrorDerivative!(Error);
/// FIXME: helper errors are transformed into their corresponding variants
/// in the Error struct, but in many cases a helper error should be considered
/// an internal error.
impl From<HelperError> for Error {
fn from(err: HelperError) -> Error {
match CommonError::try_from(err) {
Ok(ce) => Self::Common(ce),
Err(HelperError::NoSuchAccessKey(k)) => Self::NoSuchAccessKey(k),
Err(_) => unreachable!(),
}
}
}
impl CommonErrorDerivative for Error {}
impl Error {
fn code(&self) -> &'static str {
match self {


@ -9,9 +9,10 @@ use garage_table::*;
use garage_model::garage::Garage;
use garage_model::key_table::*;
use crate::admin::api_server::ResBody;
use crate::admin::error::*;
use crate::helpers::*;
use garage_api_common::helpers::*;
use crate::api_server::ResBody;
use crate::error::*;
pub async fn handle_list_keys(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
let res = garage


@ -1,3 +1,6 @@
#[macro_use]
extern crate tracing;
pub mod api_server;
mod error;
mod router_v0;


@ -2,8 +2,9 @@ use std::borrow::Cow;
use hyper::{Method, Request};
use crate::admin::error::*;
use crate::router_macros::*;
use garage_api_common::router_macros::*;
use crate::error::*;
router_match! {@func


@ -2,9 +2,10 @@ use std::borrow::Cow;
use hyper::{Method, Request};
use crate::admin::error::*;
use crate::admin::router_v0;
use crate::router_macros::*;
use garage_api_common::router_macros::*;
use crate::error::*;
use crate::router_v0;
pub enum Authorization {
None,

src/api/common/Cargo.toml Normal file

@ -0,0 +1,49 @@
[package]
name = "garage_api_common"
version = "1.0.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
description = "Common functions for the API server crates for the Garage object store"
repository = "https://git.deuxfleurs.fr/Deuxfleurs/garage"
readme = "../../README.md"
[lib]
path = "lib.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
garage_model.workspace = true
garage_table.workspace = true
garage_util.workspace = true
base64.workspace = true
bytes.workspace = true
chrono.workspace = true
crc32fast.workspace = true
crc32c.workspace = true
crypto-common.workspace = true
err-derive.workspace = true
hex.workspace = true
hmac.workspace = true
md-5.workspace = true
idna.workspace = true
tracing.workspace = true
nom.workspace = true
pin-project.workspace = true
sha1.workspace = true
sha2.workspace = true
futures.workspace = true
tokio.workspace = true
http.workspace = true
http-body-util.workspace = true
hyper = { workspace = true, default-features = false, features = ["server", "http1"] }
hyper-util.workspace = true
url.workspace = true
serde.workspace = true
serde_json.workspace = true
opentelemetry.workspace = true


@ -1,3 +1,5 @@
use std::convert::TryFrom;
use err_derive::Error;
use hyper::StatusCode;
@ -55,6 +57,35 @@ pub enum CommonError {
InvalidBucketName(String),
}
#[macro_export]
macro_rules! commonErrorDerivative {
( $error_struct: ident ) => {
impl From<garage_util::error::Error> for $error_struct {
fn from(err: garage_util::error::Error) -> Self {
Self::Common(CommonError::InternalError(err))
}
}
impl From<http::Error> for $error_struct {
fn from(err: http::Error) -> Self {
Self::Common(CommonError::Http(err))
}
}
impl From<hyper::Error> for $error_struct {
fn from(err: hyper::Error) -> Self {
Self::Common(CommonError::Hyper(err))
}
}
impl From<hyper::header::ToStrError> for $error_struct {
fn from(err: hyper::header::ToStrError) -> Self {
Self::Common(CommonError::InvalidHeader(err))
}
}
impl CommonErrorDerivative for $error_struct {}
};
}
pub use commonErrorDerivative;
impl CommonError {
pub fn http_status_code(&self) -> StatusCode {
match self {
@ -97,18 +128,39 @@ impl CommonError {
}
}
impl From<HelperError> for CommonError {
fn from(err: HelperError) -> Self {
impl TryFrom<HelperError> for CommonError {
type Error = HelperError;
fn try_from(err: HelperError) -> Result<Self, HelperError> {
match err {
HelperError::Internal(i) => Self::InternalError(i),
HelperError::BadRequest(b) => Self::BadRequest(b),
HelperError::InvalidBucketName(n) => Self::InvalidBucketName(n),
HelperError::NoSuchBucket(n) => Self::NoSuchBucket(n),
e => Self::bad_request(format!("{}", e)),
HelperError::Internal(i) => Ok(Self::InternalError(i)),
HelperError::BadRequest(b) => Ok(Self::BadRequest(b)),
HelperError::InvalidBucketName(n) => Ok(Self::InvalidBucketName(n)),
HelperError::NoSuchBucket(n) => Ok(Self::NoSuchBucket(n)),
e => Err(e),
}
}
}
/// This function converts HelperErrors into CommonErrors,
/// for variants that exist in CommonError.
/// This is used for helper functions that might return InvalidBucketName
/// or NoSuchBucket for instance, and we want to pass that error
/// up to our caller.
pub fn pass_helper_error(err: HelperError) -> CommonError {
match CommonError::try_from(err) {
Ok(e) => e,
Err(e) => panic!("Helper error `{}` should not have happened here", e),
}
}
pub fn helper_error_as_internal(err: HelperError) -> CommonError {
match err {
HelperError::Internal(e) => CommonError::InternalError(e),
e => CommonError::InternalError(GarageError::Message(e.to_string())),
}
}
pub trait CommonErrorDerivative: From<CommonError> {
fn internal_error<M: ToString>(msg: M) -> Self {
Self::from(CommonError::InternalError(GarageError::Message(

src/api/common/cors.rs Normal file

@ -0,0 +1,170 @@
use std::sync::Arc;
use http::header::{
ACCESS_CONTROL_ALLOW_HEADERS, ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN,
ACCESS_CONTROL_EXPOSE_HEADERS, ACCESS_CONTROL_REQUEST_HEADERS, ACCESS_CONTROL_REQUEST_METHOD,
};
use hyper::{body::Body, body::Incoming as IncomingBody, Request, Response, StatusCode};
use garage_model::bucket_table::{BucketParams, CorsRule as GarageCorsRule};
use garage_model::garage::Garage;
use crate::common_error::{
helper_error_as_internal, CommonError, OkOrBadRequest, OkOrInternalError,
};
use crate::helpers::*;
pub fn find_matching_cors_rule<'a, B>(
bucket_params: &'a BucketParams,
req: &Request<B>,
) -> Result<Option<&'a GarageCorsRule>, CommonError> {
if let Some(cors_config) = bucket_params.cors_config.get() {
if let Some(origin) = req.headers().get("Origin") {
let origin = origin.to_str()?;
let request_headers = match req.headers().get(ACCESS_CONTROL_REQUEST_HEADERS) {
Some(h) => h.to_str()?.split(',').map(|h| h.trim()).collect::<Vec<_>>(),
None => vec![],
};
return Ok(cors_config.iter().find(|rule| {
cors_rule_matches(rule, origin, req.method().as_ref(), request_headers.iter())
}));
}
}
Ok(None)
}
pub fn cors_rule_matches<'a, HI, S>(
rule: &GarageCorsRule,
origin: &'a str,
method: &'a str,
mut request_headers: HI,
) -> bool
where
HI: Iterator<Item = S>,
S: AsRef<str>,
{
rule.allow_origins.iter().any(|x| x == "*" || x == origin)
&& rule.allow_methods.iter().any(|x| x == "*" || x == method)
&& request_headers.all(|h| {
rule.allow_headers
.iter()
.any(|x| x == "*" || x == h.as_ref())
})
}
pub fn add_cors_headers(
resp: &mut Response<impl Body>,
rule: &GarageCorsRule,
) -> Result<(), http::header::InvalidHeaderValue> {
let h = resp.headers_mut();
h.insert(
ACCESS_CONTROL_ALLOW_ORIGIN,
rule.allow_origins.join(", ").parse()?,
);
h.insert(
ACCESS_CONTROL_ALLOW_METHODS,
rule.allow_methods.join(", ").parse()?,
);
h.insert(
ACCESS_CONTROL_ALLOW_HEADERS,
rule.allow_headers.join(", ").parse()?,
);
h.insert(
ACCESS_CONTROL_EXPOSE_HEADERS,
rule.expose_headers.join(", ").parse()?,
);
Ok(())
}
pub async fn handle_options_api(
garage: Arc<Garage>,
req: &Request<IncomingBody>,
bucket_name: Option<String>,
) -> Result<Response<EmptyBody>, CommonError> {
// FIXME: CORS rules of buckets with local aliases are
// not taken into account.
// If the bucket name is a global bucket name,
// we try to apply the CORS rules of that bucket.
// If a user has a local bucket name that has
// the same name, its CORS rules won't be applied
// and will be shadowed by the rules of the globally
// existing bucket (but this is inevitable because
// OPTIONS calls are not authenticated).
if let Some(bn) = bucket_name {
let helper = garage.bucket_helper();
let bucket_id = helper
.resolve_global_bucket_name(&bn)
.await
.map_err(helper_error_as_internal)?;
if let Some(id) = bucket_id {
let bucket = garage
.bucket_helper()
.get_existing_bucket(id)
.await
.map_err(helper_error_as_internal)?;
let bucket_params = bucket.state.into_option().unwrap();
handle_options_for_bucket(req, &bucket_params)
} else {
// If there is a bucket name in the request, but that name
// does not correspond to a global alias for a bucket,
// then it's either a non-existing bucket or a local bucket.
// We have no way of knowing, because the request is not
// authenticated and thus we can't resolve local aliases.
// We take the permissive approach of allowing everything,
// because we don't want to prevent web apps that use
// local bucket names from making API calls.
Ok(Response::builder()
.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
.header(ACCESS_CONTROL_ALLOW_METHODS, "*")
.status(StatusCode::OK)
.body(EmptyBody::new())?)
}
} else {
// If there is no bucket name in the request,
// we are doing a ListBuckets call, which we want to allow
// for all origins.
Ok(Response::builder()
.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
.header(ACCESS_CONTROL_ALLOW_METHODS, "GET")
.status(StatusCode::OK)
.body(EmptyBody::new())?)
}
}
pub fn handle_options_for_bucket<B>(
req: &Request<B>,
bucket_params: &BucketParams,
) -> Result<Response<EmptyBody>, CommonError> {
let origin = req
.headers()
.get("Origin")
.ok_or_bad_request("Missing Origin header")?
.to_str()?;
let request_method = req
.headers()
.get(ACCESS_CONTROL_REQUEST_METHOD)
.ok_or_bad_request("Missing Access-Control-Request-Method header")?
.to_str()?;
let request_headers = match req.headers().get(ACCESS_CONTROL_REQUEST_HEADERS) {
Some(h) => h.to_str()?.split(',').map(|h| h.trim()).collect::<Vec<_>>(),
None => vec![],
};
if let Some(cors_config) = bucket_params.cors_config.get() {
let matching_rule = cors_config
.iter()
.find(|rule| cors_rule_matches(rule, origin, request_method, request_headers.iter()));
if let Some(rule) = matching_rule {
let mut resp = Response::builder()
.status(StatusCode::OK)
.body(EmptyBody::new())?;
add_cors_headers(&mut resp, rule).ok_or_internal_error("Invalid CORS configuration")?;
return Ok(resp);
}
}
Err(CommonError::Forbidden(
"This CORS request is not allowed.".into(),
))
}
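Note: the matching predicate reduces to three membership tests. A self-contained sketch (CorsRule here is a stand-in carrying only the fields the predicate reads, not the real garage_model::bucket_table::CorsRule):

struct CorsRule {
    allow_origins: Vec<String>,
    allow_methods: Vec<String>,
    allow_headers: Vec<String>,
}

// A rule matches when the origin and the method are allowed (or "*"),
// and every header from Access-Control-Request-Headers is allowed (or "*").
fn cors_rule_matches<'a>(
    rule: &CorsRule,
    origin: &str,
    method: &str,
    mut request_headers: impl Iterator<Item = &'a str>,
) -> bool {
    rule.allow_origins.iter().any(|x| x == "*" || x == origin)
        && rule.allow_methods.iter().any(|x| x == "*" || x == method)
        && request_headers.all(|h| rule.allow_headers.iter().any(|x| x == "*" || x == h))
}

fn main() {
    let rule = CorsRule {
        allow_origins: vec!["https://app.example.com".into()],
        allow_methods: vec!["GET".into(), "PUT".into()],
        allow_headers: vec!["*".into()],
    };
    assert!(cors_rule_matches(&rule, "https://app.example.com", "PUT",
        ["content-type"].into_iter()));
    assert!(!cors_rule_matches(&rule, "https://evil.example", "GET",
        std::iter::empty()));
}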


@ -2,8 +2,7 @@ use std::convert::Infallible;
use std::fs::{self, Permissions};
use std::os::unix::fs::PermissionsExt;
use std::sync::Arc;
use async_trait::async_trait;
use std::time::Duration;
use futures::future::Future;
use futures::stream::{futures_unordered::FuturesUnordered, StreamExt};
@ -19,6 +18,7 @@ use hyper_util::rt::TokioIo;
use tokio::io::{AsyncRead, AsyncWrite};
use tokio::net::{TcpListener, TcpStream, UnixListener, UnixStream};
use tokio::sync::watch;
use tokio::time::{sleep_until, Instant};
use opentelemetry::{
global,
@ -34,7 +34,7 @@ use garage_util::socket_address::UnixOrTCPSocketAddress;
use crate::helpers::{BoxBody, ErrorBody};
pub(crate) trait ApiEndpoint: Send + Sync + 'static {
pub trait ApiEndpoint: Send + Sync + 'static {
fn name(&self) -> &'static str;
fn add_span_attributes(&self, span: SpanRef<'_>);
}
@ -45,8 +45,7 @@ pub trait ApiError: std::error::Error + Send + Sync + 'static {
fn http_body(&self, garage_region: &str, path: &str) -> ErrorBody;
}
#[async_trait]
pub(crate) trait ApiHandler: Send + Sync + 'static {
pub trait ApiHandler: Send + Sync + 'static {
const API_NAME: &'static str;
const API_NAME_DISPLAY: &'static str;
@ -54,14 +53,14 @@ pub(crate) trait ApiHandler: Send + Sync + 'static {
type Error: ApiError;
fn parse_endpoint(&self, r: &Request<IncomingBody>) -> Result<Self::Endpoint, Self::Error>;
async fn handle(
fn handle(
&self,
req: Request<IncomingBody>,
endpoint: Self::Endpoint,
) -> Result<Response<BoxBody<Self::Error>>, Self::Error>;
) -> impl Future<Output = Result<Response<BoxBody<Self::Error>>, Self::Error>> + Send;
}
pub(crate) struct ApiServer<A: ApiHandler> {
pub struct ApiServer<A: ApiHandler> {
region: String,
api_handler: A,
@ -246,13 +245,11 @@ impl<A: ApiHandler> ApiServer<A> {
// ==== helper functions ====
#[async_trait]
pub trait Accept: Send + Sync + 'static {
type Stream: AsyncRead + AsyncWrite + Send + Sync + 'static;
async fn accept(&self) -> std::io::Result<(Self::Stream, String)>;
fn accept(&self) -> impl Future<Output = std::io::Result<(Self::Stream, String)>> + Send;
}
#[async_trait]
impl Accept for TcpListener {
type Stream = TcpStream;
async fn accept(&self) -> std::io::Result<(Self::Stream, String)> {
@ -264,7 +261,6 @@ impl Accept for TcpListener {
pub struct UnixListenerOn(pub UnixListener, pub String);
#[async_trait]
impl Accept for UnixListenerOn {
type Stream = UnixStream;
async fn accept(&self) -> std::io::Result<(Self::Stream, String)> {
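Note: these hunks drop #[async_trait] in favor of trait methods that return impl Future<...> + Send, which is native in traits since Rust 1.75 and avoids one Box<dyn Future> allocation per call. The shape of the pattern, with illustrative names:

use std::future::Future;

trait Accepter: Send + Sync + 'static {
    // Declaring `+ Send` here forces implementors' futures to be Send,
    // which #[async_trait] used to guarantee via boxing.
    fn accept(&self) -> impl Future<Output = std::io::Result<String>> + Send;
}

struct Dummy;

impl Accepter for Dummy {
    // A plain async fn in the impl block satisfies the declaration.
    async fn accept(&self) -> std::io::Result<String> {
        Ok("127.0.0.1:0".to_string())
    }
}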
@ -291,7 +287,7 @@ where
let connection_collector = tokio::spawn({
let server_name = server_name.clone();
async move {
let mut connections = FuturesUnordered::new();
let mut connections = FuturesUnordered::<tokio::task::JoinHandle<()>>::new();
loop {
let collect_next = async {
if connections.is_empty() {
@ -312,23 +308,34 @@ where
}
}
}
if !connections.is_empty() {
let deadline = Instant::now() + Duration::from_secs(10);
while !connections.is_empty() {
info!(
"{} server: {} connections still open",
"{} server: {} connections still open, deadline in {:.2}s",
server_name,
connections.len()
connections.len(),
(deadline - Instant::now()).as_secs_f32(),
);
while let Some(conn_res) = connections.next().await {
trace!(
"{} server: HTTP connection finished: {:?}",
server_name,
conn_res
);
info!(
"{} server: {} connections still open",
server_name,
connections.len()
);
tokio::select! {
conn_res = connections.next() => {
trace!(
"{} server: HTTP connection finished: {:?}",
server_name,
conn_res.unwrap(),
);
}
_ = sleep_until(deadline) => {
warn!("{} server: exit deadline reached with {} connections still open, killing them now",
server_name,
connections.len());
for conn in connections.iter() {
conn.abort();
}
for conn in connections {
assert!(conn.await.unwrap_err().is_cancelled());
}
break;
}
}
}
}
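Note: the collector now bounds connection draining. Instead of waiting indefinitely for open HTTP connections to finish, it waits up to a deadline and then aborts the stragglers. A self-contained sketch of that select/abort pattern (the real code above also logs each finished connection):

use std::time::Duration;
use futures::stream::{FuturesUnordered, StreamExt};
use tokio::time::{sleep_until, Instant};

#[tokio::main]
async fn main() {
    // Three tasks standing in for open connections (0s, 7s and 14s of work).
    let mut conns: FuturesUnordered<tokio::task::JoinHandle<()>> = (0..3u64)
        .map(|i| tokio::spawn(async move {
            tokio::time::sleep(Duration::from_secs(i * 7)).await;
        }))
        .collect();

    let deadline = Instant::now() + Duration::from_secs(10);
    while !conns.is_empty() {
        tokio::select! {
            // A connection finished on its own before the deadline.
            _ = conns.next() => {}
            // Deadline reached: abort what is left, then reap the
            // cancellations so no task is left detached.
            _ = sleep_until(deadline) => {
                for c in conns.iter() {
                    c.abort();
                }
                for c in conns {
                    assert!(c.await.unwrap_err().is_cancelled());
                }
                break;
            }
        }
    }
}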


@ -363,9 +363,9 @@ mod tests {
}
#[derive(Serialize)]
pub(crate) struct CustomApiErrorBody {
pub(crate) code: String,
pub(crate) message: String,
pub(crate) region: String,
pub(crate) path: String,
pub struct CustomApiErrorBody {
pub code: String,
pub message: String,
pub region: String,
pub path: String,
}

src/api/common/lib.rs Normal file

@ -0,0 +1,12 @@
//! Crate for serving an S3-compatible API
#[macro_use]
extern crate tracing;
pub mod common_error;
pub mod cors;
pub mod encoding;
pub mod generic_server;
pub mod helpers;
pub mod router_macros;
pub mod signature;


@ -1,5 +1,6 @@
/// This macro is used to generate very repetitive match {} blocks in this module
/// It is _not_ made to be used anywhere else
#[macro_export]
macro_rules! router_match {
(@match $enum:expr , [ $($endpoint:ident,)* ]) => {{
// usage: router_match {@match my_enum, [ VariantWithField1, VariantWithField2 ..] }
@ -133,6 +134,7 @@ macro_rules! router_match {
/// This macro is used to generate part of the code in this module. It must be called only once, and
/// is useless outside of this module.
#[macro_export]
macro_rules! generateQueryParameters {
(
keywords: [ $($kw_param:expr => $kw_name: ident),* ],
@ -204,7 +206,7 @@ macro_rules! generateQueryParameters {
}
/// Get an error message in case not all parameters were used when extracting them to
/// build an Enpoint variant
/// build an Endpoint variant
fn nonempty_message(&self) -> Option<&str> {
if self.keyword.is_some() {
Some("Keyword not used")
@ -220,5 +222,5 @@ macro_rules! generateQueryParameters {
}
}
pub(crate) use generateQueryParameters;
pub(crate) use router_match;
pub use generateQueryParameters;
pub use router_match;


@ -0,0 +1,135 @@
use std::sync::Mutex;
use futures::prelude::*;
use futures::stream::BoxStream;
use http_body_util::{BodyExt, StreamBody};
use hyper::body::{Bytes, Frame};
use serde::Deserialize;
use tokio::sync::mpsc;
use tokio::task;
use super::*;
use crate::signature::checksum::*;
pub struct ReqBody {
// why need mutex to be sync??
pub(crate) stream: Mutex<BoxStream<'static, Result<Frame<Bytes>, Error>>>,
pub(crate) checksummer: Checksummer,
pub(crate) expected_checksums: ExpectedChecksums,
pub(crate) trailer_algorithm: Option<ChecksumAlgorithm>,
}
pub type StreamingChecksumReceiver = task::JoinHandle<Result<Checksums, Error>>;
impl ReqBody {
pub fn add_expected_checksums(&mut self, more: ExpectedChecksums) {
if more.md5.is_some() {
self.expected_checksums.md5 = more.md5;
}
if more.sha256.is_some() {
self.expected_checksums.sha256 = more.sha256;
}
if more.extra.is_some() {
self.expected_checksums.extra = more.extra;
}
self.checksummer.add_expected(&self.expected_checksums);
}
pub fn add_md5(&mut self) {
self.checksummer.add_md5();
}
// ============ non-streaming =============
pub async fn json<T: for<'a> Deserialize<'a>>(self) -> Result<T, Error> {
let body = self.collect().await?;
let resp: T = serde_json::from_slice(&body).ok_or_bad_request("Invalid JSON")?;
Ok(resp)
}
pub async fn collect(self) -> Result<Bytes, Error> {
self.collect_with_checksums().await.map(|(b, _)| b)
}
pub async fn collect_with_checksums(mut self) -> Result<(Bytes, Checksums), Error> {
let stream: BoxStream<_> = self.stream.into_inner().unwrap();
let bytes = BodyExt::collect(StreamBody::new(stream)).await?.to_bytes();
self.checksummer.update(&bytes);
let checksums = self.checksummer.finalize();
checksums.verify(&self.expected_checksums)?;
Ok((bytes, checksums))
}
// ============ streaming =============
pub fn streaming_with_checksums(
self,
) -> (
BoxStream<'static, Result<Bytes, Error>>,
StreamingChecksumReceiver,
) {
let Self {
stream,
mut checksummer,
mut expected_checksums,
trailer_algorithm,
} = self;
let (frame_tx, mut frame_rx) = mpsc::channel::<Frame<Bytes>>(5);
let join_checksums = tokio::spawn(async move {
while let Some(frame) = frame_rx.recv().await {
match frame.into_data() {
Ok(data) => {
checksummer = tokio::task::spawn_blocking(move || {
checksummer.update(&data);
checksummer
})
.await
.unwrap()
}
Err(frame) => {
let trailers = frame.into_trailers().unwrap();
let algo = trailer_algorithm.unwrap();
expected_checksums.extra = Some(extract_checksum_value(&trailers, algo)?);
break;
}
}
}
if trailer_algorithm.is_some() && expected_checksums.extra.is_none() {
return Err(Error::bad_request("trailing checksum was not sent"));
}
let checksums = checksummer.finalize();
checksums.verify(&expected_checksums)?;
Ok(checksums)
});
let stream: BoxStream<_> = stream.into_inner().unwrap();
let stream = stream.filter_map(move |x| {
let frame_tx = frame_tx.clone();
async move {
match x {
Err(e) => Some(Err(e)),
Ok(frame) => {
if frame.is_data() {
let data = frame.data_ref().unwrap().clone();
let _ = frame_tx.send(frame).await;
Some(Ok(data))
} else {
let _ = frame_tx.send(frame).await;
None
}
}
}
}
});
(stream.boxed(), join_checksums)
}
}
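
For orientation, a minimal sketch of how a PutObject-style handler could drive this body type, assuming only the items defined above (ReqBody, Checksums, Error); write_block is a hypothetical storage primitive, not part of this diff:

use futures::TryStreamExt;

async fn store_object(body: ReqBody) -> Result<Checksums, Error> {
    let (mut stream, checksums_handle) = body.streaming_with_checksums();

    // Forward data frames to storage while checksums are computed in the
    // background task spawned by streaming_with_checksums().
    while let Some(bytes) = stream.try_next().await? {
        write_block(&bytes).await?; // hypothetical storage call
    }

    // Joining the handle is where expected-checksum mismatches and a missing
    // trailing checksum surface as errors.
    checksums_handle.await.unwrap()
}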

View file

@ -11,11 +11,12 @@ use sha2::Sha256;
use http::{HeaderMap, HeaderName, HeaderValue};
use garage_util::data::*;
use garage_util::error::OkOrMessage;
use garage_model::s3::object_table::*;
use super::*;
use crate::s3::error::*;
pub use garage_model::s3::object_table::{ChecksumAlgorithm, ChecksumValue};
pub const CONTENT_MD5: HeaderName = HeaderName::from_static("content-md5");
pub const X_AMZ_CHECKSUM_ALGORITHM: HeaderName =
HeaderName::from_static("x-amz-checksum-algorithm");
@ -31,8 +32,8 @@ pub type Md5Checksum = [u8; 16];
pub type Sha1Checksum = [u8; 20];
pub type Sha256Checksum = [u8; 32];
#[derive(Debug, Default)]
pub(crate) struct ExpectedChecksums {
#[derive(Debug, Default, Clone)]
pub struct ExpectedChecksums {
// base64-encoded md5 (content-md5 header)
pub md5: Option<String>,
// content_sha256 (as a Hash / FixedBytes32)
@ -41,7 +42,7 @@ pub(crate) struct ExpectedChecksums {
pub extra: Option<ChecksumValue>,
}
pub(crate) struct Checksummer {
pub struct Checksummer {
pub crc32: Option<Crc32>,
pub crc32c: Option<Crc32c>,
pub md5: Option<Md5>,
@ -50,7 +51,7 @@ pub(crate) struct Checksummer {
}
#[derive(Default)]
pub(crate) struct Checksums {
pub struct Checksums {
pub crc32: Option<Crc32Checksum>,
pub crc32c: Option<Crc32cChecksum>,
pub md5: Option<Md5Checksum>,
@ -59,34 +60,48 @@ pub(crate) struct Checksums {
}
impl Checksummer {
pub(crate) fn init(expected: &ExpectedChecksums, require_md5: bool) -> Self {
let mut ret = Self {
pub fn new() -> Self {
Self {
crc32: None,
crc32c: None,
md5: None,
sha1: None,
sha256: None,
};
}
}
if expected.md5.is_some() || require_md5 {
ret.md5 = Some(Md5::new());
}
if expected.sha256.is_some() || matches!(&expected.extra, Some(ChecksumValue::Sha256(_))) {
ret.sha256 = Some(Sha256::new());
}
if matches!(&expected.extra, Some(ChecksumValue::Crc32(_))) {
ret.crc32 = Some(Crc32::new());
}
if matches!(&expected.extra, Some(ChecksumValue::Crc32c(_))) {
ret.crc32c = Some(Crc32c::default());
}
if matches!(&expected.extra, Some(ChecksumValue::Sha1(_))) {
ret.sha1 = Some(Sha1::new());
pub fn init(expected: &ExpectedChecksums, add_md5: bool) -> Self {
let mut ret = Self::new();
ret.add_expected(expected);
if add_md5 {
ret.add_md5();
}
ret
}
pub(crate) fn add(mut self, algo: Option<ChecksumAlgorithm>) -> Self {
pub fn add_md5(&mut self) {
self.md5 = Some(Md5::new());
}
pub fn add_expected(&mut self, expected: &ExpectedChecksums) {
if expected.md5.is_some() {
self.md5 = Some(Md5::new());
}
if expected.sha256.is_some() || matches!(&expected.extra, Some(ChecksumValue::Sha256(_))) {
self.sha256 = Some(Sha256::new());
}
if matches!(&expected.extra, Some(ChecksumValue::Crc32(_))) {
self.crc32 = Some(Crc32::new());
}
if matches!(&expected.extra, Some(ChecksumValue::Crc32c(_))) {
self.crc32c = Some(Crc32c::default());
}
if matches!(&expected.extra, Some(ChecksumValue::Sha1(_))) {
self.sha1 = Some(Sha1::new());
}
}
pub fn add(mut self, algo: Option<ChecksumAlgorithm>) -> Self {
match algo {
Some(ChecksumAlgorithm::Crc32) => {
self.crc32 = Some(Crc32::new());
@ -105,7 +120,7 @@ impl Checksummer {
self
}
pub(crate) fn update(&mut self, bytes: &[u8]) {
pub fn update(&mut self, bytes: &[u8]) {
if let Some(crc32) = &mut self.crc32 {
crc32.update(bytes);
}
@ -123,7 +138,7 @@ impl Checksummer {
}
}
pub(crate) fn finalize(self) -> Checksums {
pub fn finalize(self) -> Checksums {
Checksums {
crc32: self.crc32.map(|x| u32::to_be_bytes(x.finalize())),
crc32c: self
@ -183,153 +198,56 @@ impl Checksums {
// ----
#[derive(Default)]
pub(crate) struct MultipartChecksummer {
pub md5: Md5,
pub extra: Option<MultipartExtraChecksummer>,
}
pub(crate) enum MultipartExtraChecksummer {
Crc32(Crc32),
Crc32c(Crc32c),
Sha1(Sha1),
Sha256(Sha256),
}
impl MultipartChecksummer {
pub(crate) fn init(algo: Option<ChecksumAlgorithm>) -> Self {
Self {
md5: Md5::new(),
extra: match algo {
None => None,
Some(ChecksumAlgorithm::Crc32) => {
Some(MultipartExtraChecksummer::Crc32(Crc32::new()))
}
Some(ChecksumAlgorithm::Crc32c) => {
Some(MultipartExtraChecksummer::Crc32c(Crc32c::default()))
}
Some(ChecksumAlgorithm::Sha1) => Some(MultipartExtraChecksummer::Sha1(Sha1::new())),
Some(ChecksumAlgorithm::Sha256) => {
Some(MultipartExtraChecksummer::Sha256(Sha256::new()))
}
},
}
}
pub(crate) fn update(
&mut self,
etag: &str,
checksum: Option<ChecksumValue>,
) -> Result<(), Error> {
self.md5
.update(&hex::decode(&etag).ok_or_message("invalid etag hex")?);
match (&mut self.extra, checksum) {
(None, _) => (),
(
Some(MultipartExtraChecksummer::Crc32(ref mut crc32)),
Some(ChecksumValue::Crc32(x)),
) => {
crc32.update(&x);
}
(
Some(MultipartExtraChecksummer::Crc32c(ref mut crc32c)),
Some(ChecksumValue::Crc32c(x)),
) => {
crc32c.write(&x);
}
(Some(MultipartExtraChecksummer::Sha1(ref mut sha1)), Some(ChecksumValue::Sha1(x))) => {
sha1.update(&x);
}
(
Some(MultipartExtraChecksummer::Sha256(ref mut sha256)),
Some(ChecksumValue::Sha256(x)),
) => {
sha256.update(&x);
}
(Some(_), b) => {
return Err(Error::internal_error(format!(
"part checksum was not computed correctly, got: {:?}",
b
)))
}
}
Ok(())
}
pub(crate) fn finalize(self) -> (Md5Checksum, Option<ChecksumValue>) {
let md5 = self.md5.finalize()[..].try_into().unwrap();
let extra = match self.extra {
None => None,
Some(MultipartExtraChecksummer::Crc32(crc32)) => {
Some(ChecksumValue::Crc32(u32::to_be_bytes(crc32.finalize())))
}
Some(MultipartExtraChecksummer::Crc32c(crc32c)) => Some(ChecksumValue::Crc32c(
u32::to_be_bytes(u32::try_from(crc32c.finish()).unwrap()),
)),
Some(MultipartExtraChecksummer::Sha1(sha1)) => {
Some(ChecksumValue::Sha1(sha1.finalize()[..].try_into().unwrap()))
}
Some(MultipartExtraChecksummer::Sha256(sha256)) => Some(ChecksumValue::Sha256(
sha256.finalize()[..].try_into().unwrap(),
)),
};
(md5, extra)
pub fn parse_checksum_algorithm(algo: &str) -> Result<ChecksumAlgorithm, Error> {
match algo {
"CRC32" => Ok(ChecksumAlgorithm::Crc32),
"CRC32C" => Ok(ChecksumAlgorithm::Crc32c),
"SHA1" => Ok(ChecksumAlgorithm::Sha1),
"SHA256" => Ok(ChecksumAlgorithm::Sha256),
_ => Err(Error::bad_request("invalid checksum algorithm")),
}
}
// ----
/// Extract the value of the x-amz-checksum-algorithm header
pub(crate) fn request_checksum_algorithm(
pub fn request_checksum_algorithm(
headers: &HeaderMap<HeaderValue>,
) -> Result<Option<ChecksumAlgorithm>, Error> {
match headers.get(X_AMZ_CHECKSUM_ALGORITHM) {
None => Ok(None),
Some(x) if x == "CRC32" => Ok(Some(ChecksumAlgorithm::Crc32)),
Some(x) if x == "CRC32C" => Ok(Some(ChecksumAlgorithm::Crc32c)),
Some(x) if x == "SHA1" => Ok(Some(ChecksumAlgorithm::Sha1)),
Some(x) if x == "SHA256" => Ok(Some(ChecksumAlgorithm::Sha256)),
Some(x) => parse_checksum_algorithm(x.to_str()?).map(Some),
}
}
pub fn request_trailer_checksum_algorithm(
headers: &HeaderMap<HeaderValue>,
) -> Result<Option<ChecksumAlgorithm>, Error> {
match headers.get(X_AMZ_TRAILER).map(|x| x.to_str()).transpose()? {
None => Ok(None),
Some(x) if x == X_AMZ_CHECKSUM_CRC32 => Ok(Some(ChecksumAlgorithm::Crc32)),
Some(x) if x == X_AMZ_CHECKSUM_CRC32C => Ok(Some(ChecksumAlgorithm::Crc32c)),
Some(x) if x == X_AMZ_CHECKSUM_SHA1 => Ok(Some(ChecksumAlgorithm::Sha1)),
Some(x) if x == X_AMZ_CHECKSUM_SHA256 => Ok(Some(ChecksumAlgorithm::Sha256)),
_ => Err(Error::bad_request("invalid checksum algorithm")),
}
}
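
A small sketch of the trailer lookup above: when a client announces `x-amz-trailer: x-amz-checksum-crc32c`, the CRC32C hasher can be enabled before any body bytes arrive (names as defined in this diff):

use http::{HeaderMap, HeaderValue};

fn trailer_lookup_demo() {
    let mut headers = HeaderMap::new();
    headers.insert(
        X_AMZ_TRAILER,
        HeaderValue::from_static("x-amz-checksum-crc32c"),
    );
    let algo = request_trailer_checksum_algorithm(&headers).unwrap();
    assert!(matches!(algo, Some(ChecksumAlgorithm::Crc32c)));
}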
/// Extract the value of any of the x-amz-checksum-* headers
pub(crate) fn request_checksum_value(
pub fn request_checksum_value(
headers: &HeaderMap<HeaderValue>,
) -> Result<Option<ChecksumValue>, Error> {
let mut ret = vec![];
if let Some(crc32_str) = headers.get(X_AMZ_CHECKSUM_CRC32) {
let crc32 = BASE64_STANDARD
.decode(&crc32_str)
.ok()
.and_then(|x| x.try_into().ok())
.ok_or_bad_request("invalid x-amz-checksum-crc32 header")?;
ret.push(ChecksumValue::Crc32(crc32))
if headers.contains_key(X_AMZ_CHECKSUM_CRC32) {
ret.push(extract_checksum_value(headers, ChecksumAlgorithm::Crc32)?);
}
if let Some(crc32c_str) = headers.get(X_AMZ_CHECKSUM_CRC32C) {
let crc32c = BASE64_STANDARD
.decode(&crc32c_str)
.ok()
.and_then(|x| x.try_into().ok())
.ok_or_bad_request("invalid x-amz-checksum-crc32c header")?;
ret.push(ChecksumValue::Crc32c(crc32c))
if headers.contains_key(X_AMZ_CHECKSUM_CRC32C) {
ret.push(extract_checksum_value(headers, ChecksumAlgorithm::Crc32c)?);
}
if let Some(sha1_str) = headers.get(X_AMZ_CHECKSUM_SHA1) {
let sha1 = BASE64_STANDARD
.decode(&sha1_str)
.ok()
.and_then(|x| x.try_into().ok())
.ok_or_bad_request("invalid x-amz-checksum-sha1 header")?;
ret.push(ChecksumValue::Sha1(sha1))
if headers.contains_key(X_AMZ_CHECKSUM_SHA1) {
ret.push(extract_checksum_value(headers, ChecksumAlgorithm::Sha1)?);
}
if let Some(sha256_str) = headers.get(X_AMZ_CHECKSUM_SHA256) {
let sha256 = BASE64_STANDARD
.decode(&sha256_str)
.ok()
.and_then(|x| x.try_into().ok())
.ok_or_bad_request("invalid x-amz-checksum-sha256 header")?;
ret.push(ChecksumValue::Sha256(sha256))
if headers.contains_key(X_AMZ_CHECKSUM_SHA256) {
ret.push(extract_checksum_value(headers, ChecksumAlgorithm::Sha256)?);
}
if ret.len() > 1 {
@ -340,50 +258,49 @@ pub(crate) fn request_checksum_value(
Ok(ret.pop())
}
/// Checks for the presense of x-amz-checksum-algorithm
/// if so extract the corrseponding x-amz-checksum-* value
pub(crate) fn request_checksum_algorithm_value(
/// Checks for the presence of x-amz-checksum-algorithm
/// if so extract the corresponding x-amz-checksum-* value
pub fn extract_checksum_value(
headers: &HeaderMap<HeaderValue>,
) -> Result<Option<ChecksumValue>, Error> {
match headers.get(X_AMZ_CHECKSUM_ALGORITHM) {
Some(x) if x == "CRC32" => {
algo: ChecksumAlgorithm,
) -> Result<ChecksumValue, Error> {
match algo {
ChecksumAlgorithm::Crc32 => {
let crc32 = headers
.get(X_AMZ_CHECKSUM_CRC32)
.and_then(|x| BASE64_STANDARD.decode(&x).ok())
.and_then(|x| x.try_into().ok())
.ok_or_bad_request("invalid x-amz-checksum-crc32 header")?;
Ok(Some(ChecksumValue::Crc32(crc32)))
Ok(ChecksumValue::Crc32(crc32))
}
Some(x) if x == "CRC32C" => {
ChecksumAlgorithm::Crc32c => {
let crc32c = headers
.get(X_AMZ_CHECKSUM_CRC32C)
.and_then(|x| BASE64_STANDARD.decode(&x).ok())
.and_then(|x| x.try_into().ok())
.ok_or_bad_request("invalid x-amz-checksum-crc32c header")?;
Ok(Some(ChecksumValue::Crc32c(crc32c)))
Ok(ChecksumValue::Crc32c(crc32c))
}
Some(x) if x == "SHA1" => {
ChecksumAlgorithm::Sha1 => {
let sha1 = headers
.get(X_AMZ_CHECKSUM_SHA1)
.and_then(|x| BASE64_STANDARD.decode(&x).ok())
.and_then(|x| x.try_into().ok())
.ok_or_bad_request("invalid x-amz-checksum-sha1 header")?;
Ok(Some(ChecksumValue::Sha1(sha1)))
Ok(ChecksumValue::Sha1(sha1))
}
Some(x) if x == "SHA256" => {
ChecksumAlgorithm::Sha256 => {
let sha256 = headers
.get(X_AMZ_CHECKSUM_SHA256)
.and_then(|x| BASE64_STANDARD.decode(&x).ok())
.and_then(|x| x.try_into().ok())
.ok_or_bad_request("invalid x-amz-checksum-sha256 header")?;
Ok(Some(ChecksumValue::Sha256(sha256)))
Ok(ChecksumValue::Sha256(sha256))
}
Some(_) => Err(Error::bad_request("invalid x-amz-checksum-algorithm")),
None => Ok(None),
}
}
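
To make the decode path concrete: checksum values travel base64-encoded, so a CRC32 (4 big-endian bytes) round-trips as below. Illustrative sketch only; the CRC32 of the ASCII string "hello" is 0x3610a686:

use http::{HeaderMap, HeaderValue};

fn extract_demo() -> Result<(), Error> {
    let mut headers = HeaderMap::new();
    // "NhCmhg==" is the standard base64 of the bytes [0x36, 0x10, 0xa6, 0x86]
    headers.insert(X_AMZ_CHECKSUM_CRC32, HeaderValue::from_static("NhCmhg=="));
    let value = extract_checksum_value(&headers, ChecksumAlgorithm::Crc32)?;
    assert!(matches!(value, ChecksumValue::Crc32(b) if b == 0x3610a686u32.to_be_bytes()));
    Ok(())
}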
pub(crate) fn add_checksum_response_headers(
pub fn add_checksum_response_headers(
checksum: &Option<ChecksumValue>,
mut resp: http::response::Builder,
) -> http::response::Builder {

View file

@ -18,6 +18,10 @@ pub enum Error {
/// The request contained an invalid UTF-8 sequence in its path or in other parameters
#[error(display = "Invalid UTF-8: {}", _0)]
InvalidUtf8Str(#[error(source)] std::str::Utf8Error),
/// The provided digest (checksum) value was invalid
#[error(display = "Invalid digest: {}", _0)]
InvalidDigest(String),
}
impl<T> From<T> for Error

View file

@ -0,0 +1,118 @@
use chrono::{DateTime, Utc};
use hmac::{Hmac, Mac};
use sha2::Sha256;
use hyper::header::HeaderName;
use hyper::{body::Incoming as IncomingBody, Request};
use garage_model::garage::Garage;
use garage_model::key_table::Key;
use garage_util::data::{sha256sum, Hash};
use error::*;
pub mod body;
pub mod checksum;
pub mod error;
pub mod payload;
pub mod streaming;
pub const SHORT_DATE: &str = "%Y%m%d";
pub const LONG_DATETIME: &str = "%Y%m%dT%H%M%SZ";
// ---- Constants used in AWSv4 signatures ----
pub const X_AMZ_ALGORITHM: HeaderName = HeaderName::from_static("x-amz-algorithm");
pub const X_AMZ_CREDENTIAL: HeaderName = HeaderName::from_static("x-amz-credential");
pub const X_AMZ_DATE: HeaderName = HeaderName::from_static("x-amz-date");
pub const X_AMZ_EXPIRES: HeaderName = HeaderName::from_static("x-amz-expires");
pub const X_AMZ_SIGNEDHEADERS: HeaderName = HeaderName::from_static("x-amz-signedheaders");
pub const X_AMZ_SIGNATURE: HeaderName = HeaderName::from_static("x-amz-signature");
pub const X_AMZ_CONTENT_SHA256: HeaderName = HeaderName::from_static("x-amz-content-sha256");
pub const X_AMZ_TRAILER: HeaderName = HeaderName::from_static("x-amz-trailer");
/// Result of `sha256("")`
pub(crate) const EMPTY_STRING_HEX_DIGEST: &str =
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855";
// Signature calculation algorithm
pub const AWS4_HMAC_SHA256: &str = "AWS4-HMAC-SHA256";
type HmacSha256 = Hmac<Sha256>;
// Possible values for x-amz-content-sha256, in addition to the actual sha256
pub const UNSIGNED_PAYLOAD: &str = "UNSIGNED-PAYLOAD";
pub const STREAMING_UNSIGNED_PAYLOAD_TRAILER: &str = "STREAMING-UNSIGNED-PAYLOAD-TRAILER";
pub const STREAMING_AWS4_HMAC_SHA256_PAYLOAD: &str = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD";
// Used in the computation of StringToSign
pub const AWS4_HMAC_SHA256_PAYLOAD: &str = "AWS4-HMAC-SHA256-PAYLOAD";
// ---- enums to describe stuff going on in signature calculation ----
#[derive(Debug)]
pub enum ContentSha256Header {
UnsignedPayload,
Sha256Checksum(Hash),
StreamingPayload { trailer: bool, signed: bool },
}
// ---- top-level functions ----
pub struct VerifiedRequest {
pub request: Request<streaming::ReqBody>,
pub access_key: Key,
pub content_sha256_header: ContentSha256Header,
}
pub async fn verify_request(
garage: &Garage,
mut req: Request<IncomingBody>,
service: &'static str,
) -> Result<VerifiedRequest, Error> {
let checked_signature = payload::check_payload_signature(&garage, &mut req, service).await?;
let request = streaming::parse_streaming_body(
req,
&checked_signature,
&garage.config.s3_api.s3_region,
service,
)?;
let access_key = checked_signature
.key
.ok_or_else(|| Error::forbidden("Garage does not support anonymous access yet"))?;
Ok(VerifiedRequest {
request,
access_key,
content_sha256_header: checked_signature.content_sha256_header,
})
}
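
At call sites, the old tuple-returning API is replaced by this struct; a sketch of the new pattern (not a complete handler, `garage` and `req` assumed to come from the server loop):

async fn example(garage: &Garage, req: Request<IncomingBody>) -> Result<(), Error> {
    let verified = verify_request(garage, req, "s3").await?;
    let _req: Request<streaming::ReqBody> = verified.request;
    let _api_key: Key = verified.access_key;
    // The raw x-amz-content-sha256 mode remains available to handlers:
    if let ContentSha256Header::StreamingPayload { trailer, signed } =
        verified.content_sha256_header
    {
        debug!("streaming body: trailer={}, signed={}", trailer, signed);
    }
    Ok(())
}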
pub fn signing_hmac(
datetime: &DateTime<Utc>,
secret_key: &str,
region: &str,
service: &str,
) -> Result<HmacSha256, crypto_common::InvalidLength> {
let secret = String::from("AWS4") + secret_key;
let mut date_hmac = HmacSha256::new_from_slice(secret.as_bytes())?;
date_hmac.update(datetime.format(SHORT_DATE).to_string().as_bytes());
let mut region_hmac = HmacSha256::new_from_slice(&date_hmac.finalize().into_bytes())?;
region_hmac.update(region.as_bytes());
let mut service_hmac = HmacSha256::new_from_slice(&region_hmac.finalize().into_bytes())?;
service_hmac.update(service.as_bytes());
let mut signing_hmac = HmacSha256::new_from_slice(&service_hmac.finalize().into_bytes())?;
signing_hmac.update(b"aws4_request");
let hmac = HmacSha256::new_from_slice(&signing_hmac.finalize().into_bytes())?;
Ok(hmac)
}
pub fn compute_scope(datetime: &DateTime<Utc>, region: &str, service: &str) -> String {
format!(
"{}/{}/{}/aws4_request",
datetime.format(SHORT_DATE),
region,
service
)
}
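
As a sanity check on the derivation above, a small sketch (date, region and secret invented for illustration):

use chrono::TimeZone;

fn scope_demo() {
    let date = Utc.with_ymd_and_hms(2025, 2, 19, 0, 0, 0).unwrap();
    assert_eq!(
        compute_scope(&date, "garage", "s3"),
        "20250219/garage/s3/aws4_request"
    );
    // Key derivation chain: kDate = HMAC("AWS4" + secret, date),
    // kRegion = HMAC(kDate, region), kService = HMAC(kRegion, service),
    // kSigning = HMAC(kService, "aws4_request").
    let _signing_key = signing_hmac(&date, "secret", "garage", "s3").unwrap();
}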

View file

@ -13,23 +13,9 @@ use garage_util::data::Hash;
use garage_model::garage::Garage;
use garage_model::key_table::*;
use super::LONG_DATETIME;
use super::{compute_scope, signing_hmac};
use super::*;
use crate::encoding::uri_encode;
use crate::signature::error::*;
pub const X_AMZ_ALGORITHM: HeaderName = HeaderName::from_static("x-amz-algorithm");
pub const X_AMZ_CREDENTIAL: HeaderName = HeaderName::from_static("x-amz-credential");
pub const X_AMZ_DATE: HeaderName = HeaderName::from_static("x-amz-date");
pub const X_AMZ_EXPIRES: HeaderName = HeaderName::from_static("x-amz-expires");
pub const X_AMZ_SIGNEDHEADERS: HeaderName = HeaderName::from_static("x-amz-signedheaders");
pub const X_AMZ_SIGNATURE: HeaderName = HeaderName::from_static("x-amz-signature");
pub const X_AMZ_CONTENT_SH256: HeaderName = HeaderName::from_static("x-amz-content-sha256");
pub const AWS4_HMAC_SHA256: &str = "AWS4-HMAC-SHA256";
pub const UNSIGNED_PAYLOAD: &str = "UNSIGNED-PAYLOAD";
pub const STREAMING_AWS4_HMAC_SHA256_PAYLOAD: &str = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD";
pub type QueryMap = HeaderMap<QueryValue>;
pub struct QueryValue {
@ -39,16 +25,23 @@ pub struct QueryValue {
value: String,
}
#[derive(Debug)]
pub struct CheckedSignature {
pub key: Option<Key>,
pub content_sha256_header: ContentSha256Header,
pub signature_header: Option<String>,
}
pub async fn check_payload_signature(
garage: &Garage,
request: &mut Request<IncomingBody>,
service: &'static str,
) -> Result<(Option<Key>, Option<Hash>), Error> {
) -> Result<CheckedSignature, Error> {
let query = parse_query_map(request.uri())?;
if query.contains_key(&X_AMZ_ALGORITHM) {
// We check for presigned-URL-style authentification first, because
// the browser or someting else could inject an Authorization header
// We check for presigned-URL-style authentication first, because
// the browser or something else could inject an Authorization header
// that is totally unrelated to AWS signatures.
check_presigned_signature(garage, service, request, query).await
} else if request.headers().contains_key(AUTHORIZATION) {
@ -57,17 +50,46 @@ pub async fn check_payload_signature(
// Unsigned (anonymous) request
let content_sha256 = request
.headers()
.get("x-amz-content-sha256")
.filter(|c| c.as_bytes() != UNSIGNED_PAYLOAD.as_bytes());
if let Some(content_sha256) = content_sha256 {
let sha256 = hex::decode(content_sha256)
.ok()
.and_then(|bytes| Hash::try_from(&bytes))
.ok_or_bad_request("Invalid content sha256 hash")?;
Ok((None, Some(sha256)))
.get(X_AMZ_CONTENT_SHA256)
.map(|x| x.to_str())
.transpose()?;
Ok(CheckedSignature {
key: None,
content_sha256_header: parse_x_amz_content_sha256(content_sha256)?,
signature_header: None,
})
}
}
fn parse_x_amz_content_sha256(header: Option<&str>) -> Result<ContentSha256Header, Error> {
let header = match header {
Some(x) => x,
None => return Ok(ContentSha256Header::UnsignedPayload),
};
if header == UNSIGNED_PAYLOAD {
Ok(ContentSha256Header::UnsignedPayload)
} else if let Some(rest) = header.strip_prefix("STREAMING-") {
let (trailer, algo) = if let Some(rest2) = rest.strip_suffix("-TRAILER") {
(true, rest2)
} else {
(false, rest)
};
let signed = match algo {
AWS4_HMAC_SHA256_PAYLOAD => true,
UNSIGNED_PAYLOAD => false,
_ => {
return Err(Error::bad_request(
"invalid or unsupported x-amz-content-sha256",
))
}
};
Ok(ContentSha256Header::StreamingPayload { trailer, signed })
} else {
let sha256 = hex::decode(header)
.ok()
.and_then(|bytes| Hash::try_from(&bytes))
.ok_or_bad_request("Invalid content sha256 hash")?;
Ok(ContentSha256Header::Sha256Checksum(sha256))
}
}
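
For reference, how the recognized header values map onto ContentSha256Header (a summary of the parser above):

// (header absent)                              => UnsignedPayload
// "UNSIGNED-PAYLOAD"                           => UnsignedPayload
// "STREAMING-UNSIGNED-PAYLOAD-TRAILER"         => StreamingPayload { trailer: true,  signed: false }
// "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"         => StreamingPayload { trailer: false, signed: true }
// "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER" => StreamingPayload { trailer: true,  signed: true }
// 64 hex characters                            => Sha256Checksum(<that hash>)
// anything else                                => 400 Bad Request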
@ -76,7 +98,7 @@ async fn check_standard_signature(
service: &'static str,
request: &Request<IncomingBody>,
query: QueryMap,
) -> Result<(Option<Key>, Option<Hash>), Error> {
) -> Result<CheckedSignature, Error> {
let authorization = Authorization::parse_header(request.headers())?;
// Verify that all necessary request headers are included in signed_headers
@ -108,18 +130,13 @@ async fn check_standard_signature(
let key = verify_v4(garage, service, &authorization, string_to_sign.as_bytes()).await?;
let content_sha256 = if authorization.content_sha256 == UNSIGNED_PAYLOAD {
None
} else if authorization.content_sha256 == STREAMING_AWS4_HMAC_SHA256_PAYLOAD {
let bytes = hex::decode(authorization.signature).ok_or_bad_request("Invalid signature")?;
Some(Hash::try_from(&bytes).ok_or_bad_request("Invalid signature")?)
} else {
let bytes = hex::decode(authorization.content_sha256)
.ok_or_bad_request("Invalid content sha256 hash")?;
Some(Hash::try_from(&bytes).ok_or_bad_request("Invalid content sha256 hash")?)
};
let content_sha256_header = parse_x_amz_content_sha256(Some(&authorization.content_sha256))?;
Ok((Some(key), content_sha256))
Ok(CheckedSignature {
key: Some(key),
content_sha256_header,
signature_header: Some(authorization.signature),
})
}
async fn check_presigned_signature(
@ -127,12 +144,12 @@ async fn check_presigned_signature(
service: &'static str,
request: &mut Request<IncomingBody>,
mut query: QueryMap,
) -> Result<(Option<Key>, Option<Hash>), Error> {
) -> Result<CheckedSignature, Error> {
let algorithm = query.get(&X_AMZ_ALGORITHM).unwrap();
let authorization = Authorization::parse_presigned(&algorithm.value, &query)?;
// Verify that all necessary request headers are included in signed_headers
// For AWSv4 pre-signed URLs, the following must be incldued:
// For AWSv4 pre-signed URLs, the following must be included:
// - the Host header (mandatory)
// - all x-amz-* headers used in the request
let signed_headers = split_signed_headers(&authorization)?;
@ -193,7 +210,11 @@ async fn check_presigned_signature(
// Presigned URLs always use UNSIGNED-PAYLOAD,
// so there is no sha256 hash to return.
Ok((Some(key), None))
Ok(CheckedSignature {
key: Some(key),
content_sha256_header: ContentSha256Header::UnsignedPayload,
signature_header: Some(authorization.signature),
})
}
pub fn parse_query_map(uri: &http::uri::Uri) -> Result<QueryMap, Error> {
@ -306,7 +327,7 @@ pub fn canonical_request(
// Note that there is also the issue of path normalization, which I hope is unrelated to the
// one of URI-encoding. At least in aws-sigv4 both parameters can be set independently,
// and rusoto_signature does not seem to do any effective path normalization, even though
// it mentions it in the comments (same link to the souce code as above).
// it mentions it in the comments (same link to the source code as above).
// We make the explicit choice of NOT normalizing paths in the K2V API because doing so
// would make non-normalized paths invalid K2V partition keys, and we don't want that.
let canonical_uri: std::borrow::Cow<str> = if service != "s3" {
@ -442,7 +463,7 @@ impl Authorization {
.to_string();
let content_sha256 = headers
.get(X_AMZ_CONTENT_SH256)
.get(X_AMZ_CONTENT_SHA256)
.ok_or_bad_request("Missing X-Amz-Content-Sha256 field")?;
let date = headers
@ -518,7 +539,7 @@ impl Authorization {
})
}
pub(crate) fn parse_form(params: &HeaderMap) -> Result<Self, Error> {
pub fn parse_form(params: &HeaderMap) -> Result<Self, Error> {
let algorithm = params
.get(X_AMZ_ALGORITHM)
.ok_or_bad_request("Missing X-Amz-Algorithm header")?

View file

@ -0,0 +1,618 @@
use std::pin::Pin;
use std::sync::Mutex;
use chrono::{DateTime, NaiveDateTime, TimeZone, Utc};
use futures::prelude::*;
use futures::task;
use hmac::Mac;
use http::header::{HeaderMap, HeaderValue, CONTENT_ENCODING};
use hyper::body::{Bytes, Frame, Incoming as IncomingBody};
use hyper::Request;
use garage_util::data::Hash;
use super::*;
use crate::helpers::body_stream;
use crate::signature::checksum::*;
use crate::signature::payload::CheckedSignature;
pub use crate::signature::body::ReqBody;
pub fn parse_streaming_body(
mut req: Request<IncomingBody>,
checked_signature: &CheckedSignature,
region: &str,
service: &str,
) -> Result<Request<ReqBody>, Error> {
debug!(
"Content signature mode: {:?}",
checked_signature.content_sha256_header
);
match checked_signature.content_sha256_header {
ContentSha256Header::StreamingPayload { signed, trailer } => {
// Sanity checks
if !signed && !trailer {
return Err(Error::bad_request(
"STREAMING-UNSIGNED-PAYLOAD without trailer is not a valid combination",
));
}
// Remove the aws-chunked component in the content-encoding: header
// Note: this header is not properly sent by minio client, so don't fail
// if it is absent from the request.
if let Some(content_encoding) = req.headers_mut().remove(CONTENT_ENCODING) {
if let Some(rest) = content_encoding.as_bytes().strip_prefix(b"aws-chunked,") {
req.headers_mut()
.insert(CONTENT_ENCODING, HeaderValue::from_bytes(rest).unwrap());
} else if content_encoding != "aws-chunked" {
return Err(Error::bad_request(
"content-encoding does not contain aws-chunked for STREAMING-*-PAYLOAD",
));
}
}
// If trailer header is announced, add the calculation of the requested checksum
let mut checksummer = Checksummer::init(&Default::default(), false);
let trailer_algorithm = if trailer {
let algo = Some(
request_trailer_checksum_algorithm(req.headers())?
.ok_or_bad_request("Missing x-amz-trailer header")?,
);
checksummer = checksummer.add(algo);
algo
} else {
None
};
// For signed variants, determine signing parameters
let sign_params = if signed {
let signature = checked_signature
.signature_header
.clone()
.ok_or_bad_request("No signature provided")?;
let signature = hex::decode(signature)
.ok()
.and_then(|bytes| Hash::try_from(&bytes))
.ok_or_bad_request("Invalid signature")?;
let secret_key = checked_signature
.key
.as_ref()
.ok_or_bad_request("Cannot sign streaming payload without signing key")?
.state
.as_option()
.ok_or_internal_error("Deleted key state")?
.secret_key
.to_string();
let date = req
.headers()
.get(X_AMZ_DATE)
.ok_or_bad_request("Missing X-Amz-Date field")?
.to_str()?;
let date: NaiveDateTime = NaiveDateTime::parse_from_str(date, LONG_DATETIME)
.ok_or_bad_request("Invalid date")?;
let date: DateTime<Utc> = Utc.from_utc_datetime(&date);
let scope = compute_scope(&date, region, service);
let signing_hmac =
crate::signature::signing_hmac(&date, &secret_key, region, service)
.ok_or_internal_error("Unable to build signing HMAC")?;
Some(SignParams {
datetime: date,
scope,
signing_hmac,
previous_signature: signature,
})
} else {
None
};
Ok(req.map(move |body| {
let stream = body_stream::<_, Error>(body);
let signed_payload_stream =
StreamingPayloadStream::new(stream, sign_params, trailer).map_err(Error::from);
ReqBody {
stream: Mutex::new(signed_payload_stream.boxed()),
checksummer,
expected_checksums: Default::default(),
trailer_algorithm,
}
}))
}
_ => Ok(req.map(|body| {
let expected_checksums = ExpectedChecksums {
sha256: match &checked_signature.content_sha256_header {
ContentSha256Header::Sha256Checksum(sha256) => Some(*sha256),
_ => None,
},
..Default::default()
};
let checksummer = Checksummer::init(&expected_checksums, false);
let stream = http_body_util::BodyStream::new(body).map_err(Error::from);
ReqBody {
stream: Mutex::new(stream.boxed()),
checksummer,
expected_checksums,
trailer_algorithm: None,
}
})),
}
}
fn compute_streaming_payload_signature(
signing_hmac: &HmacSha256,
date: DateTime<Utc>,
scope: &str,
previous_signature: Hash,
content_sha256: Hash,
) -> Result<Hash, StreamingPayloadError> {
let string_to_sign = [
AWS4_HMAC_SHA256_PAYLOAD,
&date.format(LONG_DATETIME).to_string(),
scope,
&hex::encode(previous_signature),
EMPTY_STRING_HEX_DIGEST,
&hex::encode(content_sha256),
]
.join("\n");
let mut hmac = signing_hmac.clone();
hmac.update(string_to_sign.as_bytes());
Hash::try_from(&hmac.finalize().into_bytes())
.ok_or_else(|| StreamingPayloadError::Message("Could not build signature".into()))
}
fn compute_streaming_trailer_signature(
signing_hmac: &HmacSha256,
date: DateTime<Utc>,
scope: &str,
previous_signature: Hash,
trailer_sha256: Hash,
) -> Result<Hash, StreamingPayloadError> {
let string_to_sign = [
AWS4_HMAC_SHA256_PAYLOAD,
&date.format(LONG_DATETIME).to_string(),
scope,
&hex::encode(previous_signature),
&hex::encode(trailer_sha256),
]
.join("\n");
let mut hmac = signing_hmac.clone();
hmac.update(string_to_sign.as_bytes());
Hash::try_from(&hmac.finalize().into_bytes())
.ok_or_else(|| StreamingPayloadError::Message("Could not build signature".into()))
}
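
Both helpers follow the SigV4 streaming layout; spelled out, the strings being signed are (elements joined with "\n"):

// per data chunk:                      for the trailer:
//   AWS4-HMAC-SHA256-PAYLOAD             AWS4-HMAC-SHA256-PAYLOAD
//   <date, YYYYMMDDTHHMMSSZ>             <date, YYYYMMDDTHHMMSSZ>
//   <scope>                              <scope>
//   hex(previous signature)              hex(previous signature)
//   sha256("") in hex                    hex(sha256(trailer bytes))
//   hex(sha256(chunk data))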
mod payload {
use http::{HeaderName, HeaderValue};
use garage_util::data::Hash;
use nom::bytes::streaming::{tag, take_while};
use nom::character::streaming::hex_digit1;
use nom::combinator::{map_res, opt};
use nom::number::streaming::hex_u32;
macro_rules! try_parse {
($expr:expr) => {
$expr.map_err(|e| e.map(Error::Parser))?
};
}
pub enum Error<I> {
Parser(nom::error::Error<I>),
BadSignature,
}
impl<I> Error<I> {
pub fn description(&self) -> &str {
match *self {
Error::Parser(ref e) => e.code.description(),
Error::BadSignature => "Bad signature",
}
}
}
#[derive(Debug, Clone)]
pub struct ChunkHeader {
pub size: usize,
pub signature: Option<Hash>,
}
impl ChunkHeader {
pub fn parse_signed(input: &[u8]) -> nom::IResult<&[u8], Self, Error<&[u8]>> {
let (input, size) = try_parse!(hex_u32(input));
let (input, _) = try_parse!(tag(";")(input));
let (input, _) = try_parse!(tag("chunk-signature=")(input));
let (input, data) = try_parse!(map_res(hex_digit1, hex::decode)(input));
let signature = Hash::try_from(&data).ok_or(nom::Err::Failure(Error::BadSignature))?;
let (input, _) = try_parse!(tag("\r\n")(input));
let header = ChunkHeader {
size: size as usize,
signature: Some(signature),
};
Ok((input, header))
}
pub fn parse_unsigned(input: &[u8]) -> nom::IResult<&[u8], Self, Error<&[u8]>> {
let (input, size) = try_parse!(hex_u32(input));
let (input, _) = try_parse!(tag("\r\n")(input));
let header = ChunkHeader {
size: size as usize,
signature: None,
};
Ok((input, header))
}
}
#[derive(Debug, Clone)]
pub struct TrailerChunk {
pub header_name: HeaderName,
pub header_value: HeaderValue,
pub signature: Option<Hash>,
}
impl TrailerChunk {
fn parse_content(input: &[u8]) -> nom::IResult<&[u8], Self, Error<&[u8]>> {
let (input, header_name) = try_parse!(map_res(
take_while(|c: u8| c.is_ascii_alphanumeric() || c == b'-'),
HeaderName::from_bytes
)(input));
let (input, _) = try_parse!(tag(b":")(input));
let (input, header_value) = try_parse!(map_res(
take_while(|c: u8| c.is_ascii_alphanumeric() || b"+/=".contains(&c)),
HeaderValue::from_bytes
)(input));
// Possible '\n' after the header value, depends on clients
// https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
let (input, _) = try_parse!(opt(tag(b"\n"))(input));
let (input, _) = try_parse!(tag(b"\r\n")(input));
Ok((
input,
TrailerChunk {
header_name,
header_value,
signature: None,
},
))
}
pub fn parse_signed(input: &[u8]) -> nom::IResult<&[u8], Self, Error<&[u8]>> {
let (input, trailer) = Self::parse_content(input)?;
let (input, _) = try_parse!(tag(b"x-amz-trailer-signature:")(input));
let (input, data) = try_parse!(map_res(hex_digit1, hex::decode)(input));
let signature = Hash::try_from(&data).ok_or(nom::Err::Failure(Error::BadSignature))?;
let (input, _) = try_parse!(tag(b"\r\n")(input));
Ok((
input,
TrailerChunk {
signature: Some(signature),
..trailer
},
))
}
pub fn parse_unsigned(input: &[u8]) -> nom::IResult<&[u8], Self, Error<&[u8]>> {
let (input, trailer) = Self::parse_content(input)?;
let (input, _) = try_parse!(tag(b"\r\n")(input));
Ok((input, trailer))
}
}
}
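
A worked example of the wire format these parsers accept (unsigned variant with a trailer, illustrative payload "hello"); chunk sizes are hexadecimal and every line ends in CRLF:

// 5\r\n
// hello\r\n
// 0\r\n
// x-amz-checksum-crc32c:<base64 value>\r\n
// \r\n
//
// The signed variant appends ";chunk-signature=<64 hex chars>" to each size
// line and adds a final "x-amz-trailer-signature:<64 hex chars>\r\n" line
// after the trailer header.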
#[derive(Debug)]
pub enum StreamingPayloadError {
Stream(Error),
InvalidSignature,
Message(String),
}
impl StreamingPayloadError {
fn message(msg: &str) -> Self {
StreamingPayloadError::Message(msg.into())
}
}
impl From<StreamingPayloadError> for Error {
fn from(err: StreamingPayloadError) -> Self {
match err {
StreamingPayloadError::Stream(e) => e,
StreamingPayloadError::InvalidSignature => {
Error::bad_request("Invalid payload signature")
}
StreamingPayloadError::Message(e) => {
Error::bad_request(format!("Chunk format error: {}", e))
}
}
}
}
impl<I> From<payload::Error<I>> for StreamingPayloadError {
fn from(err: payload::Error<I>) -> Self {
Self::message(err.description())
}
}
impl<I> From<nom::error::Error<I>> for StreamingPayloadError {
fn from(err: nom::error::Error<I>) -> Self {
Self::message(err.code.description())
}
}
enum StreamingPayloadChunk {
Chunk {
header: payload::ChunkHeader,
data: Bytes,
},
Trailer(payload::TrailerChunk),
}
struct SignParams {
datetime: DateTime<Utc>,
scope: String,
signing_hmac: HmacSha256,
previous_signature: Hash,
}
#[pin_project::pin_project]
pub struct StreamingPayloadStream<S>
where
S: Stream<Item = Result<Bytes, Error>>,
{
#[pin]
stream: S,
buf: bytes::BytesMut,
signing: Option<SignParams>,
has_trailer: bool,
done: bool,
}
impl<S> StreamingPayloadStream<S>
where
S: Stream<Item = Result<Bytes, Error>>,
{
fn new(stream: S, signing: Option<SignParams>, has_trailer: bool) -> Self {
Self {
stream,
buf: bytes::BytesMut::new(),
signing,
has_trailer,
done: false,
}
}
fn parse_next(
input: &[u8],
is_signed: bool,
has_trailer: bool,
) -> nom::IResult<&[u8], StreamingPayloadChunk, StreamingPayloadError> {
use nom::bytes::streaming::{tag, take};
macro_rules! try_parse {
($expr:expr) => {
$expr.map_err(nom::Err::convert)?
};
}
let (input, header) = if is_signed {
try_parse!(payload::ChunkHeader::parse_signed(input))
} else {
try_parse!(payload::ChunkHeader::parse_unsigned(input))
};
// 0-sized chunk is the last
if header.size == 0 {
if has_trailer {
let (input, trailer) = if is_signed {
try_parse!(payload::TrailerChunk::parse_signed(input))
} else {
try_parse!(payload::TrailerChunk::parse_unsigned(input))
};
return Ok((input, StreamingPayloadChunk::Trailer(trailer)));
} else {
return Ok((
input,
StreamingPayloadChunk::Chunk {
header,
data: Bytes::new(),
},
));
}
}
let (input, data) = try_parse!(take::<_, _, nom::error::Error<_>>(header.size)(input));
let (input, _) = try_parse!(tag::<_, _, nom::error::Error<_>>("\r\n")(input));
let data = Bytes::from(data.to_vec());
Ok((input, StreamingPayloadChunk::Chunk { header, data }))
}
}
impl<S> Stream for StreamingPayloadStream<S>
where
S: Stream<Item = Result<Bytes, Error>> + Unpin,
{
type Item = Result<Frame<Bytes>, StreamingPayloadError>;
fn poll_next(
self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> task::Poll<Option<Self::Item>> {
use std::task::Poll;
let mut this = self.project();
if *this.done {
return Poll::Ready(None);
}
loop {
let (input, payload) =
match Self::parse_next(this.buf, this.signing.is_some(), *this.has_trailer) {
Ok(res) => res,
Err(nom::Err::Incomplete(_)) => {
match futures::ready!(this.stream.as_mut().poll_next(cx)) {
Some(Ok(bytes)) => {
this.buf.extend(bytes);
continue;
}
Some(Err(e)) => {
return Poll::Ready(Some(Err(StreamingPayloadError::Stream(e))))
}
None => {
return Poll::Ready(Some(Err(StreamingPayloadError::message(
"Unexpected EOF",
))));
}
}
}
Err(nom::Err::Error(e)) | Err(nom::Err::Failure(e)) => {
return Poll::Ready(Some(Err(e)))
}
};
match payload {
StreamingPayloadChunk::Chunk { data, header } => {
if let Some(signing) = this.signing.as_mut() {
let data_sha256sum = sha256sum(&data);
let expected_signature = compute_streaming_payload_signature(
&signing.signing_hmac,
signing.datetime,
&signing.scope,
signing.previous_signature,
data_sha256sum,
)?;
if header.signature.unwrap() != expected_signature {
return Poll::Ready(Some(Err(StreamingPayloadError::InvalidSignature)));
}
signing.previous_signature = header.signature.unwrap();
}
*this.buf = input.into();
// 0-sized chunk is the last
if data.is_empty() {
// if there was a trailer, it would have been returned by the parser
assert!(!*this.has_trailer);
*this.done = true;
return Poll::Ready(None);
}
return Poll::Ready(Some(Ok(Frame::data(data))));
}
StreamingPayloadChunk::Trailer(trailer) => {
trace!(
"In StreamingPayloadStream::poll_next: got trailer {:?}",
trailer
);
if let Some(signing) = this.signing.as_mut() {
let data = [
trailer.header_name.as_ref(),
&b":"[..],
trailer.header_value.as_ref(),
&b"\n"[..],
]
.concat();
let trailer_sha256sum = sha256sum(&data);
let expected_signature = compute_streaming_trailer_signature(
&signing.signing_hmac,
signing.datetime,
&signing.scope,
signing.previous_signature,
trailer_sha256sum,
)?;
if trailer.signature.unwrap() != expected_signature {
return Poll::Ready(Some(Err(StreamingPayloadError::InvalidSignature)));
}
}
*this.buf = input.into();
*this.done = true;
let mut trailers_map = HeaderMap::new();
trailers_map.insert(trailer.header_name, trailer.header_value);
return Poll::Ready(Some(Ok(Frame::trailers(trailers_map))));
}
}
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.stream.size_hint()
}
}
#[cfg(test)]
mod tests {
use futures::prelude::*;
use super::{SignParams, StreamingPayloadError, StreamingPayloadStream};
#[tokio::test]
async fn test_interrupted_signed_payload_stream() {
use chrono::{DateTime, Utc};
use garage_util::data::Hash;
let datetime = DateTime::parse_from_rfc3339("2021-12-13T13:12:42+01:00") // TODO UNIX 0
.unwrap()
.with_timezone(&Utc);
let secret_key = "test";
let region = "test";
let scope = crate::signature::compute_scope(&datetime, region, "s3");
let signing_hmac =
crate::signature::signing_hmac(&datetime, secret_key, region, "s3").unwrap();
let data: &[&[u8]] = &[b"1"];
let body = futures::stream::iter(data.iter().map(|block| Ok(block.to_vec().into())));
let seed_signature = Hash::default();
let mut stream = StreamingPayloadStream::new(
body,
Some(SignParams {
signing_hmac,
datetime,
scope,
previous_signature: seed_signature,
}),
false,
);
assert!(stream.try_next().await.is_err());
match stream.try_next().await {
Err(StreamingPayloadError::Message(msg)) if msg == "Unexpected EOF" => {}
item => panic!(
"Unexpected result, expected early EOF error, got {:?}",
item
),
}
}
}

src/api/k2v/Cargo.toml Normal file (+37 lines)
View file

@ -0,0 +1,37 @@
[package]
name = "garage_api_k2v"
version = "1.0.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
description = "K2V API server crate for the Garage object store"
repository = "https://git.deuxfleurs.fr/Deuxfleurs/garage"
readme = "../../README.md"
[lib]
path = "lib.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
garage_model = { workspace = true, features = [ "k2v" ] }
garage_table.workspace = true
garage_util = { workspace = true, features = [ "k2v" ] }
garage_api_common.workspace = true
base64.workspace = true
err-derive.workspace = true
tracing.workspace = true
futures.workspace = true
tokio.workspace = true
http.workspace = true
http-body-util.workspace = true
hyper = { workspace = true, default-features = false, features = ["server", "http1"] }
percent-encoding.workspace = true
url.workspace = true
serde.workspace = true
serde_json.workspace = true
opentelemetry.workspace = true

View file

@ -1,7 +1,5 @@
use std::sync::Arc;
use async_trait::async_trait;
use hyper::{body::Incoming as IncomingBody, Method, Request, Response};
use tokio::sync::watch;
@ -12,26 +10,25 @@ use garage_util::socket_address::UnixOrTCPSocketAddress;
use garage_model::garage::Garage;
use crate::generic_server::*;
use crate::k2v::error::*;
use garage_api_common::cors::*;
use garage_api_common::generic_server::*;
use garage_api_common::helpers::*;
use garage_api_common::signature::verify_request;
use crate::signature::verify_request;
use crate::batch::*;
use crate::error::*;
use crate::index::*;
use crate::item::*;
use crate::router::Endpoint;
use crate::helpers::*;
use crate::k2v::batch::*;
use crate::k2v::index::*;
use crate::k2v::item::*;
use crate::k2v::router::Endpoint;
use crate::s3::cors::*;
pub use crate::signature::streaming::ReqBody;
pub use garage_api_common::signature::streaming::ReqBody;
pub type ResBody = BoxBody<Error>;
pub struct K2VApiServer {
garage: Arc<Garage>,
}
pub(crate) struct K2VApiEndpoint {
pub struct K2VApiEndpoint {
bucket_name: String,
endpoint: Endpoint,
}
@ -49,7 +46,6 @@ impl K2VApiServer {
}
}
#[async_trait]
impl ApiHandler for K2VApiServer {
const API_NAME: &'static str = "k2v";
const API_NAME_DISPLAY: &'static str = "K2V";
@ -77,7 +73,7 @@ impl ApiHandler for K2VApiServer {
} = endpoint;
let garage = self.garage.clone();
// The OPTIONS method is procesed early, before we even check for an API key
// The OPTIONS method is processed early, before we even check for an API key
if let Endpoint::Options = endpoint {
let options_res = handle_options_api(garage, &req, Some(bucket_name))
.await
@ -85,16 +81,20 @@ impl ApiHandler for K2VApiServer {
return Ok(options_res.map(|_empty_body: EmptyBody| empty_body()));
}
let (req, api_key, _content_sha256) = verify_request(&garage, req, "k2v").await?;
let verified_request = verify_request(&garage, req, "k2v").await?;
let req = verified_request.request;
let api_key = verified_request.access_key;
let bucket_id = garage
.bucket_helper()
.resolve_bucket(&bucket_name, &api_key)
.await?;
.await
.map_err(pass_helper_error)?;
let bucket = garage
.bucket_helper()
.get_existing_bucket(bucket_id)
.await?;
.await
.map_err(helper_error_as_internal)?;
let bucket_params = bucket.state.into_option().unwrap();
let allowed = match endpoint.authorization_type() {

View file

@ -4,13 +4,14 @@ use serde::{Deserialize, Serialize};
use garage_table::{EnumerationOrder, TableSchema};
use garage_model::k2v::causality::*;
use garage_model::k2v::item_table::*;
use crate::helpers::*;
use crate::k2v::api_server::{ReqBody, ResBody};
use crate::k2v::error::*;
use crate::k2v::range::read_range;
use garage_api_common::helpers::*;
use crate::api_server::{ReqBody, ResBody};
use crate::error::*;
use crate::item::parse_causality_token;
use crate::range::read_range;
pub async fn handle_insert_batch(
ctx: ReqCtx,
@ -19,11 +20,11 @@ pub async fn handle_insert_batch(
let ReqCtx {
garage, bucket_id, ..
} = &ctx;
let items = parse_json_body::<Vec<InsertBatchItem>, _, Error>(req).await?;
let items = req.into_body().json::<Vec<InsertBatchItem>>().await?;
let mut items2 = vec![];
for it in items {
let ct = it.ct.map(|s| CausalContext::parse_helper(&s)).transpose()?;
let ct = it.ct.map(|s| parse_causality_token(&s)).transpose()?;
let v = match it.v {
Some(vs) => DvvsValue::Value(
BASE64_STANDARD
@ -46,7 +47,7 @@ pub async fn handle_read_batch(
ctx: ReqCtx,
req: Request<ReqBody>,
) -> Result<Response<ResBody>, Error> {
let queries = parse_json_body::<Vec<ReadBatchQuery>, _, Error>(req).await?;
let queries = req.into_body().json::<Vec<ReadBatchQuery>>().await?;
let resp_results = futures::future::join_all(
queries
@ -140,7 +141,7 @@ pub async fn handle_delete_batch(
ctx: ReqCtx,
req: Request<ReqBody>,
) -> Result<Response<ResBody>, Error> {
let queries = parse_json_body::<Vec<DeleteBatchQuery>, _, Error>(req).await?;
let queries = req.into_body().json::<Vec<DeleteBatchQuery>>().await?;
let resp_results = futures::future::join_all(
queries
@ -261,7 +262,7 @@ pub(crate) async fn handle_poll_range(
} = ctx;
use garage_model::k2v::sub::PollRange;
let query = parse_json_body::<PollRangeQuery, _, Error>(req).await?;
let query = req.into_body().json::<PollRangeQuery>().await?;
let timeout_msec = query.timeout.unwrap_or(300).clamp(1, 600) * 1000;
@ -281,7 +282,8 @@ pub(crate) async fn handle_poll_range(
query.seen_marker,
timeout_msec,
)
.await?;
.await
.map_err(pass_helper_error)?;
if let Some((items, seen_marker)) = resp {
let resp = PollRangeResponse {

View file

@ -2,24 +2,31 @@ use err_derive::Error;
use hyper::header::HeaderValue;
use hyper::{HeaderMap, StatusCode};
use crate::common_error::CommonError;
pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInternalError};
use crate::generic_server::ApiError;
use crate::helpers::*;
use crate::signature::error::Error as SignatureError;
use garage_api_common::common_error::{commonErrorDerivative, CommonError};
pub(crate) use garage_api_common::common_error::{helper_error_as_internal, pass_helper_error};
pub use garage_api_common::common_error::{
CommonErrorDerivative, OkOrBadRequest, OkOrInternalError,
};
use garage_api_common::generic_server::ApiError;
use garage_api_common::helpers::*;
use garage_api_common::signature::error::Error as SignatureError;
/// Errors of this crate
#[derive(Debug, Error)]
pub enum Error {
#[error(display = "{}", _0)]
/// Error from common error
Common(CommonError),
Common(#[error(source)] CommonError),
// Category: cannot process
/// Authorization Header Malformed
#[error(display = "Authorization header malformed, unexpected scope: {}", _0)]
AuthorizationHeaderMalformed(String),
/// The provided digest (checksum) value was invalid
#[error(display = "Invalid digest: {}", _0)]
InvalidDigest(String),
/// The requested object does not exist
#[error(display = "Key not found")]
NoSuchKey,
@ -28,6 +35,10 @@ pub enum Error {
#[error(display = "Invalid base64: {}", _0)]
InvalidBase64(#[error(source)] base64::DecodeError),
/// Invalid causality token
#[error(display = "Invalid causality token")]
InvalidCausalityToken,
/// The client asked for an invalid return format (invalid Accept header)
#[error(display = "Not acceptable: {}", _0)]
NotAcceptable(String),
@ -37,16 +48,7 @@ pub enum Error {
InvalidUtf8Str(#[error(source)] std::str::Utf8Error),
}
impl<T> From<T> for Error
where
CommonError: From<T>,
{
fn from(err: T) -> Self {
Error::Common(CommonError::from(err))
}
}
impl CommonErrorDerivative for Error {}
commonErrorDerivative!(Error);
impl From<SignatureError> for Error {
fn from(err: SignatureError) -> Self {
@ -56,6 +58,7 @@ impl From<SignatureError> for Error {
Self::AuthorizationHeaderMalformed(c)
}
SignatureError::InvalidUtf8Str(i) => Self::InvalidUtf8Str(i),
SignatureError::InvalidDigest(d) => Self::InvalidDigest(d),
}
}
}
@ -72,6 +75,8 @@ impl Error {
Error::AuthorizationHeaderMalformed(_) => "AuthorizationHeaderMalformed",
Error::InvalidBase64(_) => "InvalidBase64",
Error::InvalidUtf8Str(_) => "InvalidUtf8String",
Error::InvalidCausalityToken => "CausalityToken",
Error::InvalidDigest(_) => "InvalidDigest",
}
}
}
@ -85,7 +90,9 @@ impl ApiError for Error {
Error::NotAcceptable(_) => StatusCode::NOT_ACCEPTABLE,
Error::AuthorizationHeaderMalformed(_)
| Error::InvalidBase64(_)
| Error::InvalidUtf8Str(_) => StatusCode::BAD_REQUEST,
| Error::InvalidUtf8Str(_)
| Error::InvalidDigest(_)
| Error::InvalidCausalityToken => StatusCode::BAD_REQUEST,
}
}

View file

@ -5,10 +5,11 @@ use garage_table::util::*;
use garage_model::k2v::item_table::{BYTES, CONFLICTS, ENTRIES, VALUES};
use crate::helpers::*;
use crate::k2v::api_server::ResBody;
use crate::k2v::error::*;
use crate::k2v::range::read_range;
use garage_api_common::helpers::*;
use crate::api_server::ResBody;
use crate::error::*;
use crate::range::read_range;
pub async fn handle_read_index(
ctx: ReqCtx,

View file

@ -6,9 +6,10 @@ use hyper::{Request, Response, StatusCode};
use garage_model::k2v::causality::*;
use garage_model::k2v::item_table::*;
use crate::helpers::*;
use crate::k2v::api_server::{ReqBody, ResBody};
use crate::k2v::error::*;
use garage_api_common::helpers::*;
use crate::api_server::{ReqBody, ResBody};
use crate::error::*;
pub const X_GARAGE_CAUSALITY_TOKEN: &str = "X-Garage-Causality-Token";
@ -18,6 +19,10 @@ pub enum ReturnFormat {
Either,
}
pub(crate) fn parse_causality_token(s: &str) -> Result<CausalContext, Error> {
CausalContext::parse(s).ok_or(Error::InvalidCausalityToken)
}
impl ReturnFormat {
pub fn from(req: &Request<ReqBody>) -> Result<Self, Error> {
let accept = match req.headers().get(header::ACCEPT) {
@ -136,12 +141,10 @@ pub async fn handle_insert_item(
.get(X_GARAGE_CAUSALITY_TOKEN)
.map(|s| s.to_str())
.transpose()?
.map(CausalContext::parse_helper)
.map(parse_causality_token)
.transpose()?;
let body = http_body_util::BodyExt::collect(req.into_body())
.await?
.to_bytes();
let body = req.into_body().collect().await?;
let value = DvvsValue::Value(body.to_vec());
@ -176,7 +179,7 @@ pub async fn handle_delete_item(
.get(X_GARAGE_CAUSALITY_TOKEN)
.map(|s| s.to_str())
.transpose()?
.map(CausalContext::parse_helper)
.map(parse_causality_token)
.transpose()?;
let value = DvvsValue::Deleted;

View file

@ -1,3 +1,6 @@
#[macro_use]
extern crate tracing;
pub mod api_server;
mod error;
mod router;

View file

@ -7,8 +7,9 @@ use std::sync::Arc;
use garage_table::replication::TableShardedReplication;
use garage_table::*;
use crate::helpers::key_after_prefix;
use crate::k2v::error::*;
use garage_api_common::helpers::key_after_prefix;
use crate::error::*;
/// Read range in a Garage table.
/// Returns (entries, more?, nextStart)

View file

@ -1,11 +1,11 @@
use crate::k2v::error::*;
use crate::error::*;
use std::borrow::Cow;
use hyper::{Method, Request};
use crate::helpers::Authorization;
use crate::router_macros::{generateQueryParameters, router_match};
use garage_api_common::helpers::Authorization;
use garage_api_common::router_macros::{generateQueryParameters, router_match};
router_match! {@func

View file

@ -1,17 +0,0 @@
//! Crate for serving a S3 compatible API
#[macro_use]
extern crate tracing;
pub mod common_error;
mod encoding;
pub mod generic_server;
pub mod helpers;
mod router_macros;
/// This mode is public only to help testing. Don't expect stability here
pub mod signature;
pub mod admin;
#[cfg(feature = "k2v")]
pub mod k2v;
pub mod s3;

View file

@ -1,6 +1,6 @@
[package]
name = "garage_api"
version = "1.0.0"
name = "garage_api_s3"
version = "1.0.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
@ -20,30 +20,24 @@ garage_block.workspace = true
garage_net.workspace = true
garage_util.workspace = true
garage_rpc.workspace = true
garage_api_common.workspace = true
aes-gcm.workspace = true
argon2.workspace = true
async-compression.workspace = true
async-trait.workspace = true
base64.workspace = true
bytes.workspace = true
chrono.workspace = true
crc32fast.workspace = true
crc32c.workspace = true
crypto-common.workspace = true
err-derive.workspace = true
hex.workspace = true
hmac.workspace = true
idna.workspace = true
tracing.workspace = true
md-5.workspace = true
nom.workspace = true
pin-project.workspace = true
sha1.workspace = true
sha2.workspace = true
futures.workspace = true
futures-util.workspace = true
tokio.workspace = true
tokio-stream.workspace = true
tokio-util.workspace = true
@ -54,21 +48,13 @@ httpdate.workspace = true
http-range.workspace = true
http-body-util.workspace = true
hyper = { workspace = true, default-features = false, features = ["server", "http1"] }
hyper-util.workspace = true
multer.workspace = true
percent-encoding.workspace = true
roxmltree.workspace = true
url.workspace = true
serde.workspace = true
serde_bytes.workspace = true
serde_json.workspace = true
quick-xml.workspace = true
opentelemetry.workspace = true
opentelemetry-prometheus = { workspace = true, optional = true }
prometheus = { workspace = true, optional = true }
[features]
k2v = [ "garage_util/k2v", "garage_model/k2v" ]
metrics = [ "opentelemetry-prometheus", "prometheus" ]

View file

@ -1,7 +1,5 @@
use std::sync::Arc;
use async_trait::async_trait;
use hyper::header;
use hyper::{body::Incoming as IncomingBody, Request, Response};
use tokio::sync::watch;
@ -14,33 +12,33 @@ use garage_util::socket_address::UnixOrTCPSocketAddress;
use garage_model::garage::Garage;
use garage_model::key_table::Key;
use crate::generic_server::*;
use crate::s3::error::*;
use garage_api_common::cors::*;
use garage_api_common::generic_server::*;
use garage_api_common::helpers::*;
use garage_api_common::signature::verify_request;
use crate::signature::verify_request;
use crate::bucket::*;
use crate::copy::*;
use crate::cors::*;
use crate::delete::*;
use crate::error::*;
use crate::get::*;
use crate::lifecycle::*;
use crate::list::*;
use crate::multipart::*;
use crate::post_object::handle_post_object;
use crate::put::*;
use crate::router::Endpoint;
use crate::website::*;
use crate::helpers::*;
use crate::s3::bucket::*;
use crate::s3::copy::*;
use crate::s3::cors::*;
use crate::s3::delete::*;
use crate::s3::get::*;
use crate::s3::lifecycle::*;
use crate::s3::list::*;
use crate::s3::multipart::*;
use crate::s3::post_object::handle_post_object;
use crate::s3::put::*;
use crate::s3::router::Endpoint;
use crate::s3::website::*;
pub use crate::signature::streaming::ReqBody;
pub use garage_api_common::signature::streaming::ReqBody;
pub type ResBody = BoxBody<Error>;
pub struct S3ApiServer {
garage: Arc<Garage>,
}
pub(crate) struct S3ApiEndpoint {
pub struct S3ApiEndpoint {
bucket_name: Option<String>,
endpoint: Endpoint,
}
@ -70,7 +68,6 @@ impl S3ApiServer {
}
}
#[async_trait]
impl ApiHandler for S3ApiServer {
const API_NAME: &'static str = "s3";
const API_NAME_DISPLAY: &'static str = "S3";
@ -124,7 +121,9 @@ impl ApiHandler for S3ApiServer {
return Ok(options_res.map(|_empty_body: EmptyBody| empty_body()));
}
let (req, api_key, content_sha256) = verify_request(&garage, req, "s3").await?;
let verified_request = verify_request(&garage, req, "s3").await?;
let req = verified_request.request;
let api_key = verified_request.access_key;
let bucket_name = match bucket_name {
None => {
@ -137,20 +136,14 @@ impl ApiHandler for S3ApiServer {
// Special code path for CreateBucket API endpoint
if let Endpoint::CreateBucket {} = endpoint {
return handle_create_bucket(
&garage,
req,
content_sha256,
&api_key.key_id,
bucket_name,
)
.await;
return handle_create_bucket(&garage, req, &api_key.key_id, bucket_name).await;
}
let bucket_id = garage
.bucket_helper()
.resolve_bucket(&bucket_name, &api_key)
.await?;
.await
.map_err(pass_helper_error)?;
let bucket = garage
.bucket_helper()
.get_existing_bucket(bucket_id)
@ -181,7 +174,7 @@ impl ApiHandler for S3ApiServer {
let resp = match endpoint {
Endpoint::HeadObject {
key, part_number, ..
} => handle_head(ctx, &req, &key, part_number).await,
} => handle_head(ctx, &req.map(|_| ()), &key, part_number).await,
Endpoint::GetObject {
key,
part_number,
@ -201,20 +194,20 @@ impl ApiHandler for S3ApiServer {
response_content_type,
response_expires,
};
handle_get(ctx, &req, &key, part_number, overrides).await
handle_get(ctx, &req.map(|_| ()), &key, part_number, overrides).await
}
Endpoint::UploadPart {
key,
part_number,
upload_id,
} => handle_put_part(ctx, req, &key, part_number, &upload_id, content_sha256).await,
} => handle_put_part(ctx, req, &key, part_number, &upload_id).await,
Endpoint::CopyObject { key } => handle_copy(ctx, &req, &key).await,
Endpoint::UploadPartCopy {
key,
part_number,
upload_id,
} => handle_upload_part_copy(ctx, &req, &key, part_number, &upload_id).await,
Endpoint::PutObject { key } => handle_put(ctx, req, &key, content_sha256).await,
Endpoint::PutObject { key } => handle_put(ctx, req, &key).await,
Endpoint::AbortMultipartUpload { key, upload_id } => {
handle_abort_multipart_upload(ctx, &key, &upload_id).await
}
@ -223,7 +216,7 @@ impl ApiHandler for S3ApiServer {
handle_create_multipart_upload(ctx, &req, &key).await
}
Endpoint::CompleteMultipartUpload { key, upload_id } => {
handle_complete_multipart_upload(ctx, req, &key, &upload_id, content_sha256).await
handle_complete_multipart_upload(ctx, req, &key, &upload_id).await
}
Endpoint::CreateBucket {} => unreachable!(),
Endpoint::HeadBucket {} => {
@ -319,7 +312,6 @@ impl ApiHandler for S3ApiServer {
} => {
let query = ListPartsQuery {
bucket_name: ctx.bucket_name.clone(),
bucket_id,
key,
upload_id,
part_number_marker: part_number_marker.map(|p| p.min(10000)),
@ -327,17 +319,15 @@ impl ApiHandler for S3ApiServer {
};
handle_list_parts(ctx, req, &query).await
}
Endpoint::DeleteObjects {} => handle_delete_objects(ctx, req, content_sha256).await,
Endpoint::DeleteObjects {} => handle_delete_objects(ctx, req).await,
Endpoint::GetBucketWebsite {} => handle_get_website(ctx).await,
Endpoint::PutBucketWebsite {} => handle_put_website(ctx, req, content_sha256).await,
Endpoint::PutBucketWebsite {} => handle_put_website(ctx, req).await,
Endpoint::DeleteBucketWebsite {} => handle_delete_website(ctx).await,
Endpoint::GetBucketCors {} => handle_get_cors(ctx).await,
Endpoint::PutBucketCors {} => handle_put_cors(ctx, req, content_sha256).await,
Endpoint::PutBucketCors {} => handle_put_cors(ctx, req).await,
Endpoint::DeleteBucketCors {} => handle_delete_cors(ctx).await,
Endpoint::GetBucketLifecycleConfiguration {} => handle_get_lifecycle(ctx).await,
Endpoint::PutBucketLifecycleConfiguration {} => {
handle_put_lifecycle(ctx, req, content_sha256).await
}
Endpoint::PutBucketLifecycleConfiguration {} => handle_put_lifecycle(ctx, req).await,
Endpoint::DeleteBucketLifecycle {} => handle_delete_lifecycle(ctx).await,
endpoint => Err(Error::NotImplemented(endpoint.name().to_owned())),
};

View file

@ -1,6 +1,5 @@
use std::collections::HashMap;
use http_body_util::BodyExt;
use hyper::{Request, Response, StatusCode};
use garage_model::bucket_alias_table::*;
@ -10,15 +9,14 @@ use garage_model::key_table::Key;
use garage_model::permission::BucketKeyPerm;
use garage_table::util::*;
use garage_util::crdt::*;
use garage_util::data::*;
use garage_util::time::*;
use crate::common_error::CommonError;
use crate::helpers::*;
use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::error::*;
use crate::s3::xml as s3_xml;
use crate::signature::verify_signed_content;
use garage_api_common::common_error::CommonError;
use garage_api_common::helpers::*;
use crate::api_server::{ReqBody, ResBody};
use crate::error::*;
use crate::xml as s3_xml;
pub fn handle_get_bucket_location(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
let ReqCtx { garage, .. } = ctx;
@ -121,15 +119,10 @@ pub async fn handle_list_buckets(
pub async fn handle_create_bucket(
garage: &Garage,
req: Request<ReqBody>,
content_sha256: Option<Hash>,
api_key_id: &String,
bucket_name: String,
) -> Result<Response<ResBody>, Error> {
let body = BodyExt::collect(req.into_body()).await?.to_bytes();
if let Some(content_sha256) = content_sha256 {
verify_signed_content(content_sha256, &body[..])?;
}
let body = req.into_body().collect().await?;
let cmd =
parse_create_bucket_xml(&body[..]).ok_or_bad_request("Invalid create bucket XML query")?;

View file

@ -1,9 +1,9 @@
use std::pin::Pin;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use futures::{stream, stream::Stream, StreamExt, TryStreamExt};
use bytes::Bytes;
use http::header::HeaderName;
use hyper::{Request, Response};
use serde::Serialize;
@ -20,15 +20,25 @@ use garage_model::s3::mpu_table::*;
use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*;
use crate::helpers::*;
use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::checksum::*;
use crate::s3::encryption::EncryptionParams;
use crate::s3::error::*;
use crate::s3::get::full_object_byte_stream;
use crate::s3::multipart;
use crate::s3::put::{get_headers, save_stream, ChecksumMode, SaveStreamResult};
use crate::s3::xml::{self as s3_xml, xmlns_tag};
use garage_api_common::helpers::*;
use garage_api_common::signature::checksum::*;
use crate::api_server::{ReqBody, ResBody};
use crate::encryption::EncryptionParams;
use crate::error::*;
use crate::get::{full_object_byte_stream, PreconditionHeaders};
use crate::multipart;
use crate::put::{extract_metadata_headers, save_stream, ChecksumMode, SaveStreamResult};
use crate::xml::{self as s3_xml, xmlns_tag};
pub const X_AMZ_COPY_SOURCE_IF_MATCH: HeaderName =
HeaderName::from_static("x-amz-copy-source-if-match");
pub const X_AMZ_COPY_SOURCE_IF_NONE_MATCH: HeaderName =
HeaderName::from_static("x-amz-copy-source-if-none-match");
pub const X_AMZ_COPY_SOURCE_IF_MODIFIED_SINCE: HeaderName =
HeaderName::from_static("x-amz-copy-source-if-modified-since");
pub const X_AMZ_COPY_SOURCE_IF_UNMODIFIED_SINCE: HeaderName =
HeaderName::from_static("x-amz-copy-source-if-unmodified-since");
// -------- CopyObject ---------
@ -37,7 +47,7 @@ pub async fn handle_copy(
req: &Request<ReqBody>,
dest_key: &str,
) -> Result<Response<ResBody>, Error> {
let copy_precondition = CopyPreconditionHeaders::parse(req)?;
let copy_precondition = PreconditionHeaders::parse_copy_source(req)?;
let checksum_algorithm = request_checksum_algorithm(req.headers())?;
@ -47,7 +57,7 @@ pub async fn handle_copy(
extract_source_info(&source_object)?;
// Check precondition, e.g. x-amz-copy-source-if-match
copy_precondition.check(source_version, &source_version_meta.etag)?;
copy_precondition.check_copy_source(source_version, &source_version_meta.etag)?;
// Determine encryption parameters
let (source_encryption, source_object_meta_inner) =
@ -63,7 +73,7 @@ pub async fn handle_copy(
let source_checksum_algorithm = source_checksum.map(|x| x.algorithm());
// If source object has a checksum, the destination object must as well.
// The x-amz-checksum-algorihtm header allows to change that algorithm,
// The x-amz-checksum-algorithm header allows to change that algorithm,
// but if it is absent, we must use the same as before
let checksum_algorithm = checksum_algorithm.or(source_checksum_algorithm);
@ -72,7 +82,7 @@ pub async fn handle_copy(
let dest_object_meta = ObjectVersionMetaInner {
headers: match req.headers().get("x-amz-metadata-directive") {
Some(v) if v == hyper::header::HeaderValue::from_static("REPLACE") => {
get_headers(req.headers())?
extract_metadata_headers(req.headers())?
}
_ => source_object_meta_inner.into_owned().headers,
},
@ -334,7 +344,7 @@ pub async fn handle_upload_part_copy(
part_number: u64,
upload_id: &str,
) -> Result<Response<ResBody>, Error> {
let copy_precondition = CopyPreconditionHeaders::parse(req)?;
let copy_precondition = PreconditionHeaders::parse_copy_source(req)?;
let dest_upload_id = multipart::decode_upload_id(upload_id)?;
@ -350,7 +360,7 @@ pub async fn handle_upload_part_copy(
extract_source_info(&source_object)?;
// Check precondition on source, e.g. x-amz-copy-source-if-match
copy_precondition.check(source_object_version, &source_version_meta.etag)?;
copy_precondition.check_copy_source(source_object_version, &source_version_meta.etag)?;
// Determine encryption parameters
let (source_encryption, _) = EncryptionParams::check_decrypt_for_copy_source(
@ -655,7 +665,8 @@ async fn get_copy_source(ctx: &ReqCtx, req: &Request<ReqBody>) -> Result<Object,
let source_bucket_id = garage
.bucket_helper()
.resolve_bucket(&source_bucket.to_string(), api_key)
.await?;
.await
.map_err(pass_helper_error)?;
if !api_key.allow_read(&source_bucket_id) {
return Err(Error::forbidden(format!(
@ -701,97 +712,6 @@ fn extract_source_info(
Ok((source_version, source_version_data, source_version_meta))
}
struct CopyPreconditionHeaders {
copy_source_if_match: Option<Vec<String>>,
copy_source_if_modified_since: Option<SystemTime>,
copy_source_if_none_match: Option<Vec<String>>,
copy_source_if_unmodified_since: Option<SystemTime>,
}
impl CopyPreconditionHeaders {
fn parse(req: &Request<ReqBody>) -> Result<Self, Error> {
Ok(Self {
copy_source_if_match: req
.headers()
.get("x-amz-copy-source-if-match")
.map(|x| x.to_str())
.transpose()?
.map(|x| {
x.split(',')
.map(|m| m.trim().trim_matches('"').to_string())
.collect::<Vec<_>>()
}),
copy_source_if_modified_since: req
.headers()
.get("x-amz-copy-source-if-modified-since")
.map(|x| x.to_str())
.transpose()?
.map(httpdate::parse_http_date)
.transpose()
.ok_or_bad_request("Invalid date in x-amz-copy-source-if-modified-since")?,
copy_source_if_none_match: req
.headers()
.get("x-amz-copy-source-if-none-match")
.map(|x| x.to_str())
.transpose()?
.map(|x| {
x.split(',')
.map(|m| m.trim().trim_matches('"').to_string())
.collect::<Vec<_>>()
}),
copy_source_if_unmodified_since: req
.headers()
.get("x-amz-copy-source-if-unmodified-since")
.map(|x| x.to_str())
.transpose()?
.map(httpdate::parse_http_date)
.transpose()
.ok_or_bad_request("Invalid date in x-amz-copy-source-if-unmodified-since")?,
})
}
fn check(&self, v: &ObjectVersion, etag: &str) -> Result<(), Error> {
let v_date = UNIX_EPOCH + Duration::from_millis(v.timestamp);
let ok = match (
&self.copy_source_if_match,
&self.copy_source_if_unmodified_since,
&self.copy_source_if_none_match,
&self.copy_source_if_modified_since,
) {
// TODO I'm not sure all of the conditions are evaluated correctly here
// If we have both if-match and if-unmodified-since,
// basically we don't care about if-unmodified-since,
// because in the spec it says that if if-match evaluates to
// true but if-unmodified-since evaluates to false,
// the copy is still done.
(Some(im), _, None, None) => im.iter().any(|x| x == etag || x == "*"),
(None, Some(ius), None, None) => v_date <= *ius,
// If we have both if-none-match and if-modified-since,
// then both of the two conditions must evaluate to true
(None, None, Some(inm), Some(ims)) => {
!inm.iter().any(|x| x == etag || x == "*") && v_date > *ims
}
(None, None, Some(inm), None) => !inm.iter().any(|x| x == etag || x == "*"),
(None, None, None, Some(ims)) => v_date > *ims,
(None, None, None, None) => true,
_ => {
return Err(Error::bad_request(
"Invalid combination of x-amz-copy-source-if-xxxxx headers",
))
}
};
if ok {
Ok(())
} else {
Err(Error::PreconditionFailed)
}
}
}
type BlockStreamItemOk = (Bytes, Option<Hash>);
type BlockStreamItem = Result<BlockStreamItemOk, garage_util::error::Error>;
@ -861,7 +781,7 @@ pub struct CopyPartResult {
#[cfg(test)]
mod tests {
use super::*;
use crate::s3::xml::to_xml_with_header;
use crate::xml::to_xml_with_header;
#[test]
fn copy_object_result() -> Result<(), Error> {

View file

@ -1,29 +1,16 @@
use quick_xml::de::from_reader;
use std::sync::Arc;
use http::header::{
ACCESS_CONTROL_ALLOW_HEADERS, ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN,
ACCESS_CONTROL_EXPOSE_HEADERS, ACCESS_CONTROL_REQUEST_HEADERS, ACCESS_CONTROL_REQUEST_METHOD,
};
use hyper::{
body::Body, body::Incoming as IncomingBody, header::HeaderName, Method, Request, Response,
StatusCode,
};
use http_body_util::BodyExt;
use hyper::{header::HeaderName, Method, Request, Response, StatusCode};
use serde::{Deserialize, Serialize};
use crate::common_error::CommonError;
use crate::helpers::*;
use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::error::*;
use crate::s3::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
use crate::signature::verify_signed_content;
use garage_model::bucket_table::{Bucket, CorsRule as GarageCorsRule};
use garage_model::bucket_table::{Bucket, BucketParams, CorsRule as GarageCorsRule};
use garage_model::garage::Garage;
use garage_util::data::*;
use garage_api_common::helpers::*;
use crate::api_server::{ReqBody, ResBody};
use crate::error::*;
use crate::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
pub async fn handle_get_cors(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
let ReqCtx { bucket_params, .. } = ctx;
@ -68,7 +55,6 @@ pub async fn handle_delete_cors(ctx: ReqCtx) -> Result<Response<ResBody>, Error>
pub async fn handle_put_cors(
ctx: ReqCtx,
req: Request<ReqBody>,
content_sha256: Option<Hash>,
) -> Result<Response<ResBody>, Error> {
let ReqCtx {
garage,
@ -77,11 +63,7 @@ pub async fn handle_put_cors(
..
} = ctx;
let body = BodyExt::collect(req.into_body()).await?.to_bytes();
if let Some(content_sha256) = content_sha256 {
verify_signed_content(content_sha256, &body[..])?;
}
let body = req.into_body().collect().await?;
let conf: CorsConfiguration = from_reader(&body as &[u8])?;
conf.validate()?;
@ -99,154 +81,6 @@ pub async fn handle_put_cors(
.body(empty_body())?)
}
pub async fn handle_options_api(
garage: Arc<Garage>,
req: &Request<IncomingBody>,
bucket_name: Option<String>,
) -> Result<Response<EmptyBody>, CommonError> {
// FIXME: CORS rules of buckets with local aliases are
// not taken into account.
// If the bucket name is a global bucket name,
// we try to apply the CORS rules of that bucket.
// If a user has a local bucket name that has
// the same name, its CORS rules won't be applied
// and will be shadowed by the rules of the globally
// existing bucket (but this is inevitable because
// OPTIONS calls are not authenticated).
if let Some(bn) = bucket_name {
let helper = garage.bucket_helper();
let bucket_id = helper.resolve_global_bucket_name(&bn).await?;
if let Some(id) = bucket_id {
let bucket = garage.bucket_helper().get_existing_bucket(id).await?;
let bucket_params = bucket.state.into_option().unwrap();
handle_options_for_bucket(req, &bucket_params)
} else {
// If there is a bucket name in the request, but that name
// does not correspond to a global alias for a bucket,
// then it's either a non-existing bucket or a local bucket.
// We have no way of knowing, because the request is not
// authenticated and thus we can't resolve local aliases.
// We take the permissive approach of allowing everything,
// because we don't want to prevent web apps that use
// local bucket names from making API calls.
Ok(Response::builder()
.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
.header(ACCESS_CONTROL_ALLOW_METHODS, "*")
.status(StatusCode::OK)
.body(EmptyBody::new())?)
}
} else {
// If there is no bucket name in the request,
// we are doing a ListBuckets call, which we want to allow
// for all origins.
Ok(Response::builder()
.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
.header(ACCESS_CONTROL_ALLOW_METHODS, "GET")
.status(StatusCode::OK)
.body(EmptyBody::new())?)
}
}
pub fn handle_options_for_bucket(
req: &Request<IncomingBody>,
bucket_params: &BucketParams,
) -> Result<Response<EmptyBody>, CommonError> {
let origin = req
.headers()
.get("Origin")
.ok_or_bad_request("Missing Origin header")?
.to_str()?;
let request_method = req
.headers()
.get(ACCESS_CONTROL_REQUEST_METHOD)
.ok_or_bad_request("Missing Access-Control-Request-Method header")?
.to_str()?;
let request_headers = match req.headers().get(ACCESS_CONTROL_REQUEST_HEADERS) {
Some(h) => h.to_str()?.split(',').map(|h| h.trim()).collect::<Vec<_>>(),
None => vec![],
};
if let Some(cors_config) = bucket_params.cors_config.get() {
let matching_rule = cors_config
.iter()
.find(|rule| cors_rule_matches(rule, origin, request_method, request_headers.iter()));
if let Some(rule) = matching_rule {
let mut resp = Response::builder()
.status(StatusCode::OK)
.body(EmptyBody::new())?;
add_cors_headers(&mut resp, rule).ok_or_internal_error("Invalid CORS configuration")?;
return Ok(resp);
}
}
Err(CommonError::Forbidden(
"This CORS request is not allowed.".into(),
))
}
pub fn find_matching_cors_rule<'a>(
bucket_params: &'a BucketParams,
req: &Request<impl Body>,
) -> Result<Option<&'a GarageCorsRule>, Error> {
if let Some(cors_config) = bucket_params.cors_config.get() {
if let Some(origin) = req.headers().get("Origin") {
let origin = origin.to_str()?;
let request_headers = match req.headers().get(ACCESS_CONTROL_REQUEST_HEADERS) {
Some(h) => h.to_str()?.split(',').map(|h| h.trim()).collect::<Vec<_>>(),
None => vec![],
};
return Ok(cors_config.iter().find(|rule| {
cors_rule_matches(rule, origin, req.method().as_ref(), request_headers.iter())
}));
}
}
Ok(None)
}
fn cors_rule_matches<'a, HI, S>(
rule: &GarageCorsRule,
origin: &'a str,
method: &'a str,
mut request_headers: HI,
) -> bool
where
HI: Iterator<Item = S>,
S: AsRef<str>,
{
rule.allow_origins.iter().any(|x| x == "*" || x == origin)
&& rule.allow_methods.iter().any(|x| x == "*" || x == method)
&& request_headers.all(|h| {
rule.allow_headers
.iter()
.any(|x| x == "*" || x == h.as_ref())
})
}
pub fn add_cors_headers(
resp: &mut Response<impl Body>,
rule: &GarageCorsRule,
) -> Result<(), http::header::InvalidHeaderValue> {
let h = resp.headers_mut();
h.insert(
ACCESS_CONTROL_ALLOW_ORIGIN,
rule.allow_origins.join(", ").parse()?,
);
h.insert(
ACCESS_CONTROL_ALLOW_METHODS,
rule.allow_methods.join(", ").parse()?,
);
h.insert(
ACCESS_CONTROL_ALLOW_HEADERS,
rule.allow_headers.join(", ").parse()?,
);
h.insert(
ACCESS_CONTROL_EXPOSE_HEADERS,
rule.expose_headers.join(", ").parse()?,
);
Ok(())
}
// ---- SERIALIZATION AND DESERIALIZATION TO/FROM S3 XML ----
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]

View file

@ -1,16 +1,15 @@
use http_body_util::BodyExt;
use hyper::{Request, Response, StatusCode};
use garage_util::data::*;
use garage_model::s3::object_table::*;
use crate::helpers::*;
use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::error::*;
use crate::s3::put::next_timestamp;
use crate::s3::xml as s3_xml;
use crate::signature::verify_signed_content;
use garage_api_common::helpers::*;
use crate::api_server::{ReqBody, ResBody};
use crate::error::*;
use crate::put::next_timestamp;
use crate::xml as s3_xml;
async fn handle_delete_internal(ctx: &ReqCtx, key: &str) -> Result<(Uuid, Uuid), Error> {
let ReqCtx {
@ -67,13 +66,8 @@ pub async fn handle_delete(ctx: ReqCtx, key: &str) -> Result<Response<ResBody>,
pub async fn handle_delete_objects(
ctx: ReqCtx,
req: Request<ReqBody>,
content_sha256: Option<Hash>,
) -> Result<Response<ResBody>, Error> {
let body = BodyExt::collect(req.into_body()).await?.to_bytes();
if let Some(content_sha256) = content_sha256 {
verify_signed_content(content_sha256, &body[..])?;
}
let body = req.into_body().collect().await?;
let cmd_xml = roxmltree::Document::parse(std::str::from_utf8(&body)?)?;
let cmd = parse_delete_objects_xml(&cmd_xml).ok_or_bad_request("Invalid delete XML query")?;

View file

@ -28,9 +28,10 @@ use garage_util::migrate::Migrate;
use garage_model::garage::Garage;
use garage_model::s3::object_table::{ObjectVersionEncryption, ObjectVersionMetaInner};
use crate::common_error::*;
use crate::s3::checksum::Md5Checksum;
use crate::s3::error::Error;
use garage_api_common::common_error::*;
use garage_api_common::signature::checksum::Md5Checksum;
use crate::error::Error;
const X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM: HeaderName =
HeaderName::from_static("x-amz-server-side-encryption-customer-algorithm");

View file

@ -4,19 +4,30 @@ use err_derive::Error;
use hyper::header::HeaderValue;
use hyper::{HeaderMap, StatusCode};
use crate::common_error::CommonError;
pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInternalError};
use crate::generic_server::ApiError;
use crate::helpers::*;
use crate::s3::xml as s3_xml;
use crate::signature::error::Error as SignatureError;
use garage_model::helper::error::Error as HelperError;
pub(crate) use garage_api_common::common_error::pass_helper_error;
use garage_api_common::common_error::{
commonErrorDerivative, helper_error_as_internal, CommonError,
};
pub use garage_api_common::common_error::{
CommonErrorDerivative, OkOrBadRequest, OkOrInternalError,
};
use garage_api_common::generic_server::ApiError;
use garage_api_common::helpers::*;
use garage_api_common::signature::error::Error as SignatureError;
use crate::xml as s3_xml;
/// Errors of this crate
#[derive(Debug, Error)]
pub enum Error {
#[error(display = "{}", _0)]
/// Error from common error
Common(CommonError),
Common(#[error(source)] CommonError),
// Category: cannot process
/// Authorization Header Malformed
@ -69,7 +80,7 @@ pub enum Error {
#[error(display = "Invalid encryption algorithm: {:?}, should be AES256", _0)]
InvalidEncryptionAlgorithm(String),
/// The client sent invalid XML data
/// The provided digest (checksum) value was invalid
#[error(display = "Invalid digest: {}", _0)]
InvalidDigest(String),
@ -78,17 +89,16 @@ pub enum Error {
NotImplemented(String),
}
impl<T> From<T> for Error
where
CommonError: From<T>,
{
fn from(err: T) -> Self {
Error::Common(CommonError::from(err))
commonErrorDerivative!(Error);
// Helper errors are always passed as internal errors by default.
// To pass the specific error code back to the client, use `pass_helper_error`.
impl From<HelperError> for Error {
fn from(err: HelperError) -> Error {
Error::Common(helper_error_as_internal(err))
}
}
impl CommonErrorDerivative for Error {}
impl From<roxmltree::Error> for Error {
fn from(err: roxmltree::Error) -> Self {
Self::InvalidXml(format!("{}", err))
@ -109,6 +119,7 @@ impl From<SignatureError> for Error {
Self::AuthorizationHeaderMalformed(c)
}
SignatureError::InvalidUtf8Str(i) => Self::InvalidUtf8Str(i),
SignatureError::InvalidDigest(d) => Self::InvalidDigest(d),
}
}
}

View file

@ -2,17 +2,17 @@
use std::collections::BTreeMap;
use std::convert::TryInto;
use std::sync::Arc;
use std::time::{Duration, UNIX_EPOCH};
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use bytes::Bytes;
use futures::future;
use futures::stream::{self, Stream, StreamExt};
use http::header::{
ACCEPT_RANGES, CACHE_CONTROL, CONTENT_DISPOSITION, CONTENT_ENCODING, CONTENT_LANGUAGE,
CONTENT_LENGTH, CONTENT_RANGE, CONTENT_TYPE, ETAG, EXPIRES, IF_MODIFIED_SINCE, IF_NONE_MATCH,
LAST_MODIFIED, RANGE,
HeaderMap, HeaderName, ACCEPT_RANGES, CACHE_CONTROL, CONTENT_DISPOSITION, CONTENT_ENCODING,
CONTENT_LANGUAGE, CONTENT_LENGTH, CONTENT_RANGE, CONTENT_TYPE, ETAG, EXPIRES, IF_MATCH,
IF_MODIFIED_SINCE, IF_NONE_MATCH, IF_UNMODIFIED_SINCE, LAST_MODIFIED, RANGE,
};
use hyper::{body::Body, Request, Response, StatusCode};
use hyper::{Request, Response, StatusCode};
use tokio::sync::mpsc;
use garage_net::stream::ByteStream;
@ -25,13 +25,15 @@ use garage_model::garage::Garage;
use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*;
use crate::helpers::*;
use crate::s3::api_server::ResBody;
use crate::s3::checksum::{add_checksum_response_headers, X_AMZ_CHECKSUM_MODE};
use crate::s3::encryption::EncryptionParams;
use crate::s3::error::*;
use garage_api_common::helpers::*;
use garage_api_common::signature::checksum::{add_checksum_response_headers, X_AMZ_CHECKSUM_MODE};
const X_AMZ_MP_PARTS_COUNT: &str = "x-amz-mp-parts-count";
use crate::api_server::ResBody;
use crate::copy::*;
use crate::encryption::EncryptionParams;
use crate::error::*;
const X_AMZ_MP_PARTS_COUNT: HeaderName = HeaderName::from_static("x-amz-mp-parts-count");
#[derive(Default)]
pub struct GetObjectOverrides {
@ -68,14 +70,11 @@ fn object_headers(
// See: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingMetadata.html
let mut headers_by_name = BTreeMap::new();
for (name, value) in meta_inner.headers.iter() {
match headers_by_name.get_mut(name) {
None => {
headers_by_name.insert(name, vec![value.as_str()]);
}
Some(headers) => {
headers.push(value.as_str());
}
}
let name_lower = name.to_ascii_lowercase();
headers_by_name
.entry(name_lower)
.or_insert(vec![])
.push(value.as_str());
}
for (name, values) in headers_by_name {
@ -117,49 +116,29 @@ fn getobject_override_headers(
Ok(())
}
fn try_answer_cached(
fn handle_http_precondition(
version: &ObjectVersion,
version_meta: &ObjectVersionMeta,
req: &Request<impl Body>,
) -> Option<Response<ResBody>> {
// <trinity> It is possible, and is even usually the case, [that both If-None-Match and
// If-Modified-Since] are present in a request. In this situation If-None-Match takes
// precedence and If-Modified-Since is ignored (as per 6. Precedence from RFC 7232). The rationale
// being that etag-based matching is more accurate; it has no issue with sub-second precision,
// for instance (in case of very fast updates)
let cached = if let Some(none_match) = req.headers().get(IF_NONE_MATCH) {
let none_match = none_match.to_str().ok()?;
let expected = format!("\"{}\"", version_meta.etag);
let found = none_match
.split(',')
.map(str::trim)
.any(|etag| etag == expected || etag == "\"*\"");
found
} else if let Some(modified_since) = req.headers().get(IF_MODIFIED_SINCE) {
let modified_since = modified_since.to_str().ok()?;
let client_date = httpdate::parse_http_date(modified_since).ok()?;
let server_date = UNIX_EPOCH + Duration::from_millis(version.timestamp);
client_date >= server_date
} else {
false
};
req: &Request<()>,
) -> Result<Option<Response<ResBody>>, Error> {
let precondition_headers = PreconditionHeaders::parse(req)?;
if cached {
Some(
if let Some(status_code) = precondition_headers.check(&version, &version_meta.etag)? {
Ok(Some(
Response::builder()
.status(StatusCode::NOT_MODIFIED)
.status(status_code)
.body(empty_body())
.unwrap(),
)
))
} else {
None
Ok(None)
}
}
/// Handle HEAD request
pub async fn handle_head(
ctx: ReqCtx,
req: &Request<impl Body>,
req: &Request<()>,
key: &str,
part_number: Option<u64>,
) -> Result<Response<ResBody>, Error> {
@ -169,7 +148,7 @@ pub async fn handle_head(
/// Handle HEAD request for website
pub async fn handle_head_without_ctx(
garage: Arc<Garage>,
req: &Request<impl Body>,
req: &Request<()>,
bucket_id: Uuid,
key: &str,
part_number: Option<u64>,
@ -198,8 +177,8 @@ pub async fn handle_head_without_ctx(
_ => unreachable!(),
};
if let Some(cached) = try_answer_cached(object_version, version_meta, req) {
return Ok(cached);
if let Some(res) = handle_http_precondition(object_version, version_meta, req)? {
return Ok(res);
}
let (encryption, headers) =
@ -280,7 +259,7 @@ pub async fn handle_head_without_ctx(
/// Handle GET request
pub async fn handle_get(
ctx: ReqCtx,
req: &Request<impl Body>,
req: &Request<()>,
key: &str,
part_number: Option<u64>,
overrides: GetObjectOverrides,
@ -291,7 +270,7 @@ pub async fn handle_get(
/// Handle GET request
pub async fn handle_get_without_ctx(
garage: Arc<Garage>,
req: &Request<impl Body>,
req: &Request<()>,
bucket_id: Uuid,
key: &str,
part_number: Option<u64>,
@ -320,8 +299,8 @@ pub async fn handle_get_without_ctx(
ObjectVersionData::FirstBlock(meta, _) => meta,
};
if let Some(cached) = try_answer_cached(last_v, last_v_meta, req) {
return Ok(cached);
if let Some(res) = handle_http_precondition(last_v, last_v_meta, req)? {
return Ok(res);
}
let (enc, headers) =
@ -342,7 +321,12 @@ pub async fn handle_get_without_ctx(
enc,
&headers,
pn,
checksum_mode,
ChecksumMode {
// TODO: for multipart uploads, checksums of each part should be stored
// so that we can return the corresponding checksum here
// https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
enabled: false,
},
)
.await
}
@ -356,7 +340,12 @@ pub async fn handle_get_without_ctx(
&headers,
range.start,
range.start + range.length,
checksum_mode,
ChecksumMode {
// TODO: for range queries that align with part boundaries,
// we should return the saved checksum of the part
// https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
enabled: false,
},
)
.await
}
@ -579,7 +568,7 @@ async fn handle_get_part(
}
fn parse_range_header(
req: &Request<impl Body>,
req: &Request<()>,
total_size: u64,
) -> Result<Option<http_range::HttpRange>, Error> {
let range = match req.headers().get(RANGE) {
@ -620,7 +609,7 @@ struct ChecksumMode {
enabled: bool,
}
fn checksum_mode(req: &Request<impl Body>) -> ChecksumMode {
fn checksum_mode(req: &Request<()>) -> ChecksumMode {
ChecksumMode {
enabled: req
.headers()
@ -753,3 +742,116 @@ fn std_error_from_read_error<E: std::fmt::Display>(e: E) -> std::io::Error {
format!("Error while reading object data: {}", e),
)
}
// ----
pub struct PreconditionHeaders {
if_match: Option<Vec<String>>,
if_modified_since: Option<SystemTime>,
if_none_match: Option<Vec<String>>,
if_unmodified_since: Option<SystemTime>,
}
impl PreconditionHeaders {
fn parse<B>(req: &Request<B>) -> Result<Self, Error> {
Self::parse_with(
req.headers(),
&IF_MATCH,
&IF_NONE_MATCH,
&IF_MODIFIED_SINCE,
&IF_UNMODIFIED_SINCE,
)
}
pub(crate) fn parse_copy_source<B>(req: &Request<B>) -> Result<Self, Error> {
Self::parse_with(
req.headers(),
&X_AMZ_COPY_SOURCE_IF_MATCH,
&X_AMZ_COPY_SOURCE_IF_NONE_MATCH,
&X_AMZ_COPY_SOURCE_IF_MODIFIED_SINCE,
&X_AMZ_COPY_SOURCE_IF_UNMODIFIED_SINCE,
)
}
fn parse_with(
headers: &HeaderMap,
hdr_if_match: &HeaderName,
hdr_if_none_match: &HeaderName,
hdr_if_modified_since: &HeaderName,
hdr_if_unmodified_since: &HeaderName,
) -> Result<Self, Error> {
Ok(Self {
if_match: headers
.get(hdr_if_match)
.map(|x| x.to_str())
.transpose()?
.map(|x| {
x.split(',')
.map(|m| m.trim().trim_matches('"').to_string())
.collect::<Vec<_>>()
}),
if_none_match: headers
.get(hdr_if_none_match)
.map(|x| x.to_str())
.transpose()?
.map(|x| {
x.split(',')
.map(|m| m.trim().trim_matches('"').to_string())
.collect::<Vec<_>>()
}),
if_modified_since: headers
.get(hdr_if_modified_since)
.map(|x| x.to_str())
.transpose()?
.map(httpdate::parse_http_date)
.transpose()
.ok_or_bad_request("Invalid date in if-modified-since")?,
if_unmodified_since: headers
.get(hdr_if_unmodified_since)
.map(|x| x.to_str())
.transpose()?
.map(httpdate::parse_http_date)
.transpose()
.ok_or_bad_request("Invalid date in if-unmodified-since")?,
})
}
fn check(&self, v: &ObjectVersion, etag: &str) -> Result<Option<StatusCode>, Error> {
let v_date = UNIX_EPOCH + Duration::from_millis(v.timestamp);
// Implemented from https://datatracker.ietf.org/doc/html/rfc7232#section-6
if let Some(im) = &self.if_match {
// Step 1: if-match is present
if !im.iter().any(|x| x == etag || x == "*") {
return Ok(Some(StatusCode::PRECONDITION_FAILED));
}
} else if let Some(ius) = &self.if_unmodified_since {
// Step 2: if-unmodified-since is present, and if-match is absent
if v_date > *ius {
return Ok(Some(StatusCode::PRECONDITION_FAILED));
}
}
if let Some(inm) = &self.if_none_match {
// Step 3: if-none-match is present
if inm.iter().any(|x| x == etag || x == "*") {
return Ok(Some(StatusCode::NOT_MODIFIED));
}
} else if let Some(ims) = &self.if_modified_since {
// Step 4: if-modified-since is present, and if-none-match is absent
if v_date <= *ims {
return Ok(Some(StatusCode::NOT_MODIFIED));
}
}
Ok(None)
}
pub(crate) fn check_copy_source(&self, v: &ObjectVersion, etag: &str) -> Result<(), Error> {
match self.check(v, etag)? {
Some(_) => Err(Error::PreconditionFailed),
None => Ok(()),
}
}
}
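
For reference, here is a condensed, standalone sketch of the RFC 7232 evaluation order implemented by check() above, using plain types rather than Garage's actual API (all names here are illustrative). The key point is that the ETag-based conditions take precedence over the date-based ones: If-Match is consulted before If-Unmodified-Since, and If-None-Match before If-Modified-Since.

use std::time::SystemTime;

enum Outcome {
    Proceed,
    PreconditionFailed, // mapped to HTTP 412
    NotModified,        // mapped to HTTP 304 for GET/HEAD
}

// Minimal re-statement of the precedence rules from RFC 7232 section 6.
fn evaluate(
    if_match: Option<&[&str]>,
    if_unmodified_since: Option<SystemTime>,
    if_none_match: Option<&[&str]>,
    if_modified_since: Option<SystemTime>,
    etag: &str,
    last_modified: SystemTime,
) -> Outcome {
    if let Some(im) = if_match {
        // Step 1: If-Match wins over If-Unmodified-Since
        if !im.iter().any(|x| *x == etag || *x == "*") {
            return Outcome::PreconditionFailed;
        }
    } else if if_unmodified_since.map_or(false, |ius| last_modified > ius) {
        // Step 2: only consulted when If-Match is absent
        return Outcome::PreconditionFailed;
    }
    if let Some(inm) = if_none_match {
        // Step 3: If-None-Match wins over If-Modified-Since
        if inm.iter().any(|x| *x == etag || *x == "*") {
            return Outcome::NotModified;
        }
    } else if if_modified_since.map_or(false, |ims| last_modified <= ims) {
        // Step 4: only consulted when If-None-Match is absent
        return Outcome::NotModified;
    }
    Outcome::Proceed
}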

View file

@ -1,3 +1,6 @@
#[macro_use]
extern crate tracing;
pub mod api_server;
pub mod error;
@ -11,9 +14,8 @@ mod list;
mod multipart;
mod post_object;
mod put;
mod website;
pub mod website;
mod checksum;
mod encryption;
mod router;
pub mod xml;

View file

@ -1,21 +1,19 @@
use quick_xml::de::from_reader;
use http_body_util::BodyExt;
use hyper::{Request, Response, StatusCode};
use serde::{Deserialize, Serialize};
use crate::helpers::*;
use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::error::*;
use crate::s3::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
use crate::signature::verify_signed_content;
use garage_api_common::helpers::*;
use crate::api_server::{ReqBody, ResBody};
use crate::error::*;
use crate::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
use garage_model::bucket_table::{
parse_lifecycle_date, Bucket, LifecycleExpiration as GarageLifecycleExpiration,
LifecycleFilter as GarageLifecycleFilter, LifecycleRule as GarageLifecycleRule,
};
use garage_util::data::*;
pub async fn handle_get_lifecycle(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
let ReqCtx { bucket_params, .. } = ctx;
@ -55,7 +53,6 @@ pub async fn handle_delete_lifecycle(ctx: ReqCtx) -> Result<Response<ResBody>, E
pub async fn handle_put_lifecycle(
ctx: ReqCtx,
req: Request<ReqBody>,
content_sha256: Option<Hash>,
) -> Result<Response<ResBody>, Error> {
let ReqCtx {
garage,
@ -64,11 +61,7 @@ pub async fn handle_put_lifecycle(
..
} = ctx;
let body = BodyExt::collect(req.into_body()).await?.to_bytes();
if let Some(content_sha256) = content_sha256 {
verify_signed_content(content_sha256, &body[..])?;
}
let body = req.into_body().collect().await?;
let conf: LifecycleConfiguration = from_reader(&body as &[u8])?;
let config = conf

View file

@ -13,13 +13,14 @@ use garage_model::s3::object_table::*;
use garage_table::EnumerationOrder;
use crate::encoding::*;
use crate::helpers::*;
use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::encryption::EncryptionParams;
use crate::s3::error::*;
use crate::s3::multipart as s3_multipart;
use crate::s3::xml as s3_xml;
use garage_api_common::encoding::*;
use garage_api_common::helpers::*;
use crate::api_server::{ReqBody, ResBody};
use crate::encryption::EncryptionParams;
use crate::error::*;
use crate::multipart as s3_multipart;
use crate::xml as s3_xml;
const DUMMY_NAME: &str = "Dummy Key";
const DUMMY_KEY: &str = "GKDummyKey";
@ -53,7 +54,6 @@ pub struct ListMultipartUploadsQuery {
#[derive(Debug)]
pub struct ListPartsQuery {
pub bucket_name: String,
pub bucket_id: Uuid,
pub key: String,
pub upload_id: String,
pub part_number_marker: Option<u64>,
@ -398,7 +398,7 @@ enum ExtractionResult {
key: String,
},
// Fallback key is used for legacy APIs that only support
// exlusive pagination (and not inclusive one).
// exclusive pagination (and not inclusive one).
SkipTo {
key: String,
fallback_key: Option<String>,
@ -408,7 +408,7 @@ enum ExtractionResult {
#[derive(PartialEq, Clone, Debug)]
enum RangeBegin {
// Fallback key is used for legacy APIs that only support
// exlusive pagination (and not inclusive one).
// exclusive pagination (and not inclusive one).
IncludingKey {
key: String,
fallback_key: Option<String>,
@ -1244,10 +1244,8 @@ mod tests {
#[test]
fn test_fetch_part_info() -> Result<(), Error> {
let uuid = Uuid::from([0x08; 32]);
let mut query = ListPartsQuery {
bucket_name: "a".to_string(),
bucket_id: uuid,
key: "a".to_string(),
upload_id: "xx".to_string(),
part_number_marker: None,

View file

@ -1,13 +1,20 @@
use std::collections::HashMap;
use std::convert::TryInto;
use std::convert::{TryFrom, TryInto};
use std::hash::Hasher;
use std::sync::Arc;
use base64::prelude::*;
use crc32c::Crc32cHasher as Crc32c;
use crc32fast::Hasher as Crc32;
use futures::prelude::*;
use hyper::{Request, Response};
use md5::{Digest, Md5};
use sha1::Sha1;
use sha2::Sha256;
use garage_table::*;
use garage_util::data::*;
use garage_util::error::OkOrMessage;
use garage_model::garage::Garage;
use garage_model::s3::block_ref_table::*;
@ -15,14 +22,14 @@ use garage_model::s3::mpu_table::*;
use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*;
use crate::helpers::*;
use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::checksum::*;
use crate::s3::encryption::EncryptionParams;
use crate::s3::error::*;
use crate::s3::put::*;
use crate::s3::xml as s3_xml;
use crate::signature::verify_signed_content;
use garage_api_common::helpers::*;
use garage_api_common::signature::checksum::*;
use crate::api_server::{ReqBody, ResBody};
use crate::encryption::EncryptionParams;
use crate::error::*;
use crate::put::*;
use crate::xml as s3_xml;
// ----
@ -42,7 +49,7 @@ pub async fn handle_create_multipart_upload(
let upload_id = gen_uuid();
let timestamp = next_timestamp(existing_object.as_ref());
let headers = get_headers(req.headers())?;
let headers = extract_metadata_headers(req.headers())?;
let meta = ObjectVersionMetaInner {
headers,
checksum: None,
@ -93,7 +100,6 @@ pub async fn handle_put_part(
key: &str,
part_number: u64,
upload_id: &str,
content_sha256: Option<Hash>,
) -> Result<Response<ResBody>, Error> {
let ReqCtx { garage, .. } = &ctx;
@ -104,17 +110,30 @@ pub async fn handle_put_part(
Some(x) => Some(x.to_str()?.to_string()),
None => None,
},
sha256: content_sha256,
sha256: None,
extra: request_checksum_value(req.headers())?,
};
// Read first chunk, and at the same time try to get the object to see if it exists
let key = key.to_string();
let (req_head, req_body) = req.into_parts();
let stream = body_stream(req_body);
let (req_head, mut req_body) = req.into_parts();
// Before we stream the body, configure the needed checksums.
req_body.add_expected_checksums(expected_checksums.clone());
// TODO: avoid parsing encryption headers twice...
if !EncryptionParams::new_from_headers(&garage, &req_head.headers)?.is_encrypted() {
// For non-encrypted objects, we need to compute the md5sum in all cases
// (even if content-md5 is not set), because it is used as an etag of the
// part, which is in turn used in the etag computation of the whole object
req_body.add_md5();
}
let (stream, stream_checksums) = req_body.streaming_with_checksums();
let stream = stream.map_err(Error::from);
let mut chunker = StreamChunker::new(stream, garage.config.block_size);
// Read first chunk, and at the same time try to get the object to see if it exists
let ((_, object_version, mut mpu), first_block) =
futures::try_join!(get_upload(&ctx, &key, &upload_id), chunker.next(),)?;
@ -171,21 +190,21 @@ pub async fn handle_put_part(
garage.version_table.insert(&version).await?;
// Copy data to version
let checksummer =
Checksummer::init(&expected_checksums, !encryption.is_encrypted()).add(checksum_algorithm);
let (total_size, checksums, _) = read_and_put_blocks(
let (total_size, _, _) = read_and_put_blocks(
&ctx,
&version,
encryption,
part_number,
first_block,
&mut chunker,
checksummer,
chunker,
Checksummer::new(),
)
.await?;
// Verify that checksums map
checksums.verify(&expected_checksums)?;
// Verify that checksums match
let checksums = stream_checksums
.await
.ok_or_internal_error("checksum calculation")??;
// Store part etag in version
let etag = encryption.etag_from_md5(&checksums.md5);
@ -247,7 +266,6 @@ pub async fn handle_complete_multipart_upload(
req: Request<ReqBody>,
key: &str,
upload_id: &str,
content_sha256: Option<Hash>,
) -> Result<Response<ResBody>, Error> {
let ReqCtx {
garage,
@ -259,11 +277,7 @@ pub async fn handle_complete_multipart_upload(
let expected_checksum = request_checksum_value(&req_head.headers)?;
let body = http_body_util::BodyExt::collect(req_body).await?.to_bytes();
if let Some(content_sha256) = content_sha256 {
verify_signed_content(content_sha256, &body[..])?;
}
let body = req_body.collect().await?;
let body_xml = roxmltree::Document::parse(std::str::from_utf8(&body)?)?;
let body_list_of_parts = parse_complete_multipart_upload_body(&body_xml)
@ -429,7 +443,16 @@ pub async fn handle_complete_multipart_upload(
// Send response saying ok we're done
let result = s3_xml::CompleteMultipartUploadResult {
xmlns: (),
location: None,
// FIXME: the location returned is not always correct:
// - we always return https, but maybe some people do http
// - if root_domain is not specified, a full URL is not returned
location: garage
.config
.s3_api
.root_domain
.as_ref()
.map(|rd| s3_xml::Value(format!("https://{}.{}/{}", bucket_name, rd, key)))
.or(Some(s3_xml::Value(format!("/{}/{}", bucket_name, key)))),
bucket: s3_xml::Value(bucket_name.to_string()),
key: s3_xml::Value(key),
etag: s3_xml::Value(format!("\"{}\"", etag)),
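
Illustrative outputs of the Location computation above, with hypothetical config values:

// root_domain = Some("s3.example.com"), bucket "media", key "a/b.txt"
//   -> Location = "https://media.s3.example.com/a/b.txt"
// root_domain = None
//   -> Location = "/media/a/b.txt"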
@ -592,3 +615,99 @@ fn parse_complete_multipart_upload_body(
Some(parts)
}
// ====== checksummer ====
#[derive(Default)]
pub(crate) struct MultipartChecksummer {
pub md5: Md5,
pub extra: Option<MultipartExtraChecksummer>,
}
pub(crate) enum MultipartExtraChecksummer {
Crc32(Crc32),
Crc32c(Crc32c),
Sha1(Sha1),
Sha256(Sha256),
}
impl MultipartChecksummer {
pub(crate) fn init(algo: Option<ChecksumAlgorithm>) -> Self {
Self {
md5: Md5::new(),
extra: match algo {
None => None,
Some(ChecksumAlgorithm::Crc32) => {
Some(MultipartExtraChecksummer::Crc32(Crc32::new()))
}
Some(ChecksumAlgorithm::Crc32c) => {
Some(MultipartExtraChecksummer::Crc32c(Crc32c::default()))
}
Some(ChecksumAlgorithm::Sha1) => Some(MultipartExtraChecksummer::Sha1(Sha1::new())),
Some(ChecksumAlgorithm::Sha256) => {
Some(MultipartExtraChecksummer::Sha256(Sha256::new()))
}
},
}
}
pub(crate) fn update(
&mut self,
etag: &str,
checksum: Option<ChecksumValue>,
) -> Result<(), Error> {
self.md5
.update(&hex::decode(&etag).ok_or_message("invalid etag hex")?);
match (&mut self.extra, checksum) {
(None, _) => (),
(
Some(MultipartExtraChecksummer::Crc32(ref mut crc32)),
Some(ChecksumValue::Crc32(x)),
) => {
crc32.update(&x);
}
(
Some(MultipartExtraChecksummer::Crc32c(ref mut crc32c)),
Some(ChecksumValue::Crc32c(x)),
) => {
crc32c.write(&x);
}
(Some(MultipartExtraChecksummer::Sha1(ref mut sha1)), Some(ChecksumValue::Sha1(x))) => {
sha1.update(&x);
}
(
Some(MultipartExtraChecksummer::Sha256(ref mut sha256)),
Some(ChecksumValue::Sha256(x)),
) => {
sha256.update(&x);
}
(Some(_), b) => {
return Err(Error::internal_error(format!(
"part checksum was not computed correctly, got: {:?}",
b
)))
}
}
Ok(())
}
pub(crate) fn finalize(self) -> (Md5Checksum, Option<ChecksumValue>) {
let md5 = self.md5.finalize()[..].try_into().unwrap();
let extra = match self.extra {
None => None,
Some(MultipartExtraChecksummer::Crc32(crc32)) => {
Some(ChecksumValue::Crc32(u32::to_be_bytes(crc32.finalize())))
}
Some(MultipartExtraChecksummer::Crc32c(crc32c)) => Some(ChecksumValue::Crc32c(
u32::to_be_bytes(u32::try_from(crc32c.finish()).unwrap()),
)),
Some(MultipartExtraChecksummer::Sha1(sha1)) => {
Some(ChecksumValue::Sha1(sha1.finalize()[..].try_into().unwrap()))
}
Some(MultipartExtraChecksummer::Sha256(sha256)) => Some(ChecksumValue::Sha256(
sha256.finalize()[..].try_into().unwrap(),
)),
};
(md5, extra)
}
}
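
The MD5 half of this checksummer reproduces the conventional S3 multipart ETag: the MD5 of the concatenated binary MD5 digests of all parts, suffixed with the part count. A minimal standalone sketch using the same md-5 and hex crates as the code above (multipart_etag is a hypothetical helper, not Garage's API):

use md5::{Digest, Md5};

// Compute an S3-style multipart ETag from per-part ETags,
// each of which is the hex MD5 of that part's data.
fn multipart_etag(part_etags: &[&str]) -> String {
    let mut hasher = Md5::new();
    for etag in part_etags {
        hasher.update(hex::decode(etag).expect("part etag is valid hex"));
    }
    format!("{}-{}", hex::encode(hasher.finalize()), part_etags.len())
}

fn main() {
    let parts = [
        "5d41402abc4b2a76b9719d911017c592", // md5("hello")
        "7d793037a0760186574b0282f2f435e7", // md5("world")
    ];
    println!("{}", multipart_etag(&parts)); // "<32 hex chars>-2"
}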

View file

@ -16,15 +16,16 @@ use serde::Deserialize;
use garage_model::garage::Garage;
use garage_model::s3::object_table::*;
use crate::helpers::*;
use crate::s3::api_server::ResBody;
use crate::s3::checksum::*;
use crate::s3::cors::*;
use crate::s3::encryption::EncryptionParams;
use crate::s3::error::*;
use crate::s3::put::{get_headers, save_stream, ChecksumMode};
use crate::s3::xml as s3_xml;
use crate::signature::payload::{verify_v4, Authorization};
use garage_api_common::cors::*;
use garage_api_common::helpers::*;
use garage_api_common::signature::checksum::*;
use garage_api_common::signature::payload::{verify_v4, Authorization};
use crate::api_server::ResBody;
use crate::encryption::EncryptionParams;
use crate::error::*;
use crate::put::{extract_metadata_headers, save_stream, ChecksumMode};
use crate::xml as s3_xml;
pub async fn handle_post_object(
garage: Arc<Garage>,
@ -71,21 +72,11 @@ pub async fn handle_post_object(
}
if let Ok(content) = HeaderValue::from_str(&field.text().await?) {
match name.as_str() {
"tag" => (/* tag need to be reencoded, but we don't support them yet anyway */),
"acl" => {
if params.insert("x-amz-acl", content).is_some() {
return Err(Error::bad_request("Field 'acl' provided more than once"));
}
}
_ => {
if params.insert(&name, content).is_some() {
return Err(Error::bad_request(format!(
"Field '{}' provided more than once",
name
)));
}
}
if params.insert(&name, content).is_some() {
return Err(Error::bad_request(format!(
"Field '{}' provided more than once",
name
)));
}
}
};
@ -117,7 +108,8 @@ pub async fn handle_post_object(
let bucket_id = garage
.bucket_helper()
.resolve_bucket(&bucket_name, &api_key)
.await?;
.await
.map_err(pass_helper_error)?;
if !api_key.allow_write(&bucket_id) {
return Err(Error::forbidden("Operation is not allowed for this key."));
@ -222,8 +214,11 @@ pub async fn handle_post_object(
)));
}
let headers = get_headers(&params)?;
// if we ever start supporting ACLs, we will likely want to map "acl" to "x-amz-acl" somewhere
// around here to make sure the rest of the machinery takes our acl into account.
let headers = extract_metadata_headers(&params)?;
let checksum_algorithm = request_checksum_algorithm(&params)?;
let expected_checksums = ExpectedChecksums {
md5: params
.get("content-md5")
@ -231,7 +226,9 @@ pub async fn handle_post_object(
.transpose()?
.map(str::to_string),
sha256: None,
extra: request_checksum_algorithm_value(&params)?,
extra: checksum_algorithm
.map(|algo| extract_checksum_value(&params, algo))
.transpose()?,
};
let meta = ObjectVersionMetaInner {

View file

@ -30,11 +30,14 @@ use garage_model::s3::block_ref_table::*;
use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*;
use crate::helpers::*;
use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::checksum::*;
use crate::s3::encryption::EncryptionParams;
use crate::s3::error::*;
use garage_api_common::helpers::*;
use garage_api_common::signature::body::StreamingChecksumReceiver;
use garage_api_common::signature::checksum::*;
use crate::api_server::{ReqBody, ResBody};
use crate::encryption::EncryptionParams;
use crate::error::*;
use crate::website::X_AMZ_WEBSITE_REDIRECT_LOCATION;
const PUT_BLOCKS_MAX_PARALLEL: usize = 3;
@ -47,6 +50,10 @@ pub(crate) struct SaveStreamResult {
pub(crate) enum ChecksumMode<'a> {
Verify(&'a ExpectedChecksums),
VerifyFrom {
checksummer: StreamingChecksumReceiver,
trailer_algo: Option<ChecksumAlgorithm>,
},
Calculate(Option<ChecksumAlgorithm>),
}
@ -54,10 +61,9 @@ pub async fn handle_put(
ctx: ReqCtx,
req: Request<ReqBody>,
key: &String,
content_sha256: Option<Hash>,
) -> Result<Response<ResBody>, Error> {
// Retrieve interesting headers from request
let headers = get_headers(req.headers())?;
let headers = extract_metadata_headers(req.headers())?;
debug!("Object headers: {:?}", headers);
let expected_checksums = ExpectedChecksums {
@ -65,9 +71,10 @@ pub async fn handle_put(
Some(x) => Some(x.to_str()?.to_string()),
None => None,
},
sha256: content_sha256,
sha256: None,
extra: request_checksum_value(req.headers())?,
};
let trailer_checksum_algorithm = request_trailer_checksum_algorithm(req.headers())?;
let meta = ObjectVersionMetaInner {
headers,
@ -77,7 +84,19 @@ pub async fn handle_put(
// Determine whether object should be encrypted, and if so the key
let encryption = EncryptionParams::new_from_headers(&ctx.garage, req.headers())?;
let stream = body_stream(req.into_body());
// The request body is a special ReqBody object (see garage_api_common::signature::body)
// which supports calculating checksums while streaming the data.
// Before we start streaming, we configure it to calculate all the checksums we need.
let mut req_body = req.into_body();
req_body.add_expected_checksums(expected_checksums.clone());
if !encryption.is_encrypted() {
// For non-encrypted objects, we need to compute the md5sum in all cases
// (even if content-md5 is not set), because it is used as the object etag
req_body.add_md5();
}
let (stream, checksummer) = req_body.streaming_with_checksums();
let stream = stream.map_err(Error::from);
let res = save_stream(
&ctx,
@ -85,7 +104,10 @@ pub async fn handle_put(
encryption,
stream,
key,
ChecksumMode::Verify(&expected_checksums),
ChecksumMode::VerifyFrom {
checksummer,
trailer_algo: trailer_checksum_algorithm,
},
)
.await?;
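
A minimal sketch of the idea behind streaming_with_checksums(): fold each chunk into the hasher as it flows toward storage, and hand the digest back once the stream is exhausted. This uses plain iterators instead of Garage's async ReqBody, and all names are illustrative:

use sha2::{Digest, Sha256};

// Forward each chunk to `sink` while accumulating its checksum.
fn stream_with_checksum<I>(chunks: I, mut sink: impl FnMut(&[u8])) -> String
where
    I: IntoIterator<Item = Vec<u8>>,
{
    let mut hasher = Sha256::new();
    for chunk in chunks {
        hasher.update(&chunk); // the checksum advances as data streams by...
        sink(&chunk);          // ...while the chunk still reaches storage
    }
    hex::encode(hasher.finalize())
}

fn main() {
    let body = vec![b"hello ".to_vec(), b"world".to_vec()];
    let mut stored = Vec::new();
    let digest = stream_with_checksum(body, |c| stored.extend_from_slice(c));
    assert_eq!(stored, b"hello world");
    println!("sha256 = {}", digest);
}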
@ -121,10 +143,15 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
let version_uuid = gen_uuid();
let version_timestamp = next_timestamp(existing_object.as_ref());
let mut checksummer = match checksum_mode {
let mut checksummer = match &checksum_mode {
ChecksumMode::Verify(expected) => Checksummer::init(expected, !encryption.is_encrypted()),
ChecksumMode::Calculate(algo) => {
Checksummer::init(&Default::default(), !encryption.is_encrypted()).add(algo)
Checksummer::init(&Default::default(), !encryption.is_encrypted()).add(*algo)
}
ChecksumMode::VerifyFrom { .. } => {
// Checksums are calculated by the garage_api_common::signature module
// so here we can just have an empty checksummer that does nothing
Checksummer::new()
}
};
@ -132,7 +159,7 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
// as "inline data". We can then return immediately.
if first_block.len() < INLINE_THRESHOLD {
checksummer.update(&first_block);
let checksums = checksummer.finalize();
let mut checksums = checksummer.finalize();
match checksum_mode {
ChecksumMode::Verify(expected) => {
@ -141,6 +168,18 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
ChecksumMode::Calculate(algo) => {
meta.checksum = checksums.extract(algo);
}
ChecksumMode::VerifyFrom {
checksummer,
trailer_algo,
} => {
drop(chunker);
checksums = checksummer
.await
.ok_or_internal_error("checksum calculation")??;
if let Some(algo) = trailer_algo {
meta.checksum = checksums.extract(Some(algo));
}
}
};
let size = first_block.len() as u64;
@ -212,13 +251,13 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
garage.version_table.insert(&version).await?;
// Transfer data
let (total_size, checksums, first_block_hash) = read_and_put_blocks(
let (total_size, mut checksums, first_block_hash) = read_and_put_blocks(
ctx,
&version,
encryption,
1,
first_block,
&mut chunker,
chunker,
checksummer,
)
.await?;
@ -231,6 +270,17 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
ChecksumMode::Calculate(algo) => {
meta.checksum = checksums.extract(algo);
}
ChecksumMode::VerifyFrom {
checksummer,
trailer_algo,
} => {
checksums = checksummer
.await
.ok_or_internal_error("checksum calculation")??;
if let Some(algo) = trailer_algo {
meta.checksum = checksums.extract(Some(algo));
}
}
};
// Verify quotas are respected
@ -331,7 +381,7 @@ pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> +
encryption: EncryptionParams,
part_number: u64,
first_block: Bytes,
chunker: &mut StreamChunker<S>,
mut chunker: StreamChunker<S>,
checksummer: Checksummer,
) -> Result<(u64, Checksums, Hash), Error> {
let tracer = opentelemetry::global::tracer("garage");
@ -600,7 +650,9 @@ impl Drop for InterruptedCleanup {
// ============ helpers ============
pub(crate) fn get_headers(headers: &HeaderMap<HeaderValue>) -> Result<HeaderList, Error> {
pub(crate) fn extract_metadata_headers(
headers: &HeaderMap<HeaderValue>,
) -> Result<HeaderList, Error> {
let mut ret = Vec::new();
// Preserve standard headers
@ -622,10 +674,22 @@ pub(crate) fn get_headers(headers: &HeaderMap<HeaderValue>) -> Result<HeaderList
for (name, value) in headers.iter() {
if name.as_str().starts_with("x-amz-meta-") {
ret.push((
name.to_string(),
name.as_str().to_ascii_lowercase(),
std::str::from_utf8(value.as_bytes())?.to_string(),
));
}
if name == X_AMZ_WEBSITE_REDIRECT_LOCATION {
let value = std::str::from_utf8(value.as_bytes())?.to_string();
if !(value.starts_with("/")
|| value.starts_with("http://")
|| value.starts_with("https://"))
{
return Err(Error::bad_request(format!(
"Invalid {X_AMZ_WEBSITE_REDIRECT_LOCATION} header",
)));
}
ret.push((X_AMZ_WEBSITE_REDIRECT_LOCATION.to_string(), value));
}
}
Ok(ret)
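
Illustrative values for the x-amz-website-redirect-location check above (hypothetical inputs):

// accepted: "/errors/404.html", "http://example.com/", "https://example.com/x"
// rejected: "errors/404.html" (no leading slash), "ftp://example.com/",
//           "javascript:alert(1)"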

View file

@ -3,9 +3,10 @@ use std::borrow::Cow;
use hyper::header::HeaderValue;
use hyper::{HeaderMap, Method, Request};
use crate::helpers::Authorization;
use crate::router_macros::{generateQueryParameters, router_match};
use crate::s3::error::*;
use garage_api_common::helpers::Authorization;
use garage_api_common::router_macros::{generateQueryParameters, router_match};
use crate::error::*;
router_match! {@func
@ -351,6 +352,18 @@ impl Endpoint {
_ => return Err(Error::bad_request("Unknown method")),
};
if let Some(x_id) = query.x_id.take() {
if x_id != res.name() {
// I think AWS ignores the x-id parameter.
// Let's at least make this a warning to help debugging.
warn!(
"x-id ({}) does not match parsed endpoint ({})",
x_id,
res.name()
);
}
}
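
For context, some AWS SDKs (notably aws-sdk-js v3) append an x-id query parameter naming the operation, e.g.:

// PUT /bucket/key?x-id=PutObject
// The parameter is informational; a mismatch with the endpoint parsed from
// the method and path is only logged as a warning above.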
if let Some(message) = query.nonempty_message() {
debug!("Unused query parameter: {}", message)
}
@ -695,7 +708,8 @@ generateQueryParameters! {
"uploadId" => upload_id,
"upload-id-marker" => upload_id_marker,
"versionId" => version_id,
"version-id-marker" => version_id_marker
"version-id-marker" => version_id_marker,
"x-id" => x_id
]
}

View file

@ -1,17 +1,18 @@
use quick_xml::de::from_reader;
use http_body_util::BodyExt;
use hyper::{Request, Response, StatusCode};
use hyper::{header::HeaderName, Request, Response, StatusCode};
use serde::{Deserialize, Serialize};
use crate::helpers::*;
use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::error::*;
use crate::s3::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
use crate::signature::verify_signed_content;
use garage_model::bucket_table::*;
use garage_util::data::*;
use garage_api_common::helpers::*;
use crate::api_server::{ReqBody, ResBody};
use crate::error::*;
use crate::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
pub const X_AMZ_WEBSITE_REDIRECT_LOCATION: HeaderName =
HeaderName::from_static("x-amz-website-redirect-location");
pub async fn handle_get_website(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
let ReqCtx { bucket_params, .. } = ctx;
@ -60,7 +61,6 @@ pub async fn handle_delete_website(ctx: ReqCtx) -> Result<Response<ResBody>, Err
pub async fn handle_put_website(
ctx: ReqCtx,
req: Request<ReqBody>,
content_sha256: Option<Hash>,
) -> Result<Response<ResBody>, Error> {
let ReqCtx {
garage,
@ -69,11 +69,7 @@ pub async fn handle_put_website(
..
} = ctx;
let body = BodyExt::collect(req.into_body()).await?.to_bytes();
if let Some(content_sha256) = content_sha256 {
verify_signed_content(content_sha256, &body[..])?;
}
let body = req.into_body().collect().await?;
let conf: WebsiteConfiguration = from_reader(&body as &[u8])?;
conf.validate()?;
@ -276,7 +272,7 @@ impl Redirect {
return Err(Error::bad_request("Bad XML: invalid protocol"));
}
}
// TODO there are probably more invalide cases, but which ones?
// TODO there are probably more invalid cases, but which ones?
Ok(())
}
}

View file

@ -1,7 +1,7 @@
use quick_xml::se::to_string;
use serde::{Deserialize, Serialize, Serializer};
use crate::s3::error::Error as ApiError;
use crate::error::Error as ApiError;
pub fn to_xml_with_header<T: Serialize>(x: &T) -> Result<String, ApiError> {
let mut xml = r#"<?xml version="1.0" encoding="UTF-8"?>"#.to_string();

View file

@ -1,78 +0,0 @@
use chrono::{DateTime, Utc};
use hmac::{Hmac, Mac};
use sha2::Sha256;
use hyper::{body::Incoming as IncomingBody, Request};
use garage_model::garage::Garage;
use garage_model::key_table::Key;
use garage_util::data::{sha256sum, Hash};
use error::*;
pub mod error;
pub mod payload;
pub mod streaming;
pub const SHORT_DATE: &str = "%Y%m%d";
pub const LONG_DATETIME: &str = "%Y%m%dT%H%M%SZ";
type HmacSha256 = Hmac<Sha256>;
pub async fn verify_request(
garage: &Garage,
mut req: Request<IncomingBody>,
service: &'static str,
) -> Result<(Request<streaming::ReqBody>, Key, Option<Hash>), Error> {
let (api_key, mut content_sha256) =
payload::check_payload_signature(&garage, &mut req, service).await?;
let api_key =
api_key.ok_or_else(|| Error::forbidden("Garage does not support anonymous access yet"))?;
let req = streaming::parse_streaming_body(
&api_key,
req,
&mut content_sha256,
&garage.config.s3_api.s3_region,
service,
)?;
Ok((req, api_key, content_sha256))
}
pub fn verify_signed_content(expected_sha256: Hash, body: &[u8]) -> Result<(), Error> {
if expected_sha256 != sha256sum(body) {
return Err(Error::bad_request(
"Request content hash does not match signed hash".to_string(),
));
}
Ok(())
}
pub fn signing_hmac(
datetime: &DateTime<Utc>,
secret_key: &str,
region: &str,
service: &str,
) -> Result<HmacSha256, crypto_common::InvalidLength> {
let secret = String::from("AWS4") + secret_key;
let mut date_hmac = HmacSha256::new_from_slice(secret.as_bytes())?;
date_hmac.update(datetime.format(SHORT_DATE).to_string().as_bytes());
let mut region_hmac = HmacSha256::new_from_slice(&date_hmac.finalize().into_bytes())?;
region_hmac.update(region.as_bytes());
let mut service_hmac = HmacSha256::new_from_slice(&region_hmac.finalize().into_bytes())?;
service_hmac.update(service.as_bytes());
let mut signing_hmac = HmacSha256::new_from_slice(&service_hmac.finalize().into_bytes())?;
signing_hmac.update(b"aws4_request");
let hmac = HmacSha256::new_from_slice(&signing_hmac.finalize().into_bytes())?;
Ok(hmac)
}
pub fn compute_scope(datetime: &DateTime<Utc>, region: &str, service: &str) -> String {
format!(
"{}/{}/{}/aws4_request",
datetime.format(SHORT_DATE),
region,
service
)
}
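
For reference, the scope string produced here (the code now lives in garage_api_common::signature) looks like, e.g.:

// compute_scope(&Utc.with_ymd_and_hms(2025, 2, 19, 0, 0, 0).unwrap(), "garage", "s3")
//   == "20250219/garage/s3/aws4_request"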

View file

@ -1,373 +0,0 @@
use std::pin::Pin;
use chrono::{DateTime, NaiveDateTime, TimeZone, Utc};
use futures::prelude::*;
use futures::task;
use garage_model::key_table::Key;
use hmac::Mac;
use http_body_util::StreamBody;
use hyper::body::{Bytes, Incoming as IncomingBody};
use hyper::Request;
use garage_util::data::Hash;
use super::{compute_scope, sha256sum, HmacSha256, LONG_DATETIME};
use crate::helpers::*;
use crate::signature::error::*;
use crate::signature::payload::{
STREAMING_AWS4_HMAC_SHA256_PAYLOAD, X_AMZ_CONTENT_SH256, X_AMZ_DATE,
};
pub const AWS4_HMAC_SHA256_PAYLOAD: &str = "AWS4-HMAC-SHA256-PAYLOAD";
pub type ReqBody = BoxBody<Error>;
pub fn parse_streaming_body(
api_key: &Key,
req: Request<IncomingBody>,
content_sha256: &mut Option<Hash>,
region: &str,
service: &str,
) -> Result<Request<ReqBody>, Error> {
match req.headers().get(X_AMZ_CONTENT_SH256) {
Some(header) if header == STREAMING_AWS4_HMAC_SHA256_PAYLOAD => {
let signature = content_sha256
.take()
.ok_or_bad_request("No signature provided")?;
let secret_key = &api_key
.state
.as_option()
.ok_or_internal_error("Deleted key state")?
.secret_key;
let date = req
.headers()
.get(X_AMZ_DATE)
.ok_or_bad_request("Missing X-Amz-Date field")?
.to_str()?;
let date: NaiveDateTime = NaiveDateTime::parse_from_str(date, LONG_DATETIME)
.ok_or_bad_request("Invalid date")?;
let date: DateTime<Utc> = Utc.from_utc_datetime(&date);
let scope = compute_scope(&date, region, service);
let signing_hmac = crate::signature::signing_hmac(&date, secret_key, region, service)
.ok_or_internal_error("Unable to build signing HMAC")?;
Ok(req.map(move |body| {
let stream = body_stream::<_, Error>(body);
let signed_payload_stream =
SignedPayloadStream::new(stream, signing_hmac, date, &scope, signature)
.map(|x| x.map(hyper::body::Frame::data))
.map_err(Error::from);
ReqBody::new(StreamBody::new(signed_payload_stream))
}))
}
_ => Ok(req.map(|body| ReqBody::new(http_body_util::BodyExt::map_err(body, Error::from)))),
}
}
/// Result of `sha256("")`
const EMPTY_STRING_HEX_DIGEST: &str =
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855";
fn compute_streaming_payload_signature(
signing_hmac: &HmacSha256,
date: DateTime<Utc>,
scope: &str,
previous_signature: Hash,
content_sha256: Hash,
) -> Result<Hash, Error> {
let string_to_sign = [
AWS4_HMAC_SHA256_PAYLOAD,
&date.format(LONG_DATETIME).to_string(),
scope,
&hex::encode(previous_signature),
EMPTY_STRING_HEX_DIGEST,
&hex::encode(content_sha256),
]
.join("\n");
let mut hmac = signing_hmac.clone();
hmac.update(string_to_sign.as_bytes());
Ok(Hash::try_from(&hmac.finalize().into_bytes()).ok_or_internal_error("Invalid signature")?)
}
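// Illustrative sketch (placeholder values, not real signatures) of the six
// lines joined by compute_streaming_payload_signature above:
#[test]
fn chunk_string_to_sign_layout() {
    let string_to_sign = [
        "AWS4-HMAC-SHA256-PAYLOAD",           // fixed algorithm tag
        "20130524T000000Z",                   // request date, LONG_DATETIME format
        "20130524/us-east-1/s3/aws4_request", // credential scope
        "<hex of previous chunk signature>",  // chains each chunk to the previous one
        "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", // sha256("")
        "<hex sha256 of this chunk's data>",
    ]
    .join("\n");
    assert_eq!(string_to_sign.lines().count(), 6);
}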
mod payload {
use garage_util::data::Hash;
pub enum Error<I> {
Parser(nom::error::Error<I>),
BadSignature,
}
impl<I> Error<I> {
pub fn description(&self) -> &str {
match *self {
Error::Parser(ref e) => e.code.description(),
Error::BadSignature => "Bad signature",
}
}
}
#[derive(Debug, Clone)]
pub struct Header {
pub size: usize,
pub signature: Hash,
}
impl Header {
pub fn parse(input: &[u8]) -> nom::IResult<&[u8], Self, Error<&[u8]>> {
use nom::bytes::streaming::tag;
use nom::character::streaming::hex_digit1;
use nom::combinator::map_res;
use nom::number::streaming::hex_u32;
macro_rules! try_parse {
($expr:expr) => {
$expr.map_err(|e| e.map(Error::Parser))?
};
}
let (input, size) = try_parse!(hex_u32(input));
let (input, _) = try_parse!(tag(";")(input));
let (input, _) = try_parse!(tag("chunk-signature=")(input));
let (input, data) = try_parse!(map_res(hex_digit1, hex::decode)(input));
let signature = Hash::try_from(&data).ok_or(nom::Err::Failure(Error::BadSignature))?;
let (input, _) = try_parse!(tag("\r\n")(input));
let header = Header {
size: size as usize,
signature,
};
Ok((input, header))
}
}
}
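// Illustrative sketch: the chunk framing Header::parse consumes is
// "<hex size>;chunk-signature=<64 hex chars>\r\n<data>\r\n", and a zero-sized
// chunk terminates the stream. A nom-free equivalent for reference (assumed
// helper, not Garage's API; the signature value is a placeholder):
fn split_chunk_header(line: &str) -> Option<(usize, &str)> {
    let (size_hex, rest) = line.split_once(';')?;
    let sig = rest.strip_prefix("chunk-signature=")?;
    let size = usize::from_str_radix(size_hex, 16).ok()?;
    (sig.len() == 64 && sig.bytes().all(|b| b.is_ascii_hexdigit())).then_some((size, sig))
}

#[test]
fn chunk_header_example() {
    let hdr = "400;chunk-signature=0055627c9e194cb4542bae2aa5492e3c1575bbb81b612b7d234b86a503ef5497";
    assert_eq!(split_chunk_header(hdr).map(|(n, _)| n), Some(0x400));
    assert!(split_chunk_header("zz;chunk-signature=00").is_none());
}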
#[derive(Debug)]
pub enum SignedPayloadStreamError {
Stream(Error),
InvalidSignature,
Message(String),
}
impl SignedPayloadStreamError {
fn message(msg: &str) -> Self {
SignedPayloadStreamError::Message(msg.into())
}
}
impl From<SignedPayloadStreamError> for Error {
fn from(err: SignedPayloadStreamError) -> Self {
match err {
SignedPayloadStreamError::Stream(e) => e,
SignedPayloadStreamError::InvalidSignature => {
Error::bad_request("Invalid payload signature")
}
SignedPayloadStreamError::Message(e) => {
Error::bad_request(format!("Chunk format error: {}", e))
}
}
}
}
impl<I> From<payload::Error<I>> for SignedPayloadStreamError {
fn from(err: payload::Error<I>) -> Self {
Self::message(err.description())
}
}
impl<I> From<nom::error::Error<I>> for SignedPayloadStreamError {
fn from(err: nom::error::Error<I>) -> Self {
Self::message(err.code.description())
}
}
struct SignedPayload {
header: payload::Header,
data: Bytes,
}
#[pin_project::pin_project]
pub struct SignedPayloadStream<S>
where
S: Stream<Item = Result<Bytes, Error>>,
{
#[pin]
stream: S,
buf: bytes::BytesMut,
datetime: DateTime<Utc>,
scope: String,
signing_hmac: HmacSha256,
previous_signature: Hash,
}
impl<S> SignedPayloadStream<S>
where
S: Stream<Item = Result<Bytes, Error>>,
{
pub fn new(
stream: S,
signing_hmac: HmacSha256,
datetime: DateTime<Utc>,
scope: &str,
seed_signature: Hash,
) -> Self {
Self {
stream,
buf: bytes::BytesMut::new(),
datetime,
scope: scope.into(),
signing_hmac,
previous_signature: seed_signature,
}
}
fn parse_next(input: &[u8]) -> nom::IResult<&[u8], SignedPayload, SignedPayloadStreamError> {
use nom::bytes::streaming::{tag, take};
macro_rules! try_parse {
($expr:expr) => {
$expr.map_err(nom::Err::convert)?
};
}
let (input, header) = try_parse!(payload::Header::parse(input));
// 0-sized chunk is the last
if header.size == 0 {
return Ok((
input,
SignedPayload {
header,
data: Bytes::new(),
},
));
}
let (input, data) = try_parse!(take::<_, _, nom::error::Error<_>>(header.size)(input));
let (input, _) = try_parse!(tag::<_, _, nom::error::Error<_>>("\r\n")(input));
let data = Bytes::from(data.to_vec());
Ok((input, SignedPayload { header, data }))
}
}
impl<S> Stream for SignedPayloadStream<S>
where
S: Stream<Item = Result<Bytes, Error>> + Unpin,
{
type Item = Result<Bytes, SignedPayloadStreamError>;
fn poll_next(
self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> task::Poll<Option<Self::Item>> {
use std::task::Poll;
let mut this = self.project();
loop {
let (input, payload) = match Self::parse_next(this.buf) {
Ok(res) => res,
Err(nom::Err::Incomplete(_)) => {
match futures::ready!(this.stream.as_mut().poll_next(cx)) {
Some(Ok(bytes)) => {
this.buf.extend(bytes);
continue;
}
Some(Err(e)) => {
return Poll::Ready(Some(Err(SignedPayloadStreamError::Stream(e))))
}
None => {
return Poll::Ready(Some(Err(SignedPayloadStreamError::message(
"Unexpected EOF",
))));
}
}
}
Err(nom::Err::Error(e)) | Err(nom::Err::Failure(e)) => {
return Poll::Ready(Some(Err(e)))
}
};
// 0-sized chunk is the last
if payload.data.is_empty() {
return Poll::Ready(None);
}
let data_sha256sum = sha256sum(&payload.data);
let expected_signature = compute_streaming_payload_signature(
this.signing_hmac,
*this.datetime,
this.scope,
*this.previous_signature,
data_sha256sum,
)
.map_err(|e| {
SignedPayloadStreamError::Message(format!("Could not build signature: {}", e))
})?;
if payload.header.signature != expected_signature {
return Poll::Ready(Some(Err(SignedPayloadStreamError::InvalidSignature)));
}
*this.buf = input.into();
*this.previous_signature = payload.header.signature;
return Poll::Ready(Some(Ok(payload.data)));
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.stream.size_hint()
}
}
#[cfg(test)]
mod tests {
use futures::prelude::*;
use super::{SignedPayloadStream, SignedPayloadStreamError};
#[tokio::test]
async fn test_interrupted_signed_payload_stream() {
use chrono::{DateTime, Utc};
use garage_util::data::Hash;
let datetime = DateTime::parse_from_rfc3339("2021-12-13T13:12:42+01:00") // TODO UNIX 0
.unwrap()
.with_timezone(&Utc);
let secret_key = "test";
let region = "test";
let scope = crate::signature::compute_scope(&datetime, region, "s3");
let signing_hmac =
crate::signature::signing_hmac(&datetime, secret_key, region, "s3").unwrap();
let data: &[&[u8]] = &[b"1"];
let body = futures::stream::iter(data.iter().map(|block| Ok(block.to_vec().into())));
let seed_signature = Hash::default();
let mut stream =
SignedPayloadStream::new(body, signing_hmac, datetime, &scope, seed_signature);
assert!(stream.try_next().await.is_err());
match stream.try_next().await {
Err(SignedPayloadStreamError::Message(msg)) if msg == "Unexpected EOF" => {}
item => panic!(
"Unexpected result, expected early EOF error, got {:?}",
item
),
}
}
}

View file

@ -1,6 +1,6 @@
[package]
name = "garage_block"
version = "1.0.0"
version = "1.0.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
@ -34,10 +34,8 @@ async-compression.workspace = true
zstd.workspace = true
serde.workspace = true
serde_bytes.workspace = true
futures.workspace = true
futures-util.workspace = true
tokio.workspace = true
tokio-util.workspace = true

View file

@ -279,7 +279,8 @@ impl DataLayout {
u16::from_be_bytes([
hash.as_slice()[HASH_DRIVE_BYTES.0],
hash.as_slice()[HASH_DRIVE_BYTES.1],
]) as usize % DRIVE_NPART
]) as usize
% DRIVE_NPART
}
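// Illustrative sketch of the drive-selection arithmetic reformatted above:
// two bytes of the block hash, read big-endian, modulo the partition count.
// The byte offsets and DRIVE_NPART value here are placeholders, not the
// authoritative constants.
#[test]
fn drive_partition_example() {
    const DRIVE_NPART: usize = 1024;
    let mut hash = [0u8; 32];
    hash[0] = 0x12;
    hash[1] = 0x34;
    let part = u16::from_be_bytes([hash[0], hash[1]]) as usize % DRIVE_NPART;
    assert_eq!(part, 564); // 0x1234 = 4660, and 4660 % 1024 = 564
}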
fn block_dir_from(&self, hash: &Hash, dir: &PathBuf) -> PathBuf {

View file

@ -4,7 +4,6 @@ use std::sync::Arc;
use std::time::Duration;
use arc_swap::{ArcSwap, ArcSwapOption};
use async_trait::async_trait;
use bytes::Bytes;
use rand::prelude::*;
use serde::{Deserialize, Serialize};
@ -371,7 +370,7 @@ impl BlockManager {
prevent_compression: bool,
order_tag: Option<OrderTag>,
) -> Result<(), Error> {
let who = self.replication.write_sets(&hash);
let who = self.system.cluster_layout().current_storage_nodes_of(&hash);
let compression_level = self.compression_level.filter(|_| !prevent_compression);
let (header, bytes) = DataBlock::from_buffer(data, compression_level)
@ -397,7 +396,7 @@ impl BlockManager {
.rpc_helper()
.try_write_many_sets(
&self.endpoint,
who.as_ref(),
&[who],
put_block_rpc,
RequestStrategy::with_priority(PRIO_NORMAL | PRIO_SECONDARY)
.with_drop_on_completion(permit)
@ -669,10 +668,12 @@ impl BlockManager {
hash: &Hash,
wrong_path: DataBlockPath,
) -> Result<usize, Error> {
let data = self.read_block_from(hash, &wrong_path).await?;
self.lock_mutate(hash)
.await
.fix_block_location(hash, wrong_path, self)
.await
.write_block_inner(hash, &data, self, Some(wrong_path))
.await?;
Ok(data.as_parts_ref().1.len())
}
async fn lock_mutate(&self, hash: &Hash) -> MutexGuard<'_, BlockManagerLocked> {
@ -688,7 +689,6 @@ impl BlockManager {
}
}
#[async_trait]
impl StreamingEndpointHandler<BlockRpc> for BlockManager {
async fn handle(self: &Arc<Self>, mut message: Req<BlockRpc>, _from: NodeID) -> Resp<BlockRpc> {
match message.msg() {
@ -829,18 +829,6 @@ impl BlockManagerLocked {
}
Ok(())
}
async fn fix_block_location(
&self,
hash: &Hash,
wrong_path: DataBlockPath,
mgr: &BlockManager,
) -> Result<usize, Error> {
let data = mgr.read_block_from(hash, &wrong_path).await?;
self.write_block_inner(hash, &data, mgr, Some(wrong_path))
.await?;
Ok(data.as_parts_ref().1.len())
}
}
struct DeleteOnDrop(Option<PathBuf>);

View file

@ -105,7 +105,7 @@ impl BlockResyncManager {
}
}
/// Get lenght of resync queue
/// Get length of resync queue
pub fn queue_len(&self) -> Result<usize, Error> {
Ok(self.queue.len()?)
}
@ -185,10 +185,10 @@ impl BlockResyncManager {
//
// - resync.errors: a tree that indicates for each block
// if the last resync resulted in an error, and if so,
// the following two informations (see the ErrorCounter struct):
// the following two pieces of information (see the ErrorCounter struct):

// - how many consecutive resync errors for this block?
// - when was the last try?
// These two informations are used to implement an
// These two pieces of information are used to implement an
// exponential backoff retry strategy.
// The key in this tree is the 32-byte hash of the block,
// and the value is the encoded ErrorCounter value.
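// Illustrative sketch of the ErrorCounter idea described above (field names
// are assumptions, not Garage's actual definition): the retry delay doubles
// with each consecutive error, starting from a base interval and capped so a
// long-broken block is still retried eventually.
struct ErrorCounterSketch {
    errors: u64,                     // consecutive resync failures for this block
    last_try: std::time::SystemTime, // when the last attempt happened
}

impl ErrorCounterSketch {
    fn next_try(&self) -> std::time::SystemTime {
        let backoff =
            std::time::Duration::from_secs(60) * 2u32.saturating_pow(self.errors.min(10) as u32);
        self.last_try + backoff
    }
}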
@ -377,7 +377,10 @@ impl BlockResyncManager {
info!("Resync block {:?}: offloading and deleting", hash);
let existing_path = existing_path.unwrap();
let mut who = manager.replication.storage_nodes(hash);
let mut who = manager
.system
.cluster_layout()
.current_storage_nodes_of(hash);
if who.len() < manager.replication.write_quorum() {
return Err(Error::Message("Not trying to offload block because we don't have a quorum of nodes to write to".to_string()));
}
@ -455,6 +458,25 @@ impl BlockResyncManager {
}
if rc.is_nonzero() && !exists {
// The refcount is > 0, and the block is not present locally.
// We might need to fetch it from another node.
// First, check whether we are still supposed to store that
// block in the latest cluster layout version.
let storage_nodes = manager
.system
.cluster_layout()
.current_storage_nodes_of(&hash);
if !storage_nodes.contains(&manager.system.id) {
info!(
"Resync block {:?}: block is absent with refcount > 0, but it will drop to zero after all metadata is synced. Not fetching the block.",
hash
);
return Ok(());
}
// We know we need the block. Fetch it.
info!(
"Resync block {:?}: fetching absent but needed block (refcount > 0)",
hash

View file

@ -1,6 +1,6 @@
[package]
name = "garage_db"
version = "1.0.0"
version = "1.0.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
@ -13,7 +13,6 @@ path = "lib.rs"
[dependencies]
err-derive.workspace = true
hexdump.workspace = true
tracing.workspace = true
heed = { workspace = true, optional = true }

View file

@ -122,7 +122,7 @@ impl Db {
_ => unreachable!(),
},
Err(TxError::Db(e2)) => match ret {
// Ok was stored -> the error occured when finalizing
// Ok was stored -> the error occurred when finalizing
// transaction
Ok(_) => Err(TxError::Db(e2)),
// An error was already stored: that's the one we want to
@ -211,16 +211,12 @@ impl Tree {
/// Returns the old value if there was one
#[inline]
pub fn insert<T: AsRef<[u8]>, U: AsRef<[u8]>>(
&self,
key: T,
value: U,
) -> Result<Option<Value>> {
pub fn insert<T: AsRef<[u8]>, U: AsRef<[u8]>>(&self, key: T, value: U) -> Result<()> {
self.0.insert(self.1, key.as_ref(), value.as_ref())
}
/// Returns the old value if there was one
#[inline]
pub fn remove<T: AsRef<[u8]>>(&self, key: T) -> Result<Option<Value>> {
pub fn remove<T: AsRef<[u8]>>(&self, key: T) -> Result<()> {
self.0.remove(self.1, key.as_ref())
}
/// Clears all values from the tree
@ -278,12 +274,12 @@ impl<'a> Transaction<'a> {
tree: &Tree,
key: T,
value: U,
) -> TxOpResult<Option<Value>> {
) -> TxOpResult<()> {
self.tx.insert(tree.1, key.as_ref(), value.as_ref())
}
/// Returns the old value if there was one
#[inline]
pub fn remove<T: AsRef<[u8]>>(&mut self, tree: &Tree, key: T) -> TxOpResult<Option<Value>> {
pub fn remove<T: AsRef<[u8]>>(&mut self, tree: &Tree, key: T) -> TxOpResult<()> {
self.tx.remove(tree.1, key.as_ref())
}
/// Clears all values in a tree
@ -339,8 +335,8 @@ pub(crate) trait IDb: Send + Sync {
fn get(&self, tree: usize, key: &[u8]) -> Result<Option<Value>>;
fn len(&self, tree: usize) -> Result<usize>;
fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<Option<Value>>;
fn remove(&self, tree: usize, key: &[u8]) -> Result<Option<Value>>;
fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<()>;
fn remove(&self, tree: usize, key: &[u8]) -> Result<()>;
fn clear(&self, tree: usize) -> Result<()>;
fn iter(&self, tree: usize) -> Result<ValueIter<'_>>;
@ -366,8 +362,8 @@ pub(crate) trait ITx {
fn get(&self, tree: usize, key: &[u8]) -> TxOpResult<Option<Value>>;
fn len(&self, tree: usize) -> TxOpResult<usize>;
fn insert(&mut self, tree: usize, key: &[u8], value: &[u8]) -> TxOpResult<Option<Value>>;
fn remove(&mut self, tree: usize, key: &[u8]) -> TxOpResult<Option<Value>>;
fn insert(&mut self, tree: usize, key: &[u8], value: &[u8]) -> TxOpResult<()>;
fn remove(&mut self, tree: usize, key: &[u8]) -> TxOpResult<()>;
fn clear(&mut self, tree: usize) -> TxOpResult<()>;
fn iter(&self, tree: usize) -> TxOpResult<TxValueIter<'_>>;
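With insert and remove now returning Result<()> rather than the previous value, callers that relied on the old return value must read before writing. A minimal, runnable stand-in for that calling pattern, with a HashMap playing the role of a Tree (names are illustrative, not Garage's API):

use std::collections::HashMap;

fn insert_and_get_old(
    tree: &mut HashMap<Vec<u8>, Vec<u8>>,
    key: &[u8],
    value: &[u8],
) -> Option<Vec<u8>> {
    let old = tree.get(key).cloned(); // fetch the previous value first...
    tree.insert(key.to_vec(), value.to_vec()); // ...then overwrite it
    old
}

fn main() {
    let mut tree = HashMap::new();
    assert!(insert_and_get_old(&mut tree, b"k", b"v1").is_none());
    assert_eq!(insert_and_get_old(&mut tree, b"k", b"v2"), Some(b"v1".to_vec()));
}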

View file

@ -132,22 +132,20 @@ impl IDb for LmdbDb {
Ok(tree.len(&tx)?.try_into().unwrap())
}
fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<Option<Value>> {
fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<()> {
let tree = self.get_tree(tree)?;
let mut tx = self.db.write_txn()?;
let old_val = tree.get(&tx, key)?.map(Vec::from);
tree.put(&mut tx, key, value)?;
tx.commit()?;
Ok(old_val)
Ok(())
}
fn remove(&self, tree: usize, key: &[u8]) -> Result<Option<Value>> {
fn remove(&self, tree: usize, key: &[u8]) -> Result<()> {
let tree = self.get_tree(tree)?;
let mut tx = self.db.write_txn()?;
let old_val = tree.get(&tx, key)?.map(Vec::from);
tree.delete(&mut tx, key)?;
tx.commit()?;
Ok(old_val)
Ok(())
}
fn clear(&self, tree: usize) -> Result<()> {
@ -235,7 +233,7 @@ impl<'a> LmdbTx<'a> {
fn get_tree(&self, i: usize) -> TxOpResult<&Database> {
self.trees.get(i).ok_or_else(|| {
TxOpError(Error(
"invalid tree id (it might have been openned after the transaction started)".into(),
"invalid tree id (it might have been opened after the transaction started)".into(),
))
})
}
@ -254,17 +252,15 @@ impl<'a> ITx for LmdbTx<'a> {
Ok(tree.len(&self.tx)? as usize)
}
fn insert(&mut self, tree: usize, key: &[u8], value: &[u8]) -> TxOpResult<Option<Value>> {
fn insert(&mut self, tree: usize, key: &[u8], value: &[u8]) -> TxOpResult<()> {
let tree = *self.get_tree(tree)?;
let old_val = tree.get(&self.tx, key)?.map(Vec::from);
tree.put(&mut self.tx, key, value)?;
Ok(old_val)
Ok(())
}
fn remove(&mut self, tree: usize, key: &[u8]) -> TxOpResult<Option<Value>> {
fn remove(&mut self, tree: usize, key: &[u8]) -> TxOpResult<()> {
let tree = *self.get_tree(tree)?;
let old_val = tree.get(&self.tx, key)?.map(Vec::from);
tree.delete(&mut self.tx, key)?;
Ok(old_val)
Ok(())
}
fn clear(&mut self, tree: usize) -> TxOpResult<()> {
let tree = *self.get_tree(tree)?;

Some files were not shown because too many files have changed in this diff.