Compare commits


236 commits

Author SHA1 Message Date
906c8708fd Merge pull request 'add extraVolumes and extraVolumeMounts to helm chart' (#896) from eugene-davis/garage:main into main
Reviewed-on: Deuxfleurs/garage#896
Reviewed-by: maximilien <me@mricher.fr>
2024-11-19 22:23:13 +00:00
747889a096 Merge pull request 'Update Python SDK documentation' (#887) from cryptolukas/garage:fix-python-sdk-docs into main
Reviewed-on: Deuxfleurs/garage#887
2024-11-19 09:15:03 +00:00
feb09a4bc6 Merge pull request 'doc: update mastodon media header pruning section' (#888) from teutat3s/garage:doc-update-mastodon-media into main
Reviewed-on: Deuxfleurs/garage#888
2024-11-19 09:14:34 +00:00
aa8bc6aa88 Merge pull request 'doc: add Triplebit's use-case' (#901) from jonah/garage:triplebit into main
Reviewed-on: Deuxfleurs/garage#901
Reviewed-by: maximilien <me@mricher.fr>
2024-11-17 13:43:49 +00:00
aba7902995
doc: add Triplebit's use-case 2024-11-15 16:27:46 -06:00
78de7b5bde Merge pull request 'fix bit/byte inversion in rpc secret error message' (#898) from trinity-1686a/garage:rpc-comment into main
Reviewed-on: Deuxfleurs/garage#898
2024-11-07 11:11:12 +00:00
9bd9e392ba fix bit/byte inversion in rpc secret error message 2024-11-07 00:29:26 +01:00
116ad479a8
add extraVolumes and extraVolumeMounts to helm chart 2024-10-26 21:14:08 +02:00
teutat3s
b6a58c5c16
doc: update mastodon media header pruning section
This is now possible since the upstream issue has been resolved.
https://github.com/mastodon/mastodon/issues/9567
2024-10-17 20:59:21 +02:00
2b0bfa9b18 the old values do not work out of the box 2024-10-14 17:20:26 +02:00
a18b3f0d1f Merge pull request 'Garage v1.0.1' (#881) from rel-v1.0.1 into main
Reviewed-on: Deuxfleurs/garage#881
2024-09-22 13:02:02 +00:00
7a143f46fc
Bump to version 1.0.1 2024-09-22 14:25:32 +02:00
c731f0291a Merge pull request 'fix logic in garage layout skip-dead-nodes + fix typo (fix #879)' (#880) from fix-skip-dead-nodes into main
Reviewed-on: Deuxfleurs/garage#880
2024-09-22 12:01:49 +00:00
34453bc9c2
fix logic in garage layout skip-dead-nodes + fix typo (fix #879) 2024-09-22 13:47:27 +02:00
6da1353541 Merge pull request 'Don't fetch old values in cross-partition transactional inserts' (#877) from withings/garage:perf/kv/insert-no-return-cross-partition into main
Reviewed-on: Deuxfleurs/garage#877
2024-09-14 15:57:27 +00:00
bd71728874
Tests: don't expect old value after transactional insert 2024-09-12 10:50:53 +02:00
51ced60366
Don't fetch old values in cross-partition transactional inserts 2024-09-12 10:26:28 +02:00
586957b4b7 Merge pull request 'KV: don't retrieve values for write ops' (#873) from marvinj97/garage:perf/kv/insert-no-return into main
Reviewed-on: Deuxfleurs/garage#873
Reviewed-by: Alex <alex@adnab.me>
2024-09-10 09:06:29 +00:00
8d2bb4afeb Merge pull request 'Typo' (#875) from faust/garage:doc2 into main
Reviewed-on: Deuxfleurs/garage#875
2024-09-10 09:03:31 +00:00
c26f32b769
Typo
And remove trailing white space.
2024-09-10 09:34:59 +02:00
8062ec7b4b test: fix db tests 2024-09-04 19:24:36 +02:00
eb416a02fb dont assert deletion count in sqlite KV adapter 2024-09-04 18:51:51 +02:00
74363c9060 perf(kv): dont retrieve values for write ops
see Deuxfleurs/garage#851
2024-09-04 18:45:17 +02:00
615698df7d Merge pull request 'update compiler to Rust 1.77' (#866) from rust-1.77 into main
Reviewed-on: Deuxfleurs/garage#866
2024-08-26 19:08:00 +00:00
7061fa5a56
use rust 1.77 in nix/compile.nix 2024-08-26 19:19:16 +02:00
8881930cdd
update nixpkgs and rust-overlay sources in flake.nix 2024-08-26 19:19:16 +02:00
52f6c0760b Merge pull request 'update crate time (fix #849)' (#865) from update-time into main
Reviewed-on: Deuxfleurs/garage#865
2024-08-26 16:20:04 +00:00
5b0602c7e9
update crate time (fix #849) 2024-08-26 18:11:21 +02:00
182b2af7e5 Merge pull request 'api servers: kill opened connections after SIGINT after 10s deadline (fix #806)' (#864) from exit-deadline into main
Reviewed-on: Deuxfleurs/garage#864
2024-08-25 18:34:55 +00:00
baf32c9575
api servers: kill opened connections after SIGINT after 10s deadline (fix #806) 2024-08-25 20:04:56 +02:00
3dda1ee4f6 Merge pull request 'fix build when lmdb feature is disabled (fix #800)' (#863) from fix-800 into main
Reviewed-on: Deuxfleurs/garage#863
2024-08-25 10:00:40 +00:00
aa7ce9e97c
fix build when lmdb feature is disabled (fix #800) 2024-08-25 11:42:37 +02:00
8d62616ec0 Merge pull request 'layout: discard old info when it is completely out-of-date (fix #841)' (#861) from fix-841 into main
Reviewed-on: Deuxfleurs/garage#861
2024-08-24 11:12:39 +00:00
bd6fe72c06 Merge pull request 'Quick start: mention Docker (replace #803)' (#862) from dougreeder into main
Reviewed-on: Deuxfleurs/garage#862
2024-08-24 11:07:46 +00:00
4c9e8ef625
doc: clarify quick start on using docker 2024-08-24 13:07:02 +02:00
3e711bc110 Merge pull request 'don't modify postobject request before validating policy' (#850) from trinity-1686a/garage:fix-acl-postobject into main
Reviewed-on: Deuxfleurs/garage#850
2024-08-24 10:49:14 +00:00
7fb66b4944
layout: discard old info when it is completely out-of-date (fix #841) 2024-08-24 12:38:56 +02:00
679ae8bcbb Merge pull request 'Set "no read ahead" on LMDB to improve performances when "LMDB size" > "RAM size"' (#855) from fix-lmdb-no-read-ahead into main
Reviewed-on: Deuxfleurs/garage#855
Reviewed-by: Alex <alex@adnab.me>
2024-08-18 12:25:35 +00:00
2a93ad0c84
force flag "no read ahead" on LMDB 2024-08-17 21:17:15 +02:00
f190032589 don't modify postobject request before validating policy 2024-08-10 20:10:47 +02:00
3a87bd1370 Merge pull request 'Improve error message for malformed RPC secret key' (#846) from improve-secret-error-message into main
Reviewed-on: Deuxfleurs/garage#846
Reviewed-by: Quentin <quentin@dufour.io>
2024-08-09 06:47:11 +00:00
9302cd42f0 Improve error message for malformed RPC secret key 2024-08-08 23:05:24 +00:00
060ad0da32 docs: Update LMDB website 2024-08-06 21:47:14 +00:00
a5ed1161c6 Merge pull request 'Add environment variable dict to helm chart.' (#843) from Benjamin/garage:main into main
Reviewed-on: Deuxfleurs/garage#843
Reviewed-by: maximilien <me@mricher.fr>
2024-08-06 21:45:35 +00:00
Benjamin von Mossner
222674432b This commit adds an environment dict to the garage helm chart. Using it, env variables can be set in the garage container environment, useful to set e.g. GARAGE_ADMIN_TOKEN or GARAGE_METRICS_TOKEN 2024-07-25 11:42:13 +02:00
070a8ad110 Merge pull request 'doc: fix typo' (#831) from Armael/garage:typo into main
Reviewed-on: Deuxfleurs/garage#831
2024-06-18 12:40:32 +00:00
770384cae1 Merge pull request 'add rpc_public_addr_subnet config option' (#817) from flokli/garage:rpc_public_addr_subnet into main
Reviewed-on: Deuxfleurs/garage#817
Reviewed-by: Alex <alex@adnab.me>
2024-06-18 12:40:07 +00:00
a0f6bc5b7f add rpc_public_addr_subnet config option
In case `rpc_public_addr` is not set, but autodiscovery is used, this
allows filtering the list of automatically discovered IPs to a specific
subnet.

For example, if nodes should pick *their* IP inside a specific subnet,
but you don't want to explicitly write the IP down (as it's dynamic, or
you want to share configs across nodes), you can use this option.
2024-06-05 08:41:36 +02:00
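For illustration, this is how the new option might look in garage.toml (the key name comes from this PR; its placement alongside the other top-level rpc_* keys and all address values are assumptions):

    # garage.toml — sketch with illustrative values
    rpc_bind_addr = "[::]:3901"
    # rpc_public_addr is deliberately left unset: each node autodiscovers its
    # own address, and only candidate IPs inside this subnet are kept
    rpc_public_addr_subnet = "10.42.0.0/16"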
Armaël Guéneau
88c734bbd9 typo 2024-06-04 15:34:02 +02:00
d38509ef4b Merge pull request 'adding the ability to change the default podManagementPolicy for StatefulSets' (#823) from bodaciousbiscuits/garage:main into main
Reviewed-on: Deuxfleurs/garage#823
Reviewed-by: maximilien <me@mricher.fr>
2024-05-25 18:35:53 +00:00
bodaciousbiscuits
39b37833c5 adding the ability to change the default podManagementPolicy for StatefulSets 2024-05-19 21:31:19 -05:00
a2c1de646b Merge pull request 'cli: clarify garage block is node-local' (#813) from flokli/garage:block-node-local into main
Reviewed-on: Deuxfleurs/garage#813
2024-05-12 08:53:26 +00:00
15847a636a cli: clarify garage block is node-local
Prevents some of the confusion from
Deuxfleurs/garage#810.
2024-05-07 07:42:33 +00:00
123d3e1f04 Merge pull request 'flake.nix: add rust-analyzer to devShells.full, expose devShells.full in shell.nix' (#816) from flokli/garage:shell-fixes into main
Reviewed-on: Deuxfleurs/garage#816
2024-04-23 18:54:26 +00:00
a6e4b96ca9 shell.nix: expose devShellFull
This allows accessing devShells.full from shell.nix.
2024-04-23 11:59:37 +03:00
b442b0e35e devShells.full: add rust-analyzer 2024-04-23 11:59:37 +03:00
0c3b198b22 Improves Quick Start for users not using Linux 2024-04-10 16:42:10 -04:00
33c2086d9e Merge pull request '[fix-doc] fix broken references in documentation' (#802) from fix-doc into main
Reviewed-on: Deuxfleurs/garage#802
2024-04-10 15:49:03 +00:00
5ad1e55ccf [fix-doc] fix broken references in documentation 2024-04-10 17:47:34 +02:00
1779fd40c0 Merge pull request 'Garage v1.0' (#683) from next-0.10 into main
Reviewed-on: Deuxfleurs/garage#683
2024-04-10 15:23:12 +00:00
ff093ddbb8
Merge branch 'main' into next-0.10 2024-04-10 14:38:14 +02:00
90e3c2af91
[next-0.10] small updates to mention Garage v0.9.4 2024-04-10 14:35:30 +02:00
b47706809c Merge pull request 'fix typo in doc' (#799) from fix-typo into main
Reviewed-on: Deuxfleurs/garage#799
2024-04-08 15:09:27 +00:00
126e0f47a3 fix typo in doc 2024-04-08 17:08:44 +02:00
738bb2f09c Merge pull request 'Garage v0.9.4' (#798) from rel-0.9.4 into main
Reviewed-on: Deuxfleurs/garage#798
2024-04-08 09:57:03 +00:00
7dd7cb5759
[rel-0.9.4] upgrade version to v0.9.4 2024-04-08 11:18:19 +02:00
8b663d8c5b Merge pull request 'jepsen testing of Garage v1.0.0-rc1' (#796) from jepsen-1.0rc1 into main
Reviewed-on: Deuxfleurs/garage#796
2024-04-05 21:05:58 +00:00
c051db8204 jepsen testing of Garage v1.0.0-rc1 2024-04-05 22:57:00 +02:00
50669b3e76
[next-0.10] bump helm chart version 2024-04-03 14:19:59 +02:00
e5838b4837 Merge pull request '[doc-units] document how interval value is parsed' (#795) from doc-units into main
Reviewed-on: Deuxfleurs/garage#795
2024-04-03 12:15:18 +00:00
87dfaf2eb9
[doc-units] document how interval value is parsed 2024-04-03 14:14:13 +02:00
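A sketch of the interval syntax this documents (the option name is borrowed from the db-snapshot commits further down; the exact spellings the parser accepts are assumptions):

    # all of these are meant to denote the same 30-minute interval
    meta_auto_snapshot_interval = "30m"
    # meta_auto_snapshot_interval = "30 minutes"
    # meta_auto_snapshot_interval = "1800 seconds"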
554437254e
[next-0.10] Add migration guide for v1.0 2024-03-28 18:45:06 +01:00
afad62939e
[next-0.10] bump version number to 1.0 2024-03-28 15:19:44 +01:00
8bfc16ba7d
Merge branch 'main' into next-0.10 2024-03-28 15:01:05 +01:00
ecf641d88c Merge pull request 'Fix unbounded buffering when one node has slower network' (#792) from fix-buffering into main
Reviewed-on: Deuxfleurs/garage#792
2024-03-28 12:40:27 +00:00
75cd14926d Merge pull request 'CI: properly cleanup between garage integration tests' (#793) from fix-ci into main
Reviewed-on: Deuxfleurs/garage#793
2024-03-28 12:37:18 +00:00
e1dc84e123
[fix-ci] CI: properly cleanup between garage integration tests 2024-03-28 13:08:42 +01:00
85f580cbde
[fix-buffering] change request sending strategy and fix priorities
remove LAS, prioritize new requests, but otherwise just do standard queuing
2024-03-27 16:22:40 +01:00
0d3e285d13
[fix-buffering] implement block_ram_buffer_max to avoid excessive RAM usage 2024-03-27 16:22:40 +01:00
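A sketch of the new knob in garage.toml (option name from the commit; the value and its unit syntax are assumptions):

    # cap the RAM used to buffer data blocks queued for sending to other nodes
    block_ram_buffer_max = "256MiB"   # assumed value syntax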
25c196f34d
[next-0.10] admin api: fix logic in get cluster status 2024-03-27 13:55:49 +01:00
4eba32f29f
[next-0.10] layout helper: rename & clarify updates to update trackers 2024-03-27 13:47:06 +01:00
32f1786f9f
[next-0.10] cache layout check result 2024-03-27 13:37:20 +01:00
01a0bd5410
[next-0.10] remove impl Deref for LayoutHelper 2024-03-27 13:32:13 +01:00
c0eeb0b0f3
[next-0.10] fixes to k2v rpc + comment fixes 2024-03-27 10:44:03 +01:00
51d11b4b26
[next-0.10] doc: 2 changes
- rewrite section on encryption to mention SSE-C
- change to real-world to make it closer to main branch
2024-03-27 10:10:45 +01:00
f7cd4eb600
Merge branch 'main' into next-0.10 2024-03-26 16:34:40 +01:00
95eb8808e8 Merge pull request 'Disable more K2V tests' (#791) from disable-k2v-test into main
Reviewed-on: Deuxfleurs/garage#791
2024-03-26 15:33:46 +00:00
e0a4fc097a
[disable-k2v-test] remove obsolete k2v test script 2024-03-26 16:24:44 +01:00
73551e9a2d
[disable-k2v-test] disable the other k2v poll test 2024-03-26 16:24:26 +01:00
80f81fa6f3 Merge pull request '[disable-k2v-test] disable tests::k2v::test_poll_item as it is not 100% reliable' (#789) from disable-k2v-test into main
Reviewed-on: Deuxfleurs/garage#789
2024-03-26 14:57:46 +00:00
f267609343
[disable-k2v-test] disable tests::k2v::test_poll_item as it is not 100% reliable 2024-03-26 15:39:17 +01:00
cdde0f19ee Merge pull request 'checksum algorithms' (#787) from s3-checksum into next-0.10
Reviewed-on: Deuxfleurs/garage#787
2024-03-26 14:24:58 +00:00
74949c69cb
[s3-checksum] implement x-amz-checksum-* headers 2024-03-26 15:01:34 +01:00
7e0107c47d Merge pull request 'Fixes to garage_net peering manager' (#786) from net-fixes into next-0.10
Reviewed-on: Deuxfleurs/garage#786
2024-03-21 10:26:36 +00:00
3844110cd0
[net-fixes] netapp peer exchange: send only currently connected address 2024-03-21 10:50:44 +01:00
961b4f9af3
[net-fixes] fix issues with local peer address (fix #761) 2024-03-21 10:45:34 +01:00
5225a81dee
[net-fixes] peering: only count node IDs and not addresses in hash 2024-03-21 09:47:04 +01:00
e835196940 Merge pull request 'Add marker files in data directories (fix #601)' (#785) from check-data-dir into main
Reviewed-on: Deuxfleurs/garage#785
2024-03-20 16:53:47 +00:00
ba33bb31f1
[check-data-dir] add marker files in data directories (fix #601) 2024-03-20 15:20:25 +01:00
30abf7e086 Merge pull request 'Add support to logging to syslog (based on patch by @jirutka)' (#784) from syslog into main
Reviewed-on: Deuxfleurs/garage#784
2024-03-20 13:54:18 +00:00
84018be862
[syslog] warning when syslog support is not enabled 2024-03-20 14:39:04 +01:00
091e693670
[syslog] document environment variables 2024-03-20 14:39:04 +01:00
fe8a7819fa
[syslog] Add support to logging to syslog
Original patch by Jakub Jirutka for Alpine Linux port.
2024-03-20 14:22:18 +01:00
ce69dc302c
Merge branch 'main' into next-0.10 2024-03-19 17:17:46 +01:00
26310f3242 Merge pull request 'CLI: allow manipulating buckets by prefixes of their full IDs' (#783) from bucket-id-prefix into main
Reviewed-on: Deuxfleurs/garage#783
2024-03-19 16:17:16 +00:00
65853a4863 Merge pull request 'block refcount repair' (#782) from block-ref-repair into next-0.10
Reviewed-on: Deuxfleurs/garage#782
2024-03-19 15:59:19 +00:00
783b586de9
[bucket-id-prefix] CLI: allow manipulating buckets by prefixes of their full IDs 2024-03-19 16:57:51 +01:00
3eab639c14
[block-ref-repair] mention garage block repair-rc in documentation 2024-03-19 16:24:34 +01:00
3165ab926c
[block-ref-repair] rename rc's rc field to rc_table 2024-03-19 16:20:22 +01:00
dc0b78cdb8
[block-ref-repair] Block refcount recalculation and repair
- We always recalculate the reference count of a block before deleting
  it locally, to make sure that it is indeed zero.

- If we had to fetch a remote block but we were not able to get it,
  check that refcount is indeed > 0.

- Repair procedure that checks everything
2024-03-19 16:20:22 +01:00
693b89b94b Merge pull request 'Update WinSCP link in documentation' (#781) from stefano/garage:main into main
Reviewed-on: Deuxfleurs/garage#781
2024-03-19 09:34:16 +00:00
cf344d73d5 Update WinSCP link in documentation
Update link to new wiki location. See Deuxfleurs/garage#780
2024-03-19 09:21:50 +00:00
0038ca8a78
Merge branch 'main' into next-0.10 2024-03-18 20:19:30 +01:00
1a0bffae34 Merge pull request 'Use connection pooling in sqlite backend' (#779) from sqlite-r2d2 into main
Reviewed-on: Deuxfleurs/garage#779
2024-03-18 19:02:36 +00:00
b55f52a9b7
[sqlite-r2d2] run integration test with all db engines 2024-03-18 18:31:35 +01:00
e8f9718ccd
[sqlite-r2d2] implement connection pooling in sqlite backend 2024-03-18 18:05:25 +01:00
fd2e19bf1b Merge pull request 'metadata db snapshotting' (#775) from db-snapshot into main
Reviewed-on: Deuxfleurs/garage#775
2024-03-15 13:17:53 +00:00
8cf3d24875
[db-snapshot] documentation for metadata db snapshots 2024-03-15 13:51:31 +01:00
a68c37555d
[db-snapshot] add garage meta snapshot cli operation 2024-03-15 13:51:31 +01:00
1e42808a59
[db-snapshot] implement meta_auto_snapshot_interval 2024-03-15 13:51:31 +01:00
8dff278b72
[db-snapshot] Implement db snapshotting logic in garage_db 2024-03-15 10:57:22 +01:00
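Taken together, the db-snapshot commits would enable a setup like this sketch (the config key comes from the commits above; the CLI invocation and flags are assumptions):

    # garage.toml: snapshot the metadata db automatically once a day
    meta_auto_snapshot_interval = "1d"
    # on-demand snapshots go through the new CLI operation:
    #   garage meta snapshot         # snapshot this node (assumed invocation)
    #   garage meta snapshot --all   # snapshot every node (assumed flag)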
a80ce6ab5a Merge pull request 'disable_scrub configuration option' (#774) from disable-scrub into main
Reviewed-on: Deuxfleurs/garage#774
2024-03-15 09:22:33 +00:00
990205dc3b
[disable-scrub] document disable_scrub config option 2024-03-14 17:01:16 +01:00
7c86ff6c37
[disable-scrub] implement a disable_scrub configuration option 2024-03-14 17:01:16 +01:00
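A sketch of this option in garage.toml (key name from the commit; the rationale in the comment is an assumption about typical use):

    # skip periodic scrub passes, e.g. when the underlying filesystem
    # already checksums and scrubs the data (ZFS, btrfs)
    disable_scrub = true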
62b01d8705 Merge pull request 'Doc: be slightly more critical of LMDB' (#773) from doc-updates into main
Reviewed-on: Deuxfleurs/garage#773
2024-03-14 15:43:30 +00:00
422d45b659
[doc-updates] from source: fix default feature list 2024-03-14 16:35:15 +01:00
a7dddebedd
[doc-updates] doc: be slightly more critical of LMDB 2024-03-14 16:31:22 +01:00
81191d2d92 Merge pull request 'Remove Sled' (#767) from rm-sled into next-0.10
Reviewed-on: Deuxfleurs/garage#767
2024-03-12 10:45:57 +00:00
2795b53b8b
[rm-sled] factorize some code in sqlite backend 2024-03-12 11:15:26 +01:00
32aa246300
[rm-sled] Make proper use of pinning in LMDB adapter + comment unsafe 2024-03-08 17:39:17 +01:00
b942949940
[rm-sled] Implement iterators in sqlite & lmdb transactions
with way too much unsafe code
2024-03-08 16:38:01 +01:00
66c23890c1
[rm-sled] Implement some missing functionality in garage_db 2024-03-08 16:02:58 +01:00
05c92204ec
[rm-sled] Remove counted_tree_hack 2024-03-08 15:09:57 +01:00
2128b5febd Merge pull request 'Remove migration path from Garage v0.5' (#766) from rm-migration into next-0.10
Reviewed-on: Deuxfleurs/garage#766
2024-03-08 13:43:42 +00:00
44454aac01
[rm-sled] Remove the Sled database engine 2024-03-08 14:11:02 +01:00
1ace34adbb
Merge branch 'main' into next-0.10 2024-03-08 13:57:10 +01:00
238545e564 Merge pull request 'Refactoring of db engines' (#765) from factor-db-open into main
Reviewed-on: Deuxfleurs/garage#765
2024-03-08 12:56:40 +00:00
f537f76681
[rm-migration] Remove migration path from Garage v0.5 2024-03-08 13:24:47 +01:00
ec34728b27
[factor-db-open] Combine logic for opening db engines 2024-03-08 12:58:17 +01:00
20c0b4ffb2 Merge pull request 'ReplicationMode -> ConsistencyMode+ReplicationFactor' (#750) from yuka/garage:split-consistency-mode into next-0.10
Reviewed-on: Deuxfleurs/garage#750
2024-03-07 16:32:52 +00:00
2fd13c7d13 Merge pull request 'SSE-C encryption' (#730) from sse-c into next-0.10
Reviewed-on: Deuxfleurs/garage#730
2024-03-07 15:21:37 +00:00
3fcb54e3cf
[sse-c] Remove special case for Content-Type header 2024-03-07 15:43:48 +01:00
e3333f2ac5
[sse-c] Documentation for SSE-C 2024-03-07 15:43:48 +01:00
fa4878bad6
[sse-c] Testing for SSE-C encryption 2024-03-07 15:43:48 +01:00
57acc60082
[sse-c] Implement SSE-C encryption 2024-03-07 15:43:47 +01:00
fe2dc5d51c
Merge branch 'main' into next-0.10 2024-03-07 14:00:34 +01:00
afee8c2207 Merge pull request 'allow utf-8 in headers + add test for object metadata' (#763) from unicode-headers into main
Reviewed-on: Deuxfleurs/garage#763
2024-03-07 12:54:07 +00:00
eab2b81be2
[unicode-headers] allow utf-8 in headers + add test for object metadata 2024-03-07 13:42:01 +01:00
Yureka
c1769bbe69 ReplicationMode -> ConsistencyMode+ReplicationFactor 2024-03-07 12:45:33 +01:00
Yureka
8f86af52ed adjust docs for replication factor 2024-03-05 22:57:08 +01:00
603604cdfc Merge pull request 'refactor: remove max_write_errors and max_faults' (#760) from yuka/garage:remove-max-write-errors into next-0.10
Reviewed-on: Deuxfleurs/garage#760
2024-03-05 21:56:17 +00:00
Yureka
6760895926 refactor: remove max_write_errors and max_faults 2024-03-04 18:39:56 +01:00
bbde9bc912
Merge branch 'main' into next-0.10 2024-03-04 15:56:10 +01:00
3168bb34a0 Merge pull request 'add request context helper' (#751) from yuka/garage:req-ctx into main
Reviewed-on: Deuxfleurs/garage#751
2024-03-04 14:51:05 +00:00
512933a036 Merge pull request 'Garage v0.9.3' (#757) from rel-0.9.3 into main
Reviewed-on: Deuxfleurs/garage#757
2024-03-04 13:26:47 +00:00
8670140358
[rel-0.9.3] Bump version to 0.9.3 2024-03-04 14:00:55 +01:00
5bb69a1257 Merge pull request 'Add API test + fix presigned requests' (#756) from test-presigned into main
Reviewed-on: Deuxfleurs/garage#756
2024-03-04 12:56:02 +00:00
c8e416aaa5
[test-presigned] Use a HeaderMap type for QueryMap 2024-03-04 13:33:14 +01:00
Yureka
fb55682c66 add request context helper 2024-03-04 13:26:39 +01:00
c94bf45cba
Store original-cased query keys alongside query values 2024-03-04 13:03:27 +01:00
7c4f3473af
Lowercase query parameter keys when parsing 2024-03-04 13:03:16 +01:00
b6a91e549b
[test-presigned] Add API test for presigned requests 2024-03-04 13:02:07 +01:00
32d6b4def8 Merge pull request 'Add talk on 2024-02-09 at capitoul.org' (#755) from talk-capitoul into main
Reviewed-on: Deuxfleurs/garage#755
2024-03-04 11:08:23 +00:00
d0d95fd53f
[next-0.10] woodpecker: run debug pipeline on manual trigger 2024-02-27 10:13:09 +01:00
4b978b7533
Merge branch 'main' into next-0.10 2024-02-26 18:55:24 +01:00
ee2b0c8dda
[talk-capitoul] Add talk on 2024-02-09 at capitoul.org 2024-02-26 13:42:47 +01:00
3692af7052
Merge branch 'main' into next-0.10 2024-02-23 18:28:05 +01:00
916c67ccf4
Merge branch 'main' into next-0.10 2024-02-23 16:50:34 +01:00
81cebdd124
[next-0.10] fix build 2024-02-22 15:53:47 +01:00
59f61c966a
Merge branch 'main' into next-0.10 2024-02-22 15:45:45 +01:00
75e591727d
[next-0.10] cluster node status metrics: report nodes of all active layout versions 2024-02-20 17:08:31 +01:00
643d1aabd8
Merge branch 'main' into next-0.10 2024-02-20 17:02:44 +01:00
eb4a6ce106
Merge branch 'main' into next-0.10 2024-02-15 14:06:34 +01:00
cf2af186fc
Merge branch 'main' into next-0.10 2024-02-13 11:36:28 +01:00
db48dd3d6c
bump crate versions to 0.10.0 2024-01-11 12:05:51 +01:00
8a6ec1d611 Merge pull request 'NLnet task 3' (#667) from nlnet-task3 into next-0.10
Reviewed-on: Deuxfleurs/garage#667
2024-01-11 10:58:08 +00:00
0041b013a4
layout: refactoring and fix in layout helper 2023-12-11 16:09:22 +01:00
adccce1145
layout: refactor/fix bad while loop 2023-12-11 15:45:14 +01:00
85b5a6bcd1
fix some clippy lints 2023-12-11 15:31:47 +01:00
e4f493b481
table: remove redundant tracing in insert_many 2023-12-11 14:57:42 +01:00
f8df90b79b
table: fix insert_many to not send duplicates 2023-12-08 14:54:11 +01:00
4dbf254512
layout: refactoring, merge two files 2023-12-08 14:15:52 +01:00
64a6e557a4
rpc helper: small refactorings 2023-12-08 12:18:12 +01:00
5dd200c015
layout: move block_read_nodes_of to rpc_helper to avoid double-locking
(in theory, this could have caused a deadlock)
2023-12-08 12:02:24 +01:00
063294dd56
layout version: refactor get_node_zone 2023-12-08 11:50:58 +01:00
7f2541101f
cli: improvements to the layout commands when multiple layouts are live 2023-12-08 11:24:23 +01:00
91b874c4ef
rpc: fix system::health 2023-12-08 10:36:37 +01:00
431b28e0cf
fix build with discovery features 2023-12-07 15:15:59 +01:00
9cecea64d4
layout: allow sync update tracker to progress with only quorums 2023-12-07 14:51:20 +01:00
aa59059a91
layout cli: safer skip-dead-nodes command 2023-12-07 11:56:14 +01:00
d90de365b3
table sync: use write quorums to report global success or failure of sync 2023-12-07 11:16:10 +01:00
95eb13eb08
rpc: refactor result tracking for quorum sets 2023-12-07 10:57:21 +01:00
c8356a91d9
layout updates: fix the set of nodes among which minima are calculated 2023-12-07 10:30:26 +01:00
c04dd8788a
admin: more info in admin GetClusterStatus 2023-11-28 14:25:04 +01:00
539af6eac4
rpc helper: write comments + small refactoring of tracing 2023-11-28 11:12:39 +01:00
c539077d30
cli: remove historic layout info from status 2023-11-27 16:22:27 +01:00
11e6fef93c
cli: add layout history and layout assume-sync commands 2023-11-27 16:22:25 +01:00
539a920313
cli: show when nodes are draining metadata 2023-11-27 13:18:59 +01:00
78362140f5
rpc: update system::health to take into account write sets for all partitions 2023-11-27 12:10:21 +01:00
d6d239fc79
block manager: read_block using old layout versions if necessary 2023-11-27 11:52:57 +01:00
3ecd14b9f6
table: implement write sets for insert_many 2023-11-16 16:41:45 +01:00
22f38808e7
rpc_helper: don't use tokio::spawn for individual requests 2023-11-16 16:34:01 +01:00
707442f5de
layout: refactor digests and add "!=" assertions before epidemic bcast 2023-11-16 13:51:40 +01:00
ad5c6f779f
layout: split helper in separate file; more precise difference tracking 2023-11-16 13:26:43 +01:00
d4df03424f
layout: fix test 2023-11-15 15:56:57 +01:00
33c8a489b0
layout: implement ack locking 2023-11-15 15:40:44 +01:00
393c4d4515
layout: add helper for cached/external values to centralize recomputation 2023-11-15 14:20:50 +01:00
65066c7064
layout: wip cache global mins 2023-11-15 13:28:30 +01:00
acd49de9f9
rpc: fix write set quorums 2023-11-15 13:07:42 +01:00
46007bf01d
integration test: print stdout and stderr on subcommand crash 2023-11-15 12:56:52 +01:00
b3e729f4b8
layout history merge: rm invalid versions when valid versions are added 2023-11-15 12:15:58 +01:00
7ef2c23120
layout: fix test 2023-11-14 15:45:01 +01:00
90e1619b1e
table: take into account multiple write sets in inserts 2023-11-14 15:40:46 +01:00
3b361d2959
layout: prepare for write sets 2023-11-14 14:28:16 +01:00
866196750f
system: add todo wrt new layout 2023-11-14 13:36:58 +01:00
83a11374ca
layout: fixes in schema 2023-11-14 13:29:26 +01:00
1aab1f4e68
layout: refactoring of all_nodes 2023-11-14 13:12:32 +01:00
8e292e06b3
layout: some refactoring of nongateway nodes 2023-11-14 12:48:38 +01:00
9a491fa137
layout: fix test 2023-11-11 13:10:59 +01:00
df24bb806d
layout/sync: fix bugs and add tracing 2023-11-11 12:44:27 +01:00
ce89d1ddab
table sync: adapt to new layout history 2023-11-11 12:08:32 +01:00
df36cf3099
layout: add helpers to LayoutHistory and prepare integration with Table 2023-11-09 16:32:31 +01:00
9d95f6f704
layout: fix tracker bugs 2023-11-09 15:52:45 +01:00
bad7cc812e
layout admin: add missing calls to update_hash 2023-11-09 15:42:10 +01:00
03ebf18830
layout: begin managing the update tracker values 2023-11-09 15:31:59 +01:00
94caf9c0c1
layout: separate code path for synchronizing update trackers only 2023-11-09 14:53:34 +01:00
bfb1845fdc
layout: refactor to use a RwLock on LayoutHistory 2023-11-09 14:12:05 +01:00
19ef1ec8e7
layout: more refactoring 2023-11-09 13:34:14 +01:00
8a2b1dd422
wip: split out layout management from System into separate LayoutManager 2023-11-09 12:55:36 +01:00
523d2ecb95
layout: use separate CRDT for staged layout changes 2023-11-09 11:19:43 +01:00
1da0a5676e
bump garage protocol version tag to 0x000A (0.10) 2023-11-08 19:30:58 +01:00
8dccee3ccf
cluster layout: adapt all uses of ClusterLayout to LayoutHistory 2023-11-08 19:28:36 +01:00
fe9af1dcaa
WIP: garage_rpc: store layout version history 2023-11-08 17:49:06 +01:00
4a9c94514f
avoid using layout_watch in System directly 2023-11-08 16:41:00 +01:00
12d1dbfc6b
remove Ring and use ClusterLayout everywhere 2023-11-08 15:41:24 +01:00
0962313ebd
garage_rpc: reorder functions in layout.rs 2023-11-08 13:13:04 +01:00
178 changed files with 15279 additions and 5090 deletions


@@ -5,6 +5,7 @@ when:
   - pull_request
   - deployment
   - cron
+  - manual

 steps:
   - name: check formatting
@@ -33,7 +34,9 @@ steps:
       - ./result/bin/garage_util-*
       - ./result/bin/garage_web-*
       - ./result/bin/garage-*
-      - ./result/bin/integration-* || (cat tmp-garage-integration/stderr.log; false)
+      - GARAGE_TEST_INTEGRATION_DB_ENGINE=lmdb ./result/bin/integration-* || (cat tmp-garage-integration/stderr.log; false)
+      - nix-shell --attr ci --run "killall -9 garage" || true
+      - GARAGE_TEST_INTEGRATION_DB_ENGINE=sqlite ./result/bin/integration-* || (cat tmp-garage-integration/stderr.log; false)
       - rm result
       - rm -rv tmp-garage-integration

Cargo.lock (generated, 247 changed lines)

@@ -17,6 +17,41 @@ version = "1.0.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"

+[[package]]
+name = "aead"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0"
+dependencies = [
+ "crypto-common",
+ "generic-array",
+]
+
+[[package]]
+name = "aes"
+version = "0.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0"
+dependencies = [
+ "cfg-if",
+ "cipher",
+ "cpufeatures",
+]
+
+[[package]]
+name = "aes-gcm"
+version = "0.10.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1"
+dependencies = [
+ "aead",
+ "aes",
+ "cipher",
+ "ctr",
+ "ghash",
+ "subtle",
+]
+
 [[package]]
 name = "ahash"
 version = "0.8.7"
@@ -761,6 +796,16 @@ dependencies = [
  "windows-targets 0.52.0",
 ]

+[[package]]
+name = "cipher"
+version = "0.4.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad"
+dependencies = [
+ "crypto-common",
+ "inout",
+]
+
 [[package]]
 name = "clap"
 version = "2.34.0"
@@ -860,9 +905,9 @@ dependencies = [
 [[package]]
 name = "crc32fast"
-version = "1.3.2"
+version = "1.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d"
+checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa"
 dependencies = [
  "cfg-if",
 ]
@@ -876,15 +921,6 @@ dependencies = [
  "crossbeam-utils",
 ]

-[[package]]
-name = "crossbeam-epoch"
-version = "0.9.18"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
-dependencies = [
- "crossbeam-utils",
-]
-
 [[package]]
 name = "crossbeam-queue"
 version = "0.3.11"
@@ -929,9 +965,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
 dependencies = [
  "generic-array",
+ "rand_core",
  "typenum",
 ]

+[[package]]
+name = "ctr"
+version = "0.9.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835"
+dependencies = [
+ "cipher",
+]
+
 [[package]]
 name = "darling"
 version = "0.20.5"
@@ -1167,16 +1213,6 @@ dependencies = [
 name = "format_table"
 version = "0.1.1"

-[[package]]
-name = "fs2"
-version = "0.4.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213"
-dependencies = [
- "libc",
- "winapi",
-]
-
 [[package]]
 name = "futures"
 version = "0.3.30"
@@ -1266,18 +1302,9 @@ dependencies = [
  "slab",
 ]

-[[package]]
-name = "fxhash"
-version = "0.2.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c"
-dependencies = [
- "byteorder",
-]
-
 [[package]]
 name = "garage"
-version = "0.9.2"
+version = "1.0.1"
 dependencies = [
  "assert-json-diff",
  "async-trait",
@@ -1319,9 +1346,11 @@ dependencies = [
  "serde",
  "serde_bytes",
  "serde_json",
+ "sha1",
  "sha2",
  "static_init",
  "structopt",
+ "syslog-tracing",
  "timeago",
  "tokio",
  "toml",
@@ -1331,13 +1360,17 @@ dependencies = [
 [[package]]
 name = "garage_api"
-version = "0.9.2"
+version = "1.0.1"
 dependencies = [
+ "aes-gcm",
  "argon2",
+ "async-compression",
  "async-trait",
  "base64 0.21.7",
  "bytes",
  "chrono",
+ "crc32c",
+ "crc32fast",
  "crypto-common",
  "err-derive",
  "form_urlencoded",
@@ -1371,16 +1404,18 @@ dependencies = [
  "serde",
  "serde_bytes",
  "serde_json",
+ "sha1",
  "sha2",
  "tokio",
  "tokio-stream",
+ "tokio-util 0.7.10",
  "tracing",
  "url",
 ]

 [[package]]
 name = "garage_block"
-version = "0.9.2"
+version = "1.0.1"
 dependencies = [
  "arc-swap",
  "async-compression",
@@ -1407,20 +1442,21 @@ dependencies = [
 [[package]]
 name = "garage_db"
-version = "0.9.2"
+version = "1.0.1"
 dependencies = [
  "err-derive",
  "heed",
  "hexdump",
  "mktemp",
+ "r2d2",
+ "r2d2_sqlite",
  "rusqlite",
- "sled",
  "tracing",
 ]

 [[package]]
 name = "garage_model"
-version = "0.9.2"
+version = "1.0.1"
 dependencies = [
  "arc-swap",
  "async-trait",
@@ -1437,7 +1473,9 @@ dependencies = [
  "garage_table",
  "garage_util",
  "hex",
+ "http 1.0.0",
  "opentelemetry",
+ "parse_duration",
  "rand",
  "serde",
  "serde_bytes",
@@ -1448,7 +1486,7 @@ dependencies = [
 [[package]]
 name = "garage_net"
-version = "0.9.2"
+version = "1.0.1"
 dependencies = [
  "arc-swap",
  "async-trait",
@@ -1474,7 +1512,7 @@ dependencies = [
 [[package]]
 name = "garage_rpc"
-version = "0.9.2"
+version = "1.0.1"
 dependencies = [
  "arc-swap",
  "async-trait",
@@ -1489,6 +1527,7 @@ dependencies = [
  "garage_util",
  "gethostname",
  "hex",
+ "ipnet",
  "itertools 0.12.1",
  "k8s-openapi",
  "kube",
@@ -1509,7 +1548,7 @@ dependencies = [
 [[package]]
 name = "garage_table"
-version = "0.9.2"
+version = "1.0.1"
 dependencies = [
  "arc-swap",
  "async-trait",
@@ -1531,7 +1570,7 @@ dependencies = [
 [[package]]
 name = "garage_util"
-version = "0.9.2"
+version = "1.0.1"
 dependencies = [
  "arc-swap",
  "async-trait",
@@ -1565,7 +1604,7 @@ dependencies = [
 [[package]]
 name = "garage_web"
-version = "0.9.2"
+version = "1.0.1"
 dependencies = [
  "err-derive",
  "futures",
@@ -1614,6 +1653,16 @@ dependencies = [
  "wasi",
 ]

+[[package]]
+name = "ghash"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1"
+dependencies = [
+ "opaque-debug",
+ "polyval",
+]
+
 [[package]]
 name = "gimli"
 version = "0.28.1"
@@ -1707,9 +1756,9 @@ dependencies = [
 [[package]]
 name = "hashlink"
-version = "0.8.4"
+version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7"
+checksum = "692eaaf7f7607518dd3cef090f1474b61edc5301d8012f09579920df68b725ee"
 dependencies = [
  "hashbrown 0.14.3",
 ]
@@ -2063,6 +2112,15 @@ dependencies = [
  "hashbrown 0.14.3",
 ]

+[[package]]
+name = "inout"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5"
+dependencies = [
+ "generic-array",
+]
+
 [[package]]
 name = "instant"
 version = "0.1.12"
@@ -2366,9 +2424,9 @@ dependencies = [
 [[package]]
 name = "libsqlite3-sys"
-version = "0.27.0"
+version = "0.28.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cf4e226dcd58b4be396f7bd3c20da8fdee2911400705297ba7d2d7cc2c30f716"
+checksum = "0c10584274047cb335c23d3e61bcef8e323adae7c5c8c760540f73610177fc3f"
 dependencies = [
  "cc",
  "pkg-config",
@@ -2643,6 +2701,12 @@ version = "1.19.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"

+[[package]]
+name = "opaque-debug"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381"
+
 [[package]]
 name = "openssl-probe"
 version = "0.1.5"
@@ -2980,6 +3044,18 @@ dependencies = [
  "winapi",
 ]

+[[package]]
+name = "polyval"
+version = "0.6.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25"
+dependencies = [
+ "cfg-if",
+ "cpufeatures",
+ "opaque-debug",
+ "universal-hash",
+]
+
 [[package]]
 name = "powerfmt"
 version = "0.2.0"
@@ -3128,6 +3204,28 @@ dependencies = [
  "proc-macro2",
 ]

+[[package]]
+name = "r2d2"
+version = "0.8.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "51de85fb3fb6524929c8a2eb85e6b6d363de4e8c48f9e2c2eac4944abc181c93"
+dependencies = [
+ "log",
+ "parking_lot 0.12.1",
+ "scheduled-thread-pool",
+]
+
+[[package]]
+name = "r2d2_sqlite"
+version = "0.24.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6a982edf65c129796dba72f8775b292ef482b40d035e827a9825b3bc07ccc5f2"
+dependencies = [
+ "r2d2",
+ "rusqlite",
+ "uuid",
+]
+
 [[package]]
 name = "rand"
 version = "0.8.5"
@@ -3321,9 +3419,9 @@ checksum = "3cd14fd5e3b777a7422cca79358c57a8f6e3a703d9ac187448d0daf220c2407f"
 [[package]]
 name = "rusqlite"
-version = "0.30.0"
+version = "0.31.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a78046161564f5e7cd9008aff3b2990b3850dc8e0349119b98e8f251e099f24d"
+checksum = "b838eba278d213a8beaf485bd313fd580ca4505a00d5871caeb1457c55322cae"
 dependencies = [
  "bitflags 2.4.2",
  "fallible-iterator",
@@ -3488,6 +3586,15 @@ dependencies = [
  "windows-sys 0.52.0",
 ]

+[[package]]
+name = "scheduled-thread-pool"
+version = "0.2.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19"
+dependencies = [
+ "parking_lot 0.12.1",
+]
+
 [[package]]
 name = "schemars"
 version = "0.8.16"
@@ -3735,22 +3842,6 @@ dependencies = [
  "autocfg",
 ]

-[[package]]
-name = "sled"
-version = "0.34.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7f96b4737c2ce5987354855aed3797279def4ebf734436c6aa4552cf8e169935"
-dependencies = [
- "crc32fast",
- "crossbeam-epoch",
- "crossbeam-utils",
- "fs2",
- "fxhash",
- "libc",
- "log",
- "parking_lot 0.11.2",
-]
-
 [[package]]
 name = "smallvec"
 version = "1.13.1"
@@ -3896,6 +3987,17 @@ dependencies = [
  "unicode-xid",
 ]

+[[package]]
+name = "syslog-tracing"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "340b1540dcdb6b066bc2966e7974f977ab1a38f21b2be189014ffb0cc2405768"
+dependencies = [
+ "libc",
+ "tracing-core",
+ "tracing-subscriber",
+]
+
 [[package]]
 name = "system-configuration"
 version = "0.5.1"
@@ -3980,9 +4082,9 @@ dependencies = [
 [[package]]
 name = "time"
-version = "0.3.34"
+version = "0.3.36"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749"
+checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885"
 dependencies = [
  "deranged",
  "num-conv",
@@ -4000,9 +4102,9 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3"
 [[package]]
 name = "time-macros"
-version = "0.2.17"
+version = "0.2.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7ba3a3ef41e6672a2f0f001392bb5dcd3ff0a9992d618ca761a11c3121547774"
+checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf"
 dependencies = [
  "num-conv",
  "time-core",
@@ -4399,6 +4501,16 @@ version = "0.2.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c"

+[[package]]
+name = "universal-hash"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea"
+dependencies = [
+ "crypto-common",
+ "subtle",
+]
+
 [[package]]
 name = "unsafe-libyaml"
 version = "0.2.10"
@@ -4441,6 +4553,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "79daa5ed5740825c40b389c5e50312b9c86df53fccd33f281df655642b43869d"
 dependencies = [
  "getrandom",
+ "rand",
 ]

 [[package]]

Cargo.nix (528 changed lines)

@ -34,7 +34,7 @@ args@{
ignoreLockHash, ignoreLockHash,
}: }:
let let
nixifiedLockHash = "3e1e0730302ee7d1f4185a13ad0086392562eb7cb962b1212801847887beaa47"; nixifiedLockHash = "466643eea782cd68c6f205858bb9e053aecdb18e2e58427b0527022aad596130";
workspaceSrc = if args.workspaceSrc == null then ./. else args.workspaceSrc; workspaceSrc = if args.workspaceSrc == null then ./. else args.workspaceSrc;
currentLockHash = builtins.hashFile "sha256" (workspaceSrc + /Cargo.lock); currentLockHash = builtins.hashFile "sha256" (workspaceSrc + /Cargo.lock);
lockHashIgnored = if ignoreLockHash lockHashIgnored = if ignoreLockHash
@ -58,17 +58,17 @@ in
{ {
cargo2nixVersion = "0.11.0"; cargo2nixVersion = "0.11.0";
workspace = { workspace = {
garage_db = rustPackages.unknown.garage_db."0.9.2"; garage_db = rustPackages.unknown.garage_db."1.0.1";
garage_util = rustPackages.unknown.garage_util."0.9.2"; garage_util = rustPackages.unknown.garage_util."1.0.1";
garage_net = rustPackages.unknown.garage_net."0.9.2"; garage_net = rustPackages.unknown.garage_net."1.0.1";
garage_rpc = rustPackages.unknown.garage_rpc."0.9.2"; garage_rpc = rustPackages.unknown.garage_rpc."1.0.1";
format_table = rustPackages.unknown.format_table."0.1.1"; format_table = rustPackages.unknown.format_table."0.1.1";
garage_table = rustPackages.unknown.garage_table."0.9.2"; garage_table = rustPackages.unknown.garage_table."1.0.1";
garage_block = rustPackages.unknown.garage_block."0.9.2"; garage_block = rustPackages.unknown.garage_block."1.0.1";
garage_model = rustPackages.unknown.garage_model."0.9.2"; garage_model = rustPackages.unknown.garage_model."1.0.1";
garage_api = rustPackages.unknown.garage_api."0.9.2"; garage_api = rustPackages.unknown.garage_api."1.0.1";
garage_web = rustPackages.unknown.garage_web."0.9.2"; garage_web = rustPackages.unknown.garage_web."1.0.1";
garage = rustPackages.unknown.garage."0.9.2"; garage = rustPackages.unknown.garage."1.0.1";
k2v-client = rustPackages.unknown.k2v-client."0.0.4"; k2v-client = rustPackages.unknown.k2v-client."0.0.4";
}; };
"registry+https://github.com/rust-lang/crates.io-index".addr2line."0.21.0" = overridableMkRustCrate (profileName: rec { "registry+https://github.com/rust-lang/crates.io-index".addr2line."0.21.0" = overridableMkRustCrate (profileName: rec {
@ -88,6 +88,58 @@ in
src = fetchCratesIo { inherit name version; sha256 = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"; }; src = fetchCratesIo { inherit name version; sha256 = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"; };
}); });
"registry+https://github.com/rust-lang/crates.io-index".aead."0.5.2" = overridableMkRustCrate (profileName: rec {
name = "aead";
version = "0.5.2";
registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0"; };
features = builtins.concatLists [
[ "alloc" ]
[ "getrandom" ]
[ "rand_core" ]
[ "stream" ]
];
dependencies = {
crypto_common = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crypto-common."0.1.6" { inherit profileName; }).out;
generic_array = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".generic-array."0.14.7" { inherit profileName; }).out;
};
});
"registry+https://github.com/rust-lang/crates.io-index".aes."0.8.4" = overridableMkRustCrate (profileName: rec {
name = "aes";
version = "0.8.4";
registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0"; };
dependencies = {
cfg_if = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".cfg-if."1.0.0" { inherit profileName; }).out;
cipher = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".cipher."0.4.4" { inherit profileName; }).out;
${ if hostPlatform.parsed.cpu.name == "aarch64" || hostPlatform.parsed.cpu.name == "x86_64" || hostPlatform.parsed.cpu.name == "i686" then "cpufeatures" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".cpufeatures."0.2.12" { inherit profileName; }).out;
};
});
"registry+https://github.com/rust-lang/crates.io-index".aes-gcm."0.10.3" = overridableMkRustCrate (profileName: rec {
name = "aes-gcm";
version = "0.10.3";
registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1"; };
features = builtins.concatLists [
[ "aes" ]
[ "alloc" ]
[ "default" ]
[ "getrandom" ]
[ "rand_core" ]
[ "stream" ]
];
dependencies = {
aead = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".aead."0.5.2" { inherit profileName; }).out;
aes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".aes."0.8.4" { inherit profileName; }).out;
cipher = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".cipher."0.4.4" { inherit profileName; }).out;
ctr = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".ctr."0.9.2" { inherit profileName; }).out;
ghash = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".ghash."0.5.1" { inherit profileName; }).out;
subtle = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".subtle."2.5.0" { inherit profileName; }).out;
};
});
"registry+https://github.com/rust-lang/crates.io-index".ahash."0.8.7" = overridableMkRustCrate (profileName: rec { "registry+https://github.com/rust-lang/crates.io-index".ahash."0.8.7" = overridableMkRustCrate (profileName: rec {
name = "ahash"; name = "ahash";
version = "0.8.7"; version = "0.8.7";
@ -100,13 +152,13 @@ in
(lib.optional (rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "std") (lib.optional (rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "std")
]; ];
dependencies = { dependencies = {
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "cfg_if" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".cfg-if."1.0.0" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "cfg_if" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".cfg-if."1.0.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "getrandom" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".getrandom."0.2.12" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "getrandom" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".getrandom."0.2.12" { inherit profileName; }).out;
${ if (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") && !((hostPlatform.parsed.cpu.name == "armv6l" || hostPlatform.parsed.cpu.name == "armv7l") && hostPlatform.parsed.kernel.name == "none") then "once_cell" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".once_cell."1.19.0" { inherit profileName; }).out; ${ if (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") && !((hostPlatform.parsed.cpu.name == "armv6l" || hostPlatform.parsed.cpu.name == "armv7l") && hostPlatform.parsed.kernel.name == "none") then "once_cell" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".once_cell."1.19.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "zerocopy" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".zerocopy."0.7.32" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "zerocopy" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".zerocopy."0.7.32" { inherit profileName; }).out;
}; };
buildDependencies = { buildDependencies = {
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "version_check" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".version_check."0.9.4" { profileName = "__noProfile"; }).out; ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "version_check" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".version_check."0.9.4" { profileName = "__noProfile"; }).out;
}; };
}); });
@@ -130,7 +182,7 @@ in
 registry = "registry+https://github.com/rust-lang/crates.io-index";
 src = fetchCratesIo { inherit name version; sha256 = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5"; };
 features = builtins.concatLists [
-(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "alloc")
+(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "alloc")
 ];
 });
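The feature list above uses the companion idiom: lib.optional returns a one-element list when its condition holds and an empty list otherwise, and builtins.concatLists flattens the results into the final feature set. A runnable sketch (nixpkgs lib assumed; the enabled-feature set is hypothetical):

  let
    lib = (import <nixpkgs> {}).lib;
    rootFeatures' = { "garage_db/r2d2_sqlite" = true; };
  in builtins.concatLists [
    (lib.optional (rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/r2d2_sqlite") "alloc")
    [ "std" ]
  ]
  # => [ "alloc" "std" ]; with neither feature enabled, just [ "std" ]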
@@ -372,7 +424,7 @@ in
 http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.11" { inherit profileName; }).out;
 hyper = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."0.14.28" { inherit profileName; }).out;
 ring = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".ring."0.17.7" { inherit profileName; }).out;
-time = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".time."0.3.34" { inherit profileName; }).out;
+time = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".time."0.3.36" { inherit profileName; }).out;
 tokio = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.36.0" { inherit profileName; }).out;
 tracing = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tracing."0.1.40" { inherit profileName; }).out;
 zeroize = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".zeroize."1.7.0" { inherit profileName; }).out;
@@ -591,7 +643,7 @@ in
 ring = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".ring."0.17.7" { inherit profileName; }).out;
 sha2 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".sha2."0.10.8" { inherit profileName; }).out;
 subtle = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".subtle."2.5.0" { inherit profileName; }).out;
-time = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".time."0.3.34" { inherit profileName; }).out;
+time = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".time."0.3.36" { inherit profileName; }).out;
 tracing = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tracing."0.1.40" { inherit profileName; }).out;
 zeroize = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".zeroize."1.7.0" { inherit profileName; }).out;
 };
@@ -622,7 +674,7 @@ in
 aws_smithy_types = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".aws-smithy-types."1.1.4" { inherit profileName; }).out;
 bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.5.0" { inherit profileName; }).out;
 crc32c = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crc32c."0.6.4" { inherit profileName; }).out;
-crc32fast = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crc32fast."1.3.2" { inherit profileName; }).out;
+crc32fast = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crc32fast."1.4.0" { inherit profileName; }).out;
 hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
 http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.11" { inherit profileName; }).out;
 http_body = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http-body."0.4.6" { inherit profileName; }).out;
@@ -642,7 +694,7 @@ in
 dependencies = {
 aws_smithy_types = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".aws-smithy-types."1.1.4" { inherit profileName; }).out;
 bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.5.0" { inherit profileName; }).out;
-crc32fast = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crc32fast."1.3.2" { inherit profileName; }).out;
+crc32fast = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crc32fast."1.4.0" { inherit profileName; }).out;
 };
 });
@@ -771,7 +823,7 @@ in
 pin_utils = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".pin-utils."0.1.0" { inherit profileName; }).out;
 ryu = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".ryu."1.0.16" { inherit profileName; }).out;
 ${ if false then "serde" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.196" { inherit profileName; }).out;
-time = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".time."0.3.34" { inherit profileName; }).out;
+time = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".time."0.3.36" { inherit profileName; }).out;
 tokio = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.36.0" { inherit profileName; }).out;
 tokio_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio-util."0.7.10" { inherit profileName; }).out;
 };
@@ -1085,6 +1137,17 @@ in
 };
 });
"registry+https://github.com/rust-lang/crates.io-index".cipher."0.4.4" = overridableMkRustCrate (profileName: rec {
name = "cipher";
version = "0.4.4";
registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad"; };
dependencies = {
crypto_common = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crypto-common."0.1.6" { inherit profileName; }).out;
inout = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".inout."0.1.3" { inherit profileName; }).out;
};
});
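The cipher crate added above (pulled into the tree, presumably, by the new aes-gcm dependency of garage_api further down) follows the standard registry-crate shape: fetched by name and version and verified against the recorded sha256. fetchCratesIo is defined earlier in Cargo.nix; it behaves roughly like this hedged fetchurl sketch, where the URL shape is the public crates.io download endpoint and the wrapper itself is an assumption, not the file's exact definition:

  { fetchurl }:
  fetchurl {
    name = "cipher-0.4.4.tar.gz";  # assumed archive name, for illustration only
    url = "https://crates.io/api/v1/crates/cipher/0.4.4/download";
    sha256 = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad";
  }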
"registry+https://github.com/rust-lang/crates.io-index".clap."2.34.0" = overridableMkRustCrate (profileName: rec { "registry+https://github.com/rust-lang/crates.io-index".clap."2.34.0" = overridableMkRustCrate (profileName: rec {
name = "clap"; name = "clap";
version = "2.34.0"; version = "2.34.0";
@ -1224,11 +1287,11 @@ in
}; };
}); });
"registry+https://github.com/rust-lang/crates.io-index".crc32fast."1.3.2" = overridableMkRustCrate (profileName: rec { "registry+https://github.com/rust-lang/crates.io-index".crc32fast."1.4.0" = overridableMkRustCrate (profileName: rec {
name = "crc32fast"; name = "crc32fast";
version = "1.3.2"; version = "1.4.0";
registry = "registry+https://github.com/rust-lang/crates.io-index"; registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d"; }; src = fetchCratesIo { inherit name version; sha256 = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa"; };
features = builtins.concatLists [ features = builtins.concatLists [
[ "default" ] [ "default" ]
[ "std" ] [ "std" ]
@ -1252,21 +1315,6 @@ in
}; };
}); });
"registry+https://github.com/rust-lang/crates.io-index".crossbeam-epoch."0.9.18" = overridableMkRustCrate (profileName: rec {
name = "crossbeam-epoch";
version = "0.9.18";
registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"; };
features = builtins.concatLists [
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sled") "alloc")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sled") "default")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sled") "std")
];
dependencies = {
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sled" then "crossbeam_utils" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crossbeam-utils."0.8.19" { inherit profileName; }).out;
};
});
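crossbeam-epoch disappears here because every one of its gating conditions mentions a sled feature, and the sled storage backend is gone from the workspace in v1.x (fs2 and fxhash fall out the same way further down). Once no */sled key can appear in rootFeatures', those guards are statically false, so the generator prunes the crates entirely. A runnable check of that reasoning (the enabled-feature list is hypothetical):

  let
    rootFeatures' = builtins.listToAttrs
      (map (f: { name = f; value = true; }) [ "garage/default" "garage_db/default" ]);
  in rootFeatures' ? "garage_db/sled"  # => false: a sled-gated dependency can never activate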
"registry+https://github.com/rust-lang/crates.io-index".crossbeam-queue."0.3.11" = overridableMkRustCrate (profileName: rec { "registry+https://github.com/rust-lang/crates.io-index".crossbeam-queue."0.3.11" = overridableMkRustCrate (profileName: rec {
name = "crossbeam-queue"; name = "crossbeam-queue";
version = "0.3.11"; version = "0.3.11";
@ -1288,7 +1336,6 @@ in
registry = "registry+https://github.com/rust-lang/crates.io-index"; registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345"; }; src = fetchCratesIo { inherit name version; sha256 = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345"; };
features = builtins.concatLists [ features = builtins.concatLists [
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sled") "default")
[ "std" ] [ "std" ]
]; ];
}); });
@ -1333,14 +1380,27 @@ in
registry = "registry+https://github.com/rust-lang/crates.io-index"; registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"; }; src = fetchCratesIo { inherit name version; sha256 = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"; };
features = builtins.concatLists [ features = builtins.concatLists [
[ "getrandom" ]
[ "rand_core" ]
[ "std" ] [ "std" ]
]; ];
dependencies = { dependencies = {
generic_array = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".generic-array."0.14.7" { inherit profileName; }).out; generic_array = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".generic-array."0.14.7" { inherit profileName; }).out;
rand_core = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand_core."0.6.4" { inherit profileName; }).out;
typenum = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".typenum."1.17.0" { inherit profileName; }).out; typenum = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".typenum."1.17.0" { inherit profileName; }).out;
}; };
}); });
"registry+https://github.com/rust-lang/crates.io-index".ctr."0.9.2" = overridableMkRustCrate (profileName: rec {
name = "ctr";
version = "0.9.2";
registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835"; };
dependencies = {
cipher = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".cipher."0.4.4" { inherit profileName; }).out;
};
});
"registry+https://github.com/rust-lang/crates.io-index".darling."0.20.5" = overridableMkRustCrate (profileName: rec { "registry+https://github.com/rust-lang/crates.io-index".darling."0.20.5" = overridableMkRustCrate (profileName: rec {
name = "darling"; name = "darling";
version = "0.20.5"; version = "0.20.5";
@ -1624,8 +1684,8 @@ in
registry = "registry+https://github.com/rust-lang/crates.io-index"; registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649"; }; src = fetchCratesIo { inherit name version; sha256 = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649"; };
features = builtins.concatLists [ features = builtins.concatLists [
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "alloc") (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "alloc")
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "default") (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "default")
]; ];
}); });
@ -1699,17 +1759,6 @@ in
src = fetchCrateLocal (workspaceSrc + "/src/format-table"); src = fetchCrateLocal (workspaceSrc + "/src/format-table");
}); });
"registry+https://github.com/rust-lang/crates.io-index".fs2."0.4.3" = overridableMkRustCrate (profileName: rec {
name = "fs2";
version = "0.4.3";
registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213"; };
dependencies = {
${ if (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sled") && hostPlatform.isUnix then "libc" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.153" { inherit profileName; }).out;
${ if (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sled") && hostPlatform.isWindows then "winapi" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".winapi."0.3.9" { inherit profileName; }).out;
};
});
"registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" = overridableMkRustCrate (profileName: rec { "registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" = overridableMkRustCrate (profileName: rec {
name = "futures"; name = "futures";
version = "0.3.30"; version = "0.3.30";
@ -1861,19 +1910,9 @@ in
}; };
}); });
"registry+https://github.com/rust-lang/crates.io-index".fxhash."0.2.1" = overridableMkRustCrate (profileName: rec { "unknown".garage."1.0.1" = overridableMkRustCrate (profileName: rec {
name = "fxhash";
version = "0.2.1";
registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c"; };
dependencies = {
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sled" then "byteorder" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".byteorder."1.5.0" { inherit profileName; }).out;
};
});
"unknown".garage."0.9.2" = overridableMkRustCrate (profileName: rec {
name = "garage"; name = "garage";
version = "0.9.2"; version = "1.0.1";
registry = "unknown"; registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/garage"); src = fetchCrateLocal (workspaceSrc + "/src/garage");
features = builtins.concatLists [ features = builtins.concatLists [
@ -1887,8 +1926,9 @@ in
(lib.optional (rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/telemetry-otlp") "opentelemetry-otlp") (lib.optional (rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/telemetry-otlp") "opentelemetry-otlp")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-prometheus") "opentelemetry-prometheus") (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-prometheus") "opentelemetry-prometheus")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/prometheus") "prometheus") (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/prometheus") "prometheus")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled") "sled")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite") "sqlite") (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite") "sqlite")
(lib.optional (rootFeatures' ? "garage/syslog") "syslog")
(lib.optional (rootFeatures' ? "garage/syslog" || rootFeatures' ? "garage/syslog-tracing") "syslog-tracing")
(lib.optional (rootFeatures' ? "garage/system-libs") "system-libs") (lib.optional (rootFeatures' ? "garage/system-libs") "system-libs")
(lib.optional (rootFeatures' ? "garage/telemetry-otlp") "telemetry-otlp") (lib.optional (rootFeatures' ? "garage/telemetry-otlp") "telemetry-otlp")
]; ];
@ -1900,15 +1940,15 @@ in
format_table = (rustPackages."unknown".format_table."0.1.1" { inherit profileName; }).out; format_table = (rustPackages."unknown".format_table."0.1.1" { inherit profileName; }).out;
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out; futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.30" { inherit profileName; }).out; futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.30" { inherit profileName; }).out;
garage_api = (rustPackages."unknown".garage_api."0.9.2" { inherit profileName; }).out; garage_api = (rustPackages."unknown".garage_api."1.0.1" { inherit profileName; }).out;
garage_block = (rustPackages."unknown".garage_block."0.9.2" { inherit profileName; }).out; garage_block = (rustPackages."unknown".garage_block."1.0.1" { inherit profileName; }).out;
garage_db = (rustPackages."unknown".garage_db."0.9.2" { inherit profileName; }).out; garage_db = (rustPackages."unknown".garage_db."1.0.1" { inherit profileName; }).out;
garage_model = (rustPackages."unknown".garage_model."0.9.2" { inherit profileName; }).out; garage_model = (rustPackages."unknown".garage_model."1.0.1" { inherit profileName; }).out;
garage_net = (rustPackages."unknown".garage_net."0.9.2" { inherit profileName; }).out; garage_net = (rustPackages."unknown".garage_net."1.0.1" { inherit profileName; }).out;
garage_rpc = (rustPackages."unknown".garage_rpc."0.9.2" { inherit profileName; }).out; garage_rpc = (rustPackages."unknown".garage_rpc."1.0.1" { inherit profileName; }).out;
garage_table = (rustPackages."unknown".garage_table."0.9.2" { inherit profileName; }).out; garage_table = (rustPackages."unknown".garage_table."1.0.1" { inherit profileName; }).out;
garage_util = (rustPackages."unknown".garage_util."0.9.2" { inherit profileName; }).out; garage_util = (rustPackages."unknown".garage_util."1.0.1" { inherit profileName; }).out;
garage_web = (rustPackages."unknown".garage_web."0.9.2" { inherit profileName; }).out; garage_web = (rustPackages."unknown".garage_web."1.0.1" { inherit profileName; }).out;
git_version = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".git-version."0.3.9" { inherit profileName; }).out; git_version = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".git-version."0.3.9" { inherit profileName; }).out;
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out; hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
sodiumoxide = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".kuska-sodiumoxide."0.2.5-0" { inherit profileName; }).out; sodiumoxide = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".kuska-sodiumoxide."0.2.5-0" { inherit profileName; }).out;
@ -1920,7 +1960,9 @@ in
rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out; rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.196" { inherit profileName; }).out; serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.196" { inherit profileName; }).out;
serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.14" { inherit profileName; }).out; serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.14" { inherit profileName; }).out;
sha1 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".sha1."0.10.6" { inherit profileName; }).out;
structopt = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".structopt."0.3.26" { inherit profileName; }).out; structopt = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".structopt."0.3.26" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/syslog" || rootFeatures' ? "garage/syslog-tracing" then "syslog_tracing" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".syslog-tracing."0.3.0" { inherit profileName; }).out;
timeago = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".timeago."0.4.2" { inherit profileName; }).out; timeago = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".timeago."0.4.2" { inherit profileName; }).out;
tokio = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.36.0" { inherit profileName; }).out; tokio = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.36.0" { inherit profileName; }).out;
toml = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".toml."0.8.10" { inherit profileName; }).out; toml = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".toml."0.8.10" { inherit profileName; }).out;
@ -1946,9 +1988,9 @@ in
}; };
}); });
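Two things change in the garage crate entry above: every workspace crate moves from 0.9.2 to 1.0.1 in lockstep (the nine garage_* pins), and a syslog logging backend appears, gated on new root features. Enabling the root feature garage/syslog turns on both feature flags, which in turn activates the conditional syslog_tracing dependency. A runnable sketch of that chain (nixpkgs lib assumed; the feature set corresponds to a hypothetical build with syslog enabled):

  let
    lib = (import <nixpkgs> {}).lib;
    rootFeatures' = { "garage/syslog" = true; };
  in builtins.concatLists [
    (lib.optional (rootFeatures' ? "garage/syslog") "syslog")
    (lib.optional (rootFeatures' ? "garage/syslog" || rootFeatures' ? "garage/syslog-tracing") "syslog-tracing")
  ]
  # => [ "syslog" "syslog-tracing" ]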
"unknown".garage_api."0.9.2" = overridableMkRustCrate (profileName: rec { "unknown".garage_api."1.0.1" = overridableMkRustCrate (profileName: rec {
name = "garage_api"; name = "garage_api";
version = "0.9.2"; version = "1.0.1";
registry = "unknown"; registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/api"); src = fetchCrateLocal (workspaceSrc + "/src/api");
features = builtins.concatLists [ features = builtins.concatLists [
@ -1958,22 +2000,26 @@ in
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/prometheus") "prometheus") (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/prometheus") "prometheus")
]; ];
dependencies = { dependencies = {
aes_gcm = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".aes-gcm."0.10.3" { inherit profileName; }).out;
argon2 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".argon2."0.5.3" { inherit profileName; }).out; argon2 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".argon2."0.5.3" { inherit profileName; }).out;
async_compression = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".async-compression."0.4.6" { inherit profileName; }).out;
async_trait = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".async-trait."0.1.77" { profileName = "__noProfile"; }).out; async_trait = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".async-trait."0.1.77" { profileName = "__noProfile"; }).out;
base64 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".base64."0.21.7" { inherit profileName; }).out; base64 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".base64."0.21.7" { inherit profileName; }).out;
bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.5.0" { inherit profileName; }).out; bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.5.0" { inherit profileName; }).out;
chrono = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".chrono."0.4.33" { inherit profileName; }).out; chrono = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".chrono."0.4.33" { inherit profileName; }).out;
crc32c = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crc32c."0.6.4" { inherit profileName; }).out;
crc32fast = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crc32fast."1.4.0" { inherit profileName; }).out;
crypto_common = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crypto-common."0.1.6" { inherit profileName; }).out; crypto_common = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crypto-common."0.1.6" { inherit profileName; }).out;
err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out; err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
form_urlencoded = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".form_urlencoded."1.2.1" { inherit profileName; }).out; form_urlencoded = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".form_urlencoded."1.2.1" { inherit profileName; }).out;
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out; futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.30" { inherit profileName; }).out; futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.30" { inherit profileName; }).out;
garage_block = (rustPackages."unknown".garage_block."0.9.2" { inherit profileName; }).out; garage_block = (rustPackages."unknown".garage_block."1.0.1" { inherit profileName; }).out;
garage_model = (rustPackages."unknown".garage_model."0.9.2" { inherit profileName; }).out; garage_model = (rustPackages."unknown".garage_model."1.0.1" { inherit profileName; }).out;
garage_net = (rustPackages."unknown".garage_net."0.9.2" { inherit profileName; }).out; garage_net = (rustPackages."unknown".garage_net."1.0.1" { inherit profileName; }).out;
garage_rpc = (rustPackages."unknown".garage_rpc."0.9.2" { inherit profileName; }).out; garage_rpc = (rustPackages."unknown".garage_rpc."1.0.1" { inherit profileName; }).out;
garage_table = (rustPackages."unknown".garage_table."0.9.2" { inherit profileName; }).out; garage_table = (rustPackages."unknown".garage_table."1.0.1" { inherit profileName; }).out;
garage_util = (rustPackages."unknown".garage_util."0.9.2" { inherit profileName; }).out; garage_util = (rustPackages."unknown".garage_util."1.0.1" { inherit profileName; }).out;
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out; hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
hmac = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hmac."0.12.1" { inherit profileName; }).out; hmac = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hmac."0.12.1" { inherit profileName; }).out;
http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."1.0.0" { inherit profileName; }).out; http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."1.0.0" { inherit profileName; }).out;
@ -1996,17 +2042,19 @@ in
serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.196" { inherit profileName; }).out; serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.196" { inherit profileName; }).out;
serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.14" { inherit profileName; }).out; serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.14" { inherit profileName; }).out;
serde_json = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_json."1.0.113" { inherit profileName; }).out; serde_json = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_json."1.0.113" { inherit profileName; }).out;
sha1 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".sha1."0.10.6" { inherit profileName; }).out;
sha2 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".sha2."0.10.8" { inherit profileName; }).out; sha2 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".sha2."0.10.8" { inherit profileName; }).out;
tokio = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.36.0" { inherit profileName; }).out; tokio = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.36.0" { inherit profileName; }).out;
tokio_stream = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio-stream."0.1.14" { inherit profileName; }).out; tokio_stream = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio-stream."0.1.14" { inherit profileName; }).out;
tokio_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio-util."0.7.10" { inherit profileName; }).out;
tracing = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tracing."0.1.40" { inherit profileName; }).out; tracing = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tracing."0.1.40" { inherit profileName; }).out;
url = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".url."2.5.0" { inherit profileName; }).out; url = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".url."2.5.0" { inherit profileName; }).out;
}; };
}); });
"unknown".garage_block."0.9.2" = overridableMkRustCrate (profileName: rec { "unknown".garage_block."1.0.1" = overridableMkRustCrate (profileName: rec {
name = "garage_block"; name = "garage_block";
version = "0.9.2"; version = "1.0.1";
registry = "unknown"; registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/block"); src = fetchCrateLocal (workspaceSrc + "/src/block");
features = builtins.concatLists [ features = builtins.concatLists [
@ -2020,11 +2068,11 @@ in
bytesize = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytesize."1.3.0" { inherit profileName; }).out; bytesize = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytesize."1.3.0" { inherit profileName; }).out;
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out; futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.30" { inherit profileName; }).out; futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.30" { inherit profileName; }).out;
garage_db = (rustPackages."unknown".garage_db."0.9.2" { inherit profileName; }).out; garage_db = (rustPackages."unknown".garage_db."1.0.1" { inherit profileName; }).out;
garage_net = (rustPackages."unknown".garage_net."0.9.2" { inherit profileName; }).out; garage_net = (rustPackages."unknown".garage_net."1.0.1" { inherit profileName; }).out;
garage_rpc = (rustPackages."unknown".garage_rpc."0.9.2" { inherit profileName; }).out; garage_rpc = (rustPackages."unknown".garage_rpc."1.0.1" { inherit profileName; }).out;
garage_table = (rustPackages."unknown".garage_table."0.9.2" { inherit profileName; }).out; garage_table = (rustPackages."unknown".garage_table."1.0.1" { inherit profileName; }).out;
garage_util = (rustPackages."unknown".garage_util."0.9.2" { inherit profileName; }).out; garage_util = (rustPackages."unknown".garage_util."1.0.1" { inherit profileName; }).out;
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out; hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out; opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out; rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
@ -2037,9 +2085,9 @@ in
}; };
}); });
"unknown".garage_db."0.9.2" = overridableMkRustCrate (profileName: rec { "unknown".garage_db."1.0.1" = overridableMkRustCrate (profileName: rec {
name = "garage_db"; name = "garage_db";
version = "0.9.2"; version = "1.0.1";
registry = "unknown"; registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/db"); src = fetchCrateLocal (workspaceSrc + "/src/db");
features = builtins.concatLists [ features = builtins.concatLists [
@ -2047,16 +2095,18 @@ in
(lib.optional (rootFeatures' ? "garage_db/default") "default") (lib.optional (rootFeatures' ? "garage_db/default") "default")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "heed") (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "heed")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "lmdb") (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "lmdb")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "r2d2")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "r2d2_sqlite")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "rusqlite") (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "rusqlite")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sled") "sled")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "sqlite") (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "sqlite")
]; ];
dependencies = { dependencies = {
err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out; err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "heed" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".heed."0.11.0" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "heed" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".heed."0.11.0" { inherit profileName; }).out;
hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out; hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "rusqlite" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rusqlite."0.30.0" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "r2d2" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".r2d2."0.8.10" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sled" then "sled" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".sled."0.34.7" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "r2d2_sqlite" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".r2d2_sqlite."0.24.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "rusqlite" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rusqlite."0.31.0" { inherit profileName; }).out;
tracing = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tracing."0.1.40" { inherit profileName; }).out; tracing = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tracing."0.1.40" { inherit profileName; }).out;
}; };
devDependencies = { devDependencies = {
@ -2064,16 +2114,15 @@ in
}; };
}); });
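In garage_db above, SQLite access now goes through an r2d2 connection pool: r2d2 0.8.10 and r2d2_sqlite 0.24.0 join as optional dependencies, rusqlite moves from 0.30.0 to 0.31.0, and the sled dependency is gone. A sketch of which guards fire for a SQLite build (conditions abbreviated to one alias each; the real guards list several more, and the feature set is hypothetical):

  let
    lib = (import <nixpkgs> {}).lib;
    rootFeatures' = { "garage_db/sqlite" = true; };
    has = f: rootFeatures' ? f;
    guards = {
      r2d2 = has "garage_db/sqlite";
      r2d2_sqlite = has "garage_db/sqlite";
      rusqlite = has "garage_db/sqlite";
      sled = has "garage_db/sled";  # unreachable after this change
    };
  in builtins.attrNames (lib.filterAttrs (_: v: v) guards)
  # => [ "r2d2" "r2d2_sqlite" "rusqlite" ]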
"unknown".garage_model."0.9.2" = overridableMkRustCrate (profileName: rec { "unknown".garage_model."1.0.1" = overridableMkRustCrate (profileName: rec {
name = "garage_model"; name = "garage_model";
version = "0.9.2"; version = "1.0.1";
registry = "unknown"; registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/model"); src = fetchCrateLocal (workspaceSrc + "/src/model");
features = builtins.concatLists [ features = builtins.concatLists [
(lib.optional (rootFeatures' ? "garage_model/default") "default") (lib.optional (rootFeatures' ? "garage_model/default") "default")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/k2v" || rootFeatures' ? "garage_api/k2v" || rootFeatures' ? "garage_model/k2v") "k2v") (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/k2v" || rootFeatures' ? "garage_api/k2v" || rootFeatures' ? "garage_model/k2v") "k2v")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "lmdb") (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "lmdb")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sled") "sled")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "sqlite") (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "sqlite")
]; ];
dependencies = { dependencies = {
@ -2085,14 +2134,16 @@ in
err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out; err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out; futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.30" { inherit profileName; }).out; futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.30" { inherit profileName; }).out;
garage_block = (rustPackages."unknown".garage_block."0.9.2" { inherit profileName; }).out; garage_block = (rustPackages."unknown".garage_block."1.0.1" { inherit profileName; }).out;
garage_db = (rustPackages."unknown".garage_db."0.9.2" { inherit profileName; }).out; garage_db = (rustPackages."unknown".garage_db."1.0.1" { inherit profileName; }).out;
garage_net = (rustPackages."unknown".garage_net."0.9.2" { inherit profileName; }).out; garage_net = (rustPackages."unknown".garage_net."1.0.1" { inherit profileName; }).out;
garage_rpc = (rustPackages."unknown".garage_rpc."0.9.2" { inherit profileName; }).out; garage_rpc = (rustPackages."unknown".garage_rpc."1.0.1" { inherit profileName; }).out;
garage_table = (rustPackages."unknown".garage_table."0.9.2" { inherit profileName; }).out; garage_table = (rustPackages."unknown".garage_table."1.0.1" { inherit profileName; }).out;
garage_util = (rustPackages."unknown".garage_util."0.9.2" { inherit profileName; }).out; garage_util = (rustPackages."unknown".garage_util."1.0.1" { inherit profileName; }).out;
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out; hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."1.0.0" { inherit profileName; }).out;
opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out; opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
parse_duration = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".parse_duration."2.1.1" { inherit profileName; }).out;
rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out; rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.196" { inherit profileName; }).out; serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.196" { inherit profileName; }).out;
serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.14" { inherit profileName; }).out; serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.14" { inherit profileName; }).out;
@ -2102,9 +2153,9 @@ in
}; };
}); });
"unknown".garage_net."0.9.2" = overridableMkRustCrate (profileName: rec { "unknown".garage_net."1.0.1" = overridableMkRustCrate (profileName: rec {
name = "garage_net"; name = "garage_net";
version = "0.9.2"; version = "1.0.1";
registry = "unknown"; registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/net"); src = fetchCrateLocal (workspaceSrc + "/src/net");
features = builtins.concatLists [ features = builtins.concatLists [
@ -2139,9 +2190,9 @@ in
}; };
}); });
"unknown".garage_rpc."0.9.2" = overridableMkRustCrate (profileName: rec { "unknown".garage_rpc."1.0.1" = overridableMkRustCrate (profileName: rec {
name = "garage_rpc"; name = "garage_rpc";
version = "0.9.2"; version = "1.0.1";
registry = "unknown"; registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/rpc"); src = fetchCrateLocal (workspaceSrc + "/src/rpc");
features = builtins.concatLists [ features = builtins.concatLists [
@ -2163,11 +2214,12 @@ in
format_table = (rustPackages."unknown".format_table."0.1.1" { inherit profileName; }).out; format_table = (rustPackages."unknown".format_table."0.1.1" { inherit profileName; }).out;
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out; futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.30" { inherit profileName; }).out; futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.30" { inherit profileName; }).out;
garage_db = (rustPackages."unknown".garage_db."0.9.2" { inherit profileName; }).out; garage_db = (rustPackages."unknown".garage_db."1.0.1" { inherit profileName; }).out;
garage_net = (rustPackages."unknown".garage_net."0.9.2" { inherit profileName; }).out; garage_net = (rustPackages."unknown".garage_net."1.0.1" { inherit profileName; }).out;
garage_util = (rustPackages."unknown".garage_util."0.9.2" { inherit profileName; }).out; garage_util = (rustPackages."unknown".garage_util."1.0.1" { inherit profileName; }).out;
gethostname = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".gethostname."0.4.3" { inherit profileName; }).out; gethostname = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".gethostname."0.4.3" { inherit profileName; }).out;
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out; hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
ipnet = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".ipnet."2.9.0" { inherit profileName; }).out;
itertools = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".itertools."0.12.1" { inherit profileName; }).out; itertools = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".itertools."0.12.1" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/k8s-openapi" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "k8s_openapi" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".k8s-openapi."0.21.0" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/k8s-openapi" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "k8s_openapi" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".k8s-openapi."0.21.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "kube" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".kube."0.88.1" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "kube" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".kube."0.88.1" { inherit profileName; }).out;
@ -2187,9 +2239,9 @@ in
}; };
}); });
"unknown".garage_table."0.9.2" = overridableMkRustCrate (profileName: rec { "unknown".garage_table."1.0.1" = overridableMkRustCrate (profileName: rec {
name = "garage_table"; name = "garage_table";
version = "0.9.2"; version = "1.0.1";
registry = "unknown"; registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/table"); src = fetchCrateLocal (workspaceSrc + "/src/table");
dependencies = { dependencies = {
@ -2198,9 +2250,9 @@ in
bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.5.0" { inherit profileName; }).out; bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.5.0" { inherit profileName; }).out;
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out; futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.30" { inherit profileName; }).out; futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.30" { inherit profileName; }).out;
garage_db = (rustPackages."unknown".garage_db."0.9.2" { inherit profileName; }).out; garage_db = (rustPackages."unknown".garage_db."1.0.1" { inherit profileName; }).out;
garage_rpc = (rustPackages."unknown".garage_rpc."0.9.2" { inherit profileName; }).out; garage_rpc = (rustPackages."unknown".garage_rpc."1.0.1" { inherit profileName; }).out;
garage_util = (rustPackages."unknown".garage_util."0.9.2" { inherit profileName; }).out; garage_util = (rustPackages."unknown".garage_util."1.0.1" { inherit profileName; }).out;
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out; hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out; hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out;
opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out; opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
@@ -2212,9 +2264,9 @@ in
 };
 });
-"unknown".garage_util."0.9.2" = overridableMkRustCrate (profileName: rec {
+"unknown".garage_util."1.0.1" = overridableMkRustCrate (profileName: rec {
 name = "garage_util";
-version = "0.9.2";
+version = "1.0.1";
 registry = "unknown";
 src = fetchCrateLocal (workspaceSrc + "/src/util");
 features = builtins.concatLists [
@@ -2230,8 +2282,8 @@ in
 digest = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".digest."0.10.7" { inherit profileName; }).out;
 err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
 futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
-garage_db = (rustPackages."unknown".garage_db."0.9.2" { inherit profileName; }).out;
-garage_net = (rustPackages."unknown".garage_net."0.9.2" { inherit profileName; }).out;
+garage_db = (rustPackages."unknown".garage_db."1.0.1" { inherit profileName; }).out;
+garage_net = (rustPackages."unknown".garage_net."1.0.1" { inherit profileName; }).out;
 hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
 hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out;
 http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."1.0.0" { inherit profileName; }).out;
@@ -2256,18 +2308,18 @@ in
 };
 });
-"unknown".garage_web."0.9.2" = overridableMkRustCrate (profileName: rec {
+"unknown".garage_web."1.0.1" = overridableMkRustCrate (profileName: rec {
 name = "garage_web";
-version = "0.9.2";
+version = "1.0.1";
 registry = "unknown";
 src = fetchCrateLocal (workspaceSrc + "/src/web");
 dependencies = {
 err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
 futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
-garage_api = (rustPackages."unknown".garage_api."0.9.2" { inherit profileName; }).out;
-garage_model = (rustPackages."unknown".garage_model."0.9.2" { inherit profileName; }).out;
-garage_table = (rustPackages."unknown".garage_table."0.9.2" { inherit profileName; }).out;
-garage_util = (rustPackages."unknown".garage_util."0.9.2" { inherit profileName; }).out;
+garage_api = (rustPackages."unknown".garage_api."1.0.1" { inherit profileName; }).out;
+garage_model = (rustPackages."unknown".garage_model."1.0.1" { inherit profileName; }).out;
+garage_table = (rustPackages."unknown".garage_table."1.0.1" { inherit profileName; }).out;
+garage_util = (rustPackages."unknown".garage_util."1.0.1" { inherit profileName; }).out;
 http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."1.0.0" { inherit profileName; }).out;
 http_body_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http-body-util."0.1.0" { inherit profileName; }).out;
 hyper = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."1.1.0" { inherit profileName; }).out;
@@ -2321,6 +2373,17 @@ in
 };
 });
+"registry+https://github.com/rust-lang/crates.io-index".ghash."0.5.1" = overridableMkRustCrate (profileName: rec {
+name = "ghash";
+version = "0.5.1";
+registry = "registry+https://github.com/rust-lang/crates.io-index";
+src = fetchCratesIo { inherit name version; sha256 = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1"; };
+dependencies = {
+opaque_debug = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opaque-debug."0.3.1" { inherit profileName; }).out;
+polyval = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".polyval."0.6.2" { inherit profileName; }).out;
+};
+});
 "registry+https://github.com/rust-lang/crates.io-index".gimli."0.28.1" = overridableMkRustCrate (profileName: rec {
 name = "gimli";
 version = "0.28.1";
@@ -2422,25 +2485,25 @@ in
 registry = "registry+https://github.com/rust-lang/crates.io-index";
 src = fetchCratesIo { inherit name version; sha256 = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604"; };
 features = builtins.concatLists [
-(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "ahash")
-(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "allocator-api2")
-(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "default")
-(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "inline-more")
+(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "ahash")
+(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "allocator-api2")
+(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "default")
+(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "inline-more")
 [ "raw" ]
 ];
 dependencies = {
-${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "ahash" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".ahash."0.8.7" { inherit profileName; }).out;
-${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "allocator_api2" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".allocator-api2."0.2.16" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "ahash" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".ahash."0.8.7" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "allocator_api2" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".allocator-api2."0.2.16" { inherit profileName; }).out;
 };
 });
-"registry+https://github.com/rust-lang/crates.io-index".hashlink."0.8.4" = overridableMkRustCrate (profileName: rec {
+"registry+https://github.com/rust-lang/crates.io-index".hashlink."0.9.0" = overridableMkRustCrate (profileName: rec {
 name = "hashlink";
-version = "0.8.4";
+version = "0.9.0";
 registry = "registry+https://github.com/rust-lang/crates.io-index";
-src = fetchCratesIo { inherit name version; sha256 = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7"; };
+src = fetchCratesIo { inherit name version; sha256 = "692eaaf7f7607518dd3cef090f1474b61edc5301d8012f09579920df68b725ee"; };
 dependencies = {
-${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "hashbrown" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hashbrown."0.14.3" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "hashbrown" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hashbrown."0.14.3" { inherit profileName; }).out;
 };
 });
@@ -2928,6 +2991,16 @@ in
 };
 });
+"registry+https://github.com/rust-lang/crates.io-index".inout."0.1.3" = overridableMkRustCrate (profileName: rec {
+name = "inout";
+version = "0.1.3";
+registry = "registry+https://github.com/rust-lang/crates.io-index";
+src = fetchCratesIo { inherit name version; sha256 = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5"; };
+dependencies = {
+generic_array = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".generic-array."0.14.7" { inherit profileName; }).out;
+};
+});
 "registry+https://github.com/rust-lang/crates.io-index".instant."0.1.12" = overridableMkRustCrate (profileName: rec {
 name = "instant";
 version = "0.1.12";
@@ -2944,8 +3017,8 @@ in
 registry = "registry+https://github.com/rust-lang/crates.io-index";
 src = fetchCratesIo { inherit name version; sha256 = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3"; };
 features = builtins.concatLists [
-(lib.optional (rootFeatures' ? "garage/consul-discovery" || rootFeatures' ? "garage_rpc/consul-discovery" || rootFeatures' ? "garage_rpc/reqwest") "default")
-(lib.optional (rootFeatures' ? "garage/consul-discovery" || rootFeatures' ? "garage_rpc/consul-discovery" || rootFeatures' ? "garage_rpc/reqwest") "std")
+[ "default" ]
+[ "std" ]
 ];
 });
@@ -3354,24 +3427,24 @@ in
 };
 });
-"registry+https://github.com/rust-lang/crates.io-index".libsqlite3-sys."0.27.0" = overridableMkRustCrate (profileName: rec {
+"registry+https://github.com/rust-lang/crates.io-index".libsqlite3-sys."0.28.0" = overridableMkRustCrate (profileName: rec {
 name = "libsqlite3-sys";
-version = "0.27.0";
+version = "0.28.0";
 registry = "registry+https://github.com/rust-lang/crates.io-index";
-src = fetchCratesIo { inherit name version; sha256 = "cf4e226dcd58b4be396f7bd3c20da8fdee2911400705297ba7d2d7cc2c30f716"; };
+src = fetchCratesIo { inherit name version; sha256 = "0c10584274047cb335c23d3e61bcef8e323adae7c5c8c760540f73610177fc3f"; };
 features = builtins.concatLists [
 (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "bundled")
 (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "bundled_bindings")
 (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "cc")
-(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "default")
-(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "min_sqlite_version_3_14_0")
-(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "pkg-config")
-(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "vcpkg")
+(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "default")
+(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "min_sqlite_version_3_14_0")
+(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "pkg-config")
+(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "vcpkg")
 ];
 buildDependencies = {
 ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs" then "cc" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".cc."1.0.83" { profileName = "__noProfile"; }).out;
-${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "pkg_config" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".pkg-config."0.3.29" { profileName = "__noProfile"; }).out;
-${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "vcpkg" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".vcpkg."0.2.15" { profileName = "__noProfile"; }).out;
+${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "pkg_config" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".pkg-config."0.3.29" { profileName = "__noProfile"; }).out;
+${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "vcpkg" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".vcpkg."0.2.15" { profileName = "__noProfile"; }).out;
 };
 });
@@ -3777,6 +3850,13 @@ in
 ];
 });
+"registry+https://github.com/rust-lang/crates.io-index".opaque-debug."0.3.1" = overridableMkRustCrate (profileName: rec {
+name = "opaque-debug";
+version = "0.3.1";
+registry = "registry+https://github.com/rust-lang/crates.io-index";
+src = fetchCratesIo { inherit name version; sha256 = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381"; };
+});
 "registry+https://github.com/rust-lang/crates.io-index".openssl-probe."0.1.5" = overridableMkRustCrate (profileName: rec {
 name = "openssl-probe";
 version = "0.1.5";
@@ -3960,11 +4040,11 @@ in
 registry = "registry+https://github.com/rust-lang/crates.io-index";
 src = fetchCratesIo { inherit name version; sha256 = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f"; };
 features = builtins.concatLists [
-(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "default")
+(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "default")
 ];
 dependencies = {
-${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "lock_api" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".lock_api."0.4.11" { inherit profileName; }).out;
-${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "parking_lot_core" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".parking_lot_core."0.9.9" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "lock_api" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".lock_api."0.4.11" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "parking_lot_core" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".parking_lot_core."0.9.9" { inherit profileName; }).out;
 };
 });
@@ -3989,11 +4069,11 @@ in
 registry = "registry+https://github.com/rust-lang/crates.io-index";
 src = fetchCratesIo { inherit name version; sha256 = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e"; };
 dependencies = {
-${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "cfg_if" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".cfg-if."1.0.0" { inherit profileName; }).out;
-${ if (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") && hostPlatform.isUnix then "libc" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.153" { inherit profileName; }).out;
-${ if (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") && hostPlatform.parsed.kernel.name == "redox" then "syscall" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".redox_syscall."0.4.1" { inherit profileName; }).out;
-${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "smallvec" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".smallvec."1.13.1" { inherit profileName; }).out;
-${ if (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") && hostPlatform.isWindows then "windows_targets" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".windows-targets."0.48.5" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "cfg_if" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".cfg-if."1.0.0" { inherit profileName; }).out;
+${ if (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") && hostPlatform.isUnix then "libc" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.153" { inherit profileName; }).out;
+${ if (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") && hostPlatform.parsed.kernel.name == "redox" then "syscall" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".redox_syscall."0.4.1" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "smallvec" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".smallvec."1.13.1" { inherit profileName; }).out;
+${ if (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") && hostPlatform.isWindows then "windows_targets" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".windows-targets."0.48.5" { inherit profileName; }).out;
 };
 });
@@ -4236,6 +4316,19 @@ in
 };
 });
+"registry+https://github.com/rust-lang/crates.io-index".polyval."0.6.2" = overridableMkRustCrate (profileName: rec {
+name = "polyval";
+version = "0.6.2";
+registry = "registry+https://github.com/rust-lang/crates.io-index";
+src = fetchCratesIo { inherit name version; sha256 = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25"; };
+dependencies = {
+cfg_if = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".cfg-if."1.0.0" { inherit profileName; }).out;
+${ if hostPlatform.parsed.cpu.name == "aarch64" || hostPlatform.parsed.cpu.name == "x86_64" || hostPlatform.parsed.cpu.name == "i686" then "cpufeatures" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".cpufeatures."0.2.12" { inherit profileName; }).out;
+opaque_debug = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opaque-debug."0.3.1" { inherit profileName; }).out;
+universal_hash = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".universal-hash."0.5.1" { inherit profileName; }).out;
+};
+});
 "registry+https://github.com/rust-lang/crates.io-index".powerfmt."0.2.0" = overridableMkRustCrate (profileName: rec {
 name = "powerfmt";
 version = "0.2.0";
@@ -4435,6 +4528,30 @@ in
 };
 });
+"registry+https://github.com/rust-lang/crates.io-index".r2d2."0.8.10" = overridableMkRustCrate (profileName: rec {
+name = "r2d2";
+version = "0.8.10";
+registry = "registry+https://github.com/rust-lang/crates.io-index";
+src = fetchCratesIo { inherit name version; sha256 = "51de85fb3fb6524929c8a2eb85e6b6d363de4e8c48f9e2c2eac4944abc181c93"; };
+dependencies = {
+${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "log" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".log."0.4.20" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "parking_lot" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".parking_lot."0.12.1" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "scheduled_thread_pool" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".scheduled-thread-pool."0.2.7" { inherit profileName; }).out;
+};
+});
+"registry+https://github.com/rust-lang/crates.io-index".r2d2_sqlite."0.24.0" = overridableMkRustCrate (profileName: rec {
+name = "r2d2_sqlite";
+version = "0.24.0";
+registry = "registry+https://github.com/rust-lang/crates.io-index";
+src = fetchCratesIo { inherit name version; sha256 = "6a982edf65c129796dba72f8775b292ef482b40d035e827a9825b3bc07ccc5f2"; };
+dependencies = {
+${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "r2d2" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".r2d2."0.8.10" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "rusqlite" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rusqlite."0.31.0" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "uuid" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".uuid."1.4.1" { inherit profileName; }).out;
+};
+});
 "registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" = overridableMkRustCrate (profileName: rec {
 name = "rand";
 version = "0.8.5";
@@ -4502,7 +4619,7 @@ in
 registry = "registry+https://github.com/rust-lang/crates.io-index";
 src = fetchCratesIo { inherit name version; sha256 = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa"; };
 dependencies = {
-${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "bitflags" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bitflags."1.3.2" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/opentelemetry-prometheus" || rootFeatures' ? "garage/prometheus" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus" || rootFeatures' ? "garage_api/prometheus" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "bitflags" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bitflags."1.3.2" { inherit profileName; }).out;
 };
 });
@@ -4763,22 +4880,23 @@ in
 ];
 });
-"registry+https://github.com/rust-lang/crates.io-index".rusqlite."0.30.0" = overridableMkRustCrate (profileName: rec {
+"registry+https://github.com/rust-lang/crates.io-index".rusqlite."0.31.0" = overridableMkRustCrate (profileName: rec {
 name = "rusqlite";
-version = "0.30.0";
+version = "0.31.0";
 registry = "registry+https://github.com/rust-lang/crates.io-index";
-src = fetchCratesIo { inherit name version; sha256 = "a78046161564f5e7cd9008aff3b2990b3850dc8e0349119b98e8f251e099f24d"; };
+src = fetchCratesIo { inherit name version; sha256 = "b838eba278d213a8beaf485bd313fd580ca4505a00d5871caeb1457c55322cae"; };
 features = builtins.concatLists [
+(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "backup")
 (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "bundled")
 (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "modern_sqlite")
 ];
 dependencies = {
-${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "bitflags" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bitflags."2.4.2" { inherit profileName; }).out;
-${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "fallible_iterator" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".fallible-iterator."0.3.0" { inherit profileName; }).out;
-${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "fallible_streaming_iterator" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".fallible-streaming-iterator."0.1.9" { inherit profileName; }).out;
-${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "hashlink" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hashlink."0.8.4" { inherit profileName; }).out;
-${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "libsqlite3_sys" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libsqlite3-sys."0.27.0" { inherit profileName; }).out;
-${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "smallvec" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".smallvec."1.13.1" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "bitflags" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bitflags."2.4.2" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "fallible_iterator" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".fallible-iterator."0.3.0" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "fallible_streaming_iterator" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".fallible-streaming-iterator."0.1.9" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "hashlink" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hashlink."0.9.0" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "libsqlite3_sys" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libsqlite3-sys."0.28.0" { inherit profileName; }).out;
+${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "smallvec" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".smallvec."1.13.1" { inherit profileName; }).out;
 };
 });
@@ -4992,6 +5110,16 @@ in
 };
 });
+"registry+https://github.com/rust-lang/crates.io-index".scheduled-thread-pool."0.2.7" = overridableMkRustCrate (profileName: rec {
+name = "scheduled-thread-pool";
+version = "0.2.7";
+registry = "registry+https://github.com/rust-lang/crates.io-index";
+src = fetchCratesIo { inherit name version; sha256 = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19"; };
+dependencies = {
+${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "parking_lot" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".parking_lot."0.12.1" { inherit profileName; }).out;
+};
+});
 "registry+https://github.com/rust-lang/crates.io-index".schemars."0.8.16" = overridableMkRustCrate (profileName: rec {
 name = "schemars";
 version = "0.8.16";
@@ -5338,27 +5466,6 @@ in
 };
 });
-"registry+https://github.com/rust-lang/crates.io-index".sled."0.34.7" = overridableMkRustCrate (profileName: rec {
-name = "sled";
-version = "0.34.7";
-registry = "registry+https://github.com/rust-lang/crates.io-index";
-src = fetchCratesIo { inherit name version; sha256 = "7f96b4737c2ce5987354855aed3797279def4ebf734436c6aa4552cf8e169935"; };
-features = builtins.concatLists [
-(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sled") "default")
-(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sled") "no_metrics")
-];
-dependencies = {
-${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sled" then "crc32fast" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crc32fast."1.3.2" { inherit profileName; }).out;
-${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sled" then "crossbeam_epoch" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crossbeam-epoch."0.9.18" { inherit profileName; }).out;
-${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sled" then "crossbeam_utils" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crossbeam-utils."0.8.19" { inherit profileName; }).out;
-${ if (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sled") && (hostPlatform.parsed.kernel.name == "linux" || hostPlatform.parsed.kernel.name == "darwin" || hostPlatform.parsed.kernel.name == "windows") then "fs2" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".fs2."0.4.3" { inherit profileName; }).out;
-${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sled" then "fxhash" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".fxhash."0.2.1" { inherit profileName; }).out;
-${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sled" then "libc" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.153" { inherit profileName; }).out;
-${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sled" then "log" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".log."0.4.20" { inherit profileName; }).out;
-${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sled" then "parking_lot" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".parking_lot."0.11.2" { inherit profileName; }).out;
-};
-});
 "registry+https://github.com/rust-lang/crates.io-index".smallvec."1.13.1" = overridableMkRustCrate (profileName: rec {
name = "smallvec"; name = "smallvec";
version = "1.13.1"; version = "1.13.1";
@ -5572,6 +5679,18 @@ in
}; };
}); });
"registry+https://github.com/rust-lang/crates.io-index".syslog-tracing."0.3.0" = overridableMkRustCrate (profileName: rec {
name = "syslog-tracing";
version = "0.3.0";
registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "340b1540dcdb6b066bc2966e7974f977ab1a38f21b2be189014ffb0cc2405768"; };
dependencies = {
${ if rootFeatures' ? "garage/syslog" || rootFeatures' ? "garage/syslog-tracing" then "libc" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.153" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/syslog" || rootFeatures' ? "garage/syslog-tracing" then "tracing_core" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tracing-core."0.1.32" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/syslog" || rootFeatures' ? "garage/syslog-tracing" then "tracing_subscriber" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tracing-subscriber."0.3.18" { inherit profileName; }).out;
};
});
"registry+https://github.com/rust-lang/crates.io-index".system-configuration."0.5.1" = overridableMkRustCrate (profileName: rec { "registry+https://github.com/rust-lang/crates.io-index".system-configuration."0.5.1" = overridableMkRustCrate (profileName: rec {
name = "system-configuration"; name = "system-configuration";
version = "0.5.1"; version = "0.5.1";
@ -5662,11 +5781,11 @@ in
}; };
}); });
"registry+https://github.com/rust-lang/crates.io-index".time."0.3.34" = overridableMkRustCrate (profileName: rec { "registry+https://github.com/rust-lang/crates.io-index".time."0.3.36" = overridableMkRustCrate (profileName: rec {
name = "time"; name = "time";
version = "0.3.34"; version = "0.3.36";
registry = "registry+https://github.com/rust-lang/crates.io-index"; registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749"; }; src = fetchCratesIo { inherit name version; sha256 = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885"; };
features = builtins.concatLists [ features = builtins.concatLists [
[ "alloc" ] [ "alloc" ]
[ "default" ] [ "default" ]
@ -5679,7 +5798,7 @@ in
powerfmt = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".powerfmt."0.2.0" { inherit profileName; }).out; powerfmt = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".powerfmt."0.2.0" { inherit profileName; }).out;
serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.196" { inherit profileName; }).out; serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.196" { inherit profileName; }).out;
time_core = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".time-core."0.1.2" { inherit profileName; }).out; time_core = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".time-core."0.1.2" { inherit profileName; }).out;
time_macros = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".time-macros."0.2.17" { profileName = "__noProfile"; }).out; time_macros = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".time-macros."0.2.18" { profileName = "__noProfile"; }).out;
}; };
}); });
@ -5690,11 +5809,11 @@ in
src = fetchCratesIo { inherit name version; sha256 = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3"; }; src = fetchCratesIo { inherit name version; sha256 = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3"; };
}); });
"registry+https://github.com/rust-lang/crates.io-index".time-macros."0.2.17" = overridableMkRustCrate (profileName: rec { "registry+https://github.com/rust-lang/crates.io-index".time-macros."0.2.18" = overridableMkRustCrate (profileName: rec {
name = "time-macros"; name = "time-macros";
version = "0.2.17"; version = "0.2.18";
registry = "registry+https://github.com/rust-lang/crates.io-index"; registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "7ba3a3ef41e6672a2f0f001392bb5dcd3ff0a9992d618ca761a11c3121547774"; }; src = fetchCratesIo { inherit name version; sha256 = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf"; };
features = builtins.concatLists [ features = builtins.concatLists [
[ "parsing" ] [ "parsing" ]
]; ];
@ -6314,6 +6433,17 @@ in
]; ];
}); });
"registry+https://github.com/rust-lang/crates.io-index".universal-hash."0.5.1" = overridableMkRustCrate (profileName: rec {
name = "universal-hash";
version = "0.5.1";
registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea"; };
dependencies = {
crypto_common = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crypto-common."0.1.6" { inherit profileName; }).out;
subtle = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".subtle."2.5.0" { inherit profileName; }).out;
};
});
"registry+https://github.com/rust-lang/crates.io-index".unsafe-libyaml."0.2.10" = overridableMkRustCrate (profileName: rec { "registry+https://github.com/rust-lang/crates.io-index".unsafe-libyaml."0.2.10" = overridableMkRustCrate (profileName: rec {
name = "unsafe-libyaml"; name = "unsafe-libyaml";
version = "0.2.10"; version = "0.2.10";
@ -6367,13 +6497,16 @@ in
src = fetchCratesIo { inherit name version; sha256 = "79daa5ed5740825c40b389c5e50312b9c86df53fccd33f281df655642b43869d"; }; src = fetchCratesIo { inherit name version; sha256 = "79daa5ed5740825c40b389c5e50312b9c86df53fccd33f281df655642b43869d"; };
features = builtins.concatLists [ features = builtins.concatLists [
[ "default" ] [ "default" ]
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "fast-rng")
[ "getrandom" ] [ "getrandom" ]
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "rand")
[ "rng" ] [ "rng" ]
[ "std" ] [ "std" ]
[ "v4" ] [ "v4" ]
]; ];
dependencies = { dependencies = {
getrandom = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".getrandom."0.2.12" { inherit profileName; }).out; getrandom = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".getrandom."0.2.12" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "rand" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
}; };
}); });
@ -6591,7 +6724,6 @@ in
[ "minwindef" ] [ "minwindef" ]
[ "ntstatus" ] [ "ntstatus" ]
[ "processenv" ] [ "processenv" ]
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sled") "processthreadsapi")
[ "std" ] [ "std" ]
[ "synchapi" ] [ "synchapi" ]
[ "sysinfoapi" ] [ "sysinfoapi" ]
@ -6894,7 +7026,7 @@ in
registry = "registry+https://github.com/rust-lang/crates.io-index"; registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be"; }; src = fetchCratesIo { inherit name version; sha256 = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be"; };
features = builtins.concatLists [ features = builtins.concatLists [
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "simd") (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "simd")
]; ];
dependencies = { dependencies = {
${ if false then "zerocopy_derive" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".zerocopy-derive."0.7.32" { profileName = "__noProfile"; }).out; ${ if false then "zerocopy_derive" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".zerocopy-derive."0.7.32" { profileName = "__noProfile"; }).out;
@ -6907,9 +7039,9 @@ in
registry = "registry+https://github.com/rust-lang/crates.io-index"; registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6"; }; src = fetchCratesIo { inherit name version; sha256 = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6"; };
dependencies = { dependencies = {
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "proc_macro2" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".proc-macro2."1.0.78" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "proc_macro2" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".proc-macro2."1.0.78" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "quote" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".quote."1.0.35" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "quote" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".quote."1.0.35" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "syn" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".syn."2.0.48" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/r2d2_sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "syn" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".syn."2.0.48" { inherit profileName; }).out;
}; };
}); });

View file

@ -21,15 +21,15 @@ default-members = ["src/garage"]
# Internal Garage crates # Internal Garage crates
format_table = { version = "0.1.1", path = "src/format-table" } format_table = { version = "0.1.1", path = "src/format-table" }
garage_api = { version = "0.9.2", path = "src/api" } garage_api = { version = "1.0.1", path = "src/api" }
garage_block = { version = "0.9.2", path = "src/block" } garage_block = { version = "1.0.1", path = "src/block" }
garage_db = { version = "0.9.2", path = "src/db", default-features = false } garage_db = { version = "1.0.1", path = "src/db", default-features = false }
garage_model = { version = "0.9.2", path = "src/model", default-features = false } garage_model = { version = "1.0.1", path = "src/model", default-features = false }
garage_net = { version = "0.9.2", path = "src/net" } garage_net = { version = "1.0.1", path = "src/net" }
garage_rpc = { version = "0.9.2", path = "src/rpc" } garage_rpc = { version = "1.0.1", path = "src/rpc" }
garage_table = { version = "0.9.2", path = "src/table" } garage_table = { version = "1.0.1", path = "src/table" }
garage_util = { version = "0.9.2", path = "src/util" } garage_util = { version = "1.0.1", path = "src/util" }
garage_web = { version = "0.9.2", path = "src/web" } garage_web = { version = "1.0.1", path = "src/web" }
k2v-client = { version = "0.0.4", path = "src/k2v-client" } k2v-client = { version = "0.0.4", path = "src/k2v-client" }
# External crates from crates.io # External crates from crates.io
@ -43,6 +43,8 @@ bytes = "1.0"
bytesize = "1.1" bytesize = "1.1"
cfg-if = "1.0" cfg-if = "1.0"
chrono = "0.4" chrono = "0.4"
crc32fast = "1.4"
crc32c = "0.6"
crypto-common = "0.1" crypto-common = "0.1"
digest = "0.10" digest = "0.10"
err-derive = "0.3" err-derive = "0.3"
@ -53,6 +55,7 @@ hexdump = "0.1"
hmac = "0.12" hmac = "0.12"
idna = "0.5" idna = "0.5"
itertools = "0.12" itertools = "0.12"
ipnet = "2.9.0"
lazy_static = "1.4" lazy_static = "1.4"
md-5 = "0.10" md-5 = "0.10"
mktemp = "0.5" mktemp = "0.5"
@ -62,22 +65,26 @@ parse_duration = "2.1"
pin-project = "1.0.12" pin-project = "1.0.12"
pnet_datalink = "0.34" pnet_datalink = "0.34"
rand = "0.8" rand = "0.8"
sha1 = "0.10"
sha2 = "0.10" sha2 = "0.10"
timeago = { version = "0.4", default-features = false } timeago = { version = "0.4", default-features = false }
xxhash-rust = { version = "0.8", default-features = false, features = ["xxh3"] } xxhash-rust = { version = "0.8", default-features = false, features = ["xxh3"] }
aes-gcm = { version = "0.10", features = ["aes", "stream"] }
sodiumoxide = { version = "0.2.5-0", package = "kuska-sodiumoxide" } sodiumoxide = { version = "0.2.5-0", package = "kuska-sodiumoxide" }
kuska-handshake = { version = "0.2.0", features = ["default", "async_std"] } kuska-handshake = { version = "0.2.0", features = ["default", "async_std"] }
clap = { version = "4.1", features = ["derive", "env"] } clap = { version = "4.1", features = ["derive", "env"] }
pretty_env_logger = "0.5" pretty_env_logger = "0.5"
structopt = { version = "0.3", default-features = false } structopt = { version = "0.3", default-features = false }
syslog-tracing = "0.3"
tracing = "0.1" tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] } tracing-subscriber = { version = "0.3", features = ["env-filter"] }
heed = { version = "0.11", default-features = false, features = ["lmdb"] } heed = { version = "0.11", default-features = false, features = ["lmdb"] }
rusqlite = "0.30.0" rusqlite = "0.31.0"
sled = "0.34" r2d2 = "0.8"
r2d2_sqlite = "0.24"
async-compression = { version = "0.4", features = ["tokio", "zstd"] } async-compression = { version = "0.4", features = ["tokio", "zstd"] }
zstd = { version = "0.13", default-features = false } zstd = { version = "0.13", default-features = false }

View file

@ -40,7 +40,6 @@ in {
features = [ features = [
"garage/bundled-libs" "garage/bundled-libs"
"garage/k2v" "garage/k2v"
"garage/sled"
"garage/lmdb" "garage/lmdb"
"garage/sqlite" "garage/sqlite"
]; ];

View file

@ -98,7 +98,6 @@ paths:
type: string type: string
example: example:
- "k2v" - "k2v"
- "sled"
- "lmdb" - "lmdb"
- "sqlite" - "sqlite"
- "consul-discovery" - "consul-discovery"

View file

@ -23,7 +23,7 @@ client = minio.Minio(
"GKyourapikey", "GKyourapikey",
"abcd[...]1234", "abcd[...]1234",
# Force the region, this is specific to garage # Force the region, this is specific to garage
region="region", region="garage",
) )
``` ```

View file

@ -80,6 +80,53 @@ To test your new configuration, just reload your Nextcloud webpage and start sen
*External link:* [Nextcloud Documentation > Primary Storage](https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/primary_storage.html) *External link:* [Nextcloud Documentation > Primary Storage](https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/primary_storage.html)
#### SSE-C encryption (since Garage v1.0)
Since version 1.0, Garage supports server-side encryption with customer keys
(SSE-C). In this mode, Garage is responsible for encrypting and decrypting
objects, but it does not store the encryption key itself. The encryption key
should be provided by Nextcloud upon each request. This mode of operation is
supported by Nextcloud and it has successfully been tested together with
Garage.
To enable SSE-C encryption:
1. Make sure your Garage server is accessible via SSL through a reverse proxy
such as Nginx, and that it is using a valid public certificate (Nextcloud
might be able to connect to an S3 server that is using a self-signed
certificate, but you will lose many hours while trying, so don't).
Configure values for `use_ssl` and `port` accordingly in your `config.php`
file.
2. Generate an encryption key using the following command:
```
openssl rand -base64 32
```
Make sure to keep this key **secret**!
3. Add the encryption key in your `config.php` file as follows:
```php
<?php
$CONFIG = array(
    'objectstore' => [
        'class' => '\\OC\\Files\\ObjectStore\\S3',
        'arguments' => [
            ...
            'sse_c_key' => 'exampleencryptionkeyLbU+5fKYQcVoqnn+RaIOXgo=',
            ...
        ],
    ],
);
```
Nextcloud will now make Garage encrypt files at rest in the storage bucket.
These files will not be readable by an S3 client that has credentials to the
bucket but doesn't also know the secret encryption key.
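To verify that encryption is effective, you can try reading an object back with and without the key. The sketch below is illustrative: the endpoint URL, bucket name and key file are placeholders, and it assumes the `aws` CLI is configured with credentials for the bucket.
```bash
# sse-c.key must contain the raw 32-byte key (base64-decode the key
# generated in step 2): openssl base64 -d < key.b64 > sse-c.key

# Without the key, reading fails even with valid bucket credentials:
aws --endpoint-url https://s3.example.com \
    s3 cp s3://nextcloud-data/urlto/some-object ./check.bin
# With the same key Nextcloud uses, Garage decrypts transparently:
aws --endpoint-url https://s3.example.com \
    s3 cp s3://nextcloud-data/urlto/some-object ./check.bin \
    --sse-c AES256 --sse-c-key fileb://sse-c.key
```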
### External Storage ### External Storage
**From the GUI.** Activate the "External storage support" app from the "Applications" page (click on your account icon on the top right corner of your screen to display the menu). Go to your parameters page (also located below your account icon). Click on external storage (or the corresponding translation in your language). **From the GUI.** Activate the "External storage support" app from the "Applications" page (click on your account icon on the top right corner of your screen to display the menu). Go to your parameters page (also located below your account icon). Click on external storage (or the corresponding translation in your language).
@ -245,7 +292,7 @@ with average object size ranging from 50 KB to 150 KB.
As such, your Garage cluster should be configured appropriately for good performance: As such, your Garage cluster should be configured appropriately for good performance:
- use Garage v0.8.0 or higher with the [LMDB database engine](@documentation/reference-manual/configuration.md#db-engine-since-v0-8-0). - use Garage v0.8.0 or higher with the [LMDB database engine](@documentation/reference-manual/configuration.md#db-engine-since-v0-8-0).
With the default Sled database engine, your database could quickly end up taking tens of GB of disk space. Older versions of Garage used the Sled database engine which had issues, such as databases quickly ending up taking tens of GB of disk space.
- the Garage database should be stored on a SSD - the Garage database should be stored on a SSD
### Creating your bucket ### Creating your bucket
@ -288,6 +335,7 @@ From the [official Mastodon documentation](https://docs.joinmastodon.org/admin/t
```bash ```bash
$ RAILS_ENV=production bin/tootctl media remove --days 3 $ RAILS_ENV=production bin/tootctl media remove --days 3
$ RAILS_ENV=production bin/tootctl media remove --days 15 --prune-profiles
$ RAILS_ENV=production bin/tootctl media remove-orphans $ RAILS_ENV=production bin/tootctl media remove-orphans
$ RAILS_ENV=production bin/tootctl preview_cards remove --days 15 $ RAILS_ENV=production bin/tootctl preview_cards remove --days 15
``` ```
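These commands only take effect when they are actually run; a hedged way to automate them is a cron entry on the server hosting Mastodon (the paths, schedule and user below are placeholders):
```bash
# /etc/cron.d/mastodon-media-prune -- illustrative schedule and paths
0 4 * * * mastodon cd /home/mastodon/live && RAILS_ENV=production bin/tootctl media remove --days 3
30 4 * * * mastodon cd /home/mastodon/live && RAILS_ENV=production bin/tootctl preview_cards remove --days 15
```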
@ -306,8 +354,6 @@ Imports: 1.7 KB
Settings: 0 Bytes Settings: 0 Bytes
``` ```
Unfortunately, [old avatars and headers cannot currently be cleaned up](https://github.com/mastodon/mastodon/issues/9567).
### Migrating your data ### Migrating your data
Data migration should be done with an efficient S3 client. Data migration should be done with an efficient S3 client.
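As a hedged sketch, such a migration could be done with the MinIO client (`mc`); the alias names, endpoints and bucket names below are placeholders:
```bash
# Register both the old S3 provider and the Garage cluster as aliases:
mc alias set old-s3 https://s3.old-provider.example ACCESS_KEY SECRET_KEY
mc alias set garage https://s3.garage.example GK_ACCESS_KEY SECRET_KEY
# Copy all media objects from the old bucket to the Garage bucket:
mc mirror old-s3/mastodon-media garage/mastodon-media
```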

View file

@ -259,7 +259,7 @@ duck --delete garage:/my-files/an-object.txt
## WinSCP (libs3) {#winscp} ## WinSCP (libs3) {#winscp}
*You can find instructions on how to use the GUI in french [in our wiki](https://wiki.deuxfleurs.fr/fr/Guide/Garage/WinSCP).* *You can find instructions on how to use the GUI in French [in our wiki](https://guide.deuxfleurs.fr/prise_en_main/winscp/).*
How to use `winscp.com`, the CLI interface of WinSCP: How to use `winscp.com`, the CLI interface of WinSCP:

View file

@ -53,20 +53,43 @@ and that's also why your nodes have super long identifiers.
Adding TLS support built into Garage is not currently planned. Adding TLS support built into Garage is not currently planned.
## Garage stores data in plain text on the filesystem ## Garage stores data in plain text on the filesystem or encrypted using customer keys (SSE-C)
Garage does not handle data encryption at rest by itself, and instead delegates
to the user to add encryption, either at the storage layer (LUKS, etc) or on
the client side (or both). There are no current plans to add data encryption
directly in Garage.
Implementing data encryption directly in Garage might make things simpler for
end users, but also raises many more questions, especially around key
management: for encryption of data, where could Garage get the encryption keys
from ? If we encrypt data but keep the keys in a plaintext file next to them,
it's useless. We probably don't want to have to manage secrets in garage as it
would be very hard to do in a secure way. Maybe integrate with an external
system such as Hashicorp Vault?
For standard S3 API requests, Garage does not encrypt data at rest by itself.
For the most generic at-rest encryption of data, we recommend setting up your
storage partitions on encrypted LUKS devices.
If you are developing your own client software that makes use of S3 storage,
we recommend implementing data encryption directly on the client side and never
transmitting plaintext data to Garage. This makes it easy to use an external
untrusted storage provider if necessary.
Garage does support [SSE-C
encryption](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html),
an encryption mode of Amazon S3 where data is encrypted at rest using
encryption keys given by the client. The encryption keys are passed to the
server in a header in each request, to encrypt or decrypt data at the moment of
reading or writing. The server discards the key as soon as it has finished
using it for the request. This mode allows the data to be encrypted at rest by
Garage itself, but it requires support in the client software. It is also not
adapted to a model where the server is not trusted or assumed to be
compromised, as the server can easily know the encryption keys. Note however
that when using SSE-C encryption, the only Garage node that knows the
encryption key passed in a given request is the node to which the request is
directed (which can be a gateway node), so it is easy to have untrusted nodes
in the cluster as long as S3 API requests containing SSE-C encryption keys are
not directed to them.
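As an illustration, here is a hedged sketch of an SSE-C round trip using the `aws` CLI (endpoint and bucket names are placeholders); the CLI translates the `--sse-c*` options into the `x-amz-server-side-encryption-customer-*` headers sent with each request:
```bash
openssl rand 32 > sse-c.key   # raw 256-bit customer key; keep it safe
aws --endpoint-url https://s3.garage.example \
    s3 cp ./document.pdf s3://my-bucket/document.pdf \
    --sse-c AES256 --sse-c-key fileb://sse-c.key
aws --endpoint-url https://s3.garage.example \
    s3 cp s3://my-bucket/document.pdf ./document.pdf \
    --sse-c AES256 --sse-c-key fileb://sse-c.key
```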
Implementing automatic data encryption directly in Garage without client-side
management of keys (something like
[SSE-S3](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingServerSideEncryption.html))
could make things simpler for end users that don't want to set up LUKS, but also
raises many more questions, especially around key management: for encryption of
data, where could Garage get the encryption keys from? If we encrypt data but
keep the keys in a plaintext file next to them, it's useless. We probably don't
want to have to manage secrets in Garage as it would be very hard to do in a
secure way. At the time of writing, there are no plans to implement this in
Garage.
# Adding data encryption using external tools # Adding data encryption using external tools

View file

@ -90,6 +90,6 @@ The following feature flags are available in v0.8.0:
| `kubernetes-discovery` | optional | Enable automatic registration and discovery<br>of cluster nodes through the Kubernetes API | | `kubernetes-discovery` | optional | Enable automatic registration and discovery<br>of cluster nodes through the Kubernetes API |
| `metrics` | *by default* | Enable collection of metrics in Prometheus format on the admin API | | `metrics` | *by default* | Enable collection of metrics in Prometheus format on the admin API |
| `telemetry-otlp` | optional | Enable collection of execution traces using OpenTelemetry | | `telemetry-otlp` | optional | Enable collection of execution traces using OpenTelemetry |
| `sled` | *by default* | Enable using Sled to store Garage's metadata | | `syslog` | optional | Enable logging to Syslog |
| `lmdb` | optional | Enable using LMDB to store Garage's metadata | | `lmdb` | *by default* | Enable using LMDB to store Garage's metadata |
| `sqlite` | optional | Enable using Sqlite3 to store Garage's metadata | | `sqlite` | *by default* | Enable using Sqlite3 to store Garage's metadata |

View file

@ -27,7 +27,7 @@ To run a real-world deployment, make sure the following conditions are met:
[Yggdrasil](https://yggdrasil-network.github.io/) are approaches to consider [Yggdrasil](https://yggdrasil-network.github.io/) are approaches to consider
in addition to building out your own VPN tunneling. in addition to building out your own VPN tunneling.
- This guide will assume you are using Docker containers to deploy Garage on each node. - This guide will assume you are using Docker containers to deploy Garage on each node.
Garage can also be run independently, for instance as a [Systemd service](@/documentation/cookbook/systemd.md). Garage can also be run independently, for instance as a [Systemd service](@/documentation/cookbook/systemd.md).
You can also use an orchestrator such as Nomad or Kubernetes to automatically manage You can also use an orchestrator such as Nomad or Kubernetes to automatically manage
Docker containers on a fleet of nodes. Docker containers on a fleet of nodes.
@ -53,9 +53,9 @@ to store 2 TB of data in total.
### Best practices ### Best practices
- If you have fast dedicated networking between all your nodes, and are planing to store - If you have reasonably fast networking between all your nodes, and are planning to store
very large files, bump the `block_size` configuration parameter to 10 MB mostly large files, bump the `block_size` configuration parameter to 10 MB
(`block_size = 10485760`). (`block_size = "10M"`).
- Garage stores its files in two locations: it uses a metadata directory to store frequently-accessed - Garage stores its files in two locations: it uses a metadata directory to store frequently-accessed
small metadata items, and a data directory to store data blocks of uploaded objects. small metadata items, and a data directory to store data blocks of uploaded objects.
@ -68,31 +68,42 @@ to store 2 TB of data in total.
EXT4 is not recommended as it has more strict limitations on the number of inodes, EXT4 is not recommended as it has more strict limitations on the number of inodes,
which might cause issues with Garage when large numbers of objects are stored. which might cause issues with Garage when large numbers of objects are stored.
- If you only have an HDD and no SSD, it's fine to put your metadata alongside the data
on the same drive. Having lots of RAM for your kernel to cache the metadata will
help a lot with performance. Make sure to use the LMDB database engine,
instead of Sled, which suffers from quite bad performance degradation on HDDs.
Sled is still the default for legacy reasons, but is not recommended anymore.
- For the metadata storage, Garage does not do checksumming and integrity
verification on its own. If you are afraid of bitrot/data corruption,
put your metadata directory on a ZFS or BTRFS partition. Otherwise, just use regular
EXT4 or XFS.
- Servers with multiple HDDs are supported natively by Garage without resorting - Servers with multiple HDDs are supported natively by Garage without resorting
to RAID, see [our dedicated documentation page](@/documentation/operations/multi-hdd.md). to RAID, see [our dedicated documentation page](@/documentation/operations/multi-hdd.md).
- For the metadata storage, Garage does not do checksumming and integrity
verification on its own, so it is better to use a robust filesystem such as
BTRFS or ZFS. Users have reported that when using the LMDB database engine
(the default), database files have a tendency of becoming corrupted after an
unclean shutdown (e.g. a power outage), so you should take regular snapshots
to be able to recover from such a situation. This can be done using Garage's
built-in automatic snapshotting (since v0.9.4), or by using filesystem-level
snapshots (see the sketch after this list). If you cannot do so, you might want
to switch to Sqlite, which is more robust.
- LMDB is the fastest and most tested database engine, but it has the following
weaknesses: 1/ data files are not architecture-independent, you cannot simply
move a Garage metadata directory between nodes running different architectures,
and 2/ LMDB is not suited for 32-bit platforms. Sqlite is a viable alternative
if any of these are of concern.
- If you only have an HDD and no SSD, it's fine to put your metadata alongside
the data on the same drive, but then consider your filesystem choice wisely
(see above). Having lots of RAM for your kernel to cache the metadata will
help a lot with performance. The default LMDB database engine is the most
tested and has good performance.
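As referenced above, filesystem-level snapshots of the metadata directory can be scripted; a minimal sketch with ZFS, where the dataset name is a placeholder:
```bash
# Assumes the metadata directory lives on a dedicated ZFS dataset
# named tank/garage-meta; adjust to your own pool layout.
zfs snapshot tank/garage-meta@garage-$(date -u +%Y%m%d-%H%M%S)
zfs list -t snapshot    # review existing snapshots, prune old ones as needed
```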
## Get a Docker image ## Get a Docker image
Our docker image is currently named `dxflrs/garage` and is stored on the [Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated). Our docker image is currently named `dxflrs/garage` and is stored on the [Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).
We encourage you to use a fixed tag (eg. `v0.9.2`) and not the `latest` tag. We encourage you to use a fixed tag (eg. `v1.0.1`) and not the `latest` tag.
For this example, we will use the latest published version at the time of the writing which is `v0.9.2` but it's up to you For this example, we will use the latest published version at the time of the writing which is `v1.0.1` but it's up to you
to check [the most recent versions on the Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated). to check [the most recent versions on the Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).
For example: For example:
``` ```
sudo docker pull dxflrs/garage:v0.9.2 sudo docker pull dxflrs/garage:v1.0.1
``` ```
## Deploying and configuring Garage ## Deploying and configuring Garage
@ -115,8 +126,9 @@ A valid `/etc/garage.toml` for our cluster would look as follows:
metadata_dir = "/var/lib/garage/meta" metadata_dir = "/var/lib/garage/meta"
data_dir = "/var/lib/garage/data" data_dir = "/var/lib/garage/data"
db_engine = "lmdb" db_engine = "lmdb"
metadata_auto_snapshot_interval = "6h"
replication_mode = "3" replication_factor = 3
compression_level = 2 compression_level = 2
@ -140,6 +152,8 @@ Check the following for your configuration files:
- Make sure `rpc_public_addr` contains the public IP address of the node you are configuring. - Make sure `rpc_public_addr` contains the public IP address of the node you are configuring.
This parameter is optional but recommended: if your nodes have trouble communicating with This parameter is optional but recommended: if your nodes have trouble communicating with
one another, consider adding it. one another, consider adding it.
Alternatively, you can also set `rpc_public_addr_subnet`, which can filter
the addresses announced to other peers to a specific subnet.
- Make sure `rpc_secret` is the same value on all nodes. It should be a 32-bytes hex-encoded secret key. - Make sure `rpc_secret` is the same value on all nodes. It should be a 32-bytes hex-encoded secret key.
You can generate such a key with `openssl rand -hex 32`. You can generate such a key with `openssl rand -hex 32`.
@ -157,7 +171,7 @@ docker run \
-v /etc/garage.toml:/etc/garage.toml \ -v /etc/garage.toml:/etc/garage.toml \
-v /var/lib/garage/meta:/var/lib/garage/meta \ -v /var/lib/garage/meta:/var/lib/garage/meta \
-v /var/lib/garage/data:/var/lib/garage/data \ -v /var/lib/garage/data:/var/lib/garage/data \
dxflrs/garage:v0.9.2 dxflrs/garage:v1.0.1
``` ```
With this command line, Garage should be started automatically at each boot. With this command line, Garage should be started automatically at each boot.
@ -171,7 +185,7 @@ If you want to use `docker-compose`, you may use the following `docker-compose.y
version: "3" version: "3"
services: services:
garage: garage:
image: dxflrs/garage:v0.9.2 image: dxflrs/garage:v1.0.1
network_mode: "host" network_mode: "host"
restart: unless-stopped restart: unless-stopped
volumes: volumes:
@ -187,7 +201,7 @@ upgrades. With the containerized setup proposed here, the upgrade process
will require stopping and removing the existing container, and re-creating it will require stopping and removing the existing container, and re-creating it
with the upgraded version. with the upgraded version.
## Controling the daemon ## Controlling the daemon
The `garage` binary has two purposes: The `garage` binary has two purposes:
- it acts as a daemon when launched with `garage server` - it acts as a daemon when launched with `garage server`
@ -245,7 +259,7 @@ You can then instruct nodes to connect to one another as follows:
Venus$ garage node connect 563e1ac825ee3323aa441e72c26d1030d6d4414aeb3dd25287c531e7fc2bc95d@[fc00:1::1]:3901 Venus$ garage node connect 563e1ac825ee3323aa441e72c26d1030d6d4414aeb3dd25287c531e7fc2bc95d@[fc00:1::1]:3901
``` ```
You don't nead to instruct all node to connect to all other nodes: You don't need to instruct all nodes to connect to all other nodes:
nodes will discover one another transitively. nodes will discover one another transitively.
Now if you run `garage status` on any node, you should have an output that looks as follows: Now if you run `garage status` on any node, you should have an output that looks as follows:
@ -328,8 +342,8 @@ Given the information above, we will configure our cluster as follow:
```bash ```bash
garage layout assign 563e -z par1 -c 1T -t mercury garage layout assign 563e -z par1 -c 1T -t mercury
garage layout assign 86f0 -z par1 -c 2T -t venus garage layout assign 86f0 -z par1 -c 2T -t venus
garage layout assign 6814 -z lon1 -c 2T -t earth garage layout assign 6814 -z lon1 -c 2T -t earth
garage layout assign 212f -z bru1 -c 1.5T -t mars garage layout assign 212f -z bru1 -c 1.5T -t mars
``` ```
At this point, the changes in the cluster layout have not yet been applied. At this point, the changes in the cluster layout have not yet been applied.
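To review the staged changes and make them effective, one would typically run the following (the version number is an example; use the value suggested by `garage layout show`):
```bash
garage layout show               # review the planned layout changes
garage layout apply --version 1  # apply them as the new layout version
```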

View file

@ -50,3 +50,20 @@ locations. They use Garage themselves for the following tasks:
The Deuxfleurs Garage cluster is a multi-site cluster currently composed of The Deuxfleurs Garage cluster is a multi-site cluster currently composed of
9 nodes in 3 physical locations. 9 nodes in 3 physical locations.
### Triplebit
[Triplebit](https://www.triplebit.org) is a non-profit hosting provider and
ISP focused on improving access to privacy-related services. They use
Garage themselves for the following tasks:
- Hosting of their homepage, [privacyguides.org](https://www.privacyguides.org/), and various other static sites
- As a Mastodon object storage backend for [mstdn.party](https://mstdn.party/) and [mstdn.plus](https://mstdn.plus/)
- As a PeerTube storage backend for [neat.tube](https://neat.tube/)
- As a [Matrix media backend](https://github.com/matrix-org/synapse-s3-storage-provider)
Triplebit's Garage cluster is a multi-site cluster currently composed of
10 nodes in 3 physical locations.

View file

@ -97,7 +97,7 @@ delete a tombstone, the following condition has to be met:
superseded by the tombstone. This ensures that deleting the tombstone is superseded by the tombstone. This ensures that deleting the tombstone is
safe and that no deleted value will come back in the system. safe and that no deleted value will come back in the system.
Garage makes use of Sled's atomic operations (such as compare-and-swap and Garage uses atomic database operations (such as compare-and-swap and
transactions) to ensure that only tombstones that have been correctly transactions) to ensure that only tombstones that have been correctly
propagated to other nodes are ever deleted from the local entry tree. propagated to other nodes are ever deleted from the local entry tree.

View file

@ -67,7 +67,7 @@ Pithos has been abandonned and should probably not used yet, in the following we
Pithos was relying as a S3 proxy in front of Cassandra (and was working with Scylla DB too). Pithos was relying as a S3 proxy in front of Cassandra (and was working with Scylla DB too).
From its designers' mouth, storing data in Cassandra has shown its limitations justifying the project abandonment. From its designers' mouth, storing data in Cassandra has shown its limitations justifying the project abandonment.
They built a closed-source version 2 that does not store blobs in the database (only metadata) but did not communicate further on it. They built a closed-source version 2 that does not store blobs in the database (only metadata) but did not communicate further on it.
We considered there v2's design but concluded that it does not fit both our *Self-contained & lightweight* and *Simple* properties. It makes the development, the deployment and the operations more complicated while reducing the flexibility. We considered their v2's design but concluded that it does not fit both our *Self-contained & lightweight* and *Simple* properties. It makes the development, the deployment and the operations more complicated while reducing the flexibility.
**[Riak CS](https://docs.riak.com/riak/cs/2.1.1/index.html):** **[Riak CS](https://docs.riak.com/riak/cs/2.1.1/index.html):**
*Not written yet* *Not written yet*

View file

@ -19,7 +19,7 @@ connecting to. To run on all nodes, add the `-a` flag as follows:
# Data block operations # Data block operations
## Data store scrub ## Data store scrub {#scrub}
Scrubbing the data store means examining each individual data block to check that Scrubbing the data store means examining each individual data block to check that
their content is correct, by verifying their hash. Any block found to be corrupted their content is correct, by verifying their hash. Any block found to be corrupted
@ -104,6 +104,24 @@ operation will also move out all data from locations marked as read-only.
# Metadata operations # Metadata operations
## Metadata snapshotting
It is good practice to set up automatic snapshotting of your metadata database
file, to recover from situations where it becomes corrupted on disk. This can
be done at the filesystem level if you are using ZFS or BTRFS.
Since Garage v0.9.4, Garage is able to take snapshots of the metadata database
itself. This basically amounts to copying the database file, except that it can
be run live while Garage is running without the risk of corruption or
inconsistencies. This can be setup to run automatically on a schedule using
[`metadata_auto_snapshot_interval`](@/documentation/reference-manual/configuration.md#metadata_auto_snapshot_interval).
A snapshot can also be triggered manually using the `garage meta snapshot`
command. Note that taking a snapshot using this method is very intensive as it
requires making a full copy of the database file, so you might prefer using
filesystem-level snapshots if possible. To recover a corrupted node from such a
snapshot, read the instructions
[here](@/documentation/operations/recovering.md#corrupted_meta).
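For reference, a minimal sketch of both approaches (the interval value is an example):
```bash
garage meta snapshot          # snapshot the local node's metadata database
garage meta snapshot --all    # snapshot all nodes of the cluster at once
# For automatic snapshots, set in garage.toml, e.g.:
#   metadata_auto_snapshot_interval = "6h"
```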
## Metadata table resync ## Metadata table resync
Garage automatically resyncs all entries stored in the metadata tables every hour, Garage automatically resyncs all entries stored in the metadata tables every hour,
@ -123,4 +141,7 @@ blocks may still be held by Garage. If you suspect that such corruption has occu
in your cluster, you can run one of the following repair procedures: in your cluster, you can run one of the following repair procedures:
- `garage repair versions`: checks that all versions belong to a non-deleted object, and purges any orphan version - `garage repair versions`: checks that all versions belong to a non-deleted object, and purges any orphan version
- `garage repair block_refs`: checks that all block references belong to a non-deleted object version, and purges any orphan block reference (this will then allow the blocks to be garbage-collected)
- `garage repair block-refs`: checks that all block references belong to a non-deleted object version, and purges any orphan block reference (this will then allow the blocks to be garbage-collected)
- `garage repair block-rc`: checks that the reference counters for blocks are in sync with the actual number of non-deleted entries in the block reference table

View file

@ -12,7 +12,7 @@ An introduction to building cluster layouts can be found in the [production depl
In Garage, all of the data that can be stored in a given cluster is divided In Garage, all of the data that can be stored in a given cluster is divided
into slices which we call *partitions*. Each partition is stored by into slices which we call *partitions*. Each partition is stored by
one or several nodes in the cluster one or several nodes in the cluster
(see [`replication_mode`](@/documentation/reference-manual/configuration.md#replication_mode)). (see [`replication_factor`](@/documentation/reference-manual/configuration.md#replication_factor)).
The layout determines the correspondence between these partitions, The layout determines the correspondence between these partitions,
which exist on a logical level, and actual storage nodes. which exist on a logical level, and actual storage nodes.

View file

@ -5,7 +5,7 @@ weight = 40
Garage is meant to work on old, second-hand hardware. Garage is meant to work on old, second-hand hardware.
In particular, this makes it likely that some of your drives will fail, and some manual intervention will be needed. In particular, this makes it likely that some of your drives will fail, and some manual intervention will be needed.
Fear not! For Garage is fully equipped to handle drive failures, in most common cases. Fear not! Garage is fully equipped to handle drive failures, in most common cases.
## A note on availability of Garage ## A note on availability of Garage
@ -61,7 +61,7 @@ garage repair -a --yes blocks
This will re-synchronize blocks of data that are missing to the new HDD, reading them from copies located on other nodes. This will re-synchronize blocks of data that are missing to the new HDD, reading them from copies located on other nodes.
You can check on the advancement of this process by doing the following command: You can check on the advancement of this process by doing the following command:
```bash ```bash
garage stats -a garage stats -a
@ -108,3 +108,57 @@ garage layout apply # once satisfied, apply the changes
Garage will then start synchronizing all required data on the new node. Garage will then start synchronizing all required data on the new node.
This process can be monitored using the `garage stats -a` command. This process can be monitored using the `garage stats -a` command.
## Replacement scenario 3: corrupted metadata {#corrupted_meta}
In some cases, your metadata DB file might become corrupted, for instance if
your node suffered a power outage and did not shut down properly. In this case,
you can recover without having to change the node ID and rebuilding a cluster
layout. This means that data blocks will not need to be shuffled around, you
must simply find a way to repair the metadata file. The best way is generally
to discard the corrupted file and recover it from another source.
First of all, start by locating the database file in your metadata directory,
which [depends on your `db_engine`
choice](@/documentation/reference-manual/configuration.md#db_engine). Then,
your recovery options are as follows:
- **Option 1: resyncing from other nodes.** In case your cluster is replicated
with two or three copies, you can simply delete the database file, and Garage
will resync from other nodes. To do so, stop Garage, delete the database file
or directory, and restart Garage. Then, do a full table repair by calling
`garage repair -a --yes tables`. This will take a bit of time to complete as
the new node will need to receive copies of the metadata tables from the
network.
- **Option 2: restoring a snapshot taken by Garage.** Since v0.9.4, Garage can
[automatically take regular
snapshots](@/documentation/reference-manual/configuration.md#metadata_auto_snapshot_interval)
of your metadata DB file. This file or directory should be located under
`<metadata_dir>/snapshots`, and is named according to the UTC time at which it
was taken. Stop Garage, discard the database file/directory and replace it by the
snapshot you want to use. For instance, in the case of LMDB:
```bash
cd $METADATA_DIR
mv db.lmdb db.lmdb.bak
cp -r snapshots/2024-03-15T12:13:52Z db.lmdb
```
And for Sqlite:
```bash
cd $METADATA_DIR
mv db.sqlite db.sqlite.bak
cp snapshots/2024-03-15T12:13:52Z db.sqlite
```
Then, restart Garage and run a full table repair by calling `garage repair -a
--yes tables`. This should run relatively fast as only the changes that
occurred since the snapshot was taken will need to be resynchronized. Of
course, if your cluster is not replicated, you will lose all changes that
occurred since the snapshot was taken.
- **Option 3: restoring a filesystem-level snapshot.** If you are using ZFS or
BTRFS to snapshot your metadata partition, refer to their specific
documentation on rolling back or copying files from an old snapshot.
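For instance, with ZFS the recovery could look like the sketch below; the dataset name, snapshot name and service name are placeholders, and note that `zfs rollback -r` destroys any snapshot newer than the one you roll back to:
```bash
systemctl stop garage                             # or however you run Garage
zfs rollback -r tank/garage-meta@garage-20240315  # snapshot taken before corruption
systemctl start garage
garage repair -a --yes tables                     # resync the metadata tables
```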

View file

@ -73,6 +73,18 @@ The entire procedure would look something like this:
You can do all of the nodes in a single zone at once as that won't impact global cluster availability. You can do all of the nodes in a single zone at once as that won't impact global cluster availability.
Do not try to make a backup of the metadata folder of a running node. Do not try to make a backup of the metadata folder of a running node.
**Since Garage v0.9.4,** you can use the `garage meta snapshot --all` command
to take a simultaneous snapshot of the metadata database files of all your
nodes. This avoids the tedious process of having to take them down one by
one before upgrading. Be careful that if automatic snapshotting is enabled,
Garage only keeps the last two snapshots and deletes older ones, so you might
want to disable automatic snapshotting in your upgraded configuration file
until you have confirmed that the upgrade ran successfully. In addition to
snapshotting the metadata databases of your nodes, you should back up at
least the `cluster_layout` file of one of your Garage instances (this file
should be the same on all nodes and you can copy it safely while Garage is
running). A minimal sketch of this step is given after this list.
3. Prepare your binaries and configuration files for the new Garage version 3. Prepare your binaries and configuration files for the new Garage version
4. Restart all nodes simultaneously in the new version 4. Restart all nodes simultaneously in the new version
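A minimal sketch of the snapshot-and-backup step above; the metadata directory and backup destination are placeholders:
```bash
garage meta snapshot --all    # snapshot metadata databases on all nodes at once
# Back up the cluster layout from one node; the file sits in the metadata
# directory (here assumed to be /var/lib/garage/meta):
cp /var/lib/garage/meta/cluster_layout /root/garage-backup/cluster_layout
```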

View file

@ -42,6 +42,13 @@ If a binary of the last version is not available for your architecture,
or if you want a build customized for your system, or if you want a build customized for your system,
you can [build Garage from source](@/documentation/cookbook/from-source.md). you can [build Garage from source](@/documentation/cookbook/from-source.md).
If none of these options work for you, you can also run Garage in a Docker
container. When using Docker, the commands used in this guide will not work
anymore. We recommend reading the tutorial on [configuring a
multi-node cluster](@/documentation/cookbook/real-world.md) to learn about
using Garage as a Docker container. For simplicity, a minimal command to launch
Garage using Docker is provided in this quick start guide as well.
## Configuring and starting Garage ## Configuring and starting Garage
@ -57,9 +64,9 @@ to generate unique and private secrets for security reasons:
cat > garage.toml <<EOF cat > garage.toml <<EOF
metadata_dir = "/tmp/meta" metadata_dir = "/tmp/meta"
data_dir = "/tmp/data" data_dir = "/tmp/data"
db_engine = "lmdb" db_engine = "sqlite"
replication_mode = "none" replication_factor = 1
rpc_bind_addr = "[::]:3901" rpc_bind_addr = "[::]:3901"
rpc_public_addr = "127.0.0.1:3901" rpc_public_addr = "127.0.0.1:3901"
@ -85,6 +92,9 @@ metrics_token = "$(openssl rand -base64 32)"
EOF EOF
``` ```
See the [Configuration file format](https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/)
for complete options and values.
Now that your configuration file has been created, you may save it to the directory of your choice. Now that your configuration file has been created, you may save it to the directory of your choice.
By default, Garage looks for **`/etc/garage.toml`.** By default, Garage looks for **`/etc/garage.toml`.**
You can also store it somewhere else, but you will have to specify `-c path/to/garage.toml` You can also store it somewhere else, but you will have to specify `-c path/to/garage.toml`
@ -111,6 +121,26 @@ garage -c path/to/garage.toml server
If you have placed the `garage.toml` file in `/etc` (its default location), you can simply run `garage server`. If you have placed the `garage.toml` file in `/etc` (its default location), you can simply run `garage server`.
Alternatively, if you cannot or do not wish to run the Garage binary directly,
you may use Docker to run Garage in a container using the following command:
```bash
docker run \
-d \
--name garaged \
-p 3900:3900 -p 3901:3901 -p 3902:3902 -p 3903:3903 \
-v /etc/garage.toml:/etc/garage.toml \
-v /var/lib/garage/meta:/var/lib/garage/meta \
-v /var/lib/garage/data:/var/lib/garage/data \
dxflrs/garage:v0.9.4
```
Under Linux, you can substitute `--network host` for `-p 3900:3900 -p 3901:3901 -p 3902:3902 -p 3903:3903`.
#### Troubleshooting
Make sure that your configuration file, `metadata_dir` and `data_dir` are readable by the user running the `garage` server or the Docker daemon.
You can tune Garage's verbosity by setting the `RUST_LOG=` environment variable. \ You can tune Garage's verbosity by setting the `RUST_LOG=` environment variable. \
Available log levels are (from less verbose to more verbose): `error`, `warn`, `info` *(default)*, `debug` and `trace`. Available log levels are (from less verbose to more verbose): `error`, `warn`, `info` *(default)*, `debug` and `trace`.
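For example, to run the server with debug-level logs restricted to Garage's own modules (a sketch; any standard `RUST_LOG` filter directive works):

```bash
RUST_LOG=garage=debug garage -c path/to/garage.toml server
```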
@ -131,6 +161,9 @@ It uses values from the TOML configuration file to find the Garage daemon runnin
local node, therefore if your configuration file is not at `/etc/garage.toml` you will local node, therefore if your configuration file is not at `/etc/garage.toml` you will
again have to specify `-c path/to/garage.toml` at each invocation. again have to specify `-c path/to/garage.toml` at each invocation.
If you are running Garage in a Docker container, you can set `alias garage="docker exec -ti <container name> /garage"`
to use the Garage binary inside your container.
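For instance, with the container named `garaged` as in the command above, a possible setup is:

```bash
alias garage="docker exec -ti garaged /garage"
garage status   # runs the CLI inside the container
```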
If the `garage` CLI is able to correctly detect the parameters of your local Garage node, If the `garage` CLI is able to correctly detect the parameters of your local Garage node,
the following command should be enough to show the status of your cluster: the following command should be enough to show the status of your cluster:

View file

@ -8,19 +8,21 @@ weight = 20
Here is an example `garage.toml` configuration file that illustrates all of the possible options: Here is an example `garage.toml` configuration file that illustrates all of the possible options:
```toml ```toml
replication_mode = "3" replication_factor = 3
consistency_mode = "consistent"
metadata_dir = "/var/lib/garage/meta" metadata_dir = "/var/lib/garage/meta"
data_dir = "/var/lib/garage/data" data_dir = "/var/lib/garage/data"
metadata_fsync = true metadata_fsync = true
data_fsync = false data_fsync = false
disable_scrub = false
metadata_auto_snapshot_interval = "6h"
db_engine = "lmdb" db_engine = "lmdb"
block_size = "1M" block_size = "1M"
block_ram_buffer_max = "256MiB"
sled_cache_capacity = "128MiB"
sled_flush_every_ms = 2000
lmdb_map_size = "1T" lmdb_map_size = "1T"
compression_level = 1 compression_level = 1
@ -29,6 +31,11 @@ rpc_secret = "4425f5c26c5e11581d3223904324dcb5b5d5dfb14e5e7f35e38c595424f5f1e6"
rpc_bind_addr = "[::]:3901" rpc_bind_addr = "[::]:3901"
rpc_bind_outgoing = false rpc_bind_outgoing = false
rpc_public_addr = "[fc00:1::1]:3901" rpc_public_addr = "[fc00:1::1]:3901"
# or set rpc_public_addr_subnet to filter down autodiscovery to a subnet:
# rpc_public_addr_subnet = "2001:0db8:f00:b00::/64"
allow_world_readable_secrets = false
bootstrap_peers = [ bootstrap_peers = [
"563e1ac825ee3323aa441e72c26d1030d6d4414aeb3dd25287c531e7fc2bc95d@[fc00:1::1]:3901", "563e1ac825ee3323aa441e72c26d1030d6d4414aeb3dd25287c531e7fc2bc95d@[fc00:1::1]:3901",
@ -80,23 +87,29 @@ The following gives details about each available configuration option.
### Index ### Index
[Environment variables](#env_variables).
Top-level configuration options: Top-level configuration options:
[`allow_world_readable_secrets`](#allow_world_readable_secrets),
[`block_ram_buffer_max`](#block_ram_buffer_max),
[`block_size`](#block_size), [`block_size`](#block_size),
[`bootstrap_peers`](#bootstrap_peers), [`bootstrap_peers`](#bootstrap_peers),
[`compression_level`](#compression_level), [`compression_level`](#compression_level),
[`data_dir`](#data_dir), [`data_dir`](#data_dir),
[`data_fsync`](#data_fsync), [`data_fsync`](#data_fsync),
[`db_engine`](#db_engine), [`db_engine`](#db_engine),
[`disable_scrub`](#disable_scrub),
[`lmdb_map_size`](#lmdb_map_size), [`lmdb_map_size`](#lmdb_map_size),
[`metadata_auto_snapshot_interval`](#metadata_auto_snapshot_interval),
[`metadata_dir`](#metadata_dir), [`metadata_dir`](#metadata_dir),
[`metadata_fsync`](#metadata_fsync), [`metadata_fsync`](#metadata_fsync),
[`replication_mode`](#replication_mode), [`replication_factor`](#replication_factor),
[`consistency_mode`](#consistency_mode),
[`rpc_bind_addr`](#rpc_bind_addr), [`rpc_bind_addr`](#rpc_bind_addr),
[`rpc_bind_outgoing`](#rpc_bind_outgoing), [`rpc_bind_outgoing`](#rpc_bind_outgoing),
[`rpc_public_addr`](#rpc_public_addr), [`rpc_public_addr`](#rpc_public_addr),
[`rpc_secret`/`rpc_secret_file`](#rpc_secret), [`rpc_public_addr_subnet`](#rpc_public_addr_subnet),
[`sled_cache_capacity`](#sled_cache_capacity), [`rpc_secret`/`rpc_secret_file`](#rpc_secret).
[`sled_flush_every_ms`](#sled_flush_every_ms).
The `[consul_discovery]` section: The `[consul_discovery]` section:
[`api`](#consul_api), [`api`](#consul_api),
@ -130,14 +143,32 @@ The `[admin]` section:
[`admin_token`/`admin_token_file`](#admin_token), [`admin_token`/`admin_token_file`](#admin_token),
[`trace_sink`](#admin_trace_sink), [`trace_sink`](#admin_trace_sink),
### Environment variables {#env_variables}
The following configuration parameter must be specified as an environment
variable; it does not exist in the configuration file:
- `GARAGE_LOG_TO_SYSLOG` (since v0.9.4): set this to `1` or `true` to make the
Garage daemon send its logs to `syslog` (using the libc `syslog` function)
instead of printing to stderr.
The following environment variables can be used to override the corresponding
values in the configuration file:
- [`GARAGE_ALLOW_WORLD_READABLE_SECRETS`](#allow_world_readable_secrets)
- [`GARAGE_RPC_SECRET` and `GARAGE_RPC_SECRET_FILE`](#rpc_secret)
- [`GARAGE_ADMIN_TOKEN` and `GARAGE_ADMIN_TOKEN_FILE`](#admin_token)
- [`GARAGE_METRICS_TOKEN` and `GARAGE_METRICS_TOKEN_FILE`](#admin_metrics_token)
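As an illustrative sketch, secrets can be injected through the environment instead of being written to the configuration file (the generated values here are placeholders):

```bash
# Generate and export secrets, then start the daemon
export GARAGE_RPC_SECRET="$(openssl rand -hex 32)"       # 32 bytes, hex-encoded
export GARAGE_ADMIN_TOKEN="$(openssl rand -base64 32)"
export GARAGE_METRICS_TOKEN="$(openssl rand -base64 32)"
garage -c /etc/garage.toml server
```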
### Top-level configuration options ### Top-level configuration options
#### `replication_mode` {#replication_mode} #### `replication_factor` {#replication_factor}
Garage supports the following replication modes: The replication factor can be any positive integer smaller than or equal to the node count in your cluster.
The chosen replication factor has a big impact on the cluster's failure tolerance and performance characteristics.
- `none` or `1`: data stored on Garage is stored on a single node. There is no - `1`: data stored on Garage is stored on a single node. There is no
redundancy, and data will be unavailable as soon as one node fails or its redundancy, and data will be unavailable as soon as one node fails or its
network is disconnected. Do not use this for anything else than test network is disconnected. Do not use this for anything else than test
deployments. deployments.
@ -148,17 +179,6 @@ Garage supports the following replication modes:
before losing data. Data remains available in read-only mode when one node is before losing data. Data remains available in read-only mode when one node is
down, but write operations will fail. down, but write operations will fail.
- `2-dangerous`: a variant of mode `2`, where written objects are written to
the second replica asynchronously. This means that Garage will return `200
OK` to a PutObject request before the second copy is fully written (or even
before it even starts being written). This means that data can more easily
be lost if the node crashes before a second copy can be completed. This
also means that written objects might not be visible immediately in read
operations. In other words, this mode severely breaks the consistency and
durability guarantees of standard Garage cluster operation. Benefits of
this mode: you can still write to your cluster when one node is
unavailable.
- `3`: data stored on Garage will be stored on three different nodes, if - `3`: data stored on Garage will be stored on three different nodes, if
possible each in a different zone. Garage tolerates two node failures, or possible each in a different zone. Garage tolerates two node failures, or
several node failures but in no more than two zones (in a deployment with at several node failures but in no more than two zones (in a deployment with at
@ -166,55 +186,84 @@ Garage supports the following replication modes:
or node failures are only in a single zone, reading and writing data to or node failures are only in a single zone, reading and writing data to
Garage can continue normally. Garage can continue normally.
- `3-degraded`: a variant of replication mode `3`, that lowers the read - `5`, `7`, ...: When setting the replication factor above 3, it is most useful to
quorum to `1`, to allow you to read data from your cluster when several choose an odd value, since for every two copies added, one more node can fail
nodes (or nodes in several zones) are unavailable. In this mode, Garage before losing the ability to read from and write to the cluster.
does not provide read-after-write consistency anymore. The write quorum is
still 2, ensuring that data successfully written to Garage is stored on at
least two nodes.
- `3-dangerous`: a variant of replication mode `3` that lowers both the read
and write quorums to `1`, to allow you to both read and write to your
cluster when several nodes (or nodes in several zones) are unavailable. It
is the least consistent mode of operation proposed by Garage, and also one
that should probably never be used.
Note that in modes `2` and `3`, Note that in modes `2` and `3`,
if at least the same number of zones are available, an arbitrary number of failures in if at least the same number of zones are available, an arbitrary number of failures in
any given zone is tolerated as copies of data will be spread over several zones. any given zone is tolerated as copies of data will be spread over several zones.
**Make sure `replication_mode` is the same in the configuration files of all nodes. **Make sure `replication_factor` is the same in the configuration files of all nodes.
Never run a Garage cluster where that is not the case.** Never run a Garage cluster where that is not the case.**
It is technically possible to change the replication factor, although it is a
dangerous operation that is not officially supported. This requires you to
delete the existing cluster layout and create a new layout from scratch,
meaning that a full rebalancing of your cluster's data will be needed. To do
it, shut down your cluster entirely, delete the `cluster_layout` files in the
meta directories of all your nodes, update all your configuration files with
the new `replication_factor` parameter, restart your cluster, and then create a
new layout with all the nodes you want to keep. Rebalancing data will take
some time, and data might temporarily appear unavailable to your users.
It is recommended to shut down public access to the cluster while rebalancing
is in progress. In theory, no data should be lost as rebalancing is a
routine operation for Garage, although we cannot guarantee you that everything
will go right in such an extreme scenario.
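As an illustration only, on a hypothetical systemd-based deployment with the metadata directory at `/var/lib/garage/meta`, the procedure could look like this (unit name, paths and layout parameters are assumptions, not part of any official procedure):

```bash
# On every node: stop Garage and delete the stored layout
systemctl stop garage
rm /var/lib/garage/meta/cluster_layout

# On every node: set the new replication_factor in /etc/garage.toml, then:
systemctl start garage

# On one node: rebuild the layout from scratch and apply it
garage layout assign -z dc1 -c 1T <node_id>   # repeat for each node
garage layout apply --version 1
```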
#### `consistency_mode` {#consistency_mode}
The consistency mode setting determines the read and write behaviour of your cluster.
- `consistent`: The default setting. This is what the paragraph above describes.
The read and write quorum will be determined so that read-after-write consistency
is guaranteed.
- `degraded`: Lowers the read
quorum to `1`, to allow you to read data from your cluster when several
nodes (or nodes in several zones) are unavailable. In this mode, Garage
does not provide read-after-write consistency anymore.
The write quorum stays the same as in the `consistent` mode, ensuring that
data successfully written to Garage is stored on multiple nodes (depending
on the replication factor).
- `dangerous`: This mode lowers both the read
and write quorums to `1`, to allow you to both read and write to your
cluster when several nodes (or nodes in several zones) are unavailable. It
is the least consistent mode of operation proposed by Garage, and also one
that should probably never be used.
Changing the `consistency_mode` while leaving the `replication_factor` untouched
(e.g. setting your node's `consistency_mode` to `degraded` when it was previously unset, or from
`dangerous` to `consistent`) can be done easily by just changing the `consistency_mode`
parameter in your config files and restarting all your Garage nodes.
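For instance, a hypothetical cluster that favors read availability over read-after-write consistency could combine the two parameters like this:

```toml
replication_factor = 3
consistency_mode = "degraded"   # write quorum 2, read quorum 1
```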
The consistency mode can be used together with various replication factors, to achieve
a wide range of read and write characteristics. Some examples:
- Replication factor `2`, consistency mode `degraded`: While this mode
technically exists, its properties are the same as with consistency mode `consistent`,
since the read quorum with replication factor `2`, consistency mode `consistent` is already 1.
- Replication factor `2`, consistency mode `dangerous`: written objects are written to
the second replica asynchronously. This means that Garage will return `200
OK` to a PutObject request before the second copy is fully written (or even
before it even starts being written). This means that data can more easily
be lost if the node crashes before a second copy can be completed. This
also means that written objects might not be visible immediately in read
operations. In other words, this configuration severely breaks the consistency and
durability guarantees of standard Garage cluster operation. Benefits of
this configuration: you can still write to your cluster when one node is
unavailable.
The quorums associated with each combination of replication factor and consistency mode are described below: The quorums associated with each combination of replication factor and consistency mode are described below:
| `replication_mode` | Number of replicas | Write quorum | Read quorum | Read-after-write consistency? | | `consistency_mode` | `replication_factor` | Write quorum | Read quorum | Read-after-write consistency? |
| ------------------ | ------------------ | ------------ | ----------- | ----------------------------- | | ------------------ | -------------------- | ------------ | ----------- | ----------------------------- |
| `none` or `1` | 1 | 1 | 1 | yes | | `consistent` | 1 | 1 | 1 | yes |
| `2` | 2 | 2 | 1 | yes | | `consistent` | 2 | 2 | 1 | yes |
| `2-dangerous` | 2 | 1 | 1 | NO | | `dangerous` | 2 | 1 | 1 | NO |
| `3` | 3 | 2 | 2 | yes | | `consistent` | 3 | 2 | 2 | yes |
| `3-degraded` | 3 | 2 | 1 | NO | | `degraded` | 3 | 2 | 1 | NO |
| `3-dangerous` | 3 | 1 | 1 | NO | | `dangerous` | 3 | 1 | 1 | NO |
Changing the `replication_mode` between modes with the same number of replicas
(e.g. from `3` to `3-degraded`, or from `2-dangerous` to `2`), can be done easily by
just changing the `replication_mode` parameter in your config files and restarting all your
Garage nodes.
It is also technically possible to change the replication mode to a mode with a
different numbers of replicas, although it's a dangerous operation that is not
officially supported. This requires you to delete the existing cluster layout
and create a new layout from scratch, meaning that a full rebalancing of your
cluster's data will be needed. To do it, shut down your cluster entirely,
delete the `custer_layout` files in the meta directories of all your nodes,
update all your configuration files with the new `replication_mode` parameter,
restart your cluster, and then create a new layout with all the nodes you want
to keep. Rebalancing data will take some time, and data might temporarily
appear unavailable to your users. It is recommended to shut down public access
to the cluster while rebalancing is in progress. In theory, no data should be
lost as rebalancing is a routine operation for Garage, although we cannot
guarantee you that everything will go right in such an extreme scenario.
#### `metadata_dir` {#metadata_dir} #### `metadata_dir` {#metadata_dir}
@ -250,32 +299,43 @@ Since `v0.8.0`, Garage can use alternative storage backends as follows:
| DB engine | `db_engine` value | Database path | | DB engine | `db_engine` value | Database path |
| --------- | ----------------- | ------------- | | --------- | ----------------- | ------------- |
| [LMDB](https://www.lmdb.tech) (default since `v0.9.0`) | `"lmdb"` | `<metadata_dir>/db.lmdb/` | | [LMDB](https://www.symas.com/lmdb) (since `v0.8.0`, default since `v0.9.0`) | `"lmdb"` | `<metadata_dir>/db.lmdb/` |
| [Sled](https://sled.rs) (default up to `v0.8.0`) | `"sled"` | `<metadata_dir>/db/` | | [Sqlite](https://sqlite.org) (since `v0.8.0`) | `"sqlite"` | `<metadata_dir>/db.sqlite` |
| [Sqlite](https://sqlite.org) | `"sqlite"` | `<metadata_dir>/db.sqlite` | | [Sled](https://sled.rs) (old default, removed since `v1.0`) | `"sled"` | `<metadata_dir>/db/` |
Sled was the only database engine up to Garage v0.7.0. Performance issues and Sled was supported until Garage v0.9.x, and was removed in Garage v1.0.
API limitations of Sled prompted the addition of alternative engines in v0.8.0. You can still use an older binary of Garage (e.g. v0.9.4) to migrate
Since v0.9.0, LMDB is the default engine instead of Sled, and Sled is old Sled metadata databases to another engine.
deprecated. We plan to remove Sled in Garage v1.0.
Performance characteristics of the different DB engines are as follows: Performance characteristics of the different DB engines are as follows:
- Sled: tends to produce large data files and also has performance issues, - LMDB: the recommended database engine for high-performance distributed clusters.
especially when the metadata folder is on a traditional HDD and not on SSD. LMDB works very well, but is known to have the following limitations:
- LMDB: the recommended database engine on 64-bit systems, much more - The data format of LMDB is not portable between architectures, so for
space-efficient and slightly faster. Note that the data format of LMDB is not instance the Garage database of an x86-64 node cannot be moved to an ARM64
portable between architectures, so for instance the Garage database of an node.
x86-64 node cannot be moved to an ARM64 node. Also note that, while LMDB can
technically be used on 32-bit systems, this will limit your node to very - While LMDB can technically be used on 32-bit systems, this will limit your
small database sizes due to how LMDB works; it is therefore not recommended. node to very small database sizes due to how LMDB works; it is therefore
not recommended.
- Several users have reported corrupted LMDB database files after an unclean
shutdown (e.g. a power outage). This situation can generally be recovered
from if your cluster is geo-replicated (by rebuilding your metadata db from
other nodes), or if you have saved regular snapshots at the filesystem
level.
- Keys in LMDB are limited to 511 bytes. This limit translates to limits on
object keys in S3 and sort keys in K2V that are limited to 479 bytes.
- Sqlite: Garage supports Sqlite as an alternative storage backend for - Sqlite: Garage supports Sqlite as an alternative storage backend for
metadata, and although it has not been tested as much, it is expected to work metadata, which does not have the issues listed above for LMDB.
satisfactorily. Since Garage v0.9.0, performance issues have largely been On versions 0.8.x and earlier, Sqlite should be avoided due to abysmal
fixed by allowing for a no-fsync mode (see `metadata_fsync`). Sqlite does not performance, which was fixed with the addition of `metadata_fsync`.
have the database size limitation of LMDB on 32-bit systems. Sqlite is still probably slower than LMDB due to the way we use it,
so it is not the best choice for high-performance storage clusters,
but it should work fine in many cases.
It is possible to convert Garage's metadata directory from one format to another It is possible to convert Garage's metadata directory from one format to another
using the `garage convert-db` command, which should be used as follows: using the `garage convert-db` command, which should be used as follows:
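A typical invocation, here assuming a conversion from Sqlite to LMDB with the metadata directory at `/var/lib/garage/meta` (check `garage convert-db --help` for the authoritative flags):

```bash
garage convert-db \
  -a sqlite -i /var/lib/garage/meta/db.sqlite \
  -b lmdb   -o /var/lib/garage/meta/db.lmdb
```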
@ -302,7 +362,7 @@ Using this option reduces the risk of simultaneous metadata corruption on severa
cluster nodes, which could lead to data loss. cluster nodes, which could lead to data loss.
If multi-site replication is used, this option is most likely not necessary, as If multi-site replication is used, this option is most likely not necessary, as
it is extremely unlikely that two nodes in different locations will have a it is extremely unlikely that two nodes in different locations will have a
power failure at the exact same time. power failure at the exact same time.
(Metadata corruption on a single node is not an issue, the corrupted data file (Metadata corruption on a single node is not an issue, the corrupted data file
@ -312,7 +372,6 @@ Here is how this option impacts the different database engines:
| Database | `metadata_fsync = false` (default) | `metadata_fsync = true` | | Database | `metadata_fsync = false` (default) | `metadata_fsync = true` |
|----------|------------------------------------|-------------------------------| |----------|------------------------------------|-------------------------------|
| Sled | default options | *unsupported* |
| Sqlite | `PRAGMA synchronous = OFF` | `PRAGMA synchronous = NORMAL` | | Sqlite | `PRAGMA synchronous = OFF` | `PRAGMA synchronous = NORMAL` |
| LMDB | `MDB_NOMETASYNC` + `MDB_NOSYNC` | `MDB_NOMETASYNC` | | LMDB | `MDB_NOMETASYNC` + `MDB_NOSYNC` | `MDB_NOMETASYNC` |
@ -331,6 +390,43 @@ at the cost of a moderate drop in write performance.
Similarly to `metadata_fsync`, this is likely not necessary Similarly to `metadata_fsync`, this is likely not necessary
if geographical replication is used. if geographical replication is used.
#### `metadata_auto_snapshot_interval` (since Garage v0.9.4) {#metadata_auto_snapshot_interval}
If this value is set, Garage will automatically take a snapshot of the metadata
DB file at a regular interval and save it in the metadata directory.
This parameter can take any duration string that can be parsed by
the [`parse_duration`](https://docs.rs/parse_duration/latest/parse_duration/#syntax) crate.
Snapshots allow recovering from situations where the metadata DB file is
corrupted, for instance after an unclean shutdown. See [this
page](@/documentation/operations/recovering.md#corrupted_meta) for details.
Garage keeps only the two most recent snapshots of the metadata DB and deletes
older ones automatically.
Note that taking a metadata snapshot is a relatively intensive operation, as the
entire database file is copied. A snapshot being taken might degrade the
performance of the Garage node while it is in progress. If the cluster is under heavy
write load when a snapshot operation is running, this might also cause the
database file to grow in size significantly, as pages cannot be recycled easily.
For this reason, it might be better to use filesystem-level snapshots instead
if possible.
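As a sketch of the filesystem-level alternative, assuming the metadata directory lives on a hypothetical ZFS dataset named `tank/garage-meta`, a cron entry could take snapshots at the same interval:

```
# /etc/cron.d/garage-meta-snapshot (illustrative): snapshot every 6 hours
0 */6 * * * root zfs snapshot tank/garage-meta@garage-$(date +\%Y\%m\%d-\%H\%M)
```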
#### `disable_scrub` {#disable_scrub}
By default, Garage runs a scrub of the data directory approximately once per
month, with a random delay to avoid all nodes running at the same time. When
it scrubs the data directory, Garage will read all of the data files stored on
disk to check their integrity, and will rebuild any data files that it finds
corrupted, using the remaining valid copies stored on other nodes.
See [this page](@/documentation/operations/durability-repairs.md#scrub) for details.
Set the `disable_scrub` configuration value to `true` if you don't need Garage
to scrub the data directory, for instance if you are already scrubbing at the
filesystem level. Note that in this case, if you find a corrupted data file,
you should delete it from the data directory and then call `garage repair
blocks` on the node to ensure that it re-obtains a copy from another node on
the network.
#### `block_size` {#block_size} #### `block_size` {#block_size}
Garage splits stored objects in consecutive chunks of size `block_size` Garage splits stored objects in consecutive chunks of size `block_size`
@ -346,20 +442,36 @@ files will remain available. This however means that chunks from existing files
will not be deduplicated with chunks from newly uploaded files, meaning you will not be deduplicated with chunks from newly uploaded files, meaning you
might use more storage space than is optimally possible. might use more storage space than is optimally possible.
#### `sled_cache_capacity` {#sled_cache_capacity} #### `block_ram_buffer_max` (since v0.9.4) {#block_ram_buffer_max}
This parameter can be used to tune the capacity of the cache used by A limit on the total size of data blocks kept in RAM by S3 API nodes waiting
[sled](https://sled.rs), the database Garage uses internally to store metadata. to be sent to storage nodes asynchronously.
Tune this to fit the RAM you wish to make available to your Garage instance.
This value has a conservative default (128MB) so that Garage doesn't use too much
RAM by default, but feel free to increase this for higher performance.
#### `sled_flush_every_ms` {#sled_flush_every_ms} Explanation: since Garage wants to tolerate node failures, it uses quorum
writes to send data blocks to storage nodes: try to write the block to three
nodes, and return ok as soon as two writes complete. So even if all three nodes
are online, the third write always completes asynchronously. In general, there
are not many writes to a cluster, and the third asynchronous write can
terminate early enough so as to not cause unbounded RAM growth. However, if
the S3 API node is continuously receiving large quantities of data and the
third node is never able to catch up, many data blocks will be kept buffered in
RAM as they are awaiting transfer to the third node.
This parameters can be used to tune the flushing interval of sled. The `block_ram_buffer_max` parameter sets a limit on the size of buffers that can be kept
Increase this if sled is thrashing your SSD, at the risk of losing more data in case in RAM in this process. When the limit is reached, backpressure is applied
of a power outage (though this should not matter much as data is replicated on other back to the S3 client.
nodes). The default value, 2000ms, should be appropriate for most use cases.
Note that this only counts buffers that have reached a certain stage of
processing (received from the client + encrypted and/or compressed as
necessary) and are ready to send to the storage nodes. Many other buffers will
not be counted and this is not a hard limit on RAM consumption. In particular,
if many clients send requests simultaneously with large objects, the RAM
consumption will always grow linearly with the number of concurrent requests,
as each request will use a few buffers of size `block_size` for receiving and
intermediate processing before even trying to send the data to the storage
node.
The default value is 256MiB.
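For a gateway node that ingests large volumes of data and has RAM to spare, you might raise the limit, e.g. (the value uses the same byte-size syntax as `block_size`):

```toml
block_ram_buffer_max = "2GiB"
```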
#### `lmdb_map_size` {#lmdb_map_size} #### `lmdb_map_size` {#lmdb_map_size}
@ -435,6 +547,14 @@ RPC calls. **This parameter is optional but recommended.** In case you have
a NAT that binds the RPC port to a port that is different on your public IP, a NAT that binds the RPC port to a port that is different on your public IP,
this field might help making it work. this field might help making it work.
#### `rpc_public_addr_subnet` {#rpc_public_addr_subnet}
In case `rpc_public_addr` is not set, but autodiscovery is used, this allows
filtering the list of automatically discovered IPs to a specific subnet.
For example, if each node should pick *its* IP inside a specific subnet, but you
don't want to write the IP down explicitly (because it is dynamic, or because you
want to share the same configuration across nodes), you can use this option.
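For example, to let each node advertise whichever autodiscovered address falls inside a given prefix (the subnet below is a documentation placeholder):

```toml
rpc_public_addr_subnet = "2001:0db8:f00:b00::/64"
```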
#### `bootstrap_peers` {#bootstrap_peers} #### `bootstrap_peers` {#bootstrap_peers}
A list of peer identifiers on which to contact other Garage peers of this cluster. A list of peer identifiers on which to contact other Garage peers of this cluster.
@ -451,7 +571,7 @@ be obtained by running `garage node id` and then included directly in the
key will be returned by `garage node id` and you will have to add the IP key will be returned by `garage node id` and you will have to add the IP
yourself. yourself.
### `allow_world_readable_secrets` ### `allow_world_readable_secrets` or `GARAGE_ALLOW_WORLD_READABLE_SECRETS` (env) {#allow_world_readable_secrets}
Garage checks the permissions of your secret files to make sure they're not Garage checks the permissions of your secret files to make sure they're not
world-readable. In some cases, the check might fail and consider your files as world-readable. In some cases, the check might fail and consider your files as

View file

@ -39,10 +39,10 @@ Read about cluster layout management [here](@/documentation/operations/layout.md
### Several replication modes ### Several replication modes
Garage supports a variety of replication modes, with 1 copy, 2 copies or 3 copies of your data, Garage supports a variety of replication modes, with configurable replica count,
and with various levels of consistency, in order to adapt to a variety of usage scenarios. and with various levels of consistency, in order to adapt to a variety of usage scenarios.
Read our reference page on [supported replication modes](@/documentation/reference-manual/configuration.md#replication_mode) Read our reference page on [supported replication modes](@/documentation/reference-manual/configuration.md#replication_factor)
to select the replication mode best suited to your use case (hint: in most cases, `replication_mode = "3"` is what you want). to select the replication mode best suited to your use case (hint: in most cases, `replication_factor = 3` is what you want).
### Compression and deduplication ### Compression and deduplication

View file

@ -225,6 +225,17 @@ block_bytes_read 120586322022
block_bytes_written 3386618077 block_bytes_written 3386618077
``` ```
#### `block_ram_buffer_free_kb` (gauge)
Kibibytes available for buffering blocks that have to be sent to remote nodes.
When clients send too much data to this node and a storage node is not receiving
data fast enough due to slower network conditions, this gauge will drop to
zero and backpressure will be applied.
```
block_ram_buffer_free_kb 219829
```
#### `block_compression_level` (counter) #### `block_compression_level` (counter)
Exposes the block compression level configured for the Garage node. Exposes the block compression level configured for the Garage node.

View file

@ -33,6 +33,7 @@ Feel free to open a PR to suggest fixes to this table. Minio is missing because the
| [URL path-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#path-style-access) (eg. `host.tld/bucket/key`) | ✅ Implemented | ✅ | ✅ | ❓| ✅ | | [URL path-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#path-style-access) (eg. `host.tld/bucket/key`) | ✅ Implemented | ✅ | ✅ | ❓| ✅ |
| [URL vhost-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#virtual-hosted-style-access) URL (eg. `bucket.host.tld/key`) | ✅ Implemented | ❌| ✅| ✅ | ✅ | | [URL vhost-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#virtual-hosted-style-access) URL (eg. `bucket.host.tld/key`) | ✅ Implemented | ❌| ✅| ✅ | ✅ |
| [Presigned URLs](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ShareObjectPreSignedURL.html) | ✅ Implemented | ❌| ✅ | ✅ | ✅(❓) | | [Presigned URLs](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ShareObjectPreSignedURL.html) | ✅ Implemented | ❌| ✅ | ✅ | ✅(❓) |
| [SSE-C encryption](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html) | ✅ Implemented | ❓ | ✅ | ❌ | ✅ |
*Note:* OpenIO does not say if it supports presigned URLs. Because it is part *Note:* OpenIO does not say if it supports presigned URLs. Because it is part
of signature v4 and they claim they support it without additional precisions, of signature v4 and they claim they support it without additional precisions,

View file

@ -0,0 +1,77 @@
+++
title = "Migrating from 0.9 to 1.0"
weight = 11
+++
**This guide explains how to migrate to 1.0 if you have an existing 0.9 cluster.
We don't recommend trying to migrate to 1.0 directly from 0.8 or older.**
This migration procedure has been tested on several clusters without issues.
However, it is still a *critical procedure* that might cause issues.
**Make sure to back up all your data before attempting it!**
You might also want to read our [general documentation on upgrading Garage](@/documentation/operations/upgrading.md).
## Changes introduced in v1.0
The following are **breaking changes** in Garage v1.0 that require your attention when migrating:
- The Sled metadata db engine has been **removed**. If your cluster was still
using Sled, you will need to **use a Garage v0.9.x binary** to convert the
database using the `garage convert-db` subcommand. See
[here](@/documentation/reference-manual/configuration.md#db_engine) for the
details of the procedure.
The following syntax changes have been made to the configuration file:
- The `replication_mode` parameter has been split into two parameters:
[`replication_factor`](@/documentation/reference-manual/configuration.md#replication_factor)
and
[`consistency_mode`](@/documentation/reference-manual/configuration.md#consistency_mode).
The old syntax using `replication_mode` is still accepted for backward
compatibility, so existing configuration files keep working.
- The parameters `sled_cache_capacity` and `sled_flush_every_ms` have been removed.
## Migration procedure
The migration to Garage v1.0 can be done with almost no downtime,
by restarting all nodes at once in the new version.
The migration steps are as follows:
1. Do a `garage repair --all-nodes --yes tables`, watch the logs and verify that
all data seems to be synced correctly between nodes. If you have time, do
additional `garage repair` procedures (`blocks`, `versions`, `block_refs`,
etc.)
2. Ensure you have a snapshot of your Garage installation that you can restore
to in case the upgrade goes wrong:
- If you are running Garage v0.9.4 or later, use the `garage meta snapshot
--all` command to take a backup snapshot of the metadata directories of all
your nodes, and save a copy of the following files in the
metadata directories of your nodes: `cluster_layout`, `data_layout`,
`node_key`, `node_key.pub`.
- If you are running a filesystem such as ZFS or BTRFS that supports
snapshotting, you can create a filesystem-level snapshot to be used as a
restoration point if needed.
- In other cases, make a backup using the old procedure: turn off each node
individually; back up its metadata folder (for instance, use the following
command if your metadata directory is `/var/lib/garage/meta`: `cd
/var/lib/garage ; tar -acf meta-v0.9.tar.zst meta/`); turn it back on
again. This will allow you to take a backup of all nodes without
impacting global cluster availability. You can do all nodes of a single
zone at once as this does not impact the availability of Garage.
3. Prepare your updated binaries and configuration files for Garage v1.0
4. Shut down all v0.9 nodes simultaneously, and restart them all simultaneously
in v1.0. Use your favorite deployment tool (Ansible, Kubernetes, Nomad) to
achieve this as fast as possible. Garage v1.0 should be in a working state
as soon as enough nodes have started.
5. Monitor your cluster in the following hours to see if it works well under
your production load.
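As a condensed sketch of steps 1 and 2 on a v0.9.4+ cluster (the backup destination and metadata path are illustrative):

```bash
# Step 1: verify that tables are in sync across nodes (watch the logs)
garage repair --all-nodes --yes tables
garage repair --all-nodes --yes blocks

# Step 2: snapshot the metadata DB on all nodes, then keep a copy of the
# layout and key files (metadata directory path is an assumption)
garage meta snapshot --all
mkdir -p /root/garage-backup
cp /var/lib/garage/meta/{cluster_layout,data_layout,node_key,node_key.pub} \
   /root/garage-backup/
```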

View file

@ -69,11 +69,10 @@ Example response body:
```json ```json
{ {
"node": "ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f", "node": "b10c110e4e854e5aa3f4637681befac755154b20059ec163254ddbfae86b09df",
"garageVersion": "git:v0.9.0-dev", "garageVersion": "v1.0.1",
"garageFeatures": [ "garageFeatures": [
"k2v", "k2v",
"sled",
"lmdb", "lmdb",
"sqlite", "sqlite",
"metrics", "metrics",
@ -81,83 +80,92 @@ Example response body:
], ],
"rustVersion": "1.68.0", "rustVersion": "1.68.0",
"dbEngine": "LMDB (using Heed crate)", "dbEngine": "LMDB (using Heed crate)",
"knownNodes": [ "layoutVersion": 5,
"nodes": [
{ {
"id": "ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f", "id": "62b218d848e86a64f7fe1909735f29a4350547b54c4b204f91246a14eb0a1a8c",
"addr": "10.0.0.11:3901", "role": {
"id": "62b218d848e86a64f7fe1909735f29a4350547b54c4b204f91246a14eb0a1a8c",
"zone": "dc1",
"capacity": 100000000000,
"tags": []
},
"addr": "10.0.0.3:3901",
"hostname": "node3",
"isUp": true, "isUp": true,
"lastSeenSecsAgo": 9, "lastSeenSecsAgo": 12,
"hostname": "node1" "draining": false,
"dataPartition": {
"available": 660270088192,
"total": 873862266880
},
"metadataPartition": {
"available": 660270088192,
"total": 873862266880
}
}, },
{ {
"id": "4a6ae5a1d0d33bf895f5bb4f0a418b7dc94c47c0dd2eb108d1158f3c8f60b0ff", "id": "a11c7cf18af297379eff8688360155fe68d9061654449ba0ce239252f5a7487f",
"addr": "10.0.0.12:3901", "role": null,
"addr": "10.0.0.2:3901",
"hostname": "node2",
"isUp": true, "isUp": true,
"lastSeenSecsAgo": 1, "lastSeenSecsAgo": 11,
"hostname": "node2" "draining": true,
"dataPartition": {
"available": 660270088192,
"total": 873862266880
},
"metadataPartition": {
"available": 660270088192,
"total": 873862266880
}
}, },
{ {
"id": "23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27", "id": "a235ac7695e0c54d7b403943025f57504d500fdcc5c3e42c71c5212faca040a2",
"addr": "10.0.0.21:3901", "role": {
"id": "a235ac7695e0c54d7b403943025f57504d500fdcc5c3e42c71c5212faca040a2",
"zone": "dc1",
"capacity": 100000000000,
"tags": []
},
"addr": "127.0.0.1:3904",
"hostname": "lindy",
"isUp": true, "isUp": true,
"lastSeenSecsAgo": 7, "lastSeenSecsAgo": 2,
"hostname": "node3" "draining": false,
"dataPartition": {
"available": 660270088192,
"total": 873862266880
},
"metadataPartition": {
"available": 660270088192,
"total": 873862266880
}
}, },
{ {
"id": "e2ee7984ee65b260682086ec70026165903c86e601a4a5a501c1900afe28d84b", "id": "b10c110e4e854e5aa3f4637681befac755154b20059ec163254ddbfae86b09df",
"addr": "10.0.0.22:3901", "role": {
"id": "b10c110e4e854e5aa3f4637681befac755154b20059ec163254ddbfae86b09df",
"zone": "dc1",
"capacity": 100000000000,
"tags": []
},
"addr": "10.0.0.1:3901",
"hostname": "node1",
"isUp": true, "isUp": true,
"lastSeenSecsAgo": 1, "lastSeenSecsAgo": 3,
"hostname": "node4" "draining": false,
"dataPartition": {
"available": 660270088192,
"total": 873862266880
},
"metadataPartition": {
"available": 660270088192,
"total": 873862266880
}
} }
], ]
"layout": {
"version": 12,
"roles": [
{
"id": "ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f",
"zone": "dc1",
"capacity": 10737418240,
"tags": [
"node1"
]
},
{
"id": "4a6ae5a1d0d33bf895f5bb4f0a418b7dc94c47c0dd2eb108d1158f3c8f60b0ff",
"zone": "dc1",
"capacity": 10737418240,
"tags": [
"node2"
]
},
{
"id": "23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27",
"zone": "dc2",
"capacity": 10737418240,
"tags": [
"node3"
]
}
],
"stagedRoleChanges": [
{
"id": "e2ee7984ee65b260682086ec70026165903c86e601a4a5a501c1900afe28d84b",
"remove": false,
"zone": "dc2",
"capacity": 10737418240,
"tags": [
"node4"
]
}
{
"id": "23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27",
"remove": true,
"zone": null,
"capacity": null,
"tags": null,
}
]
}
} }
``` ```

View file

@ -146,7 +146,7 @@ in a bucket, as the partition key becomes the sort key in the index.
How indexing works: How indexing works:
- Each node keeps a local count of how many items it stores for each partition, - Each node keeps a local count of how many items it stores for each partition,
in a local Sled tree that is updated atomically when an item is modified. in a local database tree that is updated atomically when an item is modified.
- These local counters are asynchronously stored in the index table which is - These local counters are asynchronously stored in the index table which is
a regular Garage table spread in the network. Counters are stored as LWW values, a regular Garage table spread in the network. Counters are stored as LWW values,
so basically the final table will have the following structure: so basically the final table will have the following structure:

View file

@ -0,0 +1,17 @@
*
!*.txt
!*.md
!assets
!.gitignore
!*.svg
!*.png
!*.jpg
!*.tex
!Makefile
!.gitignore
!assets/*.drawio.pdf
!talk.pdf

View file

@ -0,0 +1,10 @@
ASSETS=../assets/logos/deuxfleurs.pdf
talk.pdf: talk.tex $(ASSETS)
pdflatex talk.tex
%.pdf: %.svg
inkscape -D -z --file=$^ --export-pdf=$@
%.pdf_tex: %.svg
inkscape -D -z --file=$^ --export-pdf=$@ --export-latex

Binary file not shown.

View file

@ -0,0 +1,543 @@
\nonstopmode
\documentclass[aspectratio=169,xcolor={svgnames}]{beamer}
\usepackage[utf8]{inputenc}
% \usepackage[frenchb]{babel}
\usepackage{amsmath}
\usepackage{mathtools}
\usepackage{breqn}
\usepackage{multirow}
\usetheme{boxes}
\usepackage{graphicx}
\usepackage{import}
\usepackage{adjustbox}
\usepackage[absolute,overlay]{textpos}
%\useoutertheme[footline=authortitle,subsection=false]{miniframes}
%\useoutertheme[footline=authorinstitute,subsection=false]{miniframes}
\useoutertheme{infolines}
\setbeamertemplate{headline}{}
\beamertemplatenavigationsymbolsempty
\definecolor{TitleOrange}{RGB}{255,137,0}
\setbeamercolor{title}{fg=TitleOrange}
\setbeamercolor{frametitle}{fg=TitleOrange}
\definecolor{ListOrange}{RGB}{255,145,5}
\setbeamertemplate{itemize item}{\color{ListOrange}$\blacktriangleright$}
\definecolor{verygrey}{RGB}{70,70,70}
\setbeamercolor{normal text}{fg=verygrey}
\usepackage{tabu}
\usepackage{multicol}
\usepackage{vwcol}
\usepackage{stmaryrd}
\usepackage{graphicx}
\usepackage[normalem]{ulem}
\AtBeginSection[]{
\begin{frame}
\vfill
\centering
\begin{beamercolorbox}[sep=8pt,center,shadow=true,rounded=true]{title}
\usebeamerfont{title}\insertsectionhead\par%
\end{beamercolorbox}
\vfill
\end{frame}
}
\title{Garage}
\author{Alex Auvolat, Deuxfleurs}
\date{Capitoul, 2024-02-29}
\begin{document}
\begin{frame}
\centering
\includegraphics[width=.3\linewidth]{../../sticker/Garage.png}
\vspace{1em}
{\large\bf Alex Auvolat, Deuxfleurs Association}
\vspace{1em}
\url{https://garagehq.deuxfleurs.fr/}
Matrix channel: \texttt{\#garage:deuxfleurs.fr}
\end{frame}
\begin{frame}
\frametitle{Who I am}
\begin{columns}[t]
\begin{column}{.2\textwidth}
\centering
\adjincludegraphics[width=.4\linewidth, valign=t]{../assets/alex.jpg}
\end{column}
\begin{column}{.6\textwidth}
\textbf{Alex Auvolat}\\
PhD; co-founder of Deuxfleurs
\end{column}
\begin{column}{.2\textwidth}
~
\end{column}
\end{columns}
\vspace{2em}
\begin{columns}[t]
\begin{column}{.2\textwidth}
\centering
\adjincludegraphics[width=.5\linewidth, valign=t]{../assets/logos/deuxfleurs.pdf}
\end{column}
\begin{column}{.6\textwidth}
\textbf{Deuxfleurs}\\
A non-profit self-hosting collective,\\
member of the CHATONS network
\end{column}
\begin{column}{.2\textwidth}
\centering
\adjincludegraphics[width=.7\linewidth, valign=t]{../assets/logos/logo_chatons.png}
\end{column}
\end{columns}
\end{frame}
\begin{frame}
\frametitle{Our objective at Deuxfleurs}
\begin{center}
\textbf{Promote self-hosting and small-scale hosting\\
as an alternative to large cloud providers}
\end{center}
\vspace{2em}
\visible<2->{
Why is it hard?
\vspace{2em}
\begin{center}
\textbf{\underline{Resilience}}\\
{\footnotesize we want good uptime/availability with low supervision}
\end{center}
}
\end{frame}
\begin{frame}
\frametitle{Our very low-tech infrastructure}
\only<1,3-6>{
\begin{itemize}
\item \textcolor<4->{gray}{Commodity hardware (e.g. old desktop PCs)\\
\vspace{.5em}
\visible<3->{{\footnotesize (can die at any time)}}}
\vspace{1.5em}
\item<4-> \textcolor<6->{gray}{Regular Internet (e.g. FTTB, FTTH) and power grid connections\\
\vspace{.5em}
\visible<5->{{\footnotesize (can be unavailable randomly)}}}
\vspace{1.5em}
\item<6-> \textbf{Geographical redundancy} (multi-site replication)
\end{itemize}
}
\only<2>{
\begin{center}
\includegraphics[width=.8\linewidth]{../assets/neptune.jpg}
\end{center}
}
\only<7>{
\begin{center}
\includegraphics[width=.8\linewidth]{../assets/inframap_jdll2023.pdf}
\end{center}
}
\end{frame}
\begin{frame}
\frametitle{How to make this happen}
\begin{center}
\only<1>{\includegraphics[width=.8\linewidth]{../assets/intro/slide1.png}}%
\only<2>{\includegraphics[width=.8\linewidth]{../assets/intro/slide2.png}}%
\only<3>{\includegraphics[width=.8\linewidth]{../assets/intro/slide3.png}}%
\end{center}
\end{frame}
\begin{frame}
\frametitle{Distributed file systems are slow}
File systems are complex, for example:
\vspace{1em}
\begin{itemize}
\item Concurrent modification by several processes
\vspace{1em}
\item Folder hierarchies
\vspace{1em}
\item Other requirements of the POSIX spec (e.g.~locks)
\end{itemize}
\vspace{1em}
Coordination in a distributed system is costly
\vspace{1em}
Costs explode with commodity hardware / Internet connections\\
{\small (we experienced this!)}
\end{frame}
\begin{frame}
\frametitle{A simpler solution: object storage}
Only two operations:
\vspace{1em}
\begin{itemize}
\item Put an object at a key
\vspace{1em}
\item Retrieve an object from its key
\end{itemize}
\vspace{1em}
{\footnotesize (and a few others)}
\vspace{1em}
Sufficient for many applications!
\end{frame}
\begin{frame}
\frametitle{A simpler solution: object storage}
\begin{center}
\includegraphics[height=6em]{../assets/logos/Amazon-S3.jpg}
\hspace{3em}
\visible<2->{\includegraphics[height=5em]{../assets/logos/minio.png}}
\hspace{3em}
\visible<3>{\includegraphics[height=6em]{../../logo/garage_hires_crop.png}}
\end{center}
\vspace{1em}
S3: a de-facto standard, many compatible applications
\vspace{1em}
\visible<2->{MinIO is self-hostable but not suited for geo-distributed deployments}
\vspace{1em}
\visible<3->{\textbf{Garage is a self-hosted drop-in replacement for the Amazon S3 object store}}
\end{frame}
% --------- BASED ON CRDTS ----------
\section{Principle 1: based on CRDTs}
\begin{frame}
\frametitle{CRDTs / weak consistency instead of consensus}
\underline{Internally, Garage uses only CRDTs} (conflict-free replicated data types)
\vspace{2em}
Why not Raft, Paxos, ...? Issues of consensus algorithms:
\vspace{1em}
\begin{itemize}
\item<2-> \textbf{Software complexity}
\vspace{1em}
\item<3-> \textbf{Performance issues:}
\vspace{.5em}
\begin{itemize}
\item<4-> The leader is a \textbf{bottleneck} for all requests\\
\vspace{.5em}
\item<5-> \textbf{Sensitive to higher latency} between nodes
\vspace{.5em}
\item<6-> \textbf{Takes time to reconverge} when disrupted (e.g. node going down)
\end{itemize}
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{The data model of object storage}
Object storage is basically a \textbf{key-value store}:
\vspace{.5em}
{\scriptsize
\begin{center}
\begin{tabular}{|l|p{7cm}|}
\hline
\textbf{Key: file path + name} & \textbf{Value: file data + metadata} \\
\hline
\hline
\texttt{index.html} &
\texttt{Content-Type: text/html; charset=utf-8} \newline
\texttt{Content-Length: 24929} \newline
\texttt{<binary blob>} \\
\hline
\texttt{img/logo.svg} &
\texttt{Content-Type: text/svg+xml} \newline
\texttt{Content-Length: 13429} \newline
\texttt{<binary blob>} \\
\hline
\texttt{download/index.html} &
\texttt{Content-Type: text/html; charset=utf-8} \newline
\texttt{Content-Length: 26563} \newline
\texttt{<binary blob>} \\
\hline
\end{tabular}
\end{center}
}
\vspace{.5em}
\begin{itemize}
\item<2-> Maps well to CRDT data types
\item<3> Read-after-write consistency with quorums
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Performance gains in practice}
\begin{center}
\includegraphics[width=.8\linewidth]{../assets/perf/endpoint_latency_0.7_0.8_minio.png}
\end{center}
\end{frame}
% --------- GEO-DISTRIBUTED MODEL ----------
\section{Principle 2: geo-distributed data model}
\begin{frame}
\frametitle{Key-value stores, upgraded: the Dynamo model}
\textbf{Two keys:}
\begin{itemize}
\item Partition key: used to divide data into partitions {\small (a.k.a.~shards)}
\item Sort key: used to identify items inside a partition
\end{itemize}
\vspace{1em}
\begin{center}
\begin{tabular}{|l|l|p{3cm}|}
\hline
\textbf{Partition key: bucket} & \textbf{Sort key: filename} & \textbf{Value} \\
\hline
\hline
\texttt{website} & \texttt{index.html} & (file data) \\
\hline
\texttt{website} & \texttt{img/logo.svg} & (file data) \\
\hline
\texttt{website} & \texttt{download/index.html} & (file data) \\
\hline
\hline
\texttt{backup} & \texttt{borg/index.2822} & (file data) \\
\hline
\texttt{backup} & \texttt{borg/data/2/2329} & (file data) \\
\hline
\texttt{backup} & \texttt{borg/data/2/2680} & (file data) \\
\hline
\hline
\texttt{private} & \texttt{qq3a2nbe1qjq0ebbvo6ocsp6co} & (file data) \\
\hline
\end{tabular}
\end{center}
\end{frame}
\begin{frame}
\frametitle{Layout computation}
\begin{overprint}
\onslide<1>
\begin{center}
\includegraphics[width=\linewidth, trim=0 0 0 -4cm]{../assets/screenshots/garage_status_0.9_prod_zonehl.png}
\end{center}
\onslide<2>
\begin{center}
\includegraphics[width=.7\linewidth]{../assets/map.png}
\end{center}
\end{overprint}
\vspace{1em}
Garage stores replicas on different zones when possible
\end{frame}
\begin{frame}
\frametitle{What a ``layout'' is}
\textbf{A layout is a precomputed index table:}
\vspace{1em}
{\footnotesize
\begin{center}
\begin{tabular}{|l|l|l|l|}
\hline
\textbf{Partition} & \textbf{Node 1} & \textbf{Node 2} & \textbf{Node 3} \\
\hline
\hline
Partition 0 & df-ymk (bespin) & Abricot (scorpio) & Courgette (neptune) \\
\hline
Partition 1 & Ananas (scorpio) & Courgette (neptune) & df-ykl (bespin) \\
\hline
Partition 2 & df-ymf (bespin) & Celeri (neptune) & Abricot (scorpio) \\
\hline
\hspace{1em}$\vdots$ & \hspace{1em}$\vdots$ & \hspace{1em}$\vdots$ & \hspace{1em}$\vdots$ \\
\hline
Partition 255 & Concombre (neptune) & df-ykl (bespin) & Abricot (scorpio) \\
\hline
\end{tabular}
\end{center}
}
\vspace{2em}
\visible<2->{
The index table is built centrally using an optimal algorithm,\\
then propagated to all nodes
}
\vspace{1em}
\visible<3->{
\footnotesize
Oulamara, M., \& Auvolat, A. (2023). \emph{An algorithm for geo-distributed and redundant storage in Garage}.\\ arXiv preprint arXiv:2302.13798.
}
\end{frame}
\begin{frame}
\frametitle{The relationship between \emph{partition} and \emph{partition key}}
\begin{center}
\begin{tabular}{|l|l|l|l|}
\hline
\textbf{Partition key} & \textbf{Partition} & \textbf{Sort key} & \textbf{Value} \\
\hline
\hline
\texttt{website} & Partition 12 & \texttt{index.html} & (file data) \\
\hline
\texttt{website} & Partition 12 & \texttt{img/logo.svg} & (file data) \\
\hline
\texttt{website} & Partition 12 &\texttt{download/index.html} & (file data) \\
\hline
\hline
\texttt{backup} & Partition 42 & \texttt{borg/index.2822} & (file data) \\
\hline
\texttt{backup} & Partition 42 & \texttt{borg/data/2/2329} & (file data) \\
\hline
\texttt{backup} & Partition 42 & \texttt{borg/data/2/2680} & (file data) \\
\hline
\hline
\texttt{private} & Partition 42 & \texttt{qq3a2nbe1qjq0ebbvo6ocsp6co} & (file data) \\
\hline
\end{tabular}
\end{center}
\vspace{1em}
\textbf{To read or write an item:} hash partition key
\\ \hspace{5cm} $\to$ determine partition number (first 8 bits)
\\ \hspace{5cm} $\to$ find associated nodes
\end{frame}
\begin{frame}
\frametitle{Garage's internal data structures}
\centering
\includegraphics[width=.75\columnwidth]{../assets/garage_tables.pdf}
\end{frame}
% ---------- OPERATING GARAGE ---------
\section{Operating Garage clusters}
\begin{frame}
\frametitle{Operating Garage}
\begin{center}
\only<1-2>{
\includegraphics[width=.9\linewidth]{../assets/screenshots/garage_status_0.10.png}
\\\vspace{1em}
\visible<2>{\includegraphics[width=.9\linewidth]{../assets/screenshots/garage_status_unhealthy_0.10.png}}
}
\end{center}
\end{frame}
\begin{frame}
\frametitle{Background synchronization}
\begin{center}
\includegraphics[width=.6\linewidth]{../assets/garage_sync.drawio.pdf}
\end{center}
\end{frame}
\begin{frame}
\frametitle{Digging deeper}
\begin{center}
\only<1>{\includegraphics[width=.9\linewidth]{../assets/screenshots/garage_stats_0.10.png}}
\only<2>{\includegraphics[width=.5\linewidth]{../assets/screenshots/garage_worker_list_0.10.png}}
\only<3>{\includegraphics[width=.6\linewidth]{../assets/screenshots/garage_worker_param_0.10.png}}
\end{center}
\end{frame}
\begin{frame}
\frametitle{Monitoring with Prometheus + Grafana}
\begin{center}
\includegraphics[width=.9\linewidth]{../assets/screenshots/grafana_dashboard.png}
\end{center}
\end{frame}
\begin{frame}
\frametitle{Debugging with traces}
\begin{center}
\includegraphics[width=.8\linewidth]{../assets/screenshots/jaeger_listobjects.png}
\end{center}
\end{frame}
% ---------- SCALING GARAGE ---------
\section{Scaling Garage clusters}
\begin{frame}
\frametitle{Potential limitations and bottlenecks}
\begin{itemize}
\item Global:
\begin{itemize}
\item Max. $\sim$100 nodes per cluster (excluding gateways)
\end{itemize}
\vspace{1em}
\item Metadata:
\begin{itemize}
\item One big bucket = bottleneck, object list on 3 nodes only
\end{itemize}
\vspace{1em}
\item Block manager:
\begin{itemize}
\item Lots of small files on disk
\item Processing the resync queue can be slow
\end{itemize}
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Deployment advice for very large clusters}
\begin{itemize}
\item Metadata storage:
\begin{itemize}
\item ZFS mirror (x2) on fast NVMe
\item Use LMDB storage engine
\end{itemize}
\vspace{.5em}
\item Data block storage:
\begin{itemize}
\item Use Garage's native multi-HDD support
\item XFS on individual drives
\item Increase block size (1MB $\to$ 10MB, requires more RAM and good networking)
\item Tune \texttt{resync-tranquility} and \texttt{resync-worker-count} dynamically
\end{itemize}
\vspace{.5em}
\item Other:
\begin{itemize}
\item Split data over several buckets
\item Use less than 100 storage nodes
\item Use gateway nodes
\end{itemize}
\vspace{.5em}
\end{itemize}
Our deployments: $< 10$ TB. Some people have done more!
\end{frame}
% ======================================== END
% ======================================== END
% ======================================== END
\begin{frame}
\frametitle{Where to find us}
\begin{center}
\includegraphics[width=.25\linewidth]{../../logo/garage_hires.png}\\
\vspace{-1em}
\url{https://garagehq.deuxfleurs.fr/}\\
\url{mailto:garagehq@deuxfleurs.fr}\\
\texttt{\#garage:deuxfleurs.fr} on Matrix
\vspace{1.5em}
\includegraphics[width=.06\linewidth]{../assets/logos/rust_logo.png}
\includegraphics[width=.13\linewidth]{../assets/logos/AGPLv3_Logo.png}
\end{center}
\end{frame}
\end{document}
%% vim: set ts=4 sw=4 tw=0 noet spelllang=en :

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

After

Width:  |  Height:  |  Size: 87 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 81 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 124 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 84 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 81 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 81 KiB

File diff suppressed because it is too large Load diff

After

Width:  |  Height:  |  Size: 315 KiB

File diff suppressed because one or more lines are too long

After

Width:  |  Height:  |  Size: 286 KiB

View file

@ -28,11 +28,11 @@
}, },
"flake-compat": { "flake-compat": {
"locked": { "locked": {
"lastModified": 1688025799, "lastModified": 1717312683,
"narHash": "sha256-ktpB4dRtnksm9F5WawoIkEneh1nrEvuxb5lJFt1iOyw=", "narHash": "sha256-FrlieJH50AuvagamEvWMIE6D2OAnERuDboFDYAED/dE=",
"owner": "nix-community", "owner": "nix-community",
"repo": "flake-compat", "repo": "flake-compat",
"rev": "8bf105319d44f6b9f0d764efa4fdef9f1cc9ba1c", "rev": "38fd3954cf65ce6faf3d0d45cd26059e059f07ea",
"type": "github" "type": "github"
}, },
"original": { "original": {
@ -42,33 +42,12 @@
} }
}, },
"flake-utils": { "flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": { "locked": {
"lastModified": 1681202837, "lastModified": 1659877975,
"narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=", "narHash": "sha256-zllb8aq3YO3h8B/U0/J1WBgAL8EX5yWf5pMj3G0NAmc=",
"owner": "numtide", "owner": "numtide",
"repo": "flake-utils", "repo": "flake-utils",
"rev": "cfacdce06f30d2b68473a46042957675eebb3401", "rev": "c0e246b9b83f637f4681389ecabcb2681b4f3af0",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_2": {
"inputs": {
"systems": "systems_2"
},
"locked": {
"lastModified": 1681202837,
"narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "cfacdce06f30d2b68473a46042957675eebb3401",
"type": "github" "type": "github"
}, },
"original": { "original": {
@ -79,11 +58,11 @@
}, },
"nixpkgs": { "nixpkgs": {
"locked": { "locked": {
"lastModified": 1682109806, "lastModified": 1724395761,
"narHash": "sha256-d9g7RKNShMLboTWwukM+RObDWWpHKaqTYXB48clBWXI=", "narHash": "sha256-zRkDV/nbrnp3Y8oCADf5ETl1sDrdmAW6/bBVJ8EbIdQ=",
"owner": "NixOS", "owner": "NixOS",
"repo": "nixpkgs", "repo": "nixpkgs",
"rev": "2362848adf8def2866fabbffc50462e929d7fffb", "rev": "ae815cee91b417be55d43781eb4b73ae1ecc396c",
"type": "github" "type": "github"
}, },
"original": { "original": {
@ -95,17 +74,17 @@
}, },
"nixpkgs_2": { "nixpkgs_2": {
"locked": { "locked": {
"lastModified": 1707091808, "lastModified": 1724681257,
"narHash": "sha256-LahKBAfGbY836gtpVNnWwBTIzN7yf/uYM/S0g393r0Y=", "narHash": "sha256-EJRuc5Qp7yfXko5ZNeEMYAs4DzAvkCyALuJ/tGllhN4=",
"owner": "NixOS", "owner": "NixOS",
"repo": "nixpkgs", "repo": "nixpkgs",
"rev": "9f2ee8c91ac42da3ae6c6a1d21555f283458247e", "rev": "0239aeb2f82ea27ccd6b61582b8f7fb8750eeada",
"type": "github" "type": "github"
}, },
"original": { "original": {
"owner": "NixOS", "owner": "NixOS",
"repo": "nixpkgs", "repo": "nixpkgs",
"rev": "9f2ee8c91ac42da3ae6c6a1d21555f283458247e", "rev": "0239aeb2f82ea27ccd6b61582b8f7fb8750eeada",
"type": "github" "type": "github"
} }
}, },
@ -122,15 +101,14 @@
}, },
"rust-overlay": { "rust-overlay": {
"inputs": { "inputs": {
"flake-utils": "flake-utils_2",
"nixpkgs": "nixpkgs" "nixpkgs": "nixpkgs"
}, },
"locked": { "locked": {
"lastModified": 1707271822, "lastModified": 1724638882,
"narHash": "sha256-/DZsoPH5GBzOpVEGz5PgJ7vh8Q6TcrJq5u8FcBjqAfI=", "narHash": "sha256-ap2jIQi/FuUHR6HCht6ASWhoz8EiB99XmI8Esot38VE=",
"owner": "oxalica", "owner": "oxalica",
"repo": "rust-overlay", "repo": "rust-overlay",
"rev": "7a94fe7690d2bdfe1aab475382a505e14dc114a6", "rev": "19b70f147b9c67a759e35824b241f1ed92e46694",
"type": "github" "type": "github"
}, },
"original": { "original": {
@ -138,36 +116,6 @@
"repo": "rust-overlay", "repo": "rust-overlay",
"type": "github" "type": "github"
} }
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_2": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
} }
}, },
"root": "root", "root": "root",

View file

@ -2,9 +2,9 @@
description = description =
"Garage, an S3-compatible distributed object store for self-hosted deployments"; "Garage, an S3-compatible distributed object store for self-hosted deployments";
# Nixpkgs 23.11 as of 2024-02-07, has rustc v1.73 # Nixpkgs 24.05 as of 2024-08-26 has rustc v1.77
inputs.nixpkgs.url = inputs.nixpkgs.url =
"github:NixOS/nixpkgs/9f2ee8c91ac42da3ae6c6a1d21555f283458247e"; "github:NixOS/nixpkgs/0239aeb2f82ea27ccd6b61582b8f7fb8750eeada";
inputs.flake-compat.url = "github:nix-community/flake-compat"; inputs.flake-compat.url = "github:nix-community/flake-compat";
@ -17,9 +17,9 @@
# - rustc v1.66 # - rustc v1.66
# url = "github:cargo2nix/cargo2nix/8fb57a670f7993bfc24099c33eb9c5abb51f29a2"; # url = "github:cargo2nix/cargo2nix/8fb57a670f7993bfc24099c33eb9c5abb51f29a2";
# Rust overlay as of 2024-02-07 # Rust overlay as of 2024-08-26
inputs.rust-overlay.url = inputs.rust-overlay.url =
"github:oxalica/rust-overlay/7a94fe7690d2bdfe1aab475382a505e14dc114a6"; "github:oxalica/rust-overlay/19b70f147b9c67a759e35824b241f1ed92e46694";
inputs.nixpkgs.follows = "nixpkgs"; inputs.nixpkgs.follows = "nixpkgs";
inputs.flake-compat.follows = "flake-compat"; inputs.flake-compat.follows = "flake-compat";
@ -76,6 +76,7 @@
# import the full shell using `nix develop .#full` # import the full shell using `nix develop .#full`
full = shellWithPackages (with pkgs; [ full = shellWithPackages (with pkgs; [
rustfmt rustfmt
rust-analyzer
clang clang
mold mold
# ---- extra packages for dev tasks ---- # ---- extra packages for dev tasks ----

View file

@ -1,158 +0,0 @@
#!/usr/bin/env python
import os
import requests
from datetime import datetime
# let's talk to our AWS Elasticsearch cluster
#from requests_aws4auth import AWS4Auth
#auth = AWS4Auth('GK31c2f218a2e44f485b94239e',
# 'b892c0665f0ada8a4755dae98baa3b133590e11dae3bcc1f9d769d67f16c3835',
# 'us-east-1',
# 's3')
from aws_requests_auth.aws_auth import AWSRequestsAuth
auth = AWSRequestsAuth(aws_access_key='GK31c2f218a2e44f485b94239e',
aws_secret_access_key='b892c0665f0ada8a4755dae98baa3b133590e11dae3bcc1f9d769d67f16c3835',
aws_host='localhost:3812',
aws_region='us-east-1',
aws_service='k2v')
print("-- ReadIndex")
response = requests.get('http://localhost:3812/alex',
auth=auth)
print(response.headers)
print(response.text)
sort_keys = ["a", "b", "c", "d"]
for sk in sort_keys:
print("-- (%s) Put initial (no CT)"%sk)
response = requests.put('http://localhost:3812/alex/root?sort_key=%s'%sk,
auth=auth,
data='{}: Hello, world!'.format(datetime.timestamp(datetime.now())))
print(response.headers)
print(response.text)
print("-- Get")
response = requests.get('http://localhost:3812/alex/root?sort_key=%s'%sk,
auth=auth)
print(response.headers)
print(response.text)
ct = response.headers["x-garage-causality-token"]
print("-- ReadIndex")
response = requests.get('http://localhost:3812/alex',
auth=auth)
print(response.headers)
print(response.text)
print("-- Put with CT")
response = requests.put('http://localhost:3812/alex/root?sort_key=%s'%sk,
auth=auth,
headers={'x-garage-causality-token': ct},
data='{}: Good bye, world!'.format(datetime.timestamp(datetime.now())))
print(response.headers)
print(response.text)
print("-- Get")
response = requests.get('http://localhost:3812/alex/root?sort_key=%s'%sk,
auth=auth)
print(response.headers)
print(response.text)
print("-- Put again with same CT (concurrent)")
response = requests.put('http://localhost:3812/alex/root?sort_key=%s'%sk,
auth=auth,
headers={'x-garage-causality-token': ct},
data='{}: Concurrent value, oops'.format(datetime.timestamp(datetime.now())))
print(response.headers)
print(response.text)
for sk in sort_keys:
print("-- (%s) Get"%sk)
response = requests.get('http://localhost:3812/alex/root?sort_key=%s'%sk,
auth=auth)
print(response.headers)
print(response.text)
ct = response.headers["x-garage-causality-token"]
print("-- Delete")
response = requests.delete('http://localhost:3812/alex/root?sort_key=%s'%sk,
headers={'x-garage-causality-token': ct},
auth=auth)
print(response.headers)
print(response.text)
print("-- ReadIndex")
response = requests.get('http://localhost:3812/alex',
auth=auth)
print(response.headers)
print(response.text)
print("-- InsertBatch")
response = requests.post('http://localhost:3812/alex',
auth=auth,
data='''
[
{"pk": "root", "sk": "a", "ct": null, "v": "aW5pdGlhbCB0ZXN0Cg=="},
{"pk": "root", "sk": "b", "ct": null, "v": "aW5pdGlhbCB0ZXN1Cg=="},
{"pk": "root", "sk": "c", "ct": null, "v": "aW5pdGlhbCB0ZXN2Cg=="}
]
''')
print(response.headers)
print(response.text)
print("-- ReadIndex")
response = requests.get('http://localhost:3812/alex',
auth=auth)
print(response.headers)
print(response.text)
for sk in sort_keys:
print("-- (%s) Get"%sk)
response = requests.get('http://localhost:3812/alex/root?sort_key=%s'%sk,
auth=auth)
print(response.headers)
print(response.text)
ct = response.headers["x-garage-causality-token"]
print("-- ReadBatch")
response = requests.post('http://localhost:3812/alex?search',
auth=auth,
data='''
[
{"partitionKey": "root"},
{"partitionKey": "root", "tombstones": true},
{"partitionKey": "root", "tombstones": true, "limit": 2},
{"partitionKey": "root", "start": "c", "singleItem": true},
{"partitionKey": "root", "start": "b", "end": "d", "tombstones": true}
]
''')
print(response.headers)
print(response.text)
print("-- DeleteBatch")
response = requests.post('http://localhost:3812/alex?delete',
auth=auth,
data='''
[
{"partitionKey": "root", "start": "b", "end": "c"}
]
''')
print(response.headers)
print(response.text)
print("-- ReadBatch")
response = requests.post('http://localhost:3812/alex?search',
auth=auth,
data='''
[
{"partitionKey": "root"}
]
''')
print(response.headers)
print(response.text)

View file

@ -20,7 +20,7 @@ let
}; };
toolchainOptions = { toolchainOptions = {
rustVersion = "1.73.0"; rustVersion = "1.77.0";
extraRustComponents = [ "clippy" ]; extraRustComponents = [ "clippy" ];
}; };
@ -168,13 +168,12 @@ let
rootFeatures = if features != null then rootFeatures = if features != null then
features features
else else
([ "garage/bundled-libs" "garage/sled" "garage/lmdb" "garage/k2v" ] ++ (if release then [ ([ "garage/bundled-libs" "garage/lmdb" "garage/sqlite" "garage/k2v" ] ++ (if release then [
"garage/consul-discovery" "garage/consul-discovery"
"garage/kubernetes-discovery" "garage/kubernetes-discovery"
"garage/metrics" "garage/metrics"
"garage/telemetry-otlp" "garage/telemetry-otlp"
"garage/lmdb" "garage/syslog"
"garage/sqlite"
] else ] else
[ ])); [ ]));

View file

@ -15,10 +15,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes # This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version. # to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/) # Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.4.1 version: 0.5.1
# This is the version number of the application being deployed. This version number should be # This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to # incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using. # follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes. # It is recommended to use it with quotes.
appVersion: "v0.9.2" appVersion: "v1.0.1"

View file

@ -11,6 +11,7 @@ spec:
{{- if eq .Values.deployment.kind "StatefulSet" }} {{- if eq .Values.deployment.kind "StatefulSet" }}
replicas: {{ .Values.deployment.replicaCount }} replicas: {{ .Values.deployment.replicaCount }}
serviceName: {{ include "garage.fullname" . }} serviceName: {{ include "garage.fullname" . }}
podManagementPolicy: {{ .Values.deployment.podManagementPolicy }}
{{- end }} {{- end }}
template: template:
metadata: metadata:
@ -63,6 +64,10 @@ spec:
name: web-api name: web-api
- containerPort: 3903 - containerPort: 3903
name: admin name: admin
{{- with .Values.environment }}
env:
{{- toYaml . | nindent 12 }}
{{- end }}
volumeMounts: volumeMounts:
- name: meta - name: meta
mountPath: /mnt/meta mountPath: /mnt/meta
@ -71,6 +76,9 @@ spec:
- name: etc - name: etc
mountPath: /etc/garage.toml mountPath: /etc/garage.toml
subPath: garage.toml subPath: garage.toml
{{- with .Values.extraVolumeMounts }}
{{- toYaml . | nindent 12 }}
{{- end }}
# TODO # TODO
# livenessProbe: # livenessProbe:
# httpGet: # httpGet:
@ -105,6 +113,9 @@ spec:
- name: data - name: data
emptyDir: {} emptyDir: {}
{{- end }} {{- end }}
{{- with .Values.extraVolumes }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }} {{- with .Values.nodeSelector }}
nodeSelector: nodeSelector:
{{- toYaml . | nindent 8 }} {{- toYaml . | nindent 8 }}

View file

@ -6,18 +6,13 @@
garage: garage:
# Can be changed for better performance on certain systems # Can be changed for better performance on certain systems
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db-engine-since-v0-8-0 # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db-engine-since-v0-8-0
dbEngine: "sled" dbEngine: "lmdb"
# Defaults is 1MB # Defaults is 1MB
# An increase can result in better performance in certain scenarios # An increase can result in better performance in certain scenarios
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#block-size # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#block-size
blockSize: "1048576" blockSize: "1048576"
# Tuning parameters for the sled DB engine
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#sled-cache-capacity
sledCacheCapacity: "134217728"
sledFlushEveryMs: "2000"
# Default to 3 replicas, see the replication_mode section at # Default to 3 replicas, see the replication_mode section at
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication-mode # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication-mode
replicationMode: "3" replicationMode: "3"
@ -50,11 +45,6 @@ garage:
block_size = {{ .Values.garage.blockSize }} block_size = {{ .Values.garage.blockSize }}
{{- if eq .Values.garage.dbEngine "sled"}}
sled_cache_capacity = {{ .Values.garage.sledCacheCapacity }}
sled_flush_every_ms = {{ .Values.garage.sledFlushEveryMs }}
{{- end }}
replication_mode = "{{ .Values.garage.replicationMode }}" replication_mode = "{{ .Values.garage.replicationMode }}"
compression_level = {{ .Values.garage.compressionLevel }} compression_level = {{ .Values.garage.compressionLevel }}
@ -106,6 +96,8 @@ deployment:
kind: StatefulSet kind: StatefulSet
# Number of StatefulSet replicas/garage nodes to start # Number of StatefulSet replicas/garage nodes to start
replicaCount: 3 replicaCount: 3
# If using statefulset, allow Parallel or OrderedReady (default)
podManagementPolicy: OrderedReady
image: image:
repository: dxflrs/amd64_garage repository: dxflrs/amd64_garage
@ -224,6 +216,12 @@ tolerations: []
affinity: {} affinity: {}
environment: {}
extraVolumes: {}
extraVolumeMounts: {}
monitoring: monitoring:
metrics: metrics:
# If true, a service for monitoring is created with a prometheus.io/scrape annotation # If true, a service for monitoring is created with a prometheus.io/scrape annotation

View file

@ -30,11 +30,11 @@ Vagrant.configure("2") do |config|
config.vm.define "n6" do |config| vm(config, "n6", "192.168.56.26") end config.vm.define "n6" do |config| vm(config, "n6", "192.168.56.26") end
config.vm.define "n7" do |config| vm(config, "n7", "192.168.56.27") end config.vm.define "n7" do |config| vm(config, "n7", "192.168.56.27") end
config.vm.define "n8" do |config| vm(config, "n8", "192.168.56.28") end #config.vm.define "n8" do |config| vm(config, "n8", "192.168.56.28") end
config.vm.define "n9" do |config| vm(config, "n9", "192.168.56.29") end #config.vm.define "n9" do |config| vm(config, "n9", "192.168.56.29") end
config.vm.define "n10" do |config| vm(config, "n10", "192.168.56.30") end #config.vm.define "n10" do |config| vm(config, "n10", "192.168.56.30") end
config.vm.define "n11" do |config| vm(config, "n11", "192.168.56.31") end #config.vm.define "n11" do |config| vm(config, "n11", "192.168.56.31") end
config.vm.define "n12" do |config| vm(config, "n12", "192.168.56.32") end #config.vm.define "n12" do |config| vm(config, "n12", "192.168.56.32") end
config.vm.define "n13" do |config| vm(config, "n13", "192.168.56.33") end #config.vm.define "n13" do |config| vm(config, "n13", "192.168.56.33") end
config.vm.define "n14" do |config| vm(config, "n14", "192.168.56.34") end #config.vm.define "n14" do |config| vm(config, "n14", "192.168.56.34") end
end end

View file

@ -3,11 +3,10 @@
set -x set -x
#for ppatch in task3c task3a tsfix2; do #for ppatch in task3c task3a tsfix2; do
for ppatch in tsfix2; do for ppatch in v093 v1rc1; do
#for psc in c cp cdp r pr cpr dpr; do #for psc in c cp cdp r pr cpr dpr; do
for psc in cdp r pr cpr dpr; do for ptsk in reg2 set2; do
#for ptsk in reg2 set1 set2; do for psc in c cp cdp r pr cpr dpr; do
for ptsk in set1; do
for irun in $(seq 10); do for irun in $(seq 10); do
lein run test --nodes-file nodes.vagrant \ lein run test --nodes-file nodes.vagrant \
--time-limit 60 --rate 100 --concurrency 100 --ops-per-key 100 \ --time-limit 60 --rate 100 --concurrency 100 --ops-per-key 100 \

View file

@ -38,7 +38,9 @@
"tsfix2" "c82d91c6bccf307186332b6c5c6fc0b128b1b2b1" "tsfix2" "c82d91c6bccf307186332b6c5c6fc0b128b1b2b1"
"task3a" "707442f5de416fdbed4681a33b739f0a787b7834" "task3a" "707442f5de416fdbed4681a33b739f0a787b7834"
"task3b" "431b28e0cfdc9cac6c649193cf602108a8b02997" "task3b" "431b28e0cfdc9cac6c649193cf602108a8b02997"
"task3c" "0041b013a473e3ae72f50209d8f79db75a72848b"}) "task3c" "0041b013a473e3ae72f50209d8f79db75a72848b"
"v093" "v0.9.3"
"v1rc1" "v1.0.0-rc1"})
(def cli-opts (def cli-opts
"Additional command line options." "Additional command line options."

View file

@ -43,7 +43,7 @@
"rpc_bind_addr = \"0.0.0.0:3901\"\n" "rpc_bind_addr = \"0.0.0.0:3901\"\n"
"rpc_public_addr = \"" node ":3901\"\n" "rpc_public_addr = \"" node ":3901\"\n"
"db_engine = \"lmdb\"\n" "db_engine = \"lmdb\"\n"
"replication_mode = \"2\"\n" "replication_mode = \"3\"\n"
"data_dir = \"" data-dir "\"\n" "data_dir = \"" data-dir "\"\n"
"metadata_dir = \"" meta-dir "\"\n" "metadata_dir = \"" meta-dir "\"\n"
"[s3_api]\n" "[s3_api]\n"

View file

@ -82,6 +82,19 @@ if [ -z "$SKIP_AWS" ]; then
exit 1 exit 1
fi fi
aws s3api delete-object --bucket eprouvette --key upload aws s3api delete-object --bucket eprouvette --key upload
echo "🛠️ Test SSE-C with awscli (aws s3)"
SSEC_KEY="u8zCfnEyt5Imo/krN+sxA1DQXxLWtPJavU6T6gOVj1Y="
SSEC_KEY_MD5="jMGbs3GyZkYjJUP6q5jA7g=="
echo "$SSEC_KEY" | base64 -d > /tmp/garage.ssec-key
for idx in {1,2}.rnd; do
aws s3 cp --sse-c AES256 --sse-c-key fileb:///tmp/garage.ssec-key \
"/tmp/garage.$idx" "s3://eprouvette/garage.$idx.aws.sse-c"
aws s3 cp --sse-c AES256 --sse-c-key fileb:///tmp/garage.ssec-key \
"s3://eprouvette/garage.$idx.aws.sse-c" "/tmp/garage.$idx.dl.sse-c"
diff "/tmp/garage.$idx" "/tmp/garage.$idx.dl.sse-c"
aws s3api delete-object --bucket eprouvette --key "garage.$idx.aws.sse-c"
done
fi fi
# S3CMD # S3CMD

View file

@ -11,6 +11,7 @@ in
{ {
# --- Dev shell inherited from flake.nix --- # --- Dev shell inherited from flake.nix ---
devShell = devShells.default; devShell = devShells.default;
devShellFull = devShells.full;
# --- Continuous integration shell --- # --- Continuous integration shell ---
# The shell used for all CI jobs (along with devShell) # The shell used for all CI jobs (along with devShell)

View file

@ -1,6 +1,6 @@
[package] [package]
name = "garage_api" name = "garage_api"
version = "0.9.2" version = "1.0.1"
authors = ["Alex Auvolat <alex@adnab.me>"] authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"
@ -21,11 +21,15 @@ garage_net.workspace = true
garage_util.workspace = true garage_util.workspace = true
garage_rpc.workspace = true garage_rpc.workspace = true
aes-gcm.workspace = true
argon2.workspace = true argon2.workspace = true
async-compression.workspace = true
async-trait.workspace = true async-trait.workspace = true
base64.workspace = true base64.workspace = true
bytes.workspace = true bytes.workspace = true
chrono.workspace = true chrono.workspace = true
crc32fast.workspace = true
crc32c.workspace = true
crypto-common.workspace = true crypto-common.workspace = true
err-derive.workspace = true err-derive.workspace = true
hex.workspace = true hex.workspace = true
@ -35,12 +39,14 @@ tracing.workspace = true
md-5.workspace = true md-5.workspace = true
nom.workspace = true nom.workspace = true
pin-project.workspace = true pin-project.workspace = true
sha1.workspace = true
sha2.workspace = true sha2.workspace = true
futures.workspace = true futures.workspace = true
futures-util.workspace = true futures-util.workspace = true
tokio.workspace = true tokio.workspace = true
tokio-stream.workspace = true tokio-stream.workspace = true
tokio-util.workspace = true
form_urlencoded.workspace = true form_urlencoded.workspace = true
http.workspace = true http.workspace = true

View file

@ -276,7 +276,7 @@ impl ApiHandler for AdminApiServer {
Endpoint::GetClusterLayout => handle_get_cluster_layout(&self.garage).await, Endpoint::GetClusterLayout => handle_get_cluster_layout(&self.garage).await,
Endpoint::UpdateClusterLayout => handle_update_cluster_layout(&self.garage, req).await, Endpoint::UpdateClusterLayout => handle_update_cluster_layout(&self.garage, req).await,
Endpoint::ApplyClusterLayout => handle_apply_cluster_layout(&self.garage, req).await, Endpoint::ApplyClusterLayout => handle_apply_cluster_layout(&self.garage, req).await,
Endpoint::RevertClusterLayout => handle_revert_cluster_layout(&self.garage, req).await, Endpoint::RevertClusterLayout => handle_revert_cluster_layout(&self.garage).await,
// Keys // Keys
Endpoint::ListKeys => handle_list_keys(&self.garage).await, Endpoint::ListKeys => handle_list_keys(&self.garage).await,
Endpoint::GetKeyInfo { Endpoint::GetKeyInfo {

View file

@ -123,7 +123,7 @@ async fn bucket_info_results(
.table .table
.get(&bucket_id, &EmptyKey) .get(&bucket_id, &EmptyKey)
.await? .await?
.map(|x| x.filtered_values(&garage.system.ring.borrow())) .map(|x| x.filtered_values(&garage.system.cluster_layout()))
.unwrap_or_default(); .unwrap_or_default();
let mpu_counters = garage let mpu_counters = garage
@ -131,7 +131,7 @@ async fn bucket_info_results(
.table .table
.get(&bucket_id, &EmptyKey) .get(&bucket_id, &EmptyKey)
.await? .await?
.map(|x| x.filtered_values(&garage.system.ring.borrow())) .map(|x| x.filtered_values(&garage.system.cluster_layout()))
.unwrap_or_default(); .unwrap_or_default();
let mut relevant_keys = HashMap::new(); let mut relevant_keys = HashMap::new();

View file

@ -1,3 +1,4 @@
use std::collections::HashMap;
use std::net::SocketAddr; use std::net::SocketAddr;
use std::sync::Arc; use std::sync::Arc;
@ -16,25 +17,99 @@ use crate::admin::error::*;
use crate::helpers::{json_ok_response, parse_json_body}; use crate::helpers::{json_ok_response, parse_json_body};
pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> { pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
let layout = garage.system.cluster_layout();
let mut nodes = garage
.system
.get_known_nodes()
.into_iter()
.map(|i| {
(
i.id,
NodeResp {
id: hex::encode(i.id),
addr: i.addr,
hostname: i.status.hostname,
is_up: i.is_up,
last_seen_secs_ago: i.last_seen_secs_ago,
data_partition: i
.status
.data_disk_avail
.map(|(avail, total)| FreeSpaceResp {
available: avail,
total,
}),
metadata_partition: i.status.meta_disk_avail.map(|(avail, total)| {
FreeSpaceResp {
available: avail,
total,
}
}),
..Default::default()
},
)
})
.collect::<HashMap<_, _>>();
for (id, _, role) in layout.current().roles.items().iter() {
if let layout::NodeRoleV(Some(r)) = role {
let role = NodeRoleResp {
id: hex::encode(id),
zone: r.zone.to_string(),
capacity: r.capacity,
tags: r.tags.clone(),
};
match nodes.get_mut(id) {
None => {
nodes.insert(
*id,
NodeResp {
id: hex::encode(id),
role: Some(role),
..Default::default()
},
);
}
Some(n) => {
n.role = Some(role);
}
}
}
}
for ver in layout.versions().iter().rev().skip(1) {
for (id, _, role) in ver.roles.items().iter() {
if let layout::NodeRoleV(Some(r)) = role {
if r.capacity.is_some() {
if let Some(n) = nodes.get_mut(id) {
if n.role.is_none() {
n.draining = true;
}
} else {
nodes.insert(
*id,
NodeResp {
id: hex::encode(id),
draining: true,
..Default::default()
},
);
}
}
}
}
}
let mut nodes = nodes.into_values().collect::<Vec<_>>();
nodes.sort_by(|x, y| x.id.cmp(&y.id));
let res = GetClusterStatusResponse { let res = GetClusterStatusResponse {
node: hex::encode(garage.system.id), node: hex::encode(garage.system.id),
garage_version: garage_util::version::garage_version(), garage_version: garage_util::version::garage_version(),
garage_features: garage_util::version::garage_features(), garage_features: garage_util::version::garage_features(),
rust_version: garage_util::version::rust_version(), rust_version: garage_util::version::rust_version(),
db_engine: garage.db.engine(), db_engine: garage.db.engine(),
known_nodes: garage layout_version: layout.current().version,
.system nodes,
.get_known_nodes()
.into_iter()
.map(|i| KnownNodeResp {
id: hex::encode(i.id),
addr: i.addr,
is_up: i.is_up,
last_seen_secs_ago: i.last_seen_secs_ago,
hostname: i.status.hostname,
})
.collect(),
layout: format_cluster_layout(&garage.system.get_cluster_layout()),
}; };
Ok(json_ok_response(&res)?) Ok(json_ok_response(&res)?)
@ -85,13 +160,14 @@ pub async fn handle_connect_cluster_nodes(
} }
pub async fn handle_get_cluster_layout(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> { pub async fn handle_get_cluster_layout(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
let res = format_cluster_layout(&garage.system.get_cluster_layout()); let res = format_cluster_layout(garage.system.cluster_layout().inner());
Ok(json_ok_response(&res)?) Ok(json_ok_response(&res)?)
} }
fn format_cluster_layout(layout: &layout::ClusterLayout) -> GetClusterLayoutResponse { fn format_cluster_layout(layout: &layout::LayoutHistory) -> GetClusterLayoutResponse {
let roles = layout let roles = layout
.current()
.roles .roles
.items() .items()
.iter() .iter()
@ -105,10 +181,12 @@ fn format_cluster_layout(layout: &layout::ClusterLayout) -> GetClusterLayoutResp
.collect::<Vec<_>>(); .collect::<Vec<_>>();
let staged_role_changes = layout let staged_role_changes = layout
.staging_roles .staging
.get()
.roles
.items() .items()
.iter() .iter()
.filter(|(k, _, v)| layout.roles.get(k) != Some(v)) .filter(|(k, _, v)| layout.current().roles.get(k) != Some(v))
.map(|(k, _, v)| match &v.0 { .map(|(k, _, v)| match &v.0 {
None => NodeRoleChange { None => NodeRoleChange {
id: hex::encode(k), id: hex::encode(k),
@ -126,7 +204,7 @@ fn format_cluster_layout(layout: &layout::ClusterLayout) -> GetClusterLayoutResp
.collect::<Vec<_>>(); .collect::<Vec<_>>();
GetClusterLayoutResponse { GetClusterLayoutResponse {
version: layout.version, version: layout.current().version,
roles, roles,
staged_role_changes, staged_role_changes,
} }
@ -155,8 +233,8 @@ struct GetClusterStatusResponse {
garage_features: Option<&'static [&'static str]>, garage_features: Option<&'static [&'static str]>,
rust_version: &'static str, rust_version: &'static str,
db_engine: String, db_engine: String,
known_nodes: Vec<KnownNodeResp>, layout_version: u64,
layout: GetClusterLayoutResponse, nodes: Vec<NodeResp>,
} }
#[derive(Serialize)] #[derive(Serialize)]
@ -190,14 +268,27 @@ struct NodeRoleResp {
tags: Vec<String>, tags: Vec<String>,
} }
#[derive(Serialize)] #[derive(Serialize, Default)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
struct KnownNodeResp { struct FreeSpaceResp {
available: u64,
total: u64,
}
#[derive(Serialize, Default)]
#[serde(rename_all = "camelCase")]
struct NodeResp {
id: String, id: String,
addr: SocketAddr, role: Option<NodeRoleResp>,
addr: Option<SocketAddr>,
hostname: Option<String>,
is_up: bool, is_up: bool,
last_seen_secs_ago: Option<u64>, last_seen_secs_ago: Option<u64>,
hostname: String, draining: bool,
#[serde(skip_serializing_if = "Option::is_none")]
data_partition: Option<FreeSpaceResp>,
#[serde(skip_serializing_if = "Option::is_none")]
metadata_partition: Option<FreeSpaceResp>,
} }
// ---- update functions ---- // ---- update functions ----
@ -208,10 +299,10 @@ pub async fn handle_update_cluster_layout(
) -> Result<Response<ResBody>, Error> { ) -> Result<Response<ResBody>, Error> {
let updates = parse_json_body::<UpdateClusterLayoutRequest, _, Error>(req).await?; let updates = parse_json_body::<UpdateClusterLayoutRequest, _, Error>(req).await?;
let mut layout = garage.system.get_cluster_layout(); let mut layout = garage.system.cluster_layout().inner().clone();
let mut roles = layout.roles.clone(); let mut roles = layout.current().roles.clone();
roles.merge(&layout.staging_roles); roles.merge(&layout.staging.get().roles);
for change in updates { for change in updates {
let node = hex::decode(&change.id).ok_or_bad_request("Invalid node identifier")?; let node = hex::decode(&change.id).ok_or_bad_request("Invalid node identifier")?;
@ -232,11 +323,17 @@ pub async fn handle_update_cluster_layout(
}; };
layout layout
.staging_roles .staging
.get_mut()
.roles
.merge(&roles.update_mutator(node, layout::NodeRoleV(new_role))); .merge(&roles.update_mutator(node, layout::NodeRoleV(new_role)));
} }
garage.system.update_cluster_layout(&layout).await?; garage
.system
.layout_manager
.update_cluster_layout(&layout)
.await?;
let res = format_cluster_layout(&layout); let res = format_cluster_layout(&layout);
Ok(json_ok_response(&res)?) Ok(json_ok_response(&res)?)
@ -246,12 +343,16 @@ pub async fn handle_apply_cluster_layout(
garage: &Arc<Garage>, garage: &Arc<Garage>,
req: Request<IncomingBody>, req: Request<IncomingBody>,
) -> Result<Response<ResBody>, Error> { ) -> Result<Response<ResBody>, Error> {
let param = parse_json_body::<ApplyRevertLayoutRequest, _, Error>(req).await?; let param = parse_json_body::<ApplyLayoutRequest, _, Error>(req).await?;
let layout = garage.system.get_cluster_layout(); let layout = garage.system.cluster_layout().inner().clone();
let (layout, msg) = layout.apply_staged_changes(Some(param.version))?; let (layout, msg) = layout.apply_staged_changes(Some(param.version))?;
garage.system.update_cluster_layout(&layout).await?; garage
.system
.layout_manager
.update_cluster_layout(&layout)
.await?;
let res = ApplyClusterLayoutResponse { let res = ApplyClusterLayoutResponse {
message: msg, message: msg,
@ -262,13 +363,14 @@ pub async fn handle_apply_cluster_layout(
pub async fn handle_revert_cluster_layout( pub async fn handle_revert_cluster_layout(
garage: &Arc<Garage>, garage: &Arc<Garage>,
req: Request<IncomingBody>,
) -> Result<Response<ResBody>, Error> { ) -> Result<Response<ResBody>, Error> {
let param = parse_json_body::<ApplyRevertLayoutRequest, _, Error>(req).await?; let layout = garage.system.cluster_layout().inner().clone();
let layout = layout.revert_staged_changes()?;
let layout = garage.system.get_cluster_layout(); garage
let layout = layout.revert_staged_changes(Some(param.version))?; .system
garage.system.update_cluster_layout(&layout).await?; .layout_manager
.update_cluster_layout(&layout)
.await?;
let res = format_cluster_layout(&layout); let res = format_cluster_layout(&layout);
Ok(json_ok_response(&res)?) Ok(json_ok_response(&res)?)
@ -280,7 +382,7 @@ type UpdateClusterLayoutRequest = Vec<NodeRoleChange>;
#[derive(Deserialize)] #[derive(Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
struct ApplyRevertLayoutRequest { struct ApplyLayoutRequest {
version: u64, version: u64,
} }

View file

@ -59,9 +59,7 @@ impl CommonError {
pub fn http_status_code(&self) -> StatusCode { pub fn http_status_code(&self) -> StatusCode {
match self { match self {
CommonError::InternalError( CommonError::InternalError(
GarageError::Timeout GarageError::Timeout | GarageError::RemoteError(_) | GarageError::Quorum(..),
| GarageError::RemoteError(_)
| GarageError::Quorum(_, _, _, _),
) => StatusCode::SERVICE_UNAVAILABLE, ) => StatusCode::SERVICE_UNAVAILABLE,
CommonError::InternalError(_) | CommonError::Hyper(_) | CommonError::Http(_) => { CommonError::InternalError(_) | CommonError::Hyper(_) | CommonError::Http(_) => {
StatusCode::INTERNAL_SERVER_ERROR StatusCode::INTERNAL_SERVER_ERROR
@ -80,9 +78,7 @@ impl CommonError {
match self { match self {
CommonError::Forbidden(_) => "AccessDenied", CommonError::Forbidden(_) => "AccessDenied",
CommonError::InternalError( CommonError::InternalError(
GarageError::Timeout GarageError::Timeout | GarageError::RemoteError(_) | GarageError::Quorum(..),
| GarageError::RemoteError(_)
| GarageError::Quorum(_, _, _, _),
) => "ServiceUnavailable", ) => "ServiceUnavailable",
CommonError::InternalError(_) | CommonError::Hyper(_) | CommonError::Http(_) => { CommonError::InternalError(_) | CommonError::Hyper(_) | CommonError::Http(_) => {
"InternalError" "InternalError"

View file

@ -2,6 +2,7 @@ use std::convert::Infallible;
use std::fs::{self, Permissions}; use std::fs::{self, Permissions};
use std::os::unix::fs::PermissionsExt; use std::os::unix::fs::PermissionsExt;
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration;
use async_trait::async_trait; use async_trait::async_trait;
@ -19,6 +20,7 @@ use hyper_util::rt::TokioIo;
use tokio::io::{AsyncRead, AsyncWrite}; use tokio::io::{AsyncRead, AsyncWrite};
use tokio::net::{TcpListener, TcpStream, UnixListener, UnixStream}; use tokio::net::{TcpListener, TcpStream, UnixListener, UnixStream};
use tokio::sync::watch; use tokio::sync::watch;
use tokio::time::{sleep_until, Instant};
use opentelemetry::{ use opentelemetry::{
global, global,
@ -291,7 +293,7 @@ where
let connection_collector = tokio::spawn({ let connection_collector = tokio::spawn({
let server_name = server_name.clone(); let server_name = server_name.clone();
async move { async move {
let mut connections = FuturesUnordered::new(); let mut connections = FuturesUnordered::<tokio::task::JoinHandle<()>>::new();
loop { loop {
let collect_next = async { let collect_next = async {
if connections.is_empty() { if connections.is_empty() {
@ -312,23 +314,34 @@ where
} }
} }
} }
if !connections.is_empty() { let deadline = Instant::now() + Duration::from_secs(10);
while !connections.is_empty() {
info!( info!(
"{} server: {} connections still open", "{} server: {} connections still open, deadline in {:.2}s",
server_name, server_name,
connections.len() connections.len(),
(deadline - Instant::now()).as_secs_f32(),
); );
while let Some(conn_res) = connections.next().await { tokio::select! {
trace!( conn_res = connections.next() => {
"{} server: HTTP connection finished: {:?}", trace!(
server_name, "{} server: HTTP connection finished: {:?}",
conn_res server_name,
); conn_res.unwrap(),
info!( );
"{} server: {} connections still open", }
server_name, _ = sleep_until(deadline) => {
connections.len() warn!("{} server: exit deadline reached with {} connections still open, killing them now",
); server_name,
connections.len());
for conn in connections.iter() {
conn.abort();
}
for conn in connections {
assert!(conn.await.unwrap_err().is_cancelled());
}
break;
}
} }
} }
} }
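The new shutdown path drains open connections for at most 10 seconds, then aborts whatever is left and asserts that the aborted tasks ended cancelled. For comparison, the same drain-then-kill pattern sketched in asyncio terms (an illustration of the technique, not part of the codebase):

# Hedged sketch: graceful shutdown with a deadline, asyncio analogue of the
# tokio select! loop above. `connections` holds one Task per open connection.
import asyncio

async def drain_connections(connections: set, grace: float = 10.0) -> None:
    if not connections:
        return
    done, pending = await asyncio.wait(connections, timeout=grace)
    for task in pending:          # deadline reached: kill the stragglers,
        task.cancel()             # mirroring conn.abort() in the Rust code
    # reap the cancelled tasks; each finishes with CancelledError
    await asyncio.gather(*pending, return_exceptions=True)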

View file

@ -1,4 +1,5 @@
use std::convert::Infallible; use std::convert::Infallible;
use std::sync::Arc;
use futures::{Stream, StreamExt, TryStreamExt}; use futures::{Stream, StreamExt, TryStreamExt};
@ -10,6 +11,10 @@ use hyper::{
use idna::domain_to_unicode; use idna::domain_to_unicode;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use garage_model::bucket_table::BucketParams;
use garage_model::garage::Garage;
use garage_model::key_table::Key;
use garage_util::data::Uuid;
use garage_util::error::Error as GarageError; use garage_util::error::Error as GarageError;
use crate::common_error::{CommonError as Error, *}; use crate::common_error::{CommonError as Error, *};
@ -27,6 +32,15 @@ pub enum Authorization {
Owner, Owner,
} }
/// The values which are known for each request related to a bucket
pub struct ReqCtx {
pub garage: Arc<Garage>,
pub bucket_id: Uuid,
pub bucket_name: String,
pub bucket_params: BucketParams,
pub api_key: Key,
}
/// Host to bucket /// Host to bucket
/// ///
/// Convert a host, like "bucket.garage-site.tld" to the corresponding bucket "bucket", /// Convert a host, like "bucket.garage-site.tld" to the corresponding bucket "bucket",

View file

@ -95,6 +95,7 @@ impl ApiHandler for K2VApiServer {
.bucket_helper() .bucket_helper()
.get_existing_bucket(bucket_id) .get_existing_bucket(bucket_id)
.await?; .await?;
let bucket_params = bucket.state.into_option().unwrap();
let allowed = match endpoint.authorization_type() { let allowed = match endpoint.authorization_type() {
Authorization::Read => api_key.allow_read(&bucket_id), Authorization::Read => api_key.allow_read(&bucket_id),
@ -112,40 +113,42 @@ impl ApiHandler for K2VApiServer {
// are always preflighted, i.e. the browser should make // are always preflighted, i.e. the browser should make
// an OPTIONS call before to check it is allowed // an OPTIONS call before to check it is allowed
let matching_cors_rule = match *req.method() { let matching_cors_rule = match *req.method() {
Method::GET | Method::HEAD | Method::POST => find_matching_cors_rule(&bucket, &req) Method::GET | Method::HEAD | Method::POST => {
.ok_or_internal_error("Error looking up CORS rule")?, find_matching_cors_rule(&bucket_params, &req)
.ok_or_internal_error("Error looking up CORS rule")?
.cloned()
}
_ => None, _ => None,
}; };
let ctx = ReqCtx {
garage,
bucket_id,
bucket_name,
bucket_params,
api_key,
};
let resp = match endpoint { let resp = match endpoint {
Endpoint::DeleteItem { Endpoint::DeleteItem {
partition_key, partition_key,
sort_key, sort_key,
} => handle_delete_item(garage, req, bucket_id, &partition_key, &sort_key).await, } => handle_delete_item(ctx, req, &partition_key, &sort_key).await,
Endpoint::InsertItem { Endpoint::InsertItem {
partition_key, partition_key,
sort_key, sort_key,
} => handle_insert_item(garage, req, bucket_id, &partition_key, &sort_key).await, } => handle_insert_item(ctx, req, &partition_key, &sort_key).await,
Endpoint::ReadItem { Endpoint::ReadItem {
partition_key, partition_key,
sort_key, sort_key,
} => handle_read_item(garage, &req, bucket_id, &partition_key, &sort_key).await, } => handle_read_item(ctx, &req, &partition_key, &sort_key).await,
Endpoint::PollItem { Endpoint::PollItem {
partition_key, partition_key,
sort_key, sort_key,
causality_token, causality_token,
timeout, timeout,
} => { } => {
handle_poll_item( handle_poll_item(ctx, &req, partition_key, sort_key, causality_token, timeout).await
garage,
&req,
bucket_id,
partition_key,
sort_key,
causality_token,
timeout,
)
.await
} }
Endpoint::ReadIndex { Endpoint::ReadIndex {
prefix, prefix,
@ -153,12 +156,12 @@ impl ApiHandler for K2VApiServer {
end, end,
limit, limit,
reverse, reverse,
} => handle_read_index(garage, bucket_id, prefix, start, end, limit, reverse).await, } => handle_read_index(ctx, prefix, start, end, limit, reverse).await,
Endpoint::InsertBatch {} => handle_insert_batch(garage, bucket_id, req).await, Endpoint::InsertBatch {} => handle_insert_batch(ctx, req).await,
Endpoint::ReadBatch {} => handle_read_batch(garage, bucket_id, req).await, Endpoint::ReadBatch {} => handle_read_batch(ctx, req).await,
Endpoint::DeleteBatch {} => handle_delete_batch(garage, bucket_id, req).await, Endpoint::DeleteBatch {} => handle_delete_batch(ctx, req).await,
Endpoint::PollRange { partition_key } => { Endpoint::PollRange { partition_key } => {
handle_poll_range(garage, bucket_id, &partition_key, req).await handle_poll_range(ctx, &partition_key, req).await
} }
Endpoint::Options => unreachable!(), Endpoint::Options => unreachable!(),
}; };
@ -167,7 +170,7 @@ impl ApiHandler for K2VApiServer {
// add the corresponding CORS headers to the response // add the corresponding CORS headers to the response
let mut resp_ok = resp?; let mut resp_ok = resp?;
if let Some(rule) = matching_cors_rule { if let Some(rule) = matching_cors_rule {
add_cors_headers(&mut resp_ok, rule) add_cors_headers(&mut resp_ok, &rule)
.ok_or_internal_error("Invalid bucket CORS configuration")?; .ok_or_internal_error("Invalid bucket CORS configuration")?;
} }

View file

@ -1,14 +1,9 @@
use std::sync::Arc;
use base64::prelude::*; use base64::prelude::*;
use hyper::{Request, Response, StatusCode}; use hyper::{Request, Response, StatusCode};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use garage_util::data::*;
use garage_table::{EnumerationOrder, TableSchema}; use garage_table::{EnumerationOrder, TableSchema};
use garage_model::garage::Garage;
use garage_model::k2v::causality::*; use garage_model::k2v::causality::*;
use garage_model::k2v::item_table::*; use garage_model::k2v::item_table::*;
@ -18,10 +13,12 @@ use crate::k2v::error::*;
use crate::k2v::range::read_range; use crate::k2v::range::read_range;
pub async fn handle_insert_batch( pub async fn handle_insert_batch(
garage: Arc<Garage>, ctx: ReqCtx,
bucket_id: Uuid,
req: Request<ReqBody>, req: Request<ReqBody>,
) -> Result<Response<ResBody>, Error> { ) -> Result<Response<ResBody>, Error> {
let ReqCtx {
garage, bucket_id, ..
} = &ctx;
let items = parse_json_body::<Vec<InsertBatchItem>, _, Error>(req).await?; let items = parse_json_body::<Vec<InsertBatchItem>, _, Error>(req).await?;
let mut items2 = vec![]; let mut items2 = vec![];
@ -38,7 +35,7 @@ pub async fn handle_insert_batch(
items2.push((it.pk, it.sk, ct, v)); items2.push((it.pk, it.sk, ct, v));
} }
garage.k2v.rpc.insert_batch(bucket_id, items2).await?; garage.k2v.rpc.insert_batch(*bucket_id, items2).await?;
Ok(Response::builder() Ok(Response::builder()
.status(StatusCode::NO_CONTENT) .status(StatusCode::NO_CONTENT)
@ -46,8 +43,7 @@ pub async fn handle_insert_batch(
} }
pub async fn handle_read_batch( pub async fn handle_read_batch(
garage: Arc<Garage>, ctx: ReqCtx,
bucket_id: Uuid,
req: Request<ReqBody>, req: Request<ReqBody>,
) -> Result<Response<ResBody>, Error> { ) -> Result<Response<ResBody>, Error> {
let queries = parse_json_body::<Vec<ReadBatchQuery>, _, Error>(req).await?; let queries = parse_json_body::<Vec<ReadBatchQuery>, _, Error>(req).await?;
@ -55,7 +51,7 @@ pub async fn handle_read_batch(
let resp_results = futures::future::join_all( let resp_results = futures::future::join_all(
queries queries
.into_iter() .into_iter()
.map(|q| handle_read_batch_query(&garage, bucket_id, q)), .map(|q| handle_read_batch_query(&ctx, q)),
) )
.await; .await;
@ -68,12 +64,15 @@ pub async fn handle_read_batch(
} }
async fn handle_read_batch_query( async fn handle_read_batch_query(
garage: &Arc<Garage>, ctx: &ReqCtx,
bucket_id: Uuid,
query: ReadBatchQuery, query: ReadBatchQuery,
) -> Result<ReadBatchResponse, Error> { ) -> Result<ReadBatchResponse, Error> {
let ReqCtx {
garage, bucket_id, ..
} = ctx;
let partition = K2VItemPartition { let partition = K2VItemPartition {
bucket_id, bucket_id: *bucket_id,
partition_key: query.partition_key.clone(), partition_key: query.partition_key.clone(),
}; };
@ -138,8 +137,7 @@ async fn handle_read_batch_query(
} }
pub async fn handle_delete_batch( pub async fn handle_delete_batch(
garage: Arc<Garage>, ctx: ReqCtx,
bucket_id: Uuid,
req: Request<ReqBody>, req: Request<ReqBody>,
) -> Result<Response<ResBody>, Error> { ) -> Result<Response<ResBody>, Error> {
let queries = parse_json_body::<Vec<DeleteBatchQuery>, _, Error>(req).await?; let queries = parse_json_body::<Vec<DeleteBatchQuery>, _, Error>(req).await?;
@ -147,7 +145,7 @@ pub async fn handle_delete_batch(
let resp_results = futures::future::join_all( let resp_results = futures::future::join_all(
queries queries
.into_iter() .into_iter()
.map(|q| handle_delete_batch_query(&garage, bucket_id, q)), .map(|q| handle_delete_batch_query(&ctx, q)),
) )
.await; .await;
@ -160,12 +158,15 @@ pub async fn handle_delete_batch(
} }
async fn handle_delete_batch_query( async fn handle_delete_batch_query(
garage: &Arc<Garage>, ctx: &ReqCtx,
bucket_id: Uuid,
query: DeleteBatchQuery, query: DeleteBatchQuery,
) -> Result<DeleteBatchResponse, Error> { ) -> Result<DeleteBatchResponse, Error> {
let ReqCtx {
garage, bucket_id, ..
} = &ctx;
let partition = K2VItemPartition { let partition = K2VItemPartition {
bucket_id, bucket_id: *bucket_id,
partition_key: query.partition_key.clone(), partition_key: query.partition_key.clone(),
}; };
@ -195,7 +196,7 @@ async fn handle_delete_batch_query(
.k2v .k2v
.rpc .rpc
.insert( .insert(
bucket_id, *bucket_id,
i.partition.partition_key, i.partition.partition_key,
i.sort_key, i.sort_key,
Some(cc), Some(cc),
@ -235,7 +236,7 @@ async fn handle_delete_batch_query(
.collect::<Vec<_>>(); .collect::<Vec<_>>();
let n = items.len(); let n = items.len();
garage.k2v.rpc.insert_batch(bucket_id, items).await?; garage.k2v.rpc.insert_batch(*bucket_id, items).await?;
n n
}; };
@ -251,11 +252,13 @@ async fn handle_delete_batch_query(
} }
pub(crate) async fn handle_poll_range( pub(crate) async fn handle_poll_range(
garage: Arc<Garage>, ctx: ReqCtx,
bucket_id: Uuid,
partition_key: &str, partition_key: &str,
req: Request<ReqBody>, req: Request<ReqBody>,
) -> Result<Response<ResBody>, Error> { ) -> Result<Response<ResBody>, Error> {
let ReqCtx {
garage, bucket_id, ..
} = ctx;
use garage_model::k2v::sub::PollRange; use garage_model::k2v::sub::PollRange;
let query = parse_json_body::<PollRangeQuery, _, Error>(req).await?; let query = parse_json_body::<PollRangeQuery, _, Error>(req).await?;

View file

@ -1,14 +1,8 @@
use std::sync::Arc;
use hyper::Response; use hyper::Response;
use serde::Serialize; use serde::Serialize;
use garage_util::data::*;
use garage_rpc::ring::Ring;
use garage_table::util::*; use garage_table::util::*;
use garage_model::garage::Garage;
use garage_model::k2v::item_table::{BYTES, CONFLICTS, ENTRIES, VALUES}; use garage_model::k2v::item_table::{BYTES, CONFLICTS, ENTRIES, VALUES};
use crate::helpers::*; use crate::helpers::*;
@ -17,17 +11,24 @@ use crate::k2v::error::*;
use crate::k2v::range::read_range; use crate::k2v::range::read_range;
pub async fn handle_read_index( pub async fn handle_read_index(
garage: Arc<Garage>, ctx: ReqCtx,
bucket_id: Uuid,
prefix: Option<String>, prefix: Option<String>,
start: Option<String>, start: Option<String>,
end: Option<String>, end: Option<String>,
limit: Option<u64>, limit: Option<u64>,
reverse: Option<bool>, reverse: Option<bool>,
) -> Result<Response<ResBody>, Error> { ) -> Result<Response<ResBody>, Error> {
let ReqCtx {
garage, bucket_id, ..
} = &ctx;
let reverse = reverse.unwrap_or(false); let reverse = reverse.unwrap_or(false);
let ring: Arc<Ring> = garage.system.ring.borrow().clone(); let node_id_vec = garage
.system
.cluster_layout()
.all_nongateway_nodes()
.to_vec();
let (partition_keys, more, next_start) = read_range( let (partition_keys, more, next_start) = read_range(
&garage.k2v.counter_table.table, &garage.k2v.counter_table.table,
@ -36,7 +37,7 @@ pub async fn handle_read_index(
&start, &start,
&end, &end,
limit, limit,
Some((DeletedFilter::NotDeleted, ring.layout.node_id_vec.clone())), Some((DeletedFilter::NotDeleted, node_id_vec)),
EnumerationOrder::from_reverse(reverse), EnumerationOrder::from_reverse(reverse),
) )
.await?; .await?;
@ -55,7 +56,7 @@ pub async fn handle_read_index(
partition_keys: partition_keys partition_keys: partition_keys
.into_iter() .into_iter()
.map(|part| { .map(|part| {
let vals = part.filtered_values(&ring); let vals = part.filtered_values(&garage.system.cluster_layout());
ReadIndexResponseEntry { ReadIndexResponseEntry {
pk: part.sk, pk: part.sk,
entries: *vals.get(&s_entries).unwrap_or(&0), entries: *vals.get(&s_entries).unwrap_or(&0),

View file

@ -1,13 +1,8 @@
use std::sync::Arc;
use base64::prelude::*; use base64::prelude::*;
use http::header; use http::header;
use hyper::{Request, Response, StatusCode}; use hyper::{Request, Response, StatusCode};
use garage_util::data::*;
use garage_model::garage::Garage;
use garage_model::k2v::causality::*; use garage_model::k2v::causality::*;
use garage_model::k2v::item_table::*; use garage_model::k2v::item_table::*;
@ -100,12 +95,15 @@ impl ReturnFormat {
/// Handle ReadItem request /// Handle ReadItem request
#[allow(clippy::ptr_arg)] #[allow(clippy::ptr_arg)]
pub async fn handle_read_item( pub async fn handle_read_item(
garage: Arc<Garage>, ctx: ReqCtx,
req: &Request<ReqBody>, req: &Request<ReqBody>,
bucket_id: Uuid,
partition_key: &str, partition_key: &str,
sort_key: &String, sort_key: &String,
) -> Result<Response<ResBody>, Error> { ) -> Result<Response<ResBody>, Error> {
let ReqCtx {
garage, bucket_id, ..
} = &ctx;
let format = ReturnFormat::from(req)?; let format = ReturnFormat::from(req)?;
let item = garage let item = garage
@ -113,7 +111,7 @@ pub async fn handle_read_item(
.item_table .item_table
.get( .get(
&K2VItemPartition { &K2VItemPartition {
bucket_id, bucket_id: *bucket_id,
partition_key: partition_key.to_string(), partition_key: partition_key.to_string(),
}, },
sort_key, sort_key,
@ -125,12 +123,14 @@ pub async fn handle_read_item(
} }
pub async fn handle_insert_item( pub async fn handle_insert_item(
garage: Arc<Garage>, ctx: ReqCtx,
req: Request<ReqBody>, req: Request<ReqBody>,
bucket_id: Uuid,
partition_key: &str, partition_key: &str,
sort_key: &str, sort_key: &str,
) -> Result<Response<ResBody>, Error> { ) -> Result<Response<ResBody>, Error> {
let ReqCtx {
garage, bucket_id, ..
} = &ctx;
let causal_context = req let causal_context = req
.headers() .headers()
.get(X_GARAGE_CAUSALITY_TOKEN) .get(X_GARAGE_CAUSALITY_TOKEN)
@ -149,7 +149,7 @@ pub async fn handle_insert_item(
.k2v .k2v
.rpc .rpc
.insert( .insert(
bucket_id, *bucket_id,
partition_key.to_string(), partition_key.to_string(),
sort_key.to_string(), sort_key.to_string(),
causal_context, causal_context,
@ -163,12 +163,14 @@ pub async fn handle_insert_item(
} }
pub async fn handle_delete_item( pub async fn handle_delete_item(
garage: Arc<Garage>, ctx: ReqCtx,
req: Request<ReqBody>, req: Request<ReqBody>,
bucket_id: Uuid,
partition_key: &str, partition_key: &str,
sort_key: &str, sort_key: &str,
) -> Result<Response<ResBody>, Error> { ) -> Result<Response<ResBody>, Error> {
let ReqCtx {
garage, bucket_id, ..
} = &ctx;
let causal_context = req let causal_context = req
.headers() .headers()
.get(X_GARAGE_CAUSALITY_TOKEN) .get(X_GARAGE_CAUSALITY_TOKEN)
@ -183,7 +185,7 @@ pub async fn handle_delete_item(
.k2v .k2v
.rpc .rpc
.insert( .insert(
bucket_id, *bucket_id,
partition_key.to_string(), partition_key.to_string(),
sort_key.to_string(), sort_key.to_string(),
causal_context, causal_context,
@ -199,14 +201,16 @@ pub async fn handle_delete_item(
/// Handle ReadItem request /// Handle ReadItem request
#[allow(clippy::ptr_arg)] #[allow(clippy::ptr_arg)]
pub async fn handle_poll_item( pub async fn handle_poll_item(
garage: Arc<Garage>, ctx: ReqCtx,
req: &Request<ReqBody>, req: &Request<ReqBody>,
bucket_id: Uuid,
partition_key: String, partition_key: String,
sort_key: String, sort_key: String,
causality_token: String, causality_token: String,
timeout_secs: Option<u64>, timeout_secs: Option<u64>,
) -> Result<Response<ResBody>, Error> { ) -> Result<Response<ResBody>, Error> {
let ReqCtx {
garage, bucket_id, ..
} = &ctx;
let format = ReturnFormat::from(req)?; let format = ReturnFormat::from(req)?;
let causal_context = let causal_context =
@ -218,7 +222,7 @@ pub async fn handle_poll_item(
.k2v .k2v
.rpc .rpc
.poll_item( .poll_item(
bucket_id, *bucket_id,
partition_key, partition_key,
sort_key, sort_key,
causal_context, causal_context,

View file

@ -155,6 +155,7 @@ impl ApiHandler for S3ApiServer {
.bucket_helper() .bucket_helper()
.get_existing_bucket(bucket_id) .get_existing_bucket(bucket_id)
.await?; .await?;
let bucket_params = bucket.state.into_option().unwrap();
let allowed = match endpoint.authorization_type() { let allowed = match endpoint.authorization_type() {
Authorization::Read => api_key.allow_read(&bucket_id), Authorization::Read => api_key.allow_read(&bucket_id),
@ -167,12 +168,20 @@ impl ApiHandler for S3ApiServer {
return Err(Error::forbidden("Operation is not allowed for this key.")); return Err(Error::forbidden("Operation is not allowed for this key."));
} }
let matching_cors_rule = find_matching_cors_rule(&bucket, &req)?; let matching_cors_rule = find_matching_cors_rule(&bucket_params, &req)?.cloned();
let ctx = ReqCtx {
garage,
bucket_id,
bucket_name,
bucket_params,
api_key,
};
let resp = match endpoint { let resp = match endpoint {
Endpoint::HeadObject { Endpoint::HeadObject {
key, part_number, .. key, part_number, ..
} => handle_head(garage, &req, bucket_id, &key, part_number).await, } => handle_head(ctx, &req, &key, part_number).await,
Endpoint::GetObject { Endpoint::GetObject {
key, key,
part_number, part_number,
@ -192,74 +201,37 @@ impl ApiHandler for S3ApiServer {
response_content_type, response_content_type,
response_expires, response_expires,
}; };
handle_get(garage, &req, bucket_id, &key, part_number, overrides).await handle_get(ctx, &req, &key, part_number, overrides).await
} }
Endpoint::UploadPart { Endpoint::UploadPart {
key, key,
part_number, part_number,
upload_id, upload_id,
} => { } => handle_put_part(ctx, req, &key, part_number, &upload_id, content_sha256).await,
handle_put_part( Endpoint::CopyObject { key } => handle_copy(ctx, &req, &key).await,
garage,
req,
bucket_id,
&key,
part_number,
&upload_id,
content_sha256,
)
.await
}
Endpoint::CopyObject { key } => {
handle_copy(garage, &api_key, &req, bucket_id, &key).await
}
Endpoint::UploadPartCopy { Endpoint::UploadPartCopy {
key, key,
part_number, part_number,
upload_id, upload_id,
} => { } => handle_upload_part_copy(ctx, &req, &key, part_number, &upload_id).await,
handle_upload_part_copy( Endpoint::PutObject { key } => handle_put(ctx, req, &key, content_sha256).await,
garage,
&api_key,
&req,
bucket_id,
&key,
part_number,
&upload_id,
)
.await
}
Endpoint::PutObject { key } => {
handle_put(garage, req, &bucket, &key, content_sha256).await
}
Endpoint::AbortMultipartUpload { key, upload_id } => { Endpoint::AbortMultipartUpload { key, upload_id } => {
handle_abort_multipart_upload(garage, bucket_id, &key, &upload_id).await handle_abort_multipart_upload(ctx, &key, &upload_id).await
} }
-        Endpoint::DeleteObject { key, .. } => handle_delete(garage, bucket_id, &key).await,
+        Endpoint::DeleteObject { key, .. } => handle_delete(ctx, &key).await,
         Endpoint::CreateMultipartUpload { key } => {
-            handle_create_multipart_upload(garage, &req, &bucket_name, bucket_id, &key).await
+            handle_create_multipart_upload(ctx, &req, &key).await
         }
         Endpoint::CompleteMultipartUpload { key, upload_id } => {
-            handle_complete_multipart_upload(
-                garage,
-                req,
-                &bucket_name,
-                &bucket,
-                &key,
-                &upload_id,
-                content_sha256,
-            )
-            .await
+            handle_complete_multipart_upload(ctx, req, &key, &upload_id, content_sha256).await
         }
         Endpoint::CreateBucket {} => unreachable!(),
         Endpoint::HeadBucket {} => {
             let response = Response::builder().body(empty_body()).unwrap();
             Ok(response)
         }
-        Endpoint::DeleteBucket {} => {
-            handle_delete_bucket(&garage, bucket_id, bucket_name, &api_key.key_id).await
-        }
-        Endpoint::GetBucketLocation {} => handle_get_bucket_location(garage),
+        Endpoint::DeleteBucket {} => handle_delete_bucket(ctx).await,
+        Endpoint::GetBucketLocation {} => handle_get_bucket_location(ctx),
         Endpoint::GetBucketVersioning {} => handle_get_bucket_versioning(),
         Endpoint::ListObjects {
             delimiter,
@@ -268,24 +240,21 @@ impl ApiHandler for S3ApiServer {
             max_keys,
             prefix,
         } => {
-            handle_list(
-                garage,
-                &ListObjectsQuery {
-                    common: ListQueryCommon {
-                        bucket_name,
-                        bucket_id,
-                        delimiter,
-                        page_size: max_keys.unwrap_or(1000).clamp(1, 1000),
-                        prefix: prefix.unwrap_or_default(),
-                        urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false),
-                    },
-                    is_v2: false,
-                    marker,
-                    continuation_token: None,
-                    start_after: None,
-                },
-            )
-            .await
+            let query = ListObjectsQuery {
+                common: ListQueryCommon {
+                    bucket_name: ctx.bucket_name.clone(),
+                    bucket_id,
+                    delimiter,
+                    page_size: max_keys.unwrap_or(1000).clamp(1, 1000),
+                    prefix: prefix.unwrap_or_default(),
+                    urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false),
+                },
+                is_v2: false,
+                marker,
+                continuation_token: None,
+                start_after: None,
+            };
+            handle_list(ctx, &query).await
         }
         Endpoint::ListObjectsV2 {
             delimiter,
@@ -298,24 +267,21 @@ impl ApiHandler for S3ApiServer {
             ..
         } => {
             if list_type == "2" {
-                handle_list(
-                    garage,
-                    &ListObjectsQuery {
-                        common: ListQueryCommon {
-                            bucket_name,
-                            bucket_id,
-                            delimiter,
-                            page_size: max_keys.unwrap_or(1000).clamp(1, 1000),
-                            urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false),
-                            prefix: prefix.unwrap_or_default(),
-                        },
-                        is_v2: true,
-                        marker: None,
-                        continuation_token,
-                        start_after,
-                    },
-                )
-                .await
+                let query = ListObjectsQuery {
+                    common: ListQueryCommon {
+                        bucket_name: ctx.bucket_name.clone(),
+                        bucket_id,
+                        delimiter,
+                        page_size: max_keys.unwrap_or(1000).clamp(1, 1000),
+                        urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false),
+                        prefix: prefix.unwrap_or_default(),
+                    },
+                    is_v2: true,
+                    marker: None,
+                    continuation_token,
+                    start_after,
+                };
+                handle_list(ctx, &query).await
             } else {
                 Err(Error::bad_request(format!(
                     "Invalid endpoint: list-type={}",
@@ -331,22 +297,19 @@ impl ApiHandler for S3ApiServer {
             prefix,
             upload_id_marker,
         } => {
-            handle_list_multipart_upload(
-                garage,
-                &ListMultipartUploadsQuery {
-                    common: ListQueryCommon {
-                        bucket_name,
-                        bucket_id,
-                        delimiter,
-                        page_size: max_uploads.unwrap_or(1000).clamp(1, 1000),
-                        prefix: prefix.unwrap_or_default(),
-                        urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false),
-                    },
-                    key_marker,
-                    upload_id_marker,
-                },
-            )
-            .await
+            let query = ListMultipartUploadsQuery {
+                common: ListQueryCommon {
+                    bucket_name: ctx.bucket_name.clone(),
+                    bucket_id,
+                    delimiter,
+                    page_size: max_uploads.unwrap_or(1000).clamp(1, 1000),
+                    prefix: prefix.unwrap_or_default(),
+                    urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false),
+                },
+                key_marker,
+                upload_id_marker,
+            };
+            handle_list_multipart_upload(ctx, &query).await
         }
         Endpoint::ListParts {
             key,
@@ -354,39 +317,28 @@ impl ApiHandler for S3ApiServer {
             part_number_marker,
             upload_id,
         } => {
-            handle_list_parts(
-                garage,
-                &ListPartsQuery {
-                    bucket_name,
-                    bucket_id,
-                    key,
-                    upload_id,
-                    part_number_marker: part_number_marker.map(|p| p.min(10000)),
-                    max_parts: max_parts.unwrap_or(1000).clamp(1, 1000),
-                },
-            )
-            .await
+            let query = ListPartsQuery {
+                bucket_name: ctx.bucket_name.clone(),
+                bucket_id,
+                key,
+                upload_id,
+                part_number_marker: part_number_marker.map(|p| p.min(10000)),
+                max_parts: max_parts.unwrap_or(1000).clamp(1, 1000),
+            };
+            handle_list_parts(ctx, req, &query).await
         }
-        Endpoint::DeleteObjects {} => {
-            handle_delete_objects(garage, bucket_id, req, content_sha256).await
-        }
-        Endpoint::GetBucketWebsite {} => handle_get_website(&bucket).await,
-        Endpoint::PutBucketWebsite {} => {
-            handle_put_website(garage, bucket.clone(), req, content_sha256).await
-        }
-        Endpoint::DeleteBucketWebsite {} => handle_delete_website(garage, bucket.clone()).await,
-        Endpoint::GetBucketCors {} => handle_get_cors(&bucket).await,
-        Endpoint::PutBucketCors {} => {
-            handle_put_cors(garage, bucket.clone(), req, content_sha256).await
-        }
-        Endpoint::DeleteBucketCors {} => handle_delete_cors(garage, bucket.clone()).await,
-        Endpoint::GetBucketLifecycleConfiguration {} => handle_get_lifecycle(&bucket).await,
+        Endpoint::DeleteObjects {} => handle_delete_objects(ctx, req, content_sha256).await,
+        Endpoint::GetBucketWebsite {} => handle_get_website(ctx).await,
+        Endpoint::PutBucketWebsite {} => handle_put_website(ctx, req, content_sha256).await,
+        Endpoint::DeleteBucketWebsite {} => handle_delete_website(ctx).await,
+        Endpoint::GetBucketCors {} => handle_get_cors(ctx).await,
+        Endpoint::PutBucketCors {} => handle_put_cors(ctx, req, content_sha256).await,
+        Endpoint::DeleteBucketCors {} => handle_delete_cors(ctx).await,
+        Endpoint::GetBucketLifecycleConfiguration {} => handle_get_lifecycle(ctx).await,
         Endpoint::PutBucketLifecycleConfiguration {} => {
-            handle_put_lifecycle(garage, bucket.clone(), req, content_sha256).await
+            handle_put_lifecycle(ctx, req, content_sha256).await
         }
-        Endpoint::DeleteBucketLifecycle {} => {
-            handle_delete_lifecycle(garage, bucket.clone()).await
-        }
+        Endpoint::DeleteBucketLifecycle {} => handle_delete_lifecycle(ctx).await,
         endpoint => Err(Error::NotImplemented(endpoint.name().to_owned())),
     };
@@ -394,7 +346,7 @@ impl ApiHandler for S3ApiServer {
     // add the corresponding CORS headers to the response
     let mut resp_ok = resp?;
     if let Some(rule) = matching_cors_rule {
-        add_cors_headers(&mut resp_ok, rule)
+        add_cors_headers(&mut resp_ok, &rule)
             .ok_or_internal_error("Invalid bucket CORS configuration")?;
     }
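
The refactor shown above replaces the long parameter lists (garage, bucket_id, bucket_name, api_key, ...) with a single ReqCtx value threaded through every S3 handler. A rough sketch of such a context struct follows; the field set is inferred from the destructurings visible in these hunks, not copied from the actual definition in the Garage source:

use std::sync::Arc;

use garage_model::bucket_table::BucketParams;
use garage_model::garage::Garage;
use garage_model::key_table::Key;
use garage_util::data::Uuid;

// Hypothetical reconstruction, for illustration only: one value that
// carries everything a handler used to take as separate arguments.
pub struct ReqCtx {
    pub garage: Arc<Garage>,
    pub bucket_id: Uuid,
    pub bucket_name: String,
    pub bucket_params: BucketParams,
    pub api_key: Key,
}

Handlers destructure only what they need, as in `let ReqCtx { garage, bucket_id, .. } = ctx;`, so adding per-request state later does not churn every signature.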


@@ -1,5 +1,4 @@
 use std::collections::HashMap;
-use std::sync::Arc;
 
 use http_body_util::BodyExt;
 use hyper::{Request, Response, StatusCode};
@@ -21,7 +20,8 @@ use crate::s3::error::*;
 use crate::s3::xml as s3_xml;
 use crate::signature::verify_signed_content;
 
-pub fn handle_get_bucket_location(garage: Arc<Garage>) -> Result<Response<ResBody>, Error> {
+pub fn handle_get_bucket_location(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
+    let ReqCtx { garage, .. } = ctx;
     let loc = s3_xml::LocationConstraint {
         xmlns: (),
         region: garage.config.s3_api.s3_region.to_string(),
@@ -204,21 +204,20 @@ pub async fn handle_create_bucket(
         .unwrap())
 }
 
-pub async fn handle_delete_bucket(
-    garage: &Garage,
-    bucket_id: Uuid,
-    bucket_name: String,
-    api_key_id: &String,
-) -> Result<Response<ResBody>, Error> {
+pub async fn handle_delete_bucket(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
+    let ReqCtx {
+        garage,
+        bucket_id,
+        bucket_name,
+        bucket_params: bucket_state,
+        api_key,
+        ..
+    } = &ctx;
     let helper = garage.locked_helper().await;
 
-    let api_key = helper.key().get_existing_key(api_key_id).await?;
     let key_params = api_key.params().unwrap();
 
-    let is_local_alias = matches!(key_params.local_aliases.get(&bucket_name), Some(Some(_)));
-
-    let mut bucket = helper.bucket().get_existing_bucket(bucket_id).await?;
-    let bucket_state = bucket.state.as_option().unwrap();
+    let is_local_alias = matches!(key_params.local_aliases.get(bucket_name), Some(Some(_)));
 
     // If the bucket has no other aliases, this is a true deletion.
     // Otherwise, it is just an alias removal.
@@ -228,20 +227,20 @@ pub async fn handle_delete_bucket(
         .items()
         .iter()
         .filter(|(_, _, active)| *active)
-        .any(|(n, _, _)| is_local_alias || (*n != bucket_name));
+        .any(|(n, _, _)| is_local_alias || (*n != *bucket_name));
 
     let has_other_local_aliases = bucket_state
         .local_aliases
         .items()
         .iter()
         .filter(|(_, _, active)| *active)
-        .any(|((k, n), _, _)| !is_local_alias || *n != bucket_name || *k != api_key.key_id);
+        .any(|((k, n), _, _)| !is_local_alias || *n != *bucket_name || *k != api_key.key_id);
 
     if !has_other_global_aliases && !has_other_local_aliases {
         // Delete bucket
 
         // Check bucket is empty
-        if !helper.bucket().is_bucket_empty(bucket_id).await? {
+        if !helper.bucket().is_bucket_empty(*bucket_id).await? {
             return Err(CommonError::BucketNotEmpty.into());
         }
 
@@ -249,33 +248,36 @@ pub async fn handle_delete_bucket(
         // 1. delete bucket alias
         if is_local_alias {
             helper
-                .unset_local_bucket_alias(bucket_id, &api_key.key_id, &bucket_name)
+                .unset_local_bucket_alias(*bucket_id, &api_key.key_id, bucket_name)
                 .await?;
         } else {
             helper
-                .unset_global_bucket_alias(bucket_id, &bucket_name)
+                .unset_global_bucket_alias(*bucket_id, bucket_name)
                 .await?;
         }
 
         // 2. delete authorization from keys that had access
-        for (key_id, _) in bucket.authorized_keys() {
+        for (key_id, _) in bucket_state.authorized_keys.items() {
             helper
-                .set_bucket_key_permissions(bucket.id, key_id, BucketKeyPerm::NO_PERMISSIONS)
+                .set_bucket_key_permissions(*bucket_id, key_id, BucketKeyPerm::NO_PERMISSIONS)
                 .await?;
         }
 
+        let bucket = Bucket {
+            id: *bucket_id,
+            state: Deletable::delete(),
+        };
+
         // 3. delete bucket
-        bucket.state = Deletable::delete();
         garage.bucket_table.insert(&bucket).await?;
     } else if is_local_alias {
         // Just unalias
         helper
-            .unset_local_bucket_alias(bucket_id, &api_key.key_id, &bucket_name)
+            .unset_local_bucket_alias(*bucket_id, &api_key.key_id, bucket_name)
             .await?;
     } else {
         // Just unalias (but from global namespace)
         helper
-            .unset_global_bucket_alias(bucket_id, &bucket_name)
+            .unset_global_bucket_alias(*bucket_id, bucket_name)
             .await?;
     }
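
One design point worth spelling out in the true-deletion branch: bucket records live in a CRDT table, so deletion is itself a write of a value that dominates on merge. A toy model of the Deletable semantics assumed here, not Garage's actual implementation:

// Toy model: deletion is absorbing under merge, so once any replica
// has stored the tombstone, the bucket converges to Deleted everywhere.
// (A real CRDT would also merge the two Present values.)
enum Deletable<T> {
    Present(T),
    Deleted,
}

impl<T> Deletable<T> {
    fn merge(self, other: Self) -> Self {
        match (self, other) {
            (Deletable::Deleted, _) | (_, Deletable::Deleted) => Deletable::Deleted,
            (a, _) => a,
        }
    }
}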

src/api/s3/checksum.rs (new file, 406 lines)

@@ -0,0 +1,406 @@
use std::convert::{TryFrom, TryInto};
use std::hash::Hasher;
use base64::prelude::*;
use crc32c::Crc32cHasher as Crc32c;
use crc32fast::Hasher as Crc32;
use md5::{Digest, Md5};
use sha1::Sha1;
use sha2::Sha256;
use http::{HeaderMap, HeaderName, HeaderValue};
use garage_util::data::*;
use garage_util::error::OkOrMessage;
use garage_model::s3::object_table::*;
use crate::s3::error::*;
pub const X_AMZ_CHECKSUM_ALGORITHM: HeaderName =
HeaderName::from_static("x-amz-checksum-algorithm");
pub const X_AMZ_CHECKSUM_MODE: HeaderName = HeaderName::from_static("x-amz-checksum-mode");
pub const X_AMZ_CHECKSUM_CRC32: HeaderName = HeaderName::from_static("x-amz-checksum-crc32");
pub const X_AMZ_CHECKSUM_CRC32C: HeaderName = HeaderName::from_static("x-amz-checksum-crc32c");
pub const X_AMZ_CHECKSUM_SHA1: HeaderName = HeaderName::from_static("x-amz-checksum-sha1");
pub const X_AMZ_CHECKSUM_SHA256: HeaderName = HeaderName::from_static("x-amz-checksum-sha256");
pub type Crc32Checksum = [u8; 4];
pub type Crc32cChecksum = [u8; 4];
pub type Md5Checksum = [u8; 16];
pub type Sha1Checksum = [u8; 20];
pub type Sha256Checksum = [u8; 32];
#[derive(Debug, Default)]
pub(crate) struct ExpectedChecksums {
// base64-encoded md5 (content-md5 header)
pub md5: Option<String>,
// content_sha256 (as a Hash / FixedBytes32)
pub sha256: Option<Hash>,
// extra x-amz-checksum-* header
pub extra: Option<ChecksumValue>,
}
pub(crate) struct Checksummer {
pub crc32: Option<Crc32>,
pub crc32c: Option<Crc32c>,
pub md5: Option<Md5>,
pub sha1: Option<Sha1>,
pub sha256: Option<Sha256>,
}
#[derive(Default)]
pub(crate) struct Checksums {
pub crc32: Option<Crc32Checksum>,
pub crc32c: Option<Crc32cChecksum>,
pub md5: Option<Md5Checksum>,
pub sha1: Option<Sha1Checksum>,
pub sha256: Option<Sha256Checksum>,
}
impl Checksummer {
pub(crate) fn init(expected: &ExpectedChecksums, require_md5: bool) -> Self {
let mut ret = Self {
crc32: None,
crc32c: None,
md5: None,
sha1: None,
sha256: None,
};
if expected.md5.is_some() || require_md5 {
ret.md5 = Some(Md5::new());
}
if expected.sha256.is_some() || matches!(&expected.extra, Some(ChecksumValue::Sha256(_))) {
ret.sha256 = Some(Sha256::new());
}
if matches!(&expected.extra, Some(ChecksumValue::Crc32(_))) {
ret.crc32 = Some(Crc32::new());
}
if matches!(&expected.extra, Some(ChecksumValue::Crc32c(_))) {
ret.crc32c = Some(Crc32c::default());
}
if matches!(&expected.extra, Some(ChecksumValue::Sha1(_))) {
ret.sha1 = Some(Sha1::new());
}
ret
}
pub(crate) fn add(mut self, algo: Option<ChecksumAlgorithm>) -> Self {
match algo {
Some(ChecksumAlgorithm::Crc32) => {
self.crc32 = Some(Crc32::new());
}
Some(ChecksumAlgorithm::Crc32c) => {
self.crc32c = Some(Crc32c::default());
}
Some(ChecksumAlgorithm::Sha1) => {
self.sha1 = Some(Sha1::new());
}
Some(ChecksumAlgorithm::Sha256) => {
self.sha256 = Some(Sha256::new());
}
None => (),
}
self
}
pub(crate) fn update(&mut self, bytes: &[u8]) {
if let Some(crc32) = &mut self.crc32 {
crc32.update(bytes);
}
if let Some(crc32c) = &mut self.crc32c {
crc32c.write(bytes);
}
if let Some(md5) = &mut self.md5 {
md5.update(bytes);
}
if let Some(sha1) = &mut self.sha1 {
sha1.update(bytes);
}
if let Some(sha256) = &mut self.sha256 {
sha256.update(bytes);
}
}
pub(crate) fn finalize(self) -> Checksums {
Checksums {
crc32: self.crc32.map(|x| u32::to_be_bytes(x.finalize())),
crc32c: self
.crc32c
.map(|x| u32::to_be_bytes(u32::try_from(x.finish()).unwrap())),
md5: self.md5.map(|x| x.finalize()[..].try_into().unwrap()),
sha1: self.sha1.map(|x| x.finalize()[..].try_into().unwrap()),
sha256: self.sha256.map(|x| x.finalize()[..].try_into().unwrap()),
}
}
}
impl Checksums {
pub fn verify(&self, expected: &ExpectedChecksums) -> Result<(), Error> {
if let Some(expected_md5) = &expected.md5 {
match self.md5 {
Some(md5) if BASE64_STANDARD.encode(&md5) == expected_md5.trim_matches('"') => (),
_ => {
return Err(Error::InvalidDigest(
"MD5 checksum verification failed (from content-md5)".into(),
))
}
}
}
if let Some(expected_sha256) = &expected.sha256 {
match self.sha256 {
Some(sha256) if &sha256[..] == expected_sha256.as_slice() => (),
_ => {
return Err(Error::InvalidDigest(
"SHA256 checksum verification failed (from x-amz-content-sha256)".into(),
))
}
}
}
if let Some(extra) = expected.extra {
let algo = extra.algorithm();
if self.extract(Some(algo)) != Some(extra) {
return Err(Error::InvalidDigest(format!(
"Failed to validate checksum for algorithm {:?}",
algo
)));
}
}
Ok(())
}
pub fn extract(&self, algo: Option<ChecksumAlgorithm>) -> Option<ChecksumValue> {
match algo {
None => None,
Some(ChecksumAlgorithm::Crc32) => Some(ChecksumValue::Crc32(self.crc32.unwrap())),
Some(ChecksumAlgorithm::Crc32c) => Some(ChecksumValue::Crc32c(self.crc32c.unwrap())),
Some(ChecksumAlgorithm::Sha1) => Some(ChecksumValue::Sha1(self.sha1.unwrap())),
Some(ChecksumAlgorithm::Sha256) => Some(ChecksumValue::Sha256(self.sha256.unwrap())),
}
}
}
// ----
#[derive(Default)]
pub(crate) struct MultipartChecksummer {
pub md5: Md5,
pub extra: Option<MultipartExtraChecksummer>,
}
pub(crate) enum MultipartExtraChecksummer {
Crc32(Crc32),
Crc32c(Crc32c),
Sha1(Sha1),
Sha256(Sha256),
}
impl MultipartChecksummer {
pub(crate) fn init(algo: Option<ChecksumAlgorithm>) -> Self {
Self {
md5: Md5::new(),
extra: match algo {
None => None,
Some(ChecksumAlgorithm::Crc32) => {
Some(MultipartExtraChecksummer::Crc32(Crc32::new()))
}
Some(ChecksumAlgorithm::Crc32c) => {
Some(MultipartExtraChecksummer::Crc32c(Crc32c::default()))
}
Some(ChecksumAlgorithm::Sha1) => Some(MultipartExtraChecksummer::Sha1(Sha1::new())),
Some(ChecksumAlgorithm::Sha256) => {
Some(MultipartExtraChecksummer::Sha256(Sha256::new()))
}
},
}
}
pub(crate) fn update(
&mut self,
etag: &str,
checksum: Option<ChecksumValue>,
) -> Result<(), Error> {
self.md5
.update(&hex::decode(&etag).ok_or_message("invalid etag hex")?);
match (&mut self.extra, checksum) {
(None, _) => (),
(
Some(MultipartExtraChecksummer::Crc32(ref mut crc32)),
Some(ChecksumValue::Crc32(x)),
) => {
crc32.update(&x);
}
(
Some(MultipartExtraChecksummer::Crc32c(ref mut crc32c)),
Some(ChecksumValue::Crc32c(x)),
) => {
crc32c.write(&x);
}
(Some(MultipartExtraChecksummer::Sha1(ref mut sha1)), Some(ChecksumValue::Sha1(x))) => {
sha1.update(&x);
}
(
Some(MultipartExtraChecksummer::Sha256(ref mut sha256)),
Some(ChecksumValue::Sha256(x)),
) => {
sha256.update(&x);
}
(Some(_), b) => {
return Err(Error::internal_error(format!(
"part checksum was not computed correctly, got: {:?}",
b
)))
}
}
Ok(())
}
pub(crate) fn finalize(self) -> (Md5Checksum, Option<ChecksumValue>) {
let md5 = self.md5.finalize()[..].try_into().unwrap();
let extra = match self.extra {
None => None,
Some(MultipartExtraChecksummer::Crc32(crc32)) => {
Some(ChecksumValue::Crc32(u32::to_be_bytes(crc32.finalize())))
}
Some(MultipartExtraChecksummer::Crc32c(crc32c)) => Some(ChecksumValue::Crc32c(
u32::to_be_bytes(u32::try_from(crc32c.finish()).unwrap()),
)),
Some(MultipartExtraChecksummer::Sha1(sha1)) => {
Some(ChecksumValue::Sha1(sha1.finalize()[..].try_into().unwrap()))
}
Some(MultipartExtraChecksummer::Sha256(sha256)) => Some(ChecksumValue::Sha256(
sha256.finalize()[..].try_into().unwrap(),
)),
};
(md5, extra)
}
}
// ----
/// Extract the value of the x-amz-checksum-algorithm header
pub(crate) fn request_checksum_algorithm(
headers: &HeaderMap<HeaderValue>,
) -> Result<Option<ChecksumAlgorithm>, Error> {
match headers.get(X_AMZ_CHECKSUM_ALGORITHM) {
None => Ok(None),
Some(x) if x == "CRC32" => Ok(Some(ChecksumAlgorithm::Crc32)),
Some(x) if x == "CRC32C" => Ok(Some(ChecksumAlgorithm::Crc32c)),
Some(x) if x == "SHA1" => Ok(Some(ChecksumAlgorithm::Sha1)),
Some(x) if x == "SHA256" => Ok(Some(ChecksumAlgorithm::Sha256)),
_ => Err(Error::bad_request("invalid checksum algorithm")),
}
}
/// Extract the value of any of the x-amz-checksum-* headers
pub(crate) fn request_checksum_value(
headers: &HeaderMap<HeaderValue>,
) -> Result<Option<ChecksumValue>, Error> {
let mut ret = vec![];
if let Some(crc32_str) = headers.get(X_AMZ_CHECKSUM_CRC32) {
let crc32 = BASE64_STANDARD
.decode(&crc32_str)
.ok()
.and_then(|x| x.try_into().ok())
.ok_or_bad_request("invalid x-amz-checksum-crc32 header")?;
ret.push(ChecksumValue::Crc32(crc32))
}
if let Some(crc32c_str) = headers.get(X_AMZ_CHECKSUM_CRC32C) {
let crc32c = BASE64_STANDARD
.decode(&crc32c_str)
.ok()
.and_then(|x| x.try_into().ok())
.ok_or_bad_request("invalid x-amz-checksum-crc32c header")?;
ret.push(ChecksumValue::Crc32c(crc32c))
}
if let Some(sha1_str) = headers.get(X_AMZ_CHECKSUM_SHA1) {
let sha1 = BASE64_STANDARD
.decode(&sha1_str)
.ok()
.and_then(|x| x.try_into().ok())
.ok_or_bad_request("invalid x-amz-checksum-sha1 header")?;
ret.push(ChecksumValue::Sha1(sha1))
}
if let Some(sha256_str) = headers.get(X_AMZ_CHECKSUM_SHA256) {
let sha256 = BASE64_STANDARD
.decode(&sha256_str)
.ok()
.and_then(|x| x.try_into().ok())
.ok_or_bad_request("invalid x-amz-checksum-sha256 header")?;
ret.push(ChecksumValue::Sha256(sha256))
}
if ret.len() > 1 {
return Err(Error::bad_request(
"multiple x-amz-checksum-* headers given",
));
}
Ok(ret.pop())
}
/// Checks for the presence of x-amz-checksum-algorithm;
/// if so, extracts the corresponding x-amz-checksum-* value
pub(crate) fn request_checksum_algorithm_value(
headers: &HeaderMap<HeaderValue>,
) -> Result<Option<ChecksumValue>, Error> {
match headers.get(X_AMZ_CHECKSUM_ALGORITHM) {
Some(x) if x == "CRC32" => {
let crc32 = headers
.get(X_AMZ_CHECKSUM_CRC32)
.and_then(|x| BASE64_STANDARD.decode(&x).ok())
.and_then(|x| x.try_into().ok())
.ok_or_bad_request("invalid x-amz-checksum-crc32 header")?;
Ok(Some(ChecksumValue::Crc32(crc32)))
}
Some(x) if x == "CRC32C" => {
let crc32c = headers
.get(X_AMZ_CHECKSUM_CRC32C)
.and_then(|x| BASE64_STANDARD.decode(&x).ok())
.and_then(|x| x.try_into().ok())
.ok_or_bad_request("invalid x-amz-checksum-crc32c header")?;
Ok(Some(ChecksumValue::Crc32c(crc32c)))
}
Some(x) if x == "SHA1" => {
let sha1 = headers
.get(X_AMZ_CHECKSUM_SHA1)
.and_then(|x| BASE64_STANDARD.decode(&x).ok())
.and_then(|x| x.try_into().ok())
.ok_or_bad_request("invalid x-amz-checksum-sha1 header")?;
Ok(Some(ChecksumValue::Sha1(sha1)))
}
Some(x) if x == "SHA256" => {
let sha256 = headers
.get(X_AMZ_CHECKSUM_SHA256)
.and_then(|x| BASE64_STANDARD.decode(&x).ok())
.and_then(|x| x.try_into().ok())
.ok_or_bad_request("invalid x-amz-checksum-sha256 header")?;
Ok(Some(ChecksumValue::Sha256(sha256)))
}
Some(_) => Err(Error::bad_request("invalid x-amz-checksum-algorithm")),
None => Ok(None),
}
}
pub(crate) fn add_checksum_response_headers(
checksum: &Option<ChecksumValue>,
mut resp: http::response::Builder,
) -> http::response::Builder {
match checksum {
Some(ChecksumValue::Crc32(crc32)) => {
resp = resp.header(X_AMZ_CHECKSUM_CRC32, BASE64_STANDARD.encode(&crc32));
}
Some(ChecksumValue::Crc32c(crc32c)) => {
resp = resp.header(X_AMZ_CHECKSUM_CRC32C, BASE64_STANDARD.encode(&crc32c));
}
Some(ChecksumValue::Sha1(sha1)) => {
resp = resp.header(X_AMZ_CHECKSUM_SHA1, BASE64_STANDARD.encode(&sha1));
}
Some(ChecksumValue::Sha256(sha256)) => {
resp = resp.header(X_AMZ_CHECKSUM_SHA256, BASE64_STANDARD.encode(&sha256));
}
None => (),
}
resp
}
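
A hedged usage sketch for the types above (the helper and its inputs are hypothetical; the real call sites live in the put and copy handlers): build ExpectedChecksums from what the client claimed, feed the body through a Checksummer, then verify and derive the ETag from the MD5.

// Hypothetical helper, assuming the items defined in this file are in scope.
fn check_body(chunks: &[&[u8]], content_sha256: Hash) -> Result<String, Error> {
    let expected = ExpectedChecksums {
        md5: None,
        sha256: Some(content_sha256), // e.g. taken from x-amz-content-sha256
        extra: None,
    };
    // `true` = also compute MD5 even if the client sent no content-md5,
    // so that an ETag can be derived afterwards.
    let mut summer = Checksummer::init(&expected, true);
    for chunk in chunks {
        summer.update(chunk);
    }
    let sums = summer.finalize();
    sums.verify(&expected)?;
    Ok(hex::encode(sums.md5.expect("md5 was requested")))
}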


@@ -1,22 +1,20 @@
 use std::pin::Pin;
-use std::sync::Arc;
 use std::time::{Duration, SystemTime, UNIX_EPOCH};
 
-use futures::{stream, stream::Stream, StreamExt};
-use md5::{Digest as Md5Digest, Md5};
+use futures::{stream, stream::Stream, StreamExt, TryStreamExt};
 
 use bytes::Bytes;
 use hyper::{Request, Response};
 use serde::Serialize;
 
 use garage_net::bytes_buf::BytesBuf;
+use garage_net::stream::read_stream_to_end;
 use garage_rpc::rpc_helper::OrderTag;
 use garage_table::*;
 use garage_util::data::*;
+use garage_util::error::Error as GarageError;
 use garage_util::time::*;
 
-use garage_model::garage::Garage;
-use garage_model::key_table::Key;
 use garage_model::s3::block_ref_table::*;
 use garage_model::s3::mpu_table::*;
 use garage_model::s3::object_table::*;
@@ -24,21 +22,26 @@ use garage_model::s3::version_table::*;
 
 use crate::helpers::*;
 use crate::s3::api_server::{ReqBody, ResBody};
+use crate::s3::checksum::*;
+use crate::s3::encryption::EncryptionParams;
 use crate::s3::error::*;
+use crate::s3::get::full_object_byte_stream;
 use crate::s3::multipart;
-use crate::s3::put::get_headers;
+use crate::s3::put::{get_headers, save_stream, ChecksumMode, SaveStreamResult};
 use crate::s3::xml::{self as s3_xml, xmlns_tag};
 
+// -------- CopyObject ---------
+
 pub async fn handle_copy(
-    garage: Arc<Garage>,
-    api_key: &Key,
+    ctx: ReqCtx,
     req: &Request<ReqBody>,
-    dest_bucket_id: Uuid,
     dest_key: &str,
 ) -> Result<Response<ResBody>, Error> {
     let copy_precondition = CopyPreconditionHeaders::parse(req)?;
 
-    let source_object = get_copy_source(&garage, api_key, req).await?;
+    let checksum_algorithm = request_checksum_algorithm(req.headers())?;
+
+    let source_object = get_copy_source(&ctx, req).await?;
 
     let (source_version, source_version_data, source_version_meta) =
         extract_source_info(&source_object)?;
@@ -46,26 +49,150 @@ pub async fn handle_copy(
     // Check precondition, e.g. x-amz-copy-source-if-match
     copy_precondition.check(source_version, &source_version_meta.etag)?;
 
+    // Determine encryption parameters
+    let (source_encryption, source_object_meta_inner) =
+        EncryptionParams::check_decrypt_for_copy_source(
+            &ctx.garage,
+            req.headers(),
+            &source_version_meta.encryption,
+        )?;
+    let dest_encryption = EncryptionParams::new_from_headers(&ctx.garage, req.headers())?;
+
+    // Extract source checksum info before source_object_meta_inner is consumed
+    let source_checksum = source_object_meta_inner.checksum;
+    let source_checksum_algorithm = source_checksum.map(|x| x.algorithm());
+
+    // If source object has a checksum, the destination object must as well.
+    // The x-amz-checksum-algorithm header allows to change that algorithm,
+    // but if it is absent, we must use the same as before
+    let checksum_algorithm = checksum_algorithm.or(source_checksum_algorithm);
+
+    // Determine metadata of destination object
+    let was_multipart = source_version_meta.etag.contains('-');
+    let dest_object_meta = ObjectVersionMetaInner {
+        headers: match req.headers().get("x-amz-metadata-directive") {
+            Some(v) if v == hyper::header::HeaderValue::from_static("REPLACE") => {
+                get_headers(req.headers())?
+            }
+            _ => source_object_meta_inner.into_owned().headers,
+        },
+        checksum: source_checksum,
+    };
+
+    // Do actual object copying
+    //
+    // In any of the following scenarios, we need to read the whole object
+    // data and re-write it again:
+    //
+    // - the data needs to be decrypted or encrypted
+    // - the requested checksum algorithm requires us to recompute a checksum
+    // - the original object was a multipart upload and a checksum algorithm
+    //   is defined (AWS specifies that in this case, we must recompute the
+    //   checksum from scratch as if this was a single big object and not
+    //   a multipart object, as the checksums are not computed in the same way)
+    //
+    // In other cases, we can just copy the metadata and reference the same blocks.
+    //
+    // See: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+
+    let must_recopy = !EncryptionParams::is_same(&source_encryption, &dest_encryption)
+        || source_checksum_algorithm != checksum_algorithm
+        || (was_multipart && checksum_algorithm.is_some());
+
+    let res = if !must_recopy {
+        // In most cases, we can just copy the metadata and link blocks of the
+        // old object from the new object.
+        handle_copy_metaonly(
+            ctx,
+            dest_key,
+            dest_object_meta,
+            dest_encryption,
+            source_version,
+            source_version_data,
+            source_version_meta,
+        )
+        .await?
+    } else {
+        let expected_checksum = ExpectedChecksums {
+            md5: None,
+            sha256: None,
+            extra: source_checksum,
+        };
+        let checksum_mode = if was_multipart || source_checksum_algorithm != checksum_algorithm {
+            ChecksumMode::Calculate(checksum_algorithm)
+        } else {
+            ChecksumMode::Verify(&expected_checksum)
+        };
+        // If source and dest encryption use different keys,
+        // we must decrypt content and re-encrypt, so rewrite all data blocks.
+        handle_copy_reencrypt(
+            ctx,
+            dest_key,
+            dest_object_meta,
+            dest_encryption,
+            source_version,
+            source_version_data,
+            source_encryption,
+            checksum_mode,
+        )
+        .await?
+    };
+
+    let last_modified = msec_to_rfc3339(res.version_timestamp);
+    let result = CopyObjectResult {
+        last_modified: s3_xml::Value(last_modified),
+        etag: s3_xml::Value(format!("\"{}\"", res.etag)),
+    };
+    let xml = s3_xml::to_xml_with_header(&result)?;
+
+    let mut resp = Response::builder()
+        .header("Content-Type", "application/xml")
+        .header("x-amz-version-id", hex::encode(res.version_uuid))
+        .header(
+            "x-amz-copy-source-version-id",
+            hex::encode(source_version.uuid),
+        );
+    dest_encryption.add_response_headers(&mut resp);
+    Ok(resp.body(string_body(xml))?)
+}
+
+async fn handle_copy_metaonly(
+    ctx: ReqCtx,
+    dest_key: &str,
+    dest_object_meta: ObjectVersionMetaInner,
+    dest_encryption: EncryptionParams,
+    source_version: &ObjectVersion,
+    source_version_data: &ObjectVersionData,
+    source_version_meta: &ObjectVersionMeta,
+) -> Result<SaveStreamResult, Error> {
+    let ReqCtx {
+        garage,
+        bucket_id: dest_bucket_id,
+        ..
+    } = ctx;
+
     // Generate parameters for copied object
     let new_uuid = gen_uuid();
     let new_timestamp = now_msec();
 
-    // Implement x-amz-metadata-directive: REPLACE
-    let new_meta = match req.headers().get("x-amz-metadata-directive") {
-        Some(v) if v == hyper::header::HeaderValue::from_static("REPLACE") => ObjectVersionMeta {
-            headers: get_headers(req.headers())?,
-            size: source_version_meta.size,
-            etag: source_version_meta.etag.clone(),
-        },
-        _ => source_version_meta.clone(),
+    let new_meta = ObjectVersionMeta {
+        encryption: dest_encryption.encrypt_meta(dest_object_meta)?,
+        size: source_version_meta.size,
+        etag: source_version_meta.etag.clone(),
     };
 
-    let etag = new_meta.etag.to_string();
+    let res = SaveStreamResult {
+        version_uuid: new_uuid,
+        version_timestamp: new_timestamp,
+        etag: new_meta.etag.clone(),
+    };
 
     // Save object copy
     match source_version_data {
         ObjectVersionData::DeleteMarker => unreachable!(),
         ObjectVersionData::Inline(_meta, bytes) => {
+            // bytes is either plaintext before&after or encrypted with the
+            // same keys, so it's ok to just copy it as is
             let dest_object_version = ObjectVersion {
                 uuid: new_uuid,
                 timestamp: new_timestamp,
@@ -96,7 +223,8 @@ pub async fn handle_copy(
                 uuid: new_uuid,
                 timestamp: new_timestamp,
                 state: ObjectVersionState::Uploading {
-                    headers: new_meta.headers.clone(),
+                    encryption: new_meta.encryption.clone(),
+                    checksum_algorithm: None,
                     multipart: false,
                 },
             };
@@ -163,28 +291,45 @@ pub async fn handle_copy(
         }
     }
 
-    let last_modified = msec_to_rfc3339(new_timestamp);
-    let result = CopyObjectResult {
-        last_modified: s3_xml::Value(last_modified),
-        etag: s3_xml::Value(format!("\"{}\"", etag)),
-    };
-    let xml = s3_xml::to_xml_with_header(&result)?;
-
-    Ok(Response::builder()
-        .header("Content-Type", "application/xml")
-        .header("x-amz-version-id", hex::encode(new_uuid))
-        .header(
-            "x-amz-copy-source-version-id",
-            hex::encode(source_version.uuid),
-        )
-        .body(string_body(xml))?)
+    Ok(res)
+}
+
+async fn handle_copy_reencrypt(
+    ctx: ReqCtx,
+    dest_key: &str,
+    dest_object_meta: ObjectVersionMetaInner,
+    dest_encryption: EncryptionParams,
+    source_version: &ObjectVersion,
+    source_version_data: &ObjectVersionData,
+    source_encryption: EncryptionParams,
+    checksum_mode: ChecksumMode<'_>,
+) -> Result<SaveStreamResult, Error> {
+    // basically we will read the source data (decrypt if necessary)
+    // and save that in a new object (encrypt if necessary),
+    // by combining the code used in getobject and putobject
+    let source_stream = full_object_byte_stream(
+        ctx.garage.clone(),
+        source_version,
+        source_version_data,
+        source_encryption,
+    );
+
+    save_stream(
+        &ctx,
+        dest_object_meta,
+        dest_encryption,
+        source_stream.map_err(|e| Error::from(GarageError::from(e))),
+        &dest_key.to_string(),
+        checksum_mode,
+    )
+    .await
 }
 
+// -------- UploadPartCopy ---------
+
 pub async fn handle_upload_part_copy(
-    garage: Arc<Garage>,
-    api_key: &Key,
+    ctx: ReqCtx,
     req: &Request<ReqBody>,
-    dest_bucket_id: Uuid,
     dest_key: &str,
     part_number: u64,
     upload_id: &str,
@@ -194,17 +339,37 @@ pub async fn handle_upload_part_copy(
     let dest_upload_id = multipart::decode_upload_id(upload_id)?;
 
     let dest_key = dest_key.to_string();
-    let (source_object, (_, _, mut dest_mpu)) = futures::try_join!(
-        get_copy_source(&garage, api_key, req),
-        multipart::get_upload(&garage, &dest_bucket_id, &dest_key, &dest_upload_id)
+    let (source_object, (_, dest_version, mut dest_mpu)) = futures::try_join!(
+        get_copy_source(&ctx, req),
+        multipart::get_upload(&ctx, &dest_key, &dest_upload_id)
     )?;
+    let ReqCtx { garage, .. } = ctx;
 
     let (source_object_version, source_version_data, source_version_meta) =
         extract_source_info(&source_object)?;
 
     // Check precondition on source, e.g. x-amz-copy-source-if-match
     copy_precondition.check(source_object_version, &source_version_meta.etag)?;
 
+    // Determine encryption parameters
+    let (source_encryption, _) = EncryptionParams::check_decrypt_for_copy_source(
+        &garage,
+        req.headers(),
+        &source_version_meta.encryption,
+    )?;
+    let (dest_object_encryption, dest_object_checksum_algorithm) = match dest_version.state {
+        ObjectVersionState::Uploading {
+            encryption,
+            checksum_algorithm,
+            ..
+        } => (encryption, checksum_algorithm),
+        _ => unreachable!(),
+    };
+    let (dest_encryption, _) =
+        EncryptionParams::check_decrypt(&garage, req.headers(), &dest_object_encryption)?;
+    let same_encryption = EncryptionParams::is_same(&source_encryption, &dest_encryption);
+
     // Check source range is valid
     let source_range = match req.headers().get("x-amz-copy-source-range") {
         Some(range) => {
@@ -226,21 +391,16 @@
     };
 
     // Check source version is not inlined
-    match source_version_data {
-        ObjectVersionData::DeleteMarker => unreachable!(),
-        ObjectVersionData::Inline(_meta, _bytes) => {
-            // This is only for small files, we don't bother handling this.
-            // (in AWS UploadPartCopy works for parts at least 5MB which
-            // is never the case of an inline object)
-            return Err(Error::bad_request(
-                "Source object is too small (minimum part size is 5Mb)",
-            ));
-        }
-        ObjectVersionData::FirstBlock(_meta, _first_block_hash) => (),
-    };
+    if matches!(source_version_data, ObjectVersionData::Inline(_, _)) {
+        // This is only for small files, we don't bother handling this.
+        // (in AWS UploadPartCopy works for parts at least 5MB which
+        // is never the case of an inline object)
+        return Err(Error::bad_request(
+            "Source object is too small (minimum part size is 5Mb)",
+        ));
+    }
 
-    // Fetch source versin with its block list,
-    // and destination version to check part hasn't yet been uploaded
+    // Fetch source version with its block list
     let source_version = garage
         .version_table
         .get(&source_object_version.uuid, &EmptyKey)
@@ -250,7 +410,9 @@
     // We want to reuse blocks from the source version as much as possible.
     // However, we still need to get the data from these blocks
     // because we need to know it to calculate the MD5sum of the part
-    // which is used as its ETag.
+    // which is used as its ETag. For encrypted sources or destinations,
+    // we must always read(+decrypt) and then write(+encrypt), so we
+    // can never reuse data blocks as is.
 
     // First, calculate what blocks we want to keep,
     // and the subrange of the block to take, if the bounds of the
@@ -299,7 +461,9 @@
         dest_mpu_part_key,
         MpuPart {
             version: dest_version_id,
+            // These are all filled in later (bottom of this function)
             etag: None,
+            checksum: None,
             size: None,
         },
     );
@@ -312,32 +476,55 @@
         },
         false,
     );
+    // write an empty version now to be the parent of the block_ref entries
+    garage.version_table.insert(&dest_version).await?;
 
     // Now, actually copy the blocks
-    let mut md5hasher = Md5::new();
+    let mut checksummer = Checksummer::init(&Default::default(), !dest_encryption.is_encrypted())
+        .add(dest_object_checksum_algorithm);
 
     // First, create a stream that is able to read the source blocks
     // and extract the subrange if necessary.
     // The second returned value is an Option<Hash>, that is Some
     // if and only if the block returned is a block that already existed
-    // in the Garage data store (thus we don't need to save it again).
+    // in the Garage data store and can be reused as-is instead of having
+    // to save it again. This excludes encrypted source blocks that we had
+    // to decrypt.
     let garage2 = garage.clone();
     let order_stream = OrderTag::stream();
     let source_blocks = stream::iter(blocks_to_copy)
         .enumerate()
-        .flat_map(|(i, (block_hash, range_to_copy))| {
+        .map(|(i, (block_hash, range_to_copy))| {
             let garage3 = garage2.clone();
-            stream::once(async move {
-                let data = garage3
-                    .block_manager
-                    .rpc_get_block(&block_hash, Some(order_stream.order(i as u64)))
+            async move {
+                let stream = source_encryption
                    .get_block(&garage3, &block_hash, Some(order_stream.order(i as u64)))
                     .await?;
+                let data = read_stream_to_end(stream).await?.into_bytes();
+                // For each item, we return a tuple of:
+                // 1. the full data block (decrypted)
+                // 2. an Option<Hash> that indicates the hash of the block in the block store,
+                //    only if it can be re-used as-is in the copied object
                 match range_to_copy {
-                    Some(r) => Ok((data.slice(r), None)),
-                    None => Ok((data, Some(block_hash))),
+                    Some(r) => {
+                        // If we are taking a subslice of the data, we cannot reuse the block as-is
+                        Ok((data.slice(r), None))
+                    }
+                    None if same_encryption => {
+                        // If the data is unencrypted before & after, or if we are using
+                        // the same encryption key, we can reuse the stored block, no need
+                        // to re-send it to storage nodes.
+                        Ok((data, Some(block_hash)))
+                    }
+                    None => {
+                        // If we are decrypting / (re)encrypting with different keys,
+                        // we cannot reuse the block as-is
+                        Ok((data, None))
+                    }
                 }
-            })
+            }
         })
+        .buffered(2)
         .peekable();
 
     // The defragmenter is a custom stream (defined below) that concatenates
@@ -345,22 +532,39 @@
     // It returns a series of (Vec<u8>, Option<Hash>).
     // When it is done, it returns an empty vec.
     // Same as the previous iterator, the Option is Some(_) if and only if
-    // it's an existing block of the Garage data store.
+    // it's an existing block of the Garage data store that can be reused.
     let mut defragmenter = Defragmenter::new(garage.config.block_size, Box::pin(source_blocks));
 
     let mut current_offset = 0;
     let mut next_block = defragmenter.next().await?;
 
+    // TODO this could be optimized similarly to read_and_put_blocks
+    // low priority because uploadpartcopy is rarely used
     loop {
         let (data, existing_block_hash) = next_block;
         if data.is_empty() {
             break;
         }
 
-        md5hasher.update(&data[..]);
+        let data_len = data.len() as u64;
 
-        let must_upload = existing_block_hash.is_none();
-        let final_hash = existing_block_hash.unwrap_or_else(|| blake2sum(&data[..]));
+        let (checksummer_updated, (data_to_upload, final_hash)) =
+            tokio::task::spawn_blocking(move || {
+                checksummer.update(&data[..]);
+
+                let tup = match existing_block_hash {
+                    Some(hash) if same_encryption => (None, hash),
+                    _ => {
+                        let data_enc = dest_encryption.encrypt_block(data)?;
+                        let hash = blake2sum(&data_enc);
+                        (Some(data_enc), hash)
+                    }
+                };
+                Ok::<_, Error>((checksummer, tup))
+            })
+            .await
+            .unwrap()?;
+        checksummer = checksummer_updated;
 
         dest_version.blocks.clear();
         dest_version.blocks.put(
@@ -370,10 +574,10 @@
             },
             VersionBlock {
                 hash: final_hash,
-                size: data.len() as u64,
+                size: data_len,
             },
         );
-        current_offset += data.len() as u64;
+        current_offset += data_len;
 
         let block_ref = BlockRef {
             block: final_hash,
@@ -381,36 +585,34 @@
             deleted: false.into(),
         };
 
-        let garage2 = garage.clone();
-        let res = futures::try_join!(
+        let (_, _, _, next) = futures::try_join!(
             // Thing 1: if the block is not exactly a block that existed before,
             // we need to insert that data as a new block.
-            async move {
-                if must_upload {
-                    garage2
+            async {
+                if let Some(final_data) = data_to_upload {
+                    garage
                         .block_manager
-                        .rpc_put_block(final_hash, data, None)
+                        .rpc_put_block(final_hash, final_data, dest_encryption.is_encrypted(), None)
                         .await
                 } else {
                     Ok(())
                 }
             },
-            async {
-                // Thing 2: we need to insert the block in the version
-                garage.version_table.insert(&dest_version).await?;
-                // Thing 3: we need to add a block reference
-                garage.block_ref_table.insert(&block_ref).await
-            },
-            // Thing 4: we need to prefetch the next block
+            // Thing 2: we need to insert the block in the version
+            garage.version_table.insert(&dest_version),
+            // Thing 3: we need to add a block reference
+            garage.block_ref_table.insert(&block_ref),
+            // Thing 4: we need to read the next block
            defragmenter.next(),
         )?;
-        next_block = res.2;
+        next_block = next;
     }
 
     assert_eq!(current_offset, source_range.length);
 
-    let data_md5sum = md5hasher.finalize();
-    let etag = hex::encode(data_md5sum);
+    let checksums = checksummer.finalize();
+    let etag = dest_encryption.etag_from_md5(&checksums.md5);
+    let checksum = checksums.extract(dest_object_checksum_algorithm);
 
     // Put the part's ETag in the Versiontable
     dest_mpu.parts.put(
@@ -418,6 +620,7 @@
         MpuPart {
             version: dest_version_id,
             etag: Some(etag.clone()),
+            checksum,
             size: Some(current_offset),
         },
     );
@@ -430,20 +633,21 @@
         last_modified: s3_xml::Value(msec_to_rfc3339(source_object_version.timestamp)),
     })?;
 
-    Ok(Response::builder()
+    let mut resp = Response::builder()
         .header("Content-Type", "application/xml")
         .header(
             "x-amz-copy-source-version-id",
             hex::encode(source_object_version.uuid),
-        )
-        .body(string_body(resp_xml))?)
+        );
+    dest_encryption.add_response_headers(&mut resp);
+    Ok(resp.body(string_body(resp_xml))?)
 }
 
-async fn get_copy_source(
-    garage: &Garage,
-    api_key: &Key,
-    req: &Request<ReqBody>,
-) -> Result<Object, Error> {
+async fn get_copy_source(ctx: &ReqCtx, req: &Request<ReqBody>) -> Result<Object, Error> {
+    let ReqCtx {
+        garage, api_key, ..
+    } = ctx;
+
     let copy_source = req.headers().get("x-amz-copy-source").unwrap().to_str()?;
     let copy_source = percent_encoding::percent_decode_str(copy_source).decode_utf8()?;
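
One detail in the UploadPartCopy loop above deserves a note: hashing and re-encrypting a block is CPU-bound, so the work is pushed to tokio's blocking pool, with the Checksummer moved into the closure and handed back out for the next iteration. A minimal self-contained sketch of that ownership round-trip (sha2 stands in for the checksummer here, purely for illustration):

use sha2::{Digest, Sha256};

#[tokio::main]
async fn main() {
    let mut hasher = Sha256::new();
    let data: Vec<u8> = vec![0u8; 1 << 20]; // pretend this is a data block

    // Move the hasher (and the block) into the blocking pool, do the
    // CPU-bound work there, and receive the hasher back for the next
    // iteration, the same ownership round-trip as the copy loop above.
    hasher = tokio::task::spawn_blocking(move || {
        hasher.update(&data);
        hasher
    })
    .await
    .expect("blocking task panicked");

    println!("digest: {:x}", hasher.finalize());
}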


@@ -21,16 +21,13 @@ use crate::s3::error::*;
 use crate::s3::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
 use crate::signature::verify_signed_content;
 
-use garage_model::bucket_table::{Bucket, CorsRule as GarageCorsRule};
+use garage_model::bucket_table::{Bucket, BucketParams, CorsRule as GarageCorsRule};
 use garage_model::garage::Garage;
 use garage_util::data::*;
 
-pub async fn handle_get_cors(bucket: &Bucket) -> Result<Response<ResBody>, Error> {
-    let param = bucket
-        .params()
-        .ok_or_internal_error("Bucket should not be deleted at this point")?;
-
-    if let Some(cors) = param.cors_config.get() {
+pub async fn handle_get_cors(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
+    let ReqCtx { bucket_params, .. } = ctx;
+    if let Some(cors) = bucket_params.cors_config.get() {
         let wc = CorsConfiguration {
             xmlns: (),
             cors_rules: cors
@@ -50,16 +47,18 @@ pub async fn handle_get_cors(bucket: &Bucket) -> Result<Response<ResBody>, Error
     }
 }
 
-pub async fn handle_delete_cors(
-    garage: Arc<Garage>,
-    mut bucket: Bucket,
-) -> Result<Response<ResBody>, Error> {
-    let param = bucket
-        .params_mut()
-        .ok_or_internal_error("Bucket should not be deleted at this point")?;
-
-    param.cors_config.update(None);
-    garage.bucket_table.insert(&bucket).await?;
+pub async fn handle_delete_cors(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
+    let ReqCtx {
+        garage,
+        bucket_id,
+        mut bucket_params,
+        ..
+    } = ctx;
+    bucket_params.cors_config.update(None);
+    garage
+        .bucket_table
+        .insert(&Bucket::present(bucket_id, bucket_params))
+        .await?;
 
     Ok(Response::builder()
         .status(StatusCode::NO_CONTENT)
@@ -67,28 +66,33 @@ pub async fn handle_delete_cors(
 }
 
 pub async fn handle_put_cors(
-    garage: Arc<Garage>,
-    mut bucket: Bucket,
+    ctx: ReqCtx,
     req: Request<ReqBody>,
     content_sha256: Option<Hash>,
 ) -> Result<Response<ResBody>, Error> {
+    let ReqCtx {
+        garage,
+        bucket_id,
+        mut bucket_params,
+        ..
+    } = ctx;
+
     let body = BodyExt::collect(req.into_body()).await?.to_bytes();
 
     if let Some(content_sha256) = content_sha256 {
         verify_signed_content(content_sha256, &body[..])?;
     }
 
-    let param = bucket
-        .params_mut()
-        .ok_or_internal_error("Bucket should not be deleted at this point")?;
-
     let conf: CorsConfiguration = from_reader(&body as &[u8])?;
     conf.validate()?;
 
-    param
+    bucket_params
         .cors_config
         .update(Some(conf.into_garage_cors_config()?));
-    garage.bucket_table.insert(&bucket).await?;
+    garage
+        .bucket_table
+        .insert(&Bucket::present(bucket_id, bucket_params))
+        .await?;
 
     Ok(Response::builder()
         .status(StatusCode::OK)
@@ -115,7 +119,8 @@ pub async fn handle_options_api(
     let bucket_id = helper.resolve_global_bucket_name(&bn).await?;
     if let Some(id) = bucket_id {
         let bucket = garage.bucket_helper().get_existing_bucket(id).await?;
-        handle_options_for_bucket(req, &bucket)
+        let bucket_params = bucket.state.into_option().unwrap();
+        handle_options_for_bucket(req, &bucket_params)
     } else {
         // If there is a bucket name in the request, but that name
         // does not correspond to a global alias for a bucket,
@@ -145,7 +150,7 @@ pub async fn handle_options_api(
 pub fn handle_options_for_bucket(
     req: &Request<IncomingBody>,
-    bucket: &Bucket,
+    bucket_params: &BucketParams,
 ) -> Result<Response<EmptyBody>, CommonError> {
     let origin = req
         .headers()
@@ -162,7 +167,7 @@ pub fn handle_options_for_bucket(
         None => vec![],
     };
 
-    if let Some(cors_config) = bucket.params().unwrap().cors_config.get() {
+    if let Some(cors_config) = bucket_params.cors_config.get() {
         let matching_rule = cors_config
             .iter()
             .find(|rule| cors_rule_matches(rule, origin, request_method, request_headers.iter()));
@@ -181,10 +186,10 @@
 }
 
 pub fn find_matching_cors_rule<'a>(
-    bucket: &'a Bucket,
+    bucket_params: &'a BucketParams,
     req: &Request<impl Body>,
 ) -> Result<Option<&'a GarageCorsRule>, Error> {
-    if let Some(cors_config) = bucket.params().unwrap().cors_config.get() {
+    if let Some(cors_config) = bucket_params.cors_config.get() {
         if let Some(origin) = req.headers().get("Origin") {
             let origin = origin.to_str()?;
             let request_headers = match req.headers().get(ACCESS_CONTROL_REQUEST_HEADERS) {
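
For readers following the CORS plumbing: cors_rule_matches itself is untouched by this diff. A toy standalone version of the origin and method check it performs (simplified; the real GarageCorsRule also matches requested headers and carries fields like expose_headers):

// Toy model of CORS rule matching; not Garage's actual implementation.
struct ToyCorsRule {
    allow_origins: Vec<String>, // may contain "*"
    allow_methods: Vec<String>, // may contain "*"
}

fn toy_rule_matches(rule: &ToyCorsRule, origin: &str, method: &str) -> bool {
    let origin_ok = rule.allow_origins.iter().any(|o| o == "*" || o == origin);
    let method_ok = rule.allow_methods.iter().any(|m| m == "*" || m == method);
    origin_ok && method_ok
}

fn main() {
    let rule = ToyCorsRule {
        allow_origins: vec!["https://example.com".into()],
        allow_methods: vec!["GET".into(), "PUT".into()],
    };
    assert!(toy_rule_matches(&rule, "https://example.com", "GET"));
    assert!(!toy_rule_matches(&rule, "https://evil.example", "GET"));
}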


@@ -1,11 +1,8 @@
-use std::sync::Arc;
-
 use http_body_util::BodyExt;
 use hyper::{Request, Response, StatusCode};
 
 use garage_util::data::*;
 
-use garage_model::garage::Garage;
 use garage_model::s3::object_table::*;
 
 use crate::helpers::*;
@@ -15,14 +12,13 @@ use crate::s3::put::next_timestamp;
 use crate::s3::xml as s3_xml;
 use crate::signature::verify_signed_content;
 
-async fn handle_delete_internal(
-    garage: &Garage,
-    bucket_id: Uuid,
-    key: &str,
-) -> Result<(Uuid, Uuid), Error> {
+async fn handle_delete_internal(ctx: &ReqCtx, key: &str) -> Result<(Uuid, Uuid), Error> {
+    let ReqCtx {
+        garage, bucket_id, ..
+    } = ctx;
     let object = garage
         .object_table
-        .get(&bucket_id, &key.to_string())
+        .get(bucket_id, &key.to_string())
         .await?
         .ok_or(Error::NoSuchKey)?; // No need to delete
@@ -44,7 +40,7 @@ async fn handle_delete_internal(
     };
 
     let object = Object::new(
-        bucket_id,
+        *bucket_id,
         key.into(),
         vec![ObjectVersion {
             uuid: del_uuid,
@@ -58,12 +54,8 @@ async fn handle_delete_internal(
     Ok((deleted_version, del_uuid))
 }
 
-pub async fn handle_delete(
-    garage: Arc<Garage>,
-    bucket_id: Uuid,
-    key: &str,
-) -> Result<Response<ResBody>, Error> {
-    match handle_delete_internal(&garage, bucket_id, key).await {
+pub async fn handle_delete(ctx: ReqCtx, key: &str) -> Result<Response<ResBody>, Error> {
+    match handle_delete_internal(&ctx, key).await {
         Ok(_) | Err(Error::NoSuchKey) => Ok(Response::builder()
             .status(StatusCode::NO_CONTENT)
             .body(empty_body())
@@ -73,8 +65,7 @@ pub async fn handle_delete(
 }
 
 pub async fn handle_delete_objects(
-    garage: Arc<Garage>,
-    bucket_id: Uuid,
+    ctx: ReqCtx,
     req: Request<ReqBody>,
     content_sha256: Option<Hash>,
 ) -> Result<Response<ResBody>, Error> {
@@ -91,7 +82,7 @@ pub async fn handle_delete_objects(
     let mut ret_errors = Vec::new();
 
     for obj in cmd.objects.iter() {
-        match handle_delete_internal(&garage, bucket_id, &obj.key).await {
+        match handle_delete_internal(&ctx, &obj.key).await {
             Ok((deleted_version, delete_marker_version)) => {
                 if cmd.quiet {
                     continue;
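
A reminder of the semantics behind handle_delete_internal, since the middle of the function is elided above: an S3 delete does not erase data synchronously. It appends a new object version that acts as a delete marker, and block reclamation happens later in background workers. The Object::new call continues roughly as below; the state and timestamp fields shown here are assumptions, since this diff does not display that hunk:

let object = Object::new(
    *bucket_id,
    key.into(),
    vec![ObjectVersion {
        uuid: del_uuid,
        timestamp: del_timestamp, // assumed: obtained via next_timestamp
        // assumed: a completed version whose data is a delete marker
        state: ObjectVersionState::Complete(ObjectVersionData::DeleteMarker),
    }],
);
garage.object_table.insert(&object).await?;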

src/api/s3/encryption.rs (new file, 595 lines)

@@ -0,0 +1,595 @@
use std::borrow::Cow;
use std::convert::TryInto;
use std::pin::Pin;
use aes_gcm::{
aead::stream::{DecryptorLE31, EncryptorLE31, StreamLE31},
aead::{Aead, AeadCore, KeyInit, OsRng},
aes::cipher::crypto_common::rand_core::RngCore,
aes::cipher::typenum::Unsigned,
Aes256Gcm, Key, Nonce,
};
use base64::prelude::*;
use bytes::Bytes;
use futures::stream::Stream;
use futures::task;
use tokio::io::BufReader;
use http::header::{HeaderMap, HeaderName, HeaderValue};
use garage_net::bytes_buf::BytesBuf;
use garage_net::stream::{stream_asyncread, ByteStream};
use garage_rpc::rpc_helper::OrderTag;
use garage_util::data::Hash;
use garage_util::error::Error as GarageError;
use garage_util::migrate::Migrate;
use garage_model::garage::Garage;
use garage_model::s3::object_table::{ObjectVersionEncryption, ObjectVersionMetaInner};
use crate::common_error::*;
use crate::s3::checksum::Md5Checksum;
use crate::s3::error::Error;
const X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM: HeaderName =
HeaderName::from_static("x-amz-server-side-encryption-customer-algorithm");
const X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY: HeaderName =
HeaderName::from_static("x-amz-server-side-encryption-customer-key");
const X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5: HeaderName =
HeaderName::from_static("x-amz-server-side-encryption-customer-key-md5");
const X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM: HeaderName =
HeaderName::from_static("x-amz-copy-source-server-side-encryption-customer-algorithm");
const X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY: HeaderName =
HeaderName::from_static("x-amz-copy-source-server-side-encryption-customer-key");
const X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5: HeaderName =
HeaderName::from_static("x-amz-copy-source-server-side-encryption-customer-key-md5");
const CUSTOMER_ALGORITHM_AES256: &[u8] = b"AES256";
type Md5Output = md5::digest::Output<md5::Md5Core>;
type StreamNonceSize = aes_gcm::aead::stream::NonceSize<Aes256Gcm, StreamLE31<Aes256Gcm>>;
// Data blocks are encrypted by smaller chunks of size 4096 bytes,
// so that data can be streamed when reading.
// This size has to be known and has to be constant, or data won't be
// readable anymore. DO NOT CHANGE THIS VALUE.
const STREAM_ENC_PLAIN_CHUNK_SIZE: usize = 0x1000; // 4096 bytes
const STREAM_ENC_CYPER_CHUNK_SIZE: usize = STREAM_ENC_PLAIN_CHUNK_SIZE + 16;
#[derive(Clone, Copy)]
pub enum EncryptionParams {
Plaintext,
SseC {
client_key: Key<Aes256Gcm>,
client_key_md5: Md5Output,
compression_level: Option<i32>,
},
}
impl EncryptionParams {
pub fn is_encrypted(&self) -> bool {
!matches!(self, Self::Plaintext)
}
pub fn is_same(a: &Self, b: &Self) -> bool {
let relevant_info = |x: &Self| match x {
Self::Plaintext => None,
Self::SseC {
client_key,
compression_level,
..
} => Some((*client_key, compression_level.is_some())),
};
relevant_info(a) == relevant_info(b)
}
pub fn new_from_headers(
garage: &Garage,
headers: &HeaderMap,
) -> Result<EncryptionParams, Error> {
let key = parse_request_headers(
headers,
&X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM,
&X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY,
&X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5,
)?;
match key {
Some((client_key, client_key_md5)) => Ok(EncryptionParams::SseC {
client_key,
client_key_md5,
compression_level: garage.config.compression_level,
}),
None => Ok(EncryptionParams::Plaintext),
}
}
pub fn add_response_headers(&self, resp: &mut http::response::Builder) {
if let Self::SseC { client_key_md5, .. } = self {
let md5 = BASE64_STANDARD.encode(&client_key_md5);
resp.headers_mut().unwrap().insert(
X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM,
HeaderValue::from_bytes(CUSTOMER_ALGORITHM_AES256).unwrap(),
);
resp.headers_mut().unwrap().insert(
X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5,
HeaderValue::from_bytes(md5.as_bytes()).unwrap(),
);
}
}
pub fn check_decrypt<'a>(
garage: &Garage,
headers: &HeaderMap,
obj_enc: &'a ObjectVersionEncryption,
) -> Result<(Self, Cow<'a, ObjectVersionMetaInner>), Error> {
let key = parse_request_headers(
headers,
&X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM,
&X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY,
&X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5,
)?;
Self::check_decrypt_common(garage, key, obj_enc)
}
pub fn check_decrypt_for_copy_source<'a>(
garage: &Garage,
headers: &HeaderMap,
obj_enc: &'a ObjectVersionEncryption,
) -> Result<(Self, Cow<'a, ObjectVersionMetaInner>), Error> {
let key = parse_request_headers(
headers,
&X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM,
&X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY,
&X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5,
)?;
Self::check_decrypt_common(garage, key, obj_enc)
}
fn check_decrypt_common<'a>(
garage: &Garage,
key: Option<(Key<Aes256Gcm>, Md5Output)>,
obj_enc: &'a ObjectVersionEncryption,
) -> Result<(Self, Cow<'a, ObjectVersionMetaInner>), Error> {
match (key, &obj_enc) {
(
Some((client_key, client_key_md5)),
ObjectVersionEncryption::SseC { inner, compressed },
) => {
let enc = Self::SseC {
client_key,
client_key_md5,
compression_level: if *compressed {
Some(garage.config.compression_level.unwrap_or(1))
} else {
None
},
};
let plaintext = enc.decrypt_blob(&inner)?;
let inner = ObjectVersionMetaInner::decode(&plaintext)
.ok_or_internal_error("Could not decode encrypted metadata")?;
Ok((enc, Cow::Owned(inner)))
}
(None, ObjectVersionEncryption::Plaintext { inner }) => {
Ok((Self::Plaintext, Cow::Borrowed(inner)))
}
(_, ObjectVersionEncryption::SseC { .. }) => {
Err(Error::bad_request("Object is encrypted"))
}
(Some(_), _) => {
// TODO: should this be an OK scenario?
Err(Error::bad_request("Trying to decrypt a plaintext object"))
}
}
}
pub fn encrypt_meta(
&self,
meta: ObjectVersionMetaInner,
) -> Result<ObjectVersionEncryption, Error> {
match self {
Self::SseC {
compression_level, ..
} => {
let plaintext = meta.encode().map_err(GarageError::from)?;
let ciphertext = self.encrypt_blob(&plaintext)?;
Ok(ObjectVersionEncryption::SseC {
inner: ciphertext.into_owned(),
compressed: compression_level.is_some(),
})
}
Self::Plaintext => Ok(ObjectVersionEncryption::Plaintext { inner: meta }),
}
}
// ---- generating object Etag values ----
pub fn etag_from_md5(&self, md5sum: &Option<Md5Checksum>) -> String {
match self {
Self::Plaintext => md5sum
.map(|x| hex::encode(&x[..]))
.expect("md5 digest should have been computed"),
Self::SseC { .. } => {
// AWS specifies that for encrypted objects, the Etag is not
// the md5sum of the data, but doesn't say what it is.
// So we just put some random bytes.
let mut random = [0u8; 16];
OsRng.fill_bytes(&mut random);
hex::encode(&random)
}
}
}
// ---- generic functions for encrypting / decrypting blobs ----
// Prepends a randomly-generated nonce to the encrypted value.
// This is used for encrypting object metadata and inlined data for small objects.
// This does not compress anything.
pub fn encrypt_blob<'a>(&self, blob: &'a [u8]) -> Result<Cow<'a, [u8]>, Error> {
match self {
Self::SseC { client_key, .. } => {
let cipher = Aes256Gcm::new(&client_key);
let nonce = Aes256Gcm::generate_nonce(&mut OsRng);
let ciphertext = cipher
.encrypt(&nonce, blob)
.ok_or_internal_error("Encryption failed")?;
Ok(Cow::Owned([nonce.to_vec(), ciphertext].concat()))
}
Self::Plaintext => Ok(Cow::Borrowed(blob)),
}
}
pub fn decrypt_blob<'a>(&self, blob: &'a [u8]) -> Result<Cow<'a, [u8]>, Error> {
match self {
Self::SseC { client_key, .. } => {
let cipher = Aes256Gcm::new(&client_key);
let nonce_size = <Aes256Gcm as AeadCore>::NonceSize::to_usize();
let nonce = Nonce::from_slice(
blob.get(..nonce_size)
.ok_or_internal_error("invalid encrypted data")?,
);
let plaintext = cipher
.decrypt(nonce, &blob[nonce_size..])
.ok_or_bad_request(
"Invalid encryption key, could not decrypt object metadata.",
)?;
Ok(Cow::Owned(plaintext))
}
Self::Plaintext => Ok(Cow::Borrowed(blob)),
}
}
// ---- functions for encrypting / decrypting byte streams ----
/// Get a data block from the storage node, and decrypt+decompress it
/// if necessary. If the object is plaintext, just fetch it without any processing.
pub async fn get_block(
&self,
garage: &Garage,
hash: &Hash,
order: Option<OrderTag>,
) -> Result<ByteStream, GarageError> {
let raw_block = garage
.block_manager
.rpc_get_block_streaming(hash, order)
.await?;
Ok(self.decrypt_block_stream(raw_block))
}
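// Note: decryption happens before decompression, mirroring encrypt_block()
// below, which compresses first and then encrypts.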
pub fn decrypt_block_stream(&self, stream: ByteStream) -> ByteStream {
match self {
Self::Plaintext => stream,
Self::SseC {
client_key,
compression_level,
..
} => {
let plaintext = DecryptStream::new(stream, *client_key);
if compression_level.is_some() {
let reader = stream_asyncread(Box::pin(plaintext));
let reader = BufReader::new(reader);
let reader = async_compression::tokio::bufread::ZstdDecoder::new(reader);
Box::pin(tokio_util::io::ReaderStream::new(reader))
} else {
Box::pin(plaintext)
}
}
}
}
/// Encrypt a data block if encryption is set, for use before
/// putting the data blocks into storage
pub fn encrypt_block(&self, block: Bytes) -> Result<Bytes, Error> {
match self {
Self::Plaintext => Ok(block),
Self::SseC {
client_key,
compression_level,
..
} => {
let block = if let Some(level) = compression_level {
Cow::Owned(
garage_block::zstd_encode(block.as_ref(), *level)
.ok_or_internal_error("failed to compress data block")?,
)
} else {
Cow::Borrowed(block.as_ref())
};
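// Capacity estimate: room for the nonce prefix plus one 16-byte AES-GCM tag
// per chunk (block.len() / 64 is a deliberately generous upper bound).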
let mut ret = Vec::with_capacity(block.len() + 32 + block.len() / 64);
let mut nonce: Nonce<StreamNonceSize> = Default::default();
OsRng.fill_bytes(&mut nonce);
ret.extend_from_slice(nonce.as_slice());
let mut cipher = EncryptorLE31::<Aes256Gcm>::new(&client_key, &nonce);
let mut iter = block.chunks(STREAM_ENC_PLAIN_CHUNK_SIZE).peekable();
if iter.peek().is_none() {
// Empty stream: we encrypt an empty last chunk
let chunk_enc = cipher
.encrypt_last(&[][..])
.ok_or_internal_error("failed to encrypt chunk")?;
ret.extend_from_slice(&chunk_enc);
} else {
loop {
let chunk = iter.next().unwrap();
if iter.peek().is_some() {
let chunk_enc = cipher
.encrypt_next(chunk)
.ok_or_internal_error("failed to encrypt chunk")?;
assert_eq!(chunk.len(), STREAM_ENC_PLAIN_CHUNK_SIZE);
assert_eq!(chunk_enc.len(), STREAM_ENC_CYPER_CHUNK_SIZE);
ret.extend_from_slice(&chunk_enc);
} else {
// use encrypt_last for the last chunk
let chunk_enc = cipher
.encrypt_last(chunk)
.ok_or_internal_error("failed to encrypt chunk")?;
ret.extend_from_slice(&chunk_enc);
break;
}
}
}
Ok(ret.into())
}
}
}
}
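// Illustrative sketch (not part of the original file): how the blob API above
// fits together. An SseC value built from a random key is assumed here; the
// key MD5 plays no role in blob encryption itself.
//
//     let enc = EncryptionParams::SseC {
//         client_key: Aes256Gcm::generate_key(&mut OsRng),
//         client_key_md5: Default::default(),
//         compression_level: None,
//     };
//     let ciphertext = enc.encrypt_blob(b"hello")?; // nonce || AES-GCM ciphertext
//     let plaintext = enc.decrypt_blob(&ciphertext)?; // checks the tag, strips the nonce
//     assert_eq!(plaintext.as_ref(), b"hello");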
fn parse_request_headers(
headers: &HeaderMap,
alg_header: &HeaderName,
key_header: &HeaderName,
md5_header: &HeaderName,
) -> Result<Option<(Key<Aes256Gcm>, Md5Output)>, Error> {
let alg = headers.get(alg_header).map(HeaderValue::as_bytes);
let key = headers.get(key_header).map(HeaderValue::as_bytes);
let md5 = headers.get(md5_header).map(HeaderValue::as_bytes);
match alg {
Some(CUSTOMER_ALGORITHM_AES256) => {
use md5::{Digest, Md5};
let key_b64 =
key.ok_or_bad_request("Missing server-side-encryption-customer-key header")?;
let key_bytes: [u8; 32] = BASE64_STANDARD
.decode(&key_b64)
.ok_or_bad_request(
"Invalid server-side-encryption-customer-key header: invalid base64",
)?
.try_into()
.ok()
.ok_or_bad_request(
"Invalid server-side-encryption-customer-key header: invalid length",
)?;
let md5_b64 =
md5.ok_or_bad_request("Missing server-side-encryption-customer-key-md5 header")?;
let md5_bytes = BASE64_STANDARD.decode(&md5_b64).ok_or_bad_request(
"Invalid server-side-encryption-customer-key-md5 header: invalid bass64",
)?;
let mut hasher = Md5::new();
hasher.update(&key_bytes[..]);
let our_md5 = hasher.finalize();
if our_md5.as_slice() != md5_bytes.as_slice() {
return Err(Error::bad_request(
"Server-side encryption client key MD5 checksum does not match",
));
}
Ok(Some((key_bytes.into(), our_md5)))
}
Some(alg) => Err(Error::InvalidEncryptionAlgorithm(
String::from_utf8_lossy(alg).into_owned(),
)),
None => {
if key.is_some() || md5.is_some() {
Err(Error::bad_request(
"Unexpected server-side-encryption-customer-key{,-md5} header(s)",
))
} else {
Ok(None)
}
}
}
}
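// Note that sending the -key or -key-md5 header without the -algorithm header
// is rejected above, as is any algorithm value other than AES256.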
// ---- encrypt & decrypt streams ----
#[pin_project::pin_project]
struct DecryptStream {
#[pin]
stream: ByteStream,
done_reading: bool,
buf: BytesBuf,
key: Key<Aes256Gcm>,
state: DecryptStreamState,
}
enum DecryptStreamState {
Starting,
Running(DecryptorLE31<Aes256Gcm>),
Done,
}
impl DecryptStream {
fn new(stream: ByteStream, key: Key<Aes256Gcm>) -> Self {
Self {
stream,
done_reading: false,
buf: BytesBuf::new(),
key,
state: DecryptStreamState::Starting,
}
}
}
impl Stream for DecryptStream {
type Item = Result<Bytes, std::io::Error>;
fn poll_next(
self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> task::Poll<Option<Self::Item>> {
use std::task::Poll;
let mut this = self.project();
// The first bytes of the stream should contain the starting nonce.
// If we don't have a Running state, it means that we haven't
// yet read the nonce.
while matches!(this.state, DecryptStreamState::Starting) {
let nonce_size = StreamNonceSize::to_usize();
if let Some(nonce) = this.buf.take_exact(nonce_size) {
let nonce = Nonce::from_slice(nonce.as_ref());
*this.state = DecryptStreamState::Running(DecryptorLE31::new(&this.key, nonce));
break;
}
match futures::ready!(this.stream.as_mut().poll_next(cx)) {
Some(Ok(bytes)) => {
this.buf.extend(bytes);
}
Some(Err(e)) => {
return Poll::Ready(Some(Err(e)));
}
None => {
return Poll::Ready(Some(Err(std::io::Error::new(
std::io::ErrorKind::UnexpectedEof,
"Decrypt: unexpected EOF, could not read nonce",
))));
}
}
}
// Read at least one byte more than the encrypted chunk size
// (if possible), so that we know if we are decrypting the
// last chunk or not.
while !*this.done_reading && this.buf.len() <= STREAM_ENC_CYPER_CHUNK_SIZE {
match futures::ready!(this.stream.as_mut().poll_next(cx)) {
Some(Ok(bytes)) => {
this.buf.extend(bytes);
}
Some(Err(e)) => {
return Poll::Ready(Some(Err(e)));
}
None => {
*this.done_reading = true;
break;
}
}
}
if matches!(this.state, DecryptStreamState::Done) {
if !this.buf.is_empty() {
return Poll::Ready(Some(Err(std::io::Error::new(
std::io::ErrorKind::Other,
"Decrypt: unexpected bytes after last encrypted chunk",
))));
}
return Poll::Ready(None);
}
let res = if this.buf.len() > STREAM_ENC_CYPER_CHUNK_SIZE {
// we have strictly more bytes than the encrypted chunk size,
// so we know this is not the last
let DecryptStreamState::Running(ref mut cipher) = this.state else {
unreachable!()
};
let chunk = this.buf.take_exact(STREAM_ENC_CYPER_CHUNK_SIZE).unwrap();
let chunk_dec = cipher.decrypt_next(chunk.as_ref());
if let Ok(c) = &chunk_dec {
assert_eq!(c.len(), STREAM_ENC_PLAIN_CHUNK_SIZE);
}
chunk_dec
} else {
// We have one encrypted chunk size or less, even though we tried
// to read more, so this is the last chunk. Decrypt using the
// appropriate decrypt_last() function, which consumes the cipher.
let state = std::mem::replace(this.state, DecryptStreamState::Done);
let DecryptStreamState::Running(cipher) = state else {
unreachable!()
};
let chunk = this.buf.take_all();
cipher.decrypt_last(chunk.as_ref())
};
match res {
Ok(bytes) if bytes.is_empty() => Poll::Ready(None),
Ok(bytes) => Poll::Ready(Some(Ok(bytes.into()))),
Err(_) => Poll::Ready(Some(Err(std::io::Error::new(
std::io::ErrorKind::Other,
"Decryption failed",
)))),
}
}
}
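// Framing assumed by the decryptor above, matching encrypt_block(): a 7-byte
// STREAM-LE31 nonce prefix (the 12-byte AES-GCM nonce minus a 4-byte counter
// and a 1-byte last-chunk flag), then fixed-size ciphertext chunks of
// STREAM_ENC_CYPER_CHUNK_SIZE bytes (one plaintext chunk plus its 16-byte tag),
// and a final, possibly shorter chunk sealed with encrypt_last()/decrypt_last().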
#[cfg(test)]
mod tests {
use super::*;
use futures::stream::StreamExt;
use garage_net::stream::read_stream_to_end;
fn stream() -> ByteStream {
Box::pin(
futures::stream::iter(16usize..1024)
.map(|i| Ok(Bytes::from(vec![(i % 256) as u8; (i * 37) % 1024]))),
)
}
async fn test_block_enc(compression_level: Option<i32>) {
let enc = EncryptionParams::SseC {
client_key: Aes256Gcm::generate_key(&mut OsRng),
client_key_md5: Default::default(), // not needed
compression_level,
};
let block_plain = read_stream_to_end(stream()).await.unwrap().into_bytes();
let block_enc = enc.encrypt_block(block_plain.clone()).unwrap();
let block_dec =
enc.decrypt_block_stream(Box::pin(futures::stream::once(async { Ok(block_enc) })));
let block_dec = read_stream_to_end(block_dec).await.unwrap().into_bytes();
assert_eq!(block_plain, block_dec);
assert!(block_dec.len() > 128000);
}
#[tokio::test]
async fn test_encrypt_block() {
test_block_enc(None).await
}
#[tokio::test]
async fn test_encrypt_block_compressed() {
test_block_enc(Some(1)).await
}
}
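For reference, a hypothetical client-side helper showing what parse_request_headers() above expects from a request; the function name is invented and this is not part of the Garage codebase:

use base64::prelude::*;
use md5::{Digest, Md5};

// Derive the three SSE-C request headers from a raw 32-byte AES-256 key.
fn ssec_request_headers(key: &[u8; 32]) -> [(&'static str, String); 3] {
    let key_md5 = Md5::digest(key);
    [
        ("x-amz-server-side-encryption-customer-algorithm", "AES256".into()),
        ("x-amz-server-side-encryption-customer-key", BASE64_STANDARD.encode(key)),
        ("x-amz-server-side-encryption-customer-key-md5", BASE64_STANDARD.encode(key_md5)),
    ]
}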

View file (src/api/s3/error.rs)

@@ -65,6 +65,14 @@ pub enum Error {
#[error(display = "Invalid HTTP range: {:?}", _0)]
InvalidRange(#[error(from)] (http_range::HttpRangeParseError, u64)),
+ /// The client sent an invalid server-side encryption algorithm
+ #[error(display = "Invalid encryption algorithm: {:?}, should be AES256", _0)]
+ InvalidEncryptionAlgorithm(String),
+ /// The client sent a digest (checksum) that does not match the data
+ #[error(display = "Invalid digest: {}", _0)]
+ InvalidDigest(String),
/// The client sent a request for an action not supported by garage
#[error(display = "Unimplemented action: {}", _0)]
NotImplemented(String),
@@ -125,7 +133,9 @@ impl Error {
Error::NotImplemented(_) => "NotImplemented",
Error::InvalidXml(_) => "MalformedXML",
Error::InvalidRange(_) => "InvalidRange",
+ Error::InvalidDigest(_) => "InvalidDigest",
Error::InvalidUtf8Str(_) | Error::InvalidUtf8String(_) => "InvalidRequest",
+ Error::InvalidEncryptionAlgorithm(_) => "InvalidEncryptionAlgorithmError",
}
}
}
@@ -143,6 +153,8 @@ impl ApiError for Error {
| Error::InvalidPart
| Error::InvalidPartOrder
| Error::EntityTooSmall
+ | Error::InvalidDigest(_)
+ | Error::InvalidEncryptionAlgorithm(_)
| Error::InvalidXml(_)
| Error::InvalidUtf8Str(_)
| Error::InvalidUtf8String(_) => StatusCode::BAD_REQUEST,

View file (src/api/s3/get.rs)

@@ -1,10 +1,12 @@
//! Function related to GET and HEAD requests
+ use std::collections::BTreeMap;
use std::convert::TryInto;
use std::sync::Arc;
use std::time::{Duration, UNIX_EPOCH};
+ use bytes::Bytes;
use futures::future;
- use futures::stream::{self, StreamExt};
+ use futures::stream::{self, Stream, StreamExt};
use http::header::{
ACCEPT_RANGES, CACHE_CONTROL, CONTENT_DISPOSITION, CONTENT_ENCODING, CONTENT_LANGUAGE,
CONTENT_LENGTH, CONTENT_RANGE, CONTENT_TYPE, ETAG, EXPIRES, IF_MODIFIED_SINCE, IF_NONE_MATCH,
@@ -25,6 +27,8 @@ use garage_model::s3::version_table::*;
use crate::helpers::*;
use crate::s3::api_server::ResBody;
+ use crate::s3::checksum::{add_checksum_response_headers, X_AMZ_CHECKSUM_MODE};
+ use crate::s3::encryption::EncryptionParams;
use crate::s3::error::*;
const X_AMZ_MP_PARTS_COUNT: &str = "x-amz-mp-parts-count";
@@ -42,6 +46,9 @@ pub struct GetObjectOverrides {
fn object_headers(
version: &ObjectVersion,
version_meta: &ObjectVersionMeta,
+ meta_inner: &ObjectVersionMetaInner,
+ encryption: EncryptionParams,
+ checksum_mode: ChecksumMode,
) -> http::response::Builder {
debug!("Version meta: {:?}", version_meta);
@@ -49,7 +56,6 @@ fn object_headers(
let date_str = httpdate::fmt_http_date(date);
let mut resp = Response::builder()
- .header(CONTENT_TYPE, version_meta.headers.content_type.to_string())
.header(LAST_MODIFIED, date_str)
.header(ACCEPT_RANGES, "bytes".to_string());
@@ -57,10 +63,31 @@ fn object_headers(
resp = resp.header(ETAG, format!("\"{}\"", version_meta.etag));
}
- for (k, v) in version_meta.headers.other.iter() {
- resp = resp.header(k, v.to_string());
+ // When metadata is retrieved through the REST API, Amazon S3 combines headers that
+ // have the same name (ignoring case) into a comma-delimited list.
+ // See: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingMetadata.html
+ let mut headers_by_name = BTreeMap::new();
+ for (name, value) in meta_inner.headers.iter() {
+ match headers_by_name.get_mut(name) {
+ None => {
+ headers_by_name.insert(name, vec![value.as_str()]);
+ }
+ Some(headers) => {
+ headers.push(value.as_str());
+ }
+ }
}
+ for (name, values) in headers_by_name {
+ resp = resp.header(name, values.join(","));
+ }
+ if checksum_mode.enabled {
+ resp = add_checksum_response_headers(&meta_inner.checksum, resp);
+ }
+ encryption.add_response_headers(&mut resp);
resp
}
@@ -131,6 +158,16 @@
/// Handle HEAD request
pub async fn handle_head(
+ ctx: ReqCtx,
+ req: &Request<impl Body>,
+ key: &str,
+ part_number: Option<u64>,
+ ) -> Result<Response<ResBody>, Error> {
+ handle_head_without_ctx(ctx.garage, req, ctx.bucket_id, key, part_number).await
+ }
+ /// Handle HEAD request for website
+ pub async fn handle_head_without_ctx(
garage: Arc<Garage>,
req: &Request<impl Body>,
bucket_id: Uuid,
@@ -165,21 +202,33 @@ pub async fn handle_head(
return Ok(cached);
}
+ let (encryption, headers) =
+ EncryptionParams::check_decrypt(&garage, req.headers(), &version_meta.encryption)?;
+ let checksum_mode = checksum_mode(&req);
if let Some(pn) = part_number {
match version_data {
- ObjectVersionData::Inline(_, bytes) => {
+ ObjectVersionData::Inline(_, _) => {
if pn != 1 {
return Err(Error::InvalidPart);
}
- Ok(object_headers(object_version, version_meta)
- .header(CONTENT_LENGTH, format!("{}", bytes.len()))
- .header(
- CONTENT_RANGE,
- format!("bytes 0-{}/{}", bytes.len() - 1, bytes.len()),
- )
- .header(X_AMZ_MP_PARTS_COUNT, "1")
- .status(StatusCode::PARTIAL_CONTENT)
- .body(empty_body())?)
+ let bytes_len = version_meta.size;
+ Ok(object_headers(
+ object_version,
+ version_meta,
+ &headers,
+ encryption,
+ checksum_mode,
+ )
+ .header(CONTENT_LENGTH, format!("{}", bytes_len))
+ .header(
+ CONTENT_RANGE,
+ format!("bytes 0-{}/{}", bytes_len - 1, bytes_len),
+ )
+ .header(X_AMZ_MP_PARTS_COUNT, "1")
+ .status(StatusCode::PARTIAL_CONTENT)
+ .body(empty_body())?)
}
ObjectVersionData::FirstBlock(_, _) => {
let version = garage
@@ -191,33 +240,56 @@ pub async fn handle_head(
let (part_offset, part_end) =
calculate_part_bounds(&version, pn).ok_or(Error::InvalidPart)?;
- Ok(object_headers(object_version, version_meta)
- .header(CONTENT_LENGTH, format!("{}", part_end - part_offset))
- .header(
- CONTENT_RANGE,
- format!(
- "bytes {}-{}/{}",
- part_offset,
- part_end - 1,
- version_meta.size
- ),
- )
- .header(X_AMZ_MP_PARTS_COUNT, format!("{}", version.n_parts()?))
- .status(StatusCode::PARTIAL_CONTENT)
- .body(empty_body())?)
+ Ok(object_headers(
+ object_version,
+ version_meta,
+ &headers,
+ encryption,
+ checksum_mode,
+ )
+ .header(CONTENT_LENGTH, format!("{}", part_end - part_offset))
+ .header(
+ CONTENT_RANGE,
+ format!(
+ "bytes {}-{}/{}",
+ part_offset,
+ part_end - 1,
+ version_meta.size
+ ),
+ )
+ .header(X_AMZ_MP_PARTS_COUNT, format!("{}", version.n_parts()?))
+ .status(StatusCode::PARTIAL_CONTENT)
+ .body(empty_body())?)
}
_ => unreachable!(),
}
} else {
- Ok(object_headers(object_version, version_meta)
- .header(CONTENT_LENGTH, format!("{}", version_meta.size))
- .status(StatusCode::OK)
- .body(empty_body())?)
+ Ok(object_headers(
+ object_version,
+ version_meta,
+ &headers,
+ encryption,
+ checksum_mode,
+ )
+ .header(CONTENT_LENGTH, format!("{}", version_meta.size))
+ .status(StatusCode::OK)
+ .body(empty_body())?)
}
}
/// Handle GET request
pub async fn handle_get(
+ ctx: ReqCtx,
+ req: &Request<impl Body>,
+ key: &str,
+ part_number: Option<u64>,
+ overrides: GetObjectOverrides,
+ ) -> Result<Response<ResBody>, Error> {
+ handle_get_without_ctx(ctx.garage, req, ctx.bucket_id, key, part_number, overrides).await
+ }
+ /// Handle GET request
+ pub async fn handle_get_without_ctx(
garage: Arc<Garage>,
req: &Request<impl Body>,
bucket_id: Uuid,
@@ -252,23 +324,55 @@ pub async fn handle_get(
return Ok(cached);
}
+ let (enc, headers) =
+ EncryptionParams::check_decrypt(&garage, req.headers(), &last_v_meta.encryption)?;
+ let checksum_mode = checksum_mode(&req);
match (part_number, parse_range_header(req, last_v_meta.size)?) {
(Some(_), Some(_)) => Err(Error::bad_request(
"Cannot specify both partNumber and Range header",
)),
- (Some(pn), None) => handle_get_part(garage, last_v, last_v_data, last_v_meta, pn).await,
+ (Some(pn), None) => {
+ handle_get_part(
+ garage,
+ last_v,
+ last_v_data,
+ last_v_meta,
+ enc,
+ &headers,
+ pn,
+ checksum_mode,
+ )
+ .await
+ }
(None, Some(range)) => {
handle_get_range(
garage,
last_v,
last_v_data,
last_v_meta,
+ enc,
+ &headers,
range.start,
range.start + range.length,
+ checksum_mode,
)
.await
}
- (None, None) => handle_get_full(garage, last_v, last_v_data, last_v_meta, overrides).await,
+ (None, None) => {
+ handle_get_full(
+ garage,
+ last_v,
+ last_v_data,
+ last_v_meta,
+ enc,
+ &headers,
+ overrides,
+ checksum_mode,
+ )
+ .await
+ }
}
}
@@ -277,17 +381,43 @@ async fn handle_get_full(
version: &ObjectVersion,
version_data: &ObjectVersionData,
version_meta: &ObjectVersionMeta,
+ encryption: EncryptionParams,
+ meta_inner: &ObjectVersionMetaInner,
overrides: GetObjectOverrides,
+ checksum_mode: ChecksumMode,
) -> Result<Response<ResBody>, Error> {
- let mut resp_builder = object_headers(version, version_meta)
- .header(CONTENT_LENGTH, format!("{}", version_meta.size))
- .status(StatusCode::OK);
+ let mut resp_builder = object_headers(
+ version,
+ version_meta,
+ &meta_inner,
+ encryption,
+ checksum_mode,
+ )
+ .header(CONTENT_LENGTH, format!("{}", version_meta.size))
+ .status(StatusCode::OK);
getobject_override_headers(overrides, &mut resp_builder)?;
+ let stream = full_object_byte_stream(garage, version, version_data, encryption);
+ Ok(resp_builder.body(response_body_from_stream(stream))?)
+ }
+ pub fn full_object_byte_stream(
+ garage: Arc<Garage>,
+ version: &ObjectVersion,
+ version_data: &ObjectVersionData,
+ encryption: EncryptionParams,
+ ) -> ByteStream {
match &version_data {
ObjectVersionData::DeleteMarker => unreachable!(),
ObjectVersionData::Inline(_, bytes) => {
- Ok(resp_builder.body(bytes_body(bytes.to_vec().into()))?)
+ let bytes = bytes.to_vec();
+ Box::pin(futures::stream::once(async move {
+ encryption
+ .decrypt_blob(&bytes)
+ .map(|x| Bytes::from(x.to_vec()))
+ .map_err(std_error_from_read_error)
+ }))
}
ObjectVersionData::FirstBlock(_, first_block_hash) => {
let (tx, rx) = mpsc::channel::<ByteStream>(2);
@@ -303,19 +433,18 @@ async fn handle_get_full(
garage2.version_table.get(&version_uuid, &EmptyKey).await
});
- let stream_block_0 = garage
- .block_manager
- .rpc_get_block_streaming(&first_block_hash, Some(order_stream.order(0)))
+ let stream_block_0 = encryption
+ .get_block(&garage, &first_block_hash, Some(order_stream.order(0)))
.await?;
tx.send(stream_block_0)
.await
.ok_or_message("channel closed")?;
let version = version_fut.await.unwrap()?.ok_or(Error::NoSuchKey)?;
for (i, (_, vb)) in version.blocks.items().iter().enumerate().skip(1) {
- let stream_block_i = garage
- .block_manager
- .rpc_get_block_streaming(&vb.hash, Some(order_stream.order(i as u64)))
+ let stream_block_i = encryption
+ .get_block(&garage, &vb.hash, Some(order_stream.order(i as u64)))
.await?;
tx.send(stream_block_i)
.await
@@ -333,8 +462,7 @@
}
});
- let body = response_body_from_block_stream(rx);
- Ok(resp_builder.body(body)?)
+ Box::pin(tokio_stream::wrappers::ReceiverStream::new(rx).flatten())
}
}
}
@@ -344,13 +472,16 @@ async fn handle_get_range(
version: &ObjectVersion,
version_data: &ObjectVersionData,
version_meta: &ObjectVersionMeta,
+ encryption: EncryptionParams,
+ meta_inner: &ObjectVersionMetaInner,
begin: u64,
end: u64,
+ checksum_mode: ChecksumMode,
) -> Result<Response<ResBody>, Error> {
// Here we do not use getobject_override_headers because we don't
// want to add any overridden headers (those should not be added
// when returning PARTIAL_CONTENT)
- let resp_builder = object_headers(version, version_meta)
+ let resp_builder = object_headers(version, version_meta, meta_inner, encryption, checksum_mode)
.header(CONTENT_LENGTH, format!("{}", end - begin))
.header(
CONTENT_RANGE,
@@ -361,6 +492,7 @@ async fn handle_get_range(
match &version_data {
ObjectVersionData::DeleteMarker => unreachable!(),
ObjectVersionData::Inline(_meta, bytes) => {
+ let bytes = encryption.decrypt_blob(&bytes)?;
if end as usize <= bytes.len() {
let body = bytes_body(bytes[begin as usize..end as usize].to_vec().into());
Ok(resp_builder.body(body)?)
@@ -377,7 +509,8 @@
.await?
.ok_or(Error::NoSuchKey)?;
- let body = body_from_blocks_range(garage, version.blocks.items(), begin, end);
+ let body =
+ body_from_blocks_range(garage, encryption, version.blocks.items(), begin, end);
Ok(resp_builder.body(body)?)
}
}
@@ -388,17 +521,28 @@ async fn handle_get_part(
object_version: &ObjectVersion,
version_data: &ObjectVersionData,
version_meta: &ObjectVersionMeta,
+ encryption: EncryptionParams,
+ meta_inner: &ObjectVersionMetaInner,
part_number: u64,
+ checksum_mode: ChecksumMode,
) -> Result<Response<ResBody>, Error> {
// Same as for get_range, no getobject_override_headers
- let resp_builder =
- object_headers(object_version, version_meta).status(StatusCode::PARTIAL_CONTENT);
+ let resp_builder = object_headers(
+ object_version,
+ version_meta,
+ meta_inner,
+ encryption,
+ checksum_mode,
+ )
+ .status(StatusCode::PARTIAL_CONTENT);
match version_data {
ObjectVersionData::Inline(_, bytes) => {
if part_number != 1 {
return Err(Error::InvalidPart);
}
+ let bytes = encryption.decrypt_blob(&bytes)?;
+ assert_eq!(bytes.len() as u64, version_meta.size);
Ok(resp_builder
.header(CONTENT_LENGTH, format!("{}", bytes.len()))
.header(
@@ -406,7 +550,7 @@ async fn handle_get_part(
format!("bytes {}-{}/{}", 0, bytes.len() - 1, bytes.len()),
)
.header(X_AMZ_MP_PARTS_COUNT, "1")
- .body(bytes_body(bytes.to_vec().into()))?)
+ .body(bytes_body(bytes.into_owned().into()))?)
}
ObjectVersionData::FirstBlock(_, _) => {
let version = garage
@@ -418,7 +562,8 @@ async fn handle_get_part(
let (begin, end) =
calculate_part_bounds(&version, part_number).ok_or(Error::InvalidPart)?;
- let body = body_from_blocks_range(garage, version.blocks.items(), begin, end);
+ let body =
+ body_from_blocks_range(garage, encryption, version.blocks.items(), begin, end);
Ok(resp_builder
.header(CONTENT_LENGTH, format!("{}", end - begin))
@@ -471,8 +616,23 @@ fn calculate_part_bounds(v: &Version, part_number: u64) -> Option<(u64, u64)> {
None
}
+ struct ChecksumMode {
+ enabled: bool,
+ }
+ fn checksum_mode(req: &Request<impl Body>) -> ChecksumMode {
+ ChecksumMode {
+ enabled: req
+ .headers()
+ .get(X_AMZ_CHECKSUM_MODE)
+ .map(|x| x == "ENABLED")
+ .unwrap_or(false),
+ }
+ }
fn body_from_blocks_range(
garage: Arc<Garage>,
+ encryption: EncryptionParams,
all_blocks: &[(VersionBlockKey, VersionBlock)],
begin: u64,
end: u64,
@@ -502,12 +662,11 @@ fn body_from_blocks_range(
tokio::spawn(async move {
match async {
- let garage = garage.clone();
for (i, (block, block_offset)) in blocks.iter().enumerate() {
- let block_stream = garage
- .block_manager
- .rpc_get_block_streaming(&block.hash, Some(order_stream.order(i as u64)))
- .await?
+ let block_stream = encryption
+ .get_block(&garage, &block.hash, Some(order_stream.order(i as u64)))
+ .await?;
+ let block_stream = block_stream
.scan(*block_offset, move |chunk_offset, chunk| {
let r = match chunk {
Ok(chunk_bytes) => {
@@ -567,19 +726,30 @@
}
fn response_body_from_block_stream(rx: mpsc::Receiver<ByteStream>) -> ResBody {
- let body_stream = tokio_stream::wrappers::ReceiverStream::new(rx)
- .flatten()
- .map(|x| {
- x.map(hyper::body::Frame::data)
- .map_err(|e| Error::from(garage_util::error::Error::from(e)))
- });
+ let body_stream = tokio_stream::wrappers::ReceiverStream::new(rx).flatten();
+ response_body_from_stream(body_stream)
+ }
+ fn response_body_from_stream<S>(stream: S) -> ResBody
+ where
+ S: Stream<Item = Result<Bytes, std::io::Error>> + Send + Sync + 'static,
+ {
+ let body_stream = stream.map(|x| {
+ x.map(hyper::body::Frame::data)
+ .map_err(|e| Error::from(garage_util::error::Error::from(e)))
+ });
ResBody::new(http_body_util::StreamBody::new(body_stream))
}
fn error_stream_item<E: std::fmt::Display>(e: E) -> ByteStream {
- let err = std::io::Error::new(
- std::io::ErrorKind::Other,
- format!("Error while getting object data: {}", e),
- );
- Box::pin(stream::once(future::ready(Err(err))))
+ Box::pin(stream::once(future::ready(Err(std_error_from_read_error(
+ e,
+ )))))
+ }
+ fn std_error_from_read_error<E: std::fmt::Display>(e: E) -> std::io::Error {
+ std::io::Error::new(
+ std::io::ErrorKind::Other,
+ format!("Error while reading object data: {}", e),
+ )
}
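The header-combining loop added to object_headers() deserves a note. A standalone sketch of the same rule, with an invented name and a plain Vec<(String, String)> standing in for meta_inner.headers:

use std::collections::BTreeMap;

fn merge_user_headers(headers: &[(String, String)]) -> Vec<(String, String)> {
    // Group values by header name...
    let mut by_name: BTreeMap<&str, Vec<&str>> = BTreeMap::new();
    for (name, value) in headers {
        by_name.entry(name).or_default().push(value);
    }
    // ...then emit one comma-delimited header per name, as the S3 docs specify.
    by_name
        .into_iter()
        .map(|(name, values)| (name.to_string(), values.join(",")))
        .collect()
}

Like the code in the diff, this makes the emitted header order deterministic (sorted by name) rather than insertion order.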

View file (src/api/s3/lifecycle.rs)

@@ -1,5 +1,4 @@
use quick_xml::de::from_reader;
- use std::sync::Arc;
use http_body_util::BodyExt;
use hyper::{Request, Response, StatusCode};
@@ -16,15 +15,12 @@ use garage_model::bucket_table::{
parse_lifecycle_date, Bucket, LifecycleExpiration as GarageLifecycleExpiration,
LifecycleFilter as GarageLifecycleFilter, LifecycleRule as GarageLifecycleRule,
};
- use garage_model::garage::Garage;
use garage_util::data::*;
- pub async fn handle_get_lifecycle(bucket: &Bucket) -> Result<Response<ResBody>, Error> {
- let param = bucket
- .params()
- .ok_or_internal_error("Bucket should not be deleted at this point")?;
- if let Some(lifecycle) = param.lifecycle_config.get() {
+ pub async fn handle_get_lifecycle(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
+ let ReqCtx { bucket_params, .. } = ctx;
+ if let Some(lifecycle) = bucket_params.lifecycle_config.get() {
let wc = LifecycleConfiguration::from_garage_lifecycle_config(lifecycle);
let xml = to_xml_with_header(&wc)?;
Ok(Response::builder()
@@ -38,16 +34,18 @@ pub async fn handle_get_lifecycle(bucket: &Bucket) -> Result<Response<ResBody>,
}
}
- pub async fn handle_delete_lifecycle(
- garage: Arc<Garage>,
- mut bucket: Bucket,
- ) -> Result<Response<ResBody>, Error> {
- let param = bucket
- .params_mut()
- .ok_or_internal_error("Bucket should not be deleted at this point")?;
- param.lifecycle_config.update(None);
- garage.bucket_table.insert(&bucket).await?;
+ pub async fn handle_delete_lifecycle(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
+ let ReqCtx {
+ garage,
+ bucket_id,
+ mut bucket_params,
+ ..
+ } = ctx;
+ bucket_params.lifecycle_config.update(None);
+ garage
+ .bucket_table
+ .insert(&Bucket::present(bucket_id, bucket_params))
+ .await?;
Ok(Response::builder()
.status(StatusCode::NO_CONTENT)
@@ -55,28 +53,33 @@ pub async fn handle_delete_lifecycle(
}
pub async fn handle_put_lifecycle(
- garage: Arc<Garage>,
- mut bucket: Bucket,
+ ctx: ReqCtx,
req: Request<ReqBody>,
content_sha256: Option<Hash>,
) -> Result<Response<ResBody>, Error> {
+ let ReqCtx {
+ garage,
+ bucket_id,
+ mut bucket_params,
+ ..
+ } = ctx;
let body = BodyExt::collect(req.into_body()).await?.to_bytes();
if let Some(content_sha256) = content_sha256 {
verify_signed_content(content_sha256, &body[..])?;
}
- let param = bucket
- .params_mut()
- .ok_or_internal_error("Bucket should not be deleted at this point")?;
let conf: LifecycleConfiguration = from_reader(&body as &[u8])?;
let config = conf
.validate_into_garage_lifecycle_config()
.ok_or_bad_request("Invalid lifecycle configuration")?;
- param.lifecycle_config.update(Some(config));
- garage.bucket_table.insert(&bucket).await?;
+ bucket_params.lifecycle_config.update(Some(config));
+ garage
+ .bucket_table
+ .insert(&Bucket::present(bucket_id, bucket_params))
+ .await?;
Ok(Response::builder()
.status(StatusCode::OK)

View file (src/api/s3/list.rs)

@@ -1,15 +1,13 @@
use std::collections::{BTreeMap, BTreeSet};
use std::iter::{Iterator, Peekable};
- use std::sync::Arc;
use base64::prelude::*;
- use hyper::Response;
+ use hyper::{Request, Response};
use garage_util::data::*;
use garage_util::error::Error as GarageError;
use garage_util::time::*;
- use garage_model::garage::Garage;
use garage_model::s3::mpu_table::*;
use garage_model::s3::object_table::*;
@@ -17,7 +15,8 @@ use garage_table::EnumerationOrder;
use crate::encoding::*;
use crate::helpers::*;
- use crate::s3::api_server::ResBody;
+ use crate::s3::api_server::{ReqBody, ResBody};
+ use crate::s3::encryption::EncryptionParams;
use crate::s3::error::*;
use crate::s3::multipart as s3_multipart;
use crate::s3::xml as s3_xml;
@@ -62,9 +61,10 @@ pub struct ListPartsQuery {
}
pub async fn handle_list(
- garage: Arc<Garage>,
+ ctx: ReqCtx,
query: &ListObjectsQuery,
) -> Result<Response<ResBody>, Error> {
+ let ReqCtx { garage, .. } = &ctx;
let io = |bucket, key, count| {
let t = &garage.object_table;
async move {
@@ -167,9 +167,11 @@ pub async fn handle_list(
}
pub async fn handle_list_multipart_upload(
- garage: Arc<Garage>,
+ ctx: ReqCtx,
query: &ListMultipartUploadsQuery,
) -> Result<Response<ResBody>, Error> {
+ let ReqCtx { garage, .. } = &ctx;
let io = |bucket, key, count| {
let t = &garage.object_table;
async move {
@@ -269,15 +271,22 @@ pub async fn handle_list_multipart_upload(
}
pub async fn handle_list_parts(
- garage: Arc<Garage>,
+ ctx: ReqCtx,
+ req: Request<ReqBody>,
query: &ListPartsQuery,
) -> Result<Response<ResBody>, Error> {
debug!("ListParts {:?}", query);
let upload_id = s3_multipart::decode_upload_id(&query.upload_id)?;
- let (_, _, mpu) =
- s3_multipart::get_upload(&garage, &query.bucket_id, &query.key, &upload_id).await?;
+ let (_, object_version, mpu) = s3_multipart::get_upload(&ctx, &query.key, &upload_id).await?;
+ let object_encryption = match object_version.state {
+ ObjectVersionState::Uploading { encryption, .. } => encryption,
+ _ => unreachable!(),
+ };
+ let encryption_res =
+ EncryptionParams::check_decrypt(&ctx.garage, req.headers(), &object_encryption);
let (info, next) = fetch_part_info(query, &mpu)?;
@@ -296,11 +305,40 @@ pub async fn handle_list_parts(
is_truncated: s3_xml::Value(format!("{}", next.is_some())),
parts: info
.iter()
- .map(|part| s3_xml::PartItem {
- etag: s3_xml::Value(format!("\"{}\"", part.etag)),
- last_modified: s3_xml::Value(msec_to_rfc3339(part.timestamp)),
- part_number: s3_xml::IntValue(part.part_number as i64),
- size: s3_xml::IntValue(part.size as i64),
+ .map(|part| {
+ // hide checksum if object is encrypted and the decryption
+ // keys are not provided
+ let checksum = part.checksum.filter(|_| encryption_res.is_ok());
+ s3_xml::PartItem {
+ etag: s3_xml::Value(format!("\"{}\"", part.etag)),
+ last_modified: s3_xml::Value(msec_to_rfc3339(part.timestamp)),
+ part_number: s3_xml::IntValue(part.part_number as i64),
+ size: s3_xml::IntValue(part.size as i64),
+ checksum_crc32: match &checksum {
+ Some(ChecksumValue::Crc32(x)) => {
+ Some(s3_xml::Value(BASE64_STANDARD.encode(&x)))
+ }
+ _ => None,
+ },
+ checksum_crc32c: match &checksum {
+ Some(ChecksumValue::Crc32c(x)) => {
+ Some(s3_xml::Value(BASE64_STANDARD.encode(&x)))
+ }
+ _ => None,
+ },
+ checksum_sha1: match &checksum {
+ Some(ChecksumValue::Sha1(x)) => {
+ Some(s3_xml::Value(BASE64_STANDARD.encode(&x)))
+ }
+ _ => None,
+ },
+ checksum_sha256: match &checksum {
+ Some(ChecksumValue::Sha256(x)) => {
+ Some(s3_xml::Value(BASE64_STANDARD.encode(&x)))
+ }
+ _ => None,
+ },
+ }
})
.collect(),
@@ -346,6 +384,7 @@ struct PartInfo<'a> {
timestamp: u64,
part_number: u64,
size: u64,
+ checksum: Option<ChecksumValue>,
}
enum ExtractionResult {
@@ -486,6 +525,7 @@ fn fetch_part_info<'a>(
timestamp: pk.timestamp,
etag,
size,
+ checksum: p.checksum,
};
match parts.last_mut() {
Some(lastpart) if lastpart.part_number == pk.part_number => {
@@ -944,10 +984,13 @@ mod tests {
timestamp: TS,
state: ObjectVersionState::Uploading {
multipart: true,
- headers: ObjectVersionHeaders {
- content_type: "text/plain".to_string(),
- other: BTreeMap::<String, String>::new(),
+ encryption: ObjectVersionEncryption::Plaintext {
+ inner: ObjectVersionMetaInner {
+ headers: vec![],
+ checksum: None,
+ },
},
+ checksum_algorithm: None,
},
}
}
@@ -1136,6 +1179,7 @@ mod tests {
version: uuid,
size: Some(3),
etag: Some("etag1".into()),
+ checksum: None,
},
),
(
@@ -1147,6 +1191,7 @@ mod tests {
version: uuid,
size: None,
etag: None,
+ checksum: None,
},
),
(
@@ -1158,6 +1203,7 @@ mod tests {
version: uuid,
size: Some(10),
etag: Some("etag2".into()),
+ checksum: None,
},
),
(
@@ -1169,6 +1215,7 @@ mod tests {
version: uuid,
size: Some(7),
etag: Some("etag3".into()),
+ checksum: None,
},
),
(
@@ -1180,6 +1227,7 @@ mod tests {
version: uuid,
size: Some(5),
etag: Some("etag4".into()),
+ checksum: None,
},
),
];
@@ -1218,12 +1266,14 @@ mod tests {
etag: "etag1",
timestamp: TS,
part_number: 1,
- size: 3
+ size: 3,
+ checksum: None,
},
PartInfo {
etag: "etag2",
timestamp: TS,
part_number: 3,
+ checksum: None,
size: 10
},
]
@@ -1239,12 +1289,14 @@ mod tests {
PartInfo {
etag: "etag3",
timestamp: TS,
+ checksum: None,
part_number: 5,
size: 7
},
PartInfo {
etag: "etag4",
timestamp: TS,
+ checksum: None,
part_number: 8,
size: 5
},
@@ -1268,24 +1320,28 @@ mod tests {
PartInfo {
etag: "etag1",
timestamp: TS,
+ checksum: None,
part_number: 1,
size: 3
},
PartInfo {
etag: "etag2",
timestamp: TS,
+ checksum: None,
part_number: 3,
size: 10
},
PartInfo {
etag: "etag3",
timestamp: TS,
+ checksum: None,
part_number: 5,
size: 7
},
PartInfo {
etag: "etag4",
timestamp: TS,
+ checksum: None,
part_number: 8,
size: 5
},
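The four checksum_* fields above all follow one pattern: base64-encode the raw bytes of the matching ChecksumValue variant, otherwise None. A hypothetical helper expressing that pattern (the variant payloads are assumed to be fixed-size byte arrays, as the BASE64_STANDARD.encode calls imply):

fn encoded_checksum(checksum: &Option<ChecksumValue>) -> Option<(&'static str, String)> {
    match checksum {
        Some(ChecksumValue::Crc32(x)) => Some(("CRC32", BASE64_STANDARD.encode(x))),
        Some(ChecksumValue::Crc32c(x)) => Some(("CRC32C", BASE64_STANDARD.encode(x))),
        Some(ChecksumValue::Sha1(x)) => Some(("SHA1", BASE64_STANDARD.encode(x))),
        Some(ChecksumValue::Sha256(x)) => Some(("SHA256", BASE64_STANDARD.encode(x))),
        None => None,
    }
}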

View file (src/api/s3/mod.rs)

@@ -13,5 +13,7 @@ mod post_object;
mod put;
mod website;
+ mod checksum;
+ mod encryption;
mod router;
pub mod xml;

View file (src/api/s3/multipart.rs)

@ -1,14 +1,14 @@
use std::collections::HashMap; use std::collections::HashMap;
use std::convert::TryInto;
use std::sync::Arc; use std::sync::Arc;
use base64::prelude::*;
use futures::prelude::*; use futures::prelude::*;
use hyper::{Request, Response}; use hyper::{Request, Response};
use md5::{Digest as Md5Digest, Md5};
use garage_table::*; use garage_table::*;
use garage_util::data::*; use garage_util::data::*;
use garage_model::bucket_table::Bucket;
use garage_model::garage::Garage; use garage_model::garage::Garage;
use garage_model::s3::block_ref_table::*; use garage_model::s3::block_ref_table::*;
use garage_model::s3::mpu_table::*; use garage_model::s3::mpu_table::*;
@ -17,6 +17,8 @@ use garage_model::s3::version_table::*;
use crate::helpers::*; use crate::helpers::*;
use crate::s3::api_server::{ReqBody, ResBody}; use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::checksum::*;
use crate::s3::encryption::EncryptionParams;
use crate::s3::error::*; use crate::s3::error::*;
use crate::s3::put::*; use crate::s3::put::*;
use crate::s3::xml as s3_xml; use crate::s3::xml as s3_xml;
@ -25,18 +27,32 @@ use crate::signature::verify_signed_content;
// ---- // ----
pub async fn handle_create_multipart_upload( pub async fn handle_create_multipart_upload(
garage: Arc<Garage>, ctx: ReqCtx,
req: &Request<ReqBody>, req: &Request<ReqBody>,
bucket_name: &str,
bucket_id: Uuid,
key: &String, key: &String,
) -> Result<Response<ResBody>, Error> { ) -> Result<Response<ResBody>, Error> {
let ReqCtx {
garage,
bucket_id,
bucket_name,
..
} = &ctx;
let existing_object = garage.object_table.get(&bucket_id, &key).await?; let existing_object = garage.object_table.get(&bucket_id, &key).await?;
let upload_id = gen_uuid(); let upload_id = gen_uuid();
let timestamp = next_timestamp(existing_object.as_ref()); let timestamp = next_timestamp(existing_object.as_ref());
let headers = get_headers(req.headers())?; let headers = get_headers(req.headers())?;
let meta = ObjectVersionMetaInner {
headers,
checksum: None,
};
// Determine whether object should be encrypted, and if so the key
let encryption = EncryptionParams::new_from_headers(&garage, req.headers())?;
let object_encryption = encryption.encrypt_meta(meta)?;
let checksum_algorithm = request_checksum_algorithm(req.headers())?;
// Create object in object table // Create object in object table
let object_version = ObjectVersion { let object_version = ObjectVersion {
@ -44,16 +60,17 @@ pub async fn handle_create_multipart_upload(
timestamp, timestamp,
state: ObjectVersionState::Uploading { state: ObjectVersionState::Uploading {
multipart: true, multipart: true,
headers, encryption: object_encryption,
checksum_algorithm,
}, },
}; };
let object = Object::new(bucket_id, key.to_string(), vec![object_version]); let object = Object::new(*bucket_id, key.to_string(), vec![object_version]);
garage.object_table.insert(&object).await?; garage.object_table.insert(&object).await?;
// Create multipart upload in mpu table // Create multipart upload in mpu table
// This multipart upload will hold references to uploaded parts // This multipart upload will hold references to uploaded parts
// (which are entries in the Version table) // (which are entries in the Version table)
let mpu = MultipartUpload::new(upload_id, timestamp, bucket_id, key.into(), false); let mpu = MultipartUpload::new(upload_id, timestamp, *bucket_id, key.into(), false);
garage.mpu_table.insert(&mpu).await?; garage.mpu_table.insert(&mpu).await?;
// Send success response // Send success response
@ -65,35 +82,53 @@ pub async fn handle_create_multipart_upload(
}; };
let xml = s3_xml::to_xml_with_header(&result)?; let xml = s3_xml::to_xml_with_header(&result)?;
Ok(Response::new(string_body(xml))) let mut resp = Response::builder();
encryption.add_response_headers(&mut resp);
Ok(resp.body(string_body(xml))?)
} }
pub async fn handle_put_part( pub async fn handle_put_part(
garage: Arc<Garage>, ctx: ReqCtx,
req: Request<ReqBody>, req: Request<ReqBody>,
bucket_id: Uuid,
key: &str, key: &str,
part_number: u64, part_number: u64,
upload_id: &str, upload_id: &str,
content_sha256: Option<Hash>, content_sha256: Option<Hash>,
) -> Result<Response<ResBody>, Error> { ) -> Result<Response<ResBody>, Error> {
let ReqCtx { garage, .. } = &ctx;
let upload_id = decode_upload_id(upload_id)?; let upload_id = decode_upload_id(upload_id)?;
let content_md5 = match req.headers().get("content-md5") { let expected_checksums = ExpectedChecksums {
Some(x) => Some(x.to_str()?.to_string()), md5: match req.headers().get("content-md5") {
None => None, Some(x) => Some(x.to_str()?.to_string()),
None => None,
},
sha256: content_sha256,
extra: request_checksum_value(req.headers())?,
}; };
// Read first chuck, and at the same time try to get object to see if it exists // Read first chuck, and at the same time try to get object to see if it exists
let key = key.to_string(); let key = key.to_string();
let stream = body_stream(req.into_body()); let (req_head, req_body) = req.into_parts();
let stream = body_stream(req_body);
let mut chunker = StreamChunker::new(stream, garage.config.block_size); let mut chunker = StreamChunker::new(stream, garage.config.block_size);
let ((_, _, mut mpu), first_block) = futures::try_join!( let ((_, object_version, mut mpu), first_block) =
get_upload(&garage, &bucket_id, &key, &upload_id), futures::try_join!(get_upload(&ctx, &key, &upload_id), chunker.next(),)?;
chunker.next(),
)?; // Check encryption params
let (object_encryption, checksum_algorithm) = match object_version.state {
ObjectVersionState::Uploading {
encryption,
checksum_algorithm,
..
} => (encryption, checksum_algorithm),
_ => unreachable!(),
};
let (encryption, _) =
EncryptionParams::check_decrypt(&garage, &req_head.headers, &object_encryption)?;
// Check object is valid and part can be accepted // Check object is valid and part can be accepted
let first_block = first_block.ok_or_bad_request("Empty body")?; let first_block = first_block.ok_or_bad_request("Empty body")?;
@ -120,7 +155,9 @@ pub async fn handle_put_part(
mpu_part_key, mpu_part_key,
MpuPart { MpuPart {
version: version_uuid, version: version_uuid,
// all these are filled in later, at the end of this function
etag: None, etag: None,
checksum: None,
size: None, size: None,
}, },
); );
@ -134,24 +171,31 @@ pub async fn handle_put_part(
garage.version_table.insert(&version).await?; garage.version_table.insert(&version).await?;
// Copy data to version // Copy data to version
let (total_size, data_md5sum, data_sha256sum, _) = let checksummer =
read_and_put_blocks(&garage, &version, part_number, first_block, &mut chunker).await?; Checksummer::init(&expected_checksums, !encryption.is_encrypted()).add(checksum_algorithm);
let (total_size, checksums, _) = read_and_put_blocks(
&ctx,
&version,
encryption,
part_number,
first_block,
&mut chunker,
checksummer,
)
.await?;
// Verify that checksums map // Verify that checksums map
ensure_checksum_matches( checksums.verify(&expected_checksums)?;
data_md5sum.as_slice(),
data_sha256sum,
content_md5.as_deref(),
content_sha256,
)?;
// Store part etag in version // Store part etag in version
let data_md5sum_hex = hex::encode(data_md5sum); let etag = encryption.etag_from_md5(&checksums.md5);
mpu.parts.put( mpu.parts.put(
mpu_part_key, mpu_part_key,
MpuPart { MpuPart {
version: version_uuid, version: version_uuid,
etag: Some(data_md5sum_hex.clone()), etag: Some(etag.clone()),
checksum: checksums.extract(checksum_algorithm),
size: Some(total_size), size: Some(total_size),
}, },
); );
@ -161,11 +205,10 @@ pub async fn handle_put_part(
// We won't have to clean up on drop. // We won't have to clean up on drop.
interrupted_cleanup.cancel(); interrupted_cleanup.cancel();
let response = Response::builder() let mut resp = Response::builder().header("ETag", format!("\"{}\"", etag));
.header("ETag", format!("\"{}\"", data_md5sum_hex)) encryption.add_response_headers(&mut resp);
.body(empty_body()) let resp = add_checksum_response_headers(&expected_checksums.extra, resp);
.unwrap(); Ok(resp.body(empty_body())?)
Ok(response)
} }
struct InterruptedCleanup(Option<InterruptedCleanupInner>); struct InterruptedCleanup(Option<InterruptedCleanupInner>);
@ -200,17 +243,23 @@ impl Drop for InterruptedCleanup {
} }
pub async fn handle_complete_multipart_upload( pub async fn handle_complete_multipart_upload(
garage: Arc<Garage>, ctx: ReqCtx,
req: Request<ReqBody>, req: Request<ReqBody>,
bucket_name: &str,
bucket: &Bucket,
key: &str, key: &str,
upload_id: &str, upload_id: &str,
content_sha256: Option<Hash>, content_sha256: Option<Hash>,
) -> Result<Response<ResBody>, Error> { ) -> Result<Response<ResBody>, Error> {
let body = http_body_util::BodyExt::collect(req.into_body()) let ReqCtx {
.await? garage,
.to_bytes(); bucket_id,
bucket_name,
..
} = &ctx;
let (req_head, req_body) = req.into_parts();
let expected_checksum = request_checksum_value(&req_head.headers)?;
let body = http_body_util::BodyExt::collect(req_body).await?.to_bytes();
if let Some(content_sha256) = content_sha256 { if let Some(content_sha256) = content_sha256 {
verify_signed_content(content_sha256, &body[..])?; verify_signed_content(content_sha256, &body[..])?;
@ -228,15 +277,18 @@ pub async fn handle_complete_multipart_upload(
// Get object and multipart upload // Get object and multipart upload
let key = key.to_string(); let key = key.to_string();
let (object, mut object_version, mpu) = let (object, mut object_version, mpu) = get_upload(&ctx, &key, &upload_id).await?;
get_upload(&garage, &bucket.id, &key, &upload_id).await?;
if mpu.parts.is_empty() { if mpu.parts.is_empty() {
return Err(Error::bad_request("No data was uploaded")); return Err(Error::bad_request("No data was uploaded"));
} }
let headers = match object_version.state { let (object_encryption, checksum_algorithm) = match object_version.state {
ObjectVersionState::Uploading { headers, .. } => headers, ObjectVersionState::Uploading {
encryption,
checksum_algorithm,
..
} => (encryption, checksum_algorithm),
_ => unreachable!(), _ => unreachable!(),
}; };
@ -264,6 +316,13 @@ pub async fn handle_complete_multipart_upload(
for req_part in body_list_of_parts.iter() { for req_part in body_list_of_parts.iter() {
match have_parts.get(&req_part.part_number) { match have_parts.get(&req_part.part_number) {
Some(part) if part.etag.as_ref() == Some(&req_part.etag) && part.size.is_some() => { Some(part) if part.etag.as_ref() == Some(&req_part.etag) && part.size.is_some() => {
// alternative version: if req_part.checksum.is_some() && part.checksum != req_part.checksum {
if part.checksum != req_part.checksum {
return Err(Error::InvalidDigest(format!(
"Invalid checksum for part {}: in request = {:?}, uploaded part = {:?}",
req_part.part_number, req_part.checksum, part.checksum
)));
}
parts.push(*part) parts.push(*part)
} }
_ => return Err(Error::InvalidPart), _ => return Err(Error::InvalidPart),
@ -283,7 +342,7 @@ pub async fn handle_complete_multipart_upload(
let mut final_version = Version::new( let mut final_version = Version::new(
upload_id, upload_id,
VersionBacklink::Object { VersionBacklink::Object {
bucket_id: bucket.id, bucket_id: *bucket_id,
key: key.to_string(), key: key.to_string(),
}, },
false, false,
@ -311,41 +370,60 @@ pub async fn handle_complete_multipart_upload(
}); });
garage.block_ref_table.insert_many(block_refs).await?; garage.block_ref_table.insert_many(block_refs).await?;
// Calculate etag of final object // Calculate checksum and etag of final object
// To understand how etags are calculated, read more here: // To understand how etags are calculated, read more here:
// https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
// https://teppen.io/2018/06/23/aws_s3_etags/ // https://teppen.io/2018/06/23/aws_s3_etags/
let mut etag_md5_hasher = Md5::new(); let mut checksummer = MultipartChecksummer::init(checksum_algorithm);
for part in parts.iter() { for part in parts.iter() {
etag_md5_hasher.update(part.etag.as_ref().unwrap().as_bytes()); checksummer.update(part.etag.as_ref().unwrap(), part.checksum)?;
} }
let etag = format!( let (checksum_md5, checksum_extra) = checksummer.finalize();
"{}-{}",
hex::encode(etag_md5_hasher.finalize()), if expected_checksum.is_some() && checksum_extra != expected_checksum {
parts.len() return Err(Error::InvalidDigest(
); "Failed to validate x-amz-checksum-*".into(),
));
}
let etag = format!("{}-{}", hex::encode(&checksum_md5[..]), parts.len());
// Calculate total size of final object // Calculate total size of final object
let total_size = parts.iter().map(|x| x.size.unwrap()).sum(); let total_size = parts.iter().map(|x| x.size.unwrap()).sum();
if let Err(e) = check_quotas(&garage, bucket, total_size, Some(&object)).await { if let Err(e) = check_quotas(&ctx, total_size, Some(&object)).await {
object_version.state = ObjectVersionState::Aborted; object_version.state = ObjectVersionState::Aborted;
let final_object = Object::new(bucket.id, key.clone(), vec![object_version]); let final_object = Object::new(*bucket_id, key.clone(), vec![object_version]);
garage.object_table.insert(&final_object).await?; garage.object_table.insert(&final_object).await?;
return Err(e); return Err(e);
} }
// If there is a checksum algorithm, update metadata with checksum
let object_encryption = match checksum_algorithm {
None => object_encryption,
Some(_) => {
let (encryption, meta) =
EncryptionParams::check_decrypt(&garage, &req_head.headers, &object_encryption)?;
let new_meta = ObjectVersionMetaInner {
headers: meta.into_owned().headers,
checksum: checksum_extra,
};
encryption.encrypt_meta(new_meta)?
}
};
// Write final object version // Write final object version
object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock( object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock(
ObjectVersionMeta { ObjectVersionMeta {
headers, encryption: object_encryption,
size: total_size, size: total_size,
etag: etag.clone(), etag: etag.clone(),
}, },
final_version.blocks.items()[0].1.hash, final_version.blocks.items()[0].1.hash,
)); ));
let final_object = Object::new(bucket.id, key.clone(), vec![object_version]); let final_object = Object::new(*bucket_id, key.clone(), vec![object_version]);
garage.object_table.insert(&final_object).await?; garage.object_table.insert(&final_object).await?;
// Send response saying ok we're done // Send response saying ok we're done
@ -355,25 +433,45 @@ pub async fn handle_complete_multipart_upload(
bucket: s3_xml::Value(bucket_name.to_string()), bucket: s3_xml::Value(bucket_name.to_string()),
key: s3_xml::Value(key), key: s3_xml::Value(key),
etag: s3_xml::Value(format!("\"{}\"", etag)), etag: s3_xml::Value(format!("\"{}\"", etag)),
checksum_crc32: match &checksum_extra {
Some(ChecksumValue::Crc32(x)) => Some(s3_xml::Value(BASE64_STANDARD.encode(&x))),
_ => None,
},
checksum_crc32c: match &checksum_extra {
Some(ChecksumValue::Crc32c(x)) => Some(s3_xml::Value(BASE64_STANDARD.encode(&x))),
_ => None,
},
checksum_sha1: match &checksum_extra {
Some(ChecksumValue::Sha1(x)) => Some(s3_xml::Value(BASE64_STANDARD.encode(&x))),
_ => None,
},
checksum_sha256: match &checksum_extra {
Some(ChecksumValue::Sha256(x)) => Some(s3_xml::Value(BASE64_STANDARD.encode(&x))),
_ => None,
},
}; };
let xml = s3_xml::to_xml_with_header(&result)?; let xml = s3_xml::to_xml_with_header(&result)?;
Ok(Response::new(string_body(xml))) let resp = Response::builder();
let resp = add_checksum_response_headers(&expected_checksum, resp);
Ok(resp.body(string_body(xml))?)
} }
pub async fn handle_abort_multipart_upload( pub async fn handle_abort_multipart_upload(
garage: Arc<Garage>, ctx: ReqCtx,
bucket_id: Uuid,
key: &str, key: &str,
upload_id: &str, upload_id: &str,
) -> Result<Response<ResBody>, Error> { ) -> Result<Response<ResBody>, Error> {
let ReqCtx {
garage, bucket_id, ..
} = &ctx;
let upload_id = decode_upload_id(upload_id)?; let upload_id = decode_upload_id(upload_id)?;
let (_, mut object_version, _) = let (_, mut object_version, _) = get_upload(&ctx, &key.to_string(), &upload_id).await?;
get_upload(&garage, &bucket_id, &key.to_string(), &upload_id).await?;
object_version.state = ObjectVersionState::Aborted; object_version.state = ObjectVersionState::Aborted;
let final_object = Object::new(bucket_id, key.to_string(), vec![object_version]); let final_object = Object::new(*bucket_id, key.to_string(), vec![object_version]);
garage.object_table.insert(&final_object).await?; garage.object_table.insert(&final_object).await?;
Ok(Response::new(empty_body())) Ok(Response::new(empty_body()))
@@ -383,11 +481,13 @@ pub async fn handle_abort_multipart_upload(
#[allow(clippy::ptr_arg)] #[allow(clippy::ptr_arg)]
pub(crate) async fn get_upload( pub(crate) async fn get_upload(
garage: &Garage, ctx: &ReqCtx,
bucket_id: &Uuid,
key: &String, key: &String,
upload_id: &Uuid, upload_id: &Uuid,
) -> Result<(Object, ObjectVersion, MultipartUpload), Error> { ) -> Result<(Object, ObjectVersion, MultipartUpload), Error> {
let ReqCtx {
garage, bucket_id, ..
} = ctx;
let (object, mpu) = futures::try_join!( let (object, mpu) = futures::try_join!(
garage.object_table.get(bucket_id, key).map_err(Error::from), garage.object_table.get(bucket_id, key).map_err(Error::from),
garage garage
@@ -423,6 +523,7 @@ pub fn decode_upload_id(id: &str) -> Result<Uuid, Error> {
struct CompleteMultipartUploadPart { struct CompleteMultipartUploadPart {
etag: String, etag: String,
part_number: u64, part_number: u64,
checksum: Option<ChecksumValue>,
} }
fn parse_complete_multipart_upload_body( fn parse_complete_multipart_upload_body(
@@ -448,9 +549,41 @@ fn parse_complete_multipart_upload_body(
.children() .children()
.find(|e| e.has_tag_name("PartNumber"))? .find(|e| e.has_tag_name("PartNumber"))?
.text()?; .text()?;
let checksum = if let Some(crc32) =
item.children().find(|e| e.has_tag_name("ChecksumCRC32"))
{
Some(ChecksumValue::Crc32(
BASE64_STANDARD.decode(crc32.text()?).ok()?[..]
.try_into()
.ok()?,
))
} else if let Some(crc32c) = item.children().find(|e| e.has_tag_name("ChecksumCRC32C"))
{
Some(ChecksumValue::Crc32c(
BASE64_STANDARD.decode(crc32c.text()?).ok()?[..]
.try_into()
.ok()?,
))
} else if let Some(sha1) = item.children().find(|e| e.has_tag_name("ChecksumSHA1")) {
Some(ChecksumValue::Sha1(
BASE64_STANDARD.decode(sha1.text()?).ok()?[..]
.try_into()
.ok()?,
))
} else if let Some(sha256) = item.children().find(|e| e.has_tag_name("ChecksumSHA256"))
{
Some(ChecksumValue::Sha256(
BASE64_STANDARD.decode(sha256.text()?).ok()?[..]
.try_into()
.ok()?,
))
} else {
None
};
parts.push(CompleteMultipartUploadPart { parts.push(CompleteMultipartUploadPart {
etag: etag.trim_matches('"').to_string(), etag: etag.trim_matches('"').to_string(),
part_number: part_number.parse().ok()?, part_number: part_number.parse().ok()?,
checksum,
}); });
} else { } else {
return None; return None;
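
All four checksum branches above repeat one decode-and-narrow step; schematically it boils down to the following hypothetical helper (not part of the patch), where N is the digest width (4 for CRC32/CRC32C, 20 for SHA-1, 32 for SHA-256):

use base64::prelude::*;

// Decode a base64 text node into a fixed-width checksum; None on bad input.
fn decode_checksum<const N: usize>(text: &str) -> Option<[u8; N]> {
    BASE64_STANDARD.decode(text).ok()?[..].try_into().ok()
}
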


@@ -14,12 +14,15 @@ use multer::{Constraints, Multipart, SizeLimit};
use serde::Deserialize; use serde::Deserialize;
use garage_model::garage::Garage; use garage_model::garage::Garage;
use garage_model::s3::object_table::*;
use crate::helpers::*; use crate::helpers::*;
use crate::s3::api_server::ResBody; use crate::s3::api_server::ResBody;
use crate::s3::checksum::*;
use crate::s3::cors::*; use crate::s3::cors::*;
use crate::s3::encryption::EncryptionParams;
use crate::s3::error::*; use crate::s3::error::*;
use crate::s3::put::{get_headers, save_stream}; use crate::s3::put::{get_headers, save_stream, ChecksumMode};
use crate::s3::xml as s3_xml; use crate::s3::xml as s3_xml;
use crate::signature::payload::{verify_v4, Authorization}; use crate::signature::payload::{verify_v4, Authorization};
@@ -48,13 +51,17 @@ pub async fn handle_post_object(
let mut multipart = Multipart::with_constraints(stream, boundary, constraints); let mut multipart = Multipart::with_constraints(stream, boundary, constraints);
let mut params = HeaderMap::new(); let mut params = HeaderMap::new();
let field = loop { let file_field = loop {
let field = if let Some(field) = multipart.next_field().await? { let field = if let Some(field) = multipart.next_field().await? {
field field
} else { } else {
return Err(Error::bad_request("Request did not contain a file")); return Err(Error::bad_request("Request did not contain a file"));
}; };
let name: HeaderName = if let Some(Ok(name)) = field.name().map(TryInto::try_into) { let name: HeaderName = if let Some(Ok(name)) = field
.name()
.map(str::to_ascii_lowercase)
.map(TryInto::try_into)
{
name name
} else { } else {
continue; continue;
@@ -64,21 +71,11 @@ pub async fn handle_post_object(
} }
if let Ok(content) = HeaderValue::from_str(&field.text().await?) { if let Ok(content) = HeaderValue::from_str(&field.text().await?) {
match name.as_str() { if params.insert(&name, content).is_some() {
"tag" => (/* tag need to be reencoded, but we don't support them yet anyway */), return Err(Error::bad_request(format!(
"acl" => { "Field '{}' provided more than once",
if params.insert("x-amz-acl", content).is_some() { name
return Err(Error::bad_request("Field 'acl' provided more than once")); )));
}
}
_ => {
if params.insert(&name, content).is_some() {
return Err(Error::bad_request(format!(
"Field '{}' provided more than once",
name
)));
}
}
} }
} }
}; };
@@ -96,7 +93,7 @@ pub async fn handle_post_object(
let key = if key.contains("${filename}") { let key = if key.contains("${filename}") {
// if no filename is provided, don't replace. This matches the behavior of AWS. // if no filename is provided, don't replace. This matches the behavior of AWS.
if let Some(filename) = field.file_name() { if let Some(filename) = file_field.file_name() {
key.replace("${filename}", filename) key.replace("${filename}", filename)
} else { } else {
key.to_owned() key.to_owned()
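
As a worked example of that rule (the helper is ours, for illustration only):

// "${filename}" is only substituted when the file part actually carries a
// filename; otherwise the key is used literally, matching AWS behavior.
fn post_object_key(template: &str, filename: Option<&str>) -> String {
    match filename {
        Some(f) => template.replace("${filename}", f),
        None => template.to_owned(),
    }
}

// post_object_key("uploads/${filename}", Some("cat.png")) == "uploads/cat.png"
// post_object_key("uploads/${filename}", None)            == "uploads/${filename}"
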
@@ -120,6 +117,12 @@ pub async fn handle_post_object(
.bucket_helper() .bucket_helper()
.get_existing_bucket(bucket_id) .get_existing_bucket(bucket_id)
.await?; .await?;
let bucket_params = bucket.state.into_option().unwrap();
let matching_cors_rule = find_matching_cors_rule(
&bucket_params,
&Request::from_parts(head.clone(), empty_body::<Infallible>()),
)?
.cloned();
let decoded_policy = BASE64_STANDARD let decoded_policy = BASE64_STANDARD
.decode(policy) .decode(policy)
@@ -137,9 +140,8 @@ pub async fn handle_post_object(
let mut conditions = decoded_policy.into_conditions()?; let mut conditions = decoded_policy.into_conditions()?;
for (param_key, value) in params.iter() { for (param_key, value) in params.iter() {
let mut param_key = param_key.to_string(); let param_key = param_key.as_str();
param_key.make_ascii_lowercase(); match param_key {
match param_key.as_str() {
"policy" | "x-amz-signature" => (), // this is always accepted, as it's required to validate other fields "policy" | "x-amz-signature" => (), // this is always accepted, as it's required to validate other fields
"content-type" => { "content-type" => {
let conds = conditions.params.remove("content-type").ok_or_else(|| { let conds = conditions.params.remove("content-type").ok_or_else(|| {
@@ -184,7 +186,7 @@ pub async fn handle_post_object(
// how aws seems to behave. // how aws seems to behave.
continue; continue;
} }
let conds = conditions.params.remove(&param_key).ok_or_else(|| { let conds = conditions.params.remove(param_key).ok_or_else(|| {
Error::bad_request(format!("Key '{}' is not allowed in policy", param_key)) Error::bad_request(format!("Key '{}' is not allowed in policy", param_key))
})?; })?;
for cond in conds { for cond in conds {
@@ -210,21 +212,47 @@ pub async fn handle_post_object(
))); )));
} }
// if we ever start supporting ACLs, we likely want to map "acl" to "x-amz-acl" somewhere
// around here to make sure the rest of the machinery takes our acl into account.
let headers = get_headers(&params)?; let headers = get_headers(&params)?;
let stream = field.map(|r| r.map_err(Into::into)); let expected_checksums = ExpectedChecksums {
let (_, md5) = save_stream( md5: params
garage, .get("content-md5")
.map(HeaderValue::to_str)
.transpose()?
.map(str::to_string),
sha256: None,
extra: request_checksum_algorithm_value(&params)?,
};
let meta = ObjectVersionMetaInner {
headers, headers,
checksum: expected_checksums.extra,
};
let encryption = EncryptionParams::new_from_headers(&garage, &params)?;
let stream = file_field.map(|r| r.map_err(Into::into));
let ctx = ReqCtx {
garage,
bucket_id,
bucket_name,
bucket_params,
api_key,
};
let res = save_stream(
&ctx,
meta,
encryption,
StreamLimiter::new(stream, conditions.content_length), StreamLimiter::new(stream, conditions.content_length),
&bucket,
&key, &key,
None, ChecksumMode::Verify(&expected_checksums),
None,
) )
.await?; .await?;
let etag = format!("\"{}\"", md5); let etag = format!("\"{}\"", res.etag);
let mut resp = if let Some(mut target) = params let mut resp = if let Some(mut target) = params
.get("success_action_redirect") .get("success_action_redirect")
@@ -234,15 +262,16 @@ pub async fn handle_post_object(
{ {
target target
.query_pairs_mut() .query_pairs_mut()
.append_pair("bucket", &bucket_name) .append_pair("bucket", &ctx.bucket_name)
.append_pair("key", &key) .append_pair("key", &key)
.append_pair("etag", &etag); .append_pair("etag", &etag);
let target = target.to_string(); let target = target.to_string();
Response::builder() let mut resp = Response::builder()
.status(StatusCode::SEE_OTHER) .status(StatusCode::SEE_OTHER)
.header(header::LOCATION, target.clone()) .header(header::LOCATION, target.clone())
.header(header::ETAG, etag) .header(header::ETAG, etag);
.body(string_body(target))? encryption.add_response_headers(&mut resp);
resp.body(string_body(target))?
} else { } else {
let path = head let path = head
.uri .uri
@@ -269,16 +298,17 @@ pub async fn handle_post_object(
.get("success_action_status") .get("success_action_status")
.and_then(|h| h.to_str().ok()) .and_then(|h| h.to_str().ok())
.unwrap_or("204"); .unwrap_or("204");
let builder = Response::builder() let mut builder = Response::builder()
.header(header::LOCATION, location.clone()) .header(header::LOCATION, location.clone())
.header(header::ETAG, etag.clone()); .header(header::ETAG, etag.clone());
encryption.add_response_headers(&mut builder);
match action { match action {
"200" => builder.status(StatusCode::OK).body(empty_body())?, "200" => builder.status(StatusCode::OK).body(empty_body())?,
"201" => { "201" => {
let xml = s3_xml::PostObject { let xml = s3_xml::PostObject {
xmlns: (), xmlns: (),
location: s3_xml::Value(location), location: s3_xml::Value(location),
bucket: s3_xml::Value(bucket_name), bucket: s3_xml::Value(ctx.bucket_name),
key: s3_xml::Value(key), key: s3_xml::Value(key),
etag: s3_xml::Value(etag), etag: s3_xml::Value(etag),
}; };
@@ -291,12 +321,8 @@ pub async fn handle_post_object(
} }
}; };
let matching_cors_rule = find_matching_cors_rule(
&bucket,
&Request::from_parts(head, empty_body::<Infallible>()),
)?;
if let Some(rule) = matching_cors_rule { if let Some(rule) = matching_cors_rule {
add_cors_headers(&mut resp, rule) add_cors_headers(&mut resp, &rule)
.ok_or_internal_error("Invalid bucket CORS configuration")?; .ok_or_internal_error("Invalid bucket CORS configuration")?;
} }


@@ -1,12 +1,9 @@
use std::collections::{BTreeMap, HashMap}; use std::collections::HashMap;
use std::collections::{BTreeMap, HashMap}; use std::collections::HashMap;
use std::sync::Arc; use std::sync::Arc;
use base64::prelude::*;
use futures::prelude::*; use futures::prelude::*;
use futures::stream::FuturesOrdered; use futures::stream::FuturesOrdered;
use futures::try_join; use futures::try_join;
use md5::{digest::generic_array::*, Digest as Md5Digest, Md5};
use sha2::Sha256;
use tokio::sync::mpsc; use tokio::sync::mpsc;
@@ -22,13 +19,11 @@
use opentelemetry::{ use opentelemetry::{
use garage_net::bytes_buf::BytesBuf; use garage_net::bytes_buf::BytesBuf;
use garage_rpc::rpc_helper::OrderTag; use garage_rpc::rpc_helper::OrderTag;
use garage_table::*; use garage_table::*;
use garage_util::async_hash::*;
use garage_util::data::*; use garage_util::data::*;
use garage_util::error::Error as GarageError; use garage_util::error::Error as GarageError;
use garage_util::time::*; use garage_util::time::*;
use garage_block::manager::INLINE_THRESHOLD; use garage_block::manager::INLINE_THRESHOLD;
use garage_model::bucket_table::Bucket;
use garage_model::garage::Garage; use garage_model::garage::Garage;
use garage_model::index_counter::CountedItem; use garage_model::index_counter::CountedItem;
use garage_model::s3::block_ref_table::*; use garage_model::s3::block_ref_table::*;
@@ -37,14 +32,27 @@ use garage_model::s3::version_table::*;
use crate::helpers::*; use crate::helpers::*;
use crate::s3::api_server::{ReqBody, ResBody}; use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::checksum::*;
use crate::s3::encryption::EncryptionParams;
use crate::s3::error::*; use crate::s3::error::*;
const PUT_BLOCKS_MAX_PARALLEL: usize = 3; const PUT_BLOCKS_MAX_PARALLEL: usize = 3;
pub(crate) struct SaveStreamResult {
pub(crate) version_uuid: Uuid,
pub(crate) version_timestamp: u64,
/// Etag WITHOUT THE QUOTES (just the hex value)
pub(crate) etag: String,
}
pub(crate) enum ChecksumMode<'a> {
Verify(&'a ExpectedChecksums),
Calculate(Option<ChecksumAlgorithm>),
}
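
The two modes split responsibility between call sites: S3 handlers verify the checksums the client sent, while internal writers only record one. A stub illustration of that contract, with simplified stand-in types (not the real definitions):

enum Mode<'a> {
    Verify(&'a [u8]), // client-supplied digest must match what we computed
    Calculate,        // no client digest: store the computed one instead
}

fn finish(mode: Mode<'_>, computed: &[u8]) -> Result<Option<Vec<u8>>, &'static str> {
    match mode {
        Mode::Verify(expected) if expected != computed => Err("checksum mismatch"),
        Mode::Verify(_) => Ok(None),
        Mode::Calculate => Ok(Some(computed.to_vec())),
    }
}
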
pub async fn handle_put( pub async fn handle_put(
garage: Arc<Garage>, ctx: ReqCtx,
req: Request<ReqBody>, req: Request<ReqBody>,
bucket: &Bucket,
key: &String, key: &String,
content_sha256: Option<Hash>, content_sha256: Option<Hash>,
) -> Result<Response<ResBody>, Error> { ) -> Result<Response<ResBody>, Error> {
@@ -52,42 +60,59 @@ pub async fn handle_put(
let headers = get_headers(req.headers())?; let headers = get_headers(req.headers())?;
debug!("Object headers: {:?}", headers); debug!("Object headers: {:?}", headers);
let content_md5 = match req.headers().get("content-md5") { let expected_checksums = ExpectedChecksums {
Some(x) => Some(x.to_str()?.to_string()), md5: match req.headers().get("content-md5") {
None => None, Some(x) => Some(x.to_str()?.to_string()),
None => None,
},
sha256: content_sha256,
extra: request_checksum_value(req.headers())?,
}; };
let meta = ObjectVersionMetaInner {
headers,
checksum: expected_checksums.extra,
};
// Determine whether object should be encrypted, and if so the key
let encryption = EncryptionParams::new_from_headers(&ctx.garage, req.headers())?;
let stream = body_stream(req.into_body()); let stream = body_stream(req.into_body());
save_stream( let res = save_stream(
garage, &ctx,
headers, meta,
encryption,
stream, stream,
bucket,
key, key,
content_md5, ChecksumMode::Verify(&expected_checksums),
content_sha256,
) )
.await .await?;
.map(|(uuid, md5)| put_response(uuid, md5))
let mut resp = Response::builder()
.header("x-amz-version-id", hex::encode(res.version_uuid))
.header("ETag", format!("\"{}\"", res.etag));
encryption.add_response_headers(&mut resp);
let resp = add_checksum_response_headers(&expected_checksums.extra, resp);
Ok(resp.body(empty_body())?)
} }
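
For reference, the x-amz-checksum-* values captured in ExpectedChecksums above are base64 encodings of the raw digest bytes, unlike the hex-encoded ETag. A sketch of what a client would send for SHA-256, assuming the sha2 and base64 crates:

use base64::prelude::*;
use sha2::{Digest, Sha256};

// Value for the `x-amz-checksum-sha256` request header:
// base64 of the 32 raw digest bytes, not their hex form.
fn checksum_sha256_header(body: &[u8]) -> String {
    BASE64_STANDARD.encode(Sha256::digest(body))
}
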
pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>( pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
garage: Arc<Garage>, ctx: &ReqCtx,
headers: ObjectVersionHeaders, mut meta: ObjectVersionMetaInner,
encryption: EncryptionParams,
body: S, body: S,
bucket: &Bucket,
key: &String, key: &String,
content_md5: Option<String>, checksum_mode: ChecksumMode<'_>,
content_sha256: Option<FixedBytes32>, ) -> Result<SaveStreamResult, Error> {
) -> Result<(Uuid, String), Error> { let ReqCtx {
garage, bucket_id, ..
} = ctx;
let mut chunker = StreamChunker::new(body, garage.config.block_size); let mut chunker = StreamChunker::new(body, garage.config.block_size);
let (first_block_opt, existing_object) = try_join!( let (first_block_opt, existing_object) = try_join!(
chunker.next(), chunker.next(),
garage garage.object_table.get(bucket_id, key).map_err(Error::from),
.object_table
.get(&bucket.id, key)
.map_err(Error::from),
)?; )?;
let first_block = first_block_opt.unwrap_or_default(); let first_block = first_block_opt.unwrap_or_default();
@@ -96,43 +121,55 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
let version_uuid = gen_uuid(); let version_uuid = gen_uuid();
let version_timestamp = next_timestamp(existing_object.as_ref()); let version_timestamp = next_timestamp(existing_object.as_ref());
let mut checksummer = match checksum_mode {
ChecksumMode::Verify(expected) => Checksummer::init(expected, !encryption.is_encrypted()),
ChecksumMode::Calculate(algo) => {
Checksummer::init(&Default::default(), !encryption.is_encrypted()).add(algo)
}
};
// If body is small enough, store it directly in the object table // If body is small enough, store it directly in the object table
// as "inline data". We can then return immediately. // as "inline data". We can then return immediately.
if first_block.len() < INLINE_THRESHOLD { if first_block.len() < INLINE_THRESHOLD {
let mut md5sum = Md5::new(); checksummer.update(&first_block);
md5sum.update(&first_block[..]); let checksums = checksummer.finalize();
let data_md5sum = md5sum.finalize();
let data_md5sum_hex = hex::encode(data_md5sum); match checksum_mode {
ChecksumMode::Verify(expected) => {
checksums.verify(&expected)?;
}
ChecksumMode::Calculate(algo) => {
meta.checksum = checksums.extract(algo);
}
};
let data_sha256sum = sha256sum(&first_block[..]);
let size = first_block.len() as u64; let size = first_block.len() as u64;
check_quotas(ctx, size, existing_object.as_ref()).await?;
ensure_checksum_matches( let etag = encryption.etag_from_md5(&checksums.md5);
data_md5sum.as_slice(), let inline_data = encryption.encrypt_blob(&first_block)?.to_vec();
data_sha256sum,
content_md5.as_deref(),
content_sha256,
)?;
check_quotas(&garage, bucket, size, existing_object.as_ref()).await?;
let object_version = ObjectVersion { let object_version = ObjectVersion {
uuid: version_uuid, uuid: version_uuid,
timestamp: version_timestamp, timestamp: version_timestamp,
state: ObjectVersionState::Complete(ObjectVersionData::Inline( state: ObjectVersionState::Complete(ObjectVersionData::Inline(
ObjectVersionMeta { ObjectVersionMeta {
headers, encryption: encryption.encrypt_meta(meta)?,
size, size,
etag: data_md5sum_hex.clone(), etag: etag.clone(),
}, },
first_block.to_vec(), inline_data,
)), )),
}; };
let object = Object::new(bucket.id, key.into(), vec![object_version]); let object = Object::new(*bucket_id, key.into(), vec![object_version]);
garage.object_table.insert(&object).await?; garage.object_table.insert(&object).await?;
return Ok((version_uuid, data_md5sum_hex)); return Ok(SaveStreamResult {
version_uuid,
version_timestamp,
etag,
});
} }
// The following consists in many steps that can each fail. // The following consists in many steps that can each fail.
@@ -140,7 +177,7 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
// before everything is finished (cleanup is done using the Drop trait). // before everything is finished (cleanup is done using the Drop trait).
let mut interrupted_cleanup = InterruptedCleanup(Some(InterruptedCleanupInner { let mut interrupted_cleanup = InterruptedCleanup(Some(InterruptedCleanupInner {
garage: garage.clone(), garage: garage.clone(),
bucket_id: bucket.id, bucket_id: *bucket_id,
key: key.into(), key: key.into(),
version_uuid, version_uuid,
version_timestamp, version_timestamp,
@@ -152,11 +189,12 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
uuid: version_uuid, uuid: version_uuid,
timestamp: version_timestamp, timestamp: version_timestamp,
state: ObjectVersionState::Uploading { state: ObjectVersionState::Uploading {
headers: headers.clone(), encryption: encryption.encrypt_meta(meta.clone())?,
checksum_algorithm: None, // don't care; overwritten later
multipart: false, multipart: false,
}, },
}; };
let object = Object::new(bucket.id, key.into(), vec![object_version.clone()]); let object = Object::new(*bucket_id, key.into(), vec![object_version.clone()]);
garage.object_table.insert(&object).await?; garage.object_table.insert(&object).await?;
// Initialize corresponding entry in version table // Initialize corresponding entry in version table
@@ -166,81 +204,77 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
let version = Version::new( let version = Version::new(
version_uuid, version_uuid,
VersionBacklink::Object { VersionBacklink::Object {
bucket_id: bucket.id, bucket_id: *bucket_id,
key: key.into(), key: key.into(),
}, },
false, false,
); );
garage.version_table.insert(&version).await?; garage.version_table.insert(&version).await?;
// Transfer data and verify checksum // Transfer data
let (total_size, data_md5sum, data_sha256sum, first_block_hash) = let (total_size, checksums, first_block_hash) = read_and_put_blocks(
read_and_put_blocks(&garage, &version, 1, first_block, &mut chunker).await?; ctx,
&version,
encryption,
1,
first_block,
&mut chunker,
checksummer,
)
.await?;
ensure_checksum_matches( // Verify checksums are ok / add calculated checksum to metadata
data_md5sum.as_slice(), match checksum_mode {
data_sha256sum, ChecksumMode::Verify(expected) => {
content_md5.as_deref(), checksums.verify(&expected)?;
content_sha256, }
)?; ChecksumMode::Calculate(algo) => {
meta.checksum = checksums.extract(algo);
}
};
check_quotas(&garage, bucket, total_size, existing_object.as_ref()).await?; // Verify quotas are respected
check_quotas(ctx, total_size, existing_object.as_ref()).await?;
// Save final object state, marked as Complete // Save final object state, marked as Complete
let md5sum_hex = hex::encode(data_md5sum); let etag = encryption.etag_from_md5(&checksums.md5);
object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock( object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock(
ObjectVersionMeta { ObjectVersionMeta {
headers, encryption: encryption.encrypt_meta(meta)?,
size: total_size, size: total_size,
etag: md5sum_hex.clone(), etag: etag.clone(),
}, },
first_block_hash, first_block_hash,
)); ));
let object = Object::new(bucket.id, key.into(), vec![object_version]); let object = Object::new(*bucket_id, key.into(), vec![object_version]);
garage.object_table.insert(&object).await?; garage.object_table.insert(&object).await?;
// We were not interrupted, everything went fine. // We were not interrupted, everything went fine.
// We won't have to clean up on drop. // We won't have to clean up on drop.
interrupted_cleanup.cancel(); interrupted_cleanup.cancel();
Ok((version_uuid, md5sum_hex)) Ok(SaveStreamResult {
} version_uuid,
version_timestamp,
/// Validate MD5 sum against content-md5 header etag,
/// and sha256sum against signed content-sha256 })
pub(crate) fn ensure_checksum_matches(
data_md5sum: &[u8],
data_sha256sum: garage_util::data::FixedBytes32,
content_md5: Option<&str>,
content_sha256: Option<garage_util::data::FixedBytes32>,
) -> Result<(), Error> {
if let Some(expected_sha256) = content_sha256 {
if expected_sha256 != data_sha256sum {
return Err(Error::bad_request(
"Unable to validate x-amz-content-sha256",
));
} else {
trace!("Successfully validated x-amz-content-sha256");
}
}
if let Some(expected_md5) = content_md5 {
if expected_md5.trim_matches('"') != BASE64_STANDARD.encode(data_md5sum) {
return Err(Error::bad_request("Unable to validate content-md5"));
} else {
trace!("Successfully validated content-md5");
}
}
Ok(())
} }
/// Check that inserting this object with this size doesn't exceed bucket quotas /// Check that inserting this object with this size doesn't exceed bucket quotas
pub(crate) async fn check_quotas( pub(crate) async fn check_quotas(
garage: &Arc<Garage>, ctx: &ReqCtx,
bucket: &Bucket,
size: u64, size: u64,
prev_object: Option<&Object>, prev_object: Option<&Object>,
) -> Result<(), Error> { ) -> Result<(), Error> {
let quotas = bucket.state.as_option().unwrap().quotas.get(); let ReqCtx {
garage,
bucket_id,
bucket_params,
..
} = ctx;
let quotas = bucket_params.quotas.get();
if quotas.max_objects.is_none() && quotas.max_size.is_none() { if quotas.max_objects.is_none() && quotas.max_size.is_none() {
return Ok(()); return Ok(());
}; };
@@ -248,11 +282,11 @@ pub(crate) async fn check_quotas(
let counters = garage let counters = garage
.object_counter_table .object_counter_table
.table .table
.get(&bucket.id, &EmptyKey) .get(bucket_id, &EmptyKey)
.await?; .await?;
let counters = counters let counters = counters
.map(|x| x.filtered_values(&garage.system.ring.borrow())) .map(|x| x.filtered_values(&garage.system.cluster_layout()))
.unwrap_or_default(); .unwrap_or_default();
let (prev_cnt_obj, prev_cnt_size) = match prev_object { let (prev_cnt_obj, prev_cnt_size) = match prev_object {
@@ -292,12 +326,14 @@ pub(crate) async fn check_quotas(
} }
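
Schematically, the check reduces to simple arithmetic on the bucket counters: replacing an existing object frees its previous size and leaves the object count unchanged. A hypothetical, condensed form (the real code reads the counters from the index counter table, filtered by the cluster layout):

struct Quotas {
    max_objects: Option<u64>,
    max_size: Option<u64>,
}

// cnt_obj / cnt_size: current bucket counters; prev_size: size of the
// version this insert replaces, if any.
fn quota_ok(q: &Quotas, cnt_obj: u64, cnt_size: u64, new_size: u64, prev_size: Option<u64>) -> bool {
    let objects_after = cnt_obj + u64::from(prev_size.is_none());
    let size_after = cnt_size.saturating_sub(prev_size.unwrap_or(0)) + new_size;
    q.max_objects.map_or(true, |m| objects_after <= m)
        && q.max_size.map_or(true, |m| size_after <= m)
}
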
pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> + Unpin>( pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
garage: &Garage, ctx: &ReqCtx,
version: &Version, version: &Version,
encryption: EncryptionParams,
part_number: u64, part_number: u64,
first_block: Bytes, first_block: Bytes,
chunker: &mut StreamChunker<S>, chunker: &mut StreamChunker<S>,
) -> Result<(u64, GenericArray<u8, typenum::U16>, Hash, Hash), Error> { checksummer: Checksummer,
) -> Result<(u64, Checksums, Hash), Error> {
let tracer = opentelemetry::global::tracer("garage"); let tracer = opentelemetry::global::tracer("garage");
let (block_tx, mut block_rx) = mpsc::channel::<Result<Bytes, Error>>(2); let (block_tx, mut block_rx) = mpsc::channel::<Result<Bytes, Error>>(2);
@@ -325,20 +361,20 @@ pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> +
let (block_tx2, mut block_rx2) = mpsc::channel::<Result<Bytes, Error>>(1); let (block_tx2, mut block_rx2) = mpsc::channel::<Result<Bytes, Error>>(1);
let hash_stream = async { let hash_stream = async {
let md5hasher = AsyncHasher::<Md5>::new(); let mut checksummer = checksummer;
let sha256hasher = AsyncHasher::<Sha256>::new();
while let Some(next) = block_rx.recv().await { while let Some(next) = block_rx.recv().await {
match next { match next {
Ok(block) => { Ok(block) => {
block_tx2.send(Ok(block.clone())).await?; block_tx2.send(Ok(block.clone())).await?;
futures::future::join( checksummer = tokio::task::spawn_blocking(move || {
md5hasher.update(block.clone()), checksummer.update(&block);
sha256hasher.update(block.clone()), checksummer
) })
.with_context(Context::current_with_span( .with_context(Context::current_with_span(
tracer.start("Hash block (md5, sha256)"), tracer.start("Hash block (md5, sha256)"),
)) ))
.await; .await
.unwrap()
} }
Err(e) => { Err(e) => {
block_tx2.send(Err(e)).await?; block_tx2.send(Err(e)).await?;
@@ -347,27 +383,38 @@ pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> +
} }
} }
drop(block_tx2); drop(block_tx2);
Ok::<_, mpsc::error::SendError<_>>(futures::join!( Ok::<_, mpsc::error::SendError<_>>(checksummer)
md5hasher.finalize(),
sha256hasher.finalize()
))
}; };
let (block_tx3, mut block_rx3) = mpsc::channel::<Result<(Bytes, Hash), Error>>(1); let (block_tx3, mut block_rx3) = mpsc::channel::<Result<(Bytes, u64, Hash), Error>>(1);
let hash_blocks = async { let encrypt_hash_blocks = async {
let mut first_block_hash = None; let mut first_block_hash = None;
while let Some(next) = block_rx2.recv().await { while let Some(next) = block_rx2.recv().await {
match next { match next {
Ok(block) => { Ok(block) => {
let hash = async_blake2sum(block.clone()) let unencrypted_len = block.len() as u64;
.with_context(Context::current_with_span( let res = tokio::task::spawn_blocking(move || {
tracer.start("Hash block (blake2)"), let block = encryption.encrypt_block(block)?;
)) let hash = blake2sum(&block);
.await; Ok((block, hash))
if first_block_hash.is_none() { })
first_block_hash = Some(hash); .with_context(Context::current_with_span(
tracer.start("Encrypt and hash (blake2) block"),
))
.await
.unwrap();
match res {
Ok((block, hash)) => {
if first_block_hash.is_none() {
first_block_hash = Some(hash);
}
block_tx3.send(Ok((block, unencrypted_len, hash))).await?;
}
Err(e) => {
block_tx3.send(Err(e)).await?;
break;
}
} }
block_tx3.send(Ok((block, hash))).await?;
} }
Err(e) => { Err(e) => {
block_tx3.send(Err(e)).await?; block_tx3.send(Err(e)).await?;
@@ -402,7 +449,7 @@ pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> +
block_rx3.recv().await block_rx3.recv().await
} }
}; };
let (block, hash) = tokio::select! { let (block, unencrypted_len, hash) = tokio::select! {
result = write_futs_next => { result = write_futs_next => {
result?; result?;
continue; continue;
@@ -414,17 +461,18 @@ pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> +
}; };
// For next block to be written: count its size and spawn future to write it // For next block to be written: count its size and spawn future to write it
let offset = written_bytes;
written_bytes += block.len() as u64;
write_futs.push_back(put_block_and_meta( write_futs.push_back(put_block_and_meta(
garage, ctx,
version, version,
part_number, part_number,
offset, written_bytes,
hash, hash,
block, block,
unencrypted_len,
encryption.is_encrypted(),
order_stream.order(written_bytes), order_stream.order(written_bytes),
)); ));
written_bytes += unencrypted_len;
} }
while let Some(res) = write_futs.next().await { while let Some(res) = write_futs.next().await {
res?; res?;
@@ -433,38 +481,37 @@ pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> +
}; };
let (_, stream_hash_result, block_hash_result, final_result) = let (_, stream_hash_result, block_hash_result, final_result) =
futures::join!(read_blocks, hash_stream, hash_blocks, put_blocks); futures::join!(read_blocks, hash_stream, encrypt_hash_blocks, put_blocks);
let total_size = final_result?; let total_size = final_result?;
// unwrap here is ok, because if hasher failed, it is because something failed // unwrap here is ok, because if hasher failed, it is because something failed
// later in the pipeline which already caused a return at the ? on previous line // later in the pipeline which already caused a return at the ? on previous line
let (data_md5sum, data_sha256sum) = stream_hash_result.unwrap();
let first_block_hash = block_hash_result.unwrap(); let first_block_hash = block_hash_result.unwrap();
let checksums = stream_hash_result.unwrap().finalize();
let data_sha256sum = Hash::try_from(&data_sha256sum[..]).unwrap(); Ok((total_size, checksums, first_block_hash))
Ok((total_size, data_md5sum, data_sha256sum, first_block_hash))
} }
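
The body thus flows through several concurrent stages wired together with bounded mpsc channels (read, checksum, encrypt-and-hash, store), with CPU-heavy work pushed onto spawn_blocking so executor threads are never stalled. A minimal standalone sketch of that pattern, assuming a tokio runtime and the futures crate:

use tokio::sync::mpsc;

// Two-stage pipeline: a producer feeds blocks through a bounded channel
// while the consumer runs CPU-bound work under spawn_blocking.
async fn pipeline(blocks: Vec<Vec<u8>>) -> u64 {
    let (tx, mut rx) = mpsc::channel::<Vec<u8>>(2);
    let producer = async move {
        for b in blocks {
            if tx.send(b).await.is_err() {
                break; // consumer hung up
            }
        }
        // tx is dropped here, which closes the channel
    };
    let consumer = async {
        let mut total = 0u64;
        while let Some(b) = rx.recv().await {
            // stand-in for the encrypt_block()/blake2sum() work above
            total += tokio::task::spawn_blocking(move || b.len() as u64)
                .await
                .expect("blocking task panicked");
        }
        total
    };
    let ((), total) = futures::join!(producer, consumer);
    total
}
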
async fn put_block_and_meta( async fn put_block_and_meta(
garage: &Garage, ctx: &ReqCtx,
version: &Version, version: &Version,
part_number: u64, part_number: u64,
offset: u64, offset: u64,
hash: Hash, hash: Hash,
block: Bytes, block: Bytes,
size: u64,
is_encrypted: bool,
order_tag: OrderTag, order_tag: OrderTag,
) -> Result<(), GarageError> { ) -> Result<(), GarageError> {
let ReqCtx { garage, .. } = ctx;
let mut version = version.clone(); let mut version = version.clone();
version.blocks.put( version.blocks.put(
VersionBlockKey { VersionBlockKey {
part_number, part_number,
offset, offset,
}, },
VersionBlock { VersionBlock { hash, size },
hash,
size: block.len() as u64,
},
); );
let block_ref = BlockRef { let block_ref = BlockRef {
@@ -476,7 +523,7 @@ async fn put_block_and_meta(
futures::try_join!( futures::try_join!(
garage garage
.block_manager .block_manager
.rpc_put_block(hash, block, Some(order_tag)), .rpc_put_block(hash, block, is_encrypted, Some(order_tag)),
garage.version_table.insert(&version), garage.version_table.insert(&version),
garage.block_ref_table.insert(&block_ref), garage.block_ref_table.insert(&block_ref),
)?; )?;
@@ -519,14 +566,6 @@ impl<S: Stream<Item = Result<Bytes, Error>> + Unpin> StreamChunker<S> {
} }
} }
pub fn put_response(version_uuid: Uuid, md5sum_hex: String) -> Response<ResBody> {
Response::builder()
.header("x-amz-version-id", hex::encode(version_uuid))
.header("ETag", format!("\"{}\"", md5sum_hex))
.body(empty_body())
.unwrap()
}
struct InterruptedCleanup(Option<InterruptedCleanupInner>); struct InterruptedCleanup(Option<InterruptedCleanupInner>);
struct InterruptedCleanupInner { struct InterruptedCleanupInner {
garage: Arc<Garage>, garage: Arc<Garage>,
@@ -561,57 +600,35 @@ impl Drop for InterruptedCleanup {
// ============ helpers ============ // ============ helpers ============
pub(crate) fn get_mime_type(headers: &HeaderMap<HeaderValue>) -> Result<String, Error> { pub(crate) fn get_headers(headers: &HeaderMap<HeaderValue>) -> Result<HeaderList, Error> {
Ok(headers let mut ret = Vec::new();
.get(hyper::header::CONTENT_TYPE)
.map(|x| x.to_str())
.unwrap_or(Ok("blob"))?
.to_string())
}
pub(crate) fn get_headers(headers: &HeaderMap<HeaderValue>) -> Result<ObjectVersionHeaders, Error> {
let content_type = get_mime_type(headers)?;
let mut other = BTreeMap::new();
// Preserve standard headers // Preserve standard headers
let standard_header = vec![ let standard_header = vec![
hyper::header::CONTENT_TYPE,
hyper::header::CACHE_CONTROL, hyper::header::CACHE_CONTROL,
hyper::header::CONTENT_DISPOSITION, hyper::header::CONTENT_DISPOSITION,
hyper::header::CONTENT_ENCODING, hyper::header::CONTENT_ENCODING,
hyper::header::CONTENT_LANGUAGE, hyper::header::CONTENT_LANGUAGE,
hyper::header::EXPIRES, hyper::header::EXPIRES,
]; ];
for h in standard_header.iter() { for name in standard_header.iter() {
if let Some(v) = headers.get(h) { if let Some(value) = headers.get(name) {
match v.to_str() { ret.push((name.to_string(), value.to_str()?.to_string()));
Ok(v_str) => {
other.insert(h.to_string(), v_str.to_string());
}
Err(e) => {
warn!("Discarding header {}, error in .to_str(): {}", h, e);
}
}
} }
} }
// Preserve x-amz-meta- headers // Preserve x-amz-meta- headers
for (k, v) in headers.iter() { for (name, value) in headers.iter() {
if k.as_str().starts_with("x-amz-meta-") { if name.as_str().starts_with("x-amz-meta-") {
match v.to_str() { ret.push((
Ok(v_str) => { name.to_string(),
other.insert(k.to_string(), v_str.to_string()); std::str::from_utf8(value.as_bytes())?.to_string(),
} ));
Err(e) => {
warn!("Discarding header {}, error in .to_str(): {}", k, e);
}
}
} }
} }
Ok(ObjectVersionHeaders { Ok(ret)
content_type,
other,
})
} }
pub(crate) fn next_timestamp(existing_object: Option<&Object>) -> u64 { pub(crate) fn next_timestamp(existing_object: Option<&Object>) -> u64 {


@@ -1,5 +1,4 @@
use quick_xml::de::from_reader; use quick_xml::de::from_reader;
use quick_xml::de::from_reader; use quick_xml::de::from_reader;
use std::sync::Arc;
use http_body_util::BodyExt; use http_body_util::BodyExt;
use hyper::{Request, Response, StatusCode}; use hyper::{Request, Response, StatusCode};
@@ -12,15 +11,11 @@ use crate::s3::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
use crate::signature::verify_signed_content; use crate::signature::verify_signed_content;
use garage_model::bucket_table::*; use garage_model::bucket_table::*;
use garage_model::garage::Garage;
use garage_util::data::*; use garage_util::data::*;
pub async fn handle_get_website(bucket: &Bucket) -> Result<Response<ResBody>, Error> { pub async fn handle_get_website(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
let param = bucket let ReqCtx { bucket_params, .. } = ctx;
.params() if let Some(website) = bucket_params.website_config.get() {
.ok_or_internal_error("Bucket should not be deleted at this point")?;
if let Some(website) = param.website_config.get() {
let wc = WebsiteConfiguration { let wc = WebsiteConfiguration {
xmlns: (), xmlns: (),
error_document: website.error_document.as_ref().map(|v| Key { error_document: website.error_document.as_ref().map(|v| Key {
@@ -44,16 +39,18 @@ pub async fn handle_get_website(bucket: &Bucket) -> Result<Response<ResBody>, Er
} }
} }
pub async fn handle_delete_website( pub async fn handle_delete_website(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
garage: Arc<Garage>, let ReqCtx {
mut bucket: Bucket, garage,
) -> Result<Response<ResBody>, Error> { bucket_id,
let param = bucket mut bucket_params,
.params_mut() ..
.ok_or_internal_error("Bucket should not be deleted at this point")?; } = ctx;
bucket_params.website_config.update(None);
param.website_config.update(None); garage
garage.bucket_table.insert(&bucket).await?; .bucket_table
.insert(&Bucket::present(bucket_id, bucket_params))
.await?;
Ok(Response::builder() Ok(Response::builder()
.status(StatusCode::NO_CONTENT) .status(StatusCode::NO_CONTENT)
@@ -61,28 +58,33 @@ pub async fn handle_delete_website(
} }
pub async fn handle_put_website( pub async fn handle_put_website(
garage: Arc<Garage>, ctx: ReqCtx,
mut bucket: Bucket,
req: Request<ReqBody>, req: Request<ReqBody>,
content_sha256: Option<Hash>, content_sha256: Option<Hash>,
) -> Result<Response<ResBody>, Error> { ) -> Result<Response<ResBody>, Error> {
let ReqCtx {
garage,
bucket_id,
mut bucket_params,
..
} = ctx;
let body = BodyExt::collect(req.into_body()).await?.to_bytes(); let body = BodyExt::collect(req.into_body()).await?.to_bytes();
if let Some(content_sha256) = content_sha256 { if let Some(content_sha256) = content_sha256 {
verify_signed_content(content_sha256, &body[..])?; verify_signed_content(content_sha256, &body[..])?;
} }
let param = bucket
.params_mut()
.ok_or_internal_error("Bucket should not be deleted at this point")?;
let conf: WebsiteConfiguration = from_reader(&body as &[u8])?; let conf: WebsiteConfiguration = from_reader(&body as &[u8])?;
conf.validate()?; conf.validate()?;
param bucket_params
.website_config .website_config
.update(Some(conf.into_garage_website_config()?)); .update(Some(conf.into_garage_website_config()?));
garage.bucket_table.insert(&bucket).await?; garage
.bucket_table
.insert(&Bucket::present(bucket_id, bucket_params))
.await?;
Ok(Response::builder() Ok(Response::builder()
.status(StatusCode::OK) .status(StatusCode::OK)


@@ -131,6 +131,14 @@ pub struct CompleteMultipartUploadResult {
pub key: Value, pub key: Value,
#[serde(rename = "ETag")] #[serde(rename = "ETag")]
pub etag: Value, pub etag: Value,
#[serde(rename = "ChecksumCRC32")]
pub checksum_crc32: Option<Value>,
#[serde(rename = "ChecksumCRC32C")]
pub checksum_crc32c: Option<Value>,
#[serde(rename = "ChecksumSHA1")]
pub checksum_sha1: Option<Value>,
#[serde(rename = "ChecksumSHA256")]
pub checksum_sha256: Option<Value>,
} }
#[derive(Debug, Serialize, PartialEq, Eq)] #[derive(Debug, Serialize, PartialEq, Eq)]
@@ -197,6 +205,14 @@ pub struct PartItem {
pub part_number: IntValue, pub part_number: IntValue,
#[serde(rename = "Size")] #[serde(rename = "Size")]
pub size: IntValue, pub size: IntValue,
#[serde(rename = "ChecksumCRC32")]
pub checksum_crc32: Option<Value>,
#[serde(rename = "ChecksumCRC32C")]
pub checksum_crc32c: Option<Value>,
#[serde(rename = "ChecksumSHA1")]
pub checksum_sha1: Option<Value>,
#[serde(rename = "ChecksumSHA256")]
pub checksum_sha256: Option<Value>,
} }
#[derive(Debug, Serialize, PartialEq, Eq)] #[derive(Debug, Serialize, PartialEq, Eq)]
@@ -500,6 +516,10 @@ mod tests {
bucket: Value("mybucket".to_string()), bucket: Value("mybucket".to_string()),
key: Value("a/plop".to_string()), key: Value("a/plop".to_string()),
etag: Value("\"3858f62230ac3c915f300c664312c11f-9\"".to_string()), etag: Value("\"3858f62230ac3c915f300c664312c11f-9\"".to_string()),
checksum_crc32: None,
checksum_crc32c: None,
checksum_sha1: Some(Value("ZJAnHyG8PeKz9tI8UTcHrJos39A=".into())),
checksum_sha256: None,
}; };
assert_eq!( assert_eq!(
to_xml_with_header(&result)?, to_xml_with_header(&result)?,
@@ -509,6 +529,7 @@ mod tests {
<Bucket>mybucket</Bucket>\ <Bucket>mybucket</Bucket>\
<Key>a/plop</Key>\ <Key>a/plop</Key>\
<ETag>&quot;3858f62230ac3c915f300c664312c11f-9&quot;</ETag>\ <ETag>&quot;3858f62230ac3c915f300c664312c11f-9&quot;</ETag>\
<ChecksumSHA1>ZJAnHyG8PeKz9tI8UTcHrJos39A=</ChecksumSHA1>\
</CompleteMultipartUploadResult>" </CompleteMultipartUploadResult>"
); );
Ok(()) Ok(())
@@ -780,12 +801,22 @@ mod tests {
last_modified: Value("2010-11-10T20:48:34.000Z".to_string()), last_modified: Value("2010-11-10T20:48:34.000Z".to_string()),
part_number: IntValue(2), part_number: IntValue(2),
size: IntValue(10485760), size: IntValue(10485760),
checksum_crc32: None,
checksum_crc32c: None,
checksum_sha256: Some(Value(
"5RQ3A5uk0w7ojNjvegohch4JRBBGN/cLhsNrPzfv/hA=".into(),
)),
checksum_sha1: None,
}, },
PartItem { PartItem {
etag: Value("\"aaaa18db4cc2f85cedef654fccc4a4x8\"".to_string()), etag: Value("\"aaaa18db4cc2f85cedef654fccc4a4x8\"".to_string()),
last_modified: Value("2010-11-10T20:48:33.000Z".to_string()), last_modified: Value("2010-11-10T20:48:33.000Z".to_string()),
part_number: IntValue(3), part_number: IntValue(3),
size: IntValue(10485760), size: IntValue(10485760),
checksum_sha256: None,
checksum_crc32c: None,
checksum_crc32: Some(Value("ZJAnHyG8=".into())),
checksum_sha1: None,
}, },
], ],
initiator: Initiator { initiator: Initiator {
@@ -820,12 +851,14 @@ mod tests {
<LastModified>2010-11-10T20:48:34.000Z</LastModified>\ <LastModified>2010-11-10T20:48:34.000Z</LastModified>\
<PartNumber>2</PartNumber>\ <PartNumber>2</PartNumber>\
<Size>10485760</Size>\ <Size>10485760</Size>\
<ChecksumSHA256>5RQ3A5uk0w7ojNjvegohch4JRBBGN/cLhsNrPzfv/hA=</ChecksumSHA256>\
</Part>\ </Part>\
<Part>\ <Part>\
<ETag>&quot;aaaa18db4cc2f85cedef654fccc4a4x8&quot;</ETag>\ <ETag>&quot;aaaa18db4cc2f85cedef654fccc4a4x8&quot;</ETag>\
<LastModified>2010-11-10T20:48:33.000Z</LastModified>\ <LastModified>2010-11-10T20:48:33.000Z</LastModified>\
<PartNumber>3</PartNumber>\ <PartNumber>3</PartNumber>\
<Size>10485760</Size>\ <Size>10485760</Size>\
<ChecksumCRC32>ZJAnHyG8=</ChecksumCRC32>\
</Part>\ </Part>\
<Initiator>\ <Initiator>\
<DisplayName>umat-user-11116a31-17b5-4fb7-9df5-b288870f11xx</DisplayName>\ <DisplayName>umat-user-11116a31-17b5-4fb7-9df5-b288870f11xx</DisplayName>\


@@ -31,7 +31,13 @@ pub const AWS4_HMAC_SHA256: &str = "AWS4-HMAC-SHA256";
pub const UNSIGNED_PAYLOAD: &str = "UNSIGNED-PAYLOAD"; pub const UNSIGNED_PAYLOAD: &str = "UNSIGNED-PAYLOAD";
pub const STREAMING_AWS4_HMAC_SHA256_PAYLOAD: &str = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"; pub const STREAMING_AWS4_HMAC_SHA256_PAYLOAD: &str = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD";
pub type QueryMap = HashMap<String, String>; pub type QueryMap = HeaderMap<QueryValue>;
pub struct QueryValue {
/// Original key with potential uppercase characters,
/// for use in signature calculation
key: String,
value: String,
}
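
HeaderMap compares keys case-insensitively, which is exactly what query-parameter lookup needs; the signature, however, must be computed over the bytes the client actually sent, hence the retained original key. A small demonstration of that split, with illustrative values:

use http::{HeaderMap, HeaderName};

struct QueryValue { key: String, value: String }

fn demo() {
    let mut query: HeaderMap<QueryValue> = HeaderMap::new();
    let name: HeaderName = "x-amz-date".parse().unwrap();
    query.insert(name, QueryValue {
        key: "X-Amz-Date".into(), // original casing, kept for signing
        value: "20240101T000000Z".into(),
    });
    // Lookup is case-insensitive...
    assert!(query.contains_key("X-AMZ-DATE"));
    // ...but the canonical query string keeps the client's exact casing.
    let canonical: Vec<String> = query
        .iter()
        .map(|(_, q)| format!("{}={}", q.key, q.value))
        .collect();
    assert_eq!(canonical, ["X-Amz-Date=20240101T000000Z"]);
}
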
pub async fn check_payload_signature( pub async fn check_payload_signature(
garage: &Garage, garage: &Garage,
@@ -40,7 +46,7 @@ pub async fn check_payload_signature(
) -> Result<(Option<Key>, Option<Hash>), Error> { ) -> Result<(Option<Key>, Option<Hash>), Error> {
let query = parse_query_map(request.uri())?; let query = parse_query_map(request.uri())?;
if query.contains_key(X_AMZ_ALGORITHM.as_str()) { if query.contains_key(&X_AMZ_ALGORITHM) {
// We check for presigned-URL-style authentication first, because // We check for presigned-URL-style authentication first, because
// the browser or something else could inject an Authorization header // the browser or something else could inject an Authorization header
// that is totally unrelated to AWS signatures. // that is totally unrelated to AWS signatures.
@@ -122,8 +128,8 @@ async fn check_presigned_signature(
request: &mut Request<IncomingBody>, request: &mut Request<IncomingBody>,
mut query: QueryMap, mut query: QueryMap,
) -> Result<(Option<Key>, Option<Hash>), Error> { ) -> Result<(Option<Key>, Option<Hash>), Error> {
let algorithm = query.get(X_AMZ_ALGORITHM.as_str()).unwrap(); let algorithm = query.get(&X_AMZ_ALGORITHM).unwrap();
let authorization = Authorization::parse_presigned(algorithm, &query)?; let authorization = Authorization::parse_presigned(&algorithm.value, &query)?;
// Verify that all necessary request headers are included in signed_headers // Verify that all necessary request headers are included in signed_headers
// For AWSv4 pre-signed URLs, the following must be included: // For AWSv4 pre-signed URLs, the following must be included:
@@ -136,7 +142,7 @@ async fn check_presigned_signature(
// but the signature cannot be computed from a string that contains itself. // but the signature cannot be computed from a string that contains itself.
// AWS specifies that all query params except X-Amz-Signature are included // AWS specifies that all query params except X-Amz-Signature are included
// in the canonical request. // in the canonical request.
query.remove(X_AMZ_SIGNATURE.as_str()); query.remove(&X_AMZ_SIGNATURE);
let canonical_request = canonical_request( let canonical_request = canonical_request(
service, service,
request.method(), request.method(),
@@ -162,10 +168,8 @@ async fn check_presigned_signature(
// then an InvalidRequest error is raised. // then an InvalidRequest error is raised.
let headers_mut = request.headers_mut(); let headers_mut = request.headers_mut();
for (name, value) in query.iter() { for (name, value) in query.iter() {
let name = if let Some(existing) = headers_mut.get(name) {
HeaderName::from_bytes(name.as_bytes()).ok_or_bad_request("Invalid header name")?; if signed_headers.contains(&name) && existing.as_bytes() != value.value.as_bytes() {
if let Some(existing) = headers_mut.get(&name) {
if signed_headers.contains(&name) && existing.as_bytes() != value.as_bytes() {
return Err(Error::bad_request(format!( return Err(Error::bad_request(format!(
"Conflicting values for `{}` in query parameters and request headers", "Conflicting values for `{}` in query parameters and request headers",
name name
@@ -181,7 +185,7 @@ async fn check_presigned_signature(
// that are not signed, however there is not much reason that this would happen) // that are not signed, however there is not much reason that this would happen)
headers_mut.insert( headers_mut.insert(
name, name,
HeaderValue::from_bytes(value.as_bytes()) HeaderValue::from_bytes(value.value.as_bytes())
.ok_or_bad_request("invalid query parameter value")?, .ok_or_bad_request("invalid query parameter value")?,
); );
} }
@@ -193,11 +197,19 @@ }
} }
pub fn parse_query_map(uri: &http::uri::Uri) -> Result<QueryMap, Error> { pub fn parse_query_map(uri: &http::uri::Uri) -> Result<QueryMap, Error> {
let mut query = QueryMap::new(); let mut query = QueryMap::with_capacity(0);
if let Some(query_str) = uri.query() { if let Some(query_str) = uri.query() {
let query_pairs = url::form_urlencoded::parse(query_str.as_bytes()); let query_pairs = url::form_urlencoded::parse(query_str.as_bytes());
for (key, val) in query_pairs { for (key, val) in query_pairs {
if query.insert(key.to_string(), val.into_owned()).is_some() { let name =
HeaderName::from_bytes(key.as_bytes()).ok_or_bad_request("Invalid header name")?;
let value = QueryValue {
key: key.to_string(),
value: val.into_owned(),
};
if query.insert(name, value).is_some() {
return Err(Error::bad_request(format!( return Err(Error::bad_request(format!(
"duplicate query parameter: `{}`", "duplicate query parameter: `{}`",
key key
@@ -306,7 +318,7 @@ pub fn canonical_request(
// Canonical query string from passed HeaderMap // Canonical query string from passed HeaderMap
let canonical_query_string = { let canonical_query_string = {
let mut items = Vec::with_capacity(query.len()); let mut items = Vec::with_capacity(query.len());
for (key, value) in query.iter() { for (_, QueryValue { key, value }) in query.iter() {
items.push(uri_encode(&key, true) + "=" + &uri_encode(&value, true)); items.push(uri_encode(&key, true) + "=" + &uri_encode(&value, true));
} }
items.sort(); items.sort();
@@ -319,8 +331,8 @@ pub fn canonical_request(
.map(|name| { .map(|name| {
let value = headers let value = headers
.get(name) .get(name)
.ok_or_bad_request(format!("signed header `{}` is not present", name))? .ok_or_bad_request(format!("signed header `{}` is not present", name))?;
.to_str()?; let value = std::str::from_utf8(value.as_bytes())?;
Ok(format!("{}:{}", name.as_str(), value.trim())) Ok(format!("{}:{}", name.as_str(), value.trim()))
}) })
.collect::<Result<Vec<String>, Error>>()? .collect::<Result<Vec<String>, Error>>()?
@@ -464,18 +476,19 @@ impl Authorization {
} }
let cred = query let cred = query
.get(X_AMZ_CREDENTIAL.as_str()) .get(&X_AMZ_CREDENTIAL)
.ok_or_bad_request("X-Amz-Credential not found in query parameters")?; .ok_or_bad_request("X-Amz-Credential not found in query parameters")?;
let signed_headers = query let signed_headers = query
.get(X_AMZ_SIGNEDHEADERS.as_str()) .get(&X_AMZ_SIGNEDHEADERS)
.ok_or_bad_request("X-Amz-SignedHeaders not found in query parameters")?; .ok_or_bad_request("X-Amz-SignedHeaders not found in query parameters")?;
let signature = query let signature = query
.get(X_AMZ_SIGNATURE.as_str()) .get(&X_AMZ_SIGNATURE)
.ok_or_bad_request("X-Amz-Signature not found in query parameters")?; .ok_or_bad_request("X-Amz-Signature not found in query parameters")?;
let duration = query let duration = query
.get(X_AMZ_EXPIRES.as_str()) .get(&X_AMZ_EXPIRES)
.ok_or_bad_request("X-Amz-Expires not found in query parameters")? .ok_or_bad_request("X-Amz-Expires not found in query parameters")?
.value
.parse() .parse()
.map_err(|_| Error::bad_request("X-Amz-Expires is not a number".to_string()))?; .map_err(|_| Error::bad_request("X-Amz-Expires is not a number".to_string()))?;
@@ -486,20 +499,20 @@ impl Authorization {
} }
let date = query let date = query
.get(X_AMZ_DATE.as_str()) .get(&X_AMZ_DATE)
.ok_or_bad_request("Missing X-Amz-Date field")?; .ok_or_bad_request("Missing X-Amz-Date field")?;
let date = parse_date(date)?; let date = parse_date(&date.value)?;
if Utc::now() - date > Duration::seconds(duration) { if Utc::now() - date > Duration::seconds(duration) {
return Err(Error::bad_request("Date is too old".to_string())); return Err(Error::bad_request("Date is too old".to_string()));
} }
let (key_id, scope) = parse_credential(cred)?; let (key_id, scope) = parse_credential(&cred.value)?;
Ok(Authorization { Ok(Authorization {
key_id, key_id,
scope, scope,
signed_headers: signed_headers.to_string(), signed_headers: signed_headers.value.clone(),
signature: signature.to_string(), signature: signature.value.clone(),
content_sha256: UNSIGNED_PAYLOAD.to_string(), content_sha256: UNSIGNED_PAYLOAD.to_string(),
date, date,
}) })


@@ -1,6 +1,6 @@
[package] [package]
[package] [package]
name = "garage_block" name = "garage_block"
version = "0.9.2" version = "1.0.1"
authors = ["Alex Auvolat <alex@adnab.me>"] authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"


@@ -96,7 +96,7 @@ impl DataBlock {
} }
} }
fn zstd_encode<R: std::io::Read>(mut source: R, level: i32) -> std::io::Result<Vec<u8>> { pub fn zstd_encode<R: std::io::Read>(mut source: R, level: i32) -> std::io::Result<Vec<u8>> {
let mut result = Vec::<u8>::new(); let mut result = Vec::<u8>::new();
let mut encoder = Encoder::new(&mut result, level)?; let mut encoder = Encoder::new(&mut result, level)?;
encoder.include_checksum(true)?; encoder.include_checksum(true)?;


@@ -1,3 +1,4 @@
use std::collections::HashMap;
use std::path::PathBuf; use std::path::PathBuf;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
@@ -13,9 +14,12 @@ const DRIVE_NPART: usize = 1024;
const HASH_DRIVE_BYTES: (usize, usize) = (2, 3); const HASH_DRIVE_BYTES: (usize, usize) = (2, 3);
const MARKER_FILE_NAME: &str = "garage-marker";
#[derive(Serialize, Deserialize, Debug, Clone)] #[derive(Serialize, Deserialize, Debug, Clone)]
pub(crate) struct DataLayout { pub(crate) struct DataLayout {
pub(crate) data_dirs: Vec<DataDir>, pub(crate) data_dirs: Vec<DataDir>,
markers: HashMap<PathBuf, String>,
/// Primary storage location (index in data_dirs) for each partition /// Primary storage location (index in data_dirs) for each partition
/// = the location where the data is supposed to be, blocks are always /// = the location where the data is supposed to be, blocks are always
@@ -75,16 +79,17 @@ impl DataLayout {
Ok(Self { Ok(Self {
data_dirs, data_dirs,
markers: HashMap::new(),
part_prim, part_prim,
part_sec, part_sec,
}) })
} }
pub(crate) fn update(&mut self, dirs: &DataDirEnum) -> Result<(), Error> { pub(crate) fn update(self, dirs: &DataDirEnum) -> Result<Self, Error> {
// Make list of new data directories, exit if nothing changed // Make list of new data directories, exit if nothing changed
let data_dirs = make_data_dirs(dirs)?; let data_dirs = make_data_dirs(dirs)?;
if data_dirs == self.data_dirs { if data_dirs == self.data_dirs {
return Ok(()); return Ok(self);
} }
let total_cap = data_dirs.iter().filter_map(|x| x.capacity()).sum::<u64>(); let total_cap = data_dirs.iter().filter_map(|x| x.capacity()).sum::<u64>();
@@ -214,11 +219,43 @@ impl DataLayout {
} }
// Apply newly generated config // Apply newly generated config
*self = Self { Ok(Self {
data_dirs, data_dirs,
markers: self.markers,
part_prim, part_prim,
part_sec, part_sec,
}; })
}
pub(crate) fn check_markers(&mut self) -> Result<(), Error> {
let data_dirs = &self.data_dirs;
self.markers
.retain(|k, _| data_dirs.iter().any(|x| x.path == *k));
for dir in self.data_dirs.iter() {
let mut marker_path = dir.path.clone();
marker_path.push(MARKER_FILE_NAME);
let existing_marker = std::fs::read_to_string(&marker_path).ok();
match (existing_marker, self.markers.get(&dir.path)) {
(Some(m1), Some(m2)) => {
if m1 != *m2 {
return Err(Error::Message(format!("Mismatched content for marker file `{}` in data directory `{}`. If you moved data directories or changed their mountpoints, you should remove the `data_layout` file in Garage's metadata directory and restart Garage.", MARKER_FILE_NAME, dir.path.display())));
}
}
(None, Some(_)) => {
return Err(Error::Message(format!("Could not find expected marker file `{}` in data directory `{}`, make sure this data directory is mounted correctly.", MARKER_FILE_NAME, dir.path.display())));
}
(Some(mkr), None) => {
self.markers.insert(dir.path.clone(), mkr);
}
(None, None) => {
let mkr = hex::encode(garage_util::data::gen_uuid().as_slice());
std::fs::write(&marker_path, &mkr)?;
self.markers.insert(dir.path.clone(), mkr);
}
}
}
Ok(()) Ok(())
} }
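
In short, every data directory gets a random token written to a garage-marker file, and the token remembered in the persisted layout must match what is found on disk at startup; a missing or different token means the directory is absent or was swapped. A condensed sketch of that state machine (hypothetical signature; the rand crate is an assumed dependency here):

use std::{fs, io, path::Path};

const MARKER: &str = "garage-marker";

// Returns the (possibly newly created) token for this directory, or an
// error if the on-disk state contradicts what was remembered.
fn check_marker(dir: &Path, remembered: Option<&str>) -> io::Result<String> {
    let path = dir.join(MARKER);
    match (fs::read_to_string(&path).ok(), remembered) {
        (Some(found), Some(known)) if found != known => Err(io::Error::new(
            io::ErrorKind::InvalidData,
            "mismatched marker: data directory moved or remounted",
        )),
        (Some(found), _) => Ok(found), // matches, or adopted on first sight
        (None, Some(_)) => Err(io::Error::new(
            io::ErrorKind::NotFound,
            "marker missing: data directory not mounted?",
        )),
        (None, None) => {
            let token = format!("{:032x}", rand::random::<u128>());
            fs::write(&path, &token)?;
            Ok(token)
        }
    }
}
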
@@ -255,6 +292,7 @@ impl DataLayout {
pub(crate) fn without_secondary_locations(&self) -> Self { pub(crate) fn without_secondary_locations(&self) -> Self {
Self { Self {
data_dirs: self.data_dirs.clone(), data_dirs: self.data_dirs.clone(),
markers: self.markers.clone(),
part_prim: self.part_prim.clone(), part_prim: self.part_prim.clone(),
part_sec: self.part_sec.iter().map(|_| vec![]).collect::<Vec<_>>(), part_sec: self.part_sec.iter().map(|_| vec![]).collect::<Vec<_>>(),
} }
@@ -322,14 +360,12 @@ fn make_data_dirs(dirs: &DataDirEnum) -> Result<Vec<DataDir>, Error> {
fn dir_not_empty(path: &PathBuf) -> Result<bool, Error> { fn dir_not_empty(path: &PathBuf) -> Result<bool, Error> {
for entry in std::fs::read_dir(&path)? { for entry in std::fs::read_dir(&path)? {
let dir = entry?; let dir = entry?;
if dir.file_type()?.is_dir() let ft = dir.file_type()?;
&& dir let name = dir.file_name().into_string().ok();
.file_name() if ft.is_file() && name.as_deref() == Some(MARKER_FILE_NAME) {
.into_string() return Ok(true);
.ok() }
.and_then(|hex| hex::decode(&hex).ok()) if ft.is_dir() && name.and_then(|hex| hex::decode(&hex).ok()).is_some() {
.is_some()
{
return Ok(true); return Ok(true);
} }
} }


@@ -9,3 +9,6 @@ mod block;
mod layout; mod layout;
mod metrics; mod metrics;
mod rc; mod rc;
pub use block::zstd_encode;
pub use rc::CalculateRefcount;


@@ -1,3 +1,4 @@
+use std::convert::TryInto;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
@@ -10,7 +11,7 @@ use serde::{Deserialize, Serialize};
use tokio::fs;
use tokio::io::{AsyncReadExt, AsyncWriteExt, BufReader};
-use tokio::sync::{mpsc, Mutex, MutexGuard};
+use tokio::sync::{mpsc, Mutex, MutexGuard, Semaphore};
use opentelemetry::{
trace::{FutureExt as OtelFutureExt, TraceContextExt, Tracer},
@@ -22,7 +23,7 @@ use garage_net::stream::{read_stream_to_end, stream_asyncread, ByteStream};
use garage_db as db;
use garage_util::background::{vars, BackgroundRunner};
-use garage_util::config::DataDirEnum;
+use garage_util::config::Config;
use garage_util::data::*;
use garage_util::error::*;
use garage_util::metrics::RecordDuration;
@@ -84,14 +85,16 @@ pub struct BlockManager {
data_fsync: bool,
compression_level: Option<i32>,
+disable_scrub: bool,
mutation_lock: Vec<Mutex<BlockManagerLocked>>,
-pub(crate) rc: BlockRc,
+pub rc: BlockRc,
pub resync: BlockResyncManager,
pub(crate) system: Arc<System>,
pub(crate) endpoint: Arc<Endpoint<BlockRpc, Self>>,
+buffer_kb_semaphore: Arc<Semaphore>,
pub(crate) metrics: BlockManagerMetrics,
@@ -119,24 +122,22 @@ struct BlockManagerLocked();
impl BlockManager {
pub fn new(
db: &db::Db,
-data_dir: DataDirEnum,
-data_fsync: bool,
-compression_level: Option<i32>,
+config: &Config,
replication: TableShardedReplication,
system: Arc<System>,
) -> Result<Arc<Self>, Error> {
// Load or compute layout, i.e. assignment of data blocks to the different data directories
let data_layout_persister: Persister<DataLayout> =
Persister::new(&system.metadata_dir, "data_layout");
-let data_layout = match data_layout_persister.load() {
-Ok(mut layout) => {
-layout
-.update(&data_dir)
-.ok_or_message("invalid data_dir config")?;
-layout
-}
-Err(_) => DataLayout::initialize(&data_dir).ok_or_message("invalid data_dir config")?,
+let mut data_layout = match data_layout_persister.load() {
+Ok(layout) => layout
+.update(&config.data_dir)
+.ok_or_message("invalid data_dir config")?,
+Err(_) => {
+DataLayout::initialize(&config.data_dir).ok_or_message("invalid data_dir config")?
+}
};
+data_layout.check_markers()?;
data_layout_persister
.save(&data_layout)
.expect("cannot save data_layout");
@@ -153,11 +154,14 @@ impl BlockManager {
.netapp
.endpoint("garage_block/manager.rs/Rpc".to_string());
+let buffer_kb_semaphore = Arc::new(Semaphore::new(config.block_ram_buffer_max / 1024));
let metrics = BlockManagerMetrics::new(
-compression_level,
-rc.rc.clone(),
+config.compression_level,
+rc.rc_table.clone(),
resync.queue.clone(),
resync.errors.clone(),
+buffer_kb_semaphore.clone(),
);
let scrub_persister = PersisterShared::new(&system.metadata_dir, "scrub_info");
@@ -166,8 +170,9 @@ impl BlockManager {
replication,
data_layout: ArcSwap::new(Arc::new(data_layout)),
data_layout_persister,
-data_fsync,
-compression_level,
+data_fsync: config.data_fsync,
+disable_scrub: config.disable_scrub,
+compression_level: config.compression_level,
mutation_lock: vec![(); MUTEX_COUNT]
.iter()
.map(|_| Mutex::new(BlockManagerLocked()))
@@ -176,6 +181,7 @@ impl BlockManager {
resync,
system,
endpoint,
+buffer_kb_semaphore,
metrics,
scrub_persister,
tx_scrub_command: ArcSwapOption::new(None),
@@ -194,33 +200,43 @@ impl BlockManager {
}
// Spawn scrub worker
-let (scrub_tx, scrub_rx) = mpsc::channel(1);
-self.tx_scrub_command.store(Some(Arc::new(scrub_tx)));
-bg.spawn_worker(ScrubWorker::new(
-self.clone(),
-scrub_rx,
-self.scrub_persister.clone(),
-));
+if !self.disable_scrub {
+let (scrub_tx, scrub_rx) = mpsc::channel(1);
+self.tx_scrub_command.store(Some(Arc::new(scrub_tx)));
+bg.spawn_worker(ScrubWorker::new(
+self.clone(),
+scrub_rx,
+self.scrub_persister.clone(),
+));
+}
}
pub fn register_bg_vars(&self, vars: &mut vars::BgVars) {
self.resync.register_bg_vars(vars);
-vars.register_rw(
-&self.scrub_persister,
-"scrub-tranquility",
-|p| p.get_with(|x| x.tranquility),
-|p, tranquility| p.set_with(|x| x.tranquility = tranquility),
-);
-vars.register_ro(&self.scrub_persister, "scrub-last-completed", |p| {
-p.get_with(|x| msec_to_rfc3339(x.time_last_complete_scrub))
-});
-vars.register_ro(&self.scrub_persister, "scrub-next-run", |p| {
-p.get_with(|x| msec_to_rfc3339(x.time_next_run_scrub))
-});
-vars.register_ro(&self.scrub_persister, "scrub-corruptions_detected", |p| {
-p.get_with(|x| x.corruptions_detected)
-});
+if !self.disable_scrub {
+vars.register_rw(
+&self.scrub_persister,
+"scrub-tranquility",
+|p| p.get_with(|x| x.tranquility),
+|p, tranquility| p.set_with(|x| x.tranquility = tranquility),
+);
+vars.register_ro(&self.scrub_persister, "scrub-last-completed", |p| {
+p.get_with(|x| msec_to_rfc3339(x.time_last_complete_scrub))
+});
+vars.register_ro(&self.scrub_persister, "scrub-next-run", |p| {
+p.get_with(|x| msec_to_rfc3339(x.time_next_run_scrub))
+});
+vars.register_ro(&self.scrub_persister, "scrub-corruptions_detected", |p| {
+p.get_with(|x| x.corruptions_detected)
+});
+}
+}
+/// Initialization: set how block references are recalculated
+/// for repair operations
+pub fn set_recalc_rc(&self, recalc: Vec<CalculateRefcount>) {
+self.rc.recalc_rc.store(Some(Arc::new(recalc)));
}
/// Ask nodes that might have a (possibly compressed) block for it
@@ -228,10 +244,16 @@ impl BlockManager {
async fn rpc_get_raw_block_streaming(
&self,
hash: &Hash,
+priority: RequestPriority,
order_tag: Option<OrderTag>,
) -> Result<DataBlockStream, Error> {
-self.rpc_get_raw_block_internal(hash, order_tag, |stream| async move { Ok(stream) })
-.await
+self.rpc_get_raw_block_internal(
+hash,
+priority,
+order_tag,
+|stream| async move { Ok(stream) },
+)
+.await
}
/// Ask nodes that might have a (possibly compressed) block for it
@@ -239,9 +261,10 @@ impl BlockManager {
pub(crate) async fn rpc_get_raw_block(
&self,
hash: &Hash,
+priority: RequestPriority,
order_tag: Option<OrderTag>,
) -> Result<DataBlock, Error> {
-self.rpc_get_raw_block_internal(hash, order_tag, |block_stream| async move {
+self.rpc_get_raw_block_internal(hash, priority, order_tag, |block_stream| async move {
let (header, stream) = block_stream.into_parts();
read_stream_to_end(stream)
.await
@@ -254,6 +277,7 @@ impl BlockManager {
async fn rpc_get_raw_block_internal<F, Fut, T>(
&self,
hash: &Hash,
+priority: RequestPriority,
order_tag: Option<OrderTag>,
f: F,
) -> Result<T, Error>
@@ -261,15 +285,17 @@ impl BlockManager {
F: Fn(DataBlockStream) -> Fut,
Fut: futures::Future<Output = Result<T, Error>>,
{
-let who = self.replication.read_nodes(hash);
-let who = self.system.rpc.request_order(&who);
+let who = self
+.system
+.rpc_helper()
+.block_read_nodes_of(hash, self.system.rpc_helper());
for node in who.iter() {
let node_id = NodeID::from(*node);
let rpc = self.endpoint.call_streaming(
&node_id,
BlockRpc::GetBlock(*hash, order_tag),
-PRIO_NORMAL | PRIO_SECONDARY,
+priority,
);
tokio::select! {
res = rpc => {
@@ -302,15 +328,15 @@ impl BlockManager {
// if the first one doesn't succeed rapidly
// TODO: keep first request running when initiating a new one and take the
// one that finishes earlier
-_ = tokio::time::sleep(self.system.rpc.rpc_timeout()) => {
+_ = tokio::time::sleep(self.system.rpc_helper().rpc_timeout()) => {
debug!("Get block {:?}: node {:?} didn't return block in time, trying next.", hash, node);
}
};
}
-let msg = format!("Get block {:?}: no node returned a valid block", hash);
-debug!("{}", msg);
-Err(Error::Message(msg))
+let err = Error::MissingBlock(*hash);
+debug!("{}", err);
+Err(err)
}
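The loop above walks the candidate nodes in preference order, starting one request at a time and falling through to the next node when no answer arrives within the RPC timeout, before finally reporting `MissingBlock`. A hedged standalone sketch of that fallback pattern (simplified: the real code uses tokio::select! and keeps richer per-node error context):

    use std::time::Duration;

    /// Try an async operation against each peer in order, giving each
    /// attempt `timeout` before moving on to the next peer.
    async fn try_each_peer<T, F, Fut>(peers: &[u64], timeout: Duration, attempt: F) -> Option<T>
    where
        F: Fn(u64) -> Fut,
        Fut: std::future::Future<Output = Option<T>>,
    {
        for &peer in peers {
            match tokio::time::timeout(timeout, attempt(peer)).await {
                Ok(Some(value)) => return Some(value), // this peer answered
                Ok(None) => continue,                  // peer failed, try the next one
                Err(_elapsed) => continue,             // too slow, try the next one
            }
        }
        None // analogous to Error::MissingBlock: no peer had a valid copy
    }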
// ---- Public interface ----
@@ -321,7 +347,9 @@ impl BlockManager {
hash: &Hash,
order_tag: Option<OrderTag>,
) -> Result<ByteStream, Error> {
-let block_stream = self.rpc_get_raw_block_streaming(hash, order_tag).await?;
+let block_stream = self
+.rpc_get_raw_block_streaming(hash, PRIO_NORMAL | PRIO_SECONDARY, order_tag)
+.await?;
let (header, stream) = block_stream.into_parts();
match header {
DataBlockHeader::Plain => Ok(stream),
@@ -335,28 +363,28 @@ impl BlockManager {
}
}
-/// Ask nodes that might have a block for it, return it as one big Bytes
-pub async fn rpc_get_block(
-&self,
-hash: &Hash,
-order_tag: Option<OrderTag>,
-) -> Result<Bytes, Error> {
-let stream = self.rpc_get_block_streaming(hash, order_tag).await?;
-Ok(read_stream_to_end(stream).await?.into_bytes())
-}
/// Send block to nodes that should have it
pub async fn rpc_put_block(
&self,
hash: Hash,
data: Bytes,
+prevent_compression: bool,
order_tag: Option<OrderTag>,
) -> Result<(), Error> {
-let who = self.replication.write_nodes(&hash);
-let (header, bytes) = DataBlock::from_buffer(data, self.compression_level)
+let who = self.replication.write_sets(&hash);
+let compression_level = self.compression_level.filter(|_| !prevent_compression);
+let (header, bytes) = DataBlock::from_buffer(data, compression_level)
.await
.into_parts();
+let permit = self
+.buffer_kb_semaphore
+.clone()
+.acquire_many_owned((bytes.len() / 1024).try_into().unwrap())
+.await
+.ok_or_message("could not reserve space for buffer of data to send to remote nodes")?;
let put_block_rpc =
Req::new(BlockRpc::PutBlock { hash, header })?.with_stream_from_buffer(bytes);
let put_block_rpc = if let Some(tag) = order_tag {
@@ -366,12 +394,13 @@ impl BlockManager {
};
self.system
-.rpc
-.try_call_many(
+.rpc_helper()
+.try_write_many_sets(
&self.endpoint,
-&who[..],
+who.as_ref(),
put_block_rpc,
RequestStrategy::with_priority(PRIO_NORMAL | PRIO_SECONDARY)
+.with_drop_on_completion(permit)
.with_quorum(self.replication.write_quorum()),
)
.await?;
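`rpc_put_block` now reserves one semaphore permit per KiB of buffered payload before fanning the block out, so the total RAM used for in-flight uploads stays below `block_ram_buffer_max`; `with_drop_on_completion(permit)` releases the budget when the RPC finishes. A minimal sketch of this backpressure pattern, with an illustrative 1 MiB budget:

    use std::sync::Arc;
    use tokio::sync::Semaphore;

    #[tokio::main]
    async fn main() {
        // Budget: 1024 KiB of payload may be buffered at any time (illustrative value).
        let buffer_kb = Arc::new(Semaphore::new(1024));

        let payload = vec![0u8; 300 * 1024]; // a 300 KiB block
        let permits: u32 = (payload.len() / 1024) as u32;

        // Waits until enough "KiB permits" are free; a payload larger than the
        // whole budget would wait forever, so real code must handle that case.
        let permit = buffer_kb
            .clone()
            .acquire_many_owned(permits)
            .await
            .expect("semaphore closed");

        tokio::spawn(async move {
            // ... send `payload` to remote nodes ...
            drop(payload);
            drop(permit); // frees the KiB budget once the send completes
        })
        .await
        .unwrap();
    }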
@@ -381,12 +410,7 @@ impl BlockManager {
/// Get number of items in the refcount table
pub fn rc_len(&self) -> Result<usize, Error> {
-Ok(self.rc.rc.len()?)
-}
-/// Get number of items in the refcount table
-pub fn rc_fast_len(&self) -> Result<Option<usize>, Error> {
-Ok(self.rc.rc.fast_len()?)
+Ok(self.rc.rc_table.len()?)
}
/// Send command to start/stop/manage scrub worker
@@ -404,7 +428,7 @@ impl BlockManager {
/// List all resync errors
pub fn list_resync_errors(&self) -> Result<Vec<BlockResyncErrorInfo>, Error> {
-let mut blocks = Vec::with_capacity(self.resync.errors.len());
+let mut blocks = Vec::with_capacity(self.resync.errors.len()?);
for ent in self.resync.errors.iter()? {
let (hash, cnt) = ent?;
let cnt = ErrorCounter::decode(&cnt);
@@ -442,7 +466,7 @@ impl BlockManager {
tokio::spawn(async move {
if let Err(e) = this
.resync
-.put_to_resync(&hash, 2 * this.system.rpc.rpc_timeout())
+.put_to_resync(&hash, 2 * this.system.rpc_helper().rpc_timeout())
{
error!("Block {:?} could not be put in resync queue: {}.", hash, e);
}
@@ -536,7 +560,7 @@ impl BlockManager {
None => {
// Not found but maybe we should have had it ??
self.resync
-.put_to_resync(hash, 2 * self.system.rpc.rpc_timeout())?;
+.put_to_resync(hash, 2 * self.system.rpc_helper().rpc_timeout())?;
return Err(Error::Message(format!(
"block {:?} not found on node",
hash


@@ -1,7 +1,10 @@
+use std::sync::Arc;
+use tokio::sync::Semaphore;
use opentelemetry::{global, metrics::*};
use garage_db as db;
-use garage_db::counted_tree_hack::CountedTree;
/// TableMetrics references all counters used for metrics
pub struct BlockManagerMetrics {
@@ -9,6 +12,7 @@ pub struct BlockManagerMetrics {
pub(crate) _rc_size: ValueObserver<u64>,
pub(crate) _resync_queue_len: ValueObserver<u64>,
pub(crate) _resync_errored_blocks: ValueObserver<u64>,
+pub(crate) _buffer_free_kb: ValueObserver<u64>,
pub(crate) resync_counter: BoundCounter<u64>,
pub(crate) resync_error_counter: BoundCounter<u64>,
@@ -29,8 +33,9 @@ impl BlockManagerMetrics {
pub fn new(
compression_level: Option<i32>,
rc_tree: db::Tree,
-resync_queue: CountedTree,
-resync_errors: CountedTree,
+resync_queue: db::Tree,
+resync_errors: db::Tree,
+buffer_semaphore: Arc<Semaphore>,
) -> Self {
let meter = global::meter("garage_model/block");
Self {
@@ -45,15 +50,17 @@ impl BlockManagerMetrics {
.init(),
_rc_size: meter
.u64_value_observer("block.rc_size", move |observer| {
-if let Ok(Some(v)) = rc_tree.fast_len() {
-observer.observe(v as u64, &[])
+if let Ok(value) = rc_tree.len() {
+observer.observe(value as u64, &[])
}
})
.with_description("Number of blocks known to the reference counter")
.init(),
_resync_queue_len: meter
.u64_value_observer("block.resync_queue_length", move |observer| {
-observer.observe(resync_queue.len() as u64, &[])
+if let Ok(value) = resync_queue.len() {
+observer.observe(value as u64, &[]);
+}
})
.with_description(
"Number of block hashes queued for local check and possible resync",
@@ -61,11 +68,22 @@ impl BlockManagerMetrics {
.init(),
_resync_errored_blocks: meter
.u64_value_observer("block.resync_errored_blocks", move |observer| {
-observer.observe(resync_errors.len() as u64, &[])
+if let Ok(value) = resync_errors.len() {
+observer.observe(value as u64, &[]);
+}
})
.with_description("Number of block hashes whose last resync resulted in an error")
.init(),
+_buffer_free_kb: meter
+.u64_value_observer("block.ram_buffer_free_kb", move |observer| {
+observer.observe(buffer_semaphore.available_permits() as u64, &[])
+})
+.with_description(
+"Available RAM in KiB to use for buffering data blocks to be written to remote nodes",
+)
+.init(),
resync_counter: meter
.u64_counter("block.resync_counter")
.with_description("Number of calls to resync_block")
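Each `u64_value_observer` above registers a gauge-style callback that is invoked at metrics-collection time and reads a live data source (a tree's length, the semaphore's free permits). A hedged sketch of the same registration pattern, using the same observer API that appears in the diff (the metric name is made up):

    use std::sync::atomic::{AtomicU64, Ordering};
    use std::sync::Arc;

    use opentelemetry::{global, metrics::ValueObserver};

    /// Register a gauge that reports the current value of a shared counter.
    fn register_queue_len_gauge(len: Arc<AtomicU64>) -> ValueObserver<u64> {
        let meter = global::meter("example/block");
        meter
            .u64_value_observer("example.queue_length", move |observer| {
                // Called on each collection cycle; reads the live value.
                observer.observe(len.load(Ordering::Relaxed), &[])
            })
            .with_description("Current number of queued items")
            .init()
    }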


@@ -1,5 +1,7 @@
use std::convert::TryInto;
+use arc_swap::ArcSwapOption;
use garage_db as db;
use garage_util::data::*;
@@ -8,13 +10,20 @@ use garage_util::time::*;
use crate::manager::BLOCK_GC_DELAY;
+pub type CalculateRefcount =
+Box<dyn Fn(&db::Transaction, &Hash) -> db::TxResult<usize, Error> + Send + Sync>;
pub struct BlockRc {
-pub(crate) rc: db::Tree,
+pub rc_table: db::Tree,
+pub(crate) recalc_rc: ArcSwapOption<Vec<CalculateRefcount>>,
}
impl BlockRc {
pub(crate) fn new(rc: db::Tree) -> Self {
-Self { rc }
+Self {
+rc_table: rc,
+recalc_rc: ArcSwapOption::new(None),
+}
}
/// Increment the reference counter associated with a hash.
@@ -24,9 +33,9 @@ impl BlockRc {
tx: &mut db::Transaction,
hash: &Hash,
) -> db::TxOpResult<bool> {
-let old_rc = RcEntry::parse_opt(tx.get(&self.rc, hash)?);
+let old_rc = RcEntry::parse_opt(tx.get(&self.rc_table, hash)?);
match old_rc.increment().serialize() {
-Some(x) => tx.insert(&self.rc, hash, x)?,
+Some(x) => tx.insert(&self.rc_table, hash, x)?,
None => unreachable!(),
};
Ok(old_rc.is_zero())
@@ -39,28 +48,28 @@ impl BlockRc {
tx: &mut db::Transaction,
hash: &Hash,
) -> db::TxOpResult<bool> {
-let new_rc = RcEntry::parse_opt(tx.get(&self.rc, hash)?).decrement();
+let new_rc = RcEntry::parse_opt(tx.get(&self.rc_table, hash)?).decrement();
match new_rc.serialize() {
-Some(x) => tx.insert(&self.rc, hash, x)?,
-None => tx.remove(&self.rc, hash)?,
+Some(x) => tx.insert(&self.rc_table, hash, x)?,
+None => tx.remove(&self.rc_table, hash)?,
};
Ok(matches!(new_rc, RcEntry::Deletable { .. }))
}
/// Read a block's reference count
pub(crate) fn get_block_rc(&self, hash: &Hash) -> Result<RcEntry, Error> {
-Ok(RcEntry::parse_opt(self.rc.get(hash.as_ref())?))
+Ok(RcEntry::parse_opt(self.rc_table.get(hash.as_ref())?))
}
/// Delete an entry in the RC table if it is deletable and the
/// deletion time has passed
pub(crate) fn clear_deleted_block_rc(&self, hash: &Hash) -> Result<(), Error> {
let now = now_msec();
-self.rc.db().transaction(|tx| {
-let rcval = RcEntry::parse_opt(tx.get(&self.rc, hash)?);
+self.rc_table.db().transaction(|tx| {
+let rcval = RcEntry::parse_opt(tx.get(&self.rc_table, hash)?);
match rcval {
RcEntry::Deletable { at_time } if now > at_time => {
-tx.remove(&self.rc, hash)?;
+tx.remove(&self.rc_table, hash)?;
}
_ => (),
};
@@ -68,6 +77,58 @@ impl BlockRc {
})?;
Ok(())
}
/// Recalculate the reference counter of a block
/// to fix potential inconsistencies
pub fn recalculate_rc(&self, hash: &Hash) -> Result<(usize, bool), Error> {
if let Some(recalc_fns) = self.recalc_rc.load().as_ref() {
trace!("Repair block RC for {:?}", hash);
let res = self
.rc_table
.db()
.transaction(|tx| {
let mut cnt = 0;
for f in recalc_fns.iter() {
cnt += f(&tx, hash)?;
}
let old_rc = RcEntry::parse_opt(tx.get(&self.rc_table, hash)?);
trace!(
"Block RC for {:?}: stored={}, calculated={}",
hash,
old_rc.as_u64(),
cnt
);
if cnt as u64 != old_rc.as_u64() {
warn!(
"Fixing inconsistent block RC for {:?}: was {}, should be {}",
hash,
old_rc.as_u64(),
cnt
);
let new_rc = if cnt > 0 {
RcEntry::Present { count: cnt as u64 }
} else {
RcEntry::Deletable {
at_time: now_msec() + BLOCK_GC_DELAY.as_millis() as u64,
}
};
tx.insert(&self.rc_table, hash, new_rc.serialize().unwrap())?;
Ok((cnt, true))
} else {
Ok((cnt, false))
}
})
.map_err(Error::from);
if let Err(e) = &res {
error!("Failed to fix RC for block {:?}: {}", hash, e);
}
res
} else {
Err(Error::Message(
"Block RC recalculation is not available at this point".into(),
))
}
}
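`recalculate_rc` re-derives a block's reference count by summing the results of the registered `CalculateRefcount` closures (installed at startup via `set_recalc_rc`) inside one transaction, then rewrites the stored entry when it disagrees. A standalone sketch of the same idea, with simplified in-memory stand-ins for the db-backed types:

    use std::collections::HashMap;

    /// Compare the stored refcount against a freshly computed one and
    /// fix it when they diverge; returns (computed, was_fixed).
    fn recalculate_rc(
        stored: &mut HashMap<[u8; 32], u64>,
        compute_fns: &[Box<dyn Fn(&[u8; 32]) -> u64>],
        hash: &[u8; 32],
    ) -> (u64, bool) {
        let computed: u64 = compute_fns.iter().map(|f| f(hash)).sum();
        let old = stored.get(hash).copied().unwrap_or(0);
        if computed != old {
            eprintln!("fixing inconsistent refcount: was {}, should be {}", old, computed);
            if computed > 0 {
                stored.insert(*hash, computed);
            } else {
                stored.remove(hash); // Garage instead stores a delayed-deletion entry
            }
            (computed, true)
        } else {
            (computed, false)
        }
    }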
}
/// Describes the state of the reference counter for a block


@@ -107,7 +107,7 @@ impl Worker for RepairWorker {
for entry in self
.manager
.rc
-.rc
+.rc_table
.range::<&[u8], _>((start_bound, Bound::Unbounded))?
{
let (hash, _) = entry?;


@@ -15,7 +15,6 @@ use opentelemetry::{
};
use garage_db as db;
-use garage_db::counted_tree_hack::CountedTree;
use garage_util::background::*;
use garage_util::data::*;
@@ -47,9 +46,9 @@ pub(crate) const MAX_RESYNC_WORKERS: usize = 8;
const INITIAL_RESYNC_TRANQUILITY: u32 = 2;
pub struct BlockResyncManager {
-pub(crate) queue: CountedTree,
+pub(crate) queue: db::Tree,
pub(crate) notify: Arc<Notify>,
-pub(crate) errors: CountedTree,
+pub(crate) errors: db::Tree,
busy_set: BusySet,
@@ -90,12 +89,10 @@ impl BlockResyncManager {
let queue = db
.open_tree("block_local_resync_queue")
.expect("Unable to open block_local_resync_queue tree");
-let queue = CountedTree::new(queue).expect("Could not count block_local_resync_queue");
let errors = db
.open_tree("block_local_resync_errors")
.expect("Unable to open block_local_resync_errors tree");
-let errors = CountedTree::new(errors).expect("Could not count block_local_resync_errors");
let persister = PersisterShared::new(&system.metadata_dir, "resync_cfg");
@@ -110,16 +107,12 @@ impl BlockResyncManager {
/// Get length of resync queue
pub fn queue_len(&self) -> Result<usize, Error> {
-// This currently can't return an error because the CountedTree hack
-// doesn't error on .len(), but this will change when we remove the hack
-// (hopefully someday!)
-Ok(self.queue.len())
+Ok(self.queue.len()?)
}
/// Get number of blocks that have an error
pub fn errors_len(&self) -> Result<usize, Error> {
-// (see queue_len comment)
-Ok(self.errors.len())
+Ok(self.errors.len()?)
}
/// Clear the error counter for a block and put it in queue immediately
@@ -180,7 +173,7 @@ impl BlockResyncManager {
// deleted once the garbage collection delay has passed.
//
// Here are some explanations on how the resync queue works.
-// There are two Sled trees that are used to have information
+// There are two db trees that are used to have information
// about the status of blocks that need to be resynchronized:
//
// - resync.queue: a tree that is ordered first by a timestamp
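The comment describes a retry queue stored in an ordered KV tree whose keys begin with a timestamp, so iterating from the front yields exactly the entries whose retry time has passed. A hedged sketch of that key layout (the exact encoding Garage uses may differ in detail):

    use std::collections::BTreeMap;

    /// Build a queue key that sorts by time first: a big-endian timestamp
    /// followed by the block hash makes lexicographic order == chronological order.
    fn queue_key(when_msec: u64, hash: &[u8; 32]) -> Vec<u8> {
        let mut key = Vec::with_capacity(8 + 32);
        key.extend_from_slice(&when_msec.to_be_bytes());
        key.extend_from_slice(hash);
        key
    }

    fn main() {
        let mut queue: BTreeMap<Vec<u8>, ()> = BTreeMap::new();
        queue.insert(queue_key(2_000, &[0xaa; 32]), ());
        queue.insert(queue_key(1_000, &[0xbb; 32]), ());

        // Entries come out in retry-time order regardless of insertion order.
        let first = queue.keys().next().unwrap();
        assert_eq!(&first[..8], &1_000u64.to_be_bytes());
    }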
@@ -374,10 +367,17 @@ impl BlockResyncManager {
}
if exists && rc.is_deletable() {
+if manager.rc.recalculate_rc(hash)?.0 > 0 {
+return Err(Error::Message(format!(
+"Refcount for block {:?} was inconsistent, retrying later",
+hash
+)));
+}
info!("Resync block {:?}: offloading and deleting", hash);
let existing_path = existing_path.unwrap();
-let mut who = manager.replication.write_nodes(hash);
+let mut who = manager.replication.storage_nodes(hash);
if who.len() < manager.replication.write_quorum() {
return Err(Error::Message("Not trying to offload block because we don't have a quorum of nodes to write to".to_string()));
}
@@ -385,7 +385,7 @@ impl BlockResyncManager {
let who_needs_resps = manager
.system
-.rpc
+.rpc_helper()
.call_many(
&manager.endpoint,
&who,
@@ -431,12 +431,12 @@ impl BlockResyncManager {
.with_stream_from_buffer(bytes);
manager
.system
-.rpc
+.rpc_helper()
.try_call_many(
&manager.endpoint,
-&need_nodes[..],
+&need_nodes,
put_block_message,
-RequestStrategy::with_priority(PRIO_BACKGROUND)
+RequestStrategy::with_priority(PRIO_BACKGROUND | PRIO_SECONDARY)
.with_quorum(need_nodes.len()),
)
.await
@@ -460,7 +460,17 @@ impl BlockResyncManager {
hash
);
-let block_data = manager.rpc_get_raw_block(hash, None).await?;
+let block_data = manager
+.rpc_get_raw_block(hash, PRIO_BACKGROUND | PRIO_SECONDARY, None)
+.await;
+if matches!(block_data, Err(Error::MissingBlock(_))) {
+warn!(
+"Could not fetch needed block {:?}, no node returned valid data. Checking that refcount is correct.",
+hash
+);
+manager.rc.recalculate_rc(hash)?;
+}
+let block_data = block_data?;
manager.metrics.resync_recv_counter.add(1);
@@ -541,9 +551,9 @@ impl Worker for ResyncWorker {
Ok(WorkerState::Idle)
}
Err(e) => {
-// The errors that we have here are only Sled errors
+// The errors that we have here are only db errors
// We don't really know how to handle them so just ¯\_(ツ)_/¯
-// (there is kind of an assumption that Sled won't error on us,
+// (there is kind of an assumption that the db won't error on us,
// if it does there is not much we can do -- TODO should we just panic?)
// Here we just give the error to the worker manager,
// it will print it to the logs and increment a counter


@@ -1,6 +1,6 @@
[package]
name = "garage_db"
-version = "0.9.2"
+version = "1.0.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
@@ -17,14 +17,15 @@ hexdump.workspace = true
tracing.workspace = true
heed = { workspace = true, optional = true }
-rusqlite = { workspace = true, optional = true }
-sled = { workspace = true, optional = true }
+rusqlite = { workspace = true, optional = true, features = ["backup"] }
+r2d2 = { workspace = true, optional = true }
+r2d2_sqlite = { workspace = true, optional = true }
[dev-dependencies]
mktemp.workspace = true
[features]
-default = [ "sled", "lmdb", "sqlite" ]
+default = [ "lmdb", "sqlite" ]
bundled-libs = [ "rusqlite?/bundled" ]
lmdb = [ "heed" ]
-sqlite = [ "rusqlite" ]
+sqlite = [ "rusqlite", "r2d2", "r2d2_sqlite" ]
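With Sled dropped from the default feature set, a build that wants a single engine can now disable defaults explicitly, e.g. `cargo build -p garage_db --no-default-features --features sqlite,bundled-libs` to get only the SQLite backend with the bundled libsqlite3 (an illustrative invocation using the features declared above).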


@@ -1,127 +0,0 @@
//! This hack allows a db tree to keep in RAM a counter of the number of entries
//! it contains, which is used to call .len() on it. This is useful only for
//! the sled backend where .len() otherwise would have to traverse the whole
//! tree to count items. For sqlite and lmdb, this is mostly useless (but
//! hopefully not harmful!). Note that a CountedTree cannot be part of a
//! transaction.
use std::sync::{
atomic::{AtomicUsize, Ordering},
Arc,
};
use crate::{Result, Tree, TxError, Value, ValueIter};
#[derive(Clone)]
pub struct CountedTree(Arc<CountedTreeInternal>);
struct CountedTreeInternal {
tree: Tree,
len: AtomicUsize,
}
impl CountedTree {
pub fn new(tree: Tree) -> Result<Self> {
let len = tree.len()?;
Ok(Self(Arc::new(CountedTreeInternal {
tree,
len: AtomicUsize::new(len),
})))
}
pub fn len(&self) -> usize {
self.0.len.load(Ordering::SeqCst)
}
pub fn is_empty(&self) -> bool {
self.len() == 0
}
pub fn get<K: AsRef<[u8]>>(&self, key: K) -> Result<Option<Value>> {
self.0.tree.get(key)
}
pub fn first(&self) -> Result<Option<(Value, Value)>> {
self.0.tree.first()
}
pub fn iter(&self) -> Result<ValueIter<'_>> {
self.0.tree.iter()
}
// ---- writing functions ----
pub fn insert<K, V>(&self, key: K, value: V) -> Result<Option<Value>>
where
K: AsRef<[u8]>,
V: AsRef<[u8]>,
{
let old_val = self.0.tree.insert(key, value)?;
if old_val.is_none() {
self.0.len.fetch_add(1, Ordering::SeqCst);
}
Ok(old_val)
}
pub fn remove<K: AsRef<[u8]>>(&self, key: K) -> Result<Option<Value>> {
let old_val = self.0.tree.remove(key)?;
if old_val.is_some() {
self.0.len.fetch_sub(1, Ordering::SeqCst);
}
Ok(old_val)
}
pub fn compare_and_swap<K, OV, NV>(
&self,
key: K,
expected_old: Option<OV>,
new: Option<NV>,
) -> Result<bool>
where
K: AsRef<[u8]>,
OV: AsRef<[u8]>,
NV: AsRef<[u8]>,
{
let old_some = expected_old.is_some();
let new_some = new.is_some();
let tx_res = self.0.tree.db().transaction(|tx| {
let old_val = tx.get(&self.0.tree, &key)?;
let is_same = match (&old_val, &expected_old) {
(None, None) => true,
(Some(x), Some(y)) if x == y.as_ref() => true,
_ => false,
};
if is_same {
match &new {
Some(v) => {
tx.insert(&self.0.tree, &key, v)?;
}
None => {
tx.remove(&self.0.tree, &key)?;
}
}
Ok(())
} else {
Err(TxError::Abort(()))
}
});
match tx_res {
Ok(()) => {
match (old_some, new_some) {
(false, true) => {
self.0.len.fetch_add(1, Ordering::SeqCst);
}
(true, false) => {
self.0.len.fetch_sub(1, Ordering::SeqCst);
}
_ => (),
}
Ok(true)
}
Err(TxError::Abort(())) => Ok(false),
Err(TxError::Db(e)) => Err(e),
}
}
}


@@ -1,15 +1,12 @@
#[macro_use]
+#[cfg(feature = "sqlite")]
extern crate tracing;
#[cfg(feature = "lmdb")]
pub mod lmdb_adapter;
-#[cfg(feature = "sled")]
-pub mod sled_adapter;
#[cfg(feature = "sqlite")]
pub mod sqlite_adapter;
-pub mod counted_tree_hack;
+pub mod open;
#[cfg(test)]
pub mod test;
@@ -18,10 +15,13 @@ use core::ops::{Bound, RangeBounds};
use std::borrow::Cow;
use std::cell::Cell;
+use std::path::PathBuf;
use std::sync::Arc;
use err_derive::Error;
+pub use open::*;
pub(crate) type OnCommit = Vec<Box<dyn FnOnce()>>;
#[derive(Clone)]
@@ -45,6 +45,12 @@ pub type TxValueIter<'a> = Box<dyn std::iter::Iterator<Item = TxOpResult<(Value,
#[error(display = "{}", _0)]
pub struct Error(pub Cow<'static, str>);
+impl From<std::io::Error> for Error {
+fn from(e: std::io::Error) -> Error {
+Error(format!("IO: {}", e).into())
+}
+}
pub type Result<T> = std::result::Result<T, Error>;
#[derive(Debug, Error)]
@@ -52,6 +58,7 @@ pub type Result<T> = std::result::Result<T, Error>;
pub struct TxOpError(pub(crate) Error);
pub type TxOpResult<T> = std::result::Result<T, TxOpError>;
+#[derive(Debug)]
pub enum TxError<E> {
Abort(E),
Db(Error),
@@ -126,6 +133,10 @@ impl Db {
}
}
+pub fn snapshot(&self, path: &PathBuf) -> Result<()> {
+self.0.snapshot(path)
+}
pub fn import(&self, other: &Db) -> Result<()> {
let existing_trees = self.list_trees()?;
if !existing_trees.is_empty() {
@@ -171,48 +182,6 @@ impl Db {
}
}
/// List of supported database engine types
///
/// The `enum` holds list of *all* database engines that are are be supported by crate, no matter
/// if relevant feature is enabled or not. It allows us to distinguish between invalid engine
/// and valid engine, whose support is not enabled via feature flag.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Engine {
Lmdb,
Sqlite,
Sled,
}
impl Engine {
/// Return variant name as static `&str`
pub fn as_str(&self) -> &'static str {
match self {
Self::Lmdb => "lmdb",
Self::Sqlite => "sqlite",
Self::Sled => "sled",
}
}
}
impl std::fmt::Display for Engine {
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
self.as_str().fmt(fmt)
}
}
impl std::str::FromStr for Engine {
type Err = Error;
fn from_str(text: &str) -> Result<Engine> {
match text {
"lmdb" | "heed" => Ok(Self::Lmdb),
"sqlite" | "sqlite3" | "rusqlite" => Ok(Self::Sqlite),
"sled" => Ok(Self::Sled),
kind => Err(Error(format!("Invalid DB engine: {}", kind).into())),
}
}
}
#[allow(clippy::len_without_is_empty)]
impl Tree {
#[inline]
@@ -228,10 +197,6 @@ impl Tree {
pub fn len(&self) -> Result<usize> {
self.0.len(self.1)
}
-#[inline]
-pub fn fast_len(&self) -> Result<Option<usize>> {
-self.0.fast_len(self.1)
-}
#[inline]
pub fn first(&self) -> Result<Option<(Value, Value)>> {
@@ -246,16 +211,12 @@ impl Tree {
/// Returns the old value if there was one
#[inline]
-pub fn insert<T: AsRef<[u8]>, U: AsRef<[u8]>>(
-&self,
-key: T,
-value: U,
-) -> Result<Option<Value>> {
+pub fn insert<T: AsRef<[u8]>, U: AsRef<[u8]>>(&self, key: T, value: U) -> Result<()> {
self.0.insert(self.1, key.as_ref(), value.as_ref())
}
/// Returns the old value if there was one
#[inline]
-pub fn remove<T: AsRef<[u8]>>(&self, key: T) -> Result<Option<Value>> {
+pub fn remove<T: AsRef<[u8]>>(&self, key: T) -> Result<()> {
self.0.remove(self.1, key.as_ref())
}
/// Clears all values from the tree
@@ -313,14 +274,19 @@ impl<'a> Transaction<'a> {
tree: &Tree,
key: T,
value: U,
-) -> TxOpResult<Option<Value>> {
+) -> TxOpResult<()> {
self.tx.insert(tree.1, key.as_ref(), value.as_ref())
}
/// Returns the old value if there was one
#[inline]
-pub fn remove<T: AsRef<[u8]>>(&mut self, tree: &Tree, key: T) -> TxOpResult<Option<Value>> {
+pub fn remove<T: AsRef<[u8]>>(&mut self, tree: &Tree, key: T) -> TxOpResult<()> {
self.tx.remove(tree.1, key.as_ref())
}
+/// Clears all values in a tree
+#[inline]
+pub fn clear(&mut self, tree: &Tree) -> TxOpResult<()> {
+self.tx.clear(tree.1)
+}
#[inline]
pub fn iter(&self, tree: &Tree) -> TxOpResult<TxValueIter<'_>> {
@@ -364,15 +330,13 @@ pub(crate) trait IDb: Send + Sync {
fn engine(&self) -> String;
fn open_tree(&self, name: &str) -> Result<usize>;
fn list_trees(&self) -> Result<Vec<String>>;
+fn snapshot(&self, path: &PathBuf) -> Result<()>;
fn get(&self, tree: usize, key: &[u8]) -> Result<Option<Value>>;
fn len(&self, tree: usize) -> Result<usize>;
-fn fast_len(&self, _tree: usize) -> Result<Option<usize>> {
-Ok(None)
-}
-fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<Option<Value>>;
-fn remove(&self, tree: usize, key: &[u8]) -> Result<Option<Value>>;
+fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<()>;
+fn remove(&self, tree: usize, key: &[u8]) -> Result<()>;
fn clear(&self, tree: usize) -> Result<()>;
fn iter(&self, tree: usize) -> Result<ValueIter<'_>>;
@@ -398,8 +362,9 @@ pub(crate) trait ITx {
fn get(&self, tree: usize, key: &[u8]) -> TxOpResult<Option<Value>>;
fn len(&self, tree: usize) -> TxOpResult<usize>;
-fn insert(&mut self, tree: usize, key: &[u8], value: &[u8]) -> TxOpResult<Option<Value>>;
-fn remove(&mut self, tree: usize, key: &[u8]) -> TxOpResult<Option<Value>>;
+fn insert(&mut self, tree: usize, key: &[u8], value: &[u8]) -> TxOpResult<()>;
+fn remove(&mut self, tree: usize, key: &[u8]) -> TxOpResult<()>;
+fn clear(&mut self, tree: usize) -> TxOpResult<()>;
fn iter(&self, tree: usize) -> TxOpResult<TxValueIter<'_>>;
fn iter_rev(&self, tree: usize) -> TxOpResult<TxValueIter<'_>>;


@@ -3,6 +3,8 @@ use core::ptr::NonNull;
use std::collections::HashMap;
use std::convert::TryInto;
+use std::path::PathBuf;
+use std::pin::Pin;
use std::sync::{Arc, RwLock};
use heed::types::ByteSlice;
@@ -102,6 +104,15 @@ impl IDb for LmdbDb {
Ok(ret2)
}
fn snapshot(&self, to: &PathBuf) -> Result<()> {
std::fs::create_dir_all(to)?;
let mut path = to.clone();
path.push("data.mdb");
self.db
.copy_to_path(path, heed::CompactionOption::Disabled)?;
Ok(())
}
// ----
fn get(&self, tree: usize, key: &[u8]) -> Result<Option<Value>> {
@@ -121,26 +132,20 @@ impl IDb for LmdbDb {
Ok(tree.len(&tx)?.try_into().unwrap())
}
-fn fast_len(&self, tree: usize) -> Result<Option<usize>> {
-Ok(Some(self.len(tree)?))
-}
-fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<Option<Value>> {
+fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<()> {
let tree = self.get_tree(tree)?;
let mut tx = self.db.write_txn()?;
-let old_val = tree.get(&tx, key)?.map(Vec::from);
tree.put(&mut tx, key, value)?;
tx.commit()?;
-Ok(old_val)
+Ok(())
}
-fn remove(&self, tree: usize, key: &[u8]) -> Result<Option<Value>> {
+fn remove(&self, tree: usize, key: &[u8]) -> Result<()> {
let tree = self.get_tree(tree)?;
let mut tx = self.db.write_txn()?;
-let old_val = tree.get(&tx, key)?.map(Vec::from);
tree.delete(&mut tx, key)?;
tx.commit()?;
-Ok(old_val)
+Ok(())
}
fn clear(&self, tree: usize) -> Result<()> {
@@ -242,49 +247,63 @@ impl<'a> ITx for LmdbTx<'a> {
None => Ok(None),
}
}
-fn len(&self, _tree: usize) -> TxOpResult<usize> {
-unimplemented!(".len() in transaction not supported with LMDB backend")
+fn len(&self, tree: usize) -> TxOpResult<usize> {
+let tree = self.get_tree(tree)?;
+Ok(tree.len(&self.tx)? as usize)
}
-fn insert(&mut self, tree: usize, key: &[u8], value: &[u8]) -> TxOpResult<Option<Value>> {
+fn insert(&mut self, tree: usize, key: &[u8], value: &[u8]) -> TxOpResult<()> {
let tree = *self.get_tree(tree)?;
-let old_val = tree.get(&self.tx, key)?.map(Vec::from);
tree.put(&mut self.tx, key, value)?;
-Ok(old_val)
+Ok(())
}
-fn remove(&mut self, tree: usize, key: &[u8]) -> TxOpResult<Option<Value>> {
+fn remove(&mut self, tree: usize, key: &[u8]) -> TxOpResult<()> {
let tree = *self.get_tree(tree)?;
-let old_val = tree.get(&self.tx, key)?.map(Vec::from);
tree.delete(&mut self.tx, key)?;
-Ok(old_val)
+Ok(())
}
fn clear(&mut self, tree: usize) -> TxOpResult<()> {
let tree = *self.get_tree(tree)?;
tree.clear(&mut self.tx)?;
Ok(())
} }
-fn iter(&self, _tree: usize) -> TxOpResult<TxValueIter<'_>> {
-unimplemented!("Iterators in transactions not supported with LMDB backend");
+fn iter(&self, tree: usize) -> TxOpResult<TxValueIter<'_>> {
+let tree = *self.get_tree(tree)?;
+Ok(Box::new(tree.iter(&self.tx)?.map(tx_iter_item)))
}
-fn iter_rev(&self, _tree: usize) -> TxOpResult<TxValueIter<'_>> {
-unimplemented!("Iterators in transactions not supported with LMDB backend");
+fn iter_rev(&self, tree: usize) -> TxOpResult<TxValueIter<'_>> {
+let tree = *self.get_tree(tree)?;
+Ok(Box::new(tree.rev_iter(&self.tx)?.map(tx_iter_item)))
}
fn range<'r>(
&self,
-_tree: usize,
-_low: Bound<&'r [u8]>,
-_high: Bound<&'r [u8]>,
+tree: usize,
+low: Bound<&'r [u8]>,
+high: Bound<&'r [u8]>,
) -> TxOpResult<TxValueIter<'_>> {
-unimplemented!("Iterators in transactions not supported with LMDB backend");
+let tree = *self.get_tree(tree)?;
+Ok(Box::new(
+tree.range(&self.tx, &(low, high))?.map(tx_iter_item),
+))
}
fn range_rev<'r>(
&self,
-_tree: usize,
-_low: Bound<&'r [u8]>,
-_high: Bound<&'r [u8]>,
+tree: usize,
+low: Bound<&'r [u8]>,
+high: Bound<&'r [u8]>,
) -> TxOpResult<TxValueIter<'_>> {
-unimplemented!("Iterators in transactions not supported with LMDB backend");
+let tree = *self.get_tree(tree)?;
+Ok(Box::new(
+tree.rev_range(&self.tx, &(low, high))?.map(tx_iter_item),
+))
}
}
-// ----
+// ---- iterators outside transactions ----
+// complicated, they must hold the transaction object
+// therefore a bit of unsafe code (it is a self-referential struct)
type IteratorItem<'a> = heed::Result<(
<ByteSlice as BytesDecode<'a>>::DItem,
@@ -307,12 +326,20 @@ where
where
F: FnOnce(&'a RoTxn<'a>) -> Result<I>,
{
-let mut res = TxAndIterator { tx, iter: None };
-let tx = unsafe { NonNull::from(&res.tx).as_ref() };
-res.iter = Some(iterfun(tx)?);
-Ok(Box::new(res))
+let res = TxAndIterator { tx, iter: None };
+let mut boxed = Box::pin(res);
+// This unsafe allows us to bypass lifetime checks
+let tx = unsafe { NonNull::from(&boxed.tx).as_ref() };
+let iter = iterfun(tx)?;
+let mut_ref = Pin::as_mut(&mut boxed);
+// This unsafe allows us to write in a field of the pinned struct
+unsafe {
+Pin::get_unchecked_mut(mut_ref).iter = Some(iter);
+}
+Ok(Box::new(TxAndIteratorPin(boxed)))
}
}
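The constructor above builds a self-referential value: the iterator borrows from the transaction stored in the same struct, which is only sound if the struct never moves again, hence `Box::pin` plus the unsafe writes. A minimal standalone illustration of the same pattern, with a raw pointer standing in for the iterator:

    use std::marker::PhantomPinned;
    use std::pin::Pin;

    struct SelfRef {
        data: String,
        // Points into `data`; only valid because the struct is pinned
        // and therefore never moves after construction.
        first_byte: *const u8,
        _pin: PhantomPinned,
    }

    fn make(data: String) -> Pin<Box<SelfRef>> {
        let mut boxed = Box::pin(SelfRef {
            data,
            first_byte: std::ptr::null(),
            _pin: PhantomPinned,
        });
        let ptr = boxed.data.as_ptr();
        // Writing through the pinned reference requires unsafe, as above.
        unsafe {
            Pin::get_unchecked_mut(Pin::as_mut(&mut boxed)).first_byte = ptr;
        }
        boxed
    }

    fn main() {
        let s = make("hello".to_string());
        // The pointer still targets the first byte of the heap-allocated string.
        assert_eq!(unsafe { *s.first_byte }, b'h');
    }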
@@ -321,18 +348,26 @@ where
I: Iterator<Item = IteratorItem<'a>> + 'a,
{
fn drop(&mut self) {
+// ensure the iterator is dropped before the RoTxn it references
drop(self.iter.take());
}
}
-impl<'a, I> Iterator for TxAndIterator<'a, I>
+struct TxAndIteratorPin<'a, I>(Pin<Box<TxAndIterator<'a, I>>>)
+where
+I: Iterator<Item = IteratorItem<'a>> + 'a;
+impl<'a, I> Iterator for TxAndIteratorPin<'a, I>
where
I: Iterator<Item = IteratorItem<'a>> + 'a,
{
type Item = Result<(Value, Value)>;
fn next(&mut self) -> Option<Self::Item> {
-match self.iter.as_mut().unwrap().next() {
+let mut_ref = Pin::as_mut(&mut self.0);
+// This unsafe allows us to mutably access the iterator field
+let next = unsafe { Pin::get_unchecked_mut(mut_ref).iter.as_mut()?.next() };
+match next {
None => None,
Some(Err(e)) => Some(Err(e.into())),
Some(Ok((k, v))) => Some(Ok((k.to_vec(), v.to_vec()))),
@@ -340,7 +375,16 @@ where
}
}
-// ----
+// ---- iterators within transactions ----
+fn tx_iter_item<'a>(
+item: std::result::Result<(&'a [u8], &'a [u8]), heed::Error>,
+) -> TxOpResult<(Vec<u8>, Vec<u8>)> {
+item.map(|(k, v)| (k.to_vec(), v.to_vec()))
+.map_err(|e| TxOpError(Error::from(e)))
+}
+// ---- utility ----
#[cfg(target_pointer_width = "64")]
pub fn recommended_map_size() -> usize {

src/db/open.rs (new file, 125 lines)

@@ -0,0 +1,125 @@
use std::path::PathBuf;
use crate::{Db, Error, Result};
/// List of supported database engine types
///
/// The `enum` holds the list of *all* database engines that may be supported by the crate,
/// whether the relevant feature is enabled or not. It allows us to distinguish between an
/// invalid engine and a valid engine whose support is not enabled via its feature flag.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Engine {
Lmdb,
Sqlite,
}
impl Engine {
/// Return variant name as static `&str`
pub fn as_str(&self) -> &'static str {
match self {
Self::Lmdb => "lmdb",
Self::Sqlite => "sqlite",
}
}
}
impl std::fmt::Display for Engine {
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
self.as_str().fmt(fmt)
}
}
impl std::str::FromStr for Engine {
type Err = Error;
fn from_str(text: &str) -> Result<Engine> {
match text {
"lmdb" | "heed" => Ok(Self::Lmdb),
"sqlite" | "sqlite3" | "rusqlite" => Ok(Self::Sqlite),
"sled" => Err(Error("Sled is no longer supported as a database engine. Converting your old metadata db can be done using an older Garage binary (e.g. v0.9.4).".into())),
kind => Err(Error(
format!(
"Invalid DB engine: {} (options are: lmdb, sqlite)",
kind
)
.into(),
)),
}
}
}
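Since `Engine` implements `FromStr`, the configured engine name can be parsed directly, with `"sled"` now producing the dedicated migration error instead of a generic one. A short usage sketch (relying on the `pub use open::*;` re-export seen earlier):

    use std::str::FromStr;
    use garage_db::Engine;

    fn main() {
        // "lmdb" and its alias "heed" both parse to Engine::Lmdb.
        assert_eq!(Engine::from_str("heed").unwrap(), Engine::Lmdb);
        // "sled" is rejected with a migration hint rather than opening anything.
        assert!(Engine::from_str("sled").is_err());
        // Unknown names list the valid options in the error message.
        assert!("foo".parse::<Engine>().is_err());
    }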
pub struct OpenOpt {
pub fsync: bool,
pub lmdb_map_size: Option<usize>,
}
impl Default for OpenOpt {
fn default() -> Self {
Self {
fsync: false,
lmdb_map_size: None,
}
}
}
pub fn open_db(path: &PathBuf, engine: Engine, opt: &OpenOpt) -> Result<Db> {
match engine {
// ---- Sqlite DB ----
#[cfg(feature = "sqlite")]
Engine::Sqlite => {
info!("Opening Sqlite database at: {}", path.display());
let manager = r2d2_sqlite::SqliteConnectionManager::file(path);
Ok(crate::sqlite_adapter::SqliteDb::new(manager, opt.fsync)?)
}
// ---- LMDB DB ----
#[cfg(feature = "lmdb")]
Engine::Lmdb => {
info!("Opening LMDB database at: {}", path.display());
if let Err(e) = std::fs::create_dir_all(&path) {
return Err(Error(
format!("Unable to create LMDB data directory: {}", e).into(),
));
}
let map_size = match opt.lmdb_map_size {
None => crate::lmdb_adapter::recommended_map_size(),
Some(v) => v - (v % 4096),
};
let mut env_builder = heed::EnvOpenOptions::new();
env_builder.max_dbs(100);
env_builder.map_size(map_size);
env_builder.max_readers(2048);
unsafe {
env_builder.flag(crate::lmdb_adapter::heed::flags::Flags::MdbNoRdAhead);
env_builder.flag(crate::lmdb_adapter::heed::flags::Flags::MdbNoMetaSync);
if !opt.fsync {
env_builder.flag(heed::flags::Flags::MdbNoSync);
}
}
match env_builder.open(&path) {
Err(heed::Error::Io(e)) if e.kind() == std::io::ErrorKind::OutOfMemory => {
return Err(Error(
"OutOfMemory error while trying to open LMDB database. This can happen \
if your operating system is not allowing you to use sufficient virtual \
memory address space. Please check that no limit is set (ulimit -v). \
You may also try to set a smaller `lmdb_map_size` configuration parameter. \
On 32-bit machines, you should probably switch to another database engine."
.into(),
))
}
Err(e) => Err(Error(format!("Cannot open LMDB database: {}", e).into())),
Ok(db) => Ok(crate::lmdb_adapter::LmdbDb::init(db)),
}
}
// Pattern is unreachable when all supported DB engines are compiled into the binary. The allow
// attribute is added so that we won't have to change this match in case we stop building
// support for one or more engines by default.
#[allow(unreachable_patterns)]
engine => Err(Error(
format!("DB engine support not available in this build: {}", engine).into(),
)),
}
}
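Callers select the engine and tuning through `OpenOpt`. A hedged sketch of opening a database with this API (the path is illustrative):

    use std::path::PathBuf;
    use garage_db::{open_db, Engine, OpenOpt};

    fn main() -> garage_db::Result<()> {
        let path = PathBuf::from("/tmp/garage-meta"); // illustrative location
        let opt = OpenOpt {
            fsync: true,         // trade throughput for durability
            lmdb_map_size: None, // fall back to recommended_map_size()
        };
        let db = open_db(&path, Engine::Lmdb, &opt)?;
        let tree = db.open_tree("example")?;
        tree.insert(b"k", b"v")?; // new API: returns Result<()>, not the old value
        Ok(())
    }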


@@ -1,274 +0,0 @@
use core::ops::Bound;
use std::cell::Cell;
use std::collections::HashMap;
use std::sync::{Arc, RwLock};
use sled::transaction::{
ConflictableTransactionError, TransactionError, Transactional, TransactionalTree,
UnabortableTransactionError,
};
use crate::{
Db, Error, IDb, ITx, ITxFn, OnCommit, Result, TxError, TxFnResult, TxOpError, TxOpResult,
TxResult, TxValueIter, Value, ValueIter,
};
pub use sled;
// -- err
impl From<sled::Error> for Error {
fn from(e: sled::Error) -> Error {
Error(format!("Sled: {}", e).into())
}
}
impl From<sled::Error> for TxOpError {
fn from(e: sled::Error) -> TxOpError {
TxOpError(e.into())
}
}
// -- db
pub struct SledDb {
db: sled::Db,
trees: RwLock<(Vec<sled::Tree>, HashMap<String, usize>)>,
}
impl SledDb {
#[deprecated(
since = "0.9.0",
note = "The Sled database is now deprecated and will be removed in Garage v1.0. Please migrate to LMDB or Sqlite as soon as possible."
)]
pub fn init(db: sled::Db) -> Db {
tracing::warn!("-------------------- IMPORTANT WARNING !!! ----------------------");
tracing::warn!("The Sled database is now deprecated and will be removed in Garage v1.0.");
tracing::warn!("Please migrate to LMDB or Sqlite as soon as possible.");
tracing::warn!("-----------------------------------------------------------------------");
let s = Self {
db,
trees: RwLock::new((Vec::new(), HashMap::new())),
};
Db(Arc::new(s))
}
fn get_tree(&self, i: usize) -> Result<sled::Tree> {
self.trees
.read()
.unwrap()
.0
.get(i)
.cloned()
.ok_or_else(|| Error("invalid tree id".into()))
}
}
impl IDb for SledDb {
fn engine(&self) -> String {
"Sled".into()
}
fn open_tree(&self, name: &str) -> Result<usize> {
let mut trees = self.trees.write().unwrap();
if let Some(i) = trees.1.get(name) {
Ok(*i)
} else {
let tree = self.db.open_tree(name)?;
let i = trees.0.len();
trees.0.push(tree);
trees.1.insert(name.to_string(), i);
Ok(i)
}
}
fn list_trees(&self) -> Result<Vec<String>> {
let mut trees = vec![];
for name in self.db.tree_names() {
let name = std::str::from_utf8(&name)
.map_err(|e| Error(format!("{}", e).into()))?
.to_string();
if name != "__sled__default" {
trees.push(name);
}
}
Ok(trees)
}
// ----
fn get(&self, tree: usize, key: &[u8]) -> Result<Option<Value>> {
let tree = self.get_tree(tree)?;
let val = tree.get(key)?;
Ok(val.map(|x| x.to_vec()))
}
fn len(&self, tree: usize) -> Result<usize> {
let tree = self.get_tree(tree)?;
Ok(tree.len())
}
fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<Option<Value>> {
let tree = self.get_tree(tree)?;
let old_val = tree.insert(key, value)?;
Ok(old_val.map(|x| x.to_vec()))
}
fn remove(&self, tree: usize, key: &[u8]) -> Result<Option<Value>> {
let tree = self.get_tree(tree)?;
let old_val = tree.remove(key)?;
Ok(old_val.map(|x| x.to_vec()))
}
fn clear(&self, tree: usize) -> Result<()> {
let tree = self.get_tree(tree)?;
tree.clear()?;
Ok(())
}
fn iter(&self, tree: usize) -> Result<ValueIter<'_>> {
let tree = self.get_tree(tree)?;
Ok(Box::new(tree.iter().map(|v| {
v.map(|(x, y)| (x.to_vec(), y.to_vec())).map_err(Into::into)
})))
}
fn iter_rev(&self, tree: usize) -> Result<ValueIter<'_>> {
let tree = self.get_tree(tree)?;
Ok(Box::new(tree.iter().rev().map(|v| {
v.map(|(x, y)| (x.to_vec(), y.to_vec())).map_err(Into::into)
})))
}
fn range<'r>(
&self,
tree: usize,
low: Bound<&'r [u8]>,
high: Bound<&'r [u8]>,
) -> Result<ValueIter<'_>> {
let tree = self.get_tree(tree)?;
Ok(Box::new(tree.range::<&'r [u8], _>((low, high)).map(|v| {
v.map(|(x, y)| (x.to_vec(), y.to_vec())).map_err(Into::into)
})))
}
fn range_rev<'r>(
&self,
tree: usize,
low: Bound<&'r [u8]>,
high: Bound<&'r [u8]>,
) -> Result<ValueIter<'_>> {
let tree = self.get_tree(tree)?;
Ok(Box::new(tree.range::<&'r [u8], _>((low, high)).rev().map(
|v| v.map(|(x, y)| (x.to_vec(), y.to_vec())).map_err(Into::into),
)))
}
// ----
fn transaction(&self, f: &dyn ITxFn) -> TxResult<OnCommit, ()> {
let trees = self.trees.read().unwrap();
let res = trees.0.transaction(|txtrees| {
let mut tx = SledTx {
trees: txtrees,
err: Cell::new(None),
};
match f.try_on(&mut tx) {
TxFnResult::Ok(on_commit) => {
assert!(tx.err.into_inner().is_none());
Ok(on_commit)
}
TxFnResult::Abort => {
assert!(tx.err.into_inner().is_none());
Err(ConflictableTransactionError::Abort(()))
}
TxFnResult::DbErr => {
let e = tx.err.into_inner().expect("No DB error");
Err(e.into())
}
}
});
match res {
Ok(on_commit) => Ok(on_commit),
Err(TransactionError::Abort(())) => Err(TxError::Abort(())),
Err(TransactionError::Storage(s)) => Err(TxError::Db(s.into())),
}
}
}
// ----
struct SledTx<'a> {
trees: &'a [TransactionalTree],
err: Cell<Option<UnabortableTransactionError>>,
}
impl<'a> SledTx<'a> {
fn get_tree(&self, i: usize) -> TxOpResult<&TransactionalTree> {
self.trees.get(i).ok_or_else(|| {
TxOpError(Error(
"invalid tree id (it might have been openned after the transaction started)".into(),
))
})
}
fn save_error<R>(
&self,
v: std::result::Result<R, UnabortableTransactionError>,
) -> TxOpResult<R> {
match v {
Ok(x) => Ok(x),
Err(e) => {
let txt = format!("{}", e);
self.err.set(Some(e));
Err(TxOpError(Error(txt.into())))
}
}
}
}
impl<'a> ITx for SledTx<'a> {
fn get(&self, tree: usize, key: &[u8]) -> TxOpResult<Option<Value>> {
let tree = self.get_tree(tree)?;
let tmp = self.save_error(tree.get(key))?;
Ok(tmp.map(|x| x.to_vec()))
}
fn len(&self, _tree: usize) -> TxOpResult<usize> {
unimplemented!(".len() in transaction not supported with Sled backend")
}
fn insert(&mut self, tree: usize, key: &[u8], value: &[u8]) -> TxOpResult<Option<Value>> {
let tree = self.get_tree(tree)?;
let old_val = self.save_error(tree.insert(key, value))?;
Ok(old_val.map(|x| x.to_vec()))
}
fn remove(&mut self, tree: usize, key: &[u8]) -> TxOpResult<Option<Value>> {
let tree = self.get_tree(tree)?;
let old_val = self.save_error(tree.remove(key))?;
Ok(old_val.map(|x| x.to_vec()))
}
fn iter(&self, _tree: usize) -> TxOpResult<TxValueIter<'_>> {
unimplemented!("Iterators in transactions not supported with Sled backend");
}
fn iter_rev(&self, _tree: usize) -> TxOpResult<TxValueIter<'_>> {
unimplemented!("Iterators in transactions not supported with Sled backend");
}
fn range<'r>(
&self,
_tree: usize,
_low: Bound<&'r [u8]>,
_high: Bound<&'r [u8]>,
) -> TxOpResult<TxValueIter<'_>> {
unimplemented!("Iterators in transactions not supported with Sled backend");
}
fn range_rev<'r>(
&self,
_tree: usize,
_low: Bound<&'r [u8]>,
_high: Bound<&'r [u8]>,
) -> TxOpResult<TxValueIter<'_>> {
unimplemented!("Iterators in transactions not supported with Sled backend");
}
}
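The `Cell<Option<UnabortableTransactionError>>` in `SledTx` is what lets a typed sled error cross the untyped `ITxFn` boundary: `save_error` stashes the concrete error and hands back a stringly-typed one, and `transaction` recovers the original afterwards. A minimal self-contained sketch of that round-trip, with hypothetical `BackendError`/`GenericError` types standing in for sled's and the crate's:

use std::cell::Cell;

#[derive(Debug)]
struct BackendError(String); // stands in for sled's UnabortableTransactionError

#[derive(Debug)]
struct GenericError(String); // stands in for the crate-level Error

struct Tx {
	// The typed error is parked here so the backend can re-throw it
	// with its original type once the generic closure has returned.
	err: Cell<Option<BackendError>>,
}

impl Tx {
	fn save_error<R>(&self, v: Result<R, BackendError>) -> Result<R, GenericError> {
		match v {
			Ok(x) => Ok(x),
			Err(e) => {
				let txt = format!("{:?}", e);
				self.err.set(Some(e)); // keep the typed error for the backend
				Err(GenericError(txt)) // surface a generic one to the caller
			}
		}
	}
}

fn main() {
	let tx = Tx { err: Cell::new(None) };
	let r = tx.save_error::<()>(Err(BackendError("io".into())));
	assert!(r.is_err());
	// After the closure returns, the typed error is recovered intact:
	let typed = tx.err.into_inner().expect("No DB error");
	println!("recovered: {:?}", typed);
}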

View file

@@ -1,12 +1,14 @@
 use core::ops::Bound;
-use std::borrow::BorrowMut;
 use std::marker::PhantomPinned;
+use std::path::PathBuf;
 use std::pin::Pin;
 use std::ptr::NonNull;
-use std::sync::{Arc, Mutex, MutexGuard};
+use std::sync::{Arc, Mutex, RwLock};
 
-use rusqlite::{params, Connection, Rows, Statement, Transaction};
+use r2d2::Pool;
+use r2d2_sqlite::SqliteConnectionManager;
+use rusqlite::{params, Rows, Statement, Transaction};
 
 use crate::{
 	Db, Error, IDb, ITx, ITxFn, OnCommit, Result, TxError, TxFnResult, TxOpError, TxOpResult,
@@ -15,6 +17,8 @@ use crate::{
 
 pub use rusqlite;
 
+type Connection = r2d2::PooledConnection<SqliteConnectionManager>;
+
 // --- err
 
 impl From<rusqlite::Error> for Error {
@@ -23,6 +27,12 @@ impl From<rusqlite::Error> for Error {
 	}
 }
 
+impl From<r2d2::Error> for Error {
+	fn from(e: r2d2::Error) -> Error {
+		Error(format!("Sqlite: {}", e).into())
+	}
+}
+
 impl From<rusqlite::Error> for TxOpError {
 	fn from(e: rusqlite::Error) -> TxOpError {
 		TxOpError(e.into())
@@ -31,35 +41,47 @@ impl From<rusqlite::Error> for TxOpError {
 
 // -- db
 
-pub struct SqliteDb(Mutex<SqliteDbInner>);
-
-struct SqliteDbInner {
-	db: Connection,
-	trees: Vec<String>,
+pub struct SqliteDb {
+	db: Pool<SqliteConnectionManager>,
+	trees: RwLock<Vec<Arc<str>>>,
+	// All operations that might write on the DB must take this lock first.
+	// This emulates LMDB's approach where a single writer can be
+	// active at once.
+	write_lock: Mutex<()>,
 }
 
 impl SqliteDb {
-	pub fn init(db: rusqlite::Connection) -> Db {
-		let s = Self(Mutex::new(SqliteDbInner {
-			db,
-			trees: Vec::new(),
-		}));
-		Db(Arc::new(s))
+	pub fn new(manager: SqliteConnectionManager, sync_mode: bool) -> Result<Db> {
+		let manager = manager.with_init(move |db| {
+			db.pragma_update(None, "journal_mode", "WAL")?;
+			if sync_mode {
+				db.pragma_update(None, "synchronous", "NORMAL")?;
+			} else {
+				db.pragma_update(None, "synchronous", "OFF")?;
+			}
+			Ok(())
+		});
+		let s = Self {
+			db: Pool::builder().build(manager)?,
+			trees: RwLock::new(vec![]),
+			write_lock: Mutex::new(()),
+		};
+		Ok(Db(Arc::new(s)))
 	}
 }
 
-impl SqliteDbInner {
-	fn get_tree(&self, i: usize) -> Result<&'_ str> {
+impl SqliteDb {
+	fn get_tree(&self, i: usize) -> Result<Arc<str>> {
 		self.trees
+			.read()
+			.unwrap()
 			.get(i)
-			.map(String::as_str)
+			.cloned()
 			.ok_or_else(|| Error("invalid tree id".into()))
 	}
 
-	fn internal_get(&self, tree: &str, key: &[u8]) -> Result<Option<Value>> {
-		let mut stmt = self
-			.db
-			.prepare(&format!("SELECT v FROM {} WHERE k = ?1", tree))?;
+	fn internal_get(&self, db: &Connection, tree: &str, key: &[u8]) -> Result<Option<Value>> {
+		let mut stmt = db.prepare(&format!("SELECT v FROM {} WHERE k = ?1", tree))?;
 		let mut res_iter = stmt.query([key])?;
 		match res_iter.next()? {
 			None => Ok(None),
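With the constructor reworked as above, opening the database is a matter of handing it an r2d2-sqlite manager; every pooled connection then gets WAL journaling and the chosen `synchronous` pragma applied by `with_init`. A hedged usage sketch (the file path is illustrative, not from the patch):

use r2d2_sqlite::SqliteConnectionManager;

fn open_example() -> Result<Db> {
	// `true` keeps synchronous=NORMAL for durability; `false` switches to
	// synchronous=OFF, trading crash safety for write speed.
	let manager = SqliteConnectionManager::file("/var/lib/garage/db.sqlite");
	SqliteDb::new(manager, true)
}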
@@ -75,13 +97,14 @@ impl IDb for SqliteDb {
 	fn open_tree(&self, name: &str) -> Result<usize> {
 		let name = format!("tree_{}", name.replace(':', "_COLON_"));
-		let mut this = self.0.lock().unwrap();
-		if let Some(i) = this.trees.iter().position(|x| x == &name) {
+		let mut trees = self.trees.write().unwrap();
+		if let Some(i) = trees.iter().position(|x| x.as_ref() == &name) {
 			Ok(i)
 		} else {
+			let db = self.db.get()?;
 			trace!("create table {}", name);
-			this.db.execute(
+			db.execute(
 				&format!(
 					"CREATE TABLE IF NOT EXISTS {} (
 						k BLOB PRIMARY KEY,
@@ -93,8 +116,8 @@ impl IDb for SqliteDb {
 			)?;
 			trace!("table created: {}, unlocking", name);
 
-			let i = this.trees.len();
-			this.trees.push(name.to_string());
+			let i = trees.len();
+			trees.push(name.to_string().into_boxed_str().into());
 			Ok(i)
 		}
 	}
@@ -102,11 +125,8 @@ impl IDb for SqliteDb {
 	fn list_trees(&self) -> Result<Vec<String>> {
 		let mut trees = vec![];
 
-		trace!("list_trees: lock db");
-		let this = self.0.lock().unwrap();
-		trace!("list_trees: lock acquired");
-
-		let mut stmt = this.db.prepare(
+		let db = self.db.get()?;
+		let mut stmt = db.prepare(
 			"SELECT name FROM sqlite_schema WHERE type = 'table' AND name LIKE 'tree_%'",
 		)?;
 		let mut rows = stmt.query([])?;
@@ -119,24 +139,29 @@ impl IDb for SqliteDb {
 		Ok(trees)
 	}
 
+	fn snapshot(&self, to: &PathBuf) -> Result<()> {
+		fn progress(p: rusqlite::backup::Progress) {
+			let percent = (p.pagecount - p.remaining) * 100 / p.pagecount;
+			info!("Sqlite snapshot progress: {}%", percent);
+		}
+		self.db
+			.get()?
+			.backup(rusqlite::DatabaseName::Main, to, Some(progress))?;
+		Ok(())
+	}
+
 	// ----
 
 	fn get(&self, tree: usize, key: &[u8]) -> Result<Option<Value>> {
-		trace!("get {}: lock db", tree);
-		let this = self.0.lock().unwrap();
-		trace!("get {}: lock acquired", tree);
-
-		let tree = this.get_tree(tree)?;
-		this.internal_get(tree, key)
+		let tree = self.get_tree(tree)?;
+		self.internal_get(&self.db.get()?, &tree, key)
 	}
 
 	fn len(&self, tree: usize) -> Result<usize> {
-		trace!("len {}: lock db", tree);
-		let this = self.0.lock().unwrap();
-		trace!("len {}: lock acquired", tree);
-
-		let tree = this.get_tree(tree)?;
-		let mut stmt = this.db.prepare(&format!("SELECT COUNT(*) FROM {}", tree))?;
+		let tree = self.get_tree(tree)?;
+		let db = self.db.get()?;
+		let mut stmt = db.prepare(&format!("SELECT COUNT(*) FROM {}", tree))?;
 		let mut res_iter = stmt.query([])?;
 		match res_iter.next()? {
 			None => Ok(0),
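The new `snapshot` method relies on rusqlite's online-backup API (behind its `backup` cargo feature), which copies pages while other pooled connections keep serving requests. A standalone sketch of the same call, with illustrative source and destination paths:

use rusqlite::{Connection, DatabaseName};

fn snapshot_example() -> rusqlite::Result<()> {
	let db = Connection::open("data.sqlite")?; // hypothetical source
	fn progress(p: rusqlite::backup::Progress) {
		// pagecount/remaining give a completion percentage
		let percent = (p.pagecount - p.remaining) * 100 / p.pagecount;
		eprintln!("snapshot: {}%", percent);
	}
	// Online backup: readers and writers may keep using `db` meanwhile.
	db.backup(DatabaseName::Main, "snapshot.sqlite", Some(progress))?;
	Ok(())
}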
@@ -144,74 +169,56 @@ impl IDb for SqliteDb {
 		}
 	}
 
-	fn fast_len(&self, tree: usize) -> Result<Option<usize>> {
-		Ok(Some(self.len(tree)?))
-	}
-
-	fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<Option<Value>> {
-		trace!("insert {}: lock db", tree);
-		let this = self.0.lock().unwrap();
-		trace!("insert {}: lock acquired", tree);
-
-		let tree = this.get_tree(tree)?;
-		let old_val = this.internal_get(tree, key)?;
+	fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<()> {
+		let tree = self.get_tree(tree)?;
+		let db = self.db.get()?;
+		let lock = self.write_lock.lock();
 
+		let old_val = self.internal_get(&db, &tree, key)?;
 		let sql = match &old_val {
 			Some(_) => format!("UPDATE {} SET v = ?2 WHERE k = ?1", tree),
 			None => format!("INSERT INTO {} (k, v) VALUES (?1, ?2)", tree),
 		};
-		let n = this.db.execute(&sql, params![key, value])?;
+		let n = db.execute(&sql, params![key, value])?;
 		assert_eq!(n, 1);
 
-		Ok(old_val)
+		drop(lock);
+		Ok(())
 	}
 
-	fn remove(&self, tree: usize, key: &[u8]) -> Result<Option<Value>> {
-		trace!("remove {}: lock db", tree);
-		let this = self.0.lock().unwrap();
-		trace!("remove {}: lock acquired", tree);
-
-		let tree = this.get_tree(tree)?;
-		let old_val = this.internal_get(tree, key)?;
-
-		if old_val.is_some() {
-			let n = this
-				.db
-				.execute(&format!("DELETE FROM {} WHERE k = ?1", tree), params![key])?;
-			assert_eq!(n, 1);
-		}
-
-		Ok(old_val)
+	fn remove(&self, tree: usize, key: &[u8]) -> Result<()> {
+		let tree = self.get_tree(tree)?;
+		let db = self.db.get()?;
+		let lock = self.write_lock.lock();
+		db.execute(&format!("DELETE FROM {} WHERE k = ?1", tree), params![key])?;
+
+		drop(lock);
+		Ok(())
 	}
 
 	fn clear(&self, tree: usize) -> Result<()> {
-		trace!("clear {}: lock db", tree);
-		let this = self.0.lock().unwrap();
-		trace!("clear {}: lock acquired", tree);
-
-		let tree = this.get_tree(tree)?;
-		this.db.execute(&format!("DELETE FROM {}", tree), [])?;
-
+		let tree = self.get_tree(tree)?;
+		let db = self.db.get()?;
+		let lock = self.write_lock.lock();
+		db.execute(&format!("DELETE FROM {}", tree), [])?;
+
+		drop(lock);
 		Ok(())
 	}
 
 	fn iter(&self, tree: usize) -> Result<ValueIter<'_>> {
-		trace!("iter {}: lock db", tree);
-		let this = self.0.lock().unwrap();
-		trace!("iter {}: lock acquired", tree);
-
-		let tree = this.get_tree(tree)?;
+		let tree = self.get_tree(tree)?;
 		let sql = format!("SELECT k, v FROM {} ORDER BY k ASC", tree);
-		DbValueIterator::make(this, &sql, [])
+		DbValueIterator::make(self.db.get()?, &sql, [])
 	}
 
 	fn iter_rev(&self, tree: usize) -> Result<ValueIter<'_>> {
-		trace!("iter_rev {}: lock db", tree);
-		let this = self.0.lock().unwrap();
-		trace!("iter_rev {}: lock acquired", tree);
-
-		let tree = this.get_tree(tree)?;
+		let tree = self.get_tree(tree)?;
 		let sql = format!("SELECT k, v FROM {} ORDER BY k DESC", tree);
-		DbValueIterator::make(this, &sql, [])
+		DbValueIterator::make(self.db.get()?, &sql, [])
 	}
 
 	fn range<'r>(
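Note the locking discipline these rewritten methods share: read-only paths just borrow a pooled connection, while `insert`, `remove` and `clear` take `write_lock` first, so at most one writer is active even though r2d2 hands out many connections. The pattern in isolation (names illustrative, not from the patch):

use std::sync::Mutex;

struct Store {
	write_lock: Mutex<()>, // single-writer gate, as in the patch
}

impl Store {
	fn write_op(&self, work: impl FnOnce()) {
		// Readers never touch this mutex, so they proceed concurrently;
		// writers serialize on the guard and release it when it drops.
		let _guard = self.write_lock.lock().unwrap();
		work();
	}
}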
@@ -220,11 +227,7 @@ impl IDb for SqliteDb {
 		low: Bound<&'r [u8]>,
 		high: Bound<&'r [u8]>,
 	) -> Result<ValueIter<'_>> {
-		trace!("range {}: lock db", tree);
-		let this = self.0.lock().unwrap();
-		trace!("range {}: lock acquired", tree);
-
-		let tree = this.get_tree(tree)?;
+		let tree = self.get_tree(tree)?;
 
 		let (bounds_sql, params) = bounds_sql(low, high);
 		let sql = format!("SELECT k, v FROM {} {} ORDER BY k ASC", tree, bounds_sql);
@@ -234,7 +237,7 @@ impl IDb for SqliteDb {
 			.map(|x| x as &dyn rusqlite::ToSql)
 			.collect::<Vec<_>>();
 
-		DbValueIterator::make::<&[&dyn rusqlite::ToSql]>(this, &sql, params.as_ref())
+		DbValueIterator::make::<&[&dyn rusqlite::ToSql]>(self.db.get()?, &sql, params.as_ref())
 	}
 
 	fn range_rev<'r>(
 		&self,
@@ -242,11 +245,7 @@ impl IDb for SqliteDb {
 		low: Bound<&'r [u8]>,
 		high: Bound<&'r [u8]>,
 	) -> Result<ValueIter<'_>> {
-		trace!("range_rev {}: lock db", tree);
-		let this = self.0.lock().unwrap();
-		trace!("range_rev {}: lock acquired", tree);
-
-		let tree = this.get_tree(tree)?;
+		let tree = self.get_tree(tree)?;
 
 		let (bounds_sql, params) = bounds_sql(low, high);
 		let sql = format!("SELECT k, v FROM {} {} ORDER BY k DESC", tree, bounds_sql);
@@ -256,25 +255,20 @@ impl IDb for SqliteDb {
 			.map(|x| x as &dyn rusqlite::ToSql)
 			.collect::<Vec<_>>();
 
-		DbValueIterator::make::<&[&dyn rusqlite::ToSql]>(this, &sql, params.as_ref())
+		DbValueIterator::make::<&[&dyn rusqlite::ToSql]>(self.db.get()?, &sql, params.as_ref())
 	}
 
 	// ----
 
 	fn transaction(&self, f: &dyn ITxFn) -> TxResult<OnCommit, ()> {
-		trace!("transaction: lock db");
-		let mut this = self.0.lock().unwrap();
-		trace!("transaction: lock acquired");
-
-		let this_mut_ref: &mut SqliteDbInner = this.borrow_mut();
-
-		trace!("trying transaction");
+		let mut db = self.db.get().map_err(Error::from).map_err(TxError::Db)?;
+		let trees = self.trees.read().unwrap();
+		let lock = self.write_lock.lock();
+
 		let mut tx = SqliteTx {
-			tx: this_mut_ref
-				.db
-				.transaction()
-				.map_err(Error::from)
-				.map_err(TxError::Db)?,
-			trees: &this_mut_ref.trees,
+			tx: db.transaction().map_err(Error::from).map_err(TxError::Db)?,
+			trees: &trees,
 		};
 		let res = match f.try_on(&mut tx) {
 			TxFnResult::Ok(on_commit) => {
@@ -294,7 +288,8 @@ impl IDb for SqliteDb {
 		};
 
 		trace!("transaction done");
-		res
+		drop(lock);
+		return res;
 	}
 }
@@ -302,12 +297,12 @@ impl IDb for SqliteDb {
 
 struct SqliteTx<'a> {
 	tx: Transaction<'a>,
-	trees: &'a [String],
+	trees: &'a [Arc<str>],
 }
 
 impl<'a> SqliteTx<'a> {
 	fn get_tree(&self, i: usize) -> TxOpResult<&'_ str> {
-		self.trees.get(i).map(String::as_ref).ok_or_else(|| {
+		self.trees.get(i).map(Arc::as_ref).ok_or_else(|| {
 			TxOpError(Error(
 				"invalid tree id (it might have been opened after the transaction started)".into(),
 			))
@@ -341,73 +336,86 @@ impl<'a> ITx for SqliteTx<'a> {
 		}
 	}
 
-	fn insert(&mut self, tree: usize, key: &[u8], value: &[u8]) -> TxOpResult<Option<Value>> {
+	fn insert(&mut self, tree: usize, key: &[u8], value: &[u8]) -> TxOpResult<()> {
 		let tree = self.get_tree(tree)?;
-		let old_val = self.internal_get(tree, key)?;
-
-		let sql = match &old_val {
-			Some(_) => format!("UPDATE {} SET v = ?2 WHERE k = ?1", tree),
-			None => format!("INSERT INTO {} (k, v) VALUES (?1, ?2)", tree),
-		};
-		let n = self.tx.execute(&sql, params![key, value])?;
-		assert_eq!(n, 1);
-
-		Ok(old_val)
+		let sql = format!("INSERT OR REPLACE INTO {} (k, v) VALUES (?1, ?2)", tree);
+		self.tx.execute(&sql, params![key, value])?;
+		Ok(())
 	}
 
-	fn remove(&mut self, tree: usize, key: &[u8]) -> TxOpResult<Option<Value>> {
+	fn remove(&mut self, tree: usize, key: &[u8]) -> TxOpResult<()> {
 		let tree = self.get_tree(tree)?;
-		let old_val = self.internal_get(tree, key)?;
-
-		if old_val.is_some() {
-			let n = self
-				.tx
-				.execute(&format!("DELETE FROM {} WHERE k = ?1", tree), params![key])?;
-			assert_eq!(n, 1);
-		}
-
-		Ok(old_val)
+		self.tx
+			.execute(&format!("DELETE FROM {} WHERE k = ?1", tree), params![key])?;
+		Ok(())
+	}
+
+	fn clear(&mut self, tree: usize) -> TxOpResult<()> {
+		let tree = self.get_tree(tree)?;
+		self.tx.execute(&format!("DELETE FROM {}", tree), [])?;
+		Ok(())
 	}
 
-	fn iter(&self, _tree: usize) -> TxOpResult<TxValueIter<'_>> {
-		unimplemented!();
+	fn iter(&self, tree: usize) -> TxOpResult<TxValueIter<'_>> {
+		let tree = self.get_tree(tree)?;
+		let sql = format!("SELECT k, v FROM {} ORDER BY k ASC", tree);
+		TxValueIterator::make(self, &sql, [])
 	}
 
-	fn iter_rev(&self, _tree: usize) -> TxOpResult<TxValueIter<'_>> {
-		unimplemented!();
+	fn iter_rev(&self, tree: usize) -> TxOpResult<TxValueIter<'_>> {
+		let tree = self.get_tree(tree)?;
+		let sql = format!("SELECT k, v FROM {} ORDER BY k DESC", tree);
+		TxValueIterator::make(self, &sql, [])
 	}
 
 	fn range<'r>(
 		&self,
-		_tree: usize,
-		_low: Bound<&'r [u8]>,
-		_high: Bound<&'r [u8]>,
+		tree: usize,
+		low: Bound<&'r [u8]>,
+		high: Bound<&'r [u8]>,
 	) -> TxOpResult<TxValueIter<'_>> {
-		unimplemented!();
+		let tree = self.get_tree(tree)?;
+		let (bounds_sql, params) = bounds_sql(low, high);
+		let sql = format!("SELECT k, v FROM {} {} ORDER BY k ASC", tree, bounds_sql);
+
+		let params = params
+			.iter()
+			.map(|x| x as &dyn rusqlite::ToSql)
+			.collect::<Vec<_>>();
+
+		TxValueIterator::make::<&[&dyn rusqlite::ToSql]>(self, &sql, params.as_ref())
 	}
 
 	fn range_rev<'r>(
 		&self,
-		_tree: usize,
-		_low: Bound<&'r [u8]>,
-		_high: Bound<&'r [u8]>,
+		tree: usize,
+		low: Bound<&'r [u8]>,
+		high: Bound<&'r [u8]>,
 	) -> TxOpResult<TxValueIter<'_>> {
-		unimplemented!();
+		let tree = self.get_tree(tree)?;
+		let (bounds_sql, params) = bounds_sql(low, high);
+		let sql = format!("SELECT k, v FROM {} {} ORDER BY k DESC", tree, bounds_sql);
+
+		let params = params
+			.iter()
+			.map(|x| x as &dyn rusqlite::ToSql)
+			.collect::<Vec<_>>();
+
+		TxValueIterator::make::<&[&dyn rusqlite::ToSql]>(self, &sql, params.as_ref())
 	}
 }
-// ----
+// ---- iterators outside transactions ----
+// complicated, they must hold the Statement and Row objects
+// therefore quite some unsafe code (it is a self-referential struct)
 
 struct DbValueIterator<'a> {
-	db: MutexGuard<'a, SqliteDbInner>,
+	db: Connection,
 	stmt: Option<Statement<'a>>,
 	iter: Option<Rows<'a>>,
 	_pin: PhantomPinned,
 }
 
 impl<'a> DbValueIterator<'a> {
-	fn make<P: rusqlite::Params>(
-		db: MutexGuard<'a, SqliteDbInner>,
-		sql: &str,
-		args: P,
-	) -> Result<ValueIter<'a>> {
+	fn make<P: rusqlite::Params>(db: Connection, sql: &str, args: P) -> Result<ValueIter<'a>> {
 		let res = DbValueIterator {
 			db,
 			stmt: None,
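One behavioural change worth noting from the transactional `insert` above: the SELECT-then-UPDATE/INSERT dance is collapsed into a single `INSERT OR REPLACE`, which relies on the `PRIMARY KEY` on `k` to detect the conflict. A small standalone check of that behaviour (illustrative, not from the patch):

use rusqlite::{params, Connection};

fn upsert_demo() -> rusqlite::Result<()> {
	let db = Connection::open_in_memory()?;
	db.execute("CREATE TABLE t (k BLOB PRIMARY KEY, v BLOB)", [])?;
	// Two upserts on the same key leave a single row behind.
	db.execute("INSERT OR REPLACE INTO t (k, v) VALUES (?1, ?2)", params![&b"key"[..], &b"v1"[..]])?;
	db.execute("INSERT OR REPLACE INTO t (k, v) VALUES (?1, ?2)", params![&b"key"[..], &b"v2"[..]])?;
	let n: i64 = db.query_row("SELECT COUNT(*) FROM t", [], |r| r.get(0))?;
	assert_eq!(n, 1); // still one row; the value was replaced
	Ok(())
}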
@@ -417,17 +425,23 @@ impl<'a> DbValueIterator<'a> {
 		let mut boxed = Box::pin(res);
 		trace!("make iterator with sql: {}", sql);
 
+		// This unsafe allows us to bypass lifetime checks
+		let db = unsafe { NonNull::from(&boxed.db).as_ref() };
+		let stmt = db.prepare(sql)?;
+
+		let mut_ref = Pin::as_mut(&mut boxed);
+		// This unsafe allows us to write in a field of the pinned struct
 		unsafe {
-			let db = NonNull::from(&boxed.db);
-			let stmt = db.as_ref().db.prepare(sql)?;
-
-			let mut_ref: Pin<&mut DbValueIterator<'a>> = Pin::as_mut(&mut boxed);
 			Pin::get_unchecked_mut(mut_ref).stmt = Some(stmt);
 		}
 
-		let mut stmt = NonNull::from(&boxed.stmt);
-		let iter = stmt.as_mut().as_mut().unwrap().query(args)?;
+		// This unsafe allows us to bypass lifetime checks
+		let stmt = unsafe { NonNull::from(&boxed.stmt).as_mut() };
+		let iter = stmt.as_mut().unwrap().query(args)?;
 
-		let mut_ref: Pin<&mut DbValueIterator<'a>> = Pin::as_mut(&mut boxed);
+		let mut_ref = Pin::as_mut(&mut boxed);
+		// This unsafe allows us to write in a field of the pinned struct
 		unsafe {
 			Pin::get_unchecked_mut(mut_ref).iter = Some(iter);
 		}
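The rewritten `make` above is still the self-referential trick the new comments describe: the struct owns the `Connection` while `stmt` and `iter` borrow from it, something safe Rust lifetimes cannot express, so the code pins the box and launders pointers through `NonNull`. A toy reduction of the pattern, with a `String` owner standing in for the connection (illustrative only):

use std::marker::PhantomPinned;
use std::pin::Pin;
use std::ptr::NonNull;

struct SelfRef {
	owner: String,                  // plays the role of the pooled Connection
	borrower: Option<NonNull<str>>, // plays the role of stmt/iter
	_pin: PhantomPinned,            // opt out of Unpin: the address must stay put
}

fn make() -> Pin<Box<SelfRef>> {
	let mut boxed = Box::pin(SelfRef {
		owner: "hello".to_string(),
		borrower: None,
		_pin: PhantomPinned,
	});
	// Sound only because the box is pinned: `owner` will never move again.
	let ptr = NonNull::from(boxed.owner.as_str());
	let mut_ref = Pin::as_mut(&mut boxed);
	// This unsafe writes into a field of the pinned struct, as in the patch.
	unsafe {
		Pin::get_unchecked_mut(mut_ref).borrower = Some(ptr);
	}
	boxed
}

fn main() {
	let s = make();
	// Reading back through the stored pointer is fine while `s` is alive.
	let b = unsafe { s.borrower.unwrap().as_ref() };
	assert_eq!(b, "hello");
}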
@@ -449,28 +463,73 @@ impl<'a> Iterator for DbValueIteratorPin<'a> {
 	type Item = Result<(Value, Value)>;
 
 	fn next(&mut self) -> Option<Self::Item> {
-		let next = unsafe {
-			let mut_ref: Pin<&mut DbValueIterator<'a>> = Pin::as_mut(&mut self.0);
-			Pin::get_unchecked_mut(mut_ref).iter.as_mut()?.next()
-		};
-		let row = match next {
-			Err(e) => return Some(Err(e.into())),
-			Ok(None) => return None,
-			Ok(Some(r)) => r,
-		};
-		let k = match row.get::<_, Vec<u8>>(0) {
-			Err(e) => return Some(Err(e.into())),
-			Ok(x) => x,
-		};
-		let v = match row.get::<_, Vec<u8>>(1) {
-			Err(e) => return Some(Err(e.into())),
-			Ok(y) => y,
-		};
-		Some(Ok((k, v)))
+		let mut_ref = Pin::as_mut(&mut self.0);
+		// This unsafe allows us to mutably access the iterator field
+		let next = unsafe { Pin::get_unchecked_mut(mut_ref).iter.as_mut()?.next() };
+		iter_next_row(next)
 	}
 }
 
-// ----
+// ---- iterators within transactions ----
+// it's the same except we don't hold a mutex guard,
+// only a Statement and a Rows object
+
+struct TxValueIterator<'a> {
+	stmt: Statement<'a>,
+	iter: Option<Rows<'a>>,
+	_pin: PhantomPinned,
+}
+
+impl<'a> TxValueIterator<'a> {
+	fn make<P: rusqlite::Params>(
+		tx: &'a SqliteTx<'a>,
+		sql: &str,
+		args: P,
+	) -> TxOpResult<TxValueIter<'a>> {
+		let stmt = tx.tx.prepare(sql)?;
+		let res = TxValueIterator {
+			stmt,
+			iter: None,
+			_pin: PhantomPinned,
+		};
+		let mut boxed = Box::pin(res);
+		trace!("make iterator with sql: {}", sql);
+
+		// This unsafe allows us to bypass lifetime checks
+		let stmt = unsafe { NonNull::from(&boxed.stmt).as_mut() };
+		let iter = stmt.query(args)?;
+
+		let mut_ref = Pin::as_mut(&mut boxed);
+		// This unsafe allows us to write in a field of the pinned struct
+		unsafe {
+			Pin::get_unchecked_mut(mut_ref).iter = Some(iter);
+		}
+
+		Ok(Box::new(TxValueIteratorPin(boxed)))
+	}
+}
+
+impl<'a> Drop for TxValueIterator<'a> {
+	fn drop(&mut self) {
+		trace!("drop iter");
+		drop(self.iter.take());
+	}
+}
+
+struct TxValueIteratorPin<'a>(Pin<Box<TxValueIterator<'a>>>);
+
+impl<'a> Iterator for TxValueIteratorPin<'a> {
+	type Item = TxOpResult<(Value, Value)>;
+
+	fn next(&mut self) -> Option<Self::Item> {
+		let mut_ref = Pin::as_mut(&mut self.0);
+		// This unsafe allows us to mutably access the iterator field
+		let next = unsafe { Pin::get_unchecked_mut(mut_ref).iter.as_mut()?.next() };
+		iter_next_row(next)
+	}
+}
+
+// ---- utility ----
 
 fn bounds_sql<'r>(low: Bound<&'r [u8]>, high: Bound<&'r [u8]>) -> (String, Vec<Vec<u8>>) {
 	let mut sql = String::new();
@@ -510,3 +569,25 @@ fn bounds_sql<'r>(low: Bound<&'r [u8]>, high: Bound<&'r [u8]>) -> (String, Vec<Vec<u8>>) {
 
 	(sql, params)
 }
+
+fn iter_next_row<E>(
+	next_row: rusqlite::Result<Option<&rusqlite::Row>>,
+) -> Option<std::result::Result<(Value, Value), E>>
+where
+	E: From<rusqlite::Error>,
+{
+	let row = match next_row {
+		Err(e) => return Some(Err(e.into())),
+		Ok(None) => return None,
+		Ok(Some(r)) => r,
+	};
+
+	let k = match row.get::<_, Vec<u8>>(0) {
+		Err(e) => return Some(Err(e.into())),
+		Ok(x) => x,
+	};
+
+	let v = match row.get::<_, Vec<u8>>(1) {
+		Err(e) => return Some(Err(e.into())),
+		Ok(y) => y,
+	};
+
+	Some(Ok((k, v)))
+}
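The shared `iter_next_row` helper is generic over the error type so both iterator families can use it. A hedged sketch of a driver loop inside this module (the `drain` helper is hypothetical; `Value` is assumed to be the crate's `Vec<u8>` alias):

fn drain(rows: &mut rusqlite::Rows<'_>) -> Result<Vec<(Value, Value)>> {
	let mut out = vec![];
	loop {
		match iter_next_row::<Error>(rows.next()) {
			None => break,               // cursor exhausted
			Some(Err(e)) => return Err(e),
			Some(Ok(kv)) => out.push(kv),
		}
	}
	Ok(out)
}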

Some files were not shown because too many files have changed in this diff.