Compare commits

...

182 Commits

Author SHA1 Message Date
trinity-1686a c7f5dcd953 fix compilation on macos
fsblkcnt_t is only 32 bits there, so we have to do an additional cast
2023-10-15 17:57:27 +02:00
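
A hedged sketch of the cast this commit describes, using the nix crate (which the diff below lists as a dependency); this is only an illustration, not the exact code path touched by the commit:

    use nix::sys::statvfs::statvfs;
    use std::path::Path;

    // Free/total space of the filesystem containing `path`.
    // fsblkcnt_t is only 32 bits wide on macOS, so the block counts are
    // widened to u64 *before* multiplying by the fragment size, otherwise
    // the product can overflow on large disks.
    fn fs_space(path: &Path) -> nix::Result<(u64, u64)> {
        let st = statvfs(path)?;
        let frag = st.fragment_size() as u64;
        let free = st.blocks_available() as u64 * frag;
        let total = st.blocks() as u64 * frag;
        Ok((free, total))
    }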
Alex d8263fdf92 Merge pull request 'documentation updates for v0.9.0' (#647) from doc-updates into main
Reviewed-on: Deuxfleurs/garage#647
2023-10-11 12:57:37 +00:00
Alex d24aaba697 doc: update quick start and real world for v0.9.0 2023-10-11 14:49:54 +02:00
Alex b571dcd811 doc: updates to the "migrating to v0.9" page 2023-10-10 15:43:26 +02:00
Alex e6df7089a1 Merge pull request 'Garage v0.9' (#473) from next into main
Reviewed-on: Deuxfleurs/garage#473
2023-10-10 13:28:28 +00:00
Alex 952c9570c4 bump version to v0.9.0 2023-10-10 14:08:11 +02:00
Alex 3d7892477d convert_db: fix build 2023-10-10 14:06:25 +02:00
Alex d4932c31ea Merge branch 'main' into next 2023-10-10 13:57:21 +02:00
Alex e75fe2157d Merge pull request 'Move convert_db command into main garage binary' (#645) from convert-db-main-binary into main
Reviewed-on: Deuxfleurs/garage#645
2023-10-10 11:42:14 +00:00
Alex 2d5d7a7031 Move convert_db command into main garage binary 2023-10-10 12:13:15 +02:00
Alex 0c431b0c03 admin api: increased compatibility for v0/ endpoints 2023-10-05 16:56:13 +02:00
Alex 1c13135f25 admin api: remove broken GET /v0/key router rule 2023-10-05 16:27:29 +02:00
Alex 2448eb7713 upgrade doc: fixes and precisions 2023-10-05 15:29:55 +02:00
Alex 6790e24f5a Add migration to v0.9 guide 2023-10-05 15:20:48 +02:00
Alex 9ccc1d6f4a move upgrade test to release build 2023-10-05 10:42:10 +02:00
Alex 920dec393a cli: more precise doc comment 2023-10-04 10:44:42 +02:00
Alex 2e656b541b Merge branch 'main' into next 2023-10-03 18:40:37 +02:00
Alex 9ac1d5be0e add upgrade test for garage 0.8 -> 0.9 2023-09-27 14:57:37 +02:00
Alex 897cbf2c27 actually update rmp-serde to 1.1.2 for both garage and netapp dependency (fix #629) 2023-09-27 13:13:00 +02:00
Alex ad82035b98 Merge branch 'main' into next 2023-09-27 13:11:52 +02:00
Alex aa7eadc799 Merge pull request 'New layout: fixes and UX improvements' (#634) from new-layout-ux into next
Reviewed-on: Deuxfleurs/garage#634
2023-09-27 09:04:32 +00:00
Alex 0e5925fff6 layout doc: reformulate 2023-09-22 16:14:47 +02:00
Alex 8d07888fa2 layout doc: write explanations for bizarre scenarios 2023-09-22 16:07:46 +02:00
Alex 405aa42b7d layout doc: update old text 2023-09-22 10:06:31 +02:00
Alex b4a0e636d8 new layout doc: add examples of unexpected layout, to explain 2023-09-22 09:49:07 +02:00
Alex 1d986bd889 Merge pull request 'Refactor db transactions and add on_commit for table.queue_insert' (#637) from k2v-indices-lmdb into next
Reviewed-on: Deuxfleurs/garage#637
2023-09-21 14:03:35 +00:00
Alex 0635250b2b garage_table/queue_insert: delay worker notification to after transaction commit (fix #583) 2023-09-21 15:37:28 +02:00
Alex f97168f805 garage_db: refactor transactions and add on_commit mechanism 2023-09-21 15:35:31 +02:00
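
A minimal sketch of the on_commit idea behind these two commits (illustrative only, not the actual garage_db API): side effects such as waking a queue worker are registered on the transaction and only run once the commit has succeeded.

    struct Tx {
        callbacks: Vec<Box<dyn FnOnce()>>,
    }

    impl Tx {
        fn new() -> Self {
            Tx { callbacks: Vec::new() }
        }

        // Register a side effect to run only after a successful commit.
        fn on_commit(&mut self, f: impl FnOnce() + 'static) {
            self.callbacks.push(Box::new(f));
        }

        fn commit(self) -> Result<(), ()> {
            // ... apply the writes to the storage engine here ...
            // Only now is it safe to notify other components: a worker woken
            // up before the commit could fail to see the inserted item.
            for f in self.callbacks {
                f();
            }
            Ok(())
        }
    }

    fn main() {
        let mut tx = Tx::new();
        tx.on_commit(|| println!("notify queue worker"));
        tx.commit().unwrap();
    }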
Alex 3ecc17f8c5 new layout: use deterministic randomness for reproducible results 2023-09-21 11:21:35 +02:00
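
Deterministic randomness usually means a seeded RNG; a sketch of the pattern (seeding from a layout version number is an assumption, not necessarily what Garage seeds from):

    use rand::prelude::*;
    use rand::rngs::StdRng;

    // Same seed, same shuffle: ties in the layout computation are broken
    // identically on every run, so the resulting assignment is reproducible.
    fn shuffled_nodes(seed: u64, mut nodes: Vec<u32>) -> Vec<u32> {
        let mut rng = StdRng::seed_from_u64(seed);
        nodes.shuffle(&mut rng);
        nodes
    }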
Alex 013b026d56 update cargo.nix 2023-09-18 12:18:56 +02:00
Alex 0088599f52 new layout: fix clippy lints 2023-09-18 12:17:07 +02:00
Alex 749b4865d0 new layout: improve display and fix comments 2023-09-18 12:07:45 +02:00
Alex 015ccb39aa new layout: make zone_redundancy optional (if not set, it is the maximum) 2023-09-18 11:59:08 +02:00
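
One way to model an "optional, defaults to maximum" redundancy parameter; a sketch with illustrative names, not necessarily Garage's exact types:

    #[derive(Clone, Copy, Debug)]
    enum ZoneRedundancy {
        AtLeast(usize), // spread each partition over at least this many zones
        Maximum,        // not set: use as many distinct zones as possible
    }

    fn parse_zone_redundancy(arg: Option<&str>) -> Result<ZoneRedundancy, std::num::ParseIntError> {
        match arg {
            None => Ok(ZoneRedundancy::Maximum),
            Some(s) => Ok(ZoneRedundancy::AtLeast(s.parse()?)),
        }
    }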
Alex 2e229d4430 new layout: improve output display 2023-09-12 17:24:51 +02:00
Alex fd7d8fec59 Merge branch 'main' into next 2023-09-11 23:09:20 +02:00
Alex 51abbb02d8 Merge branch 'main' into next 2023-09-11 20:00:02 +02:00
Alex ad6b1cc0be Merge branch 'main' into next 2023-09-11 13:14:18 +02:00
Alex 7228fbfd4f Merge pull request 'multi-hdd support (fix #218)' (#625) from multihdd into next
Reviewed-on: Deuxfleurs/garage#625
2023-09-11 10:52:01 +00:00
Alex ba7ac52c19 block repair: simpler/more robust iterator progress calculation 2023-09-11 12:31:34 +02:00
Alex 9526328d38 scrub: clear saved checkpoint when canceling scrub 2023-09-11 12:10:48 +02:00
Alex 7f9ba49c71 block manager: remove data_dir field 2023-09-11 11:57:36 +02:00
Alex de5d792181 block manager: fix indentation (why not detected by cargo fmt?) 2023-09-11 11:52:57 +02:00
Alex be91ef6294 block manager: fix bug where rebalance didn't delete old copies 2023-09-07 16:04:03 +02:00
Alex 2657b5c1b9 block manager: fix bugs 2023-09-07 15:30:56 +02:00
Alex eb972a8422 doc: update multi-hdd section 2023-09-07 14:48:36 +02:00
Alex 2f112ac682 correct free data space accounting for multiple data dirs on same fs 2023-09-07 14:42:20 +02:00
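
The accounting fix amounts to counting each filesystem once even when several data directories live on it; a sketch of that idea (not Garage's exact code), keyed on the device id reported by the OS:

    use std::collections::HashSet;
    use std::io;
    use std::os::unix::fs::MetadataExt;
    use std::path::Path;

    // Keep only one representative path per underlying filesystem, so that
    // its free space is summed a single time.
    fn dedup_by_filesystem<'a>(data_dirs: &[&'a Path]) -> io::Result<Vec<&'a Path>> {
        let mut seen = HashSet::new();
        let mut unique = Vec::new();
        for &dir in data_dirs {
            let dev = std::fs::metadata(dir)?.dev();
            if seen.insert(dev) {
                unique.push(dir);
            }
        }
        Ok(unique)
    }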
Alex 6a067e30ee doc: documentation of rebalance repair 2023-09-07 13:49:12 +02:00
Alex 6b008b5bd3 block manager: add rebalance operation to rebalance multi-hdd setups 2023-09-07 13:44:11 +02:00
Alex 6595efd82f Document multi-hdd support 2023-09-07 13:23:02 +02:00
Alex bca347a1e8 doc: update page on upgrading clusters 2023-09-07 12:52:44 +02:00
Alex 99ed18350f block manager: refactor and fix monitoring/statistics 2023-09-07 12:41:36 +02:00
Alex f38a31b330 block manager: avoid incorrect data_dir configs and avoid losing files 2023-09-06 17:49:30 +02:00
Alex e30865984a block manager: scrub checkpointing 2023-09-06 16:35:28 +02:00
Alex 55c514999e block manager: fixes in layout 2023-09-06 16:35:28 +02:00
Alex a44f486931 block manager: refactoring & increase max worker count to 8 2023-09-06 16:35:28 +02:00
Alex 3a74844df0 block manager: fix dir_not_empty 2023-09-06 16:35:28 +02:00
Alex 93114a9747 block manager: refactoring 2023-09-06 16:35:28 +02:00
Alex fd00a47ddc table queue: increase batch size 2023-09-06 16:35:28 +02:00
Alex 1b8c265c14 block manager: get rid of check_block_status 2023-09-06 16:35:28 +02:00
Alex 3199cab4c8 update cargo.nix 2023-09-06 16:35:28 +02:00
Alex a09f86729c block manager: move blocks in write_block if necessary 2023-09-06 16:35:28 +02:00
Alex 887b3233f4 block manager: use data paths from layout 2023-09-06 16:35:28 +02:00
Alex 6c420c0880 block manager: multi-directory layout computation 2023-09-06 16:35:28 +02:00
Alex 71c0188055 block manager: skeleton for multi-hdd support 2023-09-06 16:35:28 +02:00
Alex 4b4f2000f4 lifecycle: fix SkipBucket bug 2023-09-06 16:34:07 +02:00
Alex 3f461d8891 Merge pull request 'object lifecycles (fix #309)' (#620) from bucket-lifecycle into next
Reviewed-on: Deuxfleurs/garage#620
2023-09-04 09:45:10 +00:00
Alex 8e0c020bb9 lifecycle worker: correct small clippy lints 2023-09-04 11:33:44 +02:00
Alex 1cdc321e28 lifecycle worker: don't get stuck on non-existent bucket 2023-08-31 11:36:30 +02:00
Alex f579d6d9b4 lifecycle worker: fix potential infinite loop 2023-08-31 11:29:54 +02:00
Alex a00a52633f lifecycle worker: add log message when starting 2023-08-31 11:25:14 +02:00
Alex adbf5925de lifecycle worker: use queue_insert and process objects in batches 2023-08-31 11:19:26 +02:00
Alex 1cfcc61de8 lifecycle worker: mitigate potential bugs + refactoring 2023-08-31 00:28:37 +02:00
Alex be03a4610f s3api: remove redundant serde rename attribute 2023-08-31 00:00:26 +02:00
Alex b2f679675e lifecycle worker: take into account disabled rules 2023-08-30 23:52:09 +02:00
Alex 5fad4c4658 update cargo.nix 2023-08-30 23:47:42 +02:00
Alex 01c327a07a lifecycle worker: avoid building chrono's serde feature 2023-08-30 23:46:15 +02:00
Alex f0a395e2e5 s3 bucket apis: remove redundant call 2023-08-30 23:39:28 +02:00
Alex d94f1c9178 reference manual: remove obsolete caveat about multipart uploads 2023-08-30 23:27:02 +02:00
Alex 5c923d48d7 reference manual: document support for lifecycle configuration 2023-08-30 23:24:28 +02:00
Alex a1d57283c0 bucket_table: bucketparams::new doesn't need to be pub 2023-08-30 20:07:14 +02:00
Alex d2e94e36d6 lifecycle config: add missing line in merge() and remove tracing 2023-08-30 20:05:53 +02:00
Alex 75ccc5a95c lifecycle config: store date as given, try to debug 2023-08-30 20:02:07 +02:00
Alex 7200954318 lifecycle worker: add logging 2023-08-30 14:54:52 +02:00
Alex 0f1849e1ac lifecycle worker: launch with the rest of Garage 2023-08-30 14:51:08 +02:00
Alex da8b224e24 lifecycle worker: skip entire bucket when no lifecycle config is set 2023-08-30 14:38:19 +02:00
Alex 2996dc875f lifecycle worker: implement main functionality 2023-08-30 14:29:03 +02:00
Alex a2e0e34db5 lifecycle: skeleton for lifecycle worker 2023-08-30 12:41:11 +02:00
Alex f7b409f114 use a NaiveDate in data model, it serializes to string (iso 8601 format) 2023-08-30 11:24:01 +02:00
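
For reference, chrono's NaiveDate round-trips through the ISO 8601 calendar-date string, which is what makes it convenient to store as text (simplified example, not Garage's serialization code):

    use chrono::NaiveDate;

    fn main() {
        let d = NaiveDate::from_ymd_opt(2023, 8, 30).unwrap();
        // Display and FromStr both use the ISO 8601 format YYYY-MM-DD.
        assert_eq!(d.to_string(), "2023-08-30");
        let parsed: NaiveDate = "2023-08-30".parse().unwrap();
        assert_eq!(parsed, d);
    }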
Alex abf011c290 lifecycle: implement validation into garage's internal data structure 2023-08-29 18:22:03 +02:00
Alex 8041d9a827 s3: add xml structures to serialize/deserialize lifecycle configs 2023-08-29 17:44:17 +02:00
Alex 0b83e0558e bucket_table: data model for lifecycle configuration 2023-08-29 17:00:41 +02:00
Alex 2e90e1c124 Merge branch 'main' into next 2023-08-29 11:32:42 +02:00
Alex 8ef42c9609 admin docs: reformatting, key admin: add check 2023-06-14 17:19:25 +02:00
Alex a83a092c03 admin: uniformize layout api and improve code 2023-06-14 17:12:37 +02:00
Alex 7895f99d3a admin and cli: hide secret keys unless asked 2023-06-14 16:56:15 +02:00
Alex 4a82f6380e admin api: move all endpoints to v1/ by default (v0/ still supported) 2023-06-14 14:15:51 +02:00
Alex 28cc9f178a admin api: make name optional for CreateKey 2023-06-14 13:56:37 +02:00
Alex 2c83006608 admin api: fix doc in drafts 2023-06-14 13:54:34 +02:00
Alex 35c108b85d admin api: switch GetClusterHealth to camelcase (fix #381 again) 2023-06-14 13:53:19 +02:00
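
The camelCase switch is the standard serde pattern; a sketch with illustrative field names (not the exact GetClusterHealth response struct):

    use serde::Serialize;

    #[derive(Serialize)]
    #[serde(rename_all = "camelCase")]
    struct ClusterHealth {
        status: String,
        known_nodes: usize,
        connected_nodes: usize,
    }
    // Serializes as {"status": ..., "knownNodes": ..., "connectedNodes": ...}
    // instead of the snake_case field names.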
Alex 52376d47ca admin api: change cluster status/layout to use lists and not maps (fix #377) 2023-06-14 13:45:27 +02:00
Alex 187240e539 Merge branch 'main' into next 2023-06-14 13:02:46 +02:00
Alex 5670367126 multipartupload in test: add forgotten timestamp 2023-06-13 23:10:46 +02:00
Alex cda957b4b1 update netapp's rmp-serde dependency to v1.1 2023-06-13 17:34:49 +02:00
Alex 90b2d43eb4 Merge branch 'main' into next 2023-06-13 17:14:11 +02:00
Alex bf19a44fd9 admin API: add missing camelCase conversions (fix #381) 2023-06-13 16:15:50 +02:00
Alex 7126f3e1d1 garage key import: add checks and `--yes` CLI flag (fix #278) 2023-06-13 15:56:48 +02:00
Alex 942c1f1bfe multipart uploads: save timestamp 2023-06-13 10:48:22 +02:00
Alex 0a06fda0da Merge pull request 'Fix #204 (full Multipart Uploads semantics)' (#553) from nlnet-task1 into next
Reviewed-on: Deuxfleurs/garage#553
2023-06-09 15:34:09 +00:00
Alex 3d477906d4 properly delete multipart uploads after completion 2023-06-09 17:13:27 +02:00
Alex e645bbd3ce smoke test: add multipart upload test with part re-upload 2023-06-09 16:23:37 +02:00
Alex 58563ed700 Add multipart upload using aws s3api 2023-06-09 16:23:37 +02:00
Alex a6cc563bdd UploadPart: automatic cleanup of version (and reference blocked) when interrupted 2023-06-09 16:23:37 +02:00
Alex c14d3735e5 Add test for multipart uploads and fix part renumbering 2023-06-09 16:23:37 +02:00
Alex 53bf2f070c undo sort_key() returning Cow 2023-06-09 16:23:37 +02:00
Alex 412ab77b08 comments and clippy lint fixes 2023-06-09 16:23:37 +02:00
Alex 511e07ecd4 fix mpu counter (add missing workers) and report info at appropriate places 2023-06-09 16:23:37 +02:00
Alex 4ea53dc759 Add multipart upload repair 2023-06-09 16:23:37 +02:00
Alex 058518c22b refactor repair workers with a trait 2023-06-09 16:23:37 +02:00
Alex 8644376ac2 fix test; simplify code 2023-06-09 16:23:37 +02:00
Alex 7ad7dae5d4 fix s3 list test 2023-06-09 16:23:37 +02:00
Alex 75a0e01372 fix online repair 2023-06-09 16:23:37 +02:00
Alex bb176ebcb8 cargo fmt 2023-06-09 16:23:37 +02:00
Alex c1e1764f17 move git-version dependency to main crate to reduce rebuilds 2023-06-09 16:23:37 +02:00
Alex 87be8eeb93 update block admin for new multipart upload models 2023-06-09 16:23:37 +02:00
Alex 82e75c0e29 Adapt S3 API code to use new multipart upload models
- Create and PutPart
- completemultipartupload
- upload part copy
- list_parts
2023-06-09 16:23:37 +02:00
Alex 38d6ac4295 New multipart upload table layout 2023-06-09 16:23:37 +02:00
Alex 6005491cd8 Use Cow<[u8]> for sort keys 2023-06-09 16:23:37 +02:00
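
An illustrative sketch of what returning Cow<[u8]> for sort keys buys (not the actual garage_table trait; note that commit 53bf2f070c above later reverted the Cow return type): the key can be borrowed directly when it already exists as bytes, and only allocated when it has to be assembled.

    use std::borrow::Cow;

    trait Entry {
        fn sort_key(&self) -> Cow<'_, [u8]>;
    }

    struct PlainKey(Vec<u8>);
    impl Entry for PlainKey {
        fn sort_key(&self) -> Cow<'_, [u8]> {
            Cow::Borrowed(&self.0) // zero-copy
        }
    }

    struct CompositeKey(u64, Vec<u8>);
    impl Entry for CompositeKey {
        fn sort_key(&self) -> Cow<'_, [u8]> {
            let mut k = self.0.to_be_bytes().to_vec();
            k.extend_from_slice(&self.1);
            Cow::Owned(k) // key has to be assembled, so allocate
        }
    }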
Alex ea3bfd2ab1 Minio tests for multipart upload behaviour:
- upload part renumbering test
- part skipping test
2023-06-09 16:23:37 +02:00
Alex e7e164a280 Make fsync an option for meta and data 2023-06-09 16:23:21 +02:00
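
A minimal sketch of what "fsync as an option" means in practice (the flag name here is illustrative, not the actual configuration key):

    use std::fs::File;
    use std::io::{self, Write};

    // Whether to pay the latency cost of a disk flush is decided by a
    // configuration flag instead of being hard-coded.
    fn write_block(path: &str, data: &[u8], data_fsync: bool) -> io::Result<()> {
        let mut f = File::create(path)?;
        f.write_all(data)?;
        if data_fsync {
            f.sync_all()?; // force the data to stable storage
        }
        Ok(())
    }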
Alex 1e466b11eb Revert integration tests to using Sled as LMDB causes failures 2023-06-09 13:23:08 +02:00
Alex 865f0c7d0c Add LMDB to debug builds 2023-06-09 12:04:28 +02:00
Alex 906fe78b24 Integration tests: print logs on failure 2023-06-09 12:03:44 +02:00
Alex 8a74e1c2bd Split garage/admin.rs into smaller files 2023-06-06 15:39:15 +02:00
Alex 19639705e6 Mark sled as deprecated, make lmdb default, and improve sqlite and lmdb defaults 2023-05-17 14:30:53 +02:00
Alex 351d734e6c Merge branch 'main' into next 2023-05-09 12:40:08 +02:00
Alex a1fcf1b175 Merge branch 'main' into next 2023-04-25 16:58:57 +02:00
Alex fa78d806e3 Merge branch 'main' into next 2023-04-25 12:34:26 +02:00
Alex 654999e254 Update Cargo.nix 2023-01-26 15:50:54 +01:00
Jonathan Davies db56d4658f util/Cargo.toml: Updated rmp-serde from 0.15 to 1.1. 2023-01-26 11:03:43 +00:00
Alex 12a4e1f303 Merge branch 'optimal-layout' into next 2023-01-11 17:50:42 +01:00
Alex 84b4a868e3 Migration of cluster layout from v0.8 to v0.9 2023-01-11 17:47:46 +01:00
Alex 4f409f73dc Merge pull request 'Changed all instances of assignation to assignment' (#465) from jpds/garage:assignments-correction into next
Reviewed-on: Deuxfleurs/garage#465
2023-01-11 16:05:27 +00:00
Mendes 597d64b31a change in gitignore 2023-01-09 16:06:47 +01:00
Mendes e3cc7a89b0 First draft of a preprint describing the layout computation algorithm 2023-01-09 16:05:20 +01:00
Jonathan Davies cb07e6145c Changed all instances of assignation to assignment. 2023-01-05 11:09:25 +00:00
Alex 80e4abb98d Merge pull request 'Changed all instances of 'key new' to 'key create' to make it the same as the bucket commands.' (#459) from jpds/garage:key-create-standardize into next
Reviewed-on: Deuxfleurs/garage#459
2023-01-04 10:35:49 +00:00
Alex 570e5e5bbb Merge branch 'main' into next 2023-01-04 11:34:43 +01:00
Jonathan Davies 8be862aa19 Changed all instances of 'key new' to 'key create' to make it consistent with the bucket commands, which are typically issued around the same time 2023-01-03 11:11:12 +00:00
Alex 6e44369cbc Merge pull request 'Optimal layout assignation algorithm' (#296) from optimal-layout into next
Reviewed-on: Deuxfleurs/garage#296
2022-12-11 17:41:53 +00:00
Alex 2c2e65ad8b Merge commit 'ec12d6c' into next 2022-12-11 18:41:15 +01:00
Alex 9d83364ad9 itertools .unique() doesn't require sorted items 2022-12-11 18:30:02 +01:00
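
The two commits about counting unique items (9d83364ad9 here and fd5bc142b5 below) come down to this difference, shown as a small sketch:

    use itertools::Itertools;

    fn main() {
        let zones = vec!["a", "c", "a", "b", "c"];

        // itertools' .unique() deduplicates with an internal hash set,
        // so the input does not need to be sorted first.
        let n_unique = zones.iter().unique().count();

        // The sort-then-dedup approach only counts correctly if .sort()
        // runs before .dedup().
        let mut sorted = zones.clone();
        sorted.sort();
        sorted.dedup();

        assert_eq!(n_unique, sorted.len()); // both give 3
    }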
Alex ec12d6c8dd Slightly simplify code at places 2022-11-08 16:15:45 +01:00
Alex 217abdca18 Fix HTTP return code 2022-11-08 15:38:53 +01:00
Alex fc2729cd81 Fix integration test 2022-11-08 15:19:46 +01:00
Alex d75b37b018 Return more info when layout's .check() fails, fix compilation, fix test 2022-11-08 14:58:39 +01:00
Alex 73a4ca8b15 Use bytes as capacity units 2022-11-07 21:12:11 +01:00
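
With capacities stored as plain byte counts, human-readable input/output can be handled by the bytesize crate (which the diff below adds as a dependency of garage_rpc and garage_block); a small sketch, with the printed form only indicative:

    use bytesize::ByteSize;

    fn main() {
        let capacity: ByteSize = "100 GB".parse().unwrap();
        assert_eq!(capacity.as_u64(), 100_000_000_000);
        println!("{}", capacity); // prints a human-readable size, e.g. "100.0 GB"
    }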
Alex fd5bc142b5 Ensure .sort() is called before counting unique items 2022-11-07 20:29:25 +01:00
Alex ea5afc2511 Style improvements 2022-11-07 20:11:30 +01:00
Alex 28d7a49f63 Merge branch 'main' into optimal-layout 2022-11-07 12:20:59 +01:00
Alex 3039bb5d43 rm .gitattributes 2022-10-13 12:40:42 +02:00
Mendes bcdd1e0c33 Added some comment 2022-10-11 18:29:21 +02:00
Mendes e5664c9822 Improved the statistics displayed in layout show
corrected a few bugs
2022-10-11 17:17:13 +02:00
Mendes 4abab246f1 cargo fmt 2022-10-10 17:21:13 +02:00
Mendes fcf9ac674a Tests written in layout.rs
added staged_parameters to ClusterLayout
removed the serde(default) -> will need a migration function
2022-10-10 17:19:25 +02:00
Mendes 911eb17bd9 corrected warnings of cargo clippy 2022-10-06 14:53:57 +02:00
Mendes 9407df60cc Corrected two bugs:
- self.node_id_vec was not properly updated when the previous ring was empty
- ClusterLayout::merge was not considering changes in the layout parameters
2022-10-06 12:54:51 +02:00
Mendes a951b6c452 Added a CLI command to update the parameters for the layout computation (for now, only the zone redundancy) 2022-10-05 16:04:19 +02:00
Mendes ceac3713d6 modifications in several files to:
- have consistent error return types
- store the zone redundancy in a Lww
- print the error and message in the CLI (TODO: for the server API, should msg be returned in the body response?)
2022-10-05 15:29:48 +02:00
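
For the Lww point above, the idea is a last-writer-wins register: whichever copy carries the larger timestamp wins when two nodes merge their state, so all nodes converge to the same value. A generic sketch, not the actual garage_util type:

    #[derive(Clone, Debug, PartialEq)]
    struct Lww<T> {
        ts: u64, // logical timestamp of the last write
        value: T,
    }

    impl<T: Clone> Lww<T> {
        fn new(ts: u64, value: T) -> Self {
            Lww { ts, value }
        }

        fn update(&mut self, ts: u64, value: T) {
            if ts > self.ts {
                self.ts = ts;
                self.value = value;
            }
        }

        // Merge the state received from another node: the newer write wins.
        fn merge(&mut self, other: &Lww<T>) {
            if other.ts > self.ts {
                self.ts = other.ts;
                self.value = other.value.clone();
            }
        }
    }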
Mendes 829f815a89 Merge remote-tracking branch 'origin/main' into optimal-layout 2022-10-04 18:14:49 +02:00
Mendes 99f96b9564 deleted zone_redundancy from System struct 2022-10-04 18:09:24 +02:00
Mendes bd842e1388 Correction of a few bugs in the tests, modification of ClusterLayout::check 2022-09-22 19:30:01 +02:00
Mendes 7f3249a237 New version of the algorithm that calculates the layout.
It takes as parameters the replication factor and the zone redundancy, computes the
largest partition size reachable with these constraints, and among the possible
assignations with this partition size, it computes the one that moves the smallest
number of partitions compared to the previous assignation.
This computation uses graph algorithms defined in graph_algo.rs
2022-09-21 14:39:59 +02:00
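
Schematically, the computation described above can be organized as below. The two helpers are hypothetical stand-ins for the flow routines of graph_algo.rs, and the binary search over the partition size is an assumption about how the "largest reachable size" step is realized:

    struct LayoutInput {
        replication_factor: usize,
        zone_redundancy: usize,
        // node capacities, zones, previous assignment, ...
    }

    struct Assignment; // partition -> set of nodes

    // Hypothetical: true iff a feasible assignment exists when every partition
    // replica is given `size` bytes (checked with a max-flow computation).
    fn feasible(_input: &LayoutInput, _size: u64) -> bool {
        unimplemented!()
    }

    // Hypothetical: among assignments at partition size `size`, return one that
    // moves the fewest partitions away from the previous assignment
    // (a min-cost flow over the same graph).
    fn min_movement_assignment(_input: &LayoutInput, _size: u64) -> Assignment {
        unimplemented!()
    }

    fn compute_layout(input: &LayoutInput, max_size: u64) -> Assignment {
        // 1. Largest partition size that still admits a valid assignment under
        //    the replication factor and zone redundancy constraints
        //    (assumes size 1 is always feasible).
        let (mut lo, mut hi) = (1u64, max_size);
        while lo < hi {
            let mid = lo + (hi - lo + 1) / 2;
            if feasible(input, mid) {
                lo = mid;
            } else {
                hi = mid - 1;
            }
        }
        // 2. Among assignments achieving that size, pick the one closest to
        //    the previous assignment.
        min_movement_assignment(input, lo)
    }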
Mendes c4adbeed51 Added the section with the description and proofs of the parametric assignment computation in the optimal layout report 2022-09-10 13:51:12 +02:00
Mendes d38fb6c250 ignore log files in commit 2022-09-08 12:43:33 +02:00
Mendes 81083dd415 Added a first draft version of the algorithm and analysis for the non-strict mode. 2022-08-19 21:21:41 +02:00
Mendes 7b2c065c82 Merge branch 'optimal-layout' of https://git.deuxfleurs.fr/Deuxfleurs/garage into optimal-layout 2022-07-19 13:30:49 +02:00
Mendes 03e3a1bd15 Added the latex report on the optimal layout algorithm 2022-07-18 22:35:29 +02:00
Alex 617f28bfa4 Correct small formatting issue 2022-05-05 14:21:57 +02:00
Mendes 948ff93cf1 Corrected the warnings and errors issued by cargo clippy 2022-05-01 16:05:39 +02:00
Alex 3ba2c5b424 updated cargo.lock 2022-05-01 10:11:43 +02:00
Alex 2aeaddd5e2 Apply cargo fmt 2022-05-01 09:57:05 +02:00
Alex c1d1646c4d Change the way new layout assignations are computed.
The function now computes an optimal assignation (with respect to partition size) that minimizes the distance to the former assignation, using flow algorithms.

This commit was written by Mendes Oulamara <mendes.oulamara@pm.me>
2022-05-01 09:54:19 +02:00
122 changed files with 27956 additions and 2482 deletions

@ -20,6 +20,7 @@ steps:
image: nixpkgs/nix:nixos-22.05
environment:
GARAGE_TEST_INTEGRATION_EXE: result-bin/bin/garage
GARAGE_TEST_INTEGRATION_PATH: tmp-garage-integration
commands:
- nix-build --no-build-output --attr clippy.amd64 --argstr git_version ${DRONE_TAG:-$DRONE_COMMIT}
- nix-build --no-build-output --attr test.amd64
@ -31,8 +32,9 @@ steps:
- ./result/bin/garage_util-*
- ./result/bin/garage_web-*
- ./result/bin/garage-*
- ./result/bin/integration-*
- ./result/bin/integration-* || (cat tmp-garage-integration/stderr.log; false)
- rm result
- rm -rv tmp-garage-integration
- name: integration tests
image: nixpkgs/nix:nixos-22.05
@ -63,11 +65,16 @@ steps:
- nix-build --no-build-output --attr pkgs.amd64.release --argstr git_version ${DRONE_TAG:-$DRONE_COMMIT}
- nix-shell --attr rust --run "./script/not-dynamic.sh result-bin/bin/garage"
- name: integration
- name: integration tests
image: nixpkgs/nix:nixos-22.05
commands:
- nix-shell --attr integration --run ./script/test-smoke.sh || (cat /tmp/garage.log; false)
- name: upgrade tests
image: nixpkgs/nix:nixos-22.05
commands:
- nix-shell --attr integration --run "./script/test-upgrade.sh v0.8.4 x86_64-unknown-linux-musl" || (cat /tmp/garage.log; false)
- name: push static binary
image: nixpkgs/nix:nixos-22.05
environment:
@ -114,11 +121,16 @@ steps:
- nix-build --no-build-output --attr pkgs.i386.release --argstr git_version ${DRONE_TAG:-$DRONE_COMMIT}
- nix-shell --attr rust --run "./script/not-dynamic.sh result-bin/bin/garage"
- name: integration
- name: integration tests
image: nixpkgs/nix:nixos-22.05
commands:
- nix-shell --attr integration --run ./script/test-smoke.sh || (cat /tmp/garage.log; false)
- name: upgrade tests
image: nixpkgs/nix:nixos-22.05
commands:
- nix-shell --attr integration --run "./script/test-upgrade.sh v0.8.4 i686-unknown-linux-musl" || (cat /tmp/garage.log; false)
- name: push static binary
image: nixpkgs/nix:nixos-22.05
environment:
@ -283,6 +295,6 @@ trigger:
---
kind: signature
hmac: ac09a5a8c82502f67271f93afa1e1e21ce66383b8e24a6deb26b285cc1c378ba
hmac: 0c4b57eb4b27b7c6a6ff21ab87f0767fe3eb90f5d95d5cbcdccf794e9d2a5d86
...

Cargo.lock (generated): 31 lines changed

@ -1198,7 +1198,7 @@ dependencies = [
[[package]]
name = "garage"
version = "0.8.4"
version = "0.9.0"
dependencies = [
"assert-json-diff",
"async-trait",
@ -1249,7 +1249,7 @@ dependencies = [
[[package]]
name = "garage_api"
version = "0.8.4"
version = "0.9.0"
dependencies = [
"async-trait",
"base64 0.21.3",
@ -1295,12 +1295,13 @@ dependencies = [
[[package]]
name = "garage_block"
version = "0.8.4"
version = "0.9.0"
dependencies = [
"arc-swap",
"async-compression",
"async-trait",
"bytes",
"bytesize",
"futures",
"futures-util",
"garage_db",
@ -1320,7 +1321,7 @@ dependencies = [
[[package]]
name = "garage_db"
version = "0.8.4"
version = "0.9.0"
dependencies = [
"clap 4.4.0",
"err-derive",
@ -1335,12 +1336,13 @@ dependencies = [
[[package]]
name = "garage_model"
version = "0.8.4"
version = "0.9.0"
dependencies = [
"arc-swap",
"async-trait",
"base64 0.21.3",
"blake2",
"chrono",
"err-derive",
"futures",
"futures-util",
@ -1362,18 +1364,21 @@ dependencies = [
[[package]]
name = "garage_rpc"
version = "0.8.4"
version = "0.9.0"
dependencies = [
"arc-swap",
"async-trait",
"bytes",
"bytesize",
"err-derive",
"format_table",
"futures",
"futures-util",
"garage_db",
"garage_util",
"gethostname",
"hex",
"itertools 0.10.5",
"k8s-openapi",
"kube",
"kuska-sodiumoxide",
@ -1394,7 +1399,7 @@ dependencies = [
[[package]]
name = "garage_table"
version = "0.8.4"
version = "0.9.0"
dependencies = [
"arc-swap",
"async-trait",
@ -1416,7 +1421,7 @@ dependencies = [
[[package]]
name = "garage_util"
version = "0.8.4"
version = "0.9.0"
dependencies = [
"arc-swap",
"async-trait",
@ -1450,7 +1455,7 @@ dependencies = [
[[package]]
name = "garage_web"
version = "0.8.4"
version = "0.9.0"
dependencies = [
"err-derive",
"futures",
@ -2283,9 +2288,9 @@ checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a"
[[package]]
name = "netapp"
version = "0.5.2"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4ffe47ac46d3b2ce2f736a70865492df082e042eb2bfdddfca3b8dd66bd9469d"
checksum = "0a00b76cec93e3ae68c9ed5f08e27a1507424987ee23d5ec961ebd4da820a265"
dependencies = [
"arc-swap",
"async-trait",
@ -3032,9 +3037,9 @@ dependencies = [
[[package]]
name = "rmp-serde"
version = "0.15.5"
version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "723ecff9ad04f4ad92fe1c8ca6c20d2196d9286e9c60727c4cb5511629260e9d"
checksum = "bffea85eea980d8a74453e5d02a8d93028f3c34725de143085a844ebe953258a"
dependencies = [
"byteorder",
"rmp",

Cargo.nix: 309 lines changed

@ -7,13 +7,13 @@ args@{
"garage_db/default"
"garage_util/default"
"garage_rpc/default"
"format_table/default"
"garage_table/default"
"garage_block/default"
"garage_model/default"
"garage_api/default"
"garage_web/default"
"garage/default"
"format_table/default"
"k2v-client/default"
],
rustPackages,
@ -33,7 +33,7 @@ args@{
ignoreLockHash,
}:
let
nixifiedLockHash = "b73d35e98dc62acc3b01aba2cb825ba6e99217e46781b8c59f8e0ceef34e79d6";
nixifiedLockHash = "1a87886681a3ef0b83c95addc26674a538b8a93d35bc80db8998e1fcd0821f6c";
workspaceSrc = if args.workspaceSrc == null then ./. else args.workspaceSrc;
currentLockHash = builtins.hashFile "sha256" (workspaceSrc + /Cargo.lock);
lockHashIgnored = if ignoreLockHash
@ -57,16 +57,16 @@ in
{
cargo2nixVersion = "0.11.0";
workspace = {
garage_db = rustPackages.unknown.garage_db."0.8.4";
garage_util = rustPackages.unknown.garage_util."0.8.4";
garage_rpc = rustPackages.unknown.garage_rpc."0.8.4";
garage_table = rustPackages.unknown.garage_table."0.8.4";
garage_block = rustPackages.unknown.garage_block."0.8.4";
garage_model = rustPackages.unknown.garage_model."0.8.4";
garage_api = rustPackages.unknown.garage_api."0.8.4";
garage_web = rustPackages.unknown.garage_web."0.8.4";
garage = rustPackages.unknown.garage."0.8.4";
garage_db = rustPackages.unknown.garage_db."0.9.0";
garage_util = rustPackages.unknown.garage_util."0.9.0";
garage_rpc = rustPackages.unknown.garage_rpc."0.9.0";
format_table = rustPackages.unknown.format_table."0.1.1";
garage_table = rustPackages.unknown.garage_table."0.9.0";
garage_block = rustPackages.unknown.garage_block."0.9.0";
garage_model = rustPackages.unknown.garage_model."0.9.0";
garage_api = rustPackages.unknown.garage_api."0.9.0";
garage_web = rustPackages.unknown.garage_web."0.9.0";
garage = rustPackages.unknown.garage."0.9.0";
k2v-client = rustPackages.unknown.k2v-client."0.0.4";
};
"registry+https://github.com/rust-lang/crates.io-index".addr2line."0.21.0" = overridableMkRustCrate (profileName: rec {
@ -98,12 +98,12 @@ in
(lib.optional (rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "std")
];
dependencies = {
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "cfg_if" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".cfg-if."1.0.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "cfg_if" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".cfg-if."1.0.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "getrandom" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".getrandom."0.2.10" { inherit profileName; }).out;
${ if (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") && !((hostPlatform.parsed.cpu.name == "armv6l" || hostPlatform.parsed.cpu.name == "armv7l") && hostPlatform.parsed.kernel.name == "none") then "once_cell" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".once_cell."1.18.0" { inherit profileName; }).out;
${ if (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") && !((hostPlatform.parsed.cpu.name == "armv6l" || hostPlatform.parsed.cpu.name == "armv7l") && hostPlatform.parsed.kernel.name == "none") then "once_cell" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".once_cell."1.18.0" { inherit profileName; }).out;
};
buildDependencies = {
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "version_check" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".version_check."0.9.4" { profileName = "__noProfile"; }).out;
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "version_check" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".version_check."0.9.4" { profileName = "__noProfile"; }).out;
};
});
@ -128,7 +128,7 @@ in
registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5"; };
features = builtins.concatLists [
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite") "alloc")
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "alloc")
];
});
@ -824,7 +824,7 @@ in
registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad"; };
dependencies = {
${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "serde" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.188" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "serde" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.188" { inherit profileName; }).out;
};
});
@ -888,8 +888,8 @@ in
registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "17febce684fd15d89027105661fec94afb475cb995fbc59d2865198446ba2eea"; };
features = builtins.concatLists [
(lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "extern_crate_alloc")
(lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "extern_crate_std")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "extern_crate_alloc")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "extern_crate_std")
];
});
@ -1182,13 +1182,13 @@ in
registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add"; };
features = builtins.concatLists [
(lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "alloc")
(lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "default")
(lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "std")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "alloc")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "default")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "std")
];
dependencies = {
${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "cfg_if" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".cfg-if."1.0.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "crossbeam_utils" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crossbeam-utils."0.8.16" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "cfg_if" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".cfg-if."1.0.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "crossbeam_utils" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crossbeam-utils."0.8.16" { inherit profileName; }).out;
};
});
@ -1459,8 +1459,8 @@ in
registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7"; };
features = builtins.concatLists [
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite") "default")
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite") "std")
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "default")
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "std")
];
});
@ -1705,9 +1705,9 @@ in
};
});
"unknown".garage."0.8.4" = overridableMkRustCrate (profileName: rec {
"unknown".garage."0.9.0" = overridableMkRustCrate (profileName: rec {
name = "garage";
version = "0.8.4";
version = "0.9.0";
registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/garage");
features = builtins.concatLists [
@ -1716,13 +1716,13 @@ in
(lib.optional (rootFeatures' ? "garage/default") "default")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/k2v") "k2v")
(lib.optional (rootFeatures' ? "garage/kubernetes-discovery") "kubernetes-discovery")
(lib.optional (rootFeatures' ? "garage/lmdb") "lmdb")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb") "lmdb")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics") "metrics")
(lib.optional (rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/telemetry-otlp") "opentelemetry-otlp")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-prometheus") "opentelemetry-prometheus")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/prometheus") "prometheus")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled") "sled")
(lib.optional (rootFeatures' ? "garage/sqlite") "sqlite")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite") "sqlite")
(lib.optional (rootFeatures' ? "garage/system-libs") "system-libs")
(lib.optional (rootFeatures' ? "garage/telemetry-otlp") "telemetry-otlp")
];
@ -1734,18 +1734,18 @@ in
format_table = (rustPackages."unknown".format_table."0.1.1" { inherit profileName; }).out;
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.28" { inherit profileName; }).out;
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.28" { inherit profileName; }).out;
garage_api = (rustPackages."unknown".garage_api."0.8.4" { inherit profileName; }).out;
garage_block = (rustPackages."unknown".garage_block."0.8.4" { inherit profileName; }).out;
garage_db = (rustPackages."unknown".garage_db."0.8.4" { inherit profileName; }).out;
garage_model = (rustPackages."unknown".garage_model."0.8.4" { inherit profileName; }).out;
garage_rpc = (rustPackages."unknown".garage_rpc."0.8.4" { inherit profileName; }).out;
garage_table = (rustPackages."unknown".garage_table."0.8.4" { inherit profileName; }).out;
garage_util = (rustPackages."unknown".garage_util."0.8.4" { inherit profileName; }).out;
garage_web = (rustPackages."unknown".garage_web."0.8.4" { inherit profileName; }).out;
garage_api = (rustPackages."unknown".garage_api."0.9.0" { inherit profileName; }).out;
garage_block = (rustPackages."unknown".garage_block."0.9.0" { inherit profileName; }).out;
garage_db = (rustPackages."unknown".garage_db."0.9.0" { inherit profileName; }).out;
garage_model = (rustPackages."unknown".garage_model."0.9.0" { inherit profileName; }).out;
garage_rpc = (rustPackages."unknown".garage_rpc."0.9.0" { inherit profileName; }).out;
garage_table = (rustPackages."unknown".garage_table."0.9.0" { inherit profileName; }).out;
garage_util = (rustPackages."unknown".garage_util."0.9.0" { inherit profileName; }).out;
garage_web = (rustPackages."unknown".garage_web."0.9.0" { inherit profileName; }).out;
git_version = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".git-version."0.3.5" { inherit profileName; }).out;
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
sodiumoxide = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".kuska-sodiumoxide."0.2.5-0" { inherit profileName; }).out;
netapp = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.5.2" { inherit profileName; }).out;
netapp = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.10.0" { inherit profileName; }).out;
opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/telemetry-otlp" then "opentelemetry_otlp" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry-otlp."0.10.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/opentelemetry-prometheus" then "opentelemetry_prometheus" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry-prometheus."0.10.0" { inherit profileName; }).out;
@ -1777,9 +1777,9 @@ in
};
});
"unknown".garage_api."0.8.4" = overridableMkRustCrate (profileName: rec {
"unknown".garage_api."0.9.0" = overridableMkRustCrate (profileName: rec {
name = "garage_api";
version = "0.8.4";
version = "0.9.0";
registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/api");
features = builtins.concatLists [
@ -1798,11 +1798,11 @@ in
form_urlencoded = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".form_urlencoded."1.2.0" { inherit profileName; }).out;
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.28" { inherit profileName; }).out;
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.28" { inherit profileName; }).out;
garage_block = (rustPackages."unknown".garage_block."0.8.4" { inherit profileName; }).out;
garage_model = (rustPackages."unknown".garage_model."0.8.4" { inherit profileName; }).out;
garage_rpc = (rustPackages."unknown".garage_rpc."0.8.4" { inherit profileName; }).out;
garage_table = (rustPackages."unknown".garage_table."0.8.4" { inherit profileName; }).out;
garage_util = (rustPackages."unknown".garage_util."0.8.4" { inherit profileName; }).out;
garage_block = (rustPackages."unknown".garage_block."0.9.0" { inherit profileName; }).out;
garage_model = (rustPackages."unknown".garage_model."0.9.0" { inherit profileName; }).out;
garage_rpc = (rustPackages."unknown".garage_rpc."0.9.0" { inherit profileName; }).out;
garage_table = (rustPackages."unknown".garage_table."0.9.0" { inherit profileName; }).out;
garage_util = (rustPackages."unknown".garage_util."0.9.0" { inherit profileName; }).out;
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
hmac = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hmac."0.12.1" { inherit profileName; }).out;
http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.9" { inherit profileName; }).out;
@ -1832,9 +1832,9 @@ in
};
});
"unknown".garage_block."0.8.4" = overridableMkRustCrate (profileName: rec {
"unknown".garage_block."0.9.0" = overridableMkRustCrate (profileName: rec {
name = "garage_block";
version = "0.8.4";
version = "0.9.0";
registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/block");
features = builtins.concatLists [
@ -1845,12 +1845,13 @@ in
async_compression = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".async-compression."0.4.1" { inherit profileName; }).out;
async_trait = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".async-trait."0.1.73" { profileName = "__noProfile"; }).out;
bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.4.0" { inherit profileName; }).out;
bytesize = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytesize."1.3.0" { inherit profileName; }).out;
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.28" { inherit profileName; }).out;
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.28" { inherit profileName; }).out;
garage_db = (rustPackages."unknown".garage_db."0.8.4" { inherit profileName; }).out;
garage_rpc = (rustPackages."unknown".garage_rpc."0.8.4" { inherit profileName; }).out;
garage_table = (rustPackages."unknown".garage_table."0.8.4" { inherit profileName; }).out;
garage_util = (rustPackages."unknown".garage_util."0.8.4" { inherit profileName; }).out;
garage_db = (rustPackages."unknown".garage_db."0.9.0" { inherit profileName; }).out;
garage_rpc = (rustPackages."unknown".garage_rpc."0.9.0" { inherit profileName; }).out;
garage_table = (rustPackages."unknown".garage_table."0.9.0" { inherit profileName; }).out;
garage_util = (rustPackages."unknown".garage_util."0.9.0" { inherit profileName; }).out;
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
@ -1863,9 +1864,9 @@ in
};
});
"unknown".garage_db."0.8.4" = overridableMkRustCrate (profileName: rec {
"unknown".garage_db."0.9.0" = overridableMkRustCrate (profileName: rec {
name = "garage_db";
version = "0.8.4";
version = "0.9.0";
registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/db");
features = builtins.concatLists [
@ -1873,20 +1874,20 @@ in
(lib.optional (rootFeatures' ? "garage_db/clap" || rootFeatures' ? "garage_db/cli") "clap")
(lib.optional (rootFeatures' ? "garage_db/cli") "cli")
(lib.optional (rootFeatures' ? "garage_db/default") "default")
(lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "heed")
(lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "lmdb")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "heed")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "lmdb")
(lib.optional (rootFeatures' ? "garage_db/cli" || rootFeatures' ? "garage_db/pretty_env_logger") "pretty_env_logger")
(lib.optional (rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite") "rusqlite")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "rusqlite")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sled") "sled")
(lib.optional (rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite") "sqlite")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "sqlite")
];
dependencies = {
${ if rootFeatures' ? "garage_db/clap" || rootFeatures' ? "garage_db/cli" then "clap" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".clap."4.4.0" { inherit profileName; }).out;
err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "heed" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".heed."0.11.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "heed" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".heed."0.11.0" { inherit profileName; }).out;
hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out;
${ if rootFeatures' ? "garage_db/cli" || rootFeatures' ? "garage_db/pretty_env_logger" then "pretty_env_logger" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".pretty_env_logger."0.5.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite" then "rusqlite" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rusqlite."0.29.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "rusqlite" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rusqlite."0.29.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sled" then "sled" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".sled."0.34.7" { inherit profileName; }).out;
tracing = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tracing."0.1.37" { inherit profileName; }).out;
};
@ -1895,33 +1896,34 @@ in
};
});
"unknown".garage_model."0.8.4" = overridableMkRustCrate (profileName: rec {
"unknown".garage_model."0.9.0" = overridableMkRustCrate (profileName: rec {
name = "garage_model";
version = "0.8.4";
version = "0.9.0";
registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/model");
features = builtins.concatLists [
(lib.optional (rootFeatures' ? "garage_model/default") "default")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/k2v" || rootFeatures' ? "garage_api/k2v" || rootFeatures' ? "garage_model/k2v") "k2v")
(lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_model/lmdb") "lmdb")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "lmdb")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sled") "sled")
(lib.optional (rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_model/sqlite") "sqlite")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "sqlite")
];
dependencies = {
arc_swap = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".arc-swap."1.6.0" { inherit profileName; }).out;
async_trait = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".async-trait."0.1.73" { profileName = "__noProfile"; }).out;
base64 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".base64."0.21.3" { inherit profileName; }).out;
blake2 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".blake2."0.10.6" { inherit profileName; }).out;
chrono = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".chrono."0.4.26" { inherit profileName; }).out;
err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.28" { inherit profileName; }).out;
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.28" { inherit profileName; }).out;
garage_block = (rustPackages."unknown".garage_block."0.8.4" { inherit profileName; }).out;
garage_db = (rustPackages."unknown".garage_db."0.8.4" { inherit profileName; }).out;
garage_rpc = (rustPackages."unknown".garage_rpc."0.8.4" { inherit profileName; }).out;
garage_table = (rustPackages."unknown".garage_table."0.8.4" { inherit profileName; }).out;
garage_util = (rustPackages."unknown".garage_util."0.8.4" { inherit profileName; }).out;
garage_block = (rustPackages."unknown".garage_block."0.9.0" { inherit profileName; }).out;
garage_db = (rustPackages."unknown".garage_db."0.9.0" { inherit profileName; }).out;
garage_rpc = (rustPackages."unknown".garage_rpc."0.9.0" { inherit profileName; }).out;
garage_table = (rustPackages."unknown".garage_table."0.9.0" { inherit profileName; }).out;
garage_util = (rustPackages."unknown".garage_util."0.9.0" { inherit profileName; }).out;
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
netapp = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.5.2" { inherit profileName; }).out;
netapp = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.10.0" { inherit profileName; }).out;
opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.188" { inherit profileName; }).out;
@ -1932,9 +1934,9 @@ in
};
});
"unknown".garage_rpc."0.8.4" = overridableMkRustCrate (profileName: rec {
"unknown".garage_rpc."0.9.0" = overridableMkRustCrate (profileName: rec {
name = "garage_rpc";
version = "0.8.4";
version = "0.9.0";
registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/rpc");
features = builtins.concatLists [
@ -1951,17 +1953,20 @@ in
arc_swap = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".arc-swap."1.6.0" { inherit profileName; }).out;
async_trait = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".async-trait."0.1.73" { profileName = "__noProfile"; }).out;
bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.4.0" { inherit profileName; }).out;
bytesize = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytesize."1.3.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/consul-discovery" || rootFeatures' ? "garage_rpc/consul-discovery" || rootFeatures' ? "garage_rpc/err-derive" then "err_derive" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
format_table = (rustPackages."unknown".format_table."0.1.1" { inherit profileName; }).out;
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.28" { inherit profileName; }).out;
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.28" { inherit profileName; }).out;
garage_db = (rustPackages."unknown".garage_db."0.8.4" { inherit profileName; }).out;
garage_util = (rustPackages."unknown".garage_util."0.8.4" { inherit profileName; }).out;
garage_db = (rustPackages."unknown".garage_db."0.9.0" { inherit profileName; }).out;
garage_util = (rustPackages."unknown".garage_util."0.9.0" { inherit profileName; }).out;
gethostname = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".gethostname."0.4.3" { inherit profileName; }).out;
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
itertools = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".itertools."0.10.5" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/k8s-openapi" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "k8s_openapi" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".k8s-openapi."0.16.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "kube" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".kube."0.75.0" { inherit profileName; }).out;
sodiumoxide = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".kuska-sodiumoxide."0.2.5-0" { inherit profileName; }).out;
netapp = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.5.2" { inherit profileName; }).out;
netapp = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.10.0" { inherit profileName; }).out;
nix = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".nix."0.27.1" { inherit profileName; }).out;
opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
pnet_datalink = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".pnet_datalink."0.33.0" { inherit profileName; }).out;
@ -1977,9 +1982,9 @@ in
};
});
"unknown".garage_table."0.8.4" = overridableMkRustCrate (profileName: rec {
"unknown".garage_table."0.9.0" = overridableMkRustCrate (profileName: rec {
name = "garage_table";
version = "0.8.4";
version = "0.9.0";
registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/table");
dependencies = {
@ -1988,9 +1993,9 @@ in
bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.4.0" { inherit profileName; }).out;
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.28" { inherit profileName; }).out;
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.28" { inherit profileName; }).out;
garage_db = (rustPackages."unknown".garage_db."0.8.4" { inherit profileName; }).out;
garage_rpc = (rustPackages."unknown".garage_rpc."0.8.4" { inherit profileName; }).out;
garage_util = (rustPackages."unknown".garage_util."0.8.4" { inherit profileName; }).out;
garage_db = (rustPackages."unknown".garage_db."0.9.0" { inherit profileName; }).out;
garage_rpc = (rustPackages."unknown".garage_rpc."0.9.0" { inherit profileName; }).out;
garage_util = (rustPackages."unknown".garage_util."0.9.0" { inherit profileName; }).out;
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out;
opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
@ -2002,9 +2007,9 @@ in
};
});
"unknown".garage_util."0.8.4" = overridableMkRustCrate (profileName: rec {
"unknown".garage_util."0.9.0" = overridableMkRustCrate (profileName: rec {
name = "garage_util";
version = "0.8.4";
version = "0.9.0";
registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/util");
features = builtins.concatLists [
@ -2020,16 +2025,16 @@ in
digest = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".digest."0.10.7" { inherit profileName; }).out;
err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.28" { inherit profileName; }).out;
garage_db = (rustPackages."unknown".garage_db."0.8.4" { inherit profileName; }).out;
garage_db = (rustPackages."unknown".garage_db."0.9.0" { inherit profileName; }).out;
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out;
http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.9" { inherit profileName; }).out;
hyper = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."0.14.27" { inherit profileName; }).out;
lazy_static = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".lazy_static."1.4.0" { inherit profileName; }).out;
netapp = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.5.2" { inherit profileName; }).out;
netapp = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.10.0" { inherit profileName; }).out;
opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
rmp_serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" { inherit profileName; }).out;
rmp_serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."1.1.2" { inherit profileName; }).out;
serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.188" { inherit profileName; }).out;
serde_json = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_json."1.0.105" { inherit profileName; }).out;
sha2 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".sha2."0.10.7" { inherit profileName; }).out;
@ -2046,18 +2051,18 @@ in
};
});
"unknown".garage_web."0.8.4" = overridableMkRustCrate (profileName: rec {
"unknown".garage_web."0.9.0" = overridableMkRustCrate (profileName: rec {
name = "garage_web";
version = "0.8.4";
version = "0.9.0";
registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/web");
dependencies = {
err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.28" { inherit profileName; }).out;
garage_api = (rustPackages."unknown".garage_api."0.8.4" { inherit profileName; }).out;
garage_model = (rustPackages."unknown".garage_model."0.8.4" { inherit profileName; }).out;
garage_table = (rustPackages."unknown".garage_table."0.8.4" { inherit profileName; }).out;
garage_util = (rustPackages."unknown".garage_util."0.8.4" { inherit profileName; }).out;
garage_api = (rustPackages."unknown".garage_api."0.9.0" { inherit profileName; }).out;
garage_model = (rustPackages."unknown".garage_model."0.9.0" { inherit profileName; }).out;
garage_table = (rustPackages."unknown".garage_table."0.9.0" { inherit profileName; }).out;
garage_util = (rustPackages."unknown".garage_util."0.9.0" { inherit profileName; }).out;
http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.9" { inherit profileName; }).out;
hyper = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."0.14.27" { inherit profileName; }).out;
hyperlocal = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyperlocal."0.8.0" { inherit profileName; }).out;
@ -2181,15 +2186,15 @@ in
registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a"; };
features = builtins.concatLists [
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite") "ahash")
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite") "allocator-api2")
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite") "default")
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite") "inline-more")
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "ahash")
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "allocator-api2")
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "default")
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "inline-more")
(lib.optional (rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/telemetry-otlp" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "raw")
];
dependencies = {
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite" then "ahash" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".ahash."0.8.3" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite" then "allocator_api2" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".allocator-api2."0.2.16" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "ahash" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".ahash."0.8.3" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "allocator_api2" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".allocator-api2."0.2.16" { inherit profileName; }).out;
};
});
@ -2199,7 +2204,7 @@ in
registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "312f66718a2d7789ffef4f4b7b213138ed9f1eb3aa1d0d82fc99f88fb3ffd26f"; };
dependencies = {
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite" then "hashbrown" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hashbrown."0.14.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "hashbrown" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hashbrown."0.14.0" { inherit profileName; }).out;
};
});
@ -2229,20 +2234,20 @@ in
registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "269c7486ed6def5d7b59a427cec3e87b4d4dd4381d01e21c8c9f2d3985688392"; };
features = builtins.concatLists [
(lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "lmdb")
(lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "lmdb-rkv-sys")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "lmdb")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "lmdb-rkv-sys")
];
dependencies = {
${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "bytemuck" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytemuck."1.13.1" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "byteorder" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".byteorder."1.4.3" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "heed_traits" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".heed-traits."0.8.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "heed_types" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".heed-types."0.8.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "libc" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.147" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "lmdb_sys" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".lmdb-rkv-sys."0.11.2" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "once_cell" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".once_cell."1.18.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "page_size" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".page_size."0.4.2" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "synchronoise" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".synchronoise."1.0.1" { inherit profileName; }).out;
${ if (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") && hostPlatform.isWindows then "url" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".url."2.4.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "bytemuck" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytemuck."1.13.1" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "byteorder" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".byteorder."1.4.3" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "heed_traits" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".heed-traits."0.8.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "heed_types" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".heed-types."0.8.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "libc" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.147" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "lmdb_sys" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".lmdb-rkv-sys."0.11.2" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "once_cell" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".once_cell."1.18.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "page_size" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".page_size."0.4.2" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "synchronoise" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".synchronoise."1.0.1" { inherit profileName; }).out;
${ if (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") && hostPlatform.isWindows then "url" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".url."2.4.0" { inherit profileName; }).out;
};
});
@ -2259,20 +2264,20 @@ in
registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "9a6cf0a6952fcedc992602d5cddd1e3fff091fbe87d38636e3ec23a31f32acbd"; };
features = builtins.concatLists [
(lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "bincode")
(lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "default")
(lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "serde")
(lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "serde-bincode")
(lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "serde-json")
(lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "serde_json")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "bincode")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "default")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "serde")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "serde-bincode")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "serde-json")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "serde_json")
];
dependencies = {
${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "bincode" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bincode."1.3.3" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "bytemuck" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytemuck."1.13.1" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "byteorder" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".byteorder."1.4.3" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "heed_traits" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".heed-traits."0.8.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "serde" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.188" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "serde_json" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_json."1.0.105" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "bincode" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bincode."1.3.3" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "bytemuck" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytemuck."1.13.1" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "byteorder" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".byteorder."1.4.3" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "heed_traits" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".heed-traits."0.8.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "serde" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.188" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "serde_json" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_json."1.0.105" { inherit profileName; }).out;
};
});
@ -2648,12 +2653,12 @@ in
registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"; };
features = builtins.concatLists [
(lib.optional (rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/telemetry-otlp") "default")
(lib.optional (rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/telemetry-otlp") "use_alloc")
(lib.optional (rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/telemetry-otlp") "use_std")
[ "default" ]
[ "use_alloc" ]
[ "use_std" ]
];
dependencies = {
${ if rootFeatures' ? "garage/opentelemetry-otlp" || rootFeatures' ? "garage/telemetry-otlp" then "either" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".either."1.9.0" { inherit profileName; }).out;
either = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".either."1.9.0" { inherit profileName; }).out;
};
});
@ -3009,15 +3014,15 @@ in
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "bundled")
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "bundled_bindings")
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "cc")
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite") "default")
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite") "min_sqlite_version_3_14_0")
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite") "pkg-config")
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite") "vcpkg")
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "default")
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "min_sqlite_version_3_14_0")
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "pkg-config")
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite") "vcpkg")
];
buildDependencies = {
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs" then "cc" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".cc."1.0.83" { profileName = "__noProfile"; }).out;
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite" then "pkg_config" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".pkg-config."0.3.27" { profileName = "__noProfile"; }).out;
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite" then "vcpkg" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".vcpkg."0.2.15" { profileName = "__noProfile"; }).out;
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "pkg_config" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".pkg-config."0.3.27" { profileName = "__noProfile"; }).out;
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "vcpkg" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".vcpkg."0.2.15" { profileName = "__noProfile"; }).out;
};
});
@ -3047,14 +3052,14 @@ in
registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "61b9ce6b3be08acefa3003c57b7565377432a89ec24476bbe72e11d101f852fe"; };
features = builtins.concatLists [
(lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "default")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") "default")
];
dependencies = {
${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "libc" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.147" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "libc" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.147" { inherit profileName; }).out;
};
buildDependencies = {
${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "cc" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".cc."1.0.83" { profileName = "__noProfile"; }).out;
${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "pkg_config" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".pkg-config."0.3.27" { profileName = "__noProfile"; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "cc" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".cc."1.0.83" { profileName = "__noProfile"; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "pkg_config" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".pkg-config."0.3.27" { profileName = "__noProfile"; }).out;
};
});
@ -3218,11 +3223,11 @@ in
src = fetchCratesIo { inherit name version; sha256 = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a"; };
});
"registry+https://github.com/rust-lang/crates.io-index".netapp."0.5.2" = overridableMkRustCrate (profileName: rec {
"registry+https://github.com/rust-lang/crates.io-index".netapp."0.10.0" = overridableMkRustCrate (profileName: rec {
name = "netapp";
version = "0.5.2";
version = "0.10.0";
registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "4ffe47ac46d3b2ce2f736a70865492df082e042eb2bfdddfca3b8dd66bd9469d"; };
src = fetchCratesIo { inherit name version; sha256 = "0a00b76cec93e3ae68c9ed5f08e27a1507424987ee23d5ec961ebd4da820a265"; };
features = builtins.concatLists [
[ "default" ]
[ "opentelemetry" ]
@ -3244,7 +3249,7 @@ in
opentelemetry_contrib = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry-contrib."0.9.0" { inherit profileName; }).out;
pin_project = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".pin-project."1.1.3" { inherit profileName; }).out;
rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
rmp_serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" { inherit profileName; }).out;
rmp_serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."1.1.2" { inherit profileName; }).out;
serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.188" { inherit profileName; }).out;
tokio = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.32.0" { inherit profileName; }).out;
tokio_stream = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio-stream."0.1.14" { inherit profileName; }).out;
@ -3479,7 +3484,7 @@ in
[ "default" ]
[ "race" ]
[ "std" ]
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "unstable")
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" || rootFeatures' ? "garage_rpc/kube" || rootFeatures' ? "garage_rpc/kubernetes-discovery") "unstable")
];
});
@ -3617,8 +3622,8 @@ in
registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "eebde548fbbf1ea81a99b128872779c437752fb99f217c45245e1a61dcd9edcd"; };
dependencies = {
${ if (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") && hostPlatform.isUnix then "libc" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.147" { inherit profileName; }).out;
${ if (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") && hostPlatform.isWindows then "winapi" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".winapi."0.3.9" { inherit profileName; }).out;
${ if (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") && hostPlatform.isUnix then "libc" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.147" { inherit profileName; }).out;
${ if (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb") && hostPlatform.isWindows then "winapi" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".winapi."0.3.9" { inherit profileName; }).out;
};
});
@ -4309,11 +4314,11 @@ in
};
});
"registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" = overridableMkRustCrate (profileName: rec {
"registry+https://github.com/rust-lang/crates.io-index".rmp-serde."1.1.2" = overridableMkRustCrate (profileName: rec {
name = "rmp-serde";
version = "0.15.5";
version = "1.1.2";
registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "723ecff9ad04f4ad92fe1c8ca6c20d2196d9286e9c60727c4cb5511629260e9d"; };
src = fetchCratesIo { inherit name version; sha256 = "bffea85eea980d8a74453e5d02a8d93028f3c34725de143085a844ebe953258a"; };
dependencies = {
byteorder = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".byteorder."1.4.3" { inherit profileName; }).out;
rmp = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp."0.8.12" { inherit profileName; }).out;
@ -4346,12 +4351,12 @@ in
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "modern_sqlite")
];
dependencies = {
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite" then "bitflags" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bitflags."2.4.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite" then "fallible_iterator" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".fallible-iterator."0.2.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite" then "fallible_streaming_iterator" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".fallible-streaming-iterator."0.1.9" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite" then "hashlink" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hashlink."0.8.3" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite" then "libsqlite3_sys" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libsqlite3-sys."0.26.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite" then "smallvec" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".smallvec."1.11.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "bitflags" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bitflags."2.4.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "fallible_iterator" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".fallible-iterator."0.2.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "fallible_streaming_iterator" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".fallible-streaming-iterator."0.1.9" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "hashlink" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hashlink."0.8.3" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "libsqlite3_sys" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libsqlite3-sys."0.26.0" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/sqlite" then "smallvec" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".smallvec."1.11.0" { inherit profileName; }).out;
};
});
@ -5015,7 +5020,7 @@ in
registry = "registry+https://github.com/rust-lang/crates.io-index";
src = fetchCratesIo { inherit name version; sha256 = "3dbc01390fc626ce8d1cffe3376ded2b72a11bb70e1c75f404a210e4daa4def2"; };
dependencies = {
${ if rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb" then "crossbeam_queue" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crossbeam-queue."0.3.8" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/default" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/default" || rootFeatures' ? "garage_model/lmdb" then "crossbeam_queue" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crossbeam-queue."0.3.8" { inherit profileName; }).out;
};
});


@ -18,14 +18,14 @@ default-members = ["src/garage"]
[workspace.dependencies]
format_table = { version = "0.1.1", path = "src/format-table" }
garage_api = { version = "0.8.4", path = "src/api" }
garage_block = { version = "0.8.4", path = "src/block" }
garage_db = { version = "0.8.4", path = "src/db", default-features = false }
garage_model = { version = "0.8.4", path = "src/model", default-features = false }
garage_rpc = { version = "0.8.4", path = "src/rpc" }
garage_table = { version = "0.8.4", path = "src/table" }
garage_util = { version = "0.8.4", path = "src/util" }
garage_web = { version = "0.8.4", path = "src/web" }
garage_api = { version = "0.9.0", path = "src/api" }
garage_block = { version = "0.9.0", path = "src/block" }
garage_db = { version = "0.9.0", path = "src/db", default-features = false }
garage_model = { version = "0.9.0", path = "src/model", default-features = false }
garage_rpc = { version = "0.9.0", path = "src/rpc" }
garage_table = { version = "0.9.0", path = "src/table" }
garage_util = { version = "0.9.0", path = "src/util" }
garage_web = { version = "0.9.0", path = "src/web" }
k2v-client = { version = "0.0.4", path = "src/k2v-client" }
[profile.dev]


@ -37,7 +37,7 @@ Second, we suppose you have created a key and a bucket.
As a reminder, you can create a key for your nextcloud instance as follows:
```bash
garage key new --name nextcloud-key
garage key create nextcloud-key
```
Keep the Key ID and the Secret key in a pad; they will be needed later.
@ -139,7 +139,7 @@ a reasonable trade-off for some instances.
Create a key for Peertube:
```bash
garage key new --name peertube-key
garage key create peertube-key
```
Keep the Key ID and the Secret key in a pad; they will be needed later.
@ -253,7 +253,7 @@ As such, your Garage cluster should be configured appropriately for good perform
This is the usual Garage setup:
```bash
garage key new --name mastodon-key
garage key create mastodon-key
garage bucket create mastodon-data
garage bucket allow mastodon-data --read --write --key mastodon-key
```
@ -379,7 +379,7 @@ Supposing you have a working synapse installation, you can add the module with p
Now create a bucket and a key for your matrix instance (note your Key ID and Secret Key somewhere; they will be needed later):
```bash
garage key new --name matrix-key
garage key create matrix-key
garage bucket create matrix
garage bucket allow matrix --read --write --key matrix-key
```


@ -54,7 +54,7 @@ how to configure this.
Create your key and bucket:
```bash
garage key new my-key
garage key create my-key
garage bucket create backup
garage bucket allow backup --read --write --key my-key
```


@ -23,7 +23,7 @@ You can configure a different target for each data type (check `[lfs]` and `[att
Let's start by creating a key and a bucket (your key ID and secret will be needed later; keep them somewhere):
```bash
garage key new --name gitea-key
garage key create gitea-key
garage bucket create gitea
garage bucket allow gitea --read --write --key gitea-key
```
@ -118,7 +118,7 @@ through another support, like a git repository.
As a first step, we will need to create a bucket on Garage and enable website access on it:
```bash
garage key new --name nix-key
garage key create nix-key
garage bucket create nix.example.com
garage bucket allow nix.example.com --read --write --key nix-key
garage bucket website nix.example.com --allow


@ -19,9 +19,10 @@ To run a real-world deployment, make sure the following conditions are met:
- You have at least three machines with sufficient storage space available.
- Each machine has a public IP address which is reachable by other machines. It
is highly recommended that you use IPv6 for this end-to-end connectivity. If
IPv6 is not available, then using a mesh VPN such as
- Each machine has an IP address which makes it directly reachable by all other machines.
In many cases, nodes will be behind a NAT and will not each have a public
IPv4 address. In this case, it is recommended that you use IPv6 for this
end-to-end connectivity if it is available. Otherwise, using a mesh VPN such as
[Nebula](https://github.com/slackhq/nebula) or
[Yggdrasil](https://yggdrasil-network.github.io/) is an approach to consider,
in addition to building out your own VPN tunneling.
@ -42,7 +43,7 @@ For our example, we will suppose the following infrastructure with IPv6 connecti
| Brussels | Mars | fc00:F::1 | 1.5 TB |
Note that Garage will **always** store the three copies of your data on nodes at different
locations. This means that in the case of this small example, the available capacity
locations. This means that in the case of this small example, the usable capacity
of the cluster is in fact only 1.5 TB, because nodes in Brussels can't store more than that.
This also means that nodes in Paris and London will be under-utilized.
To make better use of the available hardware, you should ensure that the capacity
@ -75,28 +76,23 @@ to store 2 TB of data in total.
- For the metadata storage, Garage does not do checksumming and integrity
verification on its own. If you are afraid of bitrot/data corruption,
put your metadata directory on a BTRFS partition. Otherwise, just use regular
put your metadata directory on a ZFS or BTRFS partition. Otherwise, just use regular
EXT4 or XFS.
- Having a single server with several storage drives is currently not very well
supported in Garage ([#218](https://git.deuxfleurs.fr/Deuxfleurs/garage/issues/218)).
For an easy setup, just put all your drives in a RAID0 or a ZFS RAIDZ array.
If you're adventurous, you can try to format each of your disk as
a separate XFS partition, and then run one `garage` daemon per disk drive,
or use something like [`mergerfs`](https://github.com/trapexit/mergerfs) to merge
all your disks in a single union filesystem that spreads load over them.
- Servers with multiple HDDs are supported natively by Garage without resorting
to RAID, see [our dedicated documentation page](@/documentation/operations/multi-hdd.md).
## Get a Docker image
Our docker image is currently named `dxflrs/garage` and is stored on the [Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).
We encourage you to use a fixed tag (eg. `v0.8.0`) and not the `latest` tag.
For this example, we will use the latest published version at the time of the writing which is `v0.8.0` but it's up to you
We encourage you to use a fixed tag (e.g. `v0.9.0`) and not the `latest` tag.
For this example, we will use the latest published version at the time of writing, which is `v0.9.0`, but it's up to you
to check [the most recent versions on the Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).
For example:
```
sudo docker pull dxflrs/garage:v0.8.0
sudo docker pull dxflrs/garage:v0.9.0
```
## Deploying and configuring Garage
@ -161,12 +157,13 @@ docker run \
-v /etc/garage.toml:/etc/garage.toml \
-v /var/lib/garage/meta:/var/lib/garage/meta \
-v /var/lib/garage/data:/var/lib/garage/data \
dxflrs/garage:v0.8.0
dxflrs/garage:v0.9.0
```
It should be restarted automatically at each reboot.
Please note that we use host networking as otherwise Docker containers
can not communicate with IPv6.
With this command line, Garage should be started automatically at each boot.
Please note that we use host networking as otherwise the network indirection
added by Docker would prevent Garage nodes from communicating with one another
(especially if using IPv6).
If you want to use `docker-compose`, you may use the following `docker-compose.yml` file as a reference:
@ -174,7 +171,7 @@ If you want to use `docker-compose`, you may use the following `docker-compose.y
version: "3"
services:
garage:
image: dxflrs/garage:v0.8.0
image: dxflrs/garage:v0.9.0
network_mode: "host"
restart: unless-stopped
volumes:
@ -183,10 +180,12 @@ services:
- /var/lib/garage/data:/var/lib/garage/data
```
Upgrading between Garage versions should be supported transparently,
but please check the relase notes before doing so!
To upgrade, simply stop and remove this container and
start again the command with a new version of Garage.
If you wish to upgrade your cluster, make sure to read the corresponding
[documentation page](@/documentation/operations/upgrading.md) first, as well as
the documentation relevant to your version of Garage in the case of major
upgrades. With the containerized setup proposed here, the upgrade process
will require stopping and removing the existing container, and re-creating it
with the upgraded version.
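For reference, a minimal sketch of such an upgrade could look as follows (the container name `garaged` is an example; adapt the mounts and flags to the `docker run` command you actually use):
```bash
# Stop and remove the old container, then re-create it from the new image tag.
sudo docker stop garaged && sudo docker rm garaged
sudo docker pull dxflrs/garage:v0.9.0
sudo docker run -d --name garaged \
  --restart always \
  --network host \
  -v /etc/garage.toml:/etc/garage.toml \
  -v /var/lib/garage/meta:/var/lib/garage/meta \
  -v /var/lib/garage/data:/var/lib/garage/data \
  dxflrs/garage:v0.9.0
```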
## Controlling the daemon
@ -270,12 +269,12 @@ of a role that is assigned to each active cluster node.
For our example, we will suppose we have the following infrastructure
(Capacity, Identifier and Zone are values specific to Garage, described in the following):
| Location | Name | Disk Space | `Capacity` | `Identifier` | `Zone` |
|----------|---------|------------|------------|--------------|--------------|
| Paris | Mercury | 1 TB | `10` | `563e` | `par1` |
| Paris | Venus | 2 TB | `20` | `86f0` | `par1` |
| London | Earth | 2 TB | `20` | `6814` | `lon1` |
| Brussels | Mars | 1.5 TB | `15` | `212f` | `bru1` |
| Location | Name | Disk Space | Identifier | Zone (`-z`) | Capacity (`-c`) |
|----------|---------|------------|------------|-------------|-----------------|
| Paris | Mercury | 1 TB | `563e` | `par1` | `1T` |
| Paris | Venus | 2 TB | `86f0` | `par1` | `2T` |
| London | Earth | 2 TB | `6814` | `lon1` | `2T` |
| Brussels | Mars | 1.5 TB | `212f` | `bru1` | `1.5T` |
#### Node identifiers
@ -297,6 +296,8 @@ garage status
It will display the IP address associated with each node;
from the IP address you will be able to recognize the node.
We will now use the `garage layout assign` command to configure the correct parameters for each node.
#### Zones
Zones are simply user-chosen identifiers that designate a group of servers which are logically grouped together.
@ -306,29 +307,29 @@ In most cases, a zone will correspond to a geographical location (i.e. a datacen
Behind the scenes, Garage will use the zone definitions to try to store the same data in different zones,
in order to provide high availability despite the failure of a zone.
Zones are passed to Garage using the `-z` flag of `garage layout assign` (see below).
#### Capacity
Garage reasons on an abstract metric about disk storage that is named the *capacity* of a node.
The capacity configured in Garage must be proportional to the disk space dedicated to the node.
Garage needs to know the storage capacity (disk space) it can/should use on
each node, to be able to correctly balance data.
Capacity values must be **integers** but can be given any signification.
Here we chose that 1 unit of capacity = 100 GB.
Capacity values are expressed in bytes and are passed to Garage using the `-c` flag of `garage layout assign` (see below).
Note that the amount of data stored by Garage on each server may not be strictly proportional to
its capacity value, as Garage will prioritize having 3 copies of the data in different zones,
even if this means that capacities will not be strictly respected. For example, in the example above,
nodes Earth and Mars will each always store a copy of everything, and the third copy will
have a 66% chance of being stored by Venus and a 33% chance of being stored by Mercury.
#### Tags
You can add additional tags to nodes using the `-t` flag of `garage layout assign` (see below).
Tags have no specific meaning for Garage and can be used at your convenience.
#### Injecting the topology
Given the information above, we will configure our cluster as follows:
```bash
garage layout assign 563e -z par1 -c 10 -t mercury
garage layout assign 86f0 -z par1 -c 20 -t venus
garage layout assign 6814 -z lon1 -c 20 -t earth
garage layout assign 212f -z bru1 -c 15 -t mars
garage layout assign 563e -z par1 -c 1T -t mercury
garage layout assign 86f0 -z par1 -c 2T -t venus
garage layout assign 6814 -z lon1 -c 2T -t earth
garage layout assign 212f -z bru1 -c 1.5T -t mars
```
At this point, the changes in the cluster layout have not yet been applied.
@ -338,6 +339,7 @@ To show the new layout that will be applied, call:
garage layout show
```
Make sure to read carefully the output of `garage layout show`.
Once you are satisfied with your new layout, apply it with:
```bash

View File

@ -91,6 +91,16 @@ is definitely lost, then there is no other choice than to declare your S3 object
as unrecoverable, and to delete them properly from the data store. This can be done
using the `garage block purge` command.
## Rebalancing data directories
In [multi-HDD setups](@/documentation/operations/multi-hdd.md), to ensure that
data blocks are well balanced between storage locations, you may run a
rebalance operation using `garage repair rebalance`. This is useful when
adding storage locations or when the capacities of the storage locations have been
changed. Once this is finished, Garage will know of only a single possible
location for each block, which can increase access speed. This
operation will also move out all data from locations marked as read-only.
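As a minimal sketch, and assuming the `--yes` confirmation flag used by the other repair procedures also applies here, an active rebalance could be launched with:
```bash
# Rebalance the data directories of this node
# (add --all-nodes to launch the operation on every node of the cluster).
garage repair --yes rebalance
```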
# Metadata operations
@ -114,4 +124,3 @@ in your cluster, you can run one of the following repair procedures:
- `garage repair versions`: checks that all versions belong to a non-deleted object, and purges any orphan version
- `garage repair block_refs`: checks that all block references belong to a non-deleted object version, and purges any orphan block reference (this will then allow the blocks to be garbage-collected)

View File

@ -9,18 +9,30 @@ a certain capacity, or a gateway node that does not store data and is only
used as an API entry point for faster cluster access.
An introduction to building cluster layouts can be found in the [production deployment](@/documentation/cookbook/real-world.md) page.
In Garage, all of the data that can be stored in a given cluster is divided
into slices which we call *partitions*. Each partition is stored by
one or several nodes in the cluster
(see [`replication_mode`](@/documentation/reference-manual/configuration.md#replication-mode)).
The layout determines the correspondence between these partitions,
which exist on a logical level, and actual storage nodes.
## How cluster layouts work in Garage
In Garage, a cluster layout is composed of the following components:
A cluster layout is composed of the following components:
- a table of roles assigned to nodes
- a table of roles assigned to nodes, defined by the user
- an optimal assignation of partitions to nodes, computed by an algorithm that is run once when calling `garage layout apply` or the ApplyClusterLayout API endpoint
- a version number
Garage nodes will always use the cluster layout with the highest version number.
Garage nodes also maintain and synchronize between them a set of proposed role
changes that haven't yet been applied. These changes will be applied (or
canceled) in the next version of the layout
canceled) in the next version of the layout.
All operations on the layout can be realized using the `garage` CLI or using the
[administration API endpoint](@/documentation/reference-manual/admin-api.md).
We give here a description of the CLI commands; the admin API semantics are very similar.
The following commands insert modifications to the set of proposed role changes
for the next layout version (but they do not create the new layout immediately):
@ -51,7 +63,7 @@ commands will fail otherwise.
## Warnings about Garage cluster layout management
**Warning: never make several calls to `garage layout apply` or `garage layout
**⚠️ Never make several calls to `garage layout apply` or `garage layout
revert` with the same value of the `--version` flag. Doing so can lead to the
creation of several different layouts with the same version number, in which
case your Garage cluster will become inconsistent until fixed.** If a call to
@ -65,13 +77,198 @@ shell, you shouldn't have much issues as long as you run commands one after
the other and take care of checking the output of `garage layout show`
before applying any changes.
If you are using the `garage` CLI to script layout changes, follow the following recommendations:
If you are using the `garage` CLI or the admin API to script layout changes,
follow these recommendations:
- Make all of your `garage` CLI calls to the same RPC host. Do not use the
`garage` CLI to connect to individual nodes to send them each a piece of the
layout changes you are making, as the changes propagate asynchronously
between nodes and might not all be taken into account at the time when the
new layout is applied.
- If using the CLI, make all of your `garage` CLI calls to the same RPC host.
If using the admin API, make all of your API calls to the same Garage node. Do
not connect to individual nodes to send them each a piece of the layout changes
you are making, as the changes propagate asynchronously between nodes and might
not all be taken into account at the time when the new layout is applied.
- **Only call `garage layout apply` once**, and call it **strictly after** all
of the `layout assign` and `layout remove` commands have returned.
- **Only call `garage layout apply`/ApplyClusterLayout once**, and call it
**strictly after** all of the `layout assign` and `layout remove`
commands/UpdateClusterLayout API calls have returned.
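To make these recommendations concrete, here is a minimal sketch of a scripted layout change using the CLI (the node identifiers, zones and capacities are taken from the example earlier in this document; the `--version` value is an assumption and must be 1 + the currently applied layout version):
```bash
#!/bin/sh
set -e  # abort on the first failing command

# Stage all role changes first, always talking to the same RPC host.
garage layout assign 563e -z par1 -c 1T -t mercury
garage layout assign 86f0 -z par1 -c 2T -t venus

# Review the proposed layout before applying anything.
garage layout show

# Call apply exactly once, strictly after all assign/remove commands have returned.
garage layout apply --version 2
```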
## Understanding unexpected layout calculations
When adding, removing or modifying nodes in a cluster layout, sometimes
unexpected assignations of partitions to nodes can occur. These assignations
are in fact normal and logical, given the objectives of the algorithm. Indeed,
**the layout algorithm prioritizes moving less data between nodes over the fact
of achieving equal distribution of load. It also tries to use all links between
pairs of nodes in equal proportions when moving data.** This section presents
two examples and illustrates how one can control Garage's behavior to obtain
the desired results.
### Example 1
In this example, a cluster is originally composed of 3 nodes in 3 different
zones (data centers). The three nodes are of equal capacity; therefore they
are all fully exploited and all store a copy of all of the data in the cluster.
Then, a fourth node of the same size is added in the datacenter `dc1`.
As illustrated by the following, **Garage will by default not store any data on the new node**:
```
$ garage layout show
==== CURRENT CLUSTER LAYOUT ====
ID Tags Zone Capacity Usable capacity
b10c110e4e854e5a node1 dc1 1000.0 MB 1000.0 MB (100.0%)
a235ac7695e0c54d node2 dc2 1000.0 MB 1000.0 MB (100.0%)
62b218d848e86a64 node3 dc3 1000.0 MB 1000.0 MB (100.0%)
Zone redundancy: maximum
Current cluster layout version: 6
==== STAGED ROLE CHANGES ====
ID Tags Zone Capacity
a11c7cf18af29737 node4 dc1 1000.0 MB
==== NEW CLUSTER LAYOUT AFTER APPLYING CHANGES ====
ID Tags Zone Capacity Usable capacity
b10c110e4e854e5a node1 dc1 1000.0 MB 1000.0 MB (100.0%)
a11c7cf18af29737 node4 dc1 1000.0 MB 0 B (0.0%)
a235ac7695e0c54d node2 dc2 1000.0 MB 1000.0 MB (100.0%)
62b218d848e86a64 node3 dc3 1000.0 MB 1000.0 MB (100.0%)
Zone redundancy: maximum
==== COMPUTATION OF A NEW PARTITION ASSIGNATION ====
Partitions are replicated 3 times on at least 3 distinct zones.
Optimal partition size: 3.9 MB (3.9 MB in previous layout)
Usable capacity / total cluster capacity: 3.0 GB / 4.0 GB (75.0 %)
Effective capacity (replication factor 3): 1000.0 MB
A total of 0 new copies of partitions need to be transferred.
dc1 Tags Partitions Capacity Usable capacity
b10c110e4e854e5a node1 256 (0 new) 1000.0 MB 1000.0 MB (100.0%)
a11c7cf18af29737 node4 0 (0 new) 1000.0 MB 0 B (0.0%)
TOTAL 256 (256 unique) 2.0 GB 1000.0 MB (50.0%)
dc2 Tags Partitions Capacity Usable capacity
a235ac7695e0c54d node2 256 (0 new) 1000.0 MB 1000.0 MB (100.0%)
TOTAL 256 (256 unique) 1000.0 MB 1000.0 MB (100.0%)
dc3 Tags Partitions Capacity Usable capacity
62b218d848e86a64 node3 256 (0 new) 1000.0 MB 1000.0 MB (100.0%)
TOTAL 256 (256 unique) 1000.0 MB 1000.0 MB (100.0%)
```
While unexpected, this is logical because of the following facts:
- storing some data on the new node does not help increase the total quantity
of data that can be stored on the cluster, as the two other zones (`dc2` and
`dc3`) still need to store a full copy of everything, and their capacity is
still the same;
- there is therefore no need to move any data to the new node, as this would be pointless;
- moving data to the new node has a cost, which the algorithm decides not to pay if it is not necessary.
This distribution of data may however not be what the administrator wanted: if
they added a new node to `dc1`, it might be because the existing node is too
slow, and they wish to divide its load by half. In that case, what they need to
do to force Garage to distribute the data between the two nodes is to attribute
only half of the capacity to each node in `dc1` (in our example, 500M instead of 1G).
In that case, Garage would determine that to be able to store 1G in total, it
would need to store 500M on the old node and 500M on the added one.
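As a minimal CLI sketch of that change (the node identifiers come from the example output above; the `500M` capacity strings and the `--version` value are assumptions):
```bash
# Halve the declared capacity of both dc1 nodes so that the data is split between them.
garage layout assign b10c110e4e854e5a -c 500M   # node1
garage layout assign a11c7cf18af29737 -c 500M   # node4
garage layout show                              # check the recomputed assignation
garage layout apply --version 7                 # the current version is 6 in this example
```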
### Example 2
The following example is a slightly different scenario, where `dc1` had two
nodes that were used at 50%, and `dc2` and `dc3` each have one node that is
100% used. All node capacities are the same.
Then, a node from `dc1` is moved into `dc3`. One could expect that the roles of
`dc1` and `dc3` would simply be swapped: the remaining node in `dc1` would be
used at 100%, and the two nodes now in `dc3` would be used at 50%. Instead,
this happens:
```
==== CURRENT CLUSTER LAYOUT ====
ID Tags Zone Capacity Usable capacity
b10c110e4e854e5a node1 dc1 1000.0 MB 500.0 MB (50.0%)
a11c7cf18af29737 node4 dc1 1000.0 MB 500.0 MB (50.0%)
a235ac7695e0c54d node2 dc2 1000.0 MB 1000.0 MB (100.0%)
62b218d848e86a64 node3 dc3 1000.0 MB 1000.0 MB (100.0%)
Zone redundancy: maximum
Current cluster layout version: 8
==== STAGED ROLE CHANGES ====
ID Tags Zone Capacity
a11c7cf18af29737 node4 dc3 1000.0 MB
==== NEW CLUSTER LAYOUT AFTER APPLYING CHANGES ====
ID Tags Zone Capacity Usable capacity
b10c110e4e854e5a node1 dc1 1000.0 MB 1000.0 MB (100.0%)
a235ac7695e0c54d node2 dc2 1000.0 MB 1000.0 MB (100.0%)
62b218d848e86a64 node3 dc3 1000.0 MB 753.9 MB (75.4%)
a11c7cf18af29737 node4 dc3 1000.0 MB 246.1 MB (24.6%)
Zone redundancy: maximum
==== COMPUTATION OF A NEW PARTITION ASSIGNATION ====
Partitions are replicated 3 times on at least 3 distinct zones.
Optimal partition size: 3.9 MB (3.9 MB in previous layout)
Usable capacity / total cluster capacity: 3.0 GB / 4.0 GB (75.0 %)
Effective capacity (replication factor 3): 1000.0 MB
A total of 128 new copies of partitions need to be transferred.
dc1 Tags Partitions Capacity Usable capacity
b10c110e4e854e5a node1 256 (128 new) 1000.0 MB 1000.0 MB (100.0%)
TOTAL 256 (256 unique) 1000.0 MB 1000.0 MB (100.0%)
dc2 Tags Partitions Capacity Usable capacity
a235ac7695e0c54d node2 256 (0 new) 1000.0 MB 1000.0 MB (100.0%)
TOTAL 256 (256 unique) 1000.0 MB 1000.0 MB (100.0%)
dc3 Tags Partitions Capacity Usable capacity
62b218d848e86a64 node3 193 (0 new) 1000.0 MB 753.9 MB (75.4%)
a11c7cf18af29737 node4 63 (0 new) 1000.0 MB 246.1 MB (24.6%)
TOTAL 256 (256 unique) 2.0 GB 1000.0 MB (50.0%)
```
As we can see, the node that was moved to `dc3` (node4) is only used at 25% (approximately),
whereas the node that was already in `dc3` (node3) is used at 75%.
This can be explained by the following:
- node1 will now be the only node remaining in `dc1`, thus it has to store all
of the data in the cluster. Since it was storing only half of it before, it has
to retrieve the other half from other nodes in the cluster.
- The data which it does not have is entirely stored by the other node that was
in `dc1` and that is now in `dc3` (node4). There is also a copy of it on node2
and node3 since both these nodes have a copy of everything.
- node3 and node4 are the two nodes that will now be in a datacenter that is
under-utilized (`dc3`); this means that they are the two candidates from which
data can be removed to be moved to node1.
- Garage will move data in equal proportions from all possible sources, in this
case it means that it will transfer 25% of the entire data set from node3 to
node1 and another 25% from node4 to node1.
This explains why node3 ends with 75% utilization (100% from before minus 25%
that is moved to node1), and node4 ends with 25% (50% from before minus 25%
that is moved to node1).
This illustrates the second principle of the layout computation: **if there is
a choice in moving data out of some nodes, then all links between pairs of
nodes are used in equal proportions** (this is approximately true: there is
randomness in the algorithm to achieve this, so there might be some small
fluctuations, as we see above).

View File

@ -0,0 +1,101 @@
+++
title = "Multi-HDD support"
weight = 15
+++
Since v0.9, Garage natively supports nodes that have several storage drives
for storing data blocks (not for metadata storage).
## Initial setup
To set up a new Garage storage node with multiple HDDs,
format and mount all your drives in different directories,
and use a Garage configuration as follows:
```toml
data_dir = [
{ path = "/path/to/hdd1", capacity = "2T" },
{ path = "/path/to/hdd2", capacity = "4T" },
]
```
Garage will automatically balance all blocks stored by the node
among the different specified directories, proportionally to the
specified capacities.
## Updating the list of storage locations
If you add new storage locations to your `data_dir`,
Garage will not automatically rebalance existing data between storage locations.
Newly written blocks will be balanced proportionally to the specified capacities,
and existing data may be moved between drives to improve balancing,
but only opportunistically when a data block is re-written (e.g. an object
is re-uploaded, or an object with a duplicate block is uploaded).
To understand precisely what is happening, we need to dive into how Garage
splits data among the different storage locations.
First of all, Garage divides the set of all possible block hashes
into a fixed number of slices (currently 1024), and assigns
to each slice a primary storage location among the specified data directories.
The number of slices having their primary location in each data directory
is proportional to the capacity specified in the config file.
When Garage receives a block to write, it will always write it in the primary
directory of the slice that contains its hash.
Now, to be able to not lose existing data blocks when storage locations
are added, Garage also keeps a list of secondary data directories
for all of the hash slices. The secondary data directories for a slice indicate
storage locations that once were primary directories for that slice, i.e. where
Garage knows that data blocks of that slice might be stored.
When Garage is requested to read a certain data block,
it will first look in the primary storage directory of its slice,
and if it doesn't find it there, it goes through all of the secondary storage
locations until it finds it. This allows Garage to continue operating
normally when storage locations are added, without having to shuffle
files between drives to place them in the correct location.
This relatively simple strategy works well but does not ensure that data
is correctly balanced among drives according to their capacity.
To rebalance data, two strategies can be used:
- Lazy rebalancing: when a block is re-written (e.g. the object is re-uploaded),
Garage checks whether the existing copy is in the primary directory of the slice
or in a secondary directory. If the current copy is in a secondary directory,
Garage re-writes a copy in the primary directory and deletes the one from the
secondary directory. This might never end up rebalancing everything if there
are data blocks that are only read and never written.
- Active rebalancing: an operator of a Garage node can explicitly launch a repair
procedure that rebalances the data directories, moving all blocks to their
primary location. Once done, all secondary locations for all hash slices are
removed so that they won't be checked anymore when looking for a data block.
## Read-only storage locations
If you would like to move all data blocks from an existing data directory to one
or several new data directories, mark the old directory as read-only:
```toml
data_dir = [
{ path = "/path/to/old_data", read_only = true },
{ path = "/path/to/new_hdd1", capacity = "2T" },
{ path = "/path/to/new_hdd2", capacity = "4T" },
]
```
Garage will be able to read requested blocks from the read-only directory.
Garage will also move data out of the read-only directory either progressively
(lazy rebalancing) or if requested explicitly (active rebalancing).
Once an active rebalancing has finished, your read-only directory should be empty:
it might still contain subdirectories, but no data files. You can check that
it contains no files using:
```bash
find /path/to/old_data -type f # should not print anything
```
at which point it can be removed from the `data_dir` list in your config file.

View File

@ -80,6 +80,6 @@ The entire procedure would look something like this:
5. If any specific migration procedure is required, it is usually in one of the two cases:
- It can be run on online nodes after the new version has started, during regular cluster operation.
- it has to be run offline
- It has to be run offline, in which case you will have to take all nodes offline again, one after the other, to run the repair.
For this last step, please refer to the specific documentation pertaining to the version upgrade you are doing.

View File

@ -84,9 +84,8 @@ admin_token = "$(openssl rand -base64 32)"
EOF
```
Now that your configuration file has been created, you can put
it in the right place. By default, garage looks at **`/etc/garage.toml`.**
Now that your configuration file has been created, you may save it to the directory of your choice.
By default, Garage looks for **`/etc/garage.toml`.**
You can also store it somewhere else, but you will have to specify `-c path/to/garage.toml`
at each invocation of the `garage` binary (for example: `garage -c ./garage.toml server`, `garage -c ./garage.toml status`).
@ -103,12 +102,14 @@ your data to be persisted properly.
### Launching the Garage server
Use the following command to launch the Garage server with our configuration file:
Use the following command to launch the Garage server:
```
garage server
garage -c path/to/garage.toml server
```
If you have placed the `garage.toml` file in `/etc` (its default location), you can simply run `garage server`.
You can tune Garage's verbosity as follows (from less verbose to more verbose):
```
@ -126,7 +127,7 @@ Log level `debug` can help you check why your S3 API calls are not working.
The `garage` utility is also used as a CLI tool to configure your Garage deployment.
It uses values from the TOML configuration file to find the Garage daemon running on the
local node, therefore if your configuration file is not at `/etc/garage.toml` you will
again have to specify `-c path/to/garage.toml`.
again have to specify `-c path/to/garage.toml` at each invocation.
If the `garage` CLI is able to correctly detect the parameters of your local Garage node,
the following command should be enough to show the status of your cluster:
@ -140,7 +141,7 @@ This should show something like this:
```
==== HEALTHY NODES ====
ID Hostname Address Tag Zone Capacity
563e1ac825ee3323 linuxbox 127.0.0.1:3901 NO ROLE ASSIGNED
563e1ac825ee3323 linuxbox 127.0.0.1:3901 NO ROLE ASSIGNED
```
## Creating a cluster layout
@ -153,12 +154,12 @@ For our test deployment, we are using only one node. The way in which we configu
it does not matter, you can simply write:
```bash
garage layout assign -z dc1 -c 1 <node_id>
garage layout assign -z dc1 -c 1G <node_id>
```
where `<node_id>` corresponds to the identifier of the node shown by `garage status` (first column).
You can simply enter a prefix of that identifier.
For instance here you could write just `garage layout assign -z dc1 -c 1 563e`.
For instance here you could write just `garage layout assign -z dc1 -c 1G 563e`.
The layout then has to be applied to the cluster, using:
@ -209,7 +210,7 @@ one key can access multiple buckets, multiple keys can access one bucket.
Create an API key using the following command:
```
garage key new --name nextcloud-app-key
garage key create nextcloud-app-key
```
The output should look as follows:

View File

@ -10,6 +10,8 @@ Here is an example `garage.toml` configuration file that illustrates all of the
```toml
metadata_dir = "/var/lib/garage/meta"
data_dir = "/var/lib/garage/data"
metadata_fsync = true
data_fsync = false
db_engine = "lmdb"
@ -90,6 +92,19 @@ This folder can be placed on an HDD. The space available for `data_dir`
should be counted to determine a node's capacity
when [adding it to the cluster layout](@/documentation/cookbook/real-world.md).
Since `v0.9.0`, Garage supports multiple data directories with the following syntax:
```toml
data_dir = [
{ path = "/path/to/old_data", read_only = true },
{ path = "/path/to/new_hdd1", capacity = "2T" },
{ path = "/path/to/new_hdd2", capacity = "4T" },
]
```
See [the dedicated documentation page](@/documentation/operations/multi-hdd.md)
on how to operate Garage in such a setup.
### `db_engine` (since `v0.8.0`)
By default, Garage uses the Sled embedded database library
@ -131,6 +146,49 @@ convert-db -a <input db engine> -i <input db path> \
Make sure to specify the full database path as presented in the table above,
and not just the path to the metadata directory.
### `metadata_fsync`
Whether to enable synchronous mode for the database engine or not.
This is disabled (`false`) by default.
Enabling this option reduces the risk of metadata corruption in case of power failures,
at the cost of a significant drop in write performance,
as Garage will have to pause to sync data to disk much more often
(several times for API calls such as PutObject).
Using this option reduces the risk of simultaneous metadata corruption on several
cluster nodes, which could lead to data loss.
If multi-site replication is used, this option is most likely not necessary, as
it is extremely unlikely that two nodes in different locations will have a
power failure at the exact same time.
(Metadata corruption on a single node is not an issue, as the corrupted data file
can always be deleted and reconstructed from the other nodes in the cluster.)
Here is how this option impacts the different database engines:
| Database | `metadata_fsync = false` (default) | `metadata_fsync = true` |
|----------|------------------------------------|-------------------------------|
| Sled | default options | *unsupported* |
| Sqlite | `PRAGMA synchronous = OFF` | `PRAGMA synchronous = NORMAL` |
| LMDB | `MDB_NOMETASYNC` + `MDB_NOSYNC` | `MDB_NOMETASYNC` |
Note that the Sqlite database is always run in `WAL` mode (`PRAGMA journal_mode = WAL`).
### `data_fsync`
Whether to `fsync` data blocks and their containing directory after they are
saved to disk.
This is disabled (`false`) by default.
This might reduce the risk that a data block is lost in rare
situations such as several nodes simultaneously losing power,
at the cost of a moderate drop in write performance.
Similarly to `metadata_fsync`, this is likely not necessary
if geographical replication is used.
### `block_size`
Garage splits stored objects in consecutive chunks of size `block_size`

View File

@ -75,16 +75,13 @@ but these endpoints are documented in [Red Hat Ceph Storage - Chapter 2. Ceph Ob
| Endpoint | Garage | [Openstack Swift](https://docs.openstack.org/swift/latest/s3_compat.html) | [Ceph Object Gateway](https://docs.ceph.com/en/latest/radosgw/s3/) | [Riak CS](https://docs.riak.com/riak/cs/2.1.1/references/apis/storage/s3/index.html) | [OpenIO](https://docs.openio.io/latest/source/arch-design/s3_compliancy.html) |
|------------------------------|----------------------------------|-----------------|---------------|---------|-----|
| [AbortMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) | ✅ Implemented | ✅ | ✅ | ✅ | ✅ |
| [CompleteMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) | ✅ Implemented (see details below) | ✅ | ✅ | ✅ | ✅ |
| [CreateMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) | ✅ Implemented | ✅| ✅ | ✅ | ✅ |
| [ListMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUpload.html) | ✅ Implemented | ✅ | ✅ | ✅ | ✅ |
| [ListParts](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) | ✅ Implemented | ✅ | ✅ | ✅ | ✅ |
| [UploadPart](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) | ✅ Implemented (see details below) | ✅ | ✅| ✅ | ✅ |
| [UploadPartCopy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) | ✅ Implemented | ✅ | ✅ | ✅ | ✅ |
Our implementation of Multipart Upload is currently a bit more restrictive than Amazon's one in some edge cases.
For more information, please refer to our [issue tracker](https://git.deuxfleurs.fr/Deuxfleurs/garage/issues/204).
| [AbortMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) | ✅ Implemented | ✅ | ✅ | ✅ | ✅ |
| [CompleteMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) | ✅ Implemented | ✅ | ✅ | ✅ | ✅ |
| [CreateMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) | ✅ Implemented | ✅| ✅ | ✅ | ✅ |
| [ListMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUpload.html) | ✅ Implemented | ✅ | ✅ | ✅ | ✅ |
| [ListParts](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) | ✅ Implemented | ✅ | ✅ | ✅ | ✅ |
| [UploadPart](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) | ✅ Implemented | ✅ | ✅| ✅ | ✅ |
| [UploadPartCopy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) | ✅ Implemented | ✅ | ✅ | ✅ | ✅ |
### Website endpoints
@ -127,15 +124,22 @@ If you need this feature, please [share your use case in our dedicated issue](ht
| Endpoint | Garage | [Openstack Swift](https://docs.openstack.org/swift/latest/s3_compat.html) | [Ceph Object Gateway](https://docs.ceph.com/en/latest/radosgw/s3/) | [Riak CS](https://docs.riak.com/riak/cs/2.1.1/references/apis/storage/s3/index.html) | [OpenIO](https://docs.openio.io/latest/source/arch-design/s3_compliancy.html) |
|------------------------------|----------------------------------|-----------------|---------------|---------|-----|
| [DeleteBucketLifecycle](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) | ❌ Missing | ❌| ✅| ❌| ✅|
| [GetBucketLifecycleConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) | ❌ Missing | ❌| ✅ | ❌| ✅|
| [PutBucketLifecycleConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) | ❌ Missing | ❌| ✅ | ❌| ✅|
| [DeleteBucketLifecycle](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) | ✅ Implemented | ❌| ✅| ❌| ✅|
| [GetBucketLifecycleConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) | ✅ Implemented | ❌| ✅ | ❌| ✅|
| [PutBucketLifecycleConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) | ⚠ Partially implemented (see below) | ❌| ✅ | ❌| ✅|
| [GetBucketVersioning](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) | ❌ Stub (see below) | ✅| ✅ | ❌| ✅|
| [ListObjectVersions](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectVersions.html) | ❌ Missing | ❌| ✅ | ❌| ✅|
| [PutBucketVersioning](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html) | ❌ Missing | ❌| ✅| ❌| ✅|
**PutBucketLifecycleConfiguration:** The only actions supported are
`AbortIncompleteMultipartUpload` and `Expiration` (without the
`ExpiredObjectDeleteMarker` field). All other operations depend on
either bucket versioning or storage classes, which Garage currently does not
implement. The deprecated `Prefix` member directly in the `Rule`
structure/XML tag is not supported; specified prefixes must be inside the
`Filter` structure/XML tag.
**GetBucketVersioning:** Stub implementation (Garage does not yet support versionning so this always returns "versionning not enabled").
**GetBucketVersioning:** Stub implementation which always returns "versioning not enabled", since Garage does not yet support bucket versioning.
### Replication endpoints

View File

@ -0,0 +1,72 @@
+++
title = "Migrating from 0.8 to 0.9"
weight = 12
+++
**This guide explains how to migrate to 0.9 if you have an existing 0.8 cluster.
We don't recommend trying to migrate to 0.9 directly from 0.7 or older.**
This migration procedure has been tested on several clusters without issues.
However, it is still a *critical procedure* that might cause issues.
**Make sure to back up all your data before attempting it!**
You might also want to read our [general documentation on upgrading Garage](@/documentation/operations/upgrading.md).
The following are **breaking changes** in Garage v0.9 that require your attention when migrating:
- LMDB is now the default metadata db engine and Sled is deprecated. If you were using Sled, make sure to specify `db_engine = "sled"` in your configuration file, or take the time to [convert your database](https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db-engine-since-v0-8-0).
- Capacity values are now in actual byte units. The translation from the old layout will assign 1 capacity = 1Gb by default, which might be wrong for your cluster. This does not cause any data to be moved around, but you might want to re-assign correct capacity values post-migration.
- Multipart uploads that were started in Garage v0.8 will not be visible in Garage v0.9 and will have to be restarted from scratch.
- Changes to the admin API: some `v0/` endpoints have been replaced by `v1/` counterparts with updated/uniformized syntax. All other endpoints have also moved to `v1/` by default, without syntax changes, but are still available under `v0/` for compatibility.
## Simple migration procedure (takes cluster offline for a while)
The migration steps are as follows:
1. Disable API and web access. You may do this by stopping your reverse proxy or by commenting out
the `api_bind_addr` values in your `config.toml` file and restarting Garage.
2. Do `garage repair --all-nodes --yes tables` and `garage repair --all-nodes --yes blocks`,
check the logs and check that all data seems to be synced correctly between
nodes. If you have time, do additional checks (`versions`, `block_refs`, etc.)
3. Check that the block resync queue and Merkle queue are empty:
run `garage stats -a` to query them or inspect metrics in the Grafana dashboard.
4. Turn off Garage v0.8
5. **Backup the metadata folder of all your nodes!** For instance, use the following command
if your metadata directory is `/var/lib/garage/meta`: `cd /var/lib/garage ; tar -acf meta-v0.8.tar.zst meta/`
6. Install Garage v0.9
7. Update your configuration file if necessary.
8. Turn on Garage v0.9
9. Do `garage repair --all-nodes --yes tables` and `garage repair --all-nodes --yes blocks`.
Wait for a full table sync to run.
10. Your upgraded cluster should be in a working state. Re-enable API and Web
access and check that everything went well.
11. Monitor your cluster in the next hours to see if it works well under your production load, report any issue.
12. You might want to assign correct capacity values to all your nodes. Doing so might cause data to be moved
in your cluster, which should also be monitored carefully.
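As a sketch, re-assigning a capacity could look like this (`<node_id>`, the capacity value and `<new version>` are placeholders; the version must be 1 + the currently applied layout version):
```bash
garage layout assign <node_id> -c 1.5T   # actual disk space dedicated to Garage on this node
garage layout show                       # review the resulting data movements
garage layout apply --version <new version>
```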
## Minimal downtime migration procedure
The migration to Garage v0.9 can be done with almost no downtime,
by restarting all nodes at once in the new version.
The migration steps are as follows:
1. Do `garage repair --all-nodes --yes tables` and `garage repair --all-nodes --yes blocks`,
check the logs and check that all data seems to be synced correctly between
nodes. If you have time, do additional checks (`versions`, `block_refs`, etc.)
2. Turn off each node individually; back up its metadata folder (see above); turn it back on again.
This will allow you to take a backup of all nodes without impacting global cluster availability.
You can do all nodes of a single zone at once as this does not impact the availability of Garage.
3. Prepare your binaries and configuration files for Garage v0.9
4. Shut down all v0.8 nodes simultaneously, and restart them all simultaneously in v0.9.
Use your favorite deployment tool (Ansible, Kubernetes, Nomad) to achieve this as fast as possible.
Garage v0.9 should be in a working state as soon as it starts.
5. Proceed with repair and monitoring as described in steps 9-12 above.

View File

@ -52,11 +52,11 @@ Returns an HTTP status 200 if the node is ready to answer user's requests,
and an HTTP status 503 (Service Unavailable) if there are some partitions
for which a quorum of nodes is not available.
A simple textual message is also returned in a body with content-type `text/plain`.
See `/v0/health` for an API that also returns JSON output.
See `/v1/health` for an API that also returns JSON output.
### Cluster operations
#### GetClusterStatus `GET /v0/status`
#### GetClusterStatus `GET /v1/status`
Returns the cluster's current status in JSON, including:
@ -70,86 +70,112 @@ Example response body:
```json
{
"node": "ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f",
"garage_version": "git:v0.8.0",
"knownNodes": {
"ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f": {
"garageVersion": "git:v0.9.0-dev",
"garageFeatures": [
"k2v",
"sled",
"lmdb",
"sqlite",
"metrics",
"bundled-libs"
],
"rustVersion": "1.68.0",
"dbEngine": "LMDB (using Heed crate)",
"knownNodes": [
{
"id": "ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f",
"addr": "10.0.0.11:3901",
"is_up": true,
"last_seen_secs_ago": 9,
"isUp": true,
"lastSeenSecsAgo": 9,
"hostname": "node1"
},
"4a6ae5a1d0d33bf895f5bb4f0a418b7dc94c47c0dd2eb108d1158f3c8f60b0ff": {
{
"id": "4a6ae5a1d0d33bf895f5bb4f0a418b7dc94c47c0dd2eb108d1158f3c8f60b0ff",
"addr": "10.0.0.12:3901",
"is_up": true,
"last_seen_secs_ago": 1,
"isUp": true,
"lastSeenSecsAgo": 1,
"hostname": "node2"
},
"23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27": {
{
"id": "23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27",
"addr": "10.0.0.21:3901",
"is_up": true,
"last_seen_secs_ago": 7,
"isUp": true,
"lastSeenSecsAgo": 7,
"hostname": "node3"
},
"e2ee7984ee65b260682086ec70026165903c86e601a4a5a501c1900afe28d84b": {
{
"id": "e2ee7984ee65b260682086ec70026165903c86e601a4a5a501c1900afe28d84b",
"addr": "10.0.0.22:3901",
"is_up": true,
"last_seen_secs_ago": 1,
"isUp": true,
"lastSeenSecsAgo": 1,
"hostname": "node4"
}
},
],
"layout": {
"version": 12,
"roles": {
"ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f": {
"roles": [
{
"id": "ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f",
"zone": "dc1",
"capacity": 4,
"capacity": 10737418240,
"tags": [
"node1"
]
},
"4a6ae5a1d0d33bf895f5bb4f0a418b7dc94c47c0dd2eb108d1158f3c8f60b0ff": {
{
"id": "4a6ae5a1d0d33bf895f5bb4f0a418b7dc94c47c0dd2eb108d1158f3c8f60b0ff",
"zone": "dc1",
"capacity": 6,
"capacity": 10737418240,
"tags": [
"node2"
]
},
"23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27": {
{
"id": "23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27",
"zone": "dc2",
"capacity": 10,
"capacity": 10737418240,
"tags": [
"node3"
]
}
},
"stagedRoleChanges": {
"e2ee7984ee65b260682086ec70026165903c86e601a4a5a501c1900afe28d84b": {
],
"stagedRoleChanges": [
{
"id": "e2ee7984ee65b260682086ec70026165903c86e601a4a5a501c1900afe28d84b",
"remove": false,
"zone": "dc2",
"capacity": 5,
"capacity": 10737418240,
"tags": [
"node4"
]
}
}
{
"id": "23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27",
"remove": true,
"zone": null,
"capacity": null,
"tags": null,
}
]
}
}
```
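For instance, assuming the admin API is exposed on `localhost:3903` and protected by a bearer token (the port, the `Authorization: Bearer` scheme and the `$ADMIN_TOKEN` variable are deployment-specific assumptions), this endpoint could be queried with:
```bash
curl -s -H "Authorization: Bearer $ADMIN_TOKEN" \
  http://localhost:3903/v1/status
```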
#### GetClusterHealth `GET /v0/health`
#### GetClusterHealth `GET /v1/health`
Returns the cluster's current health in JSON format, with the following variables:
- `status`: one of `Healthy`, `Degraded` or `Unavailable`:
- Healthy: Garage node is connected to all storage nodes
- Degraded: Garage node is not connected to all storage nodes, but a quorum of write nodes is available for all partitions
- Unavailable: a quorum of write nodes is not available for some partitions
- `known_nodes`: the number of nodes this Garage node has had a TCP connection to since the daemon started
- `connected_nodes`: the nubmer of nodes this Garage node currently has an open connection to
- `storage_nodes`: the number of storage nodes currently registered in the cluster layout
- `storage_nodes_ok`: the number of storage nodes to which a connection is currently open
- `status`: one of `healthy`, `degraded` or `unavailable`:
- healthy: Garage node is connected to all storage nodes
- degraded: Garage node is not connected to all storage nodes, but a quorum of write nodes is available for all partitions
- unavailable: a quorum of write nodes is not available for some partitions
- `knownNodes`: the number of nodes this Garage node has had a TCP connection to since the daemon started
- `connectedNodes`: the number of nodes this Garage node currently has an open connection to
- `storageNodes`: the number of storage nodes currently registered in the cluster layout
- `storageNodesOk`: the number of storage nodes to which a connection is currently open
- `partitions`: the total number of partitions of the data (currently always 256)
- `partitions_quorum`: the number of partitions for which a quorum of write nodes is available
- `partitions_all_ok`: the number of partitions for which we are connected to all storage nodes responsible of storing it
- `partitionsQuorum`: the number of partitions for which a quorum of write nodes is available
- `partitionsAllOk`: the number of partitions for which we are connected to all storage nodes responsible for storing it
Contrary to `GET /health`, this endpoint always returns a 200 OK HTTP response code.
@ -157,18 +183,18 @@ Example response body:
```json
{
"status": "Degraded",
"known_nodes": 3,
"connected_nodes": 2,
"storage_nodes": 3,
"storage_nodes_ok": 2,
"partitions": 256,
"partitions_quorum": 256,
"partitions_all_ok": 0
"status": "degraded",
"knownNodes": 3,
"connectedNodes": 3,
"storageNodes": 4,
"storageNodesOk": 3,
"partitions": 256,
"partitionsQuorum": 256,
"partitionsAllOk": 64
}
```
#### ConnectClusterNodes `POST /v0/connect`
#### ConnectClusterNodes `POST /v1/connect`
Instructs this Garage node to connect to other Garage nodes at specified addresses.
@ -198,7 +224,7 @@ Example response:
]
```
#### GetClusterLayout `GET /v0/layout`
#### GetClusterLayout `GET /v1/layout`
Returns the cluster's current layout in JSON, including:
@ -212,42 +238,54 @@ Example response body:
```json
{
"version": 12,
"roles": {
"ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f": {
"roles": [
{
"id": "ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f",
"zone": "dc1",
"capacity": 4,
"capacity": 10737418240,
"tags": [
"node1"
]
},
"4a6ae5a1d0d33bf895f5bb4f0a418b7dc94c47c0dd2eb108d1158f3c8f60b0ff": {
{
"id": "4a6ae5a1d0d33bf895f5bb4f0a418b7dc94c47c0dd2eb108d1158f3c8f60b0ff",
"zone": "dc1",
"capacity": 6,
"capacity": 10737418240,
"tags": [
"node2"
]
},
"23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27": {
{
"id": "23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27",
"zone": "dc2",
"capacity": 10,
"capacity": 10737418240,
"tags": [
"node3"
]
}
},
"stagedRoleChanges": {
"e2ee7984ee65b260682086ec70026165903c86e601a4a5a501c1900afe28d84b": {
],
"stagedRoleChanges": [
{
"id": "e2ee7984ee65b260682086ec70026165903c86e601a4a5a501c1900afe28d84b",
"remove": false,
"zone": "dc2",
"capacity": 5,
"capacity": 10737418240,
"tags": [
"node4"
]
}
}
{
"id": "23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27",
"remove": true,
"zone": null,
"capacity": null,
"tags": null,
}
]
}
```
#### UpdateClusterLayout `POST /v0/layout`
#### UpdateClusterLayout `POST /v1/layout`
Send modifications to the cluster layout. These modifications will
be included in the staged role changes, visible in subsequent calls
@ -259,8 +297,9 @@ the layout.
Request body format:
```json
{
<node_id>: {
[
{
"id": <node_id>,
"capacity": <new_capacity>,
"zone": <new_zone>,
"tags": [
@ -268,17 +307,22 @@ Request body format:
...
]
},
<node_id_to_remove>: null,
...
}
{
"id": <node_id_to_remove>,
"remove": true
}
]
```
Contrary to the CLI, which may update only a subset of the fields
`capacity`, `zone` and `tags`, all of these
values must be specified when calling this API.
This returns the new cluster layout with the proposed staged changes,
as returned by GetClusterLayout.
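A hypothetical call could look like this (the endpoint address and token are assumptions; the node identifiers are the ones used in the examples of this page):
```bash
# Stage a new role for node4 and the removal of node3.
curl -s -X POST \
  -H "Authorization: Bearer $ADMIN_TOKEN" \
  -H "Content-Type: application/json" \
  -d '[
        {
          "id": "e2ee7984ee65b260682086ec70026165903c86e601a4a5a501c1900afe28d84b",
          "zone": "dc2",
          "capacity": 10737418240,
          "tags": ["node4"]
        },
        {
          "id": "23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27",
          "remove": true
        }
      ]' \
  http://localhost:3903/v1/layout
```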
#### ApplyClusterLayout `POST /v0/layout/apply`
#### ApplyClusterLayout `POST /v1/layout/apply`
Applies to the cluster the layout changes currently registered as
staged layout changes.
@ -295,7 +339,10 @@ Similarly to the CLI, the body must include the version of the new layout
that will be created, which MUST be 1 + the value of the currently
existing layout in the cluster.
#### RevertClusterLayout `POST /v0/layout/revert`
This returns the message describing all the calculations done to compute the new
layout, as well as the description of the layout as returned by GetClusterLayout.
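For example (the endpoint address and token are assumptions; the `version` field follows the layout objects shown above and must be 1 + the current version):
```bash
curl -s -X POST \
  -H "Authorization: Bearer $ADMIN_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"version": 13}' \
  http://localhost:3903/v1/layout/apply
```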
#### RevertClusterLayout `POST /v1/layout/revert`
Clears all of the staged layout changes.
@ -313,10 +360,13 @@ Similarly to the CLI, the body must include the incremented
version number, which MUST be 1 + the value of the currently
existing layout in the cluster.
This returns the new cluster layout with all changes reverted,
as returned by GetClusterLayout.
### Access key operations
#### ListKeys `GET /v0/key`
#### ListKeys `GET /v1/key`
Returns all API access keys in the cluster.
@ -335,34 +385,8 @@ Example response:
]
```
#### CreateKey `POST /v0/key`
Creates a new API access key.
Request body format:
```json
{
"name": "NameOfMyKey"
}
```
#### ImportKey `POST /v0/key/import`
Imports an existing API key.
Request body format:
```json
{
"accessKeyId": "GK31c2f218a2e44f485b94239e",
"secretAccessKey": "b892c0665f0ada8a4755dae98baa3b133590e11dae3bcc1f9d769d67f16c3835",
"name": "NameOfMyKey"
}
```
#### GetKeyInfo `GET /v0/key?id=<acces key id>`
#### GetKeyInfo `GET /v0/key?search=<pattern>`
#### GetKeyInfo `GET /v1/key?id=<access key id>`
#### GetKeyInfo `GET /v1/key?search=<pattern>`
Returns information about the requested API access key.
@ -370,6 +394,9 @@ If `id` is set, the key is looked up using its exact identifier (faster).
If `search` is set, the key is looked up using its name or prefix
of identifier (slower, all keys are enumerated to do this).
Optionally, the query parameter `showSecretKey=true` can be set to reveal the
associated secret access key.
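A request could look like this (the endpoint address and token are assumptions; the key ID is the one used in the examples of this page):
```bash
curl -s -H "Authorization: Bearer $ADMIN_TOKEN" \
  "http://localhost:3903/v1/key?id=GK31c2f218a2e44f485b94239e&showSecretKey=true"
```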
Example response:
```json
@ -433,11 +460,40 @@ Example response:
}
```
#### DeleteKey `DELETE /v0/key?id=<acces key id>`
#### CreateKey `POST /v1/key`
Deletes an API access key.
Creates a new API access key.
#### UpdateKey `POST /v0/key?id=<acces key id>`
Request body format:
```json
{
"name": "NameOfMyKey"
}
```
This returns the key info, including the created secret key,
in the same format as the result of GetKeyInfo.
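A hypothetical invocation (the endpoint address and token are assumptions):
```bash
curl -s -X POST \
  -H "Authorization: Bearer $ADMIN_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"name": "NameOfMyKey"}' \
  http://localhost:3903/v1/key
```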
#### ImportKey `POST /v1/key/import`
Imports an existing API key.
This will check that the imported key is in a valid format, i.e.
is a key that could have been generated by Garage.
Request body format:
```json
{
"accessKeyId": "GK31c2f218a2e44f485b94239e",
"secretAccessKey": "b892c0665f0ada8a4755dae98baa3b133590e11dae3bcc1f9d769d67f16c3835",
"name": "NameOfMyKey"
}
```
This returns the key info in the same format as the result of GetKeyInfo.
#### UpdateKey `POST /v1/key?id=<access key id>`
Updates information about the specified API access key.
@ -457,10 +513,16 @@ All fields (`name`, `allow` and `deny`) are optional.
If they are present, the corresponding modifications are applied to the key, otherwise nothing is changed.
The possible flags in `allow` and `deny` are: `createBucket`.
This returns the key info in the same format as the result of GetKeyInfo.
#### DeleteKey `DELETE /v1/key?id=<access key id>`
Deletes an API access key.
### Bucket operations
#### ListBuckets `GET /v0/bucket`
#### ListBuckets `GET /v1/bucket`
Returns all storage buckets in the cluster.
@ -502,8 +564,8 @@ Example response:
]
```
#### GetBucketInfo `GET /v0/bucket?id=<bucket id>`
#### GetBucketInfo `GET /v0/bucket?globalAlias=<alias>`
#### GetBucketInfo `GET /v1/bucket?id=<bucket id>`
#### GetBucketInfo `GET /v1/bucket?globalAlias=<alias>`
Returns information about the requested storage bucket.
@ -535,7 +597,10 @@ Example response:
],
"objects": 14827,
"bytes": 13189855625,
"unfinshedUploads": 0,
"unfinishedUploads": 1,
"unfinishedMultipartUploads": 1,
"unfinishedMultipartUploadParts": 11,
"unfinishedMultipartUploadBytes": 41943040,
"quotas": {
"maxSize": null,
"maxObjects": null
@ -543,7 +608,7 @@ Example response:
}
```
#### CreateBucket `POST /v0/bucket`
#### CreateBucket `POST /v1/bucket`
Creates a new storage bucket.
@ -583,13 +648,7 @@ or no alias at all.
Technically, you can also specify both `globalAlias` and `localAlias` and that would create
two aliases, but I don't see why you would want to do that.
#### DeleteBucket `DELETE /v0/bucket?id=<bucket id>`
Deletes a storage bucket. A bucket cannot be deleted if it is not empty.
Warning: this will delete all aliases associated with the bucket!
#### UpdateBucket `PUT /v0/bucket?id=<bucket id>`
#### UpdateBucket `PUT /v1/bucket?id=<bucket id>`
Updates configuration of the given bucket.
@ -621,9 +680,16 @@ In `quotas`: new values of `maxSize` and `maxObjects` must both be specified, or
to remove the quotas. An absent value will be considered the same as a `null`. It is not possible
to change only one of the two quotas.
#### DeleteBucket `DELETE /v1/bucket?id=<bucket id>`
Deletes a storage bucket. A bucket cannot be deleted if it is not empty.
Warning: this will delete all aliases associated with the bucket!
### Operations on permissions for keys on buckets
#### BucketAllowKey `POST /v0/bucket/allow`
#### BucketAllowKey `POST /v1/bucket/allow`
Allows a key to do read/write/owner operations on a bucket.
@ -644,7 +710,7 @@ Request body format:
Flags in `permissions` which have the value `true` will be activated.
Other flags will remain unchanged.
#### BucketDenyKey `POST /v0/bucket/deny`
#### BucketDenyKey `POST /v1/bucket/deny`
Denies a key from doing read/write/owner operations on a bucket.
@ -668,19 +734,19 @@ Other flags will remain unchanged.
### Operations on bucket aliases
#### GlobalAliasBucket `PUT /v0/bucket/alias/global?id=<bucket id>&alias=<global alias>`
#### GlobalAliasBucket `PUT /v1/bucket/alias/global?id=<bucket id>&alias=<global alias>`
Empty body. Creates a global alias for a bucket.
#### GlobalUnaliasBucket `DELETE /v0/bucket/alias/global?id=<bucket id>&alias=<global alias>`
#### GlobalUnaliasBucket `DELETE /v1/bucket/alias/global?id=<bucket id>&alias=<global alias>`
Removes a global alias for a bucket.
#### LocalAliasBucket `PUT /v0/bucket/alias/local?id=<bucket id>&accessKeyId=<access key ID>&alias=<local alias>`
#### LocalAliasBucket `PUT /v1/bucket/alias/local?id=<bucket id>&accessKeyId=<access key ID>&alias=<local alias>`
Empty body. Creates a local alias for a bucket in the namespace of a specific access key.
#### LocalUnaliasBucket `DELETE /v0/bucket/alias/local?id=<bucket id>&accessKeyId<access key ID>&alias=<local alias>`
#### LocalUnaliasBucket `DELETE /v1/bucket/alias/local?id=<bucket id>&accessKeyId=<access key ID>&alias=<local alias>`
Removes a local alias for a bucket in the namespace of a specific access key.

doc/optimal_layout_report/.gitignore vendored Normal file
View File

@ -0,0 +1,13 @@
optimal_layout.aux
optimal_layout.log
optimal_layout.synctex.gz
optimal_layout.bbl
optimal_layout.blg
geodistrib.aux
geodistrib.bbl
geodistrib.blg
geodistrib.log
geodistrib.out
geodistrib.synctex.gz

(Several binary files added, including images of 161 KiB, 560 KiB, 287 KiB, 112 KiB and 270 KiB; their contents and diffs are not shown.)

View File

@ -0,0 +1,317 @@
\documentclass[]{article}
\usepackage{amsmath,amssymb}
\usepackage{amsthm}
\usepackage{stmaryrd}
\usepackage{graphicx,xcolor}
\usepackage{hyperref}
\usepackage{algorithm,algpseudocode,float}
\renewcommand\thesubsubsection{\Alph{subsubsection})}
\newtheorem{proposition}{Proposition}
%opening
\title{An algorithm for geo-distributed and redundant storage in Garage}
\author{Mendes Oulamara \\ \emph{mendes@deuxfleurs.fr}}
\date{}
\begin{document}
\maketitle
\begin{abstract}
Garage
\end{abstract}
\section{Introduction}
Garage\footnote{\url{https://garagehq.deuxfleurs.fr/}} is an open-source distributed object storage service tailored for self-hosting. It was designed by the Deuxfleurs association\footnote{\url{https://deuxfleurs.fr/}} to enable small structures (associations, collectives, small companies) to share storage resources to reliably self-host their data, possibly with old and non-reliable machines.
To achieve these reliability and availability goals, the data is broken into \emph{partitions} and every partition is replicated over 3 different machines (that we call \emph{nodes}). When the data is queried, a consensus algorithm makes it possible to fetch it from one of the nodes. A \emph{replication factor} of 3 ensures the best guarantees in the consensus algorithm \cite{ADD RREF}, but this parameter can be different.
Moreover, if the nodes are spread over different \emph{zones} (different houses, offices, cities\dots), we can ask the data to be replicated over nodes belonging to different zones, to improve the storage robustness against zone failures (such as a power outage). To do so, we set a \emph{redundancy parameter}, which is at most the replication factor, and we ask that any partition be replicated over at least this number of zones.
In this work, we propose a repartition algorithm that, given the nodes specifications and the replication and redundancy parameters, computes an optimal assignation of partitions to nodes. We say that the assignation is optimal in the sense that it maximizes the size of the partitions, and hence the effective storage capacity of the system.
Moreover, when a former assignation exists, which is not optimal anymore due to nodes or zones updates, our algorithm computes a new optimal assignation that minimizes the amount of data to be transferred during the assignation update (the \emph{transfer load}).
We call the set of nodes cooperating to store the data a \emph{cluster}, and a description of the nodes, zones and the assignation of partitions to nodes a \emph{cluster layout}.
\subsection{Notations}
Let $k$ be some fixed parameter value, typically 8, that we call the ``partition bits''.
Every object to be stored in the system is split into data blocks of fixed size. We compute a hash $h(\mathbf{b})$ of every such block $\mathbf{b}$, and we define the $k$ last bits of this hash to be the partition number $p(\mathbf{b})$ of the block. This label can take $P=2^k$ different values, and hence there are $P$ different partitions. We denote $\mathbf{P}$ the set of partition labels (i.e. $\mathbf{P}=\llbracket1,P\rrbracket$).
We are given a set $\mathbf{N}$ of $N$ nodes and a set $\mathbf{Z}$ of $Z$ zones. Every node $n$ has a non-negative storage capacity $c_n\ge 0$ and belongs to a zone $z_n\in \mathbf{Z}$. We are also given a replication parameter $\rho_\mathbf{N}$ and a redundancy parameter $\rho_\mathbf{Z}$ such that $1\le \rho_\mathbf{Z} \le \rho_\mathbf{N}$ (typical values would be $\rho_N=3$ and $\rho_Z=2$).
Our goal is to compute an assignment $\alpha = (\alpha_p^1, \ldots, \alpha_p^{\rho_\mathbf{N}})_{p\in \mathbf{P}}$ such that every partition $p$ is associated to $\rho_\mathbf{N}$ distinct nodes $\alpha_p^1, \ldots, \alpha_p^{\rho_\mathbf{N}} \in \mathbf{N}$ and these nodes belong to at least $\rho_\mathbf{Z}$ distinct zones. Among the possible assignations, we choose one that \emph{maximizes} the effective storage capacity of the cluster. If the layout contained a previous assignment $\alpha'$, we \emph{minimize} the amount of data to transfer during the layout update by making $\alpha$ as close as possible to $\alpha'$. These maximization and minimization are described more formally in the following section.
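For example, consider a hypothetical cluster with nodes $n_1,n_2,n_3$ in a zone $z_1$ and nodes $n_4,n_5$ in a zone $z_2$ (these names are only illustrative), with $\rho_\mathbf{N}=3$ and $\rho_\mathbf{Z}=2$. Then for a partition $p$,
$$ \alpha_p = (n_1, n_2, n_4) $$
is admissible (three distinct nodes spread over two zones), whereas $(n_1,n_2,n_3)$ is not, since all three nodes belong to the single zone $z_1$.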
\subsection{Optimization parameters}
To link the effective storage capacity of the cluster to partition assignment, we make the following assumption:
\begin{equation}
\tag{H1}
\text{\emph{All partitions have the same size $s$.}}
\end{equation}
This assumption is justified by the dispersion of the hashing function, when the number of partitions is small relative to the number of stored blocks.
Every node $n$ will store some number $p_n$ of partitions (it is the number of partitions $p$ such that $n$ appears in $\alpha_p$). Hence the partitions stored by $n$ (and hence all partitions, by our assumption) have their size bounded by $c_n/p_n$. This remark leads us to define the optimal size that we will want to maximize:
\begin{equation}
\label{eq:optimal}
\tag{OPT}
s^* = \min_{n \in N} \frac{c_n}{p_n}.
\end{equation}
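As a small numerical illustration (the capacities and counts below are arbitrary), take $P=256$, $\rho_\mathbf{N}=3$ and three nodes with capacities $c=(100,100,200)$ in some storage unit. Spreading the $\rho_\mathbf{N} P = 768$ partition copies proportionally to capacity gives $p=(192,192,384)$ and
$$ s^* = \min\left(\frac{100}{192}, \frac{100}{192}, \frac{200}{384}\right) = \frac{25}{48} \approx 0.52 \text{ units,} $$
so all of the $400$ units of raw capacity are usable in this case (ignoring, for the sake of the arithmetic, the zone constraints that a real assignment must also satisfy).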
When the capacities of the nodes are updated (this includes adding or removing a node), we want to update the assignment as well. However, transferring the data between nodes has a cost and we would like to limit the number of changes in the assignment. We make the following assumption:
\begin{equation}
\tag{H2}
\text{\emph{Node updates happen rarely relative to block operations.}}
\end{equation}
This assumption justifies that when we compute the new assignment $\alpha$, it is worth optimizing the partition size \eqref{eq:optimal} first, and then, among the possible optimal solutions, trying to minimize the number of partition transfers. More formally, we minimize the distance between two assignments defined by
\begin{equation}
d(\alpha, \alpha') := \#\{ (n,p) \in \mathbf{N}\times\mathbf{P} ~|~ n\in \alpha_p \triangle \alpha'_p \}
\end{equation}
where the symmetric difference $\alpha_p \triangle \alpha'_p$ denotes the nodes appearing in one of the assignations but not in both.
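For instance (with hypothetical node names), if a partition $p$ keeps two of its nodes and moves its third copy from $n_5$ to $n_4$, i.e.
$$ \alpha'_p = \{n_1,n_2,n_5\} \quad\text{and}\quad \alpha_p = \{n_1,n_2,n_4\}, \qquad \alpha_p \triangle \alpha'_p = \{n_4,n_5\}, $$
then $p$ contributes $2$ to $d(\alpha,\alpha')$, corresponding to the single copy that has to be transferred.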
\section{Computation of an optimal assignment}
The algorithm that we propose takes as inputs the cluster layout parameters $\mathbf{N}$, $\mathbf{Z}$, $\mathbf{P}$, $(c_n)_{n\in \mathbf{N}}$, $\rho_\mathbf{N}$, $\rho_\mathbf{Z}$, that we defined in the introduction, together with the former assignation $\alpha'$ (if any). The computation of the new optimal assignation $\alpha^*$ is done in three successive steps that will be detailed in the following sections. The first step computes the largest partition size $s^*$ that an assignation can achieve. The second step computes an optimal candidate assignment $\alpha$ that achieves $s^*$, using a heuristic to make it hopefully close to $\alpha'$. The third step modifies $\alpha$ iteratively to reduce $d(\alpha, \alpha')$ and yields an assignation $\alpha^*$ achieving $s^*$, and minimizing $d(\cdot, \alpha')$ among such assignations.
We will explain in the next section how to represent an assignment $\alpha$ by a flow $f$ on a weighted graph $G$ to enable the use of flow and graph algorithms. The main function of the algorithm can be written as follows.
\subsubsection*{Algorithm}
\begin{algorithmic}[1]
\Function{Compute Layout}{$\mathbf{N}$, $\mathbf{Z}$, $\mathbf{P}$, $(c_n)_{n\in \mathbf{N}}$, $\rho_\mathbf{N}$, $\rho_\mathbf{Z}$, $\alpha'$}
\State $s^* \leftarrow$ \Call{Compute Partition Size}{$\mathbf{N}$, $\mathbf{Z}$, $\mathbf{P}$, $(c_n)_{n\in \mathbf{N}}$, $\rho_\mathbf{N}$, $\rho_\mathbf{Z}$}
\State $G \leftarrow G(s^*)$
\State $f \leftarrow$ \Call{Compute Candidate Assignment}{$G$, $\alpha'$}
\State $f^* \leftarrow$ \Call{Minimize transfer load}{$G$, $f$, $\alpha'$}
\State Build $\alpha^*$ from $f^*$
\State \Return $\alpha^*$
\EndFunction
\end{algorithmic}
\subsubsection*{Complexity}
As we will see in the next sections, the worst case complexity of this algorithm is $O(P^2 N^2)$. The minimization of transfer load is the most expensive step, and it can run with a timeout since it is only an optimization step. Without this step (or with a smart timeout), the worst case complexity can be $O((PN)^{3/2}\log C)$ where $C$ is the total storage capacity of the cluster.
\subsection{Determination of the partition size $s^*$}
We will represent an assignment $\alpha$ as a flow in a specific graph $G$. We will not compute the optimal partition size $s^*$ a priori, but we will determine it by dichotomy, as the largest size $s$ such that the maximal flow achievable on $G=G(s)$ has value $\rho_\mathbf{N}P$. We will assume that the capacities are given in a small enough unit (say, Megabytes), and we will determine $s^*$ at the precision of the given unit.
Given some candidate size value $s$, we describe the oriented weighted graph $G=(V,E)$ with vertex set $V$ and arc set $E$ (see Figure \ref{fig:flowgraph}).
The set of vertices $V$ contains the source $\mathbf{s}$, the sink $\mathbf{t}$, vertices
$\mathbf{p^+, p^-}$ for every partition $p$, vertices $\mathbf{x}_{p,z}$ for every partition $p$ and zone $z$, and vertices $\mathbf{n}$ for every node $n$.
The set of arcs $E$ contains:
\begin{itemize}
\item ($\mathbf{s}$,$\mathbf{p}^+$, $\rho_\mathbf{Z}$) for every partition $p$;
\item ($\mathbf{s}$,$\mathbf{p}^-$, $\rho_\mathbf{N}-\rho_\mathbf{Z}$) for every partition $p$;
\item ($\mathbf{p}^+$,$\mathbf{x}_{p,z}$, 1) for every partition $p$ and zone $z$;
\item ($\mathbf{p}^-$,$\mathbf{x}_{p,z}$, $\rho_\mathbf{N}-\rho_\mathbf{Z}$) for every partition $p$ and zone $z$;
\item ($\mathbf{x}_{p,z}$,$\mathbf{n}$, 1) for every partition $p$, zone $z$ and node $n\in z$;
\item ($\mathbf{n}$, $\mathbf{t}$, $\lfloor c_n/s \rfloor$) for every node $n$.
\end{itemize}
\begin{figure}
\centering
\includegraphics[width=\linewidth]{figures/flow_graph_param}
\caption{An example of graph $G(s)$. Arcs are oriented from left to right, and unlabeled arcs have capacity 1. In this example, nodes $n_1,n_2,n_3$ belong to zone $z_1$, and nodes $n_4,n_5$ belong to zone $z_2$.}
\label{fig:flowgraph}
\end{figure}
In the following complexity calculations, we will use the number of vertices and edges of $G$. Note from now on that $\# V = O(PZ)$ and $\# E = O(PN)$.
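To fix the orders of magnitude with hypothetical values $P=256$, $Z=3$ and $N=10$, the construction above gives
\begin{align*}
\# V &= 2 + 2P + PZ + N = 2 + 512 + 768 + 10 = 1292, \\
\# E &= 2P + 2PZ + PN + N = 512 + 1536 + 2560 + 10 = 4618.
\end{align*}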
\begin{proposition}
An assignment $\alpha$ is realizable with partition size $s$ and the redundancy constraints $(\rho_\mathbf{N},\rho_\mathbf{Z})$ if and only if there exists a maximal flow function $f$ in $G$ with total flow $\rho_\mathbf{N}P$, such that the arcs ($\mathbf{x}_{p,z}$,$\mathbf{n}$, 1) used are exactly those for which $p$ is associated to $n$ in $\alpha$.
\end{proposition}
\begin{proof}
Given such a flow $f$, we can reconstruct a candidate $\alpha$. In $f$, the flow passing through $\mathbf{p^+}$ and $\mathbf{p^-}$ is $\rho_\mathbf{N}$, and since the outgoing capacity of every $\mathbf{x}_{p,z}$ is 1, every partition is associated to $\rho_\mathbf{N}$ distinct nodes. The fraction $\rho_\mathbf{Z}$ of the flow passing through every $\mathbf{p^+}$ must be spread over $\rho_\mathbf{Z}$ distinct zones, since every arc outgoing from $\mathbf{p^+}$ has capacity 1. So the reconstructed $\alpha$ verifies the redundancy constraints. For every node $n$, the flow between $\mathbf{n}$ and $\mathbf{t}$ corresponds to the number of partitions associated to $n$. By construction of $f$, this does not exceed $\lfloor c_n/s \rfloor$. We assumed that the partition size is $s$, hence this association does not exceed the storage capacity of the nodes.
In the other direction, given an assignment $\alpha$ that respects the redundancy constraints and the storage capacities of the nodes, one can similarly check that a corresponding maximal flow function $f$ can be constructed.
\end{proof}
\textbf{Implementation remark:} In the flow algorithm, while exploring the graph, we explore the neighbours of every vertex in a random order to heuristically spread the associations between nodes and partitions.
\subsubsection*{Algorithm}
With this result in mind, we can describe the first step of our algorithm. All divisions are supposed to be integer divisions.
\begin{algorithmic}[1]
\Function{Compute Partition Size}{$\mathbf{N}$, $\mathbf{Z}$, $\mathbf{P}$, $(c_n)_{n\in \mathbf{N}}$, $\rho_\mathbf{N}$, $\rho_\mathbf{Z}$}
\State Build the graph $G=G(s=1)$
\State $ f \leftarrow$ \Call{Maximal flow}{$G$}
\If{$f.\mathrm{total flow} < \rho_\mathbf{N}P$}
\State \Return Error: capacities too small or constraints too strong.
\EndIf
\State $s^- \leftarrow 1$
\State $s^+ \leftarrow 1+\frac{1}{\rho_\mathbf{N}}\sum_{n \in \mathbf{N}} c_n$
\While{$s^-+1 < s^+$}
\State Build the graph $G=G(s=(s^-+s^+)/2)$
\State $ f \leftarrow$ \Call{Maximal flow}{$G$}
\If{$f.\mathrm{total flow} < \rho_\mathbf{N}P$}
\State $s^+ \leftarrow (s^- + s^+)/2$
\Else
\State $s^- \leftarrow (s^- + s^+)/2$
\EndIf
\EndWhile
\State \Return $s^-$
\EndFunction
\end{algorithmic}
\subsubsection*{Complexity}
To compute the maximal flow, we use Dinic's algorithm. Its complexity on general graphs is $O(\#V^2 \#E)$, but on graphs with edge capacity bounded by a constant, it turns out to be $O(\#E^{3/2})$. The graph $G$ does not fall in this case since the capacities of the arcs incoming to $\mathbf{t}$ are far from bounded. However, the proof of this complexity bound works readily for graphs where we only ask the edges \emph{not} incoming to the sink $\mathbf{t}$ to have their capacities bounded by a constant. One can find the proof of this claim in \cite[Section 2]{even1975network}.
The dichotomy adds a logarithmic factor $\log (C)$ where $C=\sum_{n \in \mathbf{N}} c_n$ is the total capacity of the cluster. The total complexity of this first function is hence
$O(\#E^{3/2}\log C ) = O\big((PN)^{3/2} \log C\big)$.
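For instance, if capacities are expressed in megabytes and the (hypothetical) total capacity is $C = 3\times 10^7$~MB (about 30~TB) with $\rho_\mathbf{N}=3$, the dichotomy explores the interval $[1,\, 1+10^7]$ and therefore needs about
$$ \lceil \log_2 10^7 \rceil = 24 $$
maximal flow computations to determine $s^*$ to the nearest megabyte.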
\subsubsection*{Metrics}
We can display the discrepancy between the computed $s^*$ and the best size we could have hoped for with the given total capacity, that is $C/\rho_\mathbf{N}$.
\subsection{Computation of a candidate assignment}
Now that we have the optimal partition size $s^*$, to compute a candidate assignment it would be enough to compute a maximal flow function $f$ on $G(s^*)$. This is what we do if there is no former assignation $\alpha'$.
If there is some $\alpha'$, we add a step that will heuristically help to obtain a candidate $\alpha$ closer to $\alpha'$. We first compute a flow function $\tilde{f}$ that uses only the partition-to-node associations appearing in $\alpha'$. Most likely, $\tilde{f}$ will not be a maximal flow of $G(s^*)$. In Dinic's algorithm, we can start from a non-maximal flow function and then discover improving paths. This is what we do by starting from $\tilde{f}$. The hope\footnote{This is only a hope, because one can find examples where the construction of $f$ from $\tilde{f}$ produces an assignment $\alpha$ that is not as close as possible to $\alpha'$.} is that the final flow function $f$ will tend to keep the associations appearing in $\tilde{f}$.
More formally, we construct the graph $G_{|\alpha'}$ from $G$ by removing all the arcs $(\mathbf{x}_{p,z},\mathbf{n}, 1)$ where $p$ is not associated to $n$ in $\alpha'$. We compute a maximal flow function $\tilde{f}$ in $G_{|\alpha'}$. The flow $\tilde{f}$ is also a valid (most likely non maximal) flow function on $G$. We compute a maximal flow function $f$ on $G$ by starting Dinic's algorithm on $\tilde{f}$.
\subsubsection*{Algorithm}
\begin{algorithmic}[1]
\Function{Compute Candidate Assignment}{$G$, $\alpha'$}
\State Build the graph $G_{|\alpha'}$
\State $ \tilde{f} \leftarrow$ \Call{Maximal flow}{$G_{|\alpha'}$}
\State $ f \leftarrow$ \Call{Maximal flow from flow}{$G$, $\tilde{f}$}
\State \Return $f$
\EndFunction
\end{algorithmic}
~
\textbf{Remark:} The function ``Maximal flow'' can be just seen as the function ``Maximal flow from flow'' called with the zero flow function as starting flow.
\subsubsection*{Complexity}
With the considerations of the last section, the complexity of Dinic's algorithm here is $O(\#E^{3/2}) = O((PN)^{3/2})$.
\subsubsection*{Metrics}
We can display the flow value of $\tilde{f}$, which is an upper bound of the distance between $\alpha$ and $\alpha'$. This is probably more suited to a Debug-level display than Info.
\subsection{Minimization of the transfer load}
Now that we have a candidate flow function $f$, we want to modify it to make its corresponding assignation $\alpha$ as close as possible to $\alpha'$. Denote by $f'$ the maximal flow corresponding to $\alpha'$, and let $d(f, \alpha')=d(f, f'):=d(\alpha,\alpha')$\footnote{It is the number of arcs of type $(\mathbf{x}_{p,z},\mathbf{n})$ saturated in one flow and not in the other.}.
We want to build a sequence $f=f_0, f_1, f_2 \dots$ of maximal flows such that $d(f_i, \alpha')$ decreases as $i$ increases. The distance being a non-negative integer, this sequence of flow functions must be finite. We now explain how to find some improving $f_{i+1}$ from $f_i$.
For any maximal flow $f$ in $G$, we define the oriented weighted graph $G_f=(V, E_f)$ as follows. The vertices of $G_f$ are the same as the vertices of $G$. $E_f$ contains the arc $(v_1,v_2, w)$ between vertices $v_1,v_2\in V$ with weight $w$ if and only if the arc $(v_1,v_2)$ is not saturated in $f$ (i.e. $c(v_1,v_2)-f(v_1,v_2) \ge 1$, we also consider reversed arcs). The weight $w$ is:
\begin{itemize}
\item $-1$ if $(v_1,v_2)$ is of type $(\mathbf{x}_{p,z},\mathbf{n})$ or $(\mathbf{n},\mathbf{x}_{p,z})$ and is saturated in only one of the two flows $f,f'$;
\item $+1$ if $(v_1,v_2)$ is of type $(\mathbf{x}_{p,z},\mathbf{n})$ or $(\mathbf{n},\mathbf{x}_{p,z})$ and is saturated in either both or none of the two flows $f,f'$;
\item $0$ otherwise.
\end{itemize}
If $\gamma$ is a simple cycle of arcs in $G_f$, we define its weight $w(\gamma)$ as the sum of the weights of its arcs. We can add $+1$ to the value of $f$ on the arcs of $\gamma$; by construction of $G_f$ and the fact that $\gamma$ is a cycle, the function that we get is still a valid flow function on $G$, and it is maximal as it has the same flow value as $f$. We denote this new function $f+\gamma$.
\begin{proposition}
Given a maximal flow $f$ and a simple cycle $\gamma$ in $G_f$, we have $d(f+\gamma, f') - d(f,f') = w(\gamma)$.
\end{proposition}
\begin{proof}
Let $X$ be the set of arcs of type $(\mathbf{x}_{p,z},\mathbf{n})$. Then we can express $d(f,f')$ as
\begin{align*}
d(f,f') & = \#\{e\in X ~|~ f(e)\neq f'(e)\}
= \sum_{e\in X} 1_{f(e)\neq f'(e)} \\
& = \frac{1}{2}\big( \#X + \sum_{e\in X} 1_{f(e)\neq f'(e)} - 1_{f(e)= f'(e)} \big).
\end{align*}
We can express the cycle weight as
\begin{align*}
w(\gamma) & = \sum_{e\in X, e\in \gamma} - 1_{f(e)\neq f'(e)} + 1_{f(e)= f'(e)}.
\end{align*}
Remark that since we passed one unit of flow along $\gamma$ to construct $f+\gamma$, we have, for any $e\in X$ appearing in $\gamma$, $f(e)=f'(e)$ if and only if $(f+\gamma)(e) \neq f'(e)$.
Hence
\begin{align*}
w(\gamma) & = \frac{1}{2}(w(\gamma) + w(\gamma)) \\
&= \frac{1}{2} \Big(
\sum_{e\in X, e\in \gamma} - 1_{f(e)\neq f'(e)} + 1_{f(e)= f'(e)} \\
& \qquad +
\sum_{e\in X, e\in \gamma} 1_{(f+\gamma)(e)\neq f'(e)} - 1_{(f+\gamma)(e)= f'(e)}
\Big).
\end{align*}
Plugging this in the previous equation, we find that
$$d(f,f')+w(\gamma) = d(f+\gamma, f').$$
\end{proof}
This result suggests that given some flow $f_i$, we just need to find a negative cycle $\gamma$ in $G_{f_i}$ to construct $f_{i+1}$ as $f_i+\gamma$. The following proposition ensures that this greedy strategy reaches an optimal flow.
\begin{proposition}
For any maximal flow $f$, $G_f$ contains a negative cycle if and only if there exists a maximal flow $f^*$ in $G$ such that $d(f^*, f') < d(f, f')$.
\end{proposition}
\begin{proof}
Suppose that there is such a flow $f^*$. Define the oriented multigraph $M_{f,f^*}=(V,E_M)$ with the same vertex set $V$ as in $G$, and for every $v_1,v_2 \in V$, $E_M$ contains $(f^*(v_1,v_2) - f(v_1,v_2))_+$ copies of the arc $(v_1,v_2)$. For every vertex $v$, its total degree (meaning its out-degree minus its in-degree) is equal to
\begin{align*}
\deg v & = \sum_{u\in V} (f^*(v,u) - f(v,u))_+ - \sum_{u\in V} (f^*(u,v) - f(u,v))_+ \\
& = \sum_{u\in V} f^*(v,u) - f(v,u) = \sum_{u\in V} f^*(v,u) - \sum_{u\in V} f(v,u).
\end{align*}
The last two sums are zero for any inner vertex since $f,f^*$ are flows, and they are equal on the source and sink since the two flows are both maximal and hence have the same value. Thus, $\deg v = 0$ for every vertex $v$.
This implies that the multigraph $M_{f,f^*}$ is the union of disjoint simple cycles. $f$ can be transformed into $f^*$ by pushing a mass 1 along all these cycles in any order. Since $d(f^*, f')<d(f,f')$, there must exist one of these simple cycles $\gamma$ with $d(f+\gamma, f') < d(f, f')$. Finally, since we can push a mass in $f$ along $\gamma$, it must appear in $G_f$. Hence $\gamma$ is a cycle of $G_f$ with negative weight.
\end{proof}
In the next section we describe the corresponding algorithm. Instead of discovering only one cycle, we are allowed to discover a set $\Gamma$ of disjoint negative cycles.
\subsubsection*{Algorithm}
\begin{algorithmic}[1]
\Function{Minimize transfer load}{$G$, $f$, $\alpha'$}
\State Build the graph $G_f$
\State $\Gamma \leftarrow$ \Call{Detect Negative Cycles}{$G_f$}
\While{$\Gamma \neq \emptyset$}
\ForAll{$\gamma \in \Gamma$}
\State $f \leftarrow f+\gamma$
\EndFor
\State Update $G_f$
\State $\Gamma \leftarrow$ \Call{Detect Negative Cycles}{$G_f$}
\EndWhile
\State \Return $f$
\EndFunction
\end{algorithmic}
\subsubsection*{Complexity}
The distance $d(f,f')$ is bounded by the maximal number of differences in the associated assignment. If these assignments are totally disjoint, this distance is $2\rho_\mathbf{N} P$. At every iteration of the While loop, the distance decreases, so there are at most $O(\rho_\mathbf{N} P) = O(P)$ iterations.
The detection of negative cycles is done with the Bellman-Ford algorithm, whose complexity should normally be $O(\#E\#V)$. In our case, it amounts to $O(P^2ZN)$. Multiplied by the complexity of the outer loop, it amounts to $O(P^3ZN)$, which is a lot when the number of partitions and nodes becomes large. To avoid that, we adapt the Bellman-Ford algorithm.
The Bellman-Ford algorithm runs $\#V$ iterations of an outer loop, and an inner loop over $E$. The idea is to compute the shortest paths from a source vertex $v$ to all other vertices. After $k$ iterations of the outer loop, the algorithm has computed all shortest paths of length at most $k$. All simple paths have length at most $\#V-1$, so if there is an update in the last iteration of the loop, it means that there is a negative cycle in the graph. The observation that will enable us to improve the complexity is the following:
\begin{proposition}
In the graph $G_f$ (and $G$), all simple paths have a length at most $4N$.
\end{proposition}
\begin{proof}
Since $f$ is a maximal flow, there is no outgoing edge from $\mathbf{s}$ in $G_f$. One can thus check that any simple path of length 4 must contain at least two nodes of type $\mathbf{n}$. Hence on a path, at most 4 arcs separate two successive nodes of type $\mathbf{n}$.
\end{proof}
Thus, in the absence of negative cycles, shortest paths in $G_f$ have length at most $4N$. So we can do only $4N+1$ iterations of the outer loop in the Bellman-Ford algorithm. This makes the complexity of the detection of one set of cycles $O(N\#E) = O(N^2 P)$.
With this improvement, the complexity of the whole algorithm is, in the worst case, $O(N^2P^2)$. However, since we detect several cycles at once and we start with a flow that might be close to the previous one, the number of iterations of the outer loop might be smaller in practice.
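With the same hypothetical values $P=256$, $N=10$, $Z=3$ as above, one negative-cycle detection costs roughly
\begin{align*}
\#V \cdot \#E &\approx 1292 \times 4618 \approx 6\times 10^6 \text{ arc relaxations with plain Bellman-Ford, versus} \\
(4N+1)\cdot \#E &\approx 41 \times 4618 \approx 1.9\times 10^5 \text{ arc relaxations with the bounded path length.}
\end{align*}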
\subsubsection*{Metrics}
We can display the node and zone utilization ratio, by dividing the flow passing through them by their outgoing capacity. In particular, we can pinpoint saturated nodes and zones (i.e. used at their full potential).
We can display the distance to the previous assignment, and the number of partition transfers.
\bibliography{optimal_layout}
\bibliographystyle{ieeetr}
\end{document}


@ -0,0 +1,11 @@
@article{even1975network,
title={Network flow and testing graph connectivity},
author={Even, Shimon and Tarjan, R Endre},
journal={SIAM journal on computing},
volume={4},
number={4},
pages={507--518},
year={1975},
publisher={SIAM}
}

Binary file not shown.


@ -0,0 +1,709 @@
\documentclass[]{article}
\usepackage{amsmath,amssymb}
\usepackage{amsthm}
\usepackage{graphicx,xcolor}
\usepackage{algorithm,algpseudocode,float}
\renewcommand\thesubsubsection{\Alph{subsubsection})}
\newtheorem{proposition}{Proposition}
%opening
\title{Optimal partition assignment in Garage}
\author{Mendes}
\begin{document}
\maketitle
\section{Introduction}
\subsection{Context}
Garage is an open-source distributed storage service blablabla$\dots$
Every object to be stored in the system falls in a partition given by the last $k$ bits of its hash. There are $P=2^k$ partitions. Every partition will be stored on distinct nodes of the system. The goal of the assignment of partitions to nodes is to ensure (nodes and zone) redundancy and to be as efficient as possible.
\subsection{Formal description of the problem}
We are given a set of nodes $\mathbf{N}$ and a set of zones $\mathbf{Z}$. Every node $n$ has a non-negative storage capacity $c_n\ge 0$ and belongs to a zone $z\in \mathbf{Z}$. We are also given a number of partitions $P>0$ (typically $P=256$).
We would like to compute an assignment of nodes to partitions. We will impose some redundancy constraints to this assignment, and under these constraints, we want our system to have the largest storage capacity possible. To link storage capacity to partition assignment, we make the following assumption:
\begin{equation}
\tag{H1}
\text{\emph{All partitions have the same size $s$.}}
\end{equation}
This assumption is justified by the dispersion of the hashing function, when the number of partitions is small relative to the number of stored objects.
Every node $n$ will store some number $k_n$ of partitions. Hence the partitions stored by $n$ (and hence all partitions, by our assumption) have their size bounded by $c_n/k_n$. This remark leads us to define the optimal size that we will want to maximize:
\begin{equation}
\label{eq:optimal}
\tag{OPT}
s^* = \min_{n \in N} \frac{c_n}{k_n}.
\end{equation}
When the capacities of the nodes are updated (this includes adding or removing a node), we want to update the assignment as well. However, transferring the data between nodes has a cost and we would like to limit the number of changes in the assignment. We make the following assumption:
\begin{equation}
\tag{H2}
\text{\emph{Updates of capacity happen rarely relative to object storing.}}
\end{equation}
This assumption justifies that when we compute the new assignment, it is worth optimizing the partition size \eqref{eq:optimal} first, and then, among the possible optimal solutions, trying to minimize the number of partition transfers.
For now, in the following, we ask the following redundancy constraint:
\textbf{Parametric node and zone redundancy:} Given two integer parameters $1\le \rho_\mathbf{Z} \le \rho_\mathbf{N}$, we ask every partition to be stored on $\rho_\mathbf{N}$ distinct nodes, and these nodes must belong to at least $\rho_\mathbf{Z}$ distinct zones.
\textbf{Mode 3-strict:} every partition needs to be assigned to three nodes belonging to three different zones.
\textbf{Mode 3:} every partition needs to be assigned to three nodes. We try to spread the three nodes over different zones as much as possible.
\textbf{Warning:} This is a working document written incrementally. The last version of the algorithm is the \textbf{parametric assignment} described in the next section.
\section{Computation of a parametric assignment}
\textbf{Attention:} We change notation in this section.
Notation: let $P$ be the number of partitions, $N$ the number of nodes, $Z$ the number of zones. Let $\mathbf{P,N,Z}$ be the label sets of, respectively, partitions, nodes and zones.
Let $s^*$ be the largest partition size achievable with the redundancy constraints. Let $(c_n)_{n\in \mathbf{N}}$ be the storage capacity of every node.
In this section, we propose a third specification of the problem. The user inputs two redundancy parameters $1\le \rho_\mathbf{Z} \le \rho_\mathbf{N}$. We compute an assignment $\alpha = (\alpha_p^1, \ldots, \alpha_p^{\rho_\mathbf{N}})_{p\in \mathbf{P}}$ such that every partition $p$ is associated to $\rho_\mathbf{N}$ distinct nodes $\alpha_p^1, \ldots, \alpha_p^{\rho_\mathbf{N}}$ and these nodes belong to at least $\rho_\mathbf{Z}$ distinct zones.
If the layout contained a previous assignment $\alpha'$, we try to minimize the amount of data to transfer during the layout update by making $\alpha$ as close as possible to $\alpha'$.
In the following subsections, we describe the successive steps of the algorithm we propose to compute $\alpha$.
\subsubsection*{Algorithm}
\begin{algorithmic}[1]
\Function{Compute Layout}{$\mathbf{N}$, $\mathbf{Z}$, $\mathbf{P}$, $(c_n)_{n\in \mathbf{N}}$, $\rho_\mathbf{N}$, $\rho_\mathbf{Z}$, $\alpha'$}
\State $s^* \leftarrow$ \Call{Compute Partition Size}{$\mathbf{N}$, $\mathbf{Z}$, $\mathbf{P}$, $(c_n)_{n\in \mathbf{N}}$, $\rho_\mathbf{N}$, $\rho_\mathbf{Z}$}
\State $G \leftarrow G(s^*)$
\State $f \leftarrow$ \Call{Compute Candidate Assignment}{$G$, $\alpha'$}
\State $f^* \leftarrow$ \Call{Minimize transfer load}{$G$, $f$, $\alpha'$}
\State Build $\alpha^*$ from $f^*$
\State \Return $\alpha^*$
\EndFunction
\end{algorithmic}
\subsubsection*{Complexity}
As we will see in the next sections, the worst case complexity of this algorithm is $O(P^2 N^2)$. The minimization of transfer load is the most expensive step, and it can run with a timeout since it is only an optimization step. Without this step (or with a smart timeout), the worst case complexity can be $O((PN)^{3/2}\log C)$ where $C$ is the total storage capacity of the cluster.
\subsection{Determination of the partition size $s^*$}
Again, we will represent an assignment $\alpha$ as a flow in a specific graph $G$. We will not compute the optimal partition size $s^*$ a priori, but we will determine it by dichotomy, as the largest size $s$ such that the maximal flow achievable on $G=G(s)$ has value $\rho_\mathbf{N}P$. We will assume that the capacities are given in a small enough unit (say, Megabytes), and we will determine $s^*$ at the precision of the given unit.
Given some candidate size value $s$, we describe the oriented weighted graph $G=(V,E)$ with vertex set $V$ and arc set $E$.
The set of vertices $V$ contains the source $\mathbf{s}$, the sink $\mathbf{t}$, vertices
$\mathbf{p^+, p^-}$ for every partition $p$, vertices $\mathbf{x}_{p,z}$ for every partition $p$ and zone $z$, and vertices $\mathbf{n}$ for every node $n$.
The set of arcs $E$ contains:
\begin{itemize}
\item ($\mathbf{s}$,$\mathbf{p}^+$, $\rho_\mathbf{Z}$) for every partition $p$;
\item ($\mathbf{s}$,$\mathbf{p}^-$, $\rho_\mathbf{N}-\rho_\mathbf{Z}$) for every partition $p$;
\item ($\mathbf{p}^+$,$\mathbf{x}_{p,z}$, 1) for every partition $p$ and zone $z$;
\item ($\mathbf{p}^-$,$\mathbf{x}_{p,z}$, $\rho_\mathbf{N}-\rho_\mathbf{Z}$) for every partition $p$ and zone $z$;
\item ($\mathbf{x}_{p,z}$,$\mathbf{n}$, 1) for every partition $p$, zone $z$ and node $n\in z$;
\item ($\mathbf{n}$, $\mathbf{t}$, $\lfloor c_n/s \rfloor$) for every node $n$.
\end{itemize}
In the following complexity calculations, we will use the number of vertices and edges of $G$. Note from now on that $\# V = O(PZ)$ and $\# E = O(PN)$.
\begin{proposition}
An assignment $\alpha$ is realizable with partition size $s$ and the redundancy constraints $(\rho_\mathbf{N},\rho_\mathbf{Z})$ if and only if there exists a maximal flow function $f$ in $G$ with total flow $\rho_\mathbf{N}P$, such that the arcs ($\mathbf{x}_{p,z}$,$\mathbf{n}$, 1) used are exactly those for which $p$ is associated to $n$ in $\alpha$.
\end{proposition}
\begin{proof}
Given such a flow $f$, we can reconstruct a candidate $\alpha$. In $f$, the flow passing through $\mathbf{p^+}$ and $\mathbf{p^-}$ is $\rho_\mathbf{N}$, and since the outgoing capacity of every $\mathbf{x}_{p,z}$ is 1, every partition is associated to $\rho_\mathbf{N}$ distinct nodes. The fraction $\rho_\mathbf{Z}$ of the flow passing through every $\mathbf{p^+}$ must be spread over $\rho_\mathbf{Z}$ distinct zones, since every arc outgoing from $\mathbf{p^+}$ has capacity 1. So the reconstructed $\alpha$ verifies the redundancy constraints. For every node $n$, the flow between $\mathbf{n}$ and $\mathbf{t}$ corresponds to the number of partitions associated to $n$. By construction of $f$, this does not exceed $\lfloor c_n/s \rfloor$. We assumed that the partition size is $s$, hence this association does not exceed the storage capacity of the nodes.
In the other direction, given an assignment $\alpha$ that respects the redundancy constraints and the storage capacities of the nodes, one can similarly check that a corresponding maximal flow function $f$ can be constructed.
\end{proof}
\textbf{Implementation remark:} In the flow algorithm, while exploring the graph, we explore the neighbours of every vertex in a random order to heuristically spread the association between nodes and partitions.
\subsubsection*{Algorithm}
With this result in mind, we can describe the first step of our algorithm. All divisions are supposed to be integer divisions.
\begin{algorithmic}[1]
\Function{Compute Partition Size}{$\mathbf{N}$, $\mathbf{Z}$, $\mathbf{P}$, $(c_n)_{n\in \mathbf{N}}$, $\rho_\mathbf{N}$, $\rho_\mathbf{Z}$}
\State Build the graph $G=G(s=1)$
\State $ f \leftarrow$ \Call{Maximal flow}{$G$}
\If{$f.\mathrm{total flow} < \rho_\mathbf{N}P$}
\State \Return Error: capacities too small or constraints too strong.
\EndIf
\State $s^- \leftarrow 1$
\State $s^+ \leftarrow 1+\frac{1}{\rho_\mathbf{N}}\sum_{n \in \mathbf{N}} c_n$
\While{$s^-+1 < s^+$}
\State Build the graph $G=G(s=(s^-+s^+)/2)$
\State $ f \leftarrow$ \Call{Maximal flow}{$G$}
\If{$f.\mathrm{total flow} < \rho_\mathbf{N}P$}
\State $s^+ \leftarrow (s^- + s^+)/2$
\Else
\State $s^- \leftarrow (s^- + s^+)/2$
\EndIf
\EndWhile
\State \Return $s^-$
\EndFunction
\end{algorithmic}
\subsubsection*{Complexity}
To compute the maximal flow, we use Dinic's algorithm. Its complexity on general graphs is $O(\#V^2 \#E)$, but on graphs with edge capacity bounded by a constant, it turns out to be $O(\#E^{3/2})$. The graph $G$ does not fall in this case since the capacities of the arcs incoming to $\mathbf{t}$ are far from bounded. However, the proof of this complexity bound works readily for graphs where we only ask the edges \emph{not} incoming to the sink $\mathbf{t}$ to have their capacities bounded by a constant. One can find the proof of this claim in \cite[Section 2]{even1975network}.
The dichotomy adds a logarithmic factor $\log (C)$ where $C=\sum_{n \in \mathbf{N}} c_n$ is the total capacity of the cluster. The total complexity of this first function is hence
$O(\#E^{3/2}\log C ) = O\big((PN)^{3/2} \log C\big)$.
\subsubsection*{Metrics}
We can display the discrepancy between the computed $s^*$ and the best size we could hope for with the given total capacity, that is $C/\rho_\mathbf{N}$.
\subsection{Computation of a candidate assignment}
Now that we have the optimal partition size $s^*$, to compute a candidate assignment, it would be enough to compute a maximal flow function $f$ on $G(s^*)$. This is what we do if there was no previous assignment $\alpha'$.
If there was some $\alpha'$, we add a step that will heuristically help to obtain a candidate $\alpha$ closer to $\alpha'$. To do so, we first compute a flow function $\tilde{f}$ that uses only the partition-to-node associations appearing in $\alpha'$. Most likely, $\tilde{f}$ will not be a maximal flow of $G(s^*)$. In Dinic's algorithm, we can start from a non-maximal flow function and then discover improving paths. This is what we do by starting from $\tilde{f}$. The hope\footnote{This is only a hope, because one can find examples where the construction of $f$ from $\tilde{f}$ produces an assignment $\alpha$ that is not as close as possible to $\alpha'$.} is that the final flow function $f$ will tend to keep the associations appearing in $\tilde{f}$.
More formally, we construct the graph $G_{|\alpha'}$ from $G$ by removing all the arcs $(\mathbf{x}_{p,z},\mathbf{n}, 1)$ where $p$ is not associated to $n$ in $\alpha'$. We compute a maximal flow function $\tilde{f}$ in $G_{|\alpha'}$. $\tilde{f}$ is also a valid (most likely non maximal) flow function in $G$. We compute a maximal flow function $f$ on $G$ by starting Dinic's algorithm on $\tilde{f}$.
\subsubsection*{Algorithm}
\begin{algorithmic}[1]
\Function{Compute Candidate Assignment}{$G$, $\alpha'$}
\State Build the graph $G_{|\alpha'}$
\State $ \tilde{f} \leftarrow$ \Call{Maximal flow}{$G_{|\alpha'}$}
\State $ f \leftarrow$ \Call{Maximal flow from flow}{$G$, $\tilde{f}$}
\State \Return $f$
\EndFunction
\end{algorithmic}
\textbf{Remark:} The function ``Maximal flow'' can be just seen as the function ``Maximal flow from flow'' called with the zero flow function as starting flow.
\subsubsection*{Complexity}
From the considerations of the last section, the complexity of Dinic's algorithm here is $O(\#E^{3/2}) = O((PN)^{3/2})$.
\subsubsection*{Metrics}
We can display the flow value of $\tilde{f}$, which is an upper bound of the distance between $\alpha$ and $\alpha'$. This is probably more suited to a Debug-level display than Info.
\subsection{Minimization of the transfer load}
Now that we have a candidate flow function $f$, we want to modify it to make its associated assignment as close as possible to $\alpha'$. Denote by $f'$ the maximal flow associated to $\alpha'$, and let $d(f, f')$ be the distance between the associated assignments\footnote{It is the number of arcs of type $(\mathbf{x}_{p,z},\mathbf{n})$ saturated in one flow and not in the other.}.
We want to build a sequence $f=f_0, f_1, f_2 \dots$ of maximal flows such that $d(f_i, \alpha')$ decreases as $i$ increases. The distance being a non-negative integer, this sequence of flow functions must be finite. We now explain how to find some improving $f_{i+1}$ from $f_i$.
For any maximal flow $f$ in $G$, we define the oriented weighted graph $G_f=(V, E_f)$ as follows. The vertices of $G_f$ are the same as the vertices of $G$. $E_f$ contains the arc $(v_1,v_2, w)$ between vertices $v_1,v_2\in V$ with weight $w$ if and only if the arc $(v_1,v_2)$ is not saturated in $f$ (i.e. $c(v_1,v_2)-f(v_1,v_2) \ge 1$, we also consider reversed arcs). The weight $w$ is:
\begin{itemize}
\item $-1$ if $(v_1,v_2)$ is of type $(\mathbf{x}_{p,z},\mathbf{n})$ or $(\mathbf{n},\mathbf{x}_{p,z})$ and is saturated in only one of the two flows $f,f'$;
\item $+1$ if $(v_1,v_2)$ is of type $(\mathbf{x}_{p,z},\mathbf{n})$ or $(\mathbf{n},\mathbf{x}_{p,z})$ and is saturated in either both or none of the two flows $f,f'$;
\item $0$ otherwise.
\end{itemize}
If $\gamma$ is a simple cycle of arcs in $G_f$, we define its weight $w(\gamma)$ as the sum of the weights of its arcs. We can add $+1$ to the value of $f$ on the arcs of $\gamma$; by construction of $G_f$ and the fact that $\gamma$ is a cycle, the function that we get is still a valid flow function on $G$, and it is maximal as it has the same flow value as $f$. We denote this new function $f+\gamma$.
\begin{proposition}
Given a maximal flow $f$ and a simple cycle $\gamma$ in $G_f$, we have $d(f+\gamma, f') - d(f,f') = w(\gamma)$.
\end{proposition}
\begin{proof}
Let $X$ be the set of arcs of type $(\mathbf{x}_{p,z},\mathbf{n})$. Then we can express $d(f,f')$ as
\begin{align*}
d(f,f') & = \#\{e\in X ~|~ f(e)\neq f'(e)\}
= \sum_{e\in X} 1_{f(e)\neq f'(e)} \\
& = \frac{1}{2}\big( \#X + \sum_{e\in X} 1_{f(e)\neq f'(e)} - 1_{f(e)= f'(e)} \big).
\end{align*}
We can express the cycle weight as
\begin{align*}
w(\gamma) & = \sum_{e\in X, e\in \gamma} - 1_{f(e)\neq f'(e)} + 1_{f(e)= f'(e)}.
\end{align*}
Remark that since we passed one unit of flow along $\gamma$ to construct $f+\gamma$, we have, for any $e\in X$ appearing in $\gamma$, $f(e)=f'(e)$ if and only if $(f+\gamma)(e) \neq f'(e)$.
Hence
\begin{align*}
w(\gamma) & = \frac{1}{2}(w(\gamma) + w(\gamma)) \\
&= \frac{1}{2} \Big(
\sum_{e\in X, e\in \gamma} - 1_{f(e)\neq f'(e)} + 1_{f(e)= f'(e)} \\
& \qquad +
\sum_{e\in X, e\in \gamma} 1_{(f+\gamma)(e)\neq f'(e)} - 1_{(f+\gamma)(e)= f'(e)}
\Big).
\end{align*}
Plugging this in the previous equation, we find that
$$d(f,f')+w(\gamma) = d(f+\gamma, f').$$
\end{proof}
This result suggests that given some flow $f_i$, we just need to find a negative cycle $\gamma$ in $G_{f_i}$ to construct $f_{i+1}$ as $f_i+\gamma$. The following proposition ensures that this greedy strategy reaches an optimal flow.
\begin{proposition}
For any maximal flow $f$, $G_f$ contains a negative cycle if and only if there exists a maximal flow $f^*$ in $G$ such that $d(f^*, f') < d(f, f')$.
\end{proposition}
\begin{proof}
Suppose that there is such a flow $f^*$. Define the oriented multigraph $M_{f,f^*}=(V,E_M)$ with the same vertex set $V$ as in $G$, and for every $v_1,v_2 \in V$, $E_M$ contains $(f^*(v_1,v_2) - f(v_1,v_2))_+$ copies of the arc $(v_1,v_2)$. For every vertex $v$, its total degree (meaning its out-degree minus its in-degree) is equal to
\begin{align*}
\deg v & = \sum_{u\in V} (f^*(v,u) - f(v,u))_+ - \sum_{u\in V} (f^*(u,v) - f(u,v))_+ \\
& = \sum_{u\in V} f^*(v,u) - f(v,u) = \sum_{u\in V} f^*(v,u) - \sum_{u\in V} f(v,u).
\end{align*}
The last two sums are zero for any inner vertex since $f,f^*$ are flows, and they are equal on the source and sink since the two flows are both maximal and hence have the same value. Thus, $\deg v = 0$ for every vertex $v$.
This implies that the multigraph $M_{f,f^*}$ is the union of disjoint simple cycles. $f$ can be transformed into $f^*$ by pushing a mass 1 along all these cycles in any order. Since $d(f^*, f')<d(f,f')$, there must exist one of these simple cycles $\gamma$ with $d(f+\gamma, f') < d(f, f')$. Finally, since we can push a mass in $f$ along $\gamma$, it must appear in $G_f$. Hence $\gamma$ is a cycle of $G_f$ with negative weight.
\end{proof}
In the next section we describe the corresponding algorithm. Instead of discovering only one cycle, we are allowed to discover a set $\Gamma$ of disjoint negative cycles.
\subsubsection*{Algorithm}
\begin{algorithmic}[1]
\Function{Minimize transfer load}{$G$, $f$, $\alpha'$}
\State Build the graph $G_f$
\State $\Gamma \leftarrow$ \Call{Detect Negative Cycles}{$G_f$}
\While{$\Gamma \neq \emptyset$}
\ForAll{$\gamma \in \Gamma$}
\State $f \leftarrow f+\gamma$
\EndFor
\State Update $G_f$
\State $\Gamma \leftarrow$ \Call{Detect Negative Cycles}{$G_f$}
\EndWhile
\State \Return $f$
\EndFunction
\end{algorithmic}
\subsubsection*{Complexity}
The distance $d(f,f')$ is bounded by the maximal number of differences in the associated assignment. If these assignments are totally disjoint, this distance is $2\rho_\mathbf{N} P$. At every iteration of the While loop, the distance decreases, so there are at most $O(\rho_\mathbf{N} P) = O(P)$ iterations.
The detection of negative cycles is done with the Bellman-Ford algorithm, whose complexity should normally be $O(\#E\#V)$. In our case, it amounts to $O(P^2ZN)$. Multiplied by the complexity of the outer loop, it amounts to $O(P^3ZN)$, which is a lot when the number of partitions and nodes becomes large. To avoid that, we adapt the Bellman-Ford algorithm.
The Bellman-Ford algorithm runs $\#V$ iterations of an outer loop, and an inner loop over $E$. The idea is to compute the shortest paths from a source vertex $v$ to all other vertices. After $k$ iterations of the outer loop, the algorithm has computed all shortest paths of length at most $k$. All simple paths have length at most $\#V-1$, so if there is an update in the last iteration of the loop, it means that there is a negative cycle in the graph. The observation that will enable us to improve the complexity is the following:
\begin{proposition}
In the graph $G_f$ (and $G$), all simple paths have a length at most $4N$.
\end{proposition}
\begin{proof}
Since $f$ is a maximal flow, there is no outgoing edge from $\mathbf{s}$ in $G_f$. One can thus check that any simple path of length 4 must contain at least two nodes of type $\mathbf{n}$. Hence on a path, at most 4 arcs separate two successive nodes of type $\mathbf{n}$.
\end{proof}
Thus, in the absence of negative cycles, shortest paths in $G_f$ have length at most $4N$. So we can do only $4N+1$ iterations of the outer loop in the Bellman-Ford algorithm. This makes the complexity of the detection of one set of cycles $O(N\#E) = O(N^2 P)$.
With this improvement, the complexity of the whole algorithm is, in the worst case, $O(N^2P^2)$. However, since we detect several cycles at once and we start with a flow that might be close to the previous one, the number of iterations of the outer loop might be smaller in practice.
\subsubsection*{Metrics}
We can display the node and zone utilization ratio, by dividing the flow passing through them by their outgoing capacity. In particular, we can pinpoint saturated nodes and zones (i.e. used at their full potential).
We can display the distance to the previous assignment, and the number of partition transfers.
\section{Properties of an optimal 3-strict assignment}
\subsection{Optimal assignment}
\label{sec:opt_assign}
For every zone $z\in Z$, define the zone capacity $c_z = \sum_{v, z_v=z} c_v$ and define $C = \sum_v c_v = \sum_z c_z$.
One can check that the best we could be doing to maximize $s^*$ would be to use the nodes proportionally to their capacity. This would yield $s^*=C/(3N)$. This is not possible because of (i) redundancy constraints and (ii) integer rounding, but it gives an upper bound.
\subsubsection*{Optimal utilization}
We call a \emph{utilization} a collection of non-negative integers $(n_v)_{v\in V}$ such that $\sum_v n_v = 3N$ and, for every zone $z$, $\sum_{v\in z} n_v \le N$. We call such a utilization \emph{optimal} if it maximizes $s^*$.
We start by computing a node sub-utilization $(\hat{n}_v)_{v\in V}$ such that for every zone $z$, $\sum_{v\in z} \hat{n}_v \le N$ and we show that there is an optimal utilization respecting the constraints and such that $\hat{n}_v \le n_v$ for every node.
Assume that there is a zone $z_0$ such that $c_{z_0}/C \ge 1/3$. Then for any $v\in z_0$, we define
$$\hat{n}_v = \left\lfloor\frac{c_v}{c_{z_0}}N\right\rfloor.$$
This choice ensures for any such $v$ that
$$
\frac{c_v}{\hat{n}_v} \ge \frac{c_{z_0}}{N} \ge \frac{C}{3N}
$$
which is the universal upper bound on $s^*$. Hence any optimal utilization $(n_v)$ can be modified to another optimal utilization such that $n_v\ge \hat{n}_v$.
Because $z_0$ cannot store more than $N$ partition occurrences, in any assignment, at least $2N$ partitions must be assigned to the zones $Z\setminus\{z_0\}$. Let $C_0 = C-c_{z_0}$. Suppose that there exists a zone $z_1\neq z_0$ such that $c_{z_1}/C_0 \ge 1/2$. Then, with the same argument as for $z_0$, we can define
$$\hat{n}_v = \left\lfloor\frac{c_v}{c_{z_1}}N\right\rfloor$$
for every $v\in z_1$.
Now we can assign the remaining partitions. Let $(\hat{N}, \hat{C})$ be
\begin{itemize}
\item $(3N,C)$ if we did not find any $z_0$;
\item $(2N,C-c_{z_0})$ if there was a $z_0$ but no $z_1$;
\item $(N,C-c_{z_0}-c_{z_1})$ if there was a $z_0$ and a $z_1$.
\end{itemize}
Then at least $\hat{N}$ partitions must be spread among the remaining zones. Hence $s^*$ is upper bounded by $\hat{C}/\hat{N}$ and without loss of generality, we can define, for every node that is not in $z_0$ nor $z_1$,
$$\hat{n}_v = \left\lfloor\frac{c_v}{\hat{C}}\hat{N}\right\rfloor.$$
We constructed a sub-utilization $\hat{n}_v$. Now notice that $3N-\sum_v \hat{n}_v \le \# V$ where $\# V$ denotes the number of nodes. We can iteratively pick a node $v^*$ such that
\begin{itemize}
\item $\sum_{v\in z_{v^*}} \hat{n}_v < N$ where $z_{v^*}$ is the zone of $v^*$;
\item $v^*$ maximizes the quantity $c_v/(\hat{n}_v+1)$ among the vertices satisfying the first condition (i.e. not in a saturated zone).
\end{itemize}
We iterate these instructions until $\sum_v \hat{n}_v= 3N$, and at this stage we define $(n_v) = (\hat{n}_v)$. It is easy to prove by induction that at every step, there is an optimal utilization that is pointwise larger than $\hat{n}_v$, and in particular, that $(n_v)$ is optimal.
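As a small hypothetical example of this construction, take $N=256$ and four nodes: $v_1$ alone in a zone $z_A$ with $c_{v_1}=100$, $v_2$ alone in a zone $z_B$ with $c_{v_2}=100$, and $v_3,v_4$ in a zone $z_C$ with $c_{v_3}=c_{v_4}=50$ (arbitrary units), so that $C=300$. Since $c_{z_A}/C = 1/3$ and then $c_{z_B}/C_0 = 1/2$, we get
\begin{align*}
z_0 = z_A&: \quad \hat{n}_{v_1} = \left\lfloor\tfrac{100}{100}\cdot 256\right\rfloor = 256, \qquad
z_1 = z_B: \quad \hat{n}_{v_2} = 256, \\
(\hat{N},\hat{C}) = (256, 100)&: \quad \hat{n}_{v_3} = \hat{n}_{v_4} = \left\lfloor\tfrac{50}{100}\cdot 256\right\rfloor = 128.
\end{align*}
Here $\sum_v \hat{n}_v = 768 = 3N$ already, so no completion step is needed, and the resulting utilization gives $s^* = 100/256 = C/(3N)$: the universal upper bound is attained in this case.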
\subsubsection*{Existence of an optimal assignment}
For now, the \emph{optimal utilization} that we obtained is just a vector of numbers and it is not clear that it can be realized as the utilization of some concrete assignment. Here is a way to get a concrete assignment.
Define $3N$ tokens $t_1,\ldots, t_{3N}\in V$ as follows:
\begin{itemize}
\item Enumerate the zones $z$ of $Z$ in any order;
\item enumerate the nodes $v$ of $z$ in any order;
\item repeat $n_v$ times the token $v$.
\end{itemize}
Then for $1\le i \le N$, define the triplet $T_i$ to be
$(t_i, t_{i+N}, t_{i+2N})$. Since the tokens of a given zone appear contiguously and every zone has at most $N$ tokens, the three nodes of a triplet must belong to three distinct zones.
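As a small hypothetical example, take $N=3$, zones $z_A=\{a,b\}$, $z_B=\{c\}$, $z_C=\{d\}$ and the utilization $n_a=2$, $n_b=1$, $n_c=3$, $n_d=3$ (it sums to $3N=9$ and puts at most $N$ tokens in each zone). The enumeration gives
$$ (t_1,\ldots,t_9) = (a,a,b,\;c,c,c,\;d,d,d), \qquad T_1=(a,c,d),\quad T_2=(a,c,d),\quad T_3=(b,c,d), $$
and each triplet indeed touches three distinct zones. Note that $a$ is always paired with $c$ and $d$, which illustrates the lack of spreading discussed next.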
However simple, this solution to go from a utilization to an assignment has the drawback of not spreading the triplets: a node will tend to be associated to the same two other nodes for many partitions. Hence, during data transfer, it will tend to use only two links, instead of spreading the bandwidth use over many other links to other nodes. To achieve this goal, we will reframe the search of an assignment as a flow problem, and in the flow algorithm, we will introduce randomness in the order of exploration. This will be sufficient to obtain a good dispersion of the triplets.
\begin{figure}
\centering
\includegraphics[width=0.9\linewidth]{figures/naive}
\caption{On the left, the creation of a concrete assignment with the naive approach of repeating tokens. On the right, the zones containing the nodes.}
\end{figure}
\subsubsection*{Assignment as a maximum flow problem}
We describe the flow problem via its graph $(X,E)$ where $X$ is a set of vertices and $E$ is a set of directed weighted edges between the vertices. For every zone $z$, define $n_z=\sum_{v\in z} n_v$.
The set of vertices $X$ contains the source $\mathbf{s}$ and the sink $\mathbf{t}$; a vertex $\mathbf{x}_z$ for every zone $z\in Z$, and a vertex $\mathbf{y}_i$ for every partition index $1\le i\le N$.
The set of edges $E$ contains
\begin{itemize}
\item the edge $(\mathbf{s}, \mathbf{x}_z, n_z)$ for every zone $z\in Z$;
\item the edge $(\mathbf{x}_z, \mathbf{y}_i, 1)$ for every zone $z\in Z$ and partition $1\le i\le N$;
\item the edge $(\mathbf{y}_i, \mathbf{t}, 3)$ for every partition $1\le i\le N$.
\end{itemize}
\begin{figure}[b]
\centering
\includegraphics[width=0.6\linewidth]{figures/flow}
\caption{Flow problem to compute an optimal assignment.}
\end{figure}
We first show the equivalence between this problem and the construction of an assignment. Given some optimal assignment $(n_v)$, define the flow $f:E\to \mathbb{N}$ that saturates every edge from $\mathbf{s}$ or to $\mathbf{t}$, takes value $1$ on the edge between $\mathbf{x}_z$ and $\mathbf{y}_i$ if partition $i$ is stored in some node of the zone $z$, and $0$ otherwise. One can easily check that $f$ thus defined is indeed a flow and is maximum.
Reciprocally, by the existence of maximum flows constructed from optimal assignments, any maximum flow must saturate the edges linked to the source or the sink. It can only take value 0 or 1 on the other edges, and every partition vertex is associated to exactly three distinct zone vertices. Every zone is associated to exactly $n_z$ partitions.
A maximum flow can be constructed using, for instance, Dinic's algorithm. This algorithm works by discovering augmenting paths to iteratively increase the flow. During the exploration of the graph to find augmenting paths, we can shuffle the order of enumeration of the neighbours to spread the associations between zones and partitions.
Once we have such an association, we can randomly distribute the $n_z$ edges picked for every zone $z$ to its nodes $v\in z$ such that every such $v$ gets $n_v$ edges. This defines an optimal assignment of partitions to nodes.
\subsection{Minimal transfer}
Assume that there was a previous assignment $(T'_i)_{1\le i\le N}$ corresponding to utilizations $(n'_v)_{v\in V}$. We would like the new computed assignment $(T_i)_{1\le i\le N}$ from some $(n_v)_{v\in V}$ to minimize the number of partitions that need to be transferred. We can imagine two different objectives corresponding to different hypotheses:
\begin{equation}
\tag{H3A}
\label{hyp:A}
\text{\emph{Transfers between different zones cost much more than inside a zone.}}
\end{equation}
\begin{equation}
\tag{H3B}
\label{hyp:B}
\text{\emph{Changing zone is not the largest cost when transferring a partition.}}
\end{equation}
In case $A$, our goal will be to minimize the number of changes of zone in the assignment of partitions to zone. More formally, we will maximize the quantity
$$
Q_Z :=
\sum_{1\le i\le N}
\#\{z\in Z ~|~ z\cap T_i \neq \emptyset, z\cap T'_i \neq \emptyset \}
.$$
In case $B$, our goal will be to minimize the number of changes of nodes in the assignment of partitions to nodes. We will maximize the quantity
$$
Q_V :=
\sum_{1\le i\le N} \#(T_i \cap T'_i).
$$
It is tempting to hope that there is a way to maximize both quantities, i.e. that having the least discrepancy in terms of nodes will lead to the least discrepancy in terms of zones. But this is actually wrong! We propose the following counter-example to convince the reader:
We consider eight nodes $a, a', b, c, d, d', e, e'$ belonging to five different zones $\{a,a'\}, \{b\}, \{c\}, \{d,d'\}, \{e, e'\}$. We take three partitions ($N=3$), that are originally assigned with some utilization $(n'_v)_{v\in V}$ as follows:
$$
T'_1=(a,b,c) \qquad
T'_2=(a',b,d) \qquad
T'_3=(b,c,e).
$$
This assignment, with updated utilizations $(n_v)_{v\in V}$, minimizes the number of zone changes:
$$
T_1=(d,b,c) \qquad
T_2=(a,b,d) \qquad
T_3=(b,c,e').
$$
This one, with the same utilization, minimizes the number of node changes:
$$
T_1=(a,b,c) \qquad
T_2=(e',b,d) \qquad
T_3=(b,c,d').
$$
One can check that in this case, it is impossible to minimize both the number of zone and node changes.
Because of the redundancy constraint, we cannot use a greedy algorithm to just replace nodes in the triplets to try to get the new utilization rate: this could lead to a blocking situation where there is still a hole to fill in a triplet but no available node satisfies the zone separation constraint. To circumvent this issue, we propose an algorithm based on finding cycles in a graph encoding of the assignment. As in section \ref{sec:opt_assign}, we can explore the neighbours in a random order in the graph algorithms, to spread the triplet distribution.
\subsubsection{Minimizing the zone discrepancy}
First, notice that, given an assignment of partitions to \emph{zones}, it is easy to deduce an assignment to \emph{nodes} that minimizes the number of transfers for this zone assignment: for every zone $z$ and every node $v\in z$, pick in any way a set $P_v$ of partitions that were assigned to $v$ in $T'$ and to $z_v$ in $T$, with the cardinality of $P_v$ at most $n_v$. Once all these sets are chosen, complement the assignment to reach the right utilization for every node. If $\#P_v < n_v$, it means that all the partitions that could stay in $v$ (i.e. that were already in $v$ and are still assigned to its zone) do stay in $v$. If $\#P_v = n_v$, then $n_v$ partitions stay in $v$, which is the number of partitions that need to be in $v$ in the end. In both cases, we could not hope for better given the partition to zone assignment.
Our goal now is to find an assignment of partitions to zones that minimizes the number of zone transfers. To do so we are going to represent an assignment as a graph.
Let $G_T=(X,E_T)$ be the directed weighted graph with vertices $(\mathbf{x}_i)_{1\le i\le N}$ and $(\mathbf{y}_z)_{z\in Z}$. For any $1\le i\le N$ and $z\in Z$, $E_T$ contains the arc:
\begin{itemize}
\item $(\mathbf{x}_i, \mathbf{y}_z, +1)$, if $z$ appears in $T_i'$ and $T_i$;
\item $(\mathbf{x}_i, \mathbf{y}_z, -1)$, if $z$ appears in $T_i$ but not in $T'_i$;
\item $(\mathbf{y}_z, \mathbf{x}_i, -1)$, if $z$ appears in $T'_i$ but not in $T_i$;
\item $(\mathbf{y}_z, \mathbf{x}_i, +1)$, if $z$ does not appear in $T'_i$ nor in $T_i$.
\end{itemize}
In other words, the orientation of the arc encodes whether partition $i$ is stored in zone $z$ in the assignment $T$ and the weight $\pm 1$ encodes whether this corresponds to what happens in the assignment $T'$.
\begin{figure}[t]
\centering
\begin{minipage}{.40\linewidth}
\centering
\includegraphics[width=.8\linewidth]{figures/mini_zone}
\end{minipage}
\begin{minipage}{.55\linewidth}
\centering
\includegraphics[width=.8\linewidth]{figures/mini_node}
\end{minipage}
\caption{On the left: the graph $G_T$ encoding an assignment to minimize the zone discrepancy. On the right: the graph $G_T$ encoding an assignment to minimize the node discrepancy.}
\end{figure}
Notice that at every partition, there are three outgoing arcs, and at every zone, there are $n_z$ incoming arcs. Moreover, if $w(e)$ is the weight of an arc $e$, define the weight of $G_T$ by
\begin{align*}
w(G_T) := \sum_{e\in E_T} w(e) &= \#Z \times N - 4 \sum_{1\le i\le N} \#\{z\in Z ~|~ z\cap T_i = \emptyset, z\cap T'_i \neq \emptyset\} \\
&=\#Z \times N - 4 \sum_{1\le i\le N} \big(3- \#\{z\in Z ~|~ z\cap T_i \neq \emptyset, z\cap T'_i \neq \emptyset\}\big) \\
&= (\#Z-12)N + 4 Q_Z.
\end{align*}
Hence maximizing $Q_Z$ is equivalent to maximizing $w(G_T)$.
Assume that there exists some assignment $T^*$ with the same utilization $(n_v)_{v\in V}$. Define $G_{T^*}$ similarly and consider the set $E_\mathrm{Diff} = E_T \setminus E_{T^*}$ of arcs that appear only in $G_T$. Since every vertex has the same number of incoming arcs in $G_T$ and $G_{T^*}$, every vertex of the graph $(X, E_\mathrm{Diff})$ has the same number of incoming and outgoing arcs. So $E_\mathrm{Diff}$ can be expressed as a union of disjoint cycles. Moreover, the arcs of $E_\mathrm{Diff}$ must appear in $E_{T^*}$ with reversed orientation and opposite weight. Hence, we have
$$
w(G_T) - w(G_{T^*}) = 2 \sum_{e\in E_\mathrm{Diff}} w(e).
$$
Hence, if $T$ is not optimal, there exists some $T^*$ with $w(G_T) < w(G_{T^*})$, and by the considerations above, there must exist a cycle in $E_\mathrm{Diff}$, and hence in $G_T$, with negative weight. If we reverse the orientations and weights of the arcs along this cycle, we obtain a new graph. Since we did not change the incoming degree of any vertex, this new graph is the encoding of some valid assignment $T^+$ such that $w(G_{T^+}) > w(G_T)$. We can iterate this operation until there is no assignment $T^*$ with larger weight, that is, until we obtain an optimal assignment.
\subsubsection{Minimizing the node discrepancy}
We will follow an approach similar to the one where we minimize the zone discrepancy. Here we will directly obtain a node assignment from a graph encoding.
Let $G_T=(X,E_T)$ be the directed weighted graph with vertices $(\mathbf{x}_i)_{1\le i\le N}$, $(\mathbf{y}_{z,i})_{z\in Z, 1\le i\le N}$ and $(\mathbf{u}_v)_{v\in V}$. For any $1\le i\le N$ and $z\in Z$, $E_T$ contains the arc:
\begin{itemize}
\item $(\mathbf{x}_i, \mathbf{y}_{z,i}, 0)$, if $z$ appears in $T_i$;
\item $(\mathbf{y}_{z,i}, \mathbf{x}_i, 0)$, if $z$ does not appear in $T_i$.
\end{itemize}
For any $1\le i\le N$ and $v\in V$, $E_T$ contains the arc:
\begin{itemize}
\item $(\mathbf{y}_{z_v,i}, \mathbf{u}_v, +1)$, if $v$ appears in $T_i'$ and $T_i$;
\item $(\mathbf{y}_{z_v,i}, \mathbf{u}_v, -1)$, if $v$ appears in $T_i$ but not in $T'_i$;
\item $(\mathbf{u}_v, \mathbf{y}_{z_v,i}, -1)$, if $v$ appears in $T'_i$ but not in $T_i$;
\item $(\mathbf{u}_v, \mathbf{y}_{z_v,i}, +1)$, if $v$ does not appear in $T'_i$ nor in $T_i$.
\end{itemize}
Every vertex $\mathbf{x}_i$ has outgoing degree 3, every vertex $\mathbf{y}_{z,i}$ has outgoing degree 1, and every vertex $\mathbf{u}_v$ has incoming degree $n_v$.
Remark that any graph respecting these degree constraints is the encoding of a valid assignment with utilizations $(n_v)_{v\in V}$, in particular no partition is stored in two nodes of the same zone.
We define $w(G_T)$ similarly:
\begin{align*}
w(G_T) := \sum_{e\in E_T} w(e) &= \#V \times N - 4\sum_{1\le i\le N} \big(3-\#(T_i\cap T'_i)\big) \\
&= (\#V-12)N + 4Q_V.
\end{align*}
Exactly like in the previous section, the existence of an assignment with larger weight implies the existence of a negatively weighted cycle in $G_T$. Reversing this cycle gives us the encoding of a valid assignment with a larger weight. Iterating this operation yields an optimal assignment.
\subsubsection{Linear combination of both criteria}
In the graph $G_T$ defined in the previous section, instead of using weights $0$ and $\pm 1$, we can use weights $\pm\alpha$ between the $\mathbf{x}$ and $\mathbf{y}$ vertices, and weights $\pm\beta$ between the $\mathbf{y}$ and $\mathbf{u}$ vertices, for some $\alpha,\beta>0$ (the weight is positive if the arc corresponds to what happens in $T'$ and negative otherwise). Then
\begin{align*}
w(G_T) &= \sum_{e\in E_T} w(e) =
\alpha \big( (\#Z-12)N + 4 Q_Z\big) +
\beta \big( (\#V-12)N + 4 Q_V\big) \\
&= \mathrm{const}+ 4(\alpha Q_Z + \beta Q_V).
\end{align*}
So maximizing the weight of such a graph encoding is equivalent to maximizing a linear combination of $Q_Z$ and $Q_V$.
\subsection{Algorithm}
We give a high-level description of the algorithm to compute an optimal 3-strict assignment. The three subroutines called in Algorithm \ref{alg:total} are described by Algorithms \ref{alg:util}, \ref{alg:opt} and \ref{alg:mini} respectively.
\begin{algorithm}[H]
\caption{Optimal 3-strict assignment}
\label{alg:total}
\begin{algorithmic}[1]
\Function{Optimal 3-strict assignment}{$N$, $(c_v)_{v\in V}$, $T'$}
\State $(n_v)_{v\in V} \leftarrow$ \Call{Compute optimal utilization}{$N$, $(c_v)_{v\in V}$}
\State $(T_i)_{1\le i\le N} \leftarrow$ \Call{Compute candidate assignment}{$N$, $(n_v)_{v\in V}$}
\If {there was a previous assignment $T'$}
\State $T \leftarrow$ \Call{Minimization of transfers}{$(T_i)_{1\le i\le N}$, $(T'_i)_{1\le i\le N}$}
\EndIf
\State \Return $T$.
\EndFunction
\end{algorithmic}
\end{algorithm}
We now discuss the worst-case complexity of these algorithms. In the following, we assume $N>\#V>\#Z$. The complexity of Algorithm \ref{alg:total} is $O(N^3\# Z)$ if we assume \eqref{hyp:A} and $O(N^3 \#Z \#V)$ if we assume \eqref{hyp:B}.
Algorithm \ref{alg:util} can be implemented with complexity $O(\#V^2)$. The complexity of the function call at line \ref{lin:subutil} is $O(\#V)$. The difference between the sum of the subutilizations and $3N$ is at most the sum of the rounding errors made when computing the $\hat{n}_v$; hence it is bounded by $\#V$, and the loop at line \ref{lin:loopsub} is iterated at most $\#V$ times. Finding the optimal $v$ at line \ref{lin:findmin} takes $O(\#V)$ operations (naively; we could also use a heap).
Algorithm \ref{alg:opt} can be implemented with complexity $O(N^3\times \#Z)$. The flow graph has $O(N+\#Z)$ vertices and $O(N\times \#Z)$ edges. Dinic's algorithm has complexity $O(\#\mathrm{Vertices}^2\#\mathrm{Edges})$ hence in our case it is $O(N^3\times \#Z)$.
Algorithm \ref{alg:mini} can be implemented with complexity $O(N^3\# Z)$ under \eqref{hyp:A} and $O(N^3 \#Z \#V)$ under \eqref{hyp:B}.
The graph $G_T$ has $O(N)$ vertices and $O(N\times \#Z)$ edges under assumption \eqref{hyp:A} and respectively $O(N\times \#Z)$ vertices and $O(N\times \#V)$ edges under assumption \eqref{hyp:B}. The loop at line \ref{lin:repeat} is iterated at most $N$ times since the distance between $T$ and $T'$ decreases at every iteration. Bellman-Ford algorithm has complexity $O(\#\mathrm{Vertices}\#\mathrm{Edges})$, which in our case amounts to $O(N^2\# Z)$ under \eqref{hyp:A} and $O(N^2 \#Z \#V)$ under \eqref{hyp:B}.
\begin{algorithm}
\caption{Computation of the optimal utilization}
\label{alg:util}
\begin{algorithmic}[1]
\Function{Compute optimal utilization}{$N$, $(c_v)_{v\in V}$}
\State $(\hat{n}_v)_{v\in V} \leftarrow $ \Call{Compute subutilization}{$N$, $(c_v)_{v\in V}$} \label{lin:subutil}
\While{$\sum_{v\in V} \hat{n}_v < 3N$} \label{lin:loopsub}
\State Pick $v\in V$ maximizing $\frac{c_v}{\hat{n}_v+1}$ and such that
$\sum_{v'\in z_v} \hat{n}_{v'} < N$ \label{lin:findmin}
\State $\hat{n}_v \leftarrow \hat{n}_v+1$
\EndWhile
\State \Return $(\hat{n}_v)_{v\in V}$
\EndFunction
\State
\Function{Compute subutilization}{$N$, $(c_v)_{v\in V}$}
\State $R \leftarrow 3$
\For{$v\in V$}
\State $\hat{n}_v \leftarrow \mathrm{unset}$
\EndFor
\For{$z\in Z$}
\State $c_z \leftarrow \sum_{v\in z} c_v$
\EndFor
\State $C \leftarrow \sum_{z\in Z} c_z$
\While{$\exists z \in Z$ such that $R\times c_{z} > C$}
\For{$v\in z$}
\State $\hat{n}_v \leftarrow \left\lfloor \frac{c_v}{c_z} N \right\rfloor$
\EndFor
\State $C \leftarrow C-c_z$
\State $R\leftarrow R-1$
\EndWhile
\For{$v\in V$}
\If{$\hat{n}_v = \mathrm{unset}$}
\State $\hat{n}_v \leftarrow \left\lfloor \frac{Rc_v}{C} N \right\rfloor$
\EndIf
\EndFor
\State \Return $(\hat{n}_v)_{v\in V}$
\EndFunction
\end{algorithmic}
\end{algorithm}
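As a concrete illustration of Algorithm \ref{alg:util}, here is a minimal Rust sketch. It is illustrative only (not the code used in Garage): it assumes positive integer capacities grouped by zone, returns the utilizations with the same grouping, and implements the greedy completion by picking the node maximizing $c_v/(\hat{n}_v+1)$ among the zones that store fewer than $N$ partitions.
\begin{verbatim}
// Sketch of Algorithm "Computation of the optimal utilization".
// zones[z][v] is the capacity c_v of node v of zone z (assumed positive).
fn compute_optimal_utilization(n_partitions: u64, zones: &[Vec<u64>]) -> Vec<Vec<u64>> {
    let n_zones = zones.len();

    // ---- Compute subutilization ----
    let zone_cap: Vec<u64> = zones.iter().map(|z| z.iter().sum()).collect();
    let mut c: u64 = zone_cap.iter().sum(); // remaining total capacity C
    let mut r: u64 = 3;                     // remaining redundancy R
    let mut done = vec![false; n_zones];
    let mut nhat: Vec<Vec<Option<u64>>> =
        zones.iter().map(|z| vec![None; z.len()]).collect();
    loop {
        // Find a zone holding more than a 1/R fraction of the remaining
        // capacity; cap it at (roughly) N partitions, split proportionally.
        let over = (0..n_zones).find(|&z| !done[z] && r * zone_cap[z] > c);
        let z = match over {
            Some(z) => z,
            None => break,
        };
        for (v, &cv) in zones[z].iter().enumerate() {
            nhat[z][v] = Some(cv * n_partitions / zone_cap[z]); // floor(c_v/c_z * N)
        }
        c -= zone_cap[z];
        r -= 1;
        done[z] = true;
    }
    // Remaining nodes get a share proportional to their capacity.
    let mut util: Vec<Vec<u64>> = (0..n_zones)
        .map(|z| {
            zones[z]
                .iter()
                .enumerate()
                .map(|(v, &cv)| nhat[z][v].unwrap_or_else(|| r * cv * n_partitions / c))
                .collect()
        })
        .collect();

    // ---- Greedy completion up to 3N ----
    while util.iter().flatten().sum::<u64>() < 3 * n_partitions {
        // Pick the node maximizing c_v/(n_v+1), among zones holding < N partitions.
        let mut best: Option<(usize, usize, f64)> = None;
        for z in 0..n_zones {
            if util[z].iter().sum::<u64>() >= n_partitions {
                continue;
            }
            for (v, &cv) in zones[z].iter().enumerate() {
                let score = cv as f64 / (util[z][v] + 1) as f64;
                if best.map_or(true, |(_, _, s)| score > s) {
                    best = Some((z, v, score));
                }
            }
        }
        let (z, v, _) = best.expect("not enough zones or capacity for N partitions");
        util[z][v] += 1;
    }
    util
}
\end{verbatim}
For instance, with four single-node zones of capacities $4, 3, 2, 1$ and $N=4$, this sketch returns the utilizations $(4,4,3,1)$.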
\begin{algorithm}
\caption{Computation of a candidate assignment}
\label{alg:opt}
\begin{algorithmic}[1]
\Function{Compute candidate assignment}{$N$, $(n_v)_{v\in V}$}
\State Compute the flow graph $G$
\State Compute the maximal flow $f$ using Dinic's algorithm with randomized neighbours enumeration
\State Construct the assignment $(T_i)_{1\le i\le N}$ from $f$
\State \Return $(T_i)_{1\le i\le N}$
\EndFunction
\end{algorithmic}
\end{algorithm}
\begin{algorithm}
\caption{Minimization of the number of transfers}
\label{alg:mini}
\begin{algorithmic}[1]
\Function{Minimization of transfers}{$(T_i)_{1\le i\le N}$, $(T'_i)_{1\le i\le N}$}
\State Construct the graph encoding $G_T$
\Repeat \label{lin:repeat}
\State Find a negative cycle $\gamma$ using Bellman-Ford algorithm on $G_T$
\State Reverse the orientations and weights of edges in $\gamma$
\Until{no negative cycle is found}
\State Update $(T_i)_{1\le i\le N}$ from $G_T$
\State \Return $(T_i)_{1\le i\le N}$
\EndFunction
\end{algorithmic}
\end{algorithm}
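To illustrate the structure of Algorithm \ref{alg:mini}, here is a self-contained Rust sketch of the cycle-canceling loop. It is not Garage's implementation: it assumes the graph encoding $G_T$ has already been built as a list of weighted arcs, and the negative-cycle search is the standard Bellman--Ford procedure.
\begin{verbatim}
#[derive(Clone, Copy)]
struct Arc {
    from: usize,
    to: usize,
    weight: i64,
}

/// Standard Bellman-Ford negative-cycle detection, starting from distance 0
/// on every vertex. Returns the arc indices of one negative cycle, if any.
fn find_negative_cycle(n_vertices: usize, arcs: &[Arc]) -> Option<Vec<usize>> {
    let mut dist = vec![0i64; n_vertices];
    let mut pred: Vec<Option<usize>> = vec![None; n_vertices];
    let mut last_relaxed = None;
    for _ in 0..n_vertices {
        last_relaxed = None;
        for (i, a) in arcs.iter().enumerate() {
            if dist[a.from] + a.weight < dist[a.to] {
                dist[a.to] = dist[a.from] + a.weight;
                pred[a.to] = Some(i);
                last_relaxed = Some(a.to);
            }
        }
        if last_relaxed.is_none() {
            return None; // no relaxation in a full pass: no negative cycle
        }
    }
    // A vertex relaxed in the last pass reaches a negative cycle: walk the
    // predecessor arcs n times to land on the cycle, then extract it.
    let mut v = last_relaxed?;
    for _ in 0..n_vertices {
        v = arcs[pred[v]?].from;
    }
    let start = v;
    let mut cycle = Vec::new();
    loop {
        let i = pred[v]?;
        cycle.push(i);
        v = arcs[i].from;
        if v == start {
            break;
        }
    }
    cycle.reverse();
    Some(cycle)
}

/// Reverse the orientation and negate the weight of every arc of the cycle.
fn reverse_cycle(arcs: &mut [Arc], cycle: &[usize]) {
    for &i in cycle {
        let a = &mut arcs[i];
        std::mem::swap(&mut a.from, &mut a.to);
        a.weight = -a.weight;
    }
}

/// Repeat until G_T has no negative cycle; the improved assignment (T_i)
/// is then read back from the arc orientations.
fn minimize_transfers(n_vertices: usize, arcs: &mut [Arc]) {
    loop {
        let cycle = match find_negative_cycle(n_vertices, arcs) {
            Some(c) => c,
            None => break,
        };
        reverse_cycle(arcs, &cycle);
    }
}
\end{verbatim}
Each reversal strictly increases $w(G_T)$, and as noted above the loop is iterated at most $N$ times since the distance between $T$ and $T'$ decreases at every iteration.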
\newpage
\section{Computation of a 3-non-strict assignment}
\subsection{Choices of optimality}
In this mode, we primarily want to store every partition on three nodes, and only secondarily try to spread the nodes among different zones. So we make the choice of not taking the zone repartition into account in the criterion of optimality.
We try to maximize $s^*$ defined in \eqref{eq:optimal}. So we can compute the optimal utilizations $(n_v)_{v\in V}$ with the only constraint that $n_v \le N$ for every node $v$. As in the previous section, we start with a sub-utilization proportional to $c_v$ (and capped at $N$), and we iteratively increase the $\hat{n}_v$ that is less than $N$ and maximizes the quantity $c_v/(\hat{n}_v+1)$, until the total sum is $3N$.
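For instance (a purely illustrative example), with four nodes of capacities $(c_v)=(5,3,2,2)$ and $N=4$: the capped proportional subutilization is $\hat{n}=(4,3,2,2)$ (note that $\lfloor 5/12\cdot 12\rfloor=5$ is capped at $N=4$), of sum $11<3N=12$; the greedy step then increments $v_2$, which maximizes $c_v/(\hat{n}_v+1)=3/4$ among the nodes with $\hat{n}_v<N$, yielding $(n_v)=(4,4,2,2)$. One can check that no other choice of utilizations summing to $3N$ with $n_v\le N$ achieves a larger value of $\min_v c_v/n_v$ (here $3/4$).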
\subsection{Computation of a candidate assignment}
To compute a candidate assignment (that does not yet optimize zone spreading nor distance to a previous assignment), we can use the following flow problem.
Define the oriented weighted graph $(X,E)$. The set of vertices $X$ contains the source $\mathbf{s}$, the sink $\mathbf{t}$, vertices
$\mathbf{x}_p, \mathbf{u}^+_p, \mathbf{u}^-_p$ for every partition $p$, vertices $\mathbf{y}_{p,z}$ for every partition $p$ and zone $z$, and vertices $\mathbf{z}_v$ for every node $v$.
The set of edges is composed of the following arcs:
\begin{itemize}
\item ($\mathbf{s}$,$\mathbf{x}_p$, 3) for every partition $p$;
\item ($\mathbf{x}_p$,$\mathbf{u}^+_p$, 3) for every partition $p$;
\item ($\mathbf{x}_p$,$\mathbf{u}^-_p$, 2) for every partition $p$;
\item ($\mathbf{u}^+_p$,$\mathbf{y}_{p,z}$, 1) for every partition $p$ and zone $z$;
\item ($\mathbf{u}^-_p$,$\mathbf{y}_{p,z}$, 2) for every partition $p$ and zone $z$;
\item ($\mathbf{y}_{p,z}$,$\mathbf{z}_v$, 1) for every partition $p$, zone $z$ and node $v\in z$;
\item ($\mathbf{z}_v$, $\mathbf{t}$, $n_v$) for every node $v$;
\end{itemize}
One can check that any maximal flow in this graph corresponds to an assignment of partitions to nodes. In such a flow, all the arcs from $\mathbf{s}$ and to $\mathbf{t}$ are saturated. The arc from $\mathbf{y}_{p,z}$ to $\mathbf{z}_v$ is saturated if and only if $p$ is associated to~$v$.
Finally the flow from $\mathbf{x}_p$ to $\mathbf{y}_{p,z}$ can go either through $\mathbf{u}^+_p$ or $\mathbf{u}^-_p$.
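To make the construction concrete, here is a minimal Rust sketch that builds the arc list of this flow graph. It is not taken from the Garage codebase; vertex and function names are illustrative, and a generic max-flow routine (e.g.\ Dinic's algorithm) is assumed to be available separately.
\begin{verbatim}
#[derive(Clone, Copy, Debug)]
enum Vertex {
    Source,
    Sink,
    X(usize),        // x_p, one per partition p
    UPlus(usize),    // u^+_p
    UMinus(usize),   // u^-_p
    Y(usize, usize), // y_{p,z}, one per (partition, zone)
    Z(usize),        // z_v, one per node v
}

/// Arcs are (from, to, capacity), following the list given above.
fn build_flow_graph(
    n_partitions: usize,
    zones: &[Vec<usize>], // zones[z] = list of node ids in zone z
    n_v: &[u64],          // target utilization n_v, indexed by node id
) -> Vec<(Vertex, Vertex, u64)> {
    let mut arcs = Vec::new();
    for p in 0..n_partitions {
        arcs.push((Vertex::Source, Vertex::X(p), 3));
        arcs.push((Vertex::X(p), Vertex::UPlus(p), 3));
        arcs.push((Vertex::X(p), Vertex::UMinus(p), 2));
        for (z, nodes) in zones.iter().enumerate() {
            arcs.push((Vertex::UPlus(p), Vertex::Y(p, z), 1));
            arcs.push((Vertex::UMinus(p), Vertex::Y(p, z), 2));
            for &v in nodes {
                arcs.push((Vertex::Y(p, z), Vertex::Z(v), 1));
            }
        }
    }
    for (v, &nv) in n_v.iter().enumerate() {
        arcs.push((Vertex::Z(v), Vertex::Sink, nv));
    }
    arcs
}
\end{verbatim}
A maximal flow on these arcs then yields the candidate assignment: $p$ is stored on $v$ whenever the arc from $\mathbf{y}_{p,z_v}$ to $\mathbf{z}_v$ is saturated.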
\subsection{Maximal spread and minimal transfers}
Notice that if the arc $\mathbf{u}_p^+\mathbf{y}_{p,z}$ is not saturated but there is some flow in $\mathbf{u}_p^-\mathbf{y}_{p,z}$, then it is possible to transfer a unit of flow from the path $\mathbf{x}_p\mathbf{u}_p^-\mathbf{y}_{p,z}$ to the path $\mathbf{x}_p\mathbf{u}_p^+\mathbf{y}_{p,z}$. So we can always find an equivalent maximal flow $f^*$ that uses the path through $\mathbf{u}_p^-$ only if the path through $\mathbf{u}_p^+$ is saturated.
We will use this fact to consider the amount of flow going through the vertices $\mathbf{u}^+$ as a measure of how well the partitions are spread over nodes belonging to different zones. If the partition $p$ is associated to 3 different zones, then a flow of 3 will cross $\mathbf{u}_p^+$ in $f^*$ (i.e. a flow of 0 will cross $\mathbf{u}_p^-$). If $p$ is associated to two zones, a flow of $2$ will cross $\mathbf{u}_p^+$. If $p$ is associated to a single zone, a flow of $1$ will cross $\mathbf{u}_p^+$.
Let $N_1, N_2, N_3$ be the number of partitions associated to respectively 1, 2 and 3 distinct zones. We will optimize a linear combination of these variables by discovering negatively weighted cycles in a graph and reversing them.
In the same step, we will also optimize the distance to the previous assignment $T'$. Let $\alpha> \beta> \gamma \ge 0$ be three parameters.
Given the flow $f$, let $G_f=(X',E_f)$ be the multi-graph where $X' = X\setminus\{\mathbf{s},\mathbf{t}\}$. The set $E_f$ is composed of the arcs:
\begin{itemize}
\item As many arcs from $(\mathbf{x}_p, \mathbf{u}^+_p,\alpha), (\mathbf{x}_p, \mathbf{u}^+_p,\beta), (\mathbf{x}_p, \mathbf{u}^+_p,\gamma)$ (selected in this order) as there is flow crossing $\mathbf{u}^+_p$ in $f$;
\item As many arcs from $(\mathbf{u}^+_p, \mathbf{x}_p,-\gamma), (\mathbf{u}^+_p, \mathbf{x}_p,-\beta), (\mathbf{u}^+_p, \mathbf{x}_p,-\alpha)$ (selected in this order) as there is flow crossing $\mathbf{u}^-_p$ in $f$;
\item As many copies of $(\mathbf{x}_p, \mathbf{u}^-_p,0)$ as there is flow through $\mathbf{u}^-_p$;
\item As many copies of $(\mathbf{u}^-_p,\mathbf{x}_p,0)$ so that the number of arcs between these two vertices is 2;
\item $(\mathbf{u}^+_p,\mathbf{y}_{p,z}, 0)$ if the flow between these vertices is 1, and the opposite arc otherwise;
\item as many copies of $(\mathbf{u}^-_p,\mathbf{y}_{p,z}, 0)$ as there is flow between these vertices, and as many copies of the opposite arc as $2$ minus that flow;
\item $(\mathbf{y}_{p,z},\mathbf{z}_v, \pm1)$ if it is saturated in $f$, with $+1$ if $v\in T'_p$ and $-1$ otherwise;
\item $(\mathbf{z}_v,\mathbf{y}_{p,z}, \pm1)$ if it is not saturated in $f$, with $+1$ if $v\notin T'_p$ and $-1$ otherwise.
\end{itemize}
To summarize, arcs are oriented left to right if they correspond to a presence of flow in $f$, and right to left if they correspond to an absence of flow. They are positively weighted if we want them to stay in their current state, and negatively weighted if we want them to switch. Let us compute the weight of such a graph.
\begin{multline*}
w(G_f) = \sum_{e\in E_f} w(e) \\
= (\alpha - \beta -\gamma) N_1 + (\alpha +\beta - \gamma) N_2 + (\alpha+\beta+\gamma) N_3 \\
+ \#V\times N - 4 \sum_p \big(3-\#(T_p\cap T'_p)\big) \\
= (\#V-12+\alpha-\beta-\gamma)\times N + 4Q_V + 2\beta N_2 + 2(\beta+\gamma) N_3.
\end{multline*}
As for the 3-strict mode, one can check that the difference of two such graphs corresponding to the same $(n_v)$ is always Eulerian. Hence we can navigate in this class of graphs with the same greedy algorithm that discovers negative cycles and reverses them.
The function that we optimize is
$$
2Q_V + \beta N_2 + (\beta+\gamma) N_3.
$$
The choice of parameters $\beta$ and $\gamma$ should be led by the following questions: for $\beta$, where do we put the tradeoff between zone dispersion and distance to the previous configuration? For $\gamma$, do we prefer to have more partitions spread between 2 zones, or fewer partitions spread between at least 2 zones but more spread between 3 zones?
The quantity $Q_V$ varies between $0$ and $3N$, and should be of order $N$. The quantity $N_2+N_3$ should also be of order $N$ (it is exactly $N$ in the strict mode). So the two terms of the objective are comparable.
\bibliography{optimal_layout}
\bibliographystyle{ieeetr}
\end{document}

View File

@ -189,7 +189,7 @@ let
rootFeatures = if features != null then
features
else
([ "garage/bundled-libs" "garage/sled" "garage/k2v" ] ++ (if release then [
([ "garage/bundled-libs" "garage/sled" "garage/lmdb" "garage/k2v" ] ++ (if release then [
"garage/consul-discovery"
"garage/kubernetes-discovery"
"garage/metrics"

View File

@ -9,11 +9,22 @@ GARAGE_RELEASE="${REPO_FOLDER}/target/release/"
NIX_RELEASE="${REPO_FOLDER}/result/bin/"
PATH="${GARAGE_DEBUG}:${GARAGE_RELEASE}:${NIX_RELEASE}:$PATH"
garage -c /tmp/config.1.toml bucket create eprouvette
KEY_INFO=$(garage -c /tmp/config.1.toml key new --name opérateur)
if [ -z "$GARAGE_BIN" ]; then
GARAGE_BIN=$(which garage || exit 1)
echo -en "Found garage at: ${GARAGE_BIN}\n"
else
echo -en "Using garage binary at: ${GARAGE_BIN}\n"
fi
$GARAGE_BIN -c /tmp/config.1.toml bucket create eprouvette
if [ "$GARAGE_08" = "1" ]; then
KEY_INFO=$($GARAGE_BIN -c /tmp/config.1.toml key new --name opérateur)
else
KEY_INFO=$($GARAGE_BIN -c /tmp/config.1.toml key create opérateur)
fi
ACCESS_KEY=`echo $KEY_INFO|grep -Po 'GK[a-f0-9]+'`
SECRET_KEY=`echo $KEY_INFO|grep -Po 'Secret key: [a-f0-9]+'|grep -Po '[a-f0-9]+$'`
garage -c /tmp/config.1.toml bucket allow eprouvette --read --write --owner --key $ACCESS_KEY
$GARAGE_BIN -c /tmp/config.1.toml bucket allow eprouvette --read --write --owner --key $ACCESS_KEY
echo "$ACCESS_KEY $SECRET_KEY" > /tmp/garage.s3
echo "Bucket s3://eprouvette created. Credentials stored in /tmp/garage.s3."

View File

@ -11,11 +11,16 @@ PATH="${GARAGE_DEBUG}:${GARAGE_RELEASE}:${NIX_RELEASE}:$PATH"
FANCYCOLORS=("41m" "42m" "44m" "45m" "100m" "104m")
export RUST_BACKTRACE=1
export RUST_LOG=garage=info,garage_api=debug,netapp=trace
export RUST_LOG=garage=info,garage_api=debug
MAIN_LABEL="\e[${FANCYCOLORS[0]}[main]\e[49m"
WHICH_GARAGE=$(which garage || exit 1)
echo -en "${MAIN_LABEL} Found garage at: ${WHICH_GARAGE}\n"
if [ -z "$GARAGE_BIN" ]; then
GARAGE_BIN=$(which garage || exit 1)
echo -en "${MAIN_LABEL} Found garage at: ${GARAGE_BIN}\n"
else
echo -en "${MAIN_LABEL} Using garage binary at: ${GARAGE_BIN}\n"
fi
$GARAGE_BIN --version
NETWORK_SECRET="$(openssl rand -hex 32)"
@ -28,6 +33,7 @@ LABEL="\e[${FANCYCOLORS[$count]}[$count]\e[49m"
cat > $CONF_PATH <<EOF
block_size = 1048576 # objects are split in blocks of maximum this number of bytes
metadata_dir = "/tmp/garage-meta-$count"
db_engine = "lmdb"
data_dir = "/tmp/garage-data-$count"
rpc_bind_addr = "0.0.0.0:$((3900+$count))" # the port other Garage nodes will use to talk to this node
rpc_public_addr = "127.0.0.1:$((3900+$count))"
@ -51,7 +57,7 @@ EOF
echo -en "$LABEL configuration written to $CONF_PATH\n"
(garage -c /tmp/config.$count.toml server 2>&1|while read r; do echo -en "$LABEL $r\n"; done) &
($GARAGE_BIN -c /tmp/config.$count.toml server 2>&1|while read r; do echo -en "$LABEL $r\n"; done) &
done
# >>>>>>>>>>>>>>>> END FOR LOOP ON NODES
@ -73,14 +79,14 @@ fi
sleep 3
# Establish connections between nodes
for count in $(seq 1 3); do
NODE=$(garage -c /tmp/config.$count.toml node id -q)
NODE=$($GARAGE_BIN -c /tmp/config.$count.toml node id -q)
for count2 in $(seq 1 3); do
garage -c /tmp/config.$count2.toml node connect $NODE
$GARAGE_BIN -c /tmp/config.$count2.toml node connect $NODE
done
done
RETRY=120
until garage -c /tmp/config.1.toml status 2>&1|grep -q HEALTHY ; do
until $GARAGE_BIN -c /tmp/config.1.toml status 2>&1|grep -q HEALTHY ; do
(( RETRY-- ))
if (( RETRY <= 0 )); then
echo -en "${MAIN_LABEL} Garage did not start"

View File

@ -9,9 +9,17 @@ GARAGE_RELEASE="${REPO_FOLDER}/target/release/"
NIX_RELEASE="${REPO_FOLDER}/result/bin/"
PATH="${GARAGE_DEBUG}:${GARAGE_RELEASE}:${NIX_RELEASE}:$PATH"
if [ -z "$GARAGE_BIN" ]; then
GARAGE_BIN=$(which garage || exit 1)
echo -en "Found garage at: ${GARAGE_BIN}\n"
else
echo -en "Using garage binary at: ${GARAGE_BIN}\n"
fi
$GARAGE_BIN --version
sleep 5
RETRY=120
until garage -c /tmp/config.1.toml status 2>&1|grep -q HEALTHY ; do
until $GARAGE_BIN -c /tmp/config.1.toml status 2>&1|grep -q HEALTHY ; do
(( RETRY-- ))
if (( RETRY <= 0 )); then
echo "garage did not start in time, failing."
@ -21,11 +29,20 @@ until garage -c /tmp/config.1.toml status 2>&1|grep -q HEALTHY ; do
sleep 1
done
garage -c /tmp/config.1.toml status \
| grep 'NO ROLE' \
| grep -Po '^[0-9a-f]+' \
| while read id; do
garage -c /tmp/config.1.toml layout assign $id -z dc1 -c 1
done
if [ "$GARAGE_08" = "1" ]; then
$GARAGE_BIN -c /tmp/config.1.toml status \
| grep 'NO ROLE' \
| grep -Po '^[0-9a-f]+' \
| while read id; do
$GARAGE_BIN -c /tmp/config.1.toml layout assign $id -z dc1 -c 1
done
else
$GARAGE_BIN -c /tmp/config.1.toml status \
| grep 'NO ROLE' \
| grep -Po '^[0-9a-f]+' \
| while read id; do
$GARAGE_BIN -c /tmp/config.1.toml layout assign $id -z dc1 -c 1G
done
fi
garage -c /tmp/config.1.toml layout apply --version 1
$GARAGE_BIN -c /tmp/config.1.toml layout apply --version 1

View File

@ -21,4 +21,4 @@ version: 0.4.1
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "v0.8.4"
appVersion: "v0.9.0"

138
script/test-renumbering.sh Normal file
View File

@ -0,0 +1,138 @@
#!/usr/bin/env bash
: '
This script tests part renumbering on an S3 remote (here configured for Minio).
On Minio:
The results confirm that if I upload parts with number 1, 4, 5 and 6,
they are renumbered to 1, 2, 3 and 4 after CompleteMultipartUpload.
Thus, specifying partNumber=4 on a GetObject/HeadObject should return
information on the part I originally uploaded with part number
On S3: not tested
Sample output (on Minio):
f07e1404cc527d494242824ded3a616b part1
78974cd4d0f622eb3426ea7cd22f5a1c part4
f9cc379f8baa61645558d9ba7e6351fa part5
1bd2383eebbac1f8e7143575ba5b1f4a part6
Upload ID: 6838b813-d0ca-400b-9d28-ec8b2b5cd004
PART 1 ETag: "f07e1404cc527d494242824ded3a616b"
PART 4 ETag: "78974cd4d0f622eb3426ea7cd22f5a1c"
PART 5 ETag: "f9cc379f8baa61645558d9ba7e6351fa"
PART 6 ETag: "1bd2383eebbac1f8e7143575ba5b1f4a"
======================================== LIST ====
{
"Parts": [
{
"PartNumber": 1,
"LastModified": "2023-04-25T10:21:54.350000+00:00",
"ETag": "\"f07e1404cc527d494242824ded3a616b\"",
"Size": 20971520
},
{
"PartNumber": 4,
"LastModified": "2023-04-25T10:21:54.350000+00:00",
"ETag": "\"78974cd4d0f622eb3426ea7cd22f5a1c\"",
"Size": 20971520
},
{
"PartNumber": 5,
"LastModified": "2023-04-25T10:21:54.350000+00:00",
"ETag": "\"f9cc379f8baa61645558d9ba7e6351fa\"",
"Size": 20971520
},
{
"PartNumber": 6,
"LastModified": "2023-04-25T10:21:54.350000+00:00",
"ETag": "\"1bd2383eebbac1f8e7143575ba5b1f4a\"",
"Size": 20971520
}
],
"ChecksumAlgorithm": "",
"Initiator": {
"ID": "02d6176db174dc93cb1b899f7c6078f08654445fe8cf1b6ce98d8855f66bdbf4",
"DisplayName": "02d6176db174dc93cb1b899f7c6078f08654445fe8cf1b6ce98d8855f66bdbf4"
},
"Owner": {
"DisplayName": "02d6176db174dc93cb1b899f7c6078f08654445fe8cf1b6ce98d8855f66bdbf4",
"ID": "02d6176db174dc93cb1b899f7c6078f08654445fe8cf1b6ce98d8855f66bdbf4"
},
"StorageClass": "STANDARD"
}
======================================== COMPLETE ====
{
"Location": "http://localhost:9000/test/upload",
"Bucket": "test",
"Key": "upload",
"ETag": "\"8e817c8ccd442f9a79c77b58fe808c43-4\""
}
======================================== LIST ====
An error occurred (NoSuchUpload) when calling the ListParts operation: The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.
======================================== GET PART 4 ====
{
"AcceptRanges": "bytes",
"LastModified": "2023-04-25T10:21:59+00:00",
"ContentLength": 20971520,
"ETag": "\"8e817c8ccd442f9a79c77b58fe808c43-4\"",
"ContentRange": "bytes 62914560-83886079/83886080",
"ContentType": "binary/octet-stream",
"Metadata": {},
"PartsCount": 4
}
1bd2383eebbac1f8e7143575ba5b1f4a get-part4
Conclusions:
- Parts are indeed renumbered with consecutive numbers
- ListParts only applies to multipart uploads in progress,
it cannot be used once the multipart upload has been completed
'
export AWS_ACCESS_KEY_ID=1D8Pk2k4oQSoh1BU
export AWS_SECRET_ACCESS_KEY=4B46SR8U7FUgY0raB8Zuxg1NLyLTvbNV
function aws { command aws --endpoint-url http://localhost:9000 $@ ; }
aws --version
aws s3 mb s3://test
for NUM in 1 4 5 6; do
dd if=/dev/urandom of=part$NUM bs=1M count=10
done
md5sum part*
UPLOAD=$(aws s3api create-multipart-upload --bucket test --key 'upload' | jq -r ".UploadId")
echo "Upload ID: $UPLOAD"
PARTS=""
for NUM in 1 4 5 6; do
ETAG=$(aws s3api upload-part --bucket test --key 'upload' --part-number $NUM \
--body "part$NUM" --upload-id "$UPLOAD" | jq -r ".ETag")
echo "PART $NUM ETag: $ETAG"
if [ -n "$PARTS" ]; then
PARTS="$PARTS,"
fi
PARTS="$PARTS {\"ETag\":$ETAG,\"PartNumber\":$NUM}"
done
echo "======================================== LIST ===="
aws s3api list-parts --bucket test --key upload --upload-id "$UPLOAD" | jq
echo "======================================== COMPLETE ===="
echo "{\"Parts\":[$PARTS]}" > mpu
aws s3api complete-multipart-upload --multipart-upload file://mpu \
--bucket test --key 'upload' --upload-id "$UPLOAD"
echo "======================================== LIST ===="
aws s3api list-parts --bucket test --key upload --upload-id "$UPLOAD" | jq
echo "======================================== GET PART 4 ===="
aws s3api get-object --bucket test --key upload --part-number 4 get-part4
md5sum get-part4

103
script/test-skip-part.sh Normal file
View File

@ -0,0 +1,103 @@
#!/usr/bin/env bash
: '
This script tests whether uploaded parts can be skipped in a
CompleteMultipartUpoad
On Minio: yes, parts can be skipped
On S3: not tested
Sample output (on Minio):
f23911bcd1230f5ebe8887cbf5bc396e part1
a2657143167eaf647c40473e78a091dc part4
72f72c02c5163bc81024b28ac818c5e0 part5
e29cf500d20498218904b8df8806caa2 part6
Upload ID: e8fe7b83-9800-46fb-ae90-9d7ccd42fe76
PART 1 ETag: "f23911bcd1230f5ebe8887cbf5bc396e"
PART 4 ETag: "a2657143167eaf647c40473e78a091dc"
PART 5 ETag: "72f72c02c5163bc81024b28ac818c5e0"
PART 6 ETag: "e29cf500d20498218904b8df8806caa2"
======================================== COMPLETE ====
{
"Location": "http://localhost:9000/test/upload",
"Bucket": "test",
"Key": "upload",
"ETag": "\"48246e44d4b38bdc2f3c10ee25b1af17-3\""
}
======================================== GET FULL ====
{
"AcceptRanges": "bytes",
"LastModified": "2023-04-25T10:54:35+00:00",
"ContentLength": 31457280,
"ETag": "\"48246e44d4b38bdc2f3c10ee25b1af17-3\"",
"ContentType": "binary/octet-stream",
"Metadata": {}
}
97fb904da7ad310699a6afab0eb6e061 get-full
97fb904da7ad310699a6afab0eb6e061 -
======================================== GET PART 3 ====
{
"AcceptRanges": "bytes",
"LastModified": "2023-04-25T10:54:35+00:00",
"ContentLength": 10485760,
"ETag": "\"48246e44d4b38bdc2f3c10ee25b1af17-3\"",
"ContentRange": "bytes 20971520-31457279/31457280",
"ContentType": "binary/octet-stream",
"Metadata": {},
"PartsCount": 3
}
e29cf500d20498218904b8df8806caa2 get-part3
Conclusions:
- Skipping a part in a CompleteMultipartUpoad call is OK
- The part is simply not included in the stored object
- Sequential part renumbering counts only non-skipped parts
'
export AWS_ACCESS_KEY_ID=1D8Pk2k4oQSoh1BU
export AWS_SECRET_ACCESS_KEY=4B46SR8U7FUgY0raB8Zuxg1NLyLTvbNV
function aws { command aws --endpoint-url http://localhost:9000 $@ ; }
aws --version
aws s3 mb s3://test
for NUM in 1 4 5 6; do
dd if=/dev/urandom of=part$NUM bs=1M count=10
done
md5sum part*
UPLOAD=$(aws s3api create-multipart-upload --bucket test --key 'upload' | jq -r ".UploadId")
echo "Upload ID: $UPLOAD"
PARTS=""
for NUM in 1 4 5 6; do
ETAG=$(aws s3api upload-part --bucket test --key 'upload' --part-number $NUM \
--body "part$NUM" --upload-id "$UPLOAD" | jq -r ".ETag")
echo "PART $NUM ETag: $ETAG"
if [ "$NUM" != "5" ]; then
if [ -n "$PARTS" ]; then
PARTS="$PARTS,"
fi
PARTS="$PARTS {\"ETag\":$ETAG,\"PartNumber\":$NUM}"
fi
done
echo "======================================== COMPLETE ===="
echo "{\"Parts\":[$PARTS]}" > mpu
aws s3api complete-multipart-upload --multipart-upload file://mpu \
--bucket test --key 'upload' --upload-id "$UPLOAD"
echo "======================================== GET FULL ===="
aws s3api get-object --bucket test --key upload get-full
md5sum get-full
cat part1 part4 part6 | md5sum
echo "======================================== GET PART 3 ===="
aws s3api get-object --bucket test --key upload --part-number 3 get-part3
md5sum get-part3

View File

@ -31,6 +31,11 @@ dd if=/dev/urandom of=/tmp/garage.1.rnd bs=1k count=2 # No multipart, inline sto
dd if=/dev/urandom of=/tmp/garage.2.rnd bs=1M count=5 # No multipart but file will be chunked
dd if=/dev/urandom of=/tmp/garage.3.rnd bs=1M count=10 # by default, AWS starts using multipart at 8MB
dd if=/dev/urandom of=/tmp/garage.part1.rnd bs=1M count=5
dd if=/dev/urandom of=/tmp/garage.part2.rnd bs=1M count=5
dd if=/dev/urandom of=/tmp/garage.part3.rnd bs=1M count=5
dd if=/dev/urandom of=/tmp/garage.part4.rnd bs=1M count=5
# data of lower entropy, to test compression
dd if=/dev/urandom bs=1k count=2 | base64 -w0 > /tmp/garage.1.b64
dd if=/dev/urandom bs=1M count=5 | base64 -w0 > /tmp/garage.2.b64
@ -40,7 +45,7 @@ echo "🧪 S3 API testing..."
# AWS
if [ -z "$SKIP_AWS" ]; then
echo "🛠️ Testing with awscli"
echo "🛠️ Testing with awscli (aws s3)"
source ${SCRIPT_FOLDER}/dev-env-aws.sh
aws s3 ls
for idx in {1..3}.{rnd,b64}; do
@ -51,8 +56,36 @@ if [ -z "$SKIP_AWS" ]; then
rm /tmp/garage.$idx.dl
aws s3 rm "s3://eprouvette/&+-é\"/garage.$idx.aws"
done
echo "🛠️ Testing multipart uploads with awscli (aws s3api)"
UPLOAD=$(aws s3api create-multipart-upload --bucket eprouvette --key 'upload' | jq -r ".UploadId")
echo "Upload ID: $UPLOAD"
ETAG3=$(aws s3api upload-part --bucket eprouvette --key 'upload' \
--part-number 3 --body "/tmp/garage.part1.rnd" --upload-id "$UPLOAD" \
| jq -r ".ETag")
ETAG2=$(aws s3api upload-part --bucket eprouvette --key 'upload' \
--part-number 2 --body "/tmp/garage.part2.rnd" --upload-id "$UPLOAD" \
| jq -r ".ETag")
ETAG3=$(aws s3api upload-part --bucket eprouvette --key 'upload' \
--part-number 3 --body "/tmp/garage.part3.rnd" --upload-id "$UPLOAD" \
| jq -r ".ETag")
ETAG6=$(aws s3api upload-part --bucket eprouvette --key 'upload' \
--part-number 6 --body "/tmp/garage.part4.rnd" --upload-id "$UPLOAD" \
| jq -r ".ETag")
MPU="{\"Parts\":[{\"PartNumber\":2,\"ETag\":$ETAG2}, {\"PartNumber\":3,\"ETag\":$ETAG3}, {\"PartNumber\":6,\"ETag\":$ETAG6}]}"
echo $MPU > /tmp/garage.mpu.json
aws s3api complete-multipart-upload --multipart-upload file:///tmp/garage.mpu.json \
--bucket eprouvette --key 'upload' --upload-id "$UPLOAD"
aws s3api get-object --bucket eprouvette --key upload /tmp/garage.mpu.get
if [ "$(md5sum /tmp/garage.mpu.get | cut -d ' ' -f 1)" != "$(cat /tmp/garage.part{2,3,4}.rnd | md5sum | cut -d ' ' -f 1)" ]; then
echo "Invalid multipart upload"
exit 1
fi
fi
echo "OK!!"
exit 0
# S3CMD
if [ -z "$SKIP_S3CMD" ]; then
echo "🛠️ Testing with s3cmd"
@ -141,6 +174,7 @@ rm eprouvette/winscp
EOF
fi
rm /tmp/garage.part{1..4}.rnd
rm /tmp/garage.{1..3}.{rnd,b64}
echo "🏁 Teardown"

75
script/test-upgrade.sh Executable file
View File

@ -0,0 +1,75 @@
#!/usr/bin/env bash
set -ex
export LC_ALL=C.UTF-8
export LANG=C.UTF-8
SCRIPT_FOLDER="`dirname \"$0\"`"
REPO_FOLDER="${SCRIPT_FOLDER}/../"
GARAGE_DEBUG="${REPO_FOLDER}/target/debug/"
GARAGE_RELEASE="${REPO_FOLDER}/target/release/"
NIX_RELEASE="${REPO_FOLDER}/result/bin/:${REPO_FOLDER}/result-bin/bin/"
PATH="${GARAGE_DEBUG}:${GARAGE_RELEASE}:${NIX_RELEASE}:$PATH"
OLD_VERSION="$1"
ARCH="$2"
echo "Downloading old garage binary..."
curl https://garagehq.deuxfleurs.fr/_releases/$OLD_VERSION/$ARCH/garage > /tmp/old_garage
chmod +x /tmp/old_garage
echo "============= insert data into old version cluster ================="
export GARAGE_BIN=/tmp/old_garage
if echo $OLD_VERSION | grep 'v0\.8\.'; then
echo "Detected Garage v0.8.x"
export GARAGE_08=1
fi
echo "⏳ Setup cluster using old version"
$GARAGE_BIN --version
${SCRIPT_FOLDER}/dev-clean.sh
${SCRIPT_FOLDER}/dev-cluster.sh > /tmp/garage.log 2>&1 &
sleep 6
${SCRIPT_FOLDER}/dev-configure.sh
${SCRIPT_FOLDER}/dev-bucket.sh
echo "🛠️ Inserting data in old cluster"
source ${SCRIPT_FOLDER}/dev-env-rclone.sh
rclone copy "${SCRIPT_FOLDER}/../.git/" garage:eprouvette/test_dotgit --stats=1s --stats-log-level=NOTICE --stats-one-line
echo "🏁 Stopping old cluster"
killall -INT old_garage
sleep 2
killall -9 old_garage || true
echo "🏁 Removing old garage version"
rm -rv $GARAGE_BIN
export -n GARAGE_BIN
export -n GARAGE_08
echo "================ read data from new cluster ==================="
echo "⏳ Setup cluster using new version"
pwd
ls
export GARAGE_BIN=$(which garage)
$GARAGE_BIN --version
${SCRIPT_FOLDER}/dev-cluster.sh >> /tmp/garage.log 2>&1 &
sleep 3
echo "🛠️ Retrieving data from old cluster"
rclone copy garage:eprouvette/test_dotgit /tmp/test_dotgit --stats=1s --stats-log-level=NOTICE --stats-one-line --fast-list
if ! diff <(find "${SCRIPT_FOLDER}/../.git" -type f | xargs md5sum | cut -d ' ' -f 1 | sort) <(find /tmp/test_dotgit -type f | xargs md5sum | cut -d ' ' -f 1 | sort); then
echo "TEST FAILURE: directories are different"
exit 1
fi
rm -r /tmp/test_dotgit
echo "🏁 Teardown"
rm -rf /tmp/garage-{data,meta}-*
rm -rf /tmp/config.*.toml
echo "✅ Success"

View File

@ -1,6 +1,6 @@
[package]
name = "garage_api"
version = "0.8.4"
version = "0.9.0"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"

View File

@ -25,7 +25,8 @@ use crate::admin::bucket::*;
use crate::admin::cluster::*;
use crate::admin::error::*;
use crate::admin::key::*;
use crate::admin::router::{Authorization, Endpoint};
use crate::admin::router_v0;
use crate::admin::router_v1::{Authorization, Endpoint};
use crate::helpers::host_to_bucket;
pub struct AdminApiServer {
@ -229,7 +230,12 @@ impl ApiHandler for AdminApiServer {
type Error = Error;
fn parse_endpoint(&self, req: &Request<Body>) -> Result<Endpoint, Error> {
Endpoint::from_request(req)
if req.uri().path().starts_with("/v0/") {
let endpoint_v0 = router_v0::Endpoint::from_request(req)?;
Endpoint::from_v0(endpoint_v0)
} else {
Endpoint::from_request(req)
}
}
async fn handle(
@ -276,8 +282,13 @@ impl ApiHandler for AdminApiServer {
Endpoint::RevertClusterLayout => handle_revert_cluster_layout(&self.garage, req).await,
// Keys
Endpoint::ListKeys => handle_list_keys(&self.garage).await,
Endpoint::GetKeyInfo { id, search } => {
handle_get_key_info(&self.garage, id, search).await
Endpoint::GetKeyInfo {
id,
search,
show_secret_key,
} => {
let show_secret_key = show_secret_key.map(|x| x == "true").unwrap_or(false);
handle_get_key_info(&self.garage, id, search, show_secret_key).await
}
Endpoint::CreateKey => handle_create_key(&self.garage, req).await,
Endpoint::ImportKey => handle_import_key(&self.garage, req).await,

View File

@ -14,6 +14,7 @@ use garage_model::bucket_alias_table::*;
use garage_model::bucket_table::*;
use garage_model::garage::Garage;
use garage_model::permission::*;
use garage_model::s3::mpu_table;
use garage_model::s3::object_table::*;
use crate::admin::error::*;
@ -124,6 +125,14 @@ async fn bucket_info_results(
.map(|x| x.filtered_values(&garage.system.ring.borrow()))
.unwrap_or_default();
let mpu_counters = garage
.mpu_counter_table
.table
.get(&bucket_id, &EmptyKey)
.await?
.map(|x| x.filtered_values(&garage.system.ring.borrow()))
.unwrap_or_default();
let mut relevant_keys = HashMap::new();
for (k, _) in bucket
.state
@ -208,12 +217,12 @@ async fn bucket_info_results(
}
})
.collect::<Vec<_>>(),
objects: counters.get(OBJECTS).cloned().unwrap_or_default(),
bytes: counters.get(BYTES).cloned().unwrap_or_default(),
unfinished_uploads: counters
.get(UNFINISHED_UPLOADS)
.cloned()
.unwrap_or_default(),
objects: *counters.get(OBJECTS).unwrap_or(&0),
bytes: *counters.get(BYTES).unwrap_or(&0),
unfinished_uploads: *counters.get(UNFINISHED_UPLOADS).unwrap_or(&0),
unfinished_multipart_uploads: *mpu_counters.get(mpu_table::UPLOADS).unwrap_or(&0),
unfinished_multipart_upload_parts: *mpu_counters.get(mpu_table::PARTS).unwrap_or(&0),
unfinished_multipart_upload_bytes: *mpu_counters.get(mpu_table::BYTES).unwrap_or(&0),
quotas: ApiBucketQuotas {
max_size: quotas.max_size,
max_objects: quotas.max_objects,
@ -235,6 +244,9 @@ struct GetBucketInfoResult {
objects: i64,
bytes: i64,
unfinished_uploads: i64,
unfinished_multipart_uploads: i64,
unfinished_multipart_upload_parts: i64,
unfinished_multipart_upload_bytes: i64,
quotas: ApiBucketQuotas,
}

View File

@ -1,14 +1,13 @@
use std::collections::HashMap;
use std::net::SocketAddr;
use std::sync::Arc;
use hyper::{Body, Request, Response, StatusCode};
use hyper::{Body, Request, Response};
use serde::{Deserialize, Serialize};
use garage_util::crdt::*;
use garage_util::data::*;
use garage_rpc::layout::*;
use garage_rpc::layout;
use garage_model::garage::Garage;
@ -26,26 +25,37 @@ pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<
.system
.get_known_nodes()
.into_iter()
.map(|i| {
(
hex::encode(i.id),
KnownNodeResp {
addr: i.addr,
is_up: i.is_up,
last_seen_secs_ago: i.last_seen_secs_ago,
hostname: i.status.hostname,
},
)
.map(|i| KnownNodeResp {
id: hex::encode(i.id),
addr: i.addr,
is_up: i.is_up,
last_seen_secs_ago: i.last_seen_secs_ago,
hostname: i.status.hostname,
})
.collect(),
layout: get_cluster_layout(garage),
layout: format_cluster_layout(&garage.system.get_cluster_layout()),
};
Ok(json_ok_response(&res)?)
}
pub async fn handle_get_cluster_health(garage: &Arc<Garage>) -> Result<Response<Body>, Error> {
use garage_rpc::system::ClusterHealthStatus;
let health = garage.system.health();
let health = ClusterHealth {
status: match health.status {
ClusterHealthStatus::Healthy => "healthy",
ClusterHealthStatus::Degraded => "degraded",
ClusterHealthStatus::Unavailable => "unavailable",
},
known_nodes: health.known_nodes,
connected_nodes: health.connected_nodes,
storage_nodes: health.storage_nodes,
storage_nodes_ok: health.storage_nodes_ok,
partitions: health.partitions,
partitions_quorum: health.partitions_quorum,
partitions_all_ok: health.partitions_all_ok,
};
Ok(json_ok_response(&health)?)
}
@ -74,33 +84,68 @@ pub async fn handle_connect_cluster_nodes(
}
pub async fn handle_get_cluster_layout(garage: &Arc<Garage>) -> Result<Response<Body>, Error> {
let res = get_cluster_layout(garage);
let res = format_cluster_layout(&garage.system.get_cluster_layout());
Ok(json_ok_response(&res)?)
}
fn get_cluster_layout(garage: &Arc<Garage>) -> GetClusterLayoutResponse {
let layout = garage.system.get_cluster_layout();
fn format_cluster_layout(layout: &layout::ClusterLayout) -> GetClusterLayoutResponse {
let roles = layout
.roles
.items()
.iter()
.filter_map(|(k, _, v)| v.0.clone().map(|x| (k, x)))
.map(|(k, v)| NodeRoleResp {
id: hex::encode(k),
zone: v.zone.clone(),
capacity: v.capacity,
tags: v.tags.clone(),
})
.collect::<Vec<_>>();
let staged_role_changes = layout
.staging_roles
.items()
.iter()
.filter(|(k, _, v)| layout.roles.get(k) != Some(v))
.map(|(k, _, v)| match &v.0 {
None => NodeRoleChange {
id: hex::encode(k),
action: NodeRoleChangeEnum::Remove { remove: true },
},
Some(r) => NodeRoleChange {
id: hex::encode(k),
action: NodeRoleChangeEnum::Update {
zone: r.zone.clone(),
capacity: r.capacity,
tags: r.tags.clone(),
},
},
})
.collect::<Vec<_>>();
GetClusterLayoutResponse {
version: layout.version,
roles: layout
.roles
.items()
.iter()
.filter(|(_, _, v)| v.0.is_some())
.map(|(k, _, v)| (hex::encode(k), v.0.clone()))
.collect(),
staged_role_changes: layout
.staging
.items()
.iter()
.filter(|(k, _, v)| layout.roles.get(k) != Some(v))
.map(|(k, _, v)| (hex::encode(k), v.0.clone()))
.collect(),
roles,
staged_role_changes,
}
}
// ----
#[derive(Debug, Clone, Copy, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct ClusterHealth {
status: &'static str,
known_nodes: usize,
connected_nodes: usize,
storage_nodes: usize,
storage_nodes_ok: usize,
partitions: usize,
partitions_quorum: usize,
partitions_all_ok: usize,
}
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct GetClusterStatusResponse {
@ -109,11 +154,19 @@ struct GetClusterStatusResponse {
garage_features: Option<&'static [&'static str]>,
rust_version: &'static str,
db_engine: String,
known_nodes: HashMap<String, KnownNodeResp>,
known_nodes: Vec<KnownNodeResp>,
layout: GetClusterLayoutResponse,
}
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct ApplyClusterLayoutResponse {
message: Vec<String>,
layout: GetClusterLayoutResponse,
}
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct ConnectClusterNodesResponse {
success: bool,
error: Option<String>,
@ -123,18 +176,31 @@ struct ConnectClusterNodesResponse {
#[serde(rename_all = "camelCase")]
struct GetClusterLayoutResponse {
version: u64,
roles: HashMap<String, Option<NodeRole>>,
staged_role_changes: HashMap<String, Option<NodeRole>>,
roles: Vec<NodeRoleResp>,
staged_role_changes: Vec<NodeRoleChange>,
}
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct NodeRoleResp {
id: String,
zone: String,
capacity: Option<u64>,
tags: Vec<String>,
}
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct KnownNodeResp {
id: String,
addr: SocketAddr,
is_up: bool,
last_seen_secs_ago: Option<u64>,
hostname: String,
}
// ---- update functions ----
pub async fn handle_update_cluster_layout(
garage: &Arc<Garage>,
req: Request<Body>,
@ -144,22 +210,35 @@ pub async fn handle_update_cluster_layout(
let mut layout = garage.system.get_cluster_layout();
let mut roles = layout.roles.clone();
roles.merge(&layout.staging);
roles.merge(&layout.staging_roles);
for (node, role) in updates {
let node = hex::decode(node).ok_or_bad_request("Invalid node identifier")?;
for change in updates {
let node = hex::decode(&change.id).ok_or_bad_request("Invalid node identifier")?;
let node = Uuid::try_from(&node).ok_or_bad_request("Invalid node identifier")?;
let new_role = match change.action {
NodeRoleChangeEnum::Remove { remove: true } => None,
NodeRoleChangeEnum::Update {
zone,
capacity,
tags,
} => Some(layout::NodeRole {
zone,
capacity,
tags,
}),
_ => return Err(Error::bad_request("Invalid layout change")),
};
layout
.staging
.merge(&roles.update_mutator(node, NodeRoleV(role)));
.staging_roles
.merge(&roles.update_mutator(node, layout::NodeRoleV(new_role)));
}
garage.system.update_cluster_layout(&layout).await?;
Ok(Response::builder()
.status(StatusCode::NO_CONTENT)
.body(Body::empty())?)
let res = format_cluster_layout(&layout);
Ok(json_ok_response(&res)?)
}
pub async fn handle_apply_cluster_layout(
@ -169,12 +248,15 @@ pub async fn handle_apply_cluster_layout(
let param = parse_json_body::<ApplyRevertLayoutRequest>(req).await?;
let layout = garage.system.get_cluster_layout();
let layout = layout.apply_staged_changes(Some(param.version))?;
let (layout, msg) = layout.apply_staged_changes(Some(param.version))?;
garage.system.update_cluster_layout(&layout).await?;
Ok(Response::builder()
.status(StatusCode::NO_CONTENT)
.body(Body::empty())?)
let res = ApplyClusterLayoutResponse {
message: msg,
layout: format_cluster_layout(&layout),
};
Ok(json_ok_response(&res)?)
}
pub async fn handle_revert_cluster_layout(
@ -187,14 +269,39 @@ pub async fn handle_revert_cluster_layout(
let layout = layout.revert_staged_changes(Some(param.version))?;
garage.system.update_cluster_layout(&layout).await?;
Ok(Response::builder()
.status(StatusCode::NO_CONTENT)
.body(Body::empty())?)
let res = format_cluster_layout(&layout);
Ok(json_ok_response(&res)?)
}
type UpdateClusterLayoutRequest = HashMap<String, Option<NodeRole>>;
// ----
type UpdateClusterLayoutRequest = Vec<NodeRoleChange>;
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct ApplyRevertLayoutRequest {
version: u64,
}
// ----
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct NodeRoleChange {
id: String,
#[serde(flatten)]
action: NodeRoleChangeEnum,
}
#[derive(Serialize, Deserialize)]
#[serde(untagged)]
enum NodeRoleChangeEnum {
#[serde(rename_all = "camelCase")]
Remove { remove: bool },
#[serde(rename_all = "camelCase")]
Update {
zone: String,
capacity: Option<u64>,
tags: Vec<String>,
},
}

View File

@ -10,7 +10,7 @@ use garage_model::garage::Garage;
use garage_model::key_table::*;
use crate::admin::error::*;
use crate::helpers::{json_ok_response, parse_json_body};
use crate::helpers::{is_default, json_ok_response, parse_json_body};
pub async fn handle_list_keys(garage: &Arc<Garage>) -> Result<Response<Body>, Error> {
let res = garage
@ -34,6 +34,7 @@ pub async fn handle_list_keys(garage: &Arc<Garage>) -> Result<Response<Body>, Er
}
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct ListKeyResultItem {
id: String,
name: String,
@ -43,6 +44,7 @@ pub async fn handle_get_key_info(
garage: &Arc<Garage>,
id: Option<String>,
search: Option<String>,
show_secret_key: bool,
) -> Result<Response<Body>, Error> {
let key = if let Some(id) = id {
garage.key_helper().get_existing_key(&id).await?
@ -55,7 +57,7 @@ pub async fn handle_get_key_info(
unreachable!();
};
key_info_results(garage, key).await
key_info_results(garage, key, show_secret_key).await
}
pub async fn handle_create_key(
@ -64,15 +66,16 @@ pub async fn handle_create_key(
) -> Result<Response<Body>, Error> {
let req = parse_json_body::<CreateKeyRequest>(req).await?;
let key = Key::new(&req.name);
let key = Key::new(req.name.as_deref().unwrap_or("Unnamed key"));
garage.key_table.insert(&key).await?;
key_info_results(garage, key).await
key_info_results(garage, key, true).await
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct CreateKeyRequest {
name: String,
name: Option<String>,
}
pub async fn handle_import_key(
@ -86,10 +89,15 @@ pub async fn handle_import_key(
return Err(Error::KeyAlreadyExists(req.access_key_id.to_string()));
}
let imported_key = Key::import(&req.access_key_id, &req.secret_access_key, &req.name);
let imported_key = Key::import(
&req.access_key_id,
&req.secret_access_key,
req.name.as_deref().unwrap_or("Imported key"),
)
.ok_or_bad_request("Invalid key format")?;
garage.key_table.insert(&imported_key).await?;
key_info_results(garage, imported_key).await
key_info_results(garage, imported_key, false).await
}
#[derive(Deserialize)]
@ -97,7 +105,7 @@ pub async fn handle_import_key(
struct ImportKeyRequest {
access_key_id: String,
secret_access_key: String,
name: String,
name: Option<String>,
}
pub async fn handle_update_key(
@ -127,10 +135,11 @@ pub async fn handle_update_key(
garage.key_table.insert(&key).await?;
key_info_results(garage, key).await
key_info_results(garage, key, false).await
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct UpdateKeyRequest {
name: Option<String>,
allow: Option<KeyPerm>,
@ -149,7 +158,11 @@ pub async fn handle_delete_key(garage: &Arc<Garage>, id: String) -> Result<Respo
.body(Body::empty())?)
}
async fn key_info_results(garage: &Arc<Garage>, key: Key) -> Result<Response<Body>, Error> {
async fn key_info_results(
garage: &Arc<Garage>,
key: Key,
show_secret: bool,
) -> Result<Response<Body>, Error> {
let mut relevant_buckets = HashMap::new();
let key_state = key.state.as_option().unwrap();
@ -178,7 +191,11 @@ async fn key_info_results(garage: &Arc<Garage>, key: Key) -> Result<Response<Bod
let res = GetKeyInfoResult {
name: key_state.name.get().clone(),
access_key_id: key.key_id.clone(),
secret_access_key: key_state.secret_key.clone(),
secret_access_key: if show_secret {
Some(key_state.secret_key.clone())
} else {
None
},
permissions: KeyPerm {
create_bucket: *key_state.allow_create_bucket.get(),
},
@ -224,7 +241,8 @@ async fn key_info_results(garage: &Arc<Garage>, key: Key) -> Result<Response<Bod
struct GetKeyInfoResult {
name: String,
access_key_id: String,
secret_access_key: String,
#[serde(skip_serializing_if = "is_default")]
secret_access_key: Option<String>,
permissions: KeyPerm,
buckets: Vec<KeyInfoBucketResult>,
}
@ -246,6 +264,7 @@ struct KeyInfoBucketResult {
}
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
pub(crate) struct ApiBucketKeyPerm {
#[serde(default)]
pub(crate) read: bool,

View File

@ -1,6 +1,7 @@
pub mod api_server;
mod error;
mod router;
mod router_v0;
mod router_v1;
mod bucket;
mod cluster;

View File

@ -5,12 +5,6 @@ use hyper::{Method, Request};
use crate::admin::error::*;
use crate::router_macros::*;
pub enum Authorization {
None,
MetricsToken,
AdminToken,
}
router_match! {@func
/// List of all Admin API endpoints.
@ -134,15 +128,6 @@ impl Endpoint {
Ok(res)
}
/// Get the kind of authorization which is required to perform the operation.
pub fn authorization_type(&self) -> Authorization {
match self {
Self::Health => Authorization::None,
Self::CheckDomain => Authorization::None,
Self::Metrics => Authorization::MetricsToken,
_ => Authorization::AdminToken,
}
}
}
generateQueryParameters! {

235
src/api/admin/router_v1.rs Normal file
View File

@ -0,0 +1,235 @@
use std::borrow::Cow;
use hyper::{Method, Request};
use crate::admin::error::*;
use crate::admin::router_v0;
use crate::router_macros::*;
pub enum Authorization {
None,
MetricsToken,
AdminToken,
}
router_match! {@func
/// List of all Admin API endpoints.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Endpoint {
Options,
CheckDomain,
Health,
Metrics,
GetClusterStatus,
GetClusterHealth,
ConnectClusterNodes,
// Layout
GetClusterLayout,
UpdateClusterLayout,
ApplyClusterLayout,
RevertClusterLayout,
// Keys
ListKeys,
CreateKey,
ImportKey,
GetKeyInfo {
id: Option<String>,
search: Option<String>,
show_secret_key: Option<String>,
},
DeleteKey {
id: String,
},
UpdateKey {
id: String,
},
// Buckets
ListBuckets,
CreateBucket,
GetBucketInfo {
id: Option<String>,
global_alias: Option<String>,
},
DeleteBucket {
id: String,
},
UpdateBucket {
id: String,
},
// Bucket-Key Permissions
BucketAllowKey,
BucketDenyKey,
// Bucket aliases
GlobalAliasBucket {
id: String,
alias: String,
},
GlobalUnaliasBucket {
id: String,
alias: String,
},
LocalAliasBucket {
id: String,
access_key_id: String,
alias: String,
},
LocalUnaliasBucket {
id: String,
access_key_id: String,
alias: String,
},
}}
impl Endpoint {
/// Determine which S3 endpoint a request is for using the request, and a bucket which was
/// possibly extracted from the Host header.
/// Returns Self plus bucket name, if endpoint is not Endpoint::ListBuckets
pub fn from_request<T>(req: &Request<T>) -> Result<Self, Error> {
let uri = req.uri();
let path = uri.path();
let query = uri.query();
let mut query = QueryParameters::from_query(query.unwrap_or_default())?;
let res = router_match!(@gen_path_parser (req.method(), path, query) [
OPTIONS _ => Options,
GET "/check" => CheckDomain,
GET "/health" => Health,
GET "/metrics" => Metrics,
GET "/v1/status" => GetClusterStatus,
GET "/v1/health" => GetClusterHealth,
POST "/v1/connect" => ConnectClusterNodes,
// Layout endpoints
GET "/v1/layout" => GetClusterLayout,
POST "/v1/layout" => UpdateClusterLayout,
POST "/v1/layout/apply" => ApplyClusterLayout,
POST "/v1/layout/revert" => RevertClusterLayout,
// API key endpoints
GET "/v1/key" if id => GetKeyInfo (query_opt::id, query_opt::search, query_opt::show_secret_key),
GET "/v1/key" if search => GetKeyInfo (query_opt::id, query_opt::search, query_opt::show_secret_key),
POST "/v1/key" if id => UpdateKey (query::id),
POST "/v1/key" => CreateKey,
POST "/v1/key/import" => ImportKey,
DELETE "/v1/key" if id => DeleteKey (query::id),
GET "/v1/key" => ListKeys,
// Bucket endpoints
GET "/v1/bucket" if id => GetBucketInfo (query_opt::id, query_opt::global_alias),
GET "/v1/bucket" if global_alias => GetBucketInfo (query_opt::id, query_opt::global_alias),
GET "/v1/bucket" => ListBuckets,
POST "/v1/bucket" => CreateBucket,
DELETE "/v1/bucket" if id => DeleteBucket (query::id),
PUT "/v1/bucket" if id => UpdateBucket (query::id),
// Bucket-key permissions
POST "/v1/bucket/allow" => BucketAllowKey,
POST "/v1/bucket/deny" => BucketDenyKey,
// Bucket aliases
PUT "/v1/bucket/alias/global" => GlobalAliasBucket (query::id, query::alias),
DELETE "/v1/bucket/alias/global" => GlobalUnaliasBucket (query::id, query::alias),
PUT "/v1/bucket/alias/local" => LocalAliasBucket (query::id, query::access_key_id, query::alias),
DELETE "/v1/bucket/alias/local" => LocalUnaliasBucket (query::id, query::access_key_id, query::alias),
]);
if let Some(message) = query.nonempty_message() {
debug!("Unused query parameter: {}", message)
}
Ok(res)
}
/// Some endpoints work exactly the same in their v1/ version as they did in their v0/ version.
/// For these endpoints, we can convert a v0/ call to its equivalent as if it was made using
/// its v1/ URL.
pub fn from_v0(v0_endpoint: router_v0::Endpoint) -> Result<Self, Error> {
match v0_endpoint {
// Cluster endpoints
router_v0::Endpoint::ConnectClusterNodes => Ok(Self::ConnectClusterNodes),
// - GetClusterStatus: response format changed
// - GetClusterHealth: response format changed
// Layout endpoints
router_v0::Endpoint::RevertClusterLayout => Ok(Self::RevertClusterLayout),
// - GetClusterLayout: response format changed
// - UpdateClusterLayout: query format changed
// - ApplyCusterLayout: response format changed
// Key endpoints
router_v0::Endpoint::ListKeys => Ok(Self::ListKeys),
router_v0::Endpoint::CreateKey => Ok(Self::CreateKey),
router_v0::Endpoint::GetKeyInfo { id, search } => Ok(Self::GetKeyInfo {
id,
search,
show_secret_key: Some("true".into()),
}),
router_v0::Endpoint::DeleteKey { id } => Ok(Self::DeleteKey { id }),
// - UpdateKey: response format changed (secret key no longer returned)
// Bucket endpoints
router_v0::Endpoint::GetBucketInfo { id, global_alias } => {
Ok(Self::GetBucketInfo { id, global_alias })
}
router_v0::Endpoint::ListBuckets => Ok(Self::ListBuckets),
router_v0::Endpoint::CreateBucket => Ok(Self::CreateBucket),
router_v0::Endpoint::DeleteBucket { id } => Ok(Self::DeleteBucket { id }),
router_v0::Endpoint::UpdateBucket { id } => Ok(Self::UpdateBucket { id }),
// Bucket-key permissions
router_v0::Endpoint::BucketAllowKey => Ok(Self::BucketAllowKey),
router_v0::Endpoint::BucketDenyKey => Ok(Self::BucketDenyKey),
// Bucket alias endpoints
router_v0::Endpoint::GlobalAliasBucket { id, alias } => {
Ok(Self::GlobalAliasBucket { id, alias })
}
router_v0::Endpoint::GlobalUnaliasBucket { id, alias } => {
Ok(Self::GlobalUnaliasBucket { id, alias })
}
router_v0::Endpoint::LocalAliasBucket {
id,
access_key_id,
alias,
} => Ok(Self::LocalAliasBucket {
id,
access_key_id,
alias,
}),
router_v0::Endpoint::LocalUnaliasBucket {
id,
access_key_id,
alias,
} => Ok(Self::LocalUnaliasBucket {
id,
access_key_id,
alias,
}),
// For endpoints that have different body content syntax, issue
// deprecation warning
_ => Err(Error::bad_request(format!(
"v0/ endpoint is no longer supported: {}",
v0_endpoint.name()
))),
}
}
/// Get the kind of authorization which is required to perform the operation.
pub fn authorization_type(&self) -> Authorization {
match self {
Self::Health => Authorization::None,
Self::CheckDomain => Authorization::None,
Self::Metrics => Authorization::MetricsToken,
_ => Authorization::AdminToken,
}
}
}
generateQueryParameters! {
keywords: [],
fields: [
"format" => format,
"id" => id,
"search" => search,
"globalAlias" => global_alias,
"alias" => alias,
"accessKeyId" => access_key_id,
"showSecretKey" => show_secret_key
]
}

View File

@ -152,6 +152,10 @@ pub fn json_ok_response<T: Serialize>(res: &T) -> Result<Response<Body>, Error>
.body(Body::from(resp_json))?)
}
pub fn is_default<T: Default + PartialEq>(v: &T) -> bool {
*v == T::default()
}
#[cfg(test)]
mod tests {
use super::*;

View File

@ -26,6 +26,7 @@ macro_rules! router_match {
$($meth:ident $path:pat $(if $required:ident)? => $api:ident $(($($conv:ident :: $param:ident),*))?,)*
]) => {{
{
#[allow(unused_parens)]
match ($method, $reqpath) {
$(
(&Method::$meth, $path) if true $(&& $query.$required.is_some())? => Endpoint::$api {
@ -128,12 +129,6 @@ macro_rules! router_match {
}
}
};
(@if ($($cond:tt)+) then ($($then:tt)*) else ($($else:tt)*)) => {
$($then)*
};
(@if () then ($($then:tt)*) else ($($else:tt)*)) => {
$($else)*
};
}
/// This macro is used to generate part of the code in this module. It must be called only one, and

View File

@ -26,7 +26,9 @@ use crate::s3::copy::*;
use crate::s3::cors::*;
use crate::s3::delete::*;
use crate::s3::get::*;
use crate::s3::lifecycle::*;
use crate::s3::list::*;
use crate::s3::multipart::*;
use crate::s3::post_object::handle_post_object;
use crate::s3::put::*;
use crate::s3::router::Endpoint;
@ -256,7 +258,7 @@ impl ApiHandler for S3ApiServer {
bucket_name,
bucket_id,
delimiter: delimiter.map(|d| d.to_string()),
page_size: max_keys.map(|p| p.clamp(1, 1000)).unwrap_or(1000),
page_size: max_keys.unwrap_or(1000).clamp(1, 1000),
prefix: prefix.unwrap_or_default(),
urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false),
},
@ -286,7 +288,7 @@ impl ApiHandler for S3ApiServer {
bucket_name,
bucket_id,
delimiter: delimiter.map(|d| d.to_string()),
page_size: max_keys.map(|p| p.clamp(1, 1000)).unwrap_or(1000),
page_size: max_keys.unwrap_or(1000).clamp(1, 1000),
urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false),
prefix: prefix.unwrap_or_default(),
},
@ -319,7 +321,7 @@ impl ApiHandler for S3ApiServer {
bucket_name,
bucket_id,
delimiter: delimiter.map(|d| d.to_string()),
page_size: max_uploads.map(|p| p.clamp(1, 1000)).unwrap_or(1000),
page_size: max_uploads.unwrap_or(1000).clamp(1, 1000),
prefix: prefix.unwrap_or_default(),
urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false),
},
@ -343,7 +345,7 @@ impl ApiHandler for S3ApiServer {
key,
upload_id,
part_number_marker: part_number_marker.map(|p| p.clamp(1, 10000)),
max_parts: max_parts.map(|p| p.clamp(1, 1000)).unwrap_or(1000),
max_parts: max_parts.unwrap_or(1000).clamp(1, 1000),
},
)
.await
@ -353,14 +355,21 @@ impl ApiHandler for S3ApiServer {
}
Endpoint::GetBucketWebsite {} => handle_get_website(&bucket).await,
Endpoint::PutBucketWebsite {} => {
handle_put_website(garage, bucket_id, req, content_sha256).await
handle_put_website(garage, bucket.clone(), req, content_sha256).await
}
Endpoint::DeleteBucketWebsite {} => handle_delete_website(garage, bucket_id).await,
Endpoint::DeleteBucketWebsite {} => handle_delete_website(garage, bucket.clone()).await,
Endpoint::GetBucketCors {} => handle_get_cors(&bucket).await,
Endpoint::PutBucketCors {} => {
handle_put_cors(garage, bucket_id, req, content_sha256).await
handle_put_cors(garage, bucket.clone(), req, content_sha256).await
}
Endpoint::DeleteBucketCors {} => handle_delete_cors(garage, bucket.clone()).await,
Endpoint::GetBucketLifecycleConfiguration {} => handle_get_lifecycle(&bucket).await,
Endpoint::PutBucketLifecycleConfiguration {} => {
handle_put_lifecycle(garage, bucket.clone(), req, content_sha256).await
}
Endpoint::DeleteBucketLifecycle {} => {
handle_delete_lifecycle(garage, bucket.clone()).await
}
Endpoint::DeleteBucketCors {} => handle_delete_cors(garage, bucket_id).await,
endpoint => Err(Error::NotImplemented(endpoint.name().to_owned())),
};
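
The repeated `page_size`/`max_parts` change in the hunks above swaps `max_keys.map(|p| p.clamp(1, 1000)).unwrap_or(1000)` for `max_keys.unwrap_or(1000).clamp(1, 1000)`. With a default of 1000 the two expressions return the same value for every input; the new form simply reads as "take the default, then clamp", and it would also keep the result in range if the default constant were ever changed. A quick standalone check, as a sketch:

fn main() {
    let cases: [Option<u64>; 4] = [None, Some(0), Some(250), Some(5000)];
    for max_keys in cases {
        let old = max_keys.map(|p| p.clamp(1, 1000)).unwrap_or(1000);
        let new = max_keys.unwrap_or(1000).clamp(1, 1000);
        // Both forms agree: 1000, 1, 250, 1000.
        assert_eq!(old, new);
        println!("{:?} -> {}", max_keys, new);
    }
}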

View File

@ -2,7 +2,7 @@ use std::pin::Pin;
use std::sync::Arc;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use futures::{stream, stream::Stream, StreamExt, TryFutureExt};
use futures::{stream, stream::Stream, StreamExt};
use md5::{Digest as Md5Digest, Md5};
use bytes::Bytes;
@ -18,12 +18,14 @@ use garage_util::time::*;
use garage_model::garage::Garage;
use garage_model::key_table::Key;
use garage_model::s3::block_ref_table::*;
use garage_model::s3::mpu_table::*;
use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*;
use crate::helpers::parse_bucket_key;
use crate::s3::error::*;
use crate::s3::put::{decode_upload_id, get_headers};
use crate::s3::multipart;
use crate::s3::put::get_headers;
use crate::s3::xml::{self as s3_xml, xmlns_tag};
pub async fn handle_copy(
@ -92,7 +94,10 @@ pub async fn handle_copy(
let tmp_dest_object_version = ObjectVersion {
uuid: new_uuid,
timestamp: new_timestamp,
state: ObjectVersionState::Uploading(new_meta.headers.clone()),
state: ObjectVersionState::Uploading {
headers: new_meta.headers.clone(),
multipart: false,
},
};
let tmp_dest_object = Object::new(
dest_bucket_id,
@ -105,8 +110,14 @@ pub async fn handle_copy(
// this means that the BlockRef entries linked to this version cannot be
// marked as deleted (they are marked as deleted only if the Version
// doesn't exist or is marked as deleted).
let mut dest_version =
Version::new(new_uuid, dest_bucket_id, dest_key.to_string(), false);
let mut dest_version = Version::new(
new_uuid,
VersionBacklink::Object {
bucket_id: dest_bucket_id,
key: dest_key.to_string(),
},
false,
);
garage.version_table.insert(&dest_version).await?;
// Fill in block list for version and insert block refs
@ -179,17 +190,13 @@ pub async fn handle_upload_part_copy(
) -> Result<Response<Body>, Error> {
let copy_precondition = CopyPreconditionHeaders::parse(req)?;
let dest_version_uuid = decode_upload_id(upload_id)?;
let dest_upload_id = multipart::decode_upload_id(upload_id)?;
let dest_key = dest_key.to_string();
let (source_object, dest_object) = futures::try_join!(
let (source_object, (_, _, mut dest_mpu)) = futures::try_join!(
get_copy_source(&garage, api_key, req),
garage
.object_table
.get(&dest_bucket_id, &dest_key)
.map_err(Error::from),
multipart::get_upload(&garage, &dest_bucket_id, &dest_key, &dest_upload_id)
)?;
let dest_object = dest_object.ok_or(Error::NoSuchKey)?;
let (source_object_version, source_version_data, source_version_meta) =
extract_source_info(&source_object)?;
@ -217,15 +224,6 @@ pub async fn handle_upload_part_copy(
},
};
// Check destination version is indeed in uploading state
if !dest_object
.versions()
.iter()
.any(|v| v.uuid == dest_version_uuid && v.is_uploading())
{
return Err(Error::NoSuchUpload);
}
// Check source version is not inlined
match source_version_data {
ObjectVersionData::DeleteMarker => unreachable!(),
@ -242,23 +240,11 @@ pub async fn handle_upload_part_copy(
// Fetch source version with its block list,
// and destination version to check part hasn't yet been uploaded
let (source_version, dest_version) = futures::try_join!(
garage
.version_table
.get(&source_object_version.uuid, &EmptyKey),
garage.version_table.get(&dest_version_uuid, &EmptyKey),
)?;
let source_version = source_version.ok_or(Error::NoSuchKey)?;
// Check this part number hasn't yet been uploaded
if let Some(dv) = dest_version {
if dv.has_part_number(part_number) {
return Err(Error::bad_request(format!(
"Part number {} has already been uploaded",
part_number
)));
}
}
let source_version = garage
.version_table
.get(&source_object_version.uuid, &EmptyKey)
.await?
.ok_or(Error::NoSuchKey)?;
// We want to reuse blocks from the source version as much as possible.
// However, we still need to get the data from these blocks
@ -299,6 +285,33 @@ pub async fn handle_upload_part_copy(
current_offset = block_end;
}
// Calculate the identity of destination part: timestamp, version id
let dest_version_id = gen_uuid();
let dest_mpu_part_key = MpuPartKey {
part_number,
timestamp: dest_mpu.next_timestamp(part_number),
};
// Create the uploaded part
dest_mpu.parts.clear();
dest_mpu.parts.put(
dest_mpu_part_key,
MpuPart {
version: dest_version_id,
etag: None,
size: None,
},
);
garage.mpu_table.insert(&dest_mpu).await?;
let mut dest_version = Version::new(
dest_version_id,
VersionBacklink::MultipartUpload {
upload_id: dest_upload_id,
},
false,
);
// Now, actually copy the blocks
let mut md5hasher = Md5::new();
@ -348,8 +361,8 @@ pub async fn handle_upload_part_copy(
let must_upload = existing_block_hash.is_none();
let final_hash = existing_block_hash.unwrap_or_else(|| blake2sum(&data[..]));
let mut version = Version::new(dest_version_uuid, dest_bucket_id, dest_key.clone(), false);
version.blocks.put(
dest_version.blocks.clear();
dest_version.blocks.put(
VersionBlockKey {
part_number,
offset: current_offset,
@ -363,7 +376,7 @@ pub async fn handle_upload_part_copy(
let block_ref = BlockRef {
block: final_hash,
version: dest_version_uuid,
version: dest_version_id,
deleted: false.into(),
};
@ -378,23 +391,33 @@ pub async fn handle_upload_part_copy(
Ok(())
}
},
// Thing 2: we need to insert the block in the version
garage.version_table.insert(&version),
// Thing 3: we need to add a block reference
garage.block_ref_table.insert(&block_ref),
async {
// Thing 2: we need to insert the block in the version
garage.version_table.insert(&dest_version).await?;
// Thing 3: we need to add a block reference
garage.block_ref_table.insert(&block_ref).await
},
// Thing 4: we need to prefetch the next block
defragmenter.next(),
)?;
next_block = res.3;
next_block = res.2;
}
assert_eq!(current_offset, source_range.length);
let data_md5sum = md5hasher.finalize();
let etag = hex::encode(data_md5sum);
// Put the part's ETag in the Version table
let mut version = Version::new(dest_version_uuid, dest_bucket_id, dest_key.clone(), false);
version.parts_etags.put(part_number, etag.clone());
garage.version_table.insert(&version).await?;
dest_mpu.parts.put(
dest_mpu_part_key,
MpuPart {
version: dest_version_id,
etag: Some(etag.clone()),
size: Some(current_offset),
},
);
garage.mpu_table.insert(&dest_mpu).await?;
// LGTM
let resp_xml = s3_xml::to_xml_with_header(&CopyPartResult {

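One structural change worth noting in the block-copy loop above: the version insert ("Thing 2") and the block-ref insert ("Thing 3") used to be two independent branches of `futures::try_join!`; they are now sequenced inside a single `async` block, presumably so the Version entry exists before the BlockRef that points to it (consistent with the earlier comment in this file about when BlockRef entries may be marked as deleted), while the other branches still run concurrently. A reduced sketch of the pattern with stand-in async functions (the names below are placeholders, not the real table API):

// Placeholder async operations standing in for the real inserts and prefetch.
async fn insert_version() -> Result<(), String> {
    Ok(())
}
async fn insert_block_ref() -> Result<(), String> {
    Ok(())
}
async fn prefetch_next_block() -> Result<u64, String> {
    Ok(42)
}

#[tokio::main]
async fn main() -> Result<(), String> {
    let res = futures::try_join!(
        // Branch 1 runs concurrently with the others (stand-in for "Thing 1").
        async { Ok::<_, String>(()) },
        // Branch 2 sequences its two dependent writes internally.
        async {
            insert_version().await?;
            insert_block_ref().await
        },
        // Branch 3 prefetches the next block.
        prefetch_next_block(),
    )?;
    // The prefetched block is now at index 2 instead of 3.
    let next_block = res.2;
    println!("next block: {}", next_block);
    Ok(())
}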
View File

@ -44,14 +44,11 @@ pub async fn handle_get_cors(bucket: &Bucket) -> Result<Response<Body>, Error> {
pub async fn handle_delete_cors(
garage: Arc<Garage>,
bucket_id: Uuid,
mut bucket: Bucket,
) -> Result<Response<Body>, Error> {
let mut bucket = garage
.bucket_helper()
.get_existing_bucket(bucket_id)
.await?;
let param = bucket.params_mut().unwrap();
let param = bucket
.params_mut()
.ok_or_internal_error("Bucket should not be deleted at this point")?;
param.cors_config.update(None);
garage.bucket_table.insert(&bucket).await?;
@ -63,7 +60,7 @@ pub async fn handle_delete_cors(
pub async fn handle_put_cors(
garage: Arc<Garage>,
bucket_id: Uuid,
mut bucket: Bucket,
req: Request<Body>,
content_sha256: Option<Hash>,
) -> Result<Response<Body>, Error> {
@ -73,12 +70,9 @@ pub async fn handle_put_cors(
verify_signed_content(content_sha256, &body[..])?;
}
let mut bucket = garage
.bucket_helper()
.get_existing_bucket(bucket_id)
.await?;
let param = bucket.params_mut().unwrap();
let param = bucket
.params_mut()
.ok_or_internal_error("Bucket should not be deleted at this point")?;
let conf: CorsConfiguration = from_reader(&body as &[u8])?;
conf.validate()?;

View File

@ -149,7 +149,6 @@ pub async fn handle_head(
let (part_offset, part_end) =
calculate_part_bounds(&version, pn).ok_or(Error::InvalidPart)?;
let n_parts = version.parts_etags.items().len();
Ok(object_headers(object_version, version_meta)
.header(CONTENT_LENGTH, format!("{}", part_end - part_offset))
@ -162,7 +161,7 @@ pub async fn handle_head(
version_meta.size
),
)
.header(X_AMZ_MP_PARTS_COUNT, format!("{}", n_parts))
.header(X_AMZ_MP_PARTS_COUNT, format!("{}", version.n_parts()?))
.status(StatusCode::PARTIAL_CONTENT)
.body(Body::empty())?)
}
@ -376,7 +375,6 @@ async fn handle_get_part(
let (begin, end) =
calculate_part_bounds(&version, part_number).ok_or(Error::InvalidPart)?;
let n_parts = version.parts_etags.items().len();
let body = body_from_blocks_range(garage, version.blocks.items(), begin, end);
@ -386,7 +384,7 @@ async fn handle_get_part(
CONTENT_RANGE,
format!("bytes {}-{}/{}", begin, end - 1, version_meta.size),
)
.header(X_AMZ_MP_PARTS_COUNT, format!("{}", n_parts))
.header(X_AMZ_MP_PARTS_COUNT, format!("{}", version.n_parts()?))
.body(body)?)
}
_ => unreachable!(),

401
src/api/s3/lifecycle.rs Normal file
View File

@ -0,0 +1,401 @@
use quick_xml::de::from_reader;
use std::sync::Arc;
use hyper::{Body, Request, Response, StatusCode};
use serde::{Deserialize, Serialize};
use crate::s3::error::*;
use crate::s3::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
use crate::signature::verify_signed_content;
use garage_model::bucket_table::{
parse_lifecycle_date, Bucket, LifecycleExpiration as GarageLifecycleExpiration,
LifecycleFilter as GarageLifecycleFilter, LifecycleRule as GarageLifecycleRule,
};
use garage_model::garage::Garage;
use garage_util::data::*;
pub async fn handle_get_lifecycle(bucket: &Bucket) -> Result<Response<Body>, Error> {
let param = bucket
.params()
.ok_or_internal_error("Bucket should not be deleted at this point")?;
if let Some(lifecycle) = param.lifecycle_config.get() {
let wc = LifecycleConfiguration::from_garage_lifecycle_config(lifecycle);
let xml = to_xml_with_header(&wc)?;
Ok(Response::builder()
.status(StatusCode::OK)
.header(http::header::CONTENT_TYPE, "application/xml")
.body(Body::from(xml))?)
} else {
Ok(Response::builder()
.status(StatusCode::NO_CONTENT)
.body(Body::empty())?)
}
}
pub async fn handle_delete_lifecycle(
garage: Arc<Garage>,
mut bucket: Bucket,
) -> Result<Response<Body>, Error> {
let param = bucket
.params_mut()
.ok_or_internal_error("Bucket should not be deleted at this point")?;
param.lifecycle_config.update(None);
garage.bucket_table.insert(&bucket).await?;
Ok(Response::builder()
.status(StatusCode::NO_CONTENT)
.body(Body::empty())?)
}
pub async fn handle_put_lifecycle(
garage: Arc<Garage>,
mut bucket: Bucket,
req: Request<Body>,
content_sha256: Option<Hash>,
) -> Result<Response<Body>, Error> {
let body = hyper::body::to_bytes(req.into_body()).await?;
if let Some(content_sha256) = content_sha256 {
verify_signed_content(content_sha256, &body[..])?;
}
let param = bucket
.params_mut()
.ok_or_internal_error("Bucket should not be deleted at this point")?;
let conf: LifecycleConfiguration = from_reader(&body as &[u8])?;
let config = conf
.validate_into_garage_lifecycle_config()
.ok_or_bad_request("Invalid lifecycle configuration")?;
param.lifecycle_config.update(Some(config));
garage.bucket_table.insert(&bucket).await?;
Ok(Response::builder()
.status(StatusCode::OK)
.body(Body::empty())?)
}
// ---- SERIALIZATION AND DESERIALIZATION TO/FROM S3 XML ----
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
pub struct LifecycleConfiguration {
#[serde(serialize_with = "xmlns_tag", skip_deserializing)]
pub xmlns: (),
#[serde(rename = "Rule")]
pub lifecycle_rules: Vec<LifecycleRule>,
}
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
pub struct LifecycleRule {
#[serde(rename = "ID")]
pub id: Option<Value>,
#[serde(rename = "Status")]
pub status: Value,
#[serde(rename = "Filter", default)]
pub filter: Option<Filter>,
#[serde(rename = "Expiration", default)]
pub expiration: Option<Expiration>,
#[serde(rename = "AbortIncompleteMultipartUpload", default)]
pub abort_incomplete_mpu: Option<AbortIncompleteMpu>,
}
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Default)]
pub struct Filter {
#[serde(rename = "And")]
pub and: Option<Box<Filter>>,
#[serde(rename = "Prefix")]
pub prefix: Option<Value>,
#[serde(rename = "ObjectSizeGreaterThan")]
pub size_gt: Option<IntValue>,
#[serde(rename = "ObjectSizeLessThan")]
pub size_lt: Option<IntValue>,
}
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
pub struct Expiration {
#[serde(rename = "Days")]
pub days: Option<IntValue>,
#[serde(rename = "Date")]
pub at_date: Option<Value>,
}
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
pub struct AbortIncompleteMpu {
#[serde(rename = "DaysAfterInitiation")]
pub days: IntValue,
}
impl LifecycleConfiguration {
pub fn validate_into_garage_lifecycle_config(
self,
) -> Result<Vec<GarageLifecycleRule>, &'static str> {
let mut ret = vec![];
for rule in self.lifecycle_rules {
ret.push(rule.validate_into_garage_lifecycle_rule()?);
}
Ok(ret)
}
pub fn from_garage_lifecycle_config(config: &[GarageLifecycleRule]) -> Self {
Self {
xmlns: (),
lifecycle_rules: config
.iter()
.map(LifecycleRule::from_garage_lifecycle_rule)
.collect(),
}
}
}
impl LifecycleRule {
pub fn validate_into_garage_lifecycle_rule(self) -> Result<GarageLifecycleRule, &'static str> {
let enabled = match self.status.0.as_str() {
"Enabled" => true,
"Disabled" => false,
_ => return Err("invalid value for <Status>"),
};
let filter = self
.filter
.map(Filter::validate_into_garage_lifecycle_filter)
.transpose()?
.unwrap_or_default();
let abort_incomplete_mpu_days = self.abort_incomplete_mpu.map(|x| x.days.0 as usize);
let expiration = self
.expiration
.map(Expiration::validate_into_garage_lifecycle_expiration)
.transpose()?;
Ok(GarageLifecycleRule {
id: self.id.map(|x| x.0),
enabled,
filter,
abort_incomplete_mpu_days,
expiration,
})
}
pub fn from_garage_lifecycle_rule(rule: &GarageLifecycleRule) -> Self {
Self {
id: rule.id.as_deref().map(Value::from),
status: if rule.enabled {
Value::from("Enabled")
} else {
Value::from("Disabled")
},
filter: Filter::from_garage_lifecycle_filter(&rule.filter),
abort_incomplete_mpu: rule
.abort_incomplete_mpu_days
.map(|days| AbortIncompleteMpu {
days: IntValue(days as i64),
}),
expiration: rule
.expiration
.as_ref()
.map(Expiration::from_garage_lifecycle_expiration),
}
}
}
impl Filter {
pub fn count(&self) -> i32 {
fn count<T>(x: &Option<T>) -> i32 {
x.as_ref().map(|_| 1).unwrap_or(0)
}
count(&self.prefix) + count(&self.size_gt) + count(&self.size_lt)
}
pub fn validate_into_garage_lifecycle_filter(
self,
) -> Result<GarageLifecycleFilter, &'static str> {
if self.count() > 0 && self.and.is_some() {
Err("Filter tag cannot contain both <And> and another condition")
} else if let Some(and) = self.and {
if and.and.is_some() {
return Err("Nested <And> tags");
}
Ok(and.internal_into_garage_lifecycle_filter())
} else if self.count() > 1 {
Err("Multiple Filter conditions must be wrapped in an <And> tag")
} else {
Ok(self.internal_into_garage_lifecycle_filter())
}
}
fn internal_into_garage_lifecycle_filter(self) -> GarageLifecycleFilter {
GarageLifecycleFilter {
prefix: self.prefix.map(|x| x.0),
size_gt: self.size_gt.map(|x| x.0 as u64),
size_lt: self.size_lt.map(|x| x.0 as u64),
}
}
pub fn from_garage_lifecycle_filter(rule: &GarageLifecycleFilter) -> Option<Self> {
let filter = Filter {
and: None,
prefix: rule.prefix.as_deref().map(Value::from),
size_gt: rule.size_gt.map(|x| IntValue(x as i64)),
size_lt: rule.size_lt.map(|x| IntValue(x as i64)),
};
match filter.count() {
0 => None,
1 => Some(filter),
_ => Some(Filter {
and: Some(Box::new(filter)),
..Default::default()
}),
}
}
}
impl Expiration {
pub fn validate_into_garage_lifecycle_expiration(
self,
) -> Result<GarageLifecycleExpiration, &'static str> {
match (self.days, self.at_date) {
(Some(_), Some(_)) => Err("cannot have both <Days> and <Date> in <Expiration>"),
(None, None) => Err("<Expiration> must contain either <Days> or <Date>"),
(Some(days), None) => Ok(GarageLifecycleExpiration::AfterDays(days.0 as usize)),
(None, Some(date)) => {
parse_lifecycle_date(&date.0)?;
Ok(GarageLifecycleExpiration::AtDate(date.0))
}
}
}
pub fn from_garage_lifecycle_expiration(exp: &GarageLifecycleExpiration) -> Self {
match exp {
GarageLifecycleExpiration::AfterDays(days) => Expiration {
days: Some(IntValue(*days as i64)),
at_date: None,
},
GarageLifecycleExpiration::AtDate(date) => Expiration {
days: None,
at_date: Some(Value(date.to_string())),
},
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use quick_xml::de::from_str;
#[test]
fn test_deserialize_lifecycle_config() -> Result<(), Error> {
let message = r#"<?xml version="1.0" encoding="UTF-8"?>
<LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Rule>
<ID>id1</ID>
<Status>Enabled</Status>
<Filter>
<Prefix>documents/</Prefix>
</Filter>
<AbortIncompleteMultipartUpload>
<DaysAfterInitiation>7</DaysAfterInitiation>
</AbortIncompleteMultipartUpload>
</Rule>
<Rule>
<ID>id2</ID>
<Status>Enabled</Status>
<Filter>
<And>
<Prefix>logs/</Prefix>
<ObjectSizeGreaterThan>1000000</ObjectSizeGreaterThan>
</And>
</Filter>
<Expiration>
<Days>365</Days>
</Expiration>
</Rule>
</LifecycleConfiguration>"#;
let conf: LifecycleConfiguration = from_str(message).unwrap();
let ref_value = LifecycleConfiguration {
xmlns: (),
lifecycle_rules: vec![
LifecycleRule {
id: Some("id1".into()),
status: "Enabled".into(),
filter: Some(Filter {
prefix: Some("documents/".into()),
..Default::default()
}),
expiration: None,
abort_incomplete_mpu: Some(AbortIncompleteMpu { days: IntValue(7) }),
},
LifecycleRule {
id: Some("id2".into()),
status: "Enabled".into(),
filter: Some(Filter {
and: Some(Box::new(Filter {
prefix: Some("logs/".into()),
size_gt: Some(IntValue(1000000)),
..Default::default()
})),
..Default::default()
}),
expiration: Some(Expiration {
days: Some(IntValue(365)),
at_date: None,
}),
abort_incomplete_mpu: None,
},
],
};
assert_eq! {
ref_value,
conf
};
let message2 = to_xml_with_header(&ref_value)?;
let cleanup = |c: &str| c.replace(char::is_whitespace, "");
assert_eq!(cleanup(message), cleanup(&message2));
// Check validation
let validated = ref_value
.validate_into_garage_lifecycle_config()
.ok_or_bad_request("invalid xml config")?;
let ref_config = vec![
GarageLifecycleRule {
id: Some("id1".into()),
enabled: true,
filter: GarageLifecycleFilter {
prefix: Some("documents/".into()),
..Default::default()
},
expiration: None,
abort_incomplete_mpu_days: Some(7),
},
GarageLifecycleRule {
id: Some("id2".into()),
enabled: true,
filter: GarageLifecycleFilter {
prefix: Some("logs/".into()),
size_gt: Some(1000000),
..Default::default()
},
expiration: Some(GarageLifecycleExpiration::AfterDays(365)),
abort_incomplete_mpu_days: None,
},
];
assert_eq!(validated, ref_config);
let message3 = to_xml_with_header(&LifecycleConfiguration::from_garage_lifecycle_config(
&validated,
))?;
assert_eq!(cleanup(message), cleanup(&message3));
Ok(())
}
}
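
On the `Filter` conversion helpers above: `validate_into_garage_lifecycle_filter` rejects a `<Filter>` that mixes `<And>` with a direct condition, or that carries several direct conditions without an `<And>` wrapper, while `from_garage_lifecycle_filter` re-wraps multiple conditions into `<And>` when serializing back to XML. A sketch of an extra unit test exercising these rules, in the same style as the test module above (not part of this diff):

#[test]
fn test_lifecycle_filter_and_wrapping() -> Result<(), Error> {
    // A single condition is emitted without an <And> wrapper...
    let single = GarageLifecycleFilter {
        prefix: Some("logs/".into()),
        ..Default::default()
    };
    let f = Filter::from_garage_lifecycle_filter(&single).unwrap();
    assert!(f.and.is_none());

    // ...while two conditions are wrapped into an <And> block.
    let double = GarageLifecycleFilter {
        prefix: Some("logs/".into()),
        size_gt: Some(1_000_000),
        ..Default::default()
    };
    let f = Filter::from_garage_lifecycle_filter(&double).unwrap();
    assert!(f.and.is_some());

    // Validation refuses a <Filter> mixing <And> with a direct condition.
    let invalid = Filter {
        and: Some(Box::new(Filter::default())),
        prefix: Some("logs/".into()),
        ..Default::default()
    };
    assert!(invalid.validate_into_garage_lifecycle_filter().is_err());

    Ok(())
}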

View File

@ -1,4 +1,3 @@
use std::cmp::Ordering;
use std::collections::{BTreeMap, BTreeSet};
use std::iter::{Iterator, Peekable};
use std::sync::Arc;
@ -11,15 +10,15 @@ use garage_util::error::Error as GarageError;
use garage_util::time::*;
use garage_model::garage::Garage;
use garage_model::s3::mpu_table::*;
use garage_model::s3::object_table::*;
use garage_model::s3::version_table::Version;
use garage_table::{EmptyKey, EnumerationOrder};
use garage_table::EnumerationOrder;
use crate::encoding::*;
use crate::helpers::key_after_prefix;
use crate::s3::error::*;
use crate::s3::put as s3_put;
use crate::s3::multipart as s3_multipart;
use crate::s3::xml as s3_xml;
const DUMMY_NAME: &str = "Dummy Key";
@ -176,7 +175,9 @@ pub async fn handle_list_multipart_upload(
t.get_range(
&bucket,
key,
Some(ObjectFilter::IsUploading),
Some(ObjectFilter::IsUploading {
check_multipart: Some(true),
}),
count,
EnumerationOrder::Forward,
)
@ -272,24 +273,26 @@ pub async fn handle_list_parts(
) -> Result<Response<Body>, Error> {
debug!("ListParts {:?}", query);
let upload_id = s3_put::decode_upload_id(&query.upload_id)?;
let upload_id = s3_multipart::decode_upload_id(&query.upload_id)?;
let (object, version) = futures::try_join!(
garage.object_table.get(&query.bucket_id, &query.key),
garage.version_table.get(&upload_id, &EmptyKey),
)?;
let (_, _, mpu) =
s3_multipart::get_upload(&garage, &query.bucket_id, &query.key, &upload_id).await?;
let (info, next) = fetch_part_info(query, object, version, upload_id)?;
let (info, next) = fetch_part_info(query, &mpu)?;
let result = s3_xml::ListPartsResult {
xmlns: (),
// Query parameters
bucket: s3_xml::Value(query.bucket_name.to_string()),
key: s3_xml::Value(query.key.to_string()),
upload_id: s3_xml::Value(query.upload_id.to_string()),
part_number_marker: query.part_number_marker.map(|e| s3_xml::IntValue(e as i64)),
next_part_number_marker: next.map(|e| s3_xml::IntValue(e as i64)),
max_parts: s3_xml::IntValue(query.max_parts as i64),
is_truncated: s3_xml::Value(next.map(|_| "true").unwrap_or("false").to_string()),
// Result values
next_part_number_marker: next.map(|e| s3_xml::IntValue(e as i64)),
is_truncated: s3_xml::Value(format!("{}", next.is_some())),
parts: info
.iter()
.map(|part| s3_xml::PartItem {
@ -299,6 +302,8 @@ pub async fn handle_list_parts(
size: s3_xml::IntValue(part.size as i64),
})
.collect(),
// Dummy result values (unsupported features)
initiator: s3_xml::Initiator {
display_name: s3_xml::Value(DUMMY_NAME.to_string()),
id: s3_xml::Value(DUMMY_KEY.to_string()),
@ -335,8 +340,8 @@ struct UploadInfo {
}
#[derive(Debug, PartialEq)]
struct PartInfo {
etag: String,
struct PartInfo<'a> {
etag: &'a str,
timestamp: u64,
part_number: u64,
size: u64,
@ -456,107 +461,51 @@ where
}
}
fn fetch_part_info(
fn fetch_part_info<'a>(
query: &ListPartsQuery,
object: Option<Object>,
version: Option<Version>,
upload_id: Uuid,
) -> Result<(Vec<PartInfo>, Option<u64>), Error> {
// Check results
let object = object.ok_or(Error::NoSuchKey)?;
mpu: &'a MultipartUpload,
) -> Result<(Vec<PartInfo<'a>>, Option<u64>), Error> {
assert!((1..=1000).contains(&query.max_parts)); // see s3/api_server.rs
let obj_version = object
.versions()
.iter()
.find(|v| v.uuid == upload_id && v.is_uploading())
.ok_or(Error::NoSuchUpload)?;
let version = version.ok_or(Error::NoSuchKey)?;
// Cut the beginning of our 2 vectors if required
let (etags, blocks) = match &query.part_number_marker {
Some(marker) => {
let next = marker + 1;
let part_idx = into_ok_or_err(
version
.parts_etags
.items()
.binary_search_by(|(part_num, _)| part_num.cmp(&next)),
);
let parts = &version.parts_etags.items()[part_idx..];
let block_idx = into_ok_or_err(
version
.blocks
.items()
.binary_search_by(|(vkey, _)| vkey.part_number.cmp(&next)),
);
let blocks = &version.blocks.items()[block_idx..];
(parts, blocks)
}
None => (version.parts_etags.items(), version.blocks.items()),
};
// Use the block vector to compute a (part_number, size) vector
let mut size = Vec::<(u64, u64)>::new();
blocks.iter().for_each(|(key, val)| {
let mut new_size = val.size;
match size.pop() {
Some((part_number, size)) if part_number == key.part_number => new_size += size,
Some(v) => size.push(v),
None => (),
}
size.push((key.part_number, new_size))
});
// Merge the etag vector and size vector to build a PartInfo vector
let max_parts = query.max_parts as usize;
let (mut etag_iter, mut size_iter) = (etags.iter().peekable(), size.iter().peekable());
let mut info = Vec::<PartInfo>::with_capacity(max_parts);
while info.len() < max_parts {
match (etag_iter.peek(), size_iter.peek()) {
(Some((ep, etag)), Some((sp, size))) => match ep.cmp(sp) {
Ordering::Less => {
debug!("ETag information ignored due to missing corresponding block information. Query: {:?}", query);
etag_iter.next();
// Parse multipart upload part list, removing parts not yet finished
// and failed part uploads that were overwritten
let mut parts: Vec<PartInfo<'a>> = Vec::with_capacity(mpu.parts.items().len());
for (pk, p) in mpu.parts.items().iter() {
if let (Some(etag), Some(size)) = (&p.etag, p.size) {
let part_info = PartInfo {
part_number: pk.part_number,
timestamp: pk.timestamp,
etag,
size,
};
match parts.last_mut() {
Some(lastpart) if lastpart.part_number == pk.part_number => {
*lastpart = part_info;
}
Ordering::Equal => {
info.push(PartInfo {
etag: etag.to_string(),
timestamp: obj_version.timestamp,
part_number: *ep,
size: *size,
});
etag_iter.next();
size_iter.next();
_ => {
parts.push(part_info);
}
Ordering::Greater => {
debug!("Block information ignored due to missing corresponding ETag information. Query: {:?}", query);
size_iter.next();
}
},
(None, None) => return Ok((info, None)),
_ => {
debug!(
"Additional block or ETag information ignored. Query: {:?}",
query
);
return Ok((info, None));
}
}
}
match info.last() {
Some(part_info) => {
let pagination = Some(part_info.part_number);
Ok((info, pagination))
}
None => Ok((info, None)),
// Cut the beginning if we have a marker
if let Some(marker) = &query.part_number_marker {
let next = marker + 1;
let part_idx = parts
.binary_search_by(|part| part.part_number.cmp(&next))
.unwrap_or_else(|x| x);
parts = parts.split_off(part_idx);
}
// Cut the end if we have too many parts
if parts.len() > query.max_parts as usize {
parts.truncate(query.max_parts as usize);
let pagination = Some(parts.last().unwrap().part_number);
return Ok((parts, pagination));
}
Ok((parts, None))
}
/*
@ -651,7 +600,7 @@ impl ListMultipartUploadsQuery {
}),
uuid => Ok(RangeBegin::AfterUpload {
key: key_marker.to_string(),
upload: s3_put::decode_upload_id(uuid)?,
upload: s3_multipart::decode_upload_id(uuid)?,
}),
},
@ -843,7 +792,7 @@ impl ExtractAccumulator for UploadAccumulator {
let mut uploads_for_key = object
.versions()
.iter()
.filter(|x| x.is_uploading())
.filter(|x| x.is_uploading(Some(true)))
.collect::<Vec<&ObjectVersion>>();
// S3 logic requires lexicographically sorted upload ids.
@ -918,14 +867,6 @@ impl ExtractAccumulator for UploadAccumulator {
* Utility functions
*/
/// This is a stub for Result::into_ok_or_err that is not yet in Rust stable
fn into_ok_or_err<T>(r: Result<T, T>) -> T {
match r {
Ok(r) => r,
Err(r) => r,
}
}
/// Returns the common prefix of the object given the query prefix and delimiter
fn common_prefix<'a>(object: &'a Object, query: &ListQueryCommon) -> Option<&'a str> {
match &query.delimiter {
@ -951,7 +892,6 @@ fn uriencode_maybe(s: &str, yes: bool) -> s3_xml::Value {
#[cfg(test)]
mod tests {
use super::*;
use garage_model::s3::version_table::*;
use garage_util::*;
use std::iter::FromIterator;
@ -991,10 +931,13 @@ mod tests {
ObjectVersion {
uuid: Uuid::from(uuid),
timestamp: TS,
state: ObjectVersionState::Uploading(ObjectVersionHeaders {
content_type: "text/plain".to_string(),
other: BTreeMap::<String, String>::new(),
}),
state: ObjectVersionState::Uploading {
multipart: true,
headers: ObjectVersionHeaders {
content_type: "text/plain".to_string(),
other: BTreeMap::<String, String>::new(),
},
},
}
}
@ -1169,83 +1112,77 @@ mod tests {
Ok(())
}
fn version() -> Version {
fn mpu() -> MultipartUpload {
let uuid = Uuid::from([0x08; 32]);
let blocks = vec![
let parts = vec![
(
VersionBlockKey {
MpuPartKey {
part_number: 1,
offset: 1,
timestamp: TS,
},
VersionBlock {
hash: uuid,
size: 3,
MpuPart {
version: uuid,
size: Some(3),
etag: Some("etag1".into()),
},
),
(
VersionBlockKey {
part_number: 1,
offset: 2,
},
VersionBlock {
hash: uuid,
size: 2,
},
),
(
VersionBlockKey {
MpuPartKey {
part_number: 2,
offset: 1,
timestamp: TS,
},
VersionBlock {
hash: uuid,
size: 8,
MpuPart {
version: uuid,
size: None,
etag: None,
},
),
(
VersionBlockKey {
MpuPartKey {
part_number: 3,
timestamp: TS,
},
MpuPart {
version: uuid,
size: Some(10),
etag: Some("etag2".into()),
},
),
(
MpuPartKey {
part_number: 5,
offset: 1,
timestamp: TS,
},
VersionBlock {
hash: uuid,
size: 7,
MpuPart {
version: uuid,
size: Some(7),
etag: Some("etag3".into()),
},
),
(
VersionBlockKey {
MpuPartKey {
part_number: 8,
offset: 1,
timestamp: TS,
},
VersionBlock {
hash: uuid,
size: 5,
MpuPart {
version: uuid,
size: Some(5),
etag: Some("etag4".into()),
},
),
];
let etags = vec![
(1, "etag1".to_string()),
(3, "etag2".to_string()),
(5, "etag3".to_string()),
(8, "etag4".to_string()),
(9, "etag5".to_string()),
];
Version {
bucket_id: uuid,
key: "a".to_string(),
uuid,
MultipartUpload {
upload_id: uuid,
timestamp: TS,
deleted: false.into(),
blocks: crdt::Map::<VersionBlockKey, VersionBlock>::from_iter(blocks),
parts_etags: crdt::Map::<u64, String>::from_iter(etags),
parts: crdt::Map::<MpuPartKey, MpuPart>::from_iter(parts),
bucket_id: uuid,
key: "a".into(),
}
}
fn obj() -> Object {
Object::new(bucket(), "d".to_string(), vec![objup_version([0x08; 32])])
}
#[test]
fn test_fetch_part_info() -> Result<(), Error> {
let uuid = Uuid::from([0x08; 32]);
@ -1258,82 +1195,85 @@ mod tests {
max_parts: 2,
};
assert!(
fetch_part_info(&query, None, None, uuid).is_err(),
"No object and version should fail"
);
assert!(
fetch_part_info(&query, Some(obj()), None, uuid).is_err(),
"No version should faild"
);
assert!(
fetch_part_info(&query, None, Some(version()), uuid).is_err(),
"No object should fail"
);
let mpu = mpu();
// Start from the beginning but with limited size to trigger pagination
let (info, pagination) = fetch_part_info(&query, Some(obj()), Some(version()), uuid)?;
assert_eq!(pagination.unwrap(), 5);
let (info, pagination) = fetch_part_info(&query, &mpu)?;
assert_eq!(pagination.unwrap(), 3);
assert_eq!(
info,
vec![
PartInfo {
etag: "etag1".to_string(),
etag: "etag1",
timestamp: TS,
part_number: 1,
size: 5
size: 3
},
PartInfo {
etag: "etag3".to_string(),
etag: "etag2",
timestamp: TS,
part_number: 5,
size: 7
part_number: 3,
size: 10
},
]
);
// Use previous pagination to make a new request
query.part_number_marker = Some(pagination.unwrap());
let (info, pagination) = fetch_part_info(&query, Some(obj()), Some(version()), uuid)?;
let (info, pagination) = fetch_part_info(&query, &mpu)?;
assert!(pagination.is_none());
assert_eq!(
info,
vec![PartInfo {
etag: "etag4".to_string(),
timestamp: TS,
part_number: 8,
size: 5
},]
vec![
PartInfo {
etag: "etag3",
timestamp: TS,
part_number: 5,
size: 7
},
PartInfo {
etag: "etag4",
timestamp: TS,
part_number: 8,
size: 5
},
]
);
// Trying to access a part that is way larger than registered ones
query.part_number_marker = Some(9999);
let (info, pagination) = fetch_part_info(&query, Some(obj()), Some(version()), uuid)?;
let (info, pagination) = fetch_part_info(&query, &mpu)?;
assert!(pagination.is_none());
assert_eq!(info, vec![]);
// Try without any limitation
query.max_parts = 1000;
query.part_number_marker = None;
let (info, pagination) = fetch_part_info(&query, Some(obj()), Some(version()), uuid)?;
let (info, pagination) = fetch_part_info(&query, &mpu)?;
assert!(pagination.is_none());
assert_eq!(
info,
vec![
PartInfo {
etag: "etag1".to_string(),
etag: "etag1",
timestamp: TS,
part_number: 1,
size: 5
size: 3
},
PartInfo {
etag: "etag3".to_string(),
etag: "etag2",
timestamp: TS,
part_number: 3,
size: 10
},
PartInfo {
etag: "etag3",
timestamp: TS,
part_number: 5,
size: 7
},
PartInfo {
etag: "etag4".to_string(),
etag: "etag4",
timestamp: TS,
part_number: 8,
size: 5

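The rewritten `fetch_part_info` above reduces ListParts pagination to two operations on the already-sorted part list: cut everything up to and including the `part-number-marker`, then truncate to `max-parts` and report the last kept part number as the next marker. The same logic on a bare vector of part numbers, as a standalone sketch:

// Standalone illustration of the marker-plus-truncation logic used in
// fetch_part_info, applied to a plain sorted list of part numbers.
fn paginate(mut parts: Vec<u64>, marker: Option<u64>, max_parts: usize) -> (Vec<u64>, Option<u64>) {
    if let Some(marker) = marker {
        let next = marker + 1;
        // Index of the first part number >= next (Ok or Err of binary_search).
        let idx = parts.binary_search(&next).unwrap_or_else(|x| x);
        parts = parts.split_off(idx);
    }
    if parts.len() > max_parts {
        parts.truncate(max_parts);
        let pagination = Some(*parts.last().unwrap());
        return (parts, pagination);
    }
    (parts, None)
}

fn main() {
    let parts = vec![1, 3, 5, 8];
    // First page of two parts; the last returned number becomes the next marker.
    assert_eq!(paginate(parts.clone(), None, 2), (vec![1, 3], Some(3)));
    // The next request resumes after the marker and exhausts the list.
    assert_eq!(paginate(parts.clone(), Some(3), 2), (vec![5, 8], None));
    // A marker past the highest part number yields an empty page.
    assert_eq!(paginate(parts, Some(9999), 2), (vec![], None));
    println!("pagination sketch ok");
}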
View File

@ -6,7 +6,9 @@ mod copy;
pub mod cors;
mod delete;
pub mod get;
mod lifecycle;
mod list;
mod multipart;
mod post_object;
mod put;
mod website;

465
src/api/s3/multipart.rs Normal file
View File

@ -0,0 +1,465 @@
use std::collections::HashMap;
use std::sync::Arc;
use futures::prelude::*;
use hyper::body::Body;
use hyper::{Request, Response};
use md5::{Digest as Md5Digest, Md5};
use garage_table::*;
use garage_util::async_hash::*;
use garage_util::data::*;
use garage_util::time::*;
use garage_model::bucket_table::Bucket;
use garage_model::garage::Garage;
use garage_model::s3::block_ref_table::*;
use garage_model::s3::mpu_table::*;
use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*;
use crate::s3::error::*;
use crate::s3::put::*;
use crate::s3::xml as s3_xml;
use crate::signature::verify_signed_content;
// ----
pub async fn handle_create_multipart_upload(
garage: Arc<Garage>,
req: &Request<Body>,
bucket_name: &str,
bucket_id: Uuid,
key: &str,
) -> Result<Response<Body>, Error> {
let upload_id = gen_uuid();
let timestamp = now_msec();
let headers = get_headers(req.headers())?;
// Create object in object table
let object_version = ObjectVersion {
uuid: upload_id,
timestamp,
state: ObjectVersionState::Uploading {
multipart: true,
headers,
},
};
let object = Object::new(bucket_id, key.to_string(), vec![object_version]);
garage.object_table.insert(&object).await?;
// Create multipart upload in mpu table
// This multipart upload will hold references to uploaded parts
// (which are entries in the Version table)
let mpu = MultipartUpload::new(upload_id, timestamp, bucket_id, key.into(), false);
garage.mpu_table.insert(&mpu).await?;
// Send success response
let result = s3_xml::InitiateMultipartUploadResult {
xmlns: (),
bucket: s3_xml::Value(bucket_name.to_string()),
key: s3_xml::Value(key.to_string()),
upload_id: s3_xml::Value(hex::encode(upload_id)),
};
let xml = s3_xml::to_xml_with_header(&result)?;
Ok(Response::new(Body::from(xml.into_bytes())))
}
pub async fn handle_put_part(
garage: Arc<Garage>,
req: Request<Body>,
bucket_id: Uuid,
key: &str,
part_number: u64,
upload_id: &str,
content_sha256: Option<Hash>,
) -> Result<Response<Body>, Error> {
let upload_id = decode_upload_id(upload_id)?;
let content_md5 = match req.headers().get("content-md5") {
Some(x) => Some(x.to_str()?.to_string()),
None => None,
};
// Read first chunk, and at the same time try to get the object to see if it exists
let key = key.to_string();
let body = req.into_body().map_err(Error::from);
let mut chunker = StreamChunker::new(body, garage.config.block_size);
let ((_, _, mut mpu), first_block) = futures::try_join!(
get_upload(&garage, &bucket_id, &key, &upload_id),
chunker.next(),
)?;
// Check object is valid and part can be accepted
let first_block = first_block.ok_or_bad_request("Empty body")?;
// Calculate part identity: timestamp, version id
let version_uuid = gen_uuid();
let mpu_part_key = MpuPartKey {
part_number,
timestamp: mpu.next_timestamp(part_number),
};
// The following consists of many steps that can each fail.
// Keep track that some cleanup will be needed if things fail
// before everything is finished (cleanup is done using the Drop trait).
let mut interrupted_cleanup = InterruptedCleanup(Some(InterruptedCleanupInner {
garage: garage.clone(),
upload_id,
version_uuid,
}));
// Create version and link version from MPU
mpu.parts.clear();
mpu.parts.put(
mpu_part_key,
MpuPart {
version: version_uuid,
etag: None,
size: None,
},
);
garage.mpu_table.insert(&mpu).await?;
let version = Version::new(
version_uuid,
VersionBacklink::MultipartUpload { upload_id },
false,
);
garage.version_table.insert(&version).await?;
// Copy data to version
let first_block_hash = async_blake2sum(first_block.clone()).await;
let (total_size, data_md5sum, data_sha256sum) = read_and_put_blocks(
&garage,
&version,
part_number,
first_block,
first_block_hash,
&mut chunker,
)
.await?;
// Verify that checksums match
ensure_checksum_matches(
data_md5sum.as_slice(),
data_sha256sum,
content_md5.as_deref(),
content_sha256,
)?;
// Store part etag in version
let data_md5sum_hex = hex::encode(data_md5sum);
mpu.parts.put(
mpu_part_key,
MpuPart {
version: version_uuid,
etag: Some(data_md5sum_hex.clone()),
size: Some(total_size),
},
);
garage.mpu_table.insert(&mpu).await?;
// We were not interrupted, everything went fine.
// We won't have to clean up on drop.
interrupted_cleanup.cancel();
let response = Response::builder()
.header("ETag", format!("\"{}\"", data_md5sum_hex))
.body(Body::empty())
.unwrap();
Ok(response)
}
struct InterruptedCleanup(Option<InterruptedCleanupInner>);
struct InterruptedCleanupInner {
garage: Arc<Garage>,
upload_id: Uuid,
version_uuid: Uuid,
}
impl InterruptedCleanup {
fn cancel(&mut self) {
drop(self.0.take());
}
}
impl Drop for InterruptedCleanup {
fn drop(&mut self) {
if let Some(info) = self.0.take() {
tokio::spawn(async move {
let version = Version::new(
info.version_uuid,
VersionBacklink::MultipartUpload {
upload_id: info.upload_id,
},
true,
);
if let Err(e) = info.garage.version_table.insert(&version).await {
warn!("Cannot cleanup after aborted UploadPart: {}", e);
}
});
}
}
}
pub async fn handle_complete_multipart_upload(
garage: Arc<Garage>,
req: Request<Body>,
bucket_name: &str,
bucket: &Bucket,
key: &str,
upload_id: &str,
content_sha256: Option<Hash>,
) -> Result<Response<Body>, Error> {
let body = hyper::body::to_bytes(req.into_body()).await?;
if let Some(content_sha256) = content_sha256 {
verify_signed_content(content_sha256, &body[..])?;
}
let body_xml = roxmltree::Document::parse(std::str::from_utf8(&body)?)?;
let body_list_of_parts = parse_complete_multipart_upload_body(&body_xml)
.ok_or_bad_request("Invalid CompleteMultipartUpload XML")?;
debug!(
"CompleteMultipartUpload list of parts: {:?}",
body_list_of_parts
);
let upload_id = decode_upload_id(upload_id)?;
// Get object and multipart upload
let key = key.to_string();
let (_, mut object_version, mpu) = get_upload(&garage, &bucket.id, &key, &upload_id).await?;
if mpu.parts.is_empty() {
return Err(Error::bad_request("No data was uploaded"));
}
let headers = match object_version.state {
ObjectVersionState::Uploading { headers, .. } => headers,
_ => unreachable!(),
};
// Check that part numbers are an increasing sequence.
// (it doesn't need to start at 1 nor to be a continuous sequence,
// see discussion in #192)
if body_list_of_parts.is_empty() {
return Err(Error::EntityTooSmall);
}
if !body_list_of_parts
.iter()
.zip(body_list_of_parts.iter().skip(1))
.all(|(p1, p2)| p1.part_number < p2.part_number)
{
return Err(Error::InvalidPartOrder);
}
// Check that the list of parts they gave us corresponds to parts we have here
debug!("Parts stored in multipart upload: {:?}", mpu.parts.items());
let mut have_parts = HashMap::new();
for (pk, pv) in mpu.parts.items().iter() {
have_parts.insert(pk.part_number, pv);
}
let mut parts = vec![];
for req_part in body_list_of_parts.iter() {
match have_parts.get(&req_part.part_number) {
Some(part) if part.etag.as_ref() == Some(&req_part.etag) && part.size.is_some() => {
parts.push(*part)
}
_ => return Err(Error::InvalidPart),
}
}
let grg = &garage;
let parts_versions = futures::future::try_join_all(parts.iter().map(|p| async move {
grg.version_table
.get(&p.version, &EmptyKey)
.await?
.ok_or_internal_error("Part version missing from version table")
}))
.await?;
// Create final version and block refs
let mut final_version = Version::new(
upload_id,
VersionBacklink::Object {
bucket_id: bucket.id,
key: key.to_string(),
},
false,
);
for (part_number, part_version) in parts_versions.iter().enumerate() {
if part_version.deleted.get() {
return Err(Error::InvalidPart);
}
for (vbk, vb) in part_version.blocks.items().iter() {
final_version.blocks.put(
VersionBlockKey {
part_number: (part_number + 1) as u64,
offset: vbk.offset,
},
*vb,
);
}
}
garage.version_table.insert(&final_version).await?;
let block_refs = final_version.blocks.items().iter().map(|(_, b)| BlockRef {
block: b.hash,
version: upload_id,
deleted: false.into(),
});
garage.block_ref_table.insert_many(block_refs).await?;
// Calculate etag of final object
// To understand how etags are calculated, read more here:
// https://teppen.io/2018/06/23/aws_s3_etags/
let mut etag_md5_hasher = Md5::new();
for part in parts.iter() {
etag_md5_hasher.update(part.etag.as_ref().unwrap().as_bytes());
}
let etag = format!(
"{}-{}",
hex::encode(etag_md5_hasher.finalize()),
parts.len()
);
// Calculate total size of final object
let total_size = parts.iter().map(|x| x.size.unwrap()).sum();
if let Err(e) = check_quotas(&garage, bucket, &key, total_size).await {
object_version.state = ObjectVersionState::Aborted;
let final_object = Object::new(bucket.id, key.clone(), vec![object_version]);
garage.object_table.insert(&final_object).await?;
return Err(e);
}
// Write final object version
object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock(
ObjectVersionMeta {
headers,
size: total_size,
etag: etag.clone(),
},
final_version.blocks.items()[0].1.hash,
));
let final_object = Object::new(bucket.id, key.clone(), vec![object_version]);
garage.object_table.insert(&final_object).await?;
// Send response saying ok we're done
let result = s3_xml::CompleteMultipartUploadResult {
xmlns: (),
location: None,
bucket: s3_xml::Value(bucket_name.to_string()),
key: s3_xml::Value(key),
etag: s3_xml::Value(format!("\"{}\"", etag)),
};
let xml = s3_xml::to_xml_with_header(&result)?;
Ok(Response::new(Body::from(xml.into_bytes())))
}
pub async fn handle_abort_multipart_upload(
garage: Arc<Garage>,
bucket_id: Uuid,
key: &str,
upload_id: &str,
) -> Result<Response<Body>, Error> {
let upload_id = decode_upload_id(upload_id)?;
let (_, mut object_version, _) =
get_upload(&garage, &bucket_id, &key.to_string(), &upload_id).await?;
object_version.state = ObjectVersionState::Aborted;
let final_object = Object::new(bucket_id, key.to_string(), vec![object_version]);
garage.object_table.insert(&final_object).await?;
Ok(Response::new(Body::from(vec![])))
}
// ======== helpers ============
#[allow(clippy::ptr_arg)]
pub(crate) async fn get_upload(
garage: &Garage,
bucket_id: &Uuid,
key: &String,
upload_id: &Uuid,
) -> Result<(Object, ObjectVersion, MultipartUpload), Error> {
let (object, mpu) = futures::try_join!(
garage.object_table.get(bucket_id, key).map_err(Error::from),
garage
.mpu_table
.get(upload_id, &EmptyKey)
.map_err(Error::from),
)?;
let object = object.ok_or(Error::NoSuchUpload)?;
let mpu = mpu.ok_or(Error::NoSuchUpload)?;
let object_version = object
.versions()
.iter()
.find(|v| v.uuid == *upload_id && v.is_uploading(Some(true)))
.ok_or(Error::NoSuchUpload)?
.clone();
Ok((object, object_version, mpu))
}
pub fn decode_upload_id(id: &str) -> Result<Uuid, Error> {
let id_bin = hex::decode(id).map_err(|_| Error::NoSuchUpload)?;
if id_bin.len() != 32 {
return Err(Error::NoSuchUpload);
}
let mut uuid = [0u8; 32];
uuid.copy_from_slice(&id_bin[..]);
Ok(Uuid::from(uuid))
}
#[derive(Debug)]
struct CompleteMultipartUploadPart {
etag: String,
part_number: u64,
}
fn parse_complete_multipart_upload_body(
xml: &roxmltree::Document,
) -> Option<Vec<CompleteMultipartUploadPart>> {
let mut parts = vec![];
let root = xml.root();
let cmu = root.first_child()?;
if !cmu.has_tag_name("CompleteMultipartUpload") {
return None;
}
for item in cmu.children() {
// Only parse <Part> nodes
if !item.is_element() {
continue;
}
if item.has_tag_name("Part") {
let etag = item.children().find(|e| e.has_tag_name("ETag"))?.text()?;
let part_number = item
.children()
.find(|e| e.has_tag_name("PartNumber"))?
.text()?;
parts.push(CompleteMultipartUploadPart {
etag: etag.trim_matches('"').to_string(),
part_number: part_number.parse().ok()?,
});
} else {
return None;
}
}
Some(parts)
}
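
The final ETag built in `handle_complete_multipart_upload` above follows the `<md5>-<part count>` convention discussed at the linked teppen.io article: the loop hashes the stored part ETags in order (as the hexadecimal strings kept in the MPU table) and appends the number of parts. A standalone sketch mirroring that computation, using the same `md5` and `hex` crates imported at the top of this file:

use md5::{Digest as Md5Digest, Md5};

// Mirrors the etag loop in handle_complete_multipart_upload: feed the part
// ETags to MD5 in order, then append "-<number of parts>".
fn multipart_etag(part_etags: &[&str]) -> String {
    let mut etag_md5_hasher = Md5::new();
    for etag in part_etags {
        etag_md5_hasher.update(etag.as_bytes());
    }
    format!(
        "{}-{}",
        hex::encode(etag_md5_hasher.finalize()),
        part_etags.len()
    )
}

fn main() {
    // Example part ETags (each would be the MD5 of one uploaded part).
    let parts = [
        "0cc175b9c0f1b6a831c399e269772661",
        "92eb5ffee6ae2fec3ad71c777531578f",
    ];
    println!("{}", multipart_etag(&parts));
}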

View File

@ -1,4 +1,4 @@
use std::collections::{BTreeMap, BTreeSet, HashMap};
use std::collections::{BTreeMap, HashMap};
use std::sync::Arc;
use base64::prelude::*;
@ -30,8 +30,6 @@ use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*;
use crate::s3::error::*;
use crate::s3::xml as s3_xml;
use crate::signature::verify_signed_content;
pub async fn handle_put(
garage: Arc<Garage>,
@ -123,20 +121,23 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
// The following consists of many steps that can each fail.
// Keep track that some cleanup will be needed if things fail
// before everything is finished (cleanup is done using the Drop trait).
let mut interrupted_cleanup = InterruptedCleanup(Some((
garage.clone(),
bucket.id,
key.into(),
let mut interrupted_cleanup = InterruptedCleanup(Some(InterruptedCleanupInner {
garage: garage.clone(),
bucket_id: bucket.id,
key: key.into(),
version_uuid,
version_timestamp,
)));
}));
// Write version identifier in object table so that we have a trace
// that we are uploading something
let mut object_version = ObjectVersion {
uuid: version_uuid,
timestamp: version_timestamp,
state: ObjectVersionState::Uploading(headers.clone()),
state: ObjectVersionState::Uploading {
headers: headers.clone(),
multipart: false,
},
};
let object = Object::new(bucket.id, key.into(), vec![object_version.clone()]);
garage.object_table.insert(&object).await?;
@ -145,7 +146,14 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
// Write this entry now, even with empty block list,
// to prevent block_ref entries from being deleted (they can be deleted
// if they reference a version that isn't found in the version table)
let version = Version::new(version_uuid, bucket.id, key.into(), false);
let version = Version::new(
version_uuid,
VersionBacklink::Object {
bucket_id: bucket.id,
key: key.into(),
},
false,
);
garage.version_table.insert(&version).await?;
// Transfer data and verify checksum
@ -192,7 +200,7 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
/// Validate MD5 sum against content-md5 header
/// and sha256sum against signed content-sha256
fn ensure_checksum_matches(
pub(crate) fn ensure_checksum_matches(
data_md5sum: &[u8],
data_sha256sum: garage_util::data::FixedBytes32,
content_md5: Option<&str>,
@ -218,7 +226,7 @@ fn ensure_checksum_matches(
}
/// Check that inserting this object with this size doesn't exceed bucket quotas
async fn check_quotas(
pub(crate) async fn check_quotas(
garage: &Arc<Garage>,
bucket: &Bucket,
key: &str,
@ -275,7 +283,7 @@ async fn check_quotas(
Ok(())
}
async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
garage: &Garage,
version: &Version,
part_number: u64,
@ -381,7 +389,7 @@ async fn put_block_meta(
Ok(())
}
struct StreamChunker<S: Stream<Item = Result<Bytes, Error>>> {
pub(crate) struct StreamChunker<S: Stream<Item = Result<Bytes, Error>>> {
stream: S,
read_all: bool,
block_size: usize,
@ -389,7 +397,7 @@ struct StreamChunker<S: Stream<Item = Result<Bytes, Error>>> {
}
impl<S: Stream<Item = Result<Bytes, Error>> + Unpin> StreamChunker<S> {
fn new(stream: S, block_size: usize) -> Self {
pub(crate) fn new(stream: S, block_size: usize) -> Self {
Self {
stream,
read_all: false,
@ -398,7 +406,7 @@ impl<S: Stream<Item = Result<Bytes, Error>> + Unpin> StreamChunker<S> {
}
}
async fn next(&mut self) -> Result<Option<Bytes>, Error> {
pub(crate) async fn next(&mut self) -> Result<Option<Bytes>, Error> {
while !self.read_all && self.buf.len() < self.block_size {
if let Some(block) = self.stream.next().await {
let bytes = block?;
@ -425,7 +433,14 @@ pub fn put_response(version_uuid: Uuid, md5sum_hex: String) -> Response<Body> {
.unwrap()
}
struct InterruptedCleanup(Option<(Arc<Garage>, Uuid, String, Uuid, u64)>);
struct InterruptedCleanup(Option<InterruptedCleanupInner>);
struct InterruptedCleanupInner {
garage: Arc<Garage>,
bucket_id: Uuid,
key: String,
version_uuid: Uuid,
version_timestamp: u64,
}
impl InterruptedCleanup {
fn cancel(&mut self) {
@ -434,15 +449,15 @@ impl InterruptedCleanup {
}
impl Drop for InterruptedCleanup {
fn drop(&mut self) {
if let Some((garage, bucket_id, key, version_uuid, version_ts)) = self.0.take() {
if let Some(info) = self.0.take() {
tokio::spawn(async move {
let object_version = ObjectVersion {
uuid: version_uuid,
timestamp: version_ts,
uuid: info.version_uuid,
timestamp: info.version_timestamp,
state: ObjectVersionState::Aborted,
};
let object = Object::new(bucket_id, key, vec![object_version]);
if let Err(e) = garage.object_table.insert(&object).await {
let object = Object::new(info.bucket_id, info.key, vec![object_version]);
if let Err(e) = info.garage.object_table.insert(&object).await {
warn!("Cannot cleanup after aborted PutObject: {}", e);
}
});
@ -450,326 +465,9 @@ impl Drop for InterruptedCleanup {
}
}
// ----
// ============ helpers ============
pub async fn handle_create_multipart_upload(
garage: Arc<Garage>,
req: &Request<Body>,
bucket_name: &str,
bucket_id: Uuid,
key: &str,
) -> Result<Response<Body>, Error> {
let version_uuid = gen_uuid();
let headers = get_headers(req.headers())?;
// Create object in object table
let object_version = ObjectVersion {
uuid: version_uuid,
timestamp: now_msec(),
state: ObjectVersionState::Uploading(headers),
};
let object = Object::new(bucket_id, key.to_string(), vec![object_version]);
garage.object_table.insert(&object).await?;
// Insert empty version so that block_ref entries refer to something
// (they are inserted concurrently with blocks in the version table, so
// there is the possibility that they are inserted before the version table
// is created, in which case it is allowed to delete them, e.g. in repair_*)
let version = Version::new(version_uuid, bucket_id, key.into(), false);
garage.version_table.insert(&version).await?;
// Send success response
let result = s3_xml::InitiateMultipartUploadResult {
xmlns: (),
bucket: s3_xml::Value(bucket_name.to_string()),
key: s3_xml::Value(key.to_string()),
upload_id: s3_xml::Value(hex::encode(version_uuid)),
};
let xml = s3_xml::to_xml_with_header(&result)?;
Ok(Response::new(Body::from(xml.into_bytes())))
}
pub async fn handle_put_part(
garage: Arc<Garage>,
req: Request<Body>,
bucket_id: Uuid,
key: &str,
part_number: u64,
upload_id: &str,
content_sha256: Option<Hash>,
) -> Result<Response<Body>, Error> {
let version_uuid = decode_upload_id(upload_id)?;
let content_md5 = match req.headers().get("content-md5") {
Some(x) => Some(x.to_str()?.to_string()),
None => None,
};
// Read first chunk, and at the same time try to get the object to see if it exists
let key = key.to_string();
let body = req.into_body().map_err(Error::from);
let mut chunker = StreamChunker::new(body, garage.config.block_size);
let (object, version, first_block) = futures::try_join!(
garage
.object_table
.get(&bucket_id, &key)
.map_err(Error::from),
garage
.version_table
.get(&version_uuid, &EmptyKey)
.map_err(Error::from),
chunker.next(),
)?;
// Check object is valid and multipart block can be accepted
let first_block = first_block.ok_or_bad_request("Empty body")?;
let object = object.ok_or_bad_request("Object not found")?;
if !object
.versions()
.iter()
.any(|v| v.uuid == version_uuid && v.is_uploading())
{
return Err(Error::NoSuchUpload);
}
// Check part hasn't already been uploaded
if let Some(v) = version {
if v.has_part_number(part_number) {
return Err(Error::bad_request(format!(
"Part number {} has already been uploaded",
part_number
)));
}
}
// Copy block to store
let version = Version::new(version_uuid, bucket_id, key, false);
let first_block_hash = async_blake2sum(first_block.clone()).await;
let (_, data_md5sum, data_sha256sum) = read_and_put_blocks(
&garage,
&version,
part_number,
first_block,
first_block_hash,
&mut chunker,
)
.await?;
// Verify that checksums match
ensure_checksum_matches(
data_md5sum.as_slice(),
data_sha256sum,
content_md5.as_deref(),
content_sha256,
)?;
// Store part etag in version
let data_md5sum_hex = hex::encode(data_md5sum);
let mut version = version;
version
.parts_etags
.put(part_number, data_md5sum_hex.clone());
garage.version_table.insert(&version).await?;
let response = Response::builder()
.header("ETag", format!("\"{}\"", data_md5sum_hex))
.body(Body::empty())
.unwrap();
Ok(response)
}
pub async fn handle_complete_multipart_upload(
garage: Arc<Garage>,
req: Request<Body>,
bucket_name: &str,
bucket: &Bucket,
key: &str,
upload_id: &str,
content_sha256: Option<Hash>,
) -> Result<Response<Body>, Error> {
let body = hyper::body::to_bytes(req.into_body()).await?;
if let Some(content_sha256) = content_sha256 {
verify_signed_content(content_sha256, &body[..])?;
}
let body_xml = roxmltree::Document::parse(std::str::from_utf8(&body)?)?;
let body_list_of_parts = parse_complete_multipart_upload_body(&body_xml)
.ok_or_bad_request("Invalid CompleteMultipartUpload XML")?;
debug!(
"CompleteMultipartUpload list of parts: {:?}",
body_list_of_parts
);
let version_uuid = decode_upload_id(upload_id)?;
// Get object and version
let key = key.to_string();
let (object, version) = futures::try_join!(
garage.object_table.get(&bucket.id, &key),
garage.version_table.get(&version_uuid, &EmptyKey),
)?;
let object = object.ok_or(Error::NoSuchKey)?;
let mut object_version = object
.versions()
.iter()
.find(|v| v.uuid == version_uuid && v.is_uploading())
.cloned()
.ok_or(Error::NoSuchUpload)?;
let version = version.ok_or(Error::NoSuchKey)?;
if version.blocks.is_empty() {
return Err(Error::bad_request("No data was uploaded"));
}
let headers = match object_version.state {
ObjectVersionState::Uploading(headers) => headers,
_ => unreachable!(),
};
// Check that part numbers are an increasing sequence.
// (it doesn't need to start at 1 nor to be a continuous sequence,
// see discussion in #192)
if body_list_of_parts.is_empty() {
return Err(Error::EntityTooSmall);
}
if !body_list_of_parts
.iter()
.zip(body_list_of_parts.iter().skip(1))
.all(|(p1, p2)| p1.part_number < p2.part_number)
{
return Err(Error::InvalidPartOrder);
}
// Garage-specific restriction, see #204: part numbers must be
// consecutive starting at 1
if body_list_of_parts[0].part_number != 1
|| !body_list_of_parts
.iter()
.zip(body_list_of_parts.iter().skip(1))
.all(|(p1, p2)| p1.part_number + 1 == p2.part_number)
{
return Err(Error::NotImplemented("Garage does not support completing a Multipart upload with non-consecutive part numbers. This is a restriction of Garage's data model, which might be fixed in a future release. See issue #204 for more information on this topic.".into()));
}
// Check that the list of parts they gave us corresponds to the parts we have here
debug!("Expected parts from request: {:?}", body_list_of_parts);
debug!("Parts stored in version: {:?}", version.parts_etags.items());
let parts = version
.parts_etags
.items()
.iter()
.map(|pair| (&pair.0, &pair.1));
let same_parts = body_list_of_parts
.iter()
.map(|x| (&x.part_number, &x.etag))
.eq(parts);
if !same_parts {
return Err(Error::InvalidPart);
}
// Check that all blocks belong to one of the parts
let block_parts = version
.blocks
.items()
.iter()
.map(|(bk, _)| bk.part_number)
.collect::<BTreeSet<_>>();
let same_parts = body_list_of_parts
.iter()
.map(|x| x.part_number)
.eq(block_parts.into_iter());
if !same_parts {
return Err(Error::bad_request(
"Part numbers in block list and part list do not match. This can happen if a part was partially uploaded. Please abort the multipart upload and try again."
));
}
// Calculate etag of final object
// To understand how etags are calculated, read more here:
// https://teppen.io/2018/06/23/aws_s3_etags/
let num_parts = body_list_of_parts.len();
let mut etag_md5_hasher = Md5::new();
for (_, etag) in version.parts_etags.items().iter() {
etag_md5_hasher.update(etag.as_bytes());
}
let etag = format!("{}-{}", hex::encode(etag_md5_hasher.finalize()), num_parts);
// Calculate total size of final object
let total_size = version.blocks.items().iter().map(|x| x.1.size).sum();
if let Err(e) = check_quotas(&garage, bucket, &key, total_size).await {
object_version.state = ObjectVersionState::Aborted;
let final_object = Object::new(bucket.id, key.clone(), vec![object_version]);
garage.object_table.insert(&final_object).await?;
return Err(e);
}
// Write final object version
object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock(
ObjectVersionMeta {
headers,
size: total_size,
etag: etag.clone(),
},
version.blocks.items()[0].1.hash,
));
let final_object = Object::new(bucket.id, key.clone(), vec![object_version]);
garage.object_table.insert(&final_object).await?;
// Send response saying ok we're done
let result = s3_xml::CompleteMultipartUploadResult {
xmlns: (),
location: None,
bucket: s3_xml::Value(bucket_name.to_string()),
key: s3_xml::Value(key),
etag: s3_xml::Value(format!("\"{}\"", etag)),
};
let xml = s3_xml::to_xml_with_header(&result)?;
Ok(Response::new(Body::from(xml.into_bytes())))
}
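For reference, a minimal standalone sketch of the final-ETag construction performed in the handler above (assuming the md-5 and hex crates already used in this diff; not part of the change itself): the per-part ETag strings are fed into an MD5 hasher and the number of parts is appended after a dash.

use md5::{Digest, Md5};

// Sketch only: mirrors the etag_md5_hasher logic above.
fn multipart_etag(part_etags: &[&str]) -> String {
    let mut hasher = Md5::new();
    for etag in part_etags {
        // each part's hex-encoded ETag is hashed as text, as in the handler above
        hasher.update(etag.as_bytes());
    }
    format!("{}-{}", hex::encode(hasher.finalize()), part_etags.len())
}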
pub async fn handle_abort_multipart_upload(
garage: Arc<Garage>,
bucket_id: Uuid,
key: &str,
upload_id: &str,
) -> Result<Response<Body>, Error> {
let version_uuid = decode_upload_id(upload_id)?;
let object = garage
.object_table
.get(&bucket_id, &key.to_string())
.await?;
let object = object.ok_or(Error::NoSuchKey)?;
let object_version = object
.versions()
.iter()
.find(|v| v.uuid == version_uuid && v.is_uploading());
let mut object_version = match object_version {
None => return Err(Error::NoSuchUpload),
Some(x) => x.clone(),
};
object_version.state = ObjectVersionState::Aborted;
let final_object = Object::new(bucket_id, key.to_string(), vec![object_version]);
garage.object_table.insert(&final_object).await?;
Ok(Response::new(Body::from(vec![])))
}
fn get_mime_type(headers: &HeaderMap<HeaderValue>) -> Result<String, Error> {
pub(crate) fn get_mime_type(headers: &HeaderMap<HeaderValue>) -> Result<String, Error> {
Ok(headers
.get(hyper::header::CONTENT_TYPE)
.map(|x| x.to_str())
@ -821,54 +519,3 @@ pub(crate) fn get_headers(headers: &HeaderMap<HeaderValue>) -> Result<ObjectVers
other,
})
}
pub fn decode_upload_id(id: &str) -> Result<Uuid, Error> {
let id_bin = hex::decode(id).map_err(|_| Error::NoSuchUpload)?;
if id_bin.len() != 32 {
return Err(Error::NoSuchUpload);
}
let mut uuid = [0u8; 32];
uuid.copy_from_slice(&id_bin[..]);
Ok(Uuid::from(uuid))
}
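As a hedged illustration (the helper name is hypothetical, not part of the diff): the upload ID handled by decode_upload_id() above is simply the hex encoding of the 32-byte version UUID, so encoding and decoding round-trip.

// Hypothetical helper: produces an upload ID that decode_upload_id() accepts.
fn encode_upload_id(version_uuid: &Uuid) -> String {
    hex::encode(version_uuid.as_slice())
}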
#[derive(Debug)]
struct CompleteMultipartUploadPart {
etag: String,
part_number: u64,
}
fn parse_complete_multipart_upload_body(
xml: &roxmltree::Document,
) -> Option<Vec<CompleteMultipartUploadPart>> {
let mut parts = vec![];
let root = xml.root();
let cmu = root.first_child()?;
if !cmu.has_tag_name("CompleteMultipartUpload") {
return None;
}
for item in cmu.children() {
// Only parse <Part> nodes
if !item.is_element() {
continue;
}
if item.has_tag_name("Part") {
let etag = item.children().find(|e| e.has_tag_name("ETag"))?.text()?;
let part_number = item
.children()
.find(|e| e.has_tag_name("PartNumber"))?
.text()?;
parts.push(CompleteMultipartUploadPart {
etag: etag.trim_matches('"').to_string(),
part_number: part_number.parse().ok()?,
});
} else {
return None;
}
}
Some(parts)
}
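To make the expected request body concrete, here is a hedged sketch (sample values invented, not part of the diff) of a CompleteMultipartUpload document that the parser above accepts, fed through roxmltree exactly as in the handler:

// Sketch only: parses a hand-written body and checks the extracted parts.
fn example_parse() {
    let body = r#"<CompleteMultipartUpload>
  <Part><PartNumber>1</PartNumber><ETag>"3858f62230ac3c915f300c664312c63f"</ETag></Part>
  <Part><PartNumber>2</PartNumber><ETag>"0b64696c0d6c1e7b1c0d9e5a4e4a4d2b"</ETag></Part>
</CompleteMultipartUpload>"#;
    let doc = roxmltree::Document::parse(body).unwrap();
    let parts = parse_complete_multipart_upload_body(&doc).unwrap();
    assert_eq!(parts.len(), 2);
    assert_eq!(parts[0].part_number, 1);
    // surrounding quotes are stripped by the parser above
    assert_eq!(parts[0].etag, "3858f62230ac3c915f300c664312c63f");
}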

View File

@ -43,14 +43,11 @@ pub async fn handle_get_website(bucket: &Bucket) -> Result<Response<Body>, Error
pub async fn handle_delete_website(
garage: Arc<Garage>,
bucket_id: Uuid,
mut bucket: Bucket,
) -> Result<Response<Body>, Error> {
let mut bucket = garage
.bucket_helper()
.get_existing_bucket(bucket_id)
.await?;
let param = bucket.params_mut().unwrap();
let param = bucket
.params_mut()
.ok_or_internal_error("Bucket should not be deleted at this point")?;
param.website_config.update(None);
garage.bucket_table.insert(&bucket).await?;
@ -62,7 +59,7 @@ pub async fn handle_delete_website(
pub async fn handle_put_website(
garage: Arc<Garage>,
bucket_id: Uuid,
mut bucket: Bucket,
req: Request<Body>,
content_sha256: Option<Hash>,
) -> Result<Response<Body>, Error> {
@ -72,12 +69,9 @@ pub async fn handle_put_website(
verify_signed_content(content_sha256, &body[..])?;
}
let mut bucket = garage
.bucket_helper()
.get_existing_bucket(bucket_id)
.await?;
let param = bucket.params_mut().unwrap();
let param = bucket
.params_mut()
.ok_or_internal_error("Bucket should not be deleted at this point")?;
let conf: WebsiteConfiguration = from_reader(&body as &[u8])?;
conf.validate()?;

View File

@ -1,6 +1,6 @@
[package]
name = "garage_block"
version = "0.8.4"
version = "0.9.0"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
@ -24,6 +24,7 @@ opentelemetry = "0.17"
arc-swap = "1.5"
async-trait = "0.1.7"
bytes = "1.0"
bytesize = "1.2"
hex = "0.4"
tracing = "0.1"
rand = "0.8"

View File

@ -1,3 +1,5 @@
use std::path::PathBuf;
use bytes::Bytes;
use serde::{Deserialize, Serialize};
use zstd::stream::{decode_all as zstd_decode, Encoder};
@ -19,6 +21,14 @@ pub enum DataBlock {
Compressed(Bytes),
}
#[derive(Debug)]
pub enum DataBlockPath {
/// Uncompressed data file
Plain(PathBuf),
/// Compressed data file
Compressed(PathBuf),
}
impl DataBlock {
/// Query whether this block is compressed
pub fn is_compressed(&self) -> bool {

337
src/block/layout.rs Normal file
View File

@ -0,0 +1,337 @@
use std::path::PathBuf;
use serde::{Deserialize, Serialize};
use garage_util::config::DataDirEnum;
use garage_util::data::Hash;
use garage_util::error::{Error, OkOrMessage};
use garage_util::migrate::*;
type Idx = u16;
const DRIVE_NPART: usize = 1024;
const HASH_DRIVE_BYTES: (usize, usize) = (2, 3);
#[derive(Serialize, Deserialize, Debug, Clone)]
pub(crate) struct DataLayout {
pub(crate) data_dirs: Vec<DataDir>,
/// Primary storage location (index in data_dirs) for each partition:
/// the directory where the data is supposed to be. Blocks are always
/// written there (copies in other dirs may be deleted if they exist)
pub(crate) part_prim: Vec<Idx>,
/// Secondary storage locations for each partition: directories
/// where data blocks might also be found; these are checked when reading
pub(crate) part_sec: Vec<Vec<Idx>>,
}
#[derive(Serialize, Deserialize, Debug, Clone, Eq, PartialEq)]
pub(crate) struct DataDir {
pub(crate) path: PathBuf,
pub(crate) state: DataDirState,
}
#[derive(Serialize, Deserialize, Debug, Clone, Copy, Eq, PartialEq)]
pub(crate) enum DataDirState {
Active { capacity: u64 },
ReadOnly,
}
impl DataLayout {
pub(crate) fn initialize(dirs: &DataDirEnum) -> Result<Self, Error> {
let data_dirs = make_data_dirs(dirs)?;
// Split partitions proportionally to capacity for all drives
// to assign each partition a primary storage location
let total_cap = data_dirs.iter().filter_map(|x| x.capacity()).sum::<u64>();
assert!(total_cap > 0);
let mut part_prim = Vec::with_capacity(DRIVE_NPART);
let mut cum_cap = 0;
for (i, dd) in data_dirs.iter().enumerate() {
if let DataDirState::Active { capacity } = dd.state {
cum_cap += capacity;
let n_total = (cum_cap * DRIVE_NPART as u64) / total_cap;
part_prim.resize(n_total as usize, i as Idx);
}
}
assert_eq!(cum_cap, total_cap);
assert_eq!(part_prim.len(), DRIVE_NPART);
// If any of the storage locations is non-empty, it probably existed before
// this algorithm was added, so add it as a secondary storage location for all partitions
// to make sure existing files are not lost
let mut part_sec = vec![vec![]; DRIVE_NPART];
for (i, dd) in data_dirs.iter().enumerate() {
if dir_not_empty(&dd.path)? {
for (sec, prim) in part_sec.iter_mut().zip(part_prim.iter()) {
if *prim != i as Idx {
sec.push(i as Idx);
}
}
}
}
Ok(Self {
data_dirs,
part_prim,
part_sec,
})
}
pub(crate) fn update(&mut self, dirs: &DataDirEnum) -> Result<(), Error> {
// Make list of new data directories, exit if nothing changed
let data_dirs = make_data_dirs(dirs)?;
if data_dirs == self.data_dirs {
return Ok(());
}
let total_cap = data_dirs.iter().filter_map(|x| x.capacity()).sum::<u64>();
assert!(total_cap > 0);
// Compute mapping of old indices to new indices
let old2new = self
.data_dirs
.iter()
.map(|x| {
data_dirs
.iter()
.position(|y| y.path == x.path)
.map(|x| x as Idx)
})
.collect::<Vec<_>>();
// Compute secondary location list for partitions based on existing
// folders, translating indices from old to new
let mut part_sec = self
.part_sec
.iter()
.map(|dl| {
dl.iter()
.filter_map(|old| old2new.get(*old as usize).copied().flatten())
.collect::<Vec<_>>()
})
.collect::<Vec<_>>();
// Compute a vector that, for each data dir,
// contains the list of partitions primarily stored on that drive
let mut dir_prim = vec![vec![]; data_dirs.len()];
for (ipart, prim) in self.part_prim.iter().enumerate() {
if let Some(new) = old2new.get(*prim as usize).copied().flatten() {
dir_prim[new as usize].push(ipart);
}
}
// Compute the target number of partitions per data directory
let mut cum_cap = 0;
let mut npart_per_dir = vec![0; data_dirs.len()];
for (idir, dd) in data_dirs.iter().enumerate() {
if let DataDirState::Active { capacity } = dd.state {
let begin = (cum_cap * DRIVE_NPART as u64) / total_cap;
cum_cap += capacity;
let end = (cum_cap * DRIVE_NPART as u64) / total_cap;
npart_per_dir[idir] = (end - begin) as usize;
}
}
assert_eq!(cum_cap, total_cap);
assert_eq!(npart_per_dir.iter().sum::<usize>(), DRIVE_NPART);
// For all directories that have too many primary partitions,
// move that partition to secondary
for (idir, (parts, tgt_npart)) in dir_prim.iter_mut().zip(npart_per_dir.iter()).enumerate()
{
while parts.len() > *tgt_npart {
let part = parts.pop().unwrap();
if !part_sec[part].contains(&(idir as Idx)) {
part_sec[part].push(idir as Idx);
}
}
}
// Calculate the vector of primary partition dir index
let mut part_prim = vec![None; DRIVE_NPART];
for (idir, parts) in dir_prim.iter().enumerate() {
for part in parts.iter() {
assert!(part_prim[*part].is_none());
part_prim[*part] = Some(idir as Idx)
}
}
// Calculate a vector of unassigned partitions
let mut unassigned = part_prim
.iter()
.enumerate()
.filter(|(_, dir)| dir.is_none())
.map(|(ipart, _)| ipart)
.collect::<Vec<_>>();
// For all directories that don't have enough primary partitions,
// add partitions from unassigned
for (idir, (parts, tgt_npart)) in dir_prim.iter_mut().zip(npart_per_dir.iter()).enumerate()
{
if parts.len() < *tgt_npart {
let required = *tgt_npart - parts.len();
assert!(unassigned.len() >= required);
for _ in 0..required {
let new_part = unassigned.pop().unwrap();
part_prim[new_part] = Some(idir as Idx);
part_sec[new_part].retain(|x| *x != idir as Idx);
}
}
}
// Sanity checks
assert!(part_prim.iter().all(|x| x.is_some()));
assert!(unassigned.is_empty());
// Transform part_prim from vec of Option<Idx> to vec of Idx
let part_prim = part_prim
.into_iter()
.map(|x| x.unwrap())
.collect::<Vec<_>>();
assert!(part_prim.iter().all(|p| data_dirs
.get(*p as usize)
.and_then(|x| x.capacity())
.unwrap_or(0)
> 0));
// If any of the newly added storage locations is non-empty,
// it might have been removed and added again and might contain data,
// so add it as a secondary storage location for all partitions
// to make sure existing files are not lost
for (i, dd) in data_dirs.iter().enumerate() {
if self.data_dirs.iter().any(|ed| ed.path == dd.path) {
continue;
}
if dir_not_empty(&dd.path)? {
for (sec, prim) in part_sec.iter_mut().zip(part_prim.iter()) {
if *prim != i as Idx && !sec.contains(&(i as Idx)) {
sec.push(i as Idx);
}
}
}
}
// Apply newly generated config
*self = Self {
data_dirs,
part_prim,
part_sec,
};
Ok(())
}
pub(crate) fn primary_block_dir(&self, hash: &Hash) -> PathBuf {
let ipart = self.partition_from(hash);
let idir = self.part_prim[ipart] as usize;
self.block_dir_from(hash, &self.data_dirs[idir].path)
}
pub(crate) fn secondary_block_dirs<'a>(
&'a self,
hash: &'a Hash,
) -> impl Iterator<Item = PathBuf> + 'a {
let ipart = self.partition_from(hash);
self.part_sec[ipart]
.iter()
.map(move |idir| self.block_dir_from(hash, &self.data_dirs[*idir as usize].path))
}
fn partition_from(&self, hash: &Hash) -> usize {
u16::from_be_bytes([
hash.as_slice()[HASH_DRIVE_BYTES.0],
hash.as_slice()[HASH_DRIVE_BYTES.1],
]) as usize % DRIVE_NPART
}
fn block_dir_from(&self, hash: &Hash, dir: &PathBuf) -> PathBuf {
let mut path = dir.clone();
path.push(hex::encode(&hash.as_slice()[0..1]));
path.push(hex::encode(&hash.as_slice()[1..2]));
path
}
pub(crate) fn without_secondary_locations(&self) -> Self {
Self {
data_dirs: self.data_dirs.clone(),
part_prim: self.part_prim.clone(),
part_sec: self.part_sec.iter().map(|_| vec![]).collect::<Vec<_>>(),
}
}
}
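A hedged standalone sketch of the hash-to-location mapping implemented above (not part of the diff): bytes 2 and 3 of the block hash select one of the DRIVE_NPART partitions, while the first two bytes give the two-level subdirectory inside the chosen data directory.

// Sketch only; mirrors partition_from() and block_dir_from() above.
fn locate(hash: &[u8; 32], part_prim: &[u16], data_dirs: &[std::path::PathBuf]) -> std::path::PathBuf {
    let ipart = u16::from_be_bytes([hash[2], hash[3]]) as usize % 1024;
    let idir = part_prim[ipart] as usize;
    let mut path = data_dirs[idir].clone();
    path.push(hex::encode(&hash[0..1]));
    path.push(hex::encode(&hash[1..2]));
    path
}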
impl InitialFormat for DataLayout {
const VERSION_MARKER: &'static [u8] = b"G09bmdl";
}
impl DataDir {
pub fn capacity(&self) -> Option<u64> {
match self.state {
DataDirState::Active { capacity } => Some(capacity),
_ => None,
}
}
}
fn make_data_dirs(dirs: &DataDirEnum) -> Result<Vec<DataDir>, Error> {
let mut data_dirs = vec![];
match dirs {
DataDirEnum::Single(path) => data_dirs.push(DataDir {
path: path.clone(),
state: DataDirState::Active {
capacity: 1_000_000_000, // arbitrary non-zero value: with a single data dir, all partitions go there anyway
},
}),
DataDirEnum::Multiple(dirs) => {
let mut ok = false;
for dir in dirs.iter() {
let state = match &dir.capacity {
Some(cap) if !dir.read_only => {
let capacity = cap.parse::<bytesize::ByteSize>()
.ok_or_message("invalid capacity value")?.as_u64();
if capacity == 0 {
return Err(Error::Message(format!("data directory {} should have non-zero capacity", dir.path.to_string_lossy())));
}
ok = true;
DataDirState::Active {
capacity,
}
}
None if dir.read_only => {
DataDirState::ReadOnly
}
_ => return Err(Error::Message(format!("data directories in data_dir should have a capacity value or be marked read_only, not the case for {}", dir.path.to_string_lossy()))),
};
data_dirs.push(DataDir {
path: dir.path.clone(),
state,
});
}
if !ok {
return Err(Error::Message(
"incorrect data_dir configuration, no primary writable directory specified"
.into(),
));
}
}
}
Ok(data_dirs)
}
fn dir_not_empty(path: &PathBuf) -> Result<bool, Error> {
for entry in std::fs::read_dir(&path)? {
let dir = entry?;
if dir.file_type()?.is_dir()
&& dir
.file_name()
.into_string()
.ok()
.and_then(|hex| hex::decode(&hex).ok())
.is_some()
{
return Ok(true);
}
}
Ok(false)
}
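The proportional split performed by DataLayout::initialize() and update() can be summarised by the following hedged sketch (not part of the diff): cumulative capacity is mapped onto the 0..DRIVE_NPART range, so each active directory receives a contiguous share of partitions proportional to its capacity.

// Sketch only: returns how many of the 1024 partitions each directory gets.
fn partitions_per_dir(capacities: &[u64]) -> Vec<usize> {
    const DRIVE_NPART: u64 = 1024;
    let total: u64 = capacities.iter().sum();
    assert!(total > 0);
    let mut cum = 0u64;
    capacities
        .iter()
        .map(|cap| {
            let begin = cum * DRIVE_NPART / total;
            cum += *cap;
            let end = cum * DRIVE_NPART / total;
            (end - begin) as usize
        })
        .collect()
}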

View File

@ -6,5 +6,6 @@ pub mod repair;
pub mod resync;
mod block;
mod layout;
mod metrics;
mod rc;

View File

@ -3,7 +3,7 @@ use std::pin::Pin;
use std::sync::Arc;
use std::time::Duration;
use arc_swap::ArcSwapOption;
use arc_swap::{ArcSwap, ArcSwapOption};
use async_trait::async_trait;
use bytes::Bytes;
use rand::prelude::*;
@ -25,10 +25,11 @@ use garage_rpc::rpc_helper::netapp::stream::{stream_asyncread, ByteStream};
use garage_db as db;
use garage_util::background::{vars, BackgroundRunner};
use garage_util::config::DataDirEnum;
use garage_util::data::*;
use garage_util::error::*;
use garage_util::metrics::RecordDuration;
use garage_util::persister::PersisterShared;
use garage_util::persister::{Persister, PersisterShared};
use garage_util::time::msec_to_rfc3339;
use garage_rpc::rpc_helper::OrderTag;
@ -38,6 +39,7 @@ use garage_rpc::*;
use garage_table::replication::{TableReplication, TableShardedReplication};
use crate::block::*;
use crate::layout::*;
use crate::metrics::*;
use crate::rc::*;
use crate::repair::*;
@ -77,12 +79,16 @@ impl Rpc for BlockRpc {
pub struct BlockManager {
/// Replication strategy, allowing to find on which node blocks should be located
pub replication: TableShardedReplication,
/// Directory in which block are stored
pub data_dir: PathBuf,
/// Data layout
pub(crate) data_layout: ArcSwap<DataLayout>,
/// Data layout persister
pub(crate) data_layout_persister: Persister<DataLayout>,
data_fsync: bool,
compression_level: Option<i32>,
mutation_lock: [Mutex<BlockManagerLocked>; 256],
mutation_lock: Vec<Mutex<BlockManagerLocked>>,
pub(crate) rc: BlockRc,
pub resync: BlockResyncManager,
@ -105,6 +111,9 @@ pub struct BlockResyncErrorInfo {
pub next_try: u64,
}
// The number of different mutexes used to parallelize write access to data blocks
const MUTEX_COUNT: usize = 256;
// This custom struct contains functions that must only be run
// when the lock is held. We ensure this is the case by storing
// it INSIDE a Mutex.
@ -113,11 +122,29 @@ struct BlockManagerLocked();
impl BlockManager {
pub fn new(
db: &db::Db,
data_dir: PathBuf,
data_dir: DataDirEnum,
data_fsync: bool,
compression_level: Option<i32>,
replication: TableShardedReplication,
system: Arc<System>,
) -> Arc<Self> {
) -> Result<Arc<Self>, Error> {
// Load or compute layout, i.e. assignment of data blocks to the different data directories
let data_layout_persister: Persister<DataLayout> =
Persister::new(&system.metadata_dir, "data_layout");
let data_layout = match data_layout_persister.load() {
Ok(mut layout) => {
layout
.update(&data_dir)
.ok_or_message("invalid data_dir config")?;
layout
}
Err(_) => DataLayout::initialize(&data_dir).ok_or_message("invalid data_dir config")?,
};
data_layout_persister
.save(&data_layout)
.expect("cannot save data_layout");
// Open metadata tables
let rc = db
.open_tree("block_local_rc")
.expect("Unable to open block_local_rc tree");
@ -140,9 +167,14 @@ impl BlockManager {
let block_manager = Arc::new(Self {
replication,
data_dir,
data_layout: ArcSwap::new(Arc::new(data_layout)),
data_layout_persister,
data_fsync,
compression_level,
mutation_lock: [(); 256].map(|_| Mutex::new(BlockManagerLocked())),
mutation_lock: vec![(); MUTEX_COUNT]
.iter()
.map(|_| Mutex::new(BlockManagerLocked()))
.collect::<Vec<_>>(),
rc,
resync,
system,
@ -154,7 +186,7 @@ impl BlockManager {
block_manager.endpoint.set_handler(block_manager.clone());
block_manager.scrub_persister.set_with(|_| ()).unwrap();
block_manager
Ok(block_manager)
}
pub fn spawn_workers(self: &Arc<Self>, bg: &BackgroundRunner) {
@ -201,44 +233,10 @@ impl BlockManager {
hash: &Hash,
order_tag: Option<OrderTag>,
) -> Result<(DataBlockHeader, ByteStream), Error> {
let who = self.replication.read_nodes(hash);
let who = self.system.rpc.request_order(&who);
for node in who.iter() {
let node_id = NodeID::from(*node);
let rpc = self.endpoint.call_streaming(
&node_id,
BlockRpc::GetBlock(*hash, order_tag),
PRIO_NORMAL | PRIO_SECONDARY,
);
tokio::select! {
res = rpc => {
let res = match res {
Ok(res) => res,
Err(e) => {
debug!("Node {:?} returned error: {}", node, e);
continue;
}
};
let (header, stream) = match res.into_parts() {
(Ok(BlockRpc::PutBlock { hash: _, header }), Some(stream)) => (header, stream),
_ => {
debug!("Node {:?} returned a malformed response", node);
continue;
}
};
return Ok((header, stream));
}
_ = tokio::time::sleep(self.system.rpc.rpc_timeout()) => {
debug!("Node {:?} didn't return block in time, trying next.", node);
}
};
}
Err(Error::Message(format!(
"Unable to read block {:?}: no node returned a valid block",
hash
)))
self.rpc_get_raw_block_internal(hash, order_tag, |header, stream| async move {
Ok((header, stream))
})
.await
}
/// Ask nodes that might have a (possibly compressed) block for it
@ -248,6 +246,24 @@ impl BlockManager {
hash: &Hash,
order_tag: Option<OrderTag>,
) -> Result<DataBlock, Error> {
self.rpc_get_raw_block_internal(hash, order_tag, |header, stream| async move {
read_stream_to_end(stream)
.await
.map(|data| DataBlock::from_parts(header, data))
})
.await
}
async fn rpc_get_raw_block_internal<F, Fut, T>(
&self,
hash: &Hash,
order_tag: Option<OrderTag>,
f: F,
) -> Result<T, Error>
where
F: Fn(DataBlockHeader, ByteStream) -> Fut,
Fut: futures::Future<Output = Result<T, Error>>,
{
let who = self.replication.read_nodes(hash);
let who = self.system.rpc.request_order(&who);
@ -263,34 +279,41 @@ impl BlockManager {
let res = match res {
Ok(res) => res,
Err(e) => {
debug!("Node {:?} returned error: {}", node, e);
debug!("Get block {:?}: node {:?} could not be contacted: {}", hash, node, e);
continue;
}
};
let (header, stream) = match res.into_parts() {
(Ok(BlockRpc::PutBlock { hash: _, header }), Some(stream)) => (header, stream),
_ => {
debug!("Node {:?} returned a malformed response", node);
(Ok(_), _) => {
debug!("Get block {:?}: node {:?} returned a malformed response", hash, node);
continue;
}
(Err(e), _) => {
debug!("Get block {:?}: node {:?} returned error: {}", hash, node, e);
continue;
}
};
match read_stream_to_end(stream).await {
Ok(bytes) => return Ok(DataBlock::from_parts(header, bytes)),
match f(header, stream).await {
Ok(ret) => return Ok(ret),
Err(e) => {
debug!("Error reading stream from node {:?}: {}", node, e);
debug!("Get block {:?}: error reading stream from node {:?}: {}", hash, node, e);
}
}
}
// TODO: sleep less long (fail early), initiate a second request earlier
// if the first one doesn't succeed rapidly
// TODO: keep first request running when initiating a new one and take the
// one that finishes earlier
_ = tokio::time::sleep(self.system.rpc.rpc_timeout()) => {
debug!("Node {:?} didn't return block in time, trying next.", node);
debug!("Get block {:?}: node {:?} didn't return block in time, trying next.", hash, node);
}
};
}
Err(Error::Message(format!(
"Unable to read block {:?}: no node returned a valid block",
hash
)))
let msg = format!("Get block {:?}: no node returned a valid block", hash);
debug!("{}", msg);
Err(Error::Message(msg))
}
// ---- Public interface ----
@ -468,8 +491,6 @@ impl BlockManager {
pub(crate) async fn write_block(&self, hash: &Hash, data: &DataBlock) -> Result<(), Error> {
let tracer = opentelemetry::global::tracer("garage");
let write_size = data.inner_buffer().len() as u64;
self.lock_mutate(hash)
.await
.write_block(hash, data, self)
@ -479,8 +500,6 @@ impl BlockManager {
))
.await?;
self.metrics.bytes_written.add(write_size);
Ok(())
}
@ -507,36 +526,42 @@ impl BlockManager {
/// Read block from disk, verifying its integrity
pub(crate) async fn read_block(&self, hash: &Hash) -> Result<DataBlock, Error> {
let data = self
.read_block_internal(hash)
.bound_record_duration(&self.metrics.block_read_duration)
.await?;
self.metrics
.bytes_read
.add(data.inner_buffer().len() as u64);
Ok(data)
let tracer = opentelemetry::global::tracer("garage");
async {
match self.find_block(hash).await {
Some(p) => self.read_block_from(hash, &p).await,
None => {
// Not found, but maybe we should have had it?
self.resync
.put_to_resync(hash, 2 * self.system.rpc.rpc_timeout())?;
return Err(Error::Message(format!(
"block {:?} not found on node",
hash
)));
}
}
}
.bound_record_duration(&self.metrics.block_read_duration)
.with_context(Context::current_with_span(
tracer.start("BlockManager::read_block"),
))
.await
}
async fn read_block_internal(&self, hash: &Hash) -> Result<DataBlock, Error> {
let mut path = self.block_path(hash);
let compressed = match self.is_block_compressed(hash).await {
Ok(c) => c,
Err(e) => {
// Not found but maybe we should have had it ??
self.resync
.put_to_resync(hash, 2 * self.system.rpc.rpc_timeout())?;
return Err(Into::into(e));
}
pub(crate) async fn read_block_from(
&self,
hash: &Hash,
block_path: &DataBlockPath,
) -> Result<DataBlock, Error> {
let (path, compressed) = match block_path {
DataBlockPath::Plain(p) => (p, false),
DataBlockPath::Compressed(p) => (p, true),
};
if compressed {
path.set_extension("zst");
}
let mut f = fs::File::open(&path).await?;
let mut f = fs::File::open(&path).await?;
let mut data = vec![];
f.read_to_end(&mut data).await?;
self.metrics.bytes_read.add(data.len() as u64);
drop(f);
let data = if compressed {
@ -548,29 +573,27 @@ impl BlockManager {
if data.verify(*hash).is_err() {
self.metrics.corruption_counter.add(1);
warn!(
"Block {:?} is corrupted. Renaming to .corrupted and resyncing.",
hash
);
self.lock_mutate(hash)
.await
.move_block_to_corrupted(hash, self)
.move_block_to_corrupted(block_path)
.await?;
self.resync.put_to_resync(hash, Duration::from_millis(0))?;
return Err(Error::CorruptData(*hash));
}
Ok(data)
}
/// Check if this node has a block and whether it needs it
pub(crate) async fn check_block_status(&self, hash: &Hash) -> Result<BlockStatus, Error> {
self.lock_mutate(hash)
.await
.check_block_status(hash, self)
.await
}
/// Check if this node should have a block, but don't actually have it
async fn need_block(&self, hash: &Hash) -> Result<bool, Error> {
let BlockStatus { exists, needed } = self.check_block_status(hash).await?;
Ok(needed.is_nonzero() && !exists)
let rc = self.rc.get_block_rc(hash)?;
let exists = self.find_block(hash).await.is_some();
Ok(rc.is_nonzero() && !exists)
}
/// Delete block if it is not needed anymore
@ -581,59 +604,65 @@ impl BlockManager {
.await
}
/// Utility: gives the path of the directory in which a block should be found
fn block_dir(&self, hash: &Hash) -> PathBuf {
let mut path = self.data_dir.clone();
path.push(hex::encode(&hash.as_slice()[0..1]));
path.push(hex::encode(&hash.as_slice()[1..2]));
path
}
/// Find the path where a block is currently stored
pub(crate) async fn find_block(&self, hash: &Hash) -> Option<DataBlockPath> {
let data_layout = self.data_layout.load_full();
let dirs = Some(data_layout.primary_block_dir(hash))
.into_iter()
.chain(data_layout.secondary_block_dirs(hash));
let filename = hex::encode(hash.as_ref());
/// Utility: give the full path where a block should be found, minus extension if block is
/// compressed
fn block_path(&self, hash: &Hash) -> PathBuf {
let mut path = self.block_dir(hash);
path.push(hex::encode(hash.as_ref()));
path
}
for dir in dirs {
let mut path = dir;
path.push(&filename);
/// Utility: check if block is stored compressed. Error if block is not stored
async fn is_block_compressed(&self, hash: &Hash) -> Result<bool, Error> {
let mut path = self.block_path(hash);
// If compression is disabled on node - check for the raw block
// first and then a compressed one (as compression may have been
// previously enabled).
match self.compression_level {
None => {
if self.compression_level.is_none() {
// If compression is disabled on node - check for the raw block
// first and then a compressed one (as compression may have been
// previously enabled).
if fs::metadata(&path).await.is_ok() {
return Ok(false);
return Some(DataBlockPath::Plain(path));
}
path.set_extension("zst");
fs::metadata(&path).await.map(|_| true).map_err(Into::into)
}
_ => {
path.set_extension("zst");
if fs::metadata(&path).await.is_ok() {
return Ok(true);
return Some(DataBlockPath::Compressed(path));
}
} else {
path.set_extension("zst");
if fs::metadata(&path).await.is_ok() {
return Some(DataBlockPath::Compressed(path));
}
path.set_extension("");
fs::metadata(&path).await.map(|_| false).map_err(Into::into)
if fs::metadata(&path).await.is_ok() {
return Some(DataBlockPath::Plain(path));
}
}
}
None
}
/// Rewrite a block at the primary location for its path and delete the old path.
/// Returns the number of bytes read/written
pub(crate) async fn fix_block_location(
&self,
hash: &Hash,
wrong_path: DataBlockPath,
) -> Result<usize, Error> {
self.lock_mutate(hash)
.await
.fix_block_location(hash, wrong_path, self)
.await
}
async fn lock_mutate(&self, hash: &Hash) -> MutexGuard<'_, BlockManagerLocked> {
let tracer = opentelemetry::global::tracer("garage");
self.mutation_lock[hash.as_slice()[0] as usize]
let ilock = u16::from_be_bytes([hash.as_slice()[0], hash.as_slice()[1]]) as usize
% self.mutation_lock.len();
self.mutation_lock[ilock]
.lock()
.with_context(Context::current_with_span(
tracer.start("Acquire mutation_lock"),
tracer.start(format!("Acquire mutation_lock #{}", ilock)),
))
.await
}
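A hedged sketch of the lock-sharding rule used above (not part of the diff): the first two bytes of the block hash, read as a big-endian u16, select one of the mutexes, so concurrent writes to different blocks rarely contend on the same lock.

// Sketch only; mirrors the ilock computation in lock_mutate().
fn lock_index(hash: &[u8; 32], n_locks: usize) -> usize {
    u16::from_be_bytes([hash[0], hash[1]]) as usize % n_locks
}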
@ -646,7 +675,7 @@ impl StreamingEndpointHandler<BlockRpc> for BlockManager {
BlockRpc::PutBlock { hash, header } => Resp::new(
self.handle_put_block(*hash, *header, message.take_stream())
.await
.map(|_| BlockRpc::Ok),
.map(|()| BlockRpc::Ok),
),
BlockRpc::GetBlock(h, order_tag) => self.handle_get_block(h, *order_tag).await,
BlockRpc::NeedBlockQuery(h) => {
@ -657,66 +686,78 @@ impl StreamingEndpointHandler<BlockRpc> for BlockManager {
}
}
pub(crate) struct BlockStatus {
pub(crate) exists: bool,
pub(crate) needed: RcEntry,
}
impl BlockManagerLocked {
async fn check_block_status(
&self,
hash: &Hash,
mgr: &BlockManager,
) -> Result<BlockStatus, Error> {
let exists = mgr.is_block_compressed(hash).await.is_ok();
let needed = mgr.rc.get_block_rc(hash)?;
Ok(BlockStatus { exists, needed })
}
async fn write_block(
&self,
hash: &Hash,
data: &DataBlock,
mgr: &BlockManager,
) -> Result<(), Error> {
let existing_path = mgr.find_block(hash).await;
self.write_block_inner(hash, data, mgr, existing_path).await
}
async fn write_block_inner(
&self,
hash: &Hash,
data: &DataBlock,
mgr: &BlockManager,
existing_path: Option<DataBlockPath>,
) -> Result<(), Error> {
let compressed = data.is_compressed();
let data = data.inner_buffer();
let mut path = mgr.block_dir(hash);
let directory = path.clone();
path.push(hex::encode(hash));
let directory = mgr.data_layout.load().primary_block_dir(hash);
fs::create_dir_all(&directory).await?;
let mut tgt_path = directory.clone();
tgt_path.push(hex::encode(hash));
if compressed {
tgt_path.set_extension("zst");
}
let to_delete = match (mgr.is_block_compressed(hash).await, compressed) {
(Ok(true), _) => return Ok(()),
(Ok(false), false) => return Ok(()),
(Ok(false), true) => {
let path_to_delete = path.clone();
path.set_extension("zst");
Some(path_to_delete)
}
(Err(_), compressed) => {
if compressed {
path.set_extension("zst");
}
None
}
let to_delete = match (existing_path, compressed) {
// If the block is stored in the wrong directory,
// write it again at the correct path and delete the old path
(Some(DataBlockPath::Plain(p)), false) if p != tgt_path => Some(p),
(Some(DataBlockPath::Compressed(p)), true) if p != tgt_path => Some(p),
// If the block is already stored not compressed but we have a compressed
// copy, write the compressed copy and delete the uncompressed one
(Some(DataBlockPath::Plain(plain_path)), true) => Some(plain_path),
// If the block is already stored compressed,
// keep the stored copy, we have nothing to do
(Some(DataBlockPath::Compressed(_)), _) => return Ok(()),
// If the block is already stored not compressed,
// and we don't have a compressed copy either,
// keep the stored copy, we have nothing to do
(Some(DataBlockPath::Plain(_)), false) => return Ok(()),
// If the block isn't stored already, just store what is given to us
(None, _) => None,
};
assert!(to_delete.as_ref() != Some(&tgt_path));
let mut path_tmp = path.clone();
let mut path_tmp = tgt_path.clone();
let tmp_extension = format!("tmp{}", hex::encode(thread_rng().gen::<[u8; 4]>()));
path_tmp.set_extension(tmp_extension);
fs::create_dir_all(&directory).await?;
let mut delete_on_drop = DeleteOnDrop(Some(path_tmp.clone()));
let mut f = fs::File::create(&path_tmp).await?;
f.write_all(data).await?;
f.sync_all().await?;
mgr.metrics.bytes_written.add(data.len() as u64);
if mgr.data_fsync {
f.sync_all().await?;
}
drop(f);
fs::rename(path_tmp, path).await?;
fs::rename(path_tmp, tgt_path).await?;
delete_on_drop.cancel();
@ -724,52 +765,67 @@ impl BlockManagerLocked {
fs::remove_file(to_delete).await?;
}
// We want to ensure that when this function returns, data is properly persisted
// to disk. The first step is the sync_all above that does an fsync on the data file.
// Now, we do an fsync on the containing directory, to ensure that the rename
// is persisted properly. See:
// http://thedjbway.b0llix.net/qmail/syncdir.html
let dir = fs::OpenOptions::new()
.read(true)
.mode(0)
.open(directory)
.await?;
dir.sync_all().await?;
drop(dir);
if mgr.data_fsync {
// We want to ensure that when this function returns, data is properly persisted
// to disk. The first step is the sync_all above that does an fsync on the data file.
// Now, we do an fsync on the containing directory, to ensure that the rename
// is persisted properly. See:
// http://thedjbway.b0llix.net/qmail/syncdir.html
let dir = fs::OpenOptions::new()
.read(true)
.mode(0)
.open(directory)
.await?;
dir.sync_all().await?;
drop(dir);
}
Ok(())
}
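The write path above follows the classic durable-write pattern: write to a temporary file, fsync it, rename it into place, then fsync the parent directory so the rename itself is persisted. A hedged standalone sketch, assuming tokio::fs as used in this file (it omits the compression handling, fsync configuration and stale-copy deletion done above):

// Sketch only, not part of the diff.
async fn durable_write(dir: &std::path::Path, name: &str, data: &[u8]) -> std::io::Result<()> {
    use tokio::io::AsyncWriteExt;
    let tmp = dir.join(format!("{}.tmp", name));
    let mut f = tokio::fs::File::create(&tmp).await?;
    f.write_all(data).await?;
    f.sync_all().await?; // persist file contents
    drop(f);
    tokio::fs::rename(&tmp, dir.join(name)).await?;
    // fsync the containing directory so the rename is persisted
    // (opening a directory read-only works on Linux)
    tokio::fs::File::open(dir).await?.sync_all().await?;
    Ok(())
}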
async fn move_block_to_corrupted(&self, hash: &Hash, mgr: &BlockManager) -> Result<(), Error> {
warn!(
"Block {:?} is corrupted. Renaming to .corrupted and resyncing.",
hash
);
let mut path = mgr.block_path(hash);
let mut path2 = path.clone();
if mgr.is_block_compressed(hash).await? {
path.set_extension("zst");
path2.set_extension("zst.corrupted");
} else {
path2.set_extension("corrupted");
}
async fn move_block_to_corrupted(&self, block_path: &DataBlockPath) -> Result<(), Error> {
let (path, path2) = match block_path {
DataBlockPath::Plain(p) => {
let mut p2 = p.clone();
p2.set_extension("corrupted");
(p, p2)
}
DataBlockPath::Compressed(p) => {
let mut p2 = p.clone();
p2.set_extension("zst.corrupted");
(p, p2)
}
};
fs::rename(path, path2).await?;
Ok(())
}
async fn delete_if_unneeded(&self, hash: &Hash, mgr: &BlockManager) -> Result<(), Error> {
let BlockStatus { exists, needed } = self.check_block_status(hash, mgr).await?;
if exists && needed.is_deletable() {
let mut path = mgr.block_path(hash);
if mgr.is_block_compressed(hash).await? {
path.set_extension("zst");
let rc = mgr.rc.get_block_rc(hash)?;
if rc.is_deletable() {
while let Some(path) = mgr.find_block(hash).await {
let path = match path {
DataBlockPath::Plain(p) | DataBlockPath::Compressed(p) => p,
};
fs::remove_file(path).await?;
mgr.metrics.delete_counter.add(1);
}
fs::remove_file(path).await?;
mgr.metrics.delete_counter.add(1);
}
Ok(())
}
async fn fix_block_location(
&self,
hash: &Hash,
wrong_path: DataBlockPath,
mgr: &BlockManager,
) -> Result<usize, Error> {
let data = mgr.read_block_from(hash, &wrong_path).await?;
self.write_block_inner(hash, &data, mgr, Some(wrong_path))
.await?;
Ok(data.inner_buffer().len())
}
}
async fn read_stream_to_end(mut stream: ByteStream) -> Result<Bytes, Error> {

View File

@ -56,7 +56,7 @@ impl BlockRc {
/// deletion time has passed
pub(crate) fn clear_deleted_block_rc(&self, hash: &Hash) -> Result<(), Error> {
let now = now_msec();
self.rc.db().transaction(|mut tx| {
self.rc.db().transaction(|tx| {
let rcval = RcEntry::parse_opt(tx.get(&self.rc, hash)?);
match rcval {
RcEntry::Deletable { at_time } if now > at_time => {
@ -64,7 +64,7 @@ impl BlockRc {
}
_ => (),
};
tx.commit(())
Ok(())
})?;
Ok(())
}

View File

@ -17,6 +17,7 @@ use garage_util::persister::PersisterShared;
use garage_util::time::*;
use garage_util::tranquilizer::Tranquilizer;
use crate::block::*;
use crate::manager::*;
// Full scrub every 25 days with a random element of 10 days mixed in below
@ -136,7 +137,7 @@ impl Worker for RepairWorker {
// Lists all blocks on disk and adds them to the resync queue.
// This allows us to find blocks we are storing but don't actually need,
// so that we can offload them if necessary and then delete them locally.
if let Some(hash) = bi.next().await? {
if let Some((_path, hash)) = bi.next().await? {
self.manager
.resync
.put_to_resync(&hash, Duration::from_secs(0))?;
@ -175,7 +176,9 @@ mod v081 {
}
mod v082 {
use garage_util::data::Hash;
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
use super::v081;
@ -185,6 +188,27 @@ mod v082 {
pub(crate) time_last_complete_scrub: u64,
pub(crate) time_next_run_scrub: u64,
pub(crate) corruptions_detected: u64,
#[serde(default)]
pub(crate) checkpoint: Option<BlockStoreIterator>,
}
#[derive(Serialize, Deserialize, Clone)]
pub struct BlockStoreIterator {
pub todo: Vec<BsiTodo>,
}
#[derive(Serialize, Deserialize, Clone)]
pub enum BsiTodo {
Directory {
path: PathBuf,
progress_min: u64,
progress_max: u64,
},
File {
path: PathBuf,
hash: Hash,
progress: u64,
},
}
impl garage_util::migrate::Migrate for ScrubWorkerPersisted {
@ -199,6 +223,7 @@ mod v082 {
time_last_complete_scrub: old.time_last_complete_scrub,
time_next_run_scrub: randomize_next_scrub_run_time(old.time_last_complete_scrub),
corruptions_detected: old.corruptions_detected,
checkpoint: None,
}
}
}
@ -235,14 +260,23 @@ impl Default for ScrubWorkerPersisted {
time_next_run_scrub: randomize_next_scrub_run_time(now_msec()),
tranquility: INITIAL_SCRUB_TRANQUILITY,
corruptions_detected: 0,
checkpoint: None,
}
}
}
#[derive(Default)]
enum ScrubWorkerState {
Running(BlockStoreIterator),
Paused(BlockStoreIterator, u64), // u64 = time when to resume scrub
Running {
iterator: BlockStoreIterator,
// time of the last checkpoint
t_cp: u64,
},
Paused {
iterator: BlockStoreIterator,
// time at which the scrub should be resumed
t_resume: u64,
},
#[default]
Finished,
}
@ -261,10 +295,17 @@ impl ScrubWorker {
rx_cmd: mpsc::Receiver<ScrubWorkerCommand>,
persister: PersisterShared<ScrubWorkerPersisted>,
) -> Self {
let work = match persister.get_with(|x| x.checkpoint.clone()) {
None => ScrubWorkerState::Finished,
Some(iterator) => ScrubWorkerState::Running {
iterator,
t_cp: now_msec(),
},
};
Self {
manager,
rx_cmd,
work: ScrubWorkerState::Finished,
work,
tranquilizer: Tranquilizer::new(30),
persister,
}
@ -277,7 +318,16 @@ impl ScrubWorker {
ScrubWorkerState::Finished => {
info!("Scrub worker initializing, now performing datastore scrub");
let iterator = BlockStoreIterator::new(&self.manager);
ScrubWorkerState::Running(iterator)
if let Err(e) = self
.persister
.set_with(|x| x.checkpoint = Some(iterator.clone()))
{
error!("Could not save scrub checkpoint: {}", e);
}
ScrubWorkerState::Running {
iterator,
t_cp: now_msec(),
}
}
work => {
error!("Cannot start scrub worker: already running!");
@ -287,8 +337,18 @@ impl ScrubWorker {
}
ScrubWorkerCommand::Pause(dur) => {
self.work = match std::mem::take(&mut self.work) {
ScrubWorkerState::Running(it) | ScrubWorkerState::Paused(it, _) => {
ScrubWorkerState::Paused(it, now_msec() + dur.as_millis() as u64)
ScrubWorkerState::Running { iterator, .. }
| ScrubWorkerState::Paused { iterator, .. } => {
if let Err(e) = self
.persister
.set_with(|x| x.checkpoint = Some(iterator.clone()))
{
error!("Could not save scrub checkpoint: {}", e);
}
ScrubWorkerState::Paused {
iterator,
t_resume: now_msec() + dur.as_millis() as u64,
}
}
work => {
error!("Cannot pause scrub worker: not running!");
@ -298,7 +358,10 @@ impl ScrubWorker {
}
ScrubWorkerCommand::Resume => {
self.work = match std::mem::take(&mut self.work) {
ScrubWorkerState::Paused(it, _) => ScrubWorkerState::Running(it),
ScrubWorkerState::Paused { iterator, .. } => ScrubWorkerState::Running {
iterator,
t_cp: now_msec(),
},
work => {
error!("Cannot resume scrub worker: not paused!");
work
@ -307,7 +370,10 @@ impl ScrubWorker {
}
ScrubWorkerCommand::Cancel => {
self.work = match std::mem::take(&mut self.work) {
ScrubWorkerState::Running(_) | ScrubWorkerState::Paused(_, _) => {
ScrubWorkerState::Running { .. } | ScrubWorkerState::Paused { .. } => {
if let Err(e) = self.persister.set_with(|x| x.checkpoint = None) {
error!("Could not save scrub checkpoint: {}", e);
}
ScrubWorkerState::Finished
}
work => {
@ -343,12 +409,15 @@ impl Worker for ScrubWorker {
..Default::default()
};
match &self.work {
ScrubWorkerState::Running(bsi) => {
s.progress = Some(format!("{:.2}%", bsi.progress() * 100.));
ScrubWorkerState::Running { iterator, .. } => {
s.progress = Some(format!("{:.2}%", iterator.progress() * 100.));
}
ScrubWorkerState::Paused(bsi, rt) => {
s.progress = Some(format!("{:.2}%", bsi.progress() * 100.));
s.freeform = vec![format!("Scrub paused, resumes at {}", msec_to_rfc3339(*rt))];
ScrubWorkerState::Paused { iterator, t_resume } => {
s.progress = Some(format!("{:.2}%", iterator.progress() * 100.));
s.freeform = vec![format!(
"Scrub paused, resumes at {}",
msec_to_rfc3339(*t_resume)
)];
}
ScrubWorkerState::Finished => {
s.freeform = vec![
@ -374,9 +443,11 @@ impl Worker for ScrubWorker {
};
match &mut self.work {
ScrubWorkerState::Running(bsi) => {
ScrubWorkerState::Running { iterator, t_cp } => {
self.tranquilizer.reset();
if let Some(hash) = bsi.next().await? {
let now = now_msec();
if let Some((_path, hash)) = iterator.next().await? {
match self.manager.read_block(&hash).await {
Err(Error::CorruptData(_)) => {
error!("Found corrupt data block during scrub: {:?}", hash);
@ -385,16 +456,23 @@ impl Worker for ScrubWorker {
Err(e) => return Err(e),
_ => (),
};
if now - *t_cp > 60 * 1000 {
self.persister
.set_with(|p| p.checkpoint = Some(iterator.clone()))?;
*t_cp = now;
}
Ok(self
.tranquilizer
.tranquilize_worker(self.persister.get_with(|p| p.tranquility)))
} else {
let now = now_msec();
let next_scrub_timestamp = randomize_next_scrub_run_time(now);
self.persister.set_with(|p| {
p.time_last_complete_scrub = now;
p.time_next_run_scrub = next_scrub_timestamp;
p.checkpoint = None;
})?;
self.work = ScrubWorkerState::Finished;
self.tranquilizer.clear();
@ -413,8 +491,8 @@ impl Worker for ScrubWorker {
async fn wait_for_work(&mut self) -> WorkerState {
let (wait_until, command) = match &self.work {
ScrubWorkerState::Running(_) => return WorkerState::Busy,
ScrubWorkerState::Paused(_, resume_time) => (*resume_time, ScrubWorkerCommand::Resume),
ScrubWorkerState::Running { .. } => return WorkerState::Busy,
ScrubWorkerState::Paused { t_resume, .. } => (*t_resume, ScrubWorkerCommand::Resume),
ScrubWorkerState::Finished => (
self.persister.get_with(|p| p.time_next_run_scrub),
ScrubWorkerCommand::Start,
@ -437,110 +515,250 @@ impl Worker for ScrubWorker {
}
match &self.work {
ScrubWorkerState::Running(_) => WorkerState::Busy,
ScrubWorkerState::Running { .. } => WorkerState::Busy,
_ => WorkerState::Idle,
}
}
}
// ---- ---- ----
// THIRD KIND OF REPAIR: REBALANCING DATA BLOCKS
// between multiple storage locations.
// This is a one-shot repair operation that can be launched,
// checks everything, and then exits.
// ---- ---- ----
pub struct RebalanceWorker {
manager: Arc<BlockManager>,
block_iter: BlockStoreIterator,
t_started: u64,
t_finished: Option<u64>,
moved: usize,
moved_bytes: u64,
}
impl RebalanceWorker {
pub fn new(manager: Arc<BlockManager>) -> Self {
let block_iter = BlockStoreIterator::new(&manager);
Self {
manager,
block_iter,
t_started: now_msec(),
t_finished: None,
moved: 0,
moved_bytes: 0,
}
}
}
#[async_trait]
impl Worker for RebalanceWorker {
fn name(&self) -> String {
"Block rebalance worker".into()
}
fn status(&self) -> WorkerStatus {
let t_cur = self.t_finished.unwrap_or_else(|| now_msec());
let rate = self.moved_bytes / std::cmp::max(1, (t_cur - self.t_started) / 1000);
let mut freeform = vec![
format!("Blocks moved: {}", self.moved),
format!(
"Bytes moved: {} ({}/s)",
bytesize::ByteSize::b(self.moved_bytes),
bytesize::ByteSize::b(rate)
),
format!("Started: {}", msec_to_rfc3339(self.t_started)),
];
if let Some(t_fin) = self.t_finished {
freeform.push(format!("Finished: {}", msec_to_rfc3339(t_fin)))
}
WorkerStatus {
progress: Some(format!("{:.2}%", self.block_iter.progress() * 100.)),
freeform,
..Default::default()
}
}
async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
if let Some((path, hash)) = self.block_iter.next().await? {
let prim_loc = self.manager.data_layout.load().primary_block_dir(&hash);
if path.ancestors().all(|x| x != prim_loc) {
let block_path = match path.extension() {
None => DataBlockPath::Plain(path.clone()),
Some(x) if x.to_str() == Some("zst") => DataBlockPath::Compressed(path.clone()),
_ => {
warn!("not rebalancing file: {}", path.to_string_lossy());
return Ok(WorkerState::Busy);
}
};
// block is not in its primary location,
// move it there (reading and re-writing does the trick)
debug!("rebalance: moving block {:?} => {:?}", block_path, prim_loc);
let block_len = self.manager.fix_block_location(&hash, block_path).await?;
self.moved += 1;
self.moved_bytes += block_len as u64;
}
Ok(WorkerState::Busy)
} else {
// all blocks are in their primary location:
// - the ones we moved now are
// - the ones written in the meantime always were, because we only
// write to primary locations
// so we can safely remove all secondary locations from the data layout
let new_layout = self
.manager
.data_layout
.load_full()
.without_secondary_locations();
self.manager
.data_layout_persister
.save_async(&new_layout)
.await?;
self.manager.data_layout.store(Arc::new(new_layout));
self.t_finished = Some(now_msec());
Ok(WorkerState::Done)
}
}
async fn wait_for_work(&mut self) -> WorkerState {
unreachable!()
}
}
// ---- ---- ----
// UTILITY FOR ENUMERATING THE BLOCK STORE
// ---- ---- ----
struct BlockStoreIterator {
path: Vec<ReadingDir>,
}
enum ReadingDir {
Pending(PathBuf),
Read {
subpaths: Vec<fs::DirEntry>,
pos: usize,
},
}
const PROGRESS_FP: u64 = 1_000_000_000;
impl BlockStoreIterator {
fn new(manager: &BlockManager) -> Self {
let root_dir = manager.data_dir.clone();
Self {
path: vec![ReadingDir::Pending(root_dir)],
let data_layout = manager.data_layout.load_full();
let mut dir_cap = vec![0; data_layout.data_dirs.len()];
for prim in data_layout.part_prim.iter() {
dir_cap[*prim as usize] += 1;
}
for sec_vec in data_layout.part_sec.iter() {
for sec in sec_vec.iter() {
dir_cap[*sec as usize] += 1;
}
}
let sum_cap = dir_cap.iter().sum::<usize>() as u64;
let mut cum_cap = 0;
let mut todo = vec![];
for (dir, cap) in data_layout.data_dirs.iter().zip(dir_cap.into_iter()) {
let progress_min = (cum_cap * PROGRESS_FP) / sum_cap;
let progress_max = ((cum_cap + cap as u64) * PROGRESS_FP) / sum_cap;
cum_cap += cap as u64;
todo.push(BsiTodo::Directory {
path: dir.path.clone(),
progress_min,
progress_max,
});
}
// entries are processed back-to-front (because of .pop()),
// so reverse entries to process them in increasing progress bounds
todo.reverse();
let ret = Self { todo };
debug_assert!(ret.progress_invariant());
ret
}
/// Returns progress done, between 0 and 1
fn progress(&self) -> f32 {
if self.path.is_empty() {
1.0
} else {
let mut ret = 0.0;
let mut next_div = 1;
for p in self.path.iter() {
match p {
ReadingDir::Pending(_) => break,
ReadingDir::Read { subpaths, pos } => {
next_div *= subpaths.len();
ret += ((*pos - 1) as f32) / (next_div as f32);
self.todo
.last()
.map(|x| match x {
BsiTodo::Directory { progress_min, .. } => *progress_min,
BsiTodo::File { progress, .. } => *progress,
})
.map(|x| x as f32 / PROGRESS_FP as f32)
.unwrap_or(1.0)
}
async fn next(&mut self) -> Result<Option<(PathBuf, Hash)>, Error> {
loop {
match self.todo.pop() {
None => return Ok(None),
Some(BsiTodo::Directory {
path,
progress_min,
progress_max,
}) => {
let istart = self.todo.len();
let mut reader = fs::read_dir(&path).await?;
while let Some(ent) = reader.next_entry().await? {
let name = if let Ok(n) = ent.file_name().into_string() {
n
} else {
continue;
};
let ft = ent.file_type().await?;
if ft.is_dir() && hex::decode(&name).is_ok() {
self.todo.push(BsiTodo::Directory {
path: ent.path(),
progress_min: 0,
progress_max: 0,
});
} else if ft.is_file() {
let filename = name.split_once('.').map(|(f, _)| f).unwrap_or(&name);
if filename.len() == 64 {
if let Ok(h) = hex::decode(filename) {
let mut hash = [0u8; 32];
hash.copy_from_slice(&h);
self.todo.push(BsiTodo::File {
path: ent.path(),
hash: hash.into(),
progress: 0,
});
}
}
}
}
let count = self.todo.len() - istart;
for (i, ent) in self.todo[istart..].iter_mut().enumerate() {
let p1 = progress_min
+ ((progress_max - progress_min) * i as u64) / count as u64;
let p2 = progress_min
+ ((progress_max - progress_min) * (i + 1) as u64) / count as u64;
match ent {
BsiTodo::Directory {
progress_min,
progress_max,
..
} => {
*progress_min = p1;
*progress_max = p2;
}
BsiTodo::File { progress, .. } => {
*progress = p1;
}
}
}
self.todo[istart..].reverse();
debug_assert!(self.progress_invariant());
}
Some(BsiTodo::File { path, hash, .. }) => {
return Ok(Some((path, hash)));
}
}
ret
}
}
async fn next(&mut self) -> Result<Option<Hash>, Error> {
loop {
let last_path = match self.path.last_mut() {
None => return Ok(None),
Some(lp) => lp,
};
if let ReadingDir::Pending(path) = last_path {
let mut reader = fs::read_dir(&path).await?;
let mut subpaths = vec![];
while let Some(ent) = reader.next_entry().await? {
subpaths.push(ent);
}
*last_path = ReadingDir::Read { subpaths, pos: 0 };
}
let (subpaths, pos) = match *last_path {
ReadingDir::Read {
ref subpaths,
ref mut pos,
} => (subpaths, pos),
ReadingDir::Pending(_) => unreachable!(),
};
let data_dir_ent = match subpaths.get(*pos) {
None => {
self.path.pop();
continue;
}
Some(ent) => {
*pos += 1;
ent
}
};
let name = data_dir_ent.file_name();
let name = if let Ok(n) = name.into_string() {
n
} else {
continue;
};
let ent_type = data_dir_ent.file_type().await?;
let name = name.strip_suffix(".zst").unwrap_or(&name);
if name.len() == 2 && hex::decode(name).is_ok() && ent_type.is_dir() {
let path = data_dir_ent.path();
self.path.push(ReadingDir::Pending(path));
} else if name.len() == 64 {
if let Ok(h) = hex::decode(name) {
let mut hash = [0u8; 32];
hash.copy_from_slice(&h);
return Ok(Some(hash.into()));
}
}
}
// for debug_assert!
fn progress_invariant(&self) -> bool {
let iter = self.todo.iter().map(|x| match x {
BsiTodo::Directory { progress_min, .. } => progress_min,
BsiTodo::File { progress, .. } => progress,
});
let iter_1 = iter.clone().skip(1);
iter.zip(iter_1).all(|(prev, next)| prev >= next)
}
}
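A hedged sketch of the fixed-point progress bookkeeping introduced above (not part of the diff): each directory receives a [progress_min, progress_max) interval on a 0..PROGRESS_FP scale, proportional to the number of partitions it holds, and files inherit a single point inside their directory's interval.

// Sketch only; mirrors the interval computation in BlockStoreIterator::new().
fn progress_bounds(dir_cap: &[u64]) -> Vec<(u64, u64)> {
    const PROGRESS_FP: u64 = 1_000_000_000;
    let sum: u64 = dir_cap.iter().sum();
    assert!(sum > 0);
    let mut cum = 0u64;
    dir_cap
        .iter()
        .map(|cap| {
            let lo = cum * PROGRESS_FP / sum;
            cum += *cap;
            let hi = cum * PROGRESS_FP / sum;
            (lo, hi)
        })
        .collect()
}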

View File

@ -41,7 +41,7 @@ pub(crate) const RESYNC_RETRY_DELAY: Duration = Duration::from_secs(60);
pub(crate) const RESYNC_RETRY_DELAY_MAX_BACKOFF_POWER: u64 = 6;
// No more than MAX_RESYNC_WORKERS resync workers can be running in the system
pub(crate) const MAX_RESYNC_WORKERS: usize = 4;
pub(crate) const MAX_RESYNC_WORKERS: usize = 8;
// Resync tranquility is initially set to 2, but can be changed in the CLI
// and the updated version is persisted over Garage restarts
const INITIAL_RESYNC_TRANQUILITY: u32 = 2;
@ -359,20 +359,23 @@ impl BlockResyncManager {
}
async fn resync_block(&self, manager: &BlockManager, hash: &Hash) -> Result<(), Error> {
let BlockStatus { exists, needed } = manager.check_block_status(hash).await?;
let existing_path = manager.find_block(hash).await;
let exists = existing_path.is_some();
let rc = manager.rc.get_block_rc(hash)?;
if exists != needed.is_needed() || exists != needed.is_nonzero() {
if exists != rc.is_needed() || exists != rc.is_nonzero() {
debug!(
"Resync block {:?}: exists {}, nonzero rc {}, deletable {}",
hash,
exists,
needed.is_nonzero(),
needed.is_deletable(),
rc.is_nonzero(),
rc.is_deletable(),
);
}
if exists && needed.is_deletable() {
if exists && rc.is_deletable() {
info!("Resync block {:?}: offloading and deleting", hash);
let existing_path = existing_path.unwrap();
let mut who = manager.replication.write_nodes(hash);
if who.len() < manager.replication.write_quorum() {
@ -419,7 +422,7 @@ impl BlockResyncManager {
.add(1, &[KeyValue::new("to", format!("{:?}", node))]);
}
let block = manager.read_block(hash).await?;
let block = manager.read_block_from(hash, &existing_path).await?;
let (header, bytes) = block.into_parts();
let put_block_message = Req::new(BlockRpc::PutBlock {
hash: *hash,
@ -451,7 +454,7 @@ impl BlockResyncManager {
manager.rc.clear_deleted_block_rc(hash)?;
}
if needed.is_nonzero() && !exists {
if rc.is_nonzero() && !exists {
info!(
"Resync block {:?}: fetching absent but needed block (refcount > 0)",
hash

View File

@ -1,6 +1,6 @@
[package]
name = "garage_db"
version = "0.8.4"
version = "0.9.0"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
@ -11,11 +11,6 @@ readme = "../../README.md"
[lib]
path = "lib.rs"
[[bin]]
name = "convert"
path = "bin/convert.rs"
required-features = ["cli"]
[dependencies]
err-derive = "0.3"
hexdump = "0.1"
@ -33,7 +28,7 @@ pretty_env_logger = { version = "0.5", optional = true }
mktemp = "0.5"
[features]
default = [ "sled" ]
default = [ "sled", "lmdb", "sqlite" ]
bundled-libs = [ "rusqlite?/bundled" ]
cli = ["clap", "pretty_env_logger"]
lmdb = [ "heed" ]

View File

@ -1,69 +0,0 @@
use std::path::PathBuf;
use garage_db::*;
use clap::Parser;
/// K2V command line interface
#[derive(Parser, Debug)]
#[clap(author, version, about, long_about = None)]
struct Args {
/// Input DB path
#[clap(short = 'i')]
input_path: PathBuf,
/// Input DB engine
#[clap(short = 'a')]
input_engine: String,
/// Output DB path
#[clap(short = 'o')]
output_path: PathBuf,
/// Output DB engine
#[clap(short = 'b')]
output_engine: String,
}
fn main() {
let args = Args::parse();
pretty_env_logger::init();
match do_conversion(args) {
Ok(()) => println!("Success!"),
Err(e) => eprintln!("Error: {}", e),
}
}
fn do_conversion(args: Args) -> Result<()> {
let input = open_db(args.input_path, args.input_engine)?;
let output = open_db(args.output_path, args.output_engine)?;
output.import(&input)?;
Ok(())
}
fn open_db(path: PathBuf, engine: String) -> Result<Db> {
match engine.as_str() {
"sled" => {
let db = sled_adapter::sled::Config::default().path(&path).open()?;
Ok(sled_adapter::SledDb::init(db))
}
"sqlite" | "sqlite3" | "rusqlite" => {
let db = sqlite_adapter::rusqlite::Connection::open(&path)?;
Ok(sqlite_adapter::SqliteDb::init(db))
}
"lmdb" | "heed" => {
std::fs::create_dir_all(&path).map_err(|e| {
Error(format!("Unable to create LMDB data directory: {}", e).into())
})?;
let map_size = lmdb_adapter::recommended_map_size();
let db = lmdb_adapter::heed::EnvOpenOptions::new()
.max_dbs(100)
.map_size(map_size)
.open(&path)
.unwrap();
Ok(lmdb_adapter::LmdbDb::init(db))
}
e => Err(Error(format!("Invalid DB engine: {}", e).into())),
}
}

View File

@ -85,7 +85,7 @@ impl CountedTree {
let old_some = expected_old.is_some();
let new_some = new.is_some();
let tx_res = self.0.tree.db().transaction(|mut tx| {
let tx_res = self.0.tree.db().transaction(|tx| {
let old_val = tx.get(&self.0.tree, &key)?;
let is_same = match (&old_val, &expected_old) {
(None, None) => true,
@ -101,9 +101,9 @@ impl CountedTree {
tx.remove(&self.0.tree, &key)?;
}
}
tx.commit(())
Ok(())
} else {
tx.abort(())
Err(TxError::Abort(()))
}
});

View File

@ -2,9 +2,6 @@
#[cfg(feature = "sqlite")]
extern crate tracing;
#[cfg(not(any(feature = "lmdb", feature = "sled", feature = "sqlite")))]
compile_error!("Must activate the Cargo feature for at least one DB engine: lmdb, sled or sqlite.");
#[cfg(feature = "lmdb")]
pub mod lmdb_adapter;
#[cfg(feature = "sled")]
@ -25,10 +22,15 @@ use std::sync::Arc;
use err_derive::Error;
pub(crate) type OnCommit = Vec<Box<dyn FnOnce()>>;
#[derive(Clone)]
pub struct Db(pub(crate) Arc<dyn IDb>);
pub struct Transaction<'a>(&'a mut dyn ITx);
pub struct Transaction<'a> {
tx: &'a mut dyn ITx,
on_commit: OnCommit,
}
#[derive(Clone)]
pub struct Tree(Arc<dyn IDb>, usize);
@ -88,7 +90,7 @@ impl Db {
pub fn transaction<R, E, F>(&self, fun: F) -> TxResult<R, E>
where
F: Fn(Transaction<'_>) -> TxResult<R, E>,
F: Fn(&mut Transaction<'_>) -> TxResult<R, E>,
{
let f = TxFn {
function: fun,
@ -101,14 +103,17 @@ impl Db {
.expect("Transaction did not store result");
match tx_res {
Ok(()) => {
assert!(matches!(ret, Ok(_)));
ret
}
Err(TxError::Abort(())) => {
assert!(matches!(ret, Err(TxError::Abort(_))));
ret
}
Ok(on_commit) => match ret {
Ok(value) => {
on_commit.into_iter().for_each(|f| f());
Ok(value)
}
_ => unreachable!(),
},
Err(TxError::Abort(())) => match ret {
Err(TxError::Abort(e)) => Err(TxError::Abort(e)),
_ => unreachable!(),
},
Err(TxError::Db(e2)) => match ret {
// Ok was stored -> the error occurred when finalizing
// the transaction
@ -142,7 +147,7 @@ impl Db {
let ex_tree = other.open_tree(&name)?;
let tx_res = self.transaction(|mut tx| {
let tx_res = self.transaction(|tx| {
let mut i = 0;
for item in ex_tree.iter().map_err(TxError::Abort)? {
let (k, v) = item.map_err(TxError::Abort)?;
@ -152,7 +157,7 @@ impl Db {
println!("{}: imported {}", name, i);
}
}
tx.commit(i)
Ok(i)
});
let total = match tx_res {
Err(TxError::Db(e)) => return Err(e),
@ -252,11 +257,11 @@ impl Tree {
impl<'a> Transaction<'a> {
#[inline]
pub fn get<T: AsRef<[u8]>>(&self, tree: &Tree, key: T) -> TxOpResult<Option<Value>> {
self.0.get(tree.1, key.as_ref())
self.tx.get(tree.1, key.as_ref())
}
#[inline]
pub fn len(&self, tree: &Tree) -> TxOpResult<usize> {
self.0.len(tree.1)
self.tx.len(tree.1)
}
/// Returns the old value if there was one
@ -267,21 +272,21 @@ impl<'a> Transaction<'a> {
key: T,
value: U,
) -> TxOpResult<Option<Value>> {
self.0.insert(tree.1, key.as_ref(), value.as_ref())
self.tx.insert(tree.1, key.as_ref(), value.as_ref())
}
/// Returns the old value if there was one
#[inline]
pub fn remove<T: AsRef<[u8]>>(&mut self, tree: &Tree, key: T) -> TxOpResult<Option<Value>> {
self.0.remove(tree.1, key.as_ref())
self.tx.remove(tree.1, key.as_ref())
}
#[inline]
pub fn iter(&self, tree: &Tree) -> TxOpResult<TxValueIter<'_>> {
self.0.iter(tree.1)
self.tx.iter(tree.1)
}
#[inline]
pub fn iter_rev(&self, tree: &Tree) -> TxOpResult<TxValueIter<'_>> {
self.0.iter_rev(tree.1)
self.tx.iter_rev(tree.1)
}
#[inline]
@ -292,7 +297,7 @@ impl<'a> Transaction<'a> {
{
let sb = range.start_bound();
let eb = range.end_bound();
self.0.range(tree.1, get_bound(sb), get_bound(eb))
self.tx.range(tree.1, get_bound(sb), get_bound(eb))
}
#[inline]
pub fn range_rev<K, R>(&self, tree: &Tree, range: R) -> TxOpResult<TxValueIter<'_>>
@ -302,19 +307,12 @@ impl<'a> Transaction<'a> {
{
let sb = range.start_bound();
let eb = range.end_bound();
self.0.range_rev(tree.1, get_bound(sb), get_bound(eb))
}
// ----
#[inline]
pub fn abort<R, E>(self, e: E) -> TxResult<R, E> {
Err(TxError::Abort(e))
self.tx.range_rev(tree.1, get_bound(sb), get_bound(eb))
}
#[inline]
pub fn commit<R, E>(self, r: R) -> TxResult<R, E> {
Ok(r)
pub fn on_commit<F: FnOnce() + 'static>(&mut self, f: F) {
self.on_commit.push(Box::new(f));
}
}
@ -351,7 +349,7 @@ pub(crate) trait IDb: Send + Sync {
high: Bound<&'r [u8]>,
) -> Result<ValueIter<'_>>;
fn transaction(&self, f: &dyn ITxFn) -> TxResult<(), ()>;
fn transaction(&self, f: &dyn ITxFn) -> TxResult<OnCommit, ()>;
}
pub(crate) trait ITx {
@ -383,14 +381,14 @@ pub(crate) trait ITxFn {
}
pub(crate) enum TxFnResult {
Ok,
Ok(OnCommit),
Abort,
DbErr,
}
struct TxFn<F, R, E>
where
F: Fn(Transaction<'_>) -> TxResult<R, E>,
F: Fn(&mut Transaction<'_>) -> TxResult<R, E>,
{
function: F,
result: Cell<Option<TxResult<R, E>>>,
@ -398,12 +396,16 @@ where
impl<F, R, E> ITxFn for TxFn<F, R, E>
where
F: Fn(Transaction<'_>) -> TxResult<R, E>,
F: Fn(&mut Transaction<'_>) -> TxResult<R, E>,
{
fn try_on(&self, tx: &mut dyn ITx) -> TxFnResult {
let res = (self.function)(Transaction(tx));
let mut tx = Transaction {
tx,
on_commit: vec![],
};
let res = (self.function)(&mut tx);
let res2 = match &res {
Ok(_) => TxFnResult::Ok,
Ok(_) => TxFnResult::Ok(tx.on_commit),
Err(TxError::Abort(_)) => TxFnResult::Abort,
Err(TxError::Db(_)) => TxFnResult::DbErr,
};
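
As a quick illustration of the reworked garage_db transaction API above, here is a minimal caller-side sketch (not part of this changeset): it assumes an already-opened `Db` handle `db` and a `Tree` handle `tree`, and mirrors the updated test further below. The closure now receives `&mut Transaction`, returns `Ok(...)` or `Err(TxError::Abort(...))` directly instead of calling `tx.commit()` / `tx.abort()`, and can register `on_commit` hooks that run only once the transaction has actually been committed.

// Sketch only: assumes `db: Db` and `tree: Tree` already exist.
let res = db.transaction::<_, (), _>(|tx| {
    // insert returns the previous value for that key, if any
    tx.insert(&tree, b"hello", b"world").unwrap();
    // queued closures are executed only after a successful commit
    tx.on_commit(|| println!("transaction committed"));
    Ok(42)
});
assert!(matches!(res, Ok(42)));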


@ -9,8 +9,8 @@ use heed::types::ByteSlice;
use heed::{BytesDecode, Env, RoTxn, RwTxn, UntypedDatabase as Database};
use crate::{
Db, Error, IDb, ITx, ITxFn, Result, TxError, TxFnResult, TxOpError, TxOpResult, TxResult,
TxValueIter, Value, ValueIter,
Db, Error, IDb, ITx, ITxFn, OnCommit, Result, TxError, TxFnResult, TxOpError, TxOpResult,
TxResult, TxValueIter, Value, ValueIter,
};
pub use heed;
@ -186,7 +186,7 @@ impl IDb for LmdbDb {
// ----
fn transaction(&self, f: &dyn ITxFn) -> TxResult<(), ()> {
fn transaction(&self, f: &dyn ITxFn) -> TxResult<OnCommit, ()> {
let trees = self.trees.read().unwrap();
let mut tx = LmdbTx {
trees: &trees.0[..],
@ -199,9 +199,9 @@ impl IDb for LmdbDb {
let res = f.try_on(&mut tx);
match res {
TxFnResult::Ok => {
TxFnResult::Ok(on_commit) => {
tx.tx.commit().map_err(Error::from).map_err(TxError::Db)?;
Ok(())
Ok(on_commit)
}
TxFnResult::Abort => {
tx.tx.abort().map_err(Error::from).map_err(TxError::Db)?;


@ -10,8 +10,8 @@ use sled::transaction::{
};
use crate::{
Db, Error, IDb, ITx, ITxFn, Result, TxError, TxFnResult, TxOpError, TxOpResult, TxResult,
TxValueIter, Value, ValueIter,
Db, Error, IDb, ITx, ITxFn, OnCommit, Result, TxError, TxFnResult, TxOpError, TxOpResult,
TxResult, TxValueIter, Value, ValueIter,
};
pub use sled;
@ -38,7 +38,15 @@ pub struct SledDb {
}
impl SledDb {
#[deprecated(
since = "0.9.0",
note = "The Sled database is now deprecated and will be removed in Garage v1.0. Please migrate to LMDB or Sqlite as soon as possible."
)]
pub fn init(db: sled::Db) -> Db {
tracing::warn!("-------------------- IMPORTANT WARNING !!! ----------------------");
tracing::warn!("The Sled database is now deprecated and will be removed in Garage v1.0.");
tracing::warn!("Please migrate to LMDB or Sqlite as soon as possible.");
tracing::warn!("-----------------------------------------------------------------------");
let s = Self {
db,
trees: RwLock::new((Vec::new(), HashMap::new())),
@ -158,7 +166,7 @@ impl IDb for SledDb {
// ----
fn transaction(&self, f: &dyn ITxFn) -> TxResult<(), ()> {
fn transaction(&self, f: &dyn ITxFn) -> TxResult<OnCommit, ()> {
let trees = self.trees.read().unwrap();
let res = trees.0.transaction(|txtrees| {
let mut tx = SledTx {
@ -166,9 +174,9 @@ impl IDb for SledDb {
err: Cell::new(None),
};
match f.try_on(&mut tx) {
TxFnResult::Ok => {
TxFnResult::Ok(on_commit) => {
assert!(tx.err.into_inner().is_none());
Ok(())
Ok(on_commit)
}
TxFnResult::Abort => {
assert!(tx.err.into_inner().is_none());
@ -181,7 +189,7 @@ impl IDb for SledDb {
}
});
match res {
Ok(()) => Ok(()),
Ok(on_commit) => Ok(on_commit),
Err(TransactionError::Abort(())) => Err(TxError::Abort(())),
Err(TransactionError::Storage(s)) => Err(TxError::Db(s.into())),
}


@ -9,8 +9,8 @@ use std::sync::{Arc, Mutex, MutexGuard};
use rusqlite::{params, Connection, Rows, Statement, Transaction};
use crate::{
Db, Error, IDb, ITx, ITxFn, Result, TxError, TxFnResult, TxOpError, TxOpResult, TxResult,
TxValueIter, Value, ValueIter,
Db, Error, IDb, ITx, ITxFn, OnCommit, Result, TxError, TxFnResult, TxOpError, TxOpResult,
TxResult, TxValueIter, Value, ValueIter,
};
pub use rusqlite;
@ -261,7 +261,7 @@ impl IDb for SqliteDb {
// ----
fn transaction(&self, f: &dyn ITxFn) -> TxResult<(), ()> {
fn transaction(&self, f: &dyn ITxFn) -> TxResult<OnCommit, ()> {
trace!("transaction: lock db");
let mut this = self.0.lock().unwrap();
trace!("transaction: lock acquired");
@ -277,9 +277,9 @@ impl IDb for SqliteDb {
trees: &this_mut_ref.trees,
};
let res = match f.try_on(&mut tx) {
TxFnResult::Ok => {
TxFnResult::Ok(on_commit) => {
tx.tx.commit().map_err(Error::from).map_err(TxError::Db)?;
Ok(())
Ok(on_commit)
}
TxFnResult::Abort => {
tx.tx.rollback().map_err(Error::from).map_err(TxError::Db)?;


@ -13,26 +13,26 @@ fn test_suite(db: Db) {
assert!(tree.insert(ka, va).unwrap().is_none());
assert_eq!(tree.get(ka).unwrap().unwrap(), va);
let res = db.transaction::<_, (), _>(|mut tx| {
let res = db.transaction::<_, (), _>(|tx| {
assert_eq!(tx.get(&tree, ka).unwrap().unwrap(), va);
assert_eq!(tx.insert(&tree, ka, vb).unwrap().unwrap(), va);
assert_eq!(tx.get(&tree, ka).unwrap().unwrap(), vb);
tx.commit(12)
Ok(12)
});
assert!(matches!(res, Ok(12)));
assert_eq!(tree.get(ka).unwrap().unwrap(), vb);
let res = db.transaction::<(), _, _>(|mut tx| {
let res = db.transaction::<(), _, _>(|tx| {
assert_eq!(tx.get(&tree, ka).unwrap().unwrap(), vb);
assert_eq!(tx.insert(&tree, ka, vc).unwrap().unwrap(), vb);
assert_eq!(tx.get(&tree, ka).unwrap().unwrap(), vc);
tx.abort(42)
Err(TxError::Abort(42))
});
assert!(matches!(res, Err(TxError::Abort(42))));
assert_eq!(tree.get(ka).unwrap().unwrap(), vb);


@ -1,6 +1,6 @@
[package]
name = "garage"
version = "0.8.4"
version = "0.9.0"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
@ -53,7 +53,7 @@ futures = "0.3"
futures-util = "0.3"
tokio = { version = "1.0", default-features = false, features = ["rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] }
netapp = "0.5"
netapp = "0.10"
opentelemetry = { version = "0.17", features = [ "rt-tokio" ] }
opentelemetry-prometheus = { version = "0.10", optional = true }
@ -78,7 +78,7 @@ k2v-client.workspace = true
[features]
default = [ "bundled-libs", "metrics", "sled", "k2v" ]
default = [ "bundled-libs", "metrics", "sled", "lmdb", "sqlite", "k2v" ]
k2v = [ "garage_util/k2v", "garage_api/k2v" ]


@ -34,6 +34,7 @@ impl AdminRpcHandler {
.get_range(&hash, None, None, 10000, Default::default())
.await?;
let mut versions = vec![];
let mut uploads = vec![];
for br in block_refs {
if let Some(v) = self
.garage
@ -41,6 +42,11 @@ impl AdminRpcHandler {
.get(&br.version, &EmptyKey)
.await?
{
if let VersionBacklink::MultipartUpload { upload_id } = &v.backlink {
if let Some(u) = self.garage.mpu_table.get(upload_id, &EmptyKey).await? {
uploads.push(u);
}
}
versions.push(Ok(v));
} else {
versions.push(Err(br.version));
@ -50,6 +56,7 @@ impl AdminRpcHandler {
hash,
refcount,
versions,
uploads,
})
}
@ -93,6 +100,7 @@ impl AdminRpcHandler {
}
let mut obj_dels = 0;
let mut mpu_dels = 0;
let mut ver_dels = 0;
for hash in blocks {
@ -105,56 +113,80 @@ impl AdminRpcHandler {
.await?;
for br in block_refs {
let version = match self
if let Some(version) = self
.garage
.version_table
.get(&br.version, &EmptyKey)
.await?
{
Some(v) => v,
None => continue,
};
self.handle_block_purge_version_backlink(
&version,
&mut obj_dels,
&mut mpu_dels,
)
.await?;
if let Some(object) = self
.garage
.object_table
.get(&version.bucket_id, &version.key)
.await?
{
let ov = object.versions().iter().rev().find(|v| v.is_complete());
if let Some(ov) = ov {
if ov.uuid == br.version {
let del_uuid = gen_uuid();
let deleted_object = Object::new(
version.bucket_id,
version.key.clone(),
vec![ObjectVersion {
uuid: del_uuid,
timestamp: ov.timestamp + 1,
state: ObjectVersionState::Complete(
ObjectVersionData::DeleteMarker,
),
}],
);
self.garage.object_table.insert(&deleted_object).await?;
obj_dels += 1;
}
if !version.deleted.get() {
let deleted_version = Version::new(version.uuid, version.backlink, true);
self.garage.version_table.insert(&deleted_version).await?;
ver_dels += 1;
}
}
if !version.deleted.get() {
let deleted_version =
Version::new(version.uuid, version.bucket_id, version.key.clone(), true);
self.garage.version_table.insert(&deleted_version).await?;
ver_dels += 1;
}
}
}
Ok(AdminRpc::Ok(format!(
"{} blocks were purged: {} object deletion markers added, {} versions marked deleted",
"Purged {} blocks, {} versions, {} objects, {} multipart uploads",
blocks.len(),
ver_dels,
obj_dels,
ver_dels
mpu_dels,
)))
}
async fn handle_block_purge_version_backlink(
&self,
version: &Version,
obj_dels: &mut usize,
mpu_dels: &mut usize,
) -> Result<(), Error> {
let (bucket_id, key, ov_id) = match &version.backlink {
VersionBacklink::Object { bucket_id, key } => (*bucket_id, key.clone(), version.uuid),
VersionBacklink::MultipartUpload { upload_id } => {
if let Some(mut mpu) = self.garage.mpu_table.get(upload_id, &EmptyKey).await? {
if !mpu.deleted.get() {
mpu.parts.clear();
mpu.deleted.set();
self.garage.mpu_table.insert(&mpu).await?;
*mpu_dels += 1;
}
(mpu.bucket_id, mpu.key.clone(), *upload_id)
} else {
return Ok(());
}
}
};
if let Some(object) = self.garage.object_table.get(&bucket_id, &key).await? {
let ov = object.versions().iter().rev().find(|v| v.is_complete());
if let Some(ov) = ov {
if ov.uuid == ov_id {
let del_uuid = gen_uuid();
let deleted_object = Object::new(
bucket_id,
key,
vec![ObjectVersion {
uuid: del_uuid,
timestamp: ov.timestamp + 1,
state: ObjectVersionState::Complete(ObjectVersionData::DeleteMarker),
}],
);
self.garage.object_table.insert(&deleted_object).await?;
*obj_dels += 1;
}
}
}
Ok(())
}
}


@ -73,6 +73,15 @@ impl AdminRpcHandler {
.map(|x| x.filtered_values(&self.garage.system.ring.borrow()))
.unwrap_or_default();
let mpu_counters = self
.garage
.mpu_counter_table
.table
.get(&bucket_id, &EmptyKey)
.await?
.map(|x| x.filtered_values(&self.garage.system.ring.borrow()))
.unwrap_or_default();
let mut relevant_keys = HashMap::new();
for (k, _) in bucket
.state
@ -112,6 +121,7 @@ impl AdminRpcHandler {
bucket,
relevant_keys,
counters,
mpu_counters,
})
}


@ -2,7 +2,7 @@ use std::collections::HashMap;
use garage_table::*;
use garage_model::helper::error::Error;
use garage_model::helper::error::*;
use garage_model::key_table::*;
use crate::cli::*;
@ -14,7 +14,7 @@ impl AdminRpcHandler {
match cmd {
KeyOperation::List => self.handle_list_keys().await,
KeyOperation::Info(query) => self.handle_key_info(query).await,
KeyOperation::New(query) => self.handle_create_key(query).await,
KeyOperation::Create(query) => self.handle_create_key(query).await,
KeyOperation::Rename(query) => self.handle_rename_key(query).await,
KeyOperation::Delete(query) => self.handle_delete_key(query).await,
KeyOperation::Allow(query) => self.handle_allow_key(query).await,
@ -41,12 +41,17 @@ impl AdminRpcHandler {
Ok(AdminRpc::KeyList(key_ids))
}
async fn handle_key_info(&self, query: &KeyOpt) -> Result<AdminRpc, Error> {
let key = self
async fn handle_key_info(&self, query: &KeyInfoOpt) -> Result<AdminRpc, Error> {
let mut key = self
.garage
.key_helper()
.get_existing_matching_key(&query.key_pattern)
.await?;
if !query.show_secret {
key.state.as_option_mut().unwrap().secret_key = "(redacted)".into();
}
self.key_info_result(key).await
}
@ -118,11 +123,17 @@ impl AdminRpcHandler {
}
async fn handle_import_key(&self, query: &KeyImportOpt) -> Result<AdminRpc, Error> {
if !query.yes {
return Err(Error::BadRequest("This command is intended to re-import keys that were previously generated by Garage. If you want to create a new key, use `garage key new` instead. Add the --yes flag if you really want to re-import a key.".to_string()));
}
let prev_key = self.garage.key_table.get(&EmptyKey, &query.key_id).await?;
if prev_key.is_some() {
return Err(Error::BadRequest(format!("Key {} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. Sorry.", query.key_id)));
}
let imported_key = Key::import(&query.key_id, &query.secret_key, &query.name);
let imported_key = Key::import(&query.key_id, &query.secret_key, &query.name)
.ok_or_bad_request("Invalid key format")?;
self.garage.key_table.insert(&imported_key).await?;
self.key_info_result(imported_key).await


@ -28,6 +28,7 @@ use garage_model::garage::Garage;
use garage_model::helper::error::{Error, OkOrBadRequest};
use garage_model::key_table::*;
use garage_model::migrate::Migrate;
use garage_model::s3::mpu_table::MultipartUpload;
use garage_model::s3::version_table::Version;
use crate::cli::*;
@ -53,6 +54,7 @@ pub enum AdminRpc {
bucket: Bucket,
relevant_keys: HashMap<String, Key>,
counters: HashMap<String, i64>,
mpu_counters: HashMap<String, i64>,
},
KeyList(Vec<(String, String)>),
KeyInfo(Key, HashMap<Uuid, Bucket>),
@ -67,6 +69,7 @@ pub enum AdminRpc {
hash: Hash,
refcount: u64,
versions: Vec<Result<Version, Uuid>>,
uploads: Vec<MultipartUpload>,
},
}
@ -274,7 +277,7 @@ impl AdminRpcHandler {
// Gather storage node and free space statistics
let layout = &self.garage.system.ring.borrow().layout;
let mut node_partition_count = HashMap::<Uuid, u64>::new();
for short_id in layout.ring_assignation_data.iter() {
for short_id in layout.ring_assignment_data.iter() {
let id = layout.node_id_vec[*short_id as usize];
*node_partition_count.entry(id).or_default() += 1;
}


@ -85,7 +85,7 @@ pub async fn cmd_status(rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID) ->
));
}
_ => {
let new_role = match layout.staging.get(&adv.id) {
let new_role = match layout.staging_roles.get(&adv.id) {
Some(NodeRoleV(Some(_))) => "(pending)",
_ => "NO ROLE ASSIGNED",
};
@ -190,8 +190,9 @@ pub async fn cmd_admin(
bucket,
relevant_keys,
counters,
mpu_counters,
} => {
print_bucket_info(&bucket, &relevant_keys, &counters);
print_bucket_info(&bucket, &relevant_keys, &counters, &mpu_counters);
}
AdminRpc::KeyList(kl) => {
print_key_list(kl);
@ -215,8 +216,9 @@ pub async fn cmd_admin(
hash,
refcount,
versions,
uploads,
} => {
print_block_info(hash, refcount, versions);
print_block_info(hash, refcount, versions, uploads);
}
r => {
error!("Unexpected response: {:?}", r);


@ -0,0 +1,69 @@
use std::path::PathBuf;
use structopt::StructOpt;
use garage_db::*;
/// K2V command line interface
#[derive(StructOpt, Debug)]
pub struct ConvertDbOpt {
/// Input database path (not the same as metadata_dir, see
/// https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db-engine-since-v0-8-0)
#[structopt(short = "i")]
input_path: PathBuf,
/// Input database engine (sled, lmdb or sqlite; limited by db engines
/// enabled in this build)
#[structopt(short = "a")]
input_engine: String,
/// Output database path
#[structopt(short = "o")]
output_path: PathBuf,
/// Output database engine
#[structopt(short = "b")]
output_engine: String,
}
pub(crate) fn do_conversion(args: ConvertDbOpt) -> Result<()> {
let input = open_db(args.input_path, args.input_engine)?;
let output = open_db(args.output_path, args.output_engine)?;
output.import(&input)?;
Ok(())
}
fn open_db(path: PathBuf, engine: String) -> Result<Db> {
match engine.as_str() {
#[cfg(feature = "sled")]
"sled" => {
let db = sled_adapter::sled::Config::default().path(&path).open()?;
Ok(sled_adapter::SledDb::init(db))
}
#[cfg(feature = "sqlite")]
"sqlite" | "sqlite3" | "rusqlite" => {
let db = sqlite_adapter::rusqlite::Connection::open(&path)?;
db.pragma_update(None, "journal_mode", &"WAL")?;
db.pragma_update(None, "synchronous", &"NORMAL")?;
Ok(sqlite_adapter::SqliteDb::init(db))
}
#[cfg(feature = "lmdb")]
"lmdb" | "heed" => {
std::fs::create_dir_all(&path).map_err(|e| {
Error(format!("Unable to create LMDB data directory: {}", e).into())
})?;
let map_size = lmdb_adapter::recommended_map_size();
let mut env_builder = lmdb_adapter::heed::EnvOpenOptions::new();
env_builder.max_dbs(100);
env_builder.map_size(map_size);
unsafe {
env_builder.flag(lmdb_adapter::heed::flags::Flags::MdbNoMetaSync);
}
let db = env_builder.open(&path)?;
Ok(lmdb_adapter::LmdbDb::init(db))
}
e => Err(Error(
format!("Invalid or unsupported DB engine: {}", e).into(),
)),
}
}
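
For context, the convert-db subcommand defined above would be invoked roughly as `garage convert-db -i <input_path> -a <input_engine> -o <output_path> -b <output_engine>`, for example with `-a sled -b lmdb` to migrate away from Sled. Paths and engine choices here are placeholders; only the engines enabled at build time are available, as reflected by the cfg-gated match arms in open_db.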


@ -1,3 +1,5 @@
use bytesize::ByteSize;
use format_table::format_table;
use garage_util::crdt::Crdt;
use garage_util::error::*;
@ -14,8 +16,8 @@ pub async fn cli_layout_command_dispatch(
rpc_host: NodeID,
) -> Result<(), Error> {
match cmd {
LayoutOperation::Assign(configure_opt) => {
cmd_assign_role(system_rpc_endpoint, rpc_host, configure_opt).await
LayoutOperation::Assign(assign_opt) => {
cmd_assign_role(system_rpc_endpoint, rpc_host, assign_opt).await
}
LayoutOperation::Remove(remove_opt) => {
cmd_remove_role(system_rpc_endpoint, rpc_host, remove_opt).await
@ -27,6 +29,9 @@ pub async fn cli_layout_command_dispatch(
LayoutOperation::Revert(revert_opt) => {
cmd_revert_layout(system_rpc_endpoint, rpc_host, revert_opt).await
}
LayoutOperation::Config(config_opt) => {
cmd_config_layout(system_rpc_endpoint, rpc_host, config_opt).await
}
}
}
@ -60,14 +65,14 @@ pub async fn cmd_assign_role(
.collect::<Result<Vec<_>, _>>()?;
let mut roles = layout.roles.clone();
roles.merge(&layout.staging);
roles.merge(&layout.staging_roles);
for replaced in args.replace.iter() {
let replaced_node = find_matching_node(layout.node_ids().iter().cloned(), replaced)?;
match roles.get(&replaced_node) {
Some(NodeRoleV(Some(_))) => {
layout
.staging
.staging_roles
.merge(&roles.update_mutator(replaced_node, NodeRoleV(None)));
}
_ => {
@ -83,7 +88,7 @@ pub async fn cmd_assign_role(
return Err(Error::Message(
"-c and -g are mutually exclusive, please configure node either with c>0 to act as a storage node or with -g to act as a gateway node".into()));
}
if args.capacity == Some(0) {
if args.capacity == Some(ByteSize::b(0)) {
return Err(Error::Message("Invalid capacity value: 0".into()));
}
@ -91,7 +96,7 @@ pub async fn cmd_assign_role(
let new_entry = match roles.get(&added_node) {
Some(NodeRoleV(Some(old))) => {
let capacity = match args.capacity {
Some(c) => Some(c),
Some(c) => Some(c.as_u64()),
None if args.gateway => None,
None => old.capacity,
};
@ -108,7 +113,7 @@ pub async fn cmd_assign_role(
}
_ => {
let capacity = match args.capacity {
Some(c) => Some(c),
Some(c) => Some(c.as_u64()),
None if args.gateway => None,
None => return Err(Error::Message(
"Please specify a capacity with the -c flag, or set node explicitly as gateway with -g".into())),
@ -125,7 +130,7 @@ pub async fn cmd_assign_role(
};
layout
.staging
.staging_roles
.merge(&roles.update_mutator(added_node, NodeRoleV(Some(new_entry))));
}
@ -145,13 +150,13 @@ pub async fn cmd_remove_role(
let mut layout = fetch_layout(rpc_cli, rpc_host).await?;
let mut roles = layout.roles.clone();
roles.merge(&layout.staging);
roles.merge(&layout.staging_roles);
let deleted_node =
find_matching_node(roles.items().iter().map(|(id, _, _)| *id), &args.node_id)?;
layout
.staging
.staging_roles
.merge(&roles.update_mutator(deleted_node, NodeRoleV(None)));
send_layout(rpc_cli, rpc_host, layout).await?;
@ -166,40 +171,45 @@ pub async fn cmd_show_layout(
rpc_cli: &Endpoint<SystemRpc, ()>,
rpc_host: NodeID,
) -> Result<(), Error> {
let mut layout = fetch_layout(rpc_cli, rpc_host).await?;
let layout = fetch_layout(rpc_cli, rpc_host).await?;
println!("==== CURRENT CLUSTER LAYOUT ====");
if !print_cluster_layout(&layout) {
println!("No nodes currently have a role in the cluster.");
println!("See `garage status` to view available nodes.");
}
print_cluster_layout(&layout, "No nodes currently have a role in the cluster.\nSee `garage status` to view available nodes.");
println!();
println!("Current cluster layout version: {}", layout.version);
if print_staging_role_changes(&layout) {
layout.roles.merge(&layout.staging);
println!();
println!("==== NEW CLUSTER LAYOUT AFTER APPLYING CHANGES ====");
if !print_cluster_layout(&layout) {
println!("No nodes have a role in the new layout.");
}
println!();
let has_role_changes = print_staging_role_changes(&layout);
if has_role_changes {
let v = layout.version;
let res_apply = layout.apply_staged_changes(Some(v + 1));
// this will print the stats of what partitions
// will move around when we apply
if layout.calculate_partition_assignation() {
println!("To enact the staged role changes, type:");
println!();
println!(" garage layout apply --version {}", layout.version + 1);
println!();
println!(
"You can also revert all proposed changes with: garage layout revert --version {}",
layout.version + 1
);
} else {
println!("Not enough nodes have an assigned role to maintain enough copies of data.");
println!("This new layout cannot yet be applied.");
match res_apply {
Ok((layout, msg)) => {
println!();
println!("==== NEW CLUSTER LAYOUT AFTER APPLYING CHANGES ====");
print_cluster_layout(&layout, "No nodes have a role in the new layout.");
println!();
for line in msg.iter() {
println!("{}", line);
}
println!("To enact the staged role changes, type:");
println!();
println!(" garage layout apply --version {}", v + 1);
println!();
println!(
"You can also revert all proposed changes with: garage layout revert --version {}",
v + 1)
}
Err(e) => {
println!("Error while trying to compute the assignment: {}", e);
println!("This new layout cannot yet be applied.");
println!(
"You can also revert all proposed changes with: garage layout revert --version {}",
v + 1)
}
}
}
@ -213,11 +223,14 @@ pub async fn cmd_apply_layout(
) -> Result<(), Error> {
let layout = fetch_layout(rpc_cli, rpc_host).await?;
let layout = layout.apply_staged_changes(apply_opt.version)?;
let (layout, msg) = layout.apply_staged_changes(apply_opt.version)?;
for line in msg.iter() {
println!("{}", line);
}
send_layout(rpc_cli, rpc_host, layout).await?;
println!("New cluster layout with updated role assignation has been applied in cluster.");
println!("New cluster layout with updated role assignment has been applied in cluster.");
println!("Data will now be moved around between nodes accordingly.");
Ok(())
@ -238,6 +251,52 @@ pub async fn cmd_revert_layout(
Ok(())
}
pub async fn cmd_config_layout(
rpc_cli: &Endpoint<SystemRpc, ()>,
rpc_host: NodeID,
config_opt: ConfigLayoutOpt,
) -> Result<(), Error> {
let mut layout = fetch_layout(rpc_cli, rpc_host).await?;
let mut did_something = false;
match config_opt.redundancy {
None => (),
Some(r_str) => {
let r = r_str
.parse::<ZoneRedundancy>()
.ok_or_message("invalid zone redundancy value")?;
if let ZoneRedundancy::AtLeast(r_int) = r {
if r_int > layout.replication_factor {
return Err(Error::Message(format!(
"The zone redundancy must be smaller or equal to the \
replication factor ({}).",
layout.replication_factor
)));
} else if r_int < 1 {
return Err(Error::Message(
"The zone redundancy must be at least 1.".into(),
));
}
}
layout
.staging_parameters
.update(LayoutParameters { zone_redundancy: r });
println!("The zone redundancy parameter has been set to '{}'.", r);
did_something = true;
}
}
if !did_something {
return Err(Error::Message(
"Please specify an action for `garage layout config`".into(),
));
}
send_layout(rpc_cli, rpc_host, layout).await?;
Ok(())
}
// --- utility ---
pub async fn fetch_layout(
@ -268,59 +327,84 @@ pub async fn send_layout(
Ok(())
}
pub fn print_cluster_layout(layout: &ClusterLayout) -> bool {
let mut table = vec!["ID\tTags\tZone\tCapacity".to_string()];
pub fn print_cluster_layout(layout: &ClusterLayout, empty_msg: &str) {
let mut table = vec!["ID\tTags\tZone\tCapacity\tUsable capacity".to_string()];
for (id, _, role) in layout.roles.items().iter() {
let role = match &role.0 {
Some(r) => r,
_ => continue,
};
let tags = role.tags.join(",");
table.push(format!(
"{:?}\t{}\t{}\t{}",
id,
tags,
role.zone,
role.capacity_string()
));
let usage = layout.get_node_usage(id).unwrap_or(0);
let capacity = layout.get_node_capacity(id).unwrap_or(0);
if capacity > 0 {
table.push(format!(
"{:?}\t{}\t{}\t{}\t{} ({:.1}%)",
id,
tags,
role.zone,
role.capacity_string(),
ByteSize::b(usage as u64 * layout.partition_size).to_string_as(false),
(100.0 * usage as f32 * layout.partition_size as f32) / (capacity as f32)
));
} else {
table.push(format!(
"{:?}\t{}\t{}\t{}",
id,
tags,
role.zone,
role.capacity_string()
));
};
}
if table.len() == 1 {
false
} else {
if table.len() > 1 {
format_table(table);
true
println!();
println!("Zone redundancy: {}", layout.parameters.zone_redundancy);
} else {
println!("{}", empty_msg);
}
}
pub fn print_staging_role_changes(layout: &ClusterLayout) -> bool {
let has_changes = layout
.staging
let has_role_changes = layout
.staging_roles
.items()
.iter()
.any(|(k, _, v)| layout.roles.get(k) != Some(v));
let has_layout_changes = *layout.staging_parameters.get() != layout.parameters;
if has_changes {
if has_role_changes || has_layout_changes {
println!();
println!("==== STAGED ROLE CHANGES ====");
let mut table = vec!["ID\tTags\tZone\tCapacity".to_string()];
for (id, _, role) in layout.staging.items().iter() {
if layout.roles.get(id) == Some(role) {
continue;
}
if let Some(role) = &role.0 {
let tags = role.tags.join(",");
table.push(format!(
"{:?}\t{}\t{}\t{}",
id,
tags,
role.zone,
role.capacity_string()
));
} else {
table.push(format!("{:?}\tREMOVED", id));
if has_role_changes {
let mut table = vec!["ID\tTags\tZone\tCapacity".to_string()];
for (id, _, role) in layout.staging_roles.items().iter() {
if layout.roles.get(id) == Some(role) {
continue;
}
if let Some(role) = &role.0 {
let tags = role.tags.join(",");
table.push(format!(
"{:?}\t{}\t{}\t{}",
id,
tags,
role.zone,
role.capacity_string()
));
} else {
table.push(format!("{:?}\tREMOVED", id));
}
}
format_table(table);
println!();
}
if has_layout_changes {
println!(
"Zone redundancy: {}",
layout.staging_parameters.get().zone_redundancy
);
}
format_table(table);
true
} else {
false
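
Taken together, the layout CLI changes in this patch mean that zone redundancy is now configured explicitly, e.g. `garage layout config -r 2` (per the new option's help text, `none`, `max` or an integer are accepted, and an integer must lie between 1 and the replication factor), and that `garage layout assign` now takes an absolute capacity in bytes, e.g. `-c 500G`, rather than a relative weight. The values shown here are illustrative only.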


@ -4,6 +4,8 @@ pub(crate) mod layout;
pub(crate) mod structs;
pub(crate) mod util;
pub(crate) mod convert_db;
pub(crate) use cmd::*;
pub(crate) use init::*;
pub(crate) use layout::*;


@ -3,6 +3,8 @@ use structopt::StructOpt;
use garage_util::version::garage_version;
use crate::cli::convert_db;
#[derive(StructOpt, Debug)]
pub enum Command {
/// Run Garage server
@ -17,7 +19,7 @@ pub enum Command {
#[structopt(name = "node", version = garage_version())]
Node(NodeOperation),
/// Operations on the assignation of node roles in the cluster layout
/// Operations on the assignment of node roles in the cluster layout
#[structopt(name = "layout", version = garage_version())]
Layout(LayoutOperation),
@ -54,6 +56,10 @@ pub enum Command {
/// Low-level debug operations on data blocks
#[structopt(name = "block", version = garage_version())]
Block(BlockOperation),
/// Convert metadata db between database engine formats
#[structopt(name = "convert-db", version = garage_version())]
ConvertDb(convert_db::ConvertDbOpt),
}
#[derive(StructOpt, Debug)]
@ -91,6 +97,10 @@ pub enum LayoutOperation {
#[structopt(name = "remove", version = garage_version())]
Remove(RemoveRoleOpt),
/// Configure parameters value for the layout computation
#[structopt(name = "config", version = garage_version())]
Config(ConfigLayoutOpt),
/// Show roles currently assigned to nodes and changes staged for commit
#[structopt(name = "show", version = garage_version())]
Show,
@ -114,9 +124,9 @@ pub struct AssignRoleOpt {
#[structopt(short = "z", long = "zone")]
pub(crate) zone: Option<String>,
/// Capacity (in relative terms, use 1 to represent your smallest server)
/// Storage capacity, in bytes (supported suffixes: B, KB, MB, GB, TB, PB)
#[structopt(short = "c", long = "capacity")]
pub(crate) capacity: Option<u32>,
pub(crate) capacity: Option<bytesize::ByteSize>,
/// Gateway-only node
#[structopt(short = "g", long = "gateway")]
@ -137,6 +147,13 @@ pub struct RemoveRoleOpt {
pub(crate) node_id: String,
}
#[derive(StructOpt, Debug)]
pub struct ConfigLayoutOpt {
/// Zone redundancy parameter ('none'/'max' or integer)
#[structopt(short = "r", long = "redundancy")]
pub(crate) redundancy: Option<String>,
}
#[derive(StructOpt, Debug)]
pub struct ApplyLayoutOpt {
/// Version number of new configuration: this command will fail if
@ -317,11 +334,11 @@ pub enum KeyOperation {
/// Get key info
#[structopt(name = "info", version = garage_version())]
Info(KeyOpt),
Info(KeyInfoOpt),
/// Create new key
#[structopt(name = "new", version = garage_version())]
New(KeyNewOpt),
#[structopt(name = "create", version = garage_version())]
Create(KeyNewOpt),
/// Rename key
#[structopt(name = "rename", version = garage_version())]
@ -345,15 +362,18 @@ pub enum KeyOperation {
}
#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub struct KeyOpt {
pub struct KeyInfoOpt {
/// ID or name of the key
pub key_pattern: String,
/// Whether to display the secret key
#[structopt(long = "show-secret")]
pub show_secret: bool,
}
#[derive(Serialize, Deserialize, StructOpt, Debug)]
pub struct KeyNewOpt {
/// Name of the key
#[structopt(long = "name", default_value = "Unnamed key")]
#[structopt(default_value = "Unnamed key")]
pub name: String,
}
@ -397,6 +417,10 @@ pub struct KeyImportOpt {
/// Key name
#[structopt(short = "n", default_value = "Imported key")]
pub name: String,
/// Confirm key import
#[structopt(long = "yes")]
pub yes: bool,
}
#[derive(Serialize, Deserialize, StructOpt, Debug, Clone)]
@ -432,24 +456,30 @@ pub struct RepairOpt {
#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)]
pub enum RepairWhat {
/// Only do a full sync of metadata tables
/// Do a full sync of metadata tables
#[structopt(name = "tables", version = garage_version())]
Tables,
/// Only repair (resync/rebalance) the set of stored blocks
/// Repair (resync/rebalance) the set of stored blocks in the cluster
#[structopt(name = "blocks", version = garage_version())]
Blocks,
/// Only redo the propagation of object deletions to the version table (slow)
/// Repropagate object deletions to the version table
#[structopt(name = "versions", version = garage_version())]
Versions,
/// Only redo the propagation of version deletions to the block ref table (extremely slow)
/// Repropagate object deletions to the multipart upload table
#[structopt(name = "mpu", version = garage_version())]
MultipartUploads,
/// Repropagate version deletions to the block ref table
#[structopt(name = "block_refs", version = garage_version())]
BlockRefs,
/// Verify integrity of all blocks on disc (extremely slow, i/o intensive)
/// Verify integrity of all blocks on disc
#[structopt(name = "scrub", version = garage_version())]
Scrub {
#[structopt(subcommand)]
cmd: ScrubCmd,
},
/// Rebalance data blocks among HDDs on individual nodes
#[structopt(name = "rebalance", version = garage_version())]
Rebalance,
}
#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)]
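
In day-to-day usage, the renamed key subcommands above work along these lines (key names and patterns are placeholders): `garage key create my-app-key` replaces the old `garage key new --name my-app-key`, `garage key info my-app-key` now redacts the secret key unless `--show-secret` is passed, and `garage key import` additionally requires `--yes` to confirm re-importing a previously generated key.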


@ -12,8 +12,9 @@ use garage_block::manager::BlockResyncErrorInfo;
use garage_model::bucket_table::*;
use garage_model::key_table::*;
use garage_model::s3::object_table::{BYTES, OBJECTS, UNFINISHED_UPLOADS};
use garage_model::s3::version_table::Version;
use garage_model::s3::mpu_table::{self, MultipartUpload};
use garage_model::s3::object_table;
use garage_model::s3::version_table::*;
use crate::cli::structs::WorkerListOpt;
@ -135,6 +136,7 @@ pub fn print_bucket_info(
bucket: &Bucket,
relevant_keys: &HashMap<String, Key>,
counters: &HashMap<String, i64>,
mpu_counters: &HashMap<String, i64>,
) {
let key_name = |k| {
relevant_keys
@ -148,7 +150,7 @@ pub fn print_bucket_info(
Deletable::Deleted => println!("Bucket is deleted."),
Deletable::Present(p) => {
let size =
bytesize::ByteSize::b(counters.get(BYTES).cloned().unwrap_or_default() as u64);
bytesize::ByteSize::b(*counters.get(object_table::BYTES).unwrap_or(&0) as u64);
println!(
"\nSize: {} ({})",
size.to_string_as(true),
@ -156,14 +158,22 @@ pub fn print_bucket_info(
);
println!(
"Objects: {}",
counters.get(OBJECTS).cloned().unwrap_or_default()
*counters.get(object_table::OBJECTS).unwrap_or(&0)
);
println!(
"Unfinished uploads (multipart and non-multipart): {}",
*counters.get(object_table::UNFINISHED_UPLOADS).unwrap_or(&0)
);
println!(
"Unfinished multipart uploads: {}",
counters
.get(UNFINISHED_UPLOADS)
.cloned()
.unwrap_or_default()
*mpu_counters.get(mpu_table::UPLOADS).unwrap_or(&0)
);
let mpu_size =
bytesize::ByteSize::b(*mpu_counters.get(mpu_table::BYTES).unwrap_or(&0) as u64);
println!(
"Size of unfinished multipart uploads: {} ({})",
mpu_size.to_string_as(true),
mpu_size.to_string_as(false),
);
println!("\nWebsite access: {}", p.website_config.get().is_some());
@ -390,29 +400,49 @@ pub fn print_block_error_list(el: Vec<BlockResyncErrorInfo>) {
format_table(table);
}
pub fn print_block_info(hash: Hash, refcount: u64, versions: Vec<Result<Version, Uuid>>) {
pub fn print_block_info(
hash: Hash,
refcount: u64,
versions: Vec<Result<Version, Uuid>>,
uploads: Vec<MultipartUpload>,
) {
println!("Block hash: {}", hex::encode(hash.as_slice()));
println!("Refcount: {}", refcount);
println!();
let mut table = vec!["Version\tBucket\tKey\tDeleted".into()];
let mut table = vec!["Version\tBucket\tKey\tMPU\tDeleted".into()];
let mut nondeleted_count = 0;
for v in versions.iter() {
match v {
Ok(ver) => {
table.push(format!(
"{:?}\t{:?}\t{}\t{:?}",
ver.uuid,
ver.bucket_id,
ver.key,
ver.deleted.get()
));
match &ver.backlink {
VersionBacklink::Object { bucket_id, key } => {
table.push(format!(
"{:?}\t{:?}\t{}\t\t{:?}",
ver.uuid,
bucket_id,
key,
ver.deleted.get()
));
}
VersionBacklink::MultipartUpload { upload_id } => {
let upload = uploads.iter().find(|x| x.upload_id == *upload_id);
table.push(format!(
"{:?}\t{:?}\t{}\t{:?}\t{:?}",
ver.uuid,
upload.map(|u| u.bucket_id).unwrap_or_default(),
upload.map(|u| u.key.as_str()).unwrap_or_default(),
upload_id,
ver.deleted.get()
));
}
}
if !ver.deleted.get() {
nondeleted_count += 1;
}
}
Err(vh) => {
table.push(format!("{:?}\t\t\tyes", vh));
table.push(format!("{:?}\t\t\t\tyes", vh));
}
}
}


@ -17,6 +17,9 @@ compile_error!("Either bundled-libs or system-libs Cargo feature must be enabled
#[cfg(all(feature = "bundled-libs", feature = "system-libs"))]
compile_error!("Only one of bundled-libs and system-libs Cargo features must be enabled");
#[cfg(not(any(feature = "lmdb", feature = "sled", feature = "sqlite")))]
compile_error!("Must activate the Cargo feature for at least one DB engine: lmdb, sled or sqlite.");
use std::net::SocketAddr;
use std::path::PathBuf;
@ -173,6 +176,9 @@ async fn main() {
Command::OfflineRepair(repair_opt) => {
repair::offline::offline_repair(opt.config_file, opt.secrets, repair_opt).await
}
Command::ConvertDb(conv_opt) => {
cli::convert_db::do_conversion(conv_opt).map_err(From::from)
}
Command::Node(NodeOperation::NodeId(node_id_opt)) => {
node_id_command(opt.config_file, node_id_opt.quiet)
}


@ -5,11 +5,16 @@ use async_trait::async_trait;
use tokio::sync::watch;
use garage_block::repair::ScrubWorkerCommand;
use garage_model::garage::Garage;
use garage_model::s3::block_ref_table::*;
use garage_model::s3::mpu_table::*;
use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*;
use garage_table::replication::*;
use garage_table::*;
use garage_util::background::*;
use garage_util::error::Error;
use garage_util::migrate::Migrate;
@ -32,11 +37,15 @@ pub async fn launch_online_repair(
}
RepairWhat::Versions => {
info!("Repairing the versions table");
bg.spawn_worker(RepairVersionsWorker::new(garage.clone()));
bg.spawn_worker(TableRepairWorker::new(garage.clone(), RepairVersions));
}
RepairWhat::MultipartUploads => {
info!("Repairing the multipart uploads table");
bg.spawn_worker(TableRepairWorker::new(garage.clone(), RepairMpu));
}
RepairWhat::BlockRefs => {
info!("Repairing the block refs table");
bg.spawn_worker(RepairBlockrefsWorker::new(garage.clone()));
bg.spawn_worker(TableRepairWorker::new(garage.clone(), RepairBlockRefs));
}
RepairWhat::Blocks => {
info!("Repairing the stored blocks");
@ -61,76 +70,82 @@ pub async fn launch_online_repair(
info!("Sending command to scrub worker: {:?}", cmd);
garage.block_manager.send_scrub_command(cmd).await?;
}
RepairWhat::Rebalance => {
info!("Rebalancing the stored blocks among storage locations");
bg.spawn_worker(garage_block::repair::RebalanceWorker::new(
garage.block_manager.clone(),
));
}
}
Ok(())
}
// ----
struct RepairVersionsWorker {
#[async_trait]
trait TableRepair: Send + Sync + 'static {
type T: TableSchema;
fn table(garage: &Garage) -> &Table<Self::T, TableShardedReplication>;
async fn process(
&mut self,
garage: &Garage,
entry: <<Self as TableRepair>::T as TableSchema>::E,
) -> Result<bool, Error>;
}
struct TableRepairWorker<T: TableRepair> {
garage: Arc<Garage>,
pos: Vec<u8>,
counter: usize,
repairs: usize,
inner: T,
}
impl RepairVersionsWorker {
fn new(garage: Arc<Garage>) -> Self {
impl<R: TableRepair> TableRepairWorker<R> {
fn new(garage: Arc<Garage>, inner: R) -> Self {
Self {
garage,
inner,
pos: vec![],
counter: 0,
repairs: 0,
}
}
}
#[async_trait]
impl Worker for RepairVersionsWorker {
impl<R: TableRepair> Worker for TableRepairWorker<R> {
fn name(&self) -> String {
"Version repair worker".into()
format!("{} repair worker", R::T::TABLE_NAME)
}
fn status(&self) -> WorkerStatus {
WorkerStatus {
progress: Some(self.counter.to_string()),
progress: Some(format!("{} ({})", self.counter, self.repairs)),
..Default::default()
}
}
async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
let (item_bytes, next_pos) = match self.garage.version_table.data.store.get_gt(&self.pos)? {
let (item_bytes, next_pos) = match R::table(&self.garage).data.store.get_gt(&self.pos)? {
Some((k, v)) => (v, k),
None => {
info!("repair_versions: finished, done {}", self.counter);
info!(
"{}: finished, done {}, fixed {}",
self.name(),
self.counter,
self.repairs
);
return Ok(WorkerState::Done);
}
};
let version = Version::decode(&item_bytes).ok_or_message("Cannot decode Version")?;
if !version.deleted.get() {
let object = self
.garage
.object_table
.get(&version.bucket_id, &version.key)
.await?;
let version_exists = match object {
Some(o) => o
.versions()
.iter()
.any(|x| x.uuid == version.uuid && x.state != ObjectVersionState::Aborted),
None => false,
};
if !version_exists {
info!("Repair versions: marking version as deleted: {:?}", version);
self.garage
.version_table
.insert(&Version::new(
version.uuid,
version.bucket_id,
version.key,
true,
))
.await?;
}
let entry = <R::T as TableSchema>::E::decode(&item_bytes)
.ok_or_message("Cannot decode table entry")?;
if self.inner.process(&self.garage, entry).await? {
self.repairs += 1;
}
self.counter += 1;
@ -146,77 +161,124 @@ impl Worker for RepairVersionsWorker {
// ----
struct RepairBlockrefsWorker {
garage: Arc<Garage>,
pos: Vec<u8>,
counter: usize,
}
impl RepairBlockrefsWorker {
fn new(garage: Arc<Garage>) -> Self {
Self {
garage,
pos: vec![],
counter: 0,
}
}
}
struct RepairVersions;
#[async_trait]
impl Worker for RepairBlockrefsWorker {
fn name(&self) -> String {
"Block refs repair worker".into()
impl TableRepair for RepairVersions {
type T = VersionTable;
fn table(garage: &Garage) -> &Table<Self::T, TableShardedReplication> {
&garage.version_table
}
fn status(&self) -> WorkerStatus {
WorkerStatus {
progress: Some(self.counter.to_string()),
..Default::default()
}
}
async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
let (item_bytes, next_pos) =
match self.garage.block_ref_table.data.store.get_gt(&self.pos)? {
Some((k, v)) => (v, k),
None => {
info!("repair_block_ref: finished, done {}", self.counter);
return Ok(WorkerState::Done);
}
async fn process(&mut self, garage: &Garage, version: Version) -> Result<bool, Error> {
if !version.deleted.get() {
let ref_exists = match &version.backlink {
VersionBacklink::Object { bucket_id, key } => garage
.object_table
.get(bucket_id, key)
.await?
.map(|o| {
o.versions().iter().any(|x| {
x.uuid == version.uuid && x.state != ObjectVersionState::Aborted
})
})
.unwrap_or(false),
VersionBacklink::MultipartUpload { upload_id } => garage
.mpu_table
.get(upload_id, &EmptyKey)
.await?
.map(|u| !u.deleted.get())
.unwrap_or(false),
};
let block_ref = BlockRef::decode(&item_bytes).ok_or_message("Cannot decode BlockRef")?;
if !ref_exists {
info!("Repair versions: marking version as deleted: {:?}", version);
garage
.version_table
.insert(&Version::new(version.uuid, version.backlink, true))
.await?;
return Ok(true);
}
}
Ok(false)
}
}
// ----
struct RepairBlockRefs;
#[async_trait]
impl TableRepair for RepairBlockRefs {
type T = BlockRefTable;
fn table(garage: &Garage) -> &Table<Self::T, TableShardedReplication> {
&garage.block_ref_table
}
async fn process(&mut self, garage: &Garage, mut block_ref: BlockRef) -> Result<bool, Error> {
if !block_ref.deleted.get() {
let version = self
.garage
let ref_exists = garage
.version_table
.get(&block_ref.version, &EmptyKey)
.await?;
// The version might not exist if it has been GC'ed
let ref_exists = version.map(|v| !v.deleted.get()).unwrap_or(false);
.await?
.map(|v| !v.deleted.get())
.unwrap_or(false);
if !ref_exists {
info!(
"Repair block ref: marking block_ref as deleted: {:?}",
block_ref
);
self.garage
.block_ref_table
.insert(&BlockRef {
block: block_ref.block,
version: block_ref.version,
deleted: true.into(),
})
.await?;
block_ref.deleted.set();
garage.block_ref_table.insert(&block_ref).await?;
return Ok(true);
}
}
self.counter += 1;
self.pos = next_pos;
Ok(WorkerState::Busy)
}
async fn wait_for_work(&mut self) -> WorkerState {
unreachable!()
Ok(false)
}
}
// ----
struct RepairMpu;
#[async_trait]
impl TableRepair for RepairMpu {
type T = MultipartUploadTable;
fn table(garage: &Garage) -> &Table<Self::T, TableShardedReplication> {
&garage.mpu_table
}
async fn process(&mut self, garage: &Garage, mut mpu: MultipartUpload) -> Result<bool, Error> {
if !mpu.deleted.get() {
let ref_exists = garage
.object_table
.get(&mpu.bucket_id, &mpu.key)
.await?
.map(|o| {
o.versions()
.iter()
.any(|x| x.uuid == mpu.upload_id && x.is_uploading(Some(true)))
})
.unwrap_or(false);
if !ref_exists {
info!(
"Repair multipart uploads: marking mpu as deleted: {:?}",
mpu
);
mpu.parts.clear();
mpu.deleted.set();
garage.mpu_table.insert(&mpu).await?;
return Ok(true);
}
}
Ok(false)
}
}


@ -52,6 +52,7 @@ impl Instance {
r#"
metadata_dir = "{path}/meta"
data_dir = "{path}/data"
db_engine = "lmdb"
replication_mode = "1"
@ -141,7 +142,7 @@ api_bind_addr = "127.0.0.1:{admin_port}"
self.command()
.args(["layout", "assign"])
.arg(node_short_id)
.args(["-c", "1", "-z", "unzonned"])
.args(["-c", "1G", "-z", "unzonned"])
.quiet()
.expect_success_status("Could not assign garage node layout");
self.command()
@ -186,9 +187,9 @@ api_bind_addr = "127.0.0.1:{admin_port}"
let mut key = Key::default();
let mut cmd = self.command();
let base = cmd.args(["key", "new"]);
let base = cmd.args(["key", "create"]);
let with_name = match maybe_name {
Some(name) => base.args(["--name", name]),
Some(name) => base.args([name]),
None => base,
};


@ -44,6 +44,7 @@ async fn test_items_and_indices() {
let content = format!("{}: hello world", sk).into_bytes();
let content2 = format!("{}: hello universe", sk).into_bytes();
let content3 = format!("{}: concurrent value", sk).into_bytes();
eprintln!("test iteration {}: {}", i, sk);
// Put initially, no causality token
let res = ctx
@ -89,7 +90,7 @@ async fn test_items_and_indices() {
assert_eq!(res_body, content);
// ReadIndex -- now there should be some stuff
tokio::time::sleep(Duration::from_secs(1)).await;
tokio::time::sleep(Duration::from_millis(100)).await;
let res = ctx
.k2v
.request
@ -158,7 +159,7 @@ async fn test_items_and_indices() {
assert_eq!(res_body, content2);
// ReadIndex -- now there should be some stuff
tokio::time::sleep(Duration::from_secs(1)).await;
tokio::time::sleep(Duration::from_millis(100)).await;
let res = ctx
.k2v
.request
@ -230,7 +231,7 @@ async fn test_items_and_indices() {
);
// ReadIndex -- now there should be some stuff
tokio::time::sleep(Duration::from_secs(1)).await;
tokio::time::sleep(Duration::from_millis(100)).await;
let res = ctx
.k2v
.request
@ -299,7 +300,7 @@ async fn test_items_and_indices() {
assert_eq!(res.status(), StatusCode::NO_CONTENT);
// ReadIndex -- now there should be some stuff
tokio::time::sleep(Duration::from_secs(1)).await;
tokio::time::sleep(Duration::from_millis(100)).await;
let res = ctx
.k2v
.request


@ -5,6 +5,190 @@ use aws_sdk_s3::types::{CompletedMultipartUpload, CompletedPart};
const SZ_5MB: usize = 5 * 1024 * 1024;
const SZ_10MB: usize = 10 * 1024 * 1024;
#[tokio::test]
async fn test_multipart_upload() {
let ctx = common::context();
let bucket = ctx.create_bucket("testmpu");
let u1 = vec![0x11; SZ_5MB];
let u2 = vec![0x22; SZ_5MB];
let u3 = vec![0x33; SZ_5MB];
let u4 = vec![0x44; SZ_5MB];
let u5 = vec![0x55; SZ_5MB];
let up = ctx
.client
.create_multipart_upload()
.bucket(&bucket)
.key("a")
.send()
.await
.unwrap();
assert!(up.upload_id.is_some());
let uid = up.upload_id.as_ref().unwrap();
let p3 = ctx
.client
.upload_part()
.bucket(&bucket)
.key("a")
.upload_id(uid)
.part_number(3)
.body(ByteStream::from(u3.clone()))
.send()
.await
.unwrap();
let _p1 = ctx
.client
.upload_part()
.bucket(&bucket)
.key("a")
.upload_id(uid)
.part_number(1)
.body(ByteStream::from(u1))
.send()
.await
.unwrap();
let _p4 = ctx
.client
.upload_part()
.bucket(&bucket)
.key("a")
.upload_id(uid)
.part_number(4)
.body(ByteStream::from(u4))
.send()
.await
.unwrap();
let p1bis = ctx
.client
.upload_part()
.bucket(&bucket)
.key("a")
.upload_id(uid)
.part_number(1)
.body(ByteStream::from(u2.clone()))
.send()
.await
.unwrap();
let p6 = ctx
.client
.upload_part()
.bucket(&bucket)
.key("a")
.upload_id(uid)
.part_number(6)
.body(ByteStream::from(u5.clone()))
.send()
.await
.unwrap();
{
let r = ctx
.client
.list_parts()
.bucket(&bucket)
.key("a")
.upload_id(uid)
.send()
.await
.unwrap();
assert_eq!(r.parts.unwrap().len(), 4);
}
let cmp = CompletedMultipartUpload::builder()
.parts(
CompletedPart::builder()
.part_number(1)
.e_tag(p1bis.e_tag.unwrap())
.build(),
)
.parts(
CompletedPart::builder()
.part_number(3)
.e_tag(p3.e_tag.unwrap())
.build(),
)
.parts(
CompletedPart::builder()
.part_number(6)
.e_tag(p6.e_tag.unwrap())
.build(),
)
.build();
ctx.client
.complete_multipart_upload()
.bucket(&bucket)
.key("a")
.upload_id(uid)
.multipart_upload(cmp)
.send()
.await
.unwrap();
// The multipart upload must not appear anymore
assert!(ctx
.client
.list_parts()
.bucket(&bucket)
.key("a")
.upload_id(uid)
.send()
.await
.is_err());
{
// The object must appear as a regular object
let r = ctx
.client
.head_object()
.bucket(&bucket)
.key("a")
.send()
.await
.unwrap();
assert_eq!(r.content_length, (SZ_5MB * 3) as i64);
}
{
let o = ctx
.client
.get_object()
.bucket(&bucket)
.key("a")
.send()
.await
.unwrap();
assert_bytes_eq!(o.body, &[&u2[..], &u3[..], &u5[..]].concat());
}
{
for (part_number, data) in [(1, &u2), (2, &u3), (3, &u5)] {
let o = ctx
.client
.get_object()
.bucket(&bucket)
.key("a")
.part_number(part_number)
.send()
.await
.unwrap();
eprintln!("get_object with part_number = {}", part_number);
assert_eq!(o.content_length, SZ_5MB as i64);
assert_bytes_eq!(o.body, data);
}
}
}
#[tokio::test]
async fn test_uploadlistpart() {
let ctx = common::context();
@ -65,7 +249,8 @@ async fn test_uploadlistpart() {
let ps = r.parts.unwrap();
assert_eq!(ps.len(), 1);
let fp = ps.iter().find(|x| x.part_number == 2).unwrap();
assert_eq!(ps[0].part_number, 2);
let fp = &ps[0];
assert!(fp.last_modified.is_some());
assert_eq!(
fp.e_tag.as_ref().unwrap(),
@ -100,13 +285,24 @@ async fn test_uploadlistpart() {
let ps = r.parts.unwrap();
assert_eq!(ps.len(), 2);
let fp = ps.iter().find(|x| x.part_number == 1).unwrap();
assert_eq!(ps[0].part_number, 1);
let fp = &ps[0];
assert!(fp.last_modified.is_some());
assert_eq!(
fp.e_tag.as_ref().unwrap(),
"\"3c484266f9315485694556e6c693bfa2\""
);
assert_eq!(fp.size, SZ_5MB as i64);
assert_eq!(ps[1].part_number, 2);
let sp = &ps[1];
assert!(sp.last_modified.is_some());
assert_eq!(
sp.e_tag.as_ref().unwrap(),
"\"3366bb9dcf710d6801b5926467d02e19\""
);
assert_eq!(sp.size, SZ_5MB as i64);
}
{
@ -123,12 +319,19 @@ async fn test_uploadlistpart() {
.unwrap();
assert!(r.part_number_marker.is_none());
assert!(r.next_part_number_marker.is_some());
assert_eq!(r.next_part_number_marker.as_deref(), Some("1"));
assert_eq!(r.max_parts, 1_i32);
assert!(r.is_truncated);
assert_eq!(r.key.unwrap(), "a");
assert_eq!(r.upload_id.unwrap().as_str(), uid.as_str());
assert_eq!(r.parts.unwrap().len(), 1);
let parts = r.parts.unwrap();
assert_eq!(parts.len(), 1);
let fp = &parts[0];
assert_eq!(fp.part_number, 1);
assert_eq!(
fp.e_tag.as_ref().unwrap(),
"\"3c484266f9315485694556e6c693bfa2\""
);
let r2 = ctx
.client
@ -147,10 +350,18 @@ async fn test_uploadlistpart() {
r.next_part_number_marker.as_ref().unwrap()
);
assert_eq!(r2.max_parts, 1_i32);
assert!(r2.is_truncated);
assert_eq!(r2.key.unwrap(), "a");
assert_eq!(r2.upload_id.unwrap().as_str(), uid.as_str());
assert_eq!(r2.parts.unwrap().len(), 1);
let parts = r2.parts.unwrap();
assert_eq!(parts.len(), 1);
let fp = &parts[0];
assert_eq!(fp.part_number, 2);
assert_eq!(
fp.e_tag.as_ref().unwrap(),
"\"3366bb9dcf710d6801b5926467d02e19\""
);
//assert!(r2.is_truncated); // WHY? (this was the test before)
assert!(!r2.is_truncated);
}
let cmp = CompletedMultipartUpload::builder()


@ -1,6 +1,6 @@
[package]
name = "garage_model"
version = "0.8.4"
version = "0.9.0"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
@ -23,6 +23,7 @@ garage_util.workspace = true
async-trait = "0.1.7"
arc-swap = "1.0"
blake2 = "0.10"
chrono = "0.4"
err-derive = "0.3"
hex = "0.4"
base64 = "0.21"
@ -38,10 +39,10 @@ futures-util = "0.3"
tokio = { version = "1.0", default-features = false, features = ["rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] }
opentelemetry = "0.17"
netapp = "0.5"
netapp = "0.10"
[features]
default = [ "sled" ]
default = [ "sled", "lmdb", "sqlite" ]
k2v = [ "garage_util/k2v" ]
lmdb = [ "garage_db/lmdb" ]
sled = [ "garage_db/sled" ]


@ -48,6 +48,9 @@ mod v08 {
pub website_config: crdt::Lww<Option<WebsiteConfig>>,
/// CORS rules
pub cors_config: crdt::Lww<Option<Vec<CorsRule>>>,
/// Lifecycle configuration
#[serde(default)]
pub lifecycle_config: crdt::Lww<Option<Vec<LifecycleRule>>>,
/// Bucket quotas
#[serde(default)]
pub quotas: crdt::Lww<BucketQuotas>,
@ -69,6 +72,42 @@ mod v08 {
pub expose_headers: Vec<String>,
}
/// Lifecycle configuration rule
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct LifecycleRule {
/// The ID of the rule
pub id: Option<String>,
/// Whether the rule is active
pub enabled: bool,
/// The filter to check whether rule applies to a given object
pub filter: LifecycleFilter,
/// Number of days after which incomplete multipart uploads are aborted
pub abort_incomplete_mpu_days: Option<usize>,
/// Expiration policy for stored objects
pub expiration: Option<LifecycleExpiration>,
}
/// A lifecycle filter is a set of conditions that must all be true.
/// For each condition, if it is None, it is not verified (always true),
/// and if it is Some(x), then it is verified for value x
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize, Default)]
pub struct LifecycleFilter {
/// If Some(x), object key has to start with prefix x
pub prefix: Option<String>,
/// If Some(x), object size has to be more than x
pub size_gt: Option<u64>,
/// If Some(x), object size has to be less than x
pub size_lt: Option<u64>,
}
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub enum LifecycleExpiration {
/// Objects expire x days after they were created
AfterDays(usize),
/// Objects expire at date x (must be in yyyy-mm-dd format)
AtDate(String),
}
#[derive(Default, PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
pub struct BucketQuotas {
/// Maximum size in bytes (bucket size = sum of sizes of objects in the bucket)
@ -88,7 +127,7 @@ impl AutoCrdt for BucketQuotas {
impl BucketParams {
/// Create an empty BucketParams with no authorized keys and no website accesss
pub fn new() -> Self {
fn new() -> Self {
BucketParams {
creation_date: now_msec(),
authorized_keys: crdt::Map::new(),
@ -96,6 +135,7 @@ impl BucketParams {
local_aliases: crdt::LwwMap::new(),
website_config: crdt::Lww::new(None),
cors_config: crdt::Lww::new(None),
lifecycle_config: crdt::Lww::new(None),
quotas: crdt::Lww::new(BucketQuotas::default()),
}
}
@ -111,10 +151,25 @@ impl Crdt for BucketParams {
self.website_config.merge(&o.website_config);
self.cors_config.merge(&o.cors_config);
self.lifecycle_config.merge(&o.lifecycle_config);
self.quotas.merge(&o.quotas);
}
}
pub fn parse_lifecycle_date(date: &str) -> Result<chrono::NaiveDate, &'static str> {
use chrono::prelude::*;
if let Ok(datetime) = NaiveDateTime::parse_from_str(date, "%Y-%m-%dT%H:%M:%SZ") {
if datetime.time() == NaiveTime::MIN {
Ok(datetime.date())
} else {
Err("date must be at midnight")
}
} else {
NaiveDate::parse_from_str(date, "%Y-%m-%d").map_err(|_| "date has invalid format")
}
}
impl Default for Bucket {
fn default() -> Self {
Self::new()
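
To make the new lifecycle types and the date helper above more concrete, here is an illustrative sketch that is not part of the changeset (the rule contents and dates are invented for the example): it builds a rule expiring objects under a prefix and shows which formats `parse_lifecycle_date` accepts.

// Sketch only: a rule expiring objects under "logs/" after 30 days,
// and aborting their incomplete multipart uploads after 7 days.
let rule = LifecycleRule {
    id: Some("expire-logs".into()),
    enabled: true,
    filter: LifecycleFilter {
        prefix: Some("logs/".into()),
        ..Default::default()
    },
    abort_incomplete_mpu_days: Some(7),
    expiration: Some(LifecycleExpiration::AfterDays(30)),
};
assert!(rule.enabled);

// parse_lifecycle_date accepts plain dates or midnight timestamps only
assert!(parse_lifecycle_date("2025-01-01").is_ok());
assert!(parse_lifecycle_date("2025-01-01T00:00:00Z").is_ok());
assert!(parse_lifecycle_date("2025-01-01T12:30:00Z").is_err()); // not midnight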


@ -7,6 +7,7 @@ use garage_db as db;
use garage_util::background::*;
use garage_util::config::*;
use garage_util::error::*;
use garage_util::persister::PersisterShared;
use garage_rpc::replication_mode::ReplicationMode;
use garage_rpc::system::System;
@ -17,6 +18,8 @@ use garage_table::replication::TableShardedReplication;
use garage_table::*;
use crate::s3::block_ref_table::*;
use crate::s3::lifecycle_worker;
use crate::s3::mpu_table::*;
use crate::s3::object_table::*;
use crate::s3::version_table::*;
@ -57,11 +60,18 @@ pub struct Garage {
pub object_table: Arc<Table<ObjectTable, TableShardedReplication>>,
/// Counting table containing object counters
pub object_counter_table: Arc<IndexCounter<Object>>,
/// Table containing S3 multipart uploads
pub mpu_table: Arc<Table<MultipartUploadTable, TableShardedReplication>>,
/// Counting table containing multipart object counters
pub mpu_counter_table: Arc<IndexCounter<MultipartUpload>>,
/// Table containing S3 object versions
pub version_table: Arc<Table<VersionTable, TableShardedReplication>>,
/// Table containing S3 block references (not blocks themselves)
pub block_ref_table: Arc<Table<BlockRefTable, TableShardedReplication>>,
/// Persister for lifecycle worker info
pub lifecycle_persister: PersisterShared<lifecycle_worker::LifecycleWorkerPersisted>,
#[cfg(feature = "k2v")]
pub k2v: GarageK2V,
}
@ -82,8 +92,22 @@ impl Garage {
// Create meta dir and data dir if they don't exist already
std::fs::create_dir_all(&config.metadata_dir)
.ok_or_message("Unable to create Garage metadata directory")?;
std::fs::create_dir_all(&config.data_dir)
.ok_or_message("Unable to create Garage data directory")?;
match &config.data_dir {
DataDirEnum::Single(data_dir) => {
std::fs::create_dir_all(data_dir).ok_or_message(format!(
"Unable to create Garage data directory: {}",
data_dir.to_string_lossy()
))?;
}
DataDirEnum::Multiple(data_dirs) => {
for dir in data_dirs {
std::fs::create_dir_all(&dir.path).ok_or_message(format!(
"Unable to create Garage data directory: {}",
dir.path.to_string_lossy()
))?;
}
}
}
info!("Opening database...");
let mut db_path = config.metadata_dir.clone();
@ -91,6 +115,11 @@ impl Garage {
// ---- Sled DB ----
#[cfg(feature = "sled")]
"sled" => {
if config.metadata_fsync {
return Err(Error::Message(format!(
"`metadata_fsync = true` is not supported with the Sled database engine"
)));
}
db_path.push("db");
info!("Opening Sled database at: {}", db_path.display());
let db = db::sled_adapter::sled::Config::default()
@ -109,6 +138,15 @@ impl Garage {
db_path.push("db.sqlite");
info!("Opening Sqlite database at: {}", db_path.display());
let db = db::sqlite_adapter::rusqlite::Connection::open(db_path)
.and_then(|db| {
db.pragma_update(None, "journal_mode", &"WAL")?;
if config.metadata_fsync {
db.pragma_update(None, "synchronous", &"NORMAL")?;
} else {
db.pragma_update(None, "synchronous", &"OFF")?;
}
Ok(db)
})
.ok_or_message("Unable to open sqlite DB")?;
db::sqlite_adapter::SqliteDb::init(db)
}
@ -136,8 +174,10 @@ impl Garage {
env_builder.max_readers(500);
env_builder.map_size(map_size);
unsafe {
env_builder.flag(heed::flags::Flags::MdbNoSync);
env_builder.flag(heed::flags::Flags::MdbNoMetaSync);
if !config.metadata_fsync {
env_builder.flag(heed::flags::Flags::MdbNoSync);
}
}
let db = match env_builder.open(&db_path) {
Err(heed::Error::Io(e)) if e.kind() == std::io::ErrorKind::OutOfMemory => {
@ -182,6 +222,9 @@ impl Garage {
let replication_mode = ReplicationMode::parse(&config.replication_mode)
.ok_or_message("Invalid replication_mode in config file.")?;
info!("Initialize background variable system...");
let mut bg_vars = vars::BgVars::new();
info!("Initialize membership management system...");
let system = System::new(network_key, replication_mode, &config)?;
@@ -208,10 +251,12 @@ impl Garage {
let block_manager = BlockManager::new(
&db,
config.data_dir.clone(),
config.data_fsync,
config.compression_level,
data_rep_param,
system.clone(),
);
)?;
block_manager.register_bg_vars(&mut bg_vars);
// ---- admin tables ----
info!("Initialize bucket_table...");
@@ -248,6 +293,20 @@ impl Garage {
&db,
);
info!("Initialize multipart upload counter table...");
let mpu_counter_table = IndexCounter::new(system.clone(), meta_rep_param.clone(), &db);
info!("Initialize multipart upload table...");
let mpu_table = Table::new(
MultipartUploadTable {
version_table: version_table.clone(),
mpu_counter_table: mpu_counter_table.clone(),
},
meta_rep_param.clone(),
system.clone(),
&db,
);
info!("Initialize object counter table...");
let object_counter_table = IndexCounter::new(system.clone(), meta_rep_param.clone(), &db);
@@ -256,6 +315,7 @@ impl Garage {
let object_table = Table::new(
ObjectTable {
version_table: version_table.clone(),
mpu_table: mpu_table.clone(),
object_counter_table: object_counter_table.clone(),
},
meta_rep_param.clone(),
@@ -263,14 +323,15 @@ impl Garage {
&db,
);
info!("Load lifecycle worker state...");
let lifecycle_persister =
PersisterShared::new(&system.metadata_dir, "lifecycle_worker_state");
lifecycle_worker::register_bg_vars(&lifecycle_persister, &mut bg_vars);
// ---- K2V ----
#[cfg(feature = "k2v")]
let k2v = GarageK2V::new(system.clone(), &db, meta_rep_param);
// Initialize bg vars
let mut bg_vars = vars::BgVars::new();
block_manager.register_bg_vars(&mut bg_vars);
// -- done --
Ok(Arc::new(Self {
config,
@@ -284,14 +345,17 @@ impl Garage {
key_table,
object_table,
object_counter_table,
mpu_table,
mpu_counter_table,
version_table,
block_ref_table,
lifecycle_persister,
#[cfg(feature = "k2v")]
k2v,
}))
}
pub fn spawn_workers(&self, bg: &BackgroundRunner) {
pub fn spawn_workers(self: &Arc<Self>, bg: &BackgroundRunner) {
self.block_manager.spawn_workers(bg);
self.bucket_table.spawn_workers(bg);
@@ -300,9 +364,16 @@ impl Garage {
self.object_table.spawn_workers(bg);
self.object_counter_table.spawn_workers(bg);
self.mpu_table.spawn_workers(bg);
self.mpu_counter_table.spawn_workers(bg);
self.version_table.spawn_workers(bg);
self.block_ref_table.spawn_workers(bg);
bg.spawn_worker(lifecycle_worker::LifecycleWorker::new(
self.clone(),
self.lifecycle_persister.clone(),
));
#[cfg(feature = "k2v")]
self.k2v.spawn_workers(bg);
}


@@ -478,7 +478,9 @@ impl<'a> BucketHelper<'a> {
// ----
/// Deletes all incomplete multipart uploads that are older than a certain time.
/// Returns the number of uploads aborted
/// Returns the number of uploads aborted.
/// This will also include non-multipart uploads, which may be lingering
/// after a node crash.
pub async fn cleanup_incomplete_uploads(
&self,
bucket_id: &Uuid,
@@ -496,7 +498,9 @@ impl<'a> BucketHelper<'a> {
.get_range(
bucket_id,
start,
Some(ObjectFilter::IsUploading),
Some(ObjectFilter::IsUploading {
check_multipart: None,
}),
1000,
EnumerationOrder::Forward,
)
@@ -508,7 +512,7 @@ impl<'a> BucketHelper<'a> {
let aborted_versions = object
.versions()
.iter()
.filter(|v| v.is_uploading() && v.timestamp < older_than)
.filter(|v| v.is_uploading(None) && v.timestamp < older_than)
.map(|v| ObjectVersion {
state: ObjectVersionState::Aborted,
uuid: v.uuid,
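
To illustrate the semantics described in the doc comment above (aborting both multipart and non-multipart uploads older than a cutoff), here is a hypothetical call site. The bucket_helper() accessor, the Duration-typed cutoff argument and the import paths are assumptions that do not appear in this diff:

use std::time::Duration;

use garage_util::data::Uuid;   // assumed path for Garage's Uuid type
use garage_util::error::Error;
use garage_model::garage::Garage;

// Abort every upload in the bucket that has been in the Uploading state for
// more than a day, whether or not it is a multipart upload.
async fn abort_stale_uploads(garage: &Garage, bucket_id: &Uuid) -> Result<(), Error> {
    let aborted = garage
        .bucket_helper()
        .cleanup_incomplete_uploads(bucket_id, Duration::from_secs(24 * 3600))
        .await?;
    log::info!("Aborted {} stale uploads", aborted);
    Ok(())
}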


@@ -294,7 +294,7 @@ impl<T: CountedItem> IndexCounter<T> {
let counter_entry = local_counter.into_counter_entry(self.this_node);
self.local_counter
.db()
.transaction(|mut tx| self.table.queue_insert(&mut tx, &counter_entry))?;
.transaction(|tx| self.table.queue_insert(tx, &counter_entry))?;
next_start = Some(local_counter_k);
}
@@ -360,7 +360,7 @@ impl<T: CountedItem> IndexCounter<T> {
let counter_entry = local_counter.into_counter_entry(self.this_node);
self.local_counter
.db()
.transaction(|mut tx| self.table.queue_insert(&mut tx, &counter_entry))?;
.transaction(|tx| self.table.queue_insert(tx, &counter_entry))?;
next_start = Some(counted_entry_k);
}


@@ -149,11 +149,19 @@ impl Key {
}
/// Import a key from its parts
pub fn import(key_id: &str, secret_key: &str, name: &str) -> Self {
Self {
pub fn import(key_id: &str, secret_key: &str, name: &str) -> Result<Self, &'static str> {
if key_id.len() != 26 || &key_id[..2] != "GK" || hex::decode(&key_id[2..]).is_err() {
return Err("The specified key ID is not a valid Garage key ID (starts with `GK`, followed by 12 hex-encoded bytes)");
}
if secret_key.len() != 64 || hex::decode(&secret_key).is_err() {
return Err("The specified secret key is not a valid Garage secret key (composed of 32 hex-encoded bytes)");
}
Ok(Self {
key_id: key_id.to_string(),
state: crdt::Deletable::present(KeyParams::new(secret_key, name)),
}
})
}
/// Create a new Key which can be merged to mark an existing key deleted

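Because import is now fallible, callers must handle the Result. A hypothetical usage, with placeholder key material in the format required by the checks above ("GK" plus 12 hex-encoded bytes for the key ID, 32 hex-encoded bytes for the secret); the module path is an assumption:

use garage_model::key_table::Key;

fn import_example() {
    match Key::import(
        "GK31c2f218a2e44f485b94239e", // placeholder: "GK" + 12 hex-encoded bytes
        "b892c0665f0ada8a4755dae98baa3b133590e11dae3bcc1f9d769d67f16c3835", // placeholder: 32 hex-encoded bytes
        "imported-key",
    ) {
        Ok(key) => println!("imported key {}", key.key_id),
        Err(msg) => eprintln!("key not imported: {}", msg),
    }
}
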
Some files were not shown because too many files have changed in this diff.