Compare commits
370 commits: talk-fosde...main
.drone.yml (deleted, 300 lines)

```yaml
---
kind: pipeline
name: default

node:
  nix-daemon: 1

steps:
  - name: check formatting
    image: nixpkgs/nix:nixos-22.05
    commands:
      - nix-shell --attr rust --run "cargo fmt -- --check"

  - name: build
    image: nixpkgs/nix:nixos-22.05
    commands:
      - nix-build --no-build-output --attr clippy.amd64 --argstr git_version ${DRONE_TAG:-$DRONE_COMMIT}

  - name: unit + func tests
    image: nixpkgs/nix:nixos-22.05
    environment:
      GARAGE_TEST_INTEGRATION_EXE: result-bin/bin/garage
      GARAGE_TEST_INTEGRATION_PATH: tmp-garage-integration
    commands:
      - nix-build --no-build-output --attr clippy.amd64 --argstr git_version ${DRONE_TAG:-$DRONE_COMMIT}
      - nix-build --no-build-output --attr test.amd64
      - ./result/bin/garage_db-*
      - ./result/bin/garage_api-*
      - ./result/bin/garage_model-*
      - ./result/bin/garage_rpc-*
      - ./result/bin/garage_table-*
      - ./result/bin/garage_util-*
      - ./result/bin/garage_web-*
      - ./result/bin/garage-*
      - ./result/bin/integration-* || (cat tmp-garage-integration/stderr.log; false)
      - rm result
      - rm -rv tmp-garage-integration

  - name: integration tests
    image: nixpkgs/nix:nixos-22.05
    commands:
      - nix-build --no-build-output --attr clippy.amd64 --argstr git_version ${DRONE_TAG:-$DRONE_COMMIT}
      - nix-shell --attr integration --run ./script/test-smoke.sh || (cat /tmp/garage.log; false)

trigger:
  event:
    - custom
    - push
    - pull_request
    - tag
    - cron

---
kind: pipeline
type: docker
name: release-linux-amd64

node:
  nix-daemon: 1

steps:
  - name: build
    image: nixpkgs/nix:nixos-22.05
    commands:
      - nix-build --no-build-output --attr pkgs.amd64.release --argstr git_version ${DRONE_TAG:-$DRONE_COMMIT}
      - nix-shell --attr rust --run "./script/not-dynamic.sh result-bin/bin/garage"

  - name: integration tests
    image: nixpkgs/nix:nixos-22.05
    commands:
      - nix-shell --attr integration --run ./script/test-smoke.sh || (cat /tmp/garage.log; false)

  - name: upgrade tests
    image: nixpkgs/nix:nixos-22.05
    commands:
      - nix-shell --attr integration --run "./script/test-upgrade.sh v0.8.4 x86_64-unknown-linux-musl" || (cat /tmp/garage.log; false)

  - name: push static binary
    image: nixpkgs/nix:nixos-22.05
    environment:
      AWS_ACCESS_KEY_ID:
        from_secret: garagehq_aws_access_key_id
      AWS_SECRET_ACCESS_KEY:
        from_secret: garagehq_aws_secret_access_key
      TARGET: "x86_64-unknown-linux-musl"
    commands:
      - nix-shell --attr release --run "to_s3"

  - name: docker build and publish
    image: nixpkgs/nix:nixos-22.05
    environment:
      DOCKER_AUTH:
        from_secret: docker_auth
      DOCKER_PLATFORM: "linux/amd64"
      CONTAINER_NAME: "dxflrs/amd64_garage"
      HOME: "/kaniko"
    commands:
      - mkdir -p /kaniko/.docker
      - echo $DOCKER_AUTH > /kaniko/.docker/config.json
      - export CONTAINER_TAG=${DRONE_TAG:-$DRONE_COMMIT}
      - nix-shell --attr release --run "to_docker"

trigger:
  event:
    - promote
    - cron

---
kind: pipeline
type: docker
name: release-linux-i386

node:
  nix-daemon: 1

steps:
  - name: build
    image: nixpkgs/nix:nixos-22.05
    commands:
      - nix-build --no-build-output --attr pkgs.i386.release --argstr git_version ${DRONE_TAG:-$DRONE_COMMIT}
      - nix-shell --attr rust --run "./script/not-dynamic.sh result-bin/bin/garage"

  - name: integration tests
    image: nixpkgs/nix:nixos-22.05
    commands:
      - nix-shell --attr integration --run ./script/test-smoke.sh || (cat /tmp/garage.log; false)

  - name: upgrade tests
    image: nixpkgs/nix:nixos-22.05
    commands:
      - nix-shell --attr integration --run "./script/test-upgrade.sh v0.8.4 i686-unknown-linux-musl" || (cat /tmp/garage.log; false)

  - name: push static binary
    image: nixpkgs/nix:nixos-22.05
    environment:
      AWS_ACCESS_KEY_ID:
        from_secret: garagehq_aws_access_key_id
      AWS_SECRET_ACCESS_KEY:
        from_secret: garagehq_aws_secret_access_key
      TARGET: "i686-unknown-linux-musl"
    commands:
      - nix-shell --attr release --run "to_s3"

  - name: docker build and publish
    image: nixpkgs/nix:nixos-22.05
    environment:
      DOCKER_AUTH:
        from_secret: docker_auth
      DOCKER_PLATFORM: "linux/386"
      CONTAINER_NAME: "dxflrs/386_garage"
      HOME: "/kaniko"
    commands:
      - mkdir -p /kaniko/.docker
      - echo $DOCKER_AUTH > /kaniko/.docker/config.json
      - export CONTAINER_TAG=${DRONE_TAG:-$DRONE_COMMIT}
      - nix-shell --attr release --run "to_docker"

trigger:
  event:
    - promote
    - cron

---
kind: pipeline
type: docker
name: release-linux-arm64

node:
  nix-daemon: 1

steps:
  - name: build
    image: nixpkgs/nix:nixos-22.05
    commands:
      - nix-build --no-build-output --attr pkgs.arm64.release --argstr git_version ${DRONE_TAG:-$DRONE_COMMIT}
      - nix-shell --attr rust --run "./script/not-dynamic.sh result-bin/bin/garage"

  - name: push static binary
    image: nixpkgs/nix:nixos-22.05
    environment:
      AWS_ACCESS_KEY_ID:
        from_secret: garagehq_aws_access_key_id
      AWS_SECRET_ACCESS_KEY:
        from_secret: garagehq_aws_secret_access_key
      TARGET: "aarch64-unknown-linux-musl"
    commands:
      - nix-shell --attr release --run "to_s3"

  - name: docker build and publish
    image: nixpkgs/nix:nixos-22.05
    environment:
      DOCKER_AUTH:
        from_secret: docker_auth
      DOCKER_PLATFORM: "linux/arm64"
      CONTAINER_NAME: "dxflrs/arm64_garage"
      HOME: "/kaniko"
    commands:
      - mkdir -p /kaniko/.docker
      - echo $DOCKER_AUTH > /kaniko/.docker/config.json
      - export CONTAINER_TAG=${DRONE_TAG:-$DRONE_COMMIT}
      - nix-shell --attr release --run "to_docker"

trigger:
  event:
    - promote
    - cron

---
kind: pipeline
type: docker
name: release-linux-arm

node:
  nix-daemon: 1

steps:
  - name: build
    image: nixpkgs/nix:nixos-22.05
    commands:
      - nix-build --no-build-output --attr pkgs.arm.release --argstr git_version ${DRONE_TAG:-$DRONE_COMMIT}
      - nix-shell --attr rust --run "./script/not-dynamic.sh result-bin/bin/garage"

  - name: push static binary
    image: nixpkgs/nix:nixos-22.05
    environment:
      AWS_ACCESS_KEY_ID:
        from_secret: garagehq_aws_access_key_id
      AWS_SECRET_ACCESS_KEY:
        from_secret: garagehq_aws_secret_access_key
      TARGET: "armv6l-unknown-linux-musleabihf"
    commands:
      - nix-shell --attr release --run "to_s3"

  - name: docker build and publish
    image: nixpkgs/nix:nixos-22.05
    environment:
      DOCKER_AUTH:
        from_secret: docker_auth
      DOCKER_PLATFORM: "linux/arm"
      CONTAINER_NAME: "dxflrs/arm_garage"
      HOME: "/kaniko"
    commands:
      - mkdir -p /kaniko/.docker
      - echo $DOCKER_AUTH > /kaniko/.docker/config.json
      - export CONTAINER_TAG=${DRONE_TAG:-$DRONE_COMMIT}
      - nix-shell --attr release --run "to_docker"

trigger:
  event:
    - promote
    - cron

---
kind: pipeline
type: docker
name: refresh-release-page

node:
  nix-daemon: 1

steps:
  - name: multiarch-docker
    image: nixpkgs/nix:nixos-22.05
    environment:
      DOCKER_AUTH:
        from_secret: docker_auth
      HOME: "/root"
    commands:
      - mkdir -p /root/.docker
      - echo $DOCKER_AUTH > /root/.docker/config.json
      - export CONTAINER_TAG=${DRONE_TAG:-$DRONE_COMMIT}
      - nix-shell --attr release --run "multiarch_docker"
  - name: refresh-index
    image: nixpkgs/nix:nixos-22.05
    environment:
      AWS_ACCESS_KEY_ID:
        from_secret: garagehq_aws_access_key_id
      AWS_SECRET_ACCESS_KEY:
        from_secret: garagehq_aws_secret_access_key
    commands:
      - mkdir -p /etc/nix && cp nix/nix.conf /etc/nix/nix.conf
      - nix-shell --attr release --run "refresh_index"

depends_on:
  - release-linux-amd64
  - release-linux-i386
  - release-linux-arm64
  - release-linux-arm

trigger:
  event:
    - promote
    - cron

---
kind: signature
hmac: 0c4b57eb4b27b7c6a6ff21ab87f0767fe3eb90f5d95d5cbcdccf794e9d2a5d86

...
```
.woodpecker/debug.yaml (new file, 47 lines)

```yaml
when:
  event:
    - push
    - tag
    - pull_request
    - deployment
    - cron
    - manual

steps:
  - name: check formatting
    image: nixpkgs/nix:nixos-22.05
    commands:
      - nix-shell --attr devShell --run "cargo fmt -- --check"

  - name: build
    image: nixpkgs/nix:nixos-22.05
    commands:
      - nix-build --no-build-output --attr clippy.amd64 --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}

  - name: unit + func tests
    image: nixpkgs/nix:nixos-22.05
    environment:
      GARAGE_TEST_INTEGRATION_EXE: result-bin/bin/garage
      GARAGE_TEST_INTEGRATION_PATH: tmp-garage-integration
    commands:
      - nix-build --no-build-output --attr clippy.amd64 --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
      - nix-build --no-build-output --attr test.amd64
      - ./result/bin/garage_db-*
      - ./result/bin/garage_api-*
      - ./result/bin/garage_model-*
      - ./result/bin/garage_rpc-*
      - ./result/bin/garage_table-*
      - ./result/bin/garage_util-*
      - ./result/bin/garage_web-*
      - ./result/bin/garage-*
      - GARAGE_TEST_INTEGRATION_DB_ENGINE=lmdb ./result/bin/integration-* || (cat tmp-garage-integration/stderr.log; false)
      - nix-shell --attr ci --run "killall -9 garage" || true
      - GARAGE_TEST_INTEGRATION_DB_ENGINE=sqlite ./result/bin/integration-* || (cat tmp-garage-integration/stderr.log; false)
      - rm result
      - rm -rv tmp-garage-integration

  - name: integration tests
    image: nixpkgs/nix:nixos-22.05
    commands:
      - nix-build --no-build-output --attr clippy.amd64 --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
      - nix-shell --attr ci --run ./script/test-smoke.sh || (cat /tmp/garage.log; false)
```
.woodpecker/publish.yaml (new file, 29 lines)

```yaml
when:
  event:
    - deployment
    - cron

depends_on:
  - release

steps:
  - name: refresh-index
    image: nixpkgs/nix:nixos-22.05
    secrets:
      - source: garagehq_aws_access_key_id
        target: AWS_ACCESS_KEY_ID
      - source: garagehq_aws_secret_access_key
        target: AWS_SECRET_ACCESS_KEY
    commands:
      - mkdir -p /etc/nix && cp nix/nix.conf /etc/nix/nix.conf
      - nix-shell --attr ci --run "refresh_index"

  - name: multiarch-docker
    image: nixpkgs/nix:nixos-22.05
    secrets:
      - docker_auth
    commands:
      - mkdir -p /root/.docker
      - echo $DOCKER_AUTH > /root/.docker/config.json
      - export CONTAINER_TAG=${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
      - nix-shell --attr ci --run "multiarch_docker"
```
.woodpecker/release.yaml (new file, 70 lines)

```yaml
when:
  event:
    - deployment
    - cron

matrix:
  include:
    - ARCH: amd64
      TARGET: x86_64-unknown-linux-musl
    - ARCH: i386
      TARGET: i686-unknown-linux-musl
    - ARCH: arm64
      TARGET: aarch64-unknown-linux-musl
    - ARCH: arm
      TARGET: armv6l-unknown-linux-musleabihf

steps:
  - name: build
    image: nixpkgs/nix:nixos-22.05
    commands:
      - nix-build --no-build-output --attr pkgs.${ARCH}.release --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}

  - name: check is static binary
    image: nixpkgs/nix:nixos-22.05
    commands:
      - nix-build --no-build-output --attr pkgs.${ARCH}.release --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
      - nix-shell --attr ci --run "./script/not-dynamic.sh result-bin/bin/garage"

  - name: integration tests
    image: nixpkgs/nix:nixos-22.05
    commands:
      - nix-shell --attr ci --run ./script/test-smoke.sh || (cat /tmp/garage.log; false)
    when:
      - matrix:
          ARCH: amd64
      - matrix:
          ARCH: i386

  - name: upgrade tests
    image: nixpkgs/nix:nixos-22.05
    commands:
      - nix-shell --attr ci --run "./script/test-upgrade.sh v0.8.4 x86_64-unknown-linux-musl" || (cat /tmp/garage.log; false)
    when:
      - matrix:
          ARCH: amd64

  - name: push static binary
    image: nixpkgs/nix:nixos-22.05
    environment:
      TARGET: "${TARGET}"
    secrets:
      - source: garagehq_aws_access_key_id
        target: AWS_ACCESS_KEY_ID
      - source: garagehq_aws_secret_access_key
        target: AWS_SECRET_ACCESS_KEY
    commands:
      - nix-shell --attr ci --run "to_s3"

  - name: docker build and publish
    image: nixpkgs/nix:nixos-22.05
    environment:
      DOCKER_PLATFORM: "linux/${ARCH}"
      CONTAINER_NAME: "dxflrs/${ARCH}_garage"
    secrets:
      - docker_auth
    commands:
      - mkdir -p /root/.docker
      - echo $DOCKER_AUTH > /root/.docker/config.json
      - export CONTAINER_TAG=${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
      - nix-shell --attr ci --run "to_docker"
```
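For reference, the build and static-binary check performed by this release pipeline can be reproduced locally with plain `nix-build` / `nix-shell`. The sketch below assumes you are at the root of the Garage source tree and uses the amd64 matrix entry from the pipeline above; the CI uses `${CI_COMMIT_TAG:-$CI_COMMIT_SHA}` where the current commit hash is used here.

```bash
# Build the amd64 release package, tagging it with the current commit.
nix-build --no-build-output --attr pkgs.amd64.release \
  --argstr git_version "$(git rev-parse HEAD)"

# Same verification as the "check is static binary" step.
nix-shell --attr ci --run "./script/not-dynamic.sh result-bin/bin/garage"
```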
Cargo.lock (generated, 2396 lines changed)
Cargo.toml (135 lines changed)

@@ -3,6 +3,7 @@ resolver = "2"
 members = [
     "src/db",
     "src/util",
+    "src/net",
     "src/rpc",
     "src/table",
     "src/block",

@@ -17,19 +18,135 @@ members = [
 default-members = ["src/garage"]
 
 [workspace.dependencies]
+
+# Internal Garage crates
 format_table = { version = "0.1.1", path = "src/format-table" }
-garage_api = { version = "0.9.1", path = "src/api" }
-garage_block = { version = "0.9.1", path = "src/block" }
-garage_db = { version = "0.9.1", path = "src/db", default-features = false }
-garage_model = { version = "0.9.1", path = "src/model", default-features = false }
-garage_rpc = { version = "0.9.1", path = "src/rpc" }
-garage_table = { version = "0.9.1", path = "src/table" }
-garage_util = { version = "0.9.1", path = "src/util" }
-garage_web = { version = "0.9.1", path = "src/web" }
+garage_api = { version = "1.0.1", path = "src/api" }
+garage_block = { version = "1.0.1", path = "src/block" }
+garage_db = { version = "1.0.1", path = "src/db", default-features = false }
+garage_model = { version = "1.0.1", path = "src/model", default-features = false }
+garage_net = { version = "1.0.1", path = "src/net" }
+garage_rpc = { version = "1.0.1", path = "src/rpc" }
+garage_table = { version = "1.0.1", path = "src/table" }
+garage_util = { version = "1.0.1", path = "src/util" }
+garage_web = { version = "1.0.1", path = "src/web" }
 k2v-client = { version = "0.0.4", path = "src/k2v-client" }
 
+# External crates from crates.io
+arc-swap = "1.0"
+argon2 = "0.5"
+async-trait = "0.1.7"
+backtrace = "0.3"
+base64 = "0.21"
+blake2 = "0.10"
+bytes = "1.0"
+bytesize = "1.1"
+cfg-if = "1.0"
+chrono = "0.4"
+crc32fast = "1.4"
+crc32c = "0.6"
+crypto-common = "0.1"
+digest = "0.10"
+err-derive = "0.3"
+gethostname = "0.4"
+git-version = "0.3.4"
+hex = "0.4"
+hexdump = "0.1"
+hmac = "0.12"
+idna = "0.5"
+itertools = "0.12"
+ipnet = "2.9.0"
+lazy_static = "1.4"
+md-5 = "0.10"
+mktemp = "0.5"
+nix = { version = "0.27", default-features = false, features = ["fs"] }
+nom = "7.1"
+parse_duration = "2.1"
+pin-project = "1.0.12"
+pnet_datalink = "0.34"
+rand = "0.8"
+sha1 = "0.10"
+sha2 = "0.10"
+timeago = { version = "0.4", default-features = false }
+xxhash-rust = { version = "0.8", default-features = false, features = ["xxh3"] }
+
+aes-gcm = { version = "0.10", features = ["aes", "stream"] }
+sodiumoxide = { version = "0.2.5-0", package = "kuska-sodiumoxide" }
+kuska-handshake = { version = "0.2.0", features = ["default", "async_std"] }
+
+clap = { version = "4.1", features = ["derive", "env"] }
+pretty_env_logger = "0.5"
+structopt = { version = "0.3", default-features = false }
+syslog-tracing = "0.3"
+tracing = "0.1"
+tracing-subscriber = { version = "0.3", features = ["env-filter"] }
+
+heed = { version = "0.11", default-features = false, features = ["lmdb"] }
+rusqlite = "0.31.0"
+r2d2 = "0.8"
+r2d2_sqlite = "0.24"
+
+async-compression = { version = "0.4", features = ["tokio", "zstd"] }
+zstd = { version = "0.13", default-features = false }
+
+quick-xml = { version = "0.26", features = [ "serialize" ] }
+rmp-serde = "1.1.2"
+serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
+serde_bytes = "0.11"
+serde_json = "1.0"
+toml = { version = "0.8", default-features = false, features = ["parse"] }
+
+# newer version requires rust edition 2021
+k8s-openapi = { version = "0.21", features = ["v1_24"] }
+kube = { version = "0.88", default-features = false, features = ["runtime", "derive", "client", "rustls-tls"] }
+schemars = "0.8"
+reqwest = { version = "0.11", default-features = false, features = ["rustls-tls-manual-roots", "json"] }
+
+form_urlencoded = "1.0.0"
+http = "1.0"
+httpdate = "1.0"
+http-range = "0.1"
+http-body-util = "0.1"
+hyper = { version = "1.0", default-features = false }
+hyper-util = { version = "0.1", features = [ "full" ] }
+multer = "3.0"
+percent-encoding = "2.2"
+roxmltree = "0.19"
+url = "2.3"
+
+futures = "0.3"
+futures-util = "0.3"
+tokio = { version = "1.0", default-features = false, features = ["net", "rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] }
+tokio-util = { version = "0.7", features = ["compat", "io"] }
+tokio-stream = { version = "0.1", features = ["net"] }
+
+opentelemetry = { version = "0.17", features = [ "rt-tokio", "metrics", "trace" ] }
+opentelemetry-prometheus = "0.10"
+opentelemetry-otlp = "0.10"
+opentelemetry-contrib = "0.9"
+prometheus = "0.13"
+
+# used by the k2v-client crate only
+aws-sigv4 = { version = "1.1" }
+hyper-rustls = { version = "0.26", features = ["http2"] }
+log = "0.4"
+thiserror = "1.0"
+
+# ---- used only as build / dev dependencies ----
+assert-json-diff = "2.0"
+rustc_version = "0.4.0"
+static_init = "1.0"
+
+aws-config = "1.1.4"
+aws-sdk-config = "1.13"
+aws-sdk-s3 = "1.14"
+
 [profile.dev]
+#lto = "thin" # disabled for now, adds 2-4 min to each CI build
 lto = "off"
 
 [profile.release]
-debug = true
+lto = true
+codegen-units = 1
+opt-level = "s"
+strip = true
@@ -1,4 +1,4 @@
-Garage [![Build Status](https://drone.deuxfleurs.fr/api/badges/Deuxfleurs/garage/status.svg?ref=refs/heads/main)](https://drone.deuxfleurs.fr/Deuxfleurs/garage)
+Garage [![status-badge](https://woodpecker.deuxfleurs.fr/api/badges/1/status.svg)](https://woodpecker.deuxfleurs.fr/repos/1)
 ===
 
 <p align="center" style="text-align:center;">
@@ -40,7 +40,6 @@ in {
   features = [
     "garage/bundled-libs"
     "garage/k2v"
-    "garage/sled"
     "garage/lmdb"
     "garage/sqlite"
   ];
@@ -98,7 +98,6 @@ paths:
             type: string
             example:
               - "k2v"
-              - "sled"
               - "lmdb"
               - "sqlite"
               - "consul-discovery"
@@ -23,7 +23,7 @@ client = minio.Minio(
     "GKyourapikey",
     "abcd[...]1234",
     # Force the region, this is specific to garage
-    region="region",
+    region="garage",
 )
 ```
 
|
||||||
|
|
||||||
*External link:* [Nextcloud Documentation > Primary Storage](https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/primary_storage.html)
|
*External link:* [Nextcloud Documentation > Primary Storage](https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/primary_storage.html)
|
||||||
|
|
||||||
|
#### SSE-C encryption (since Garage v1.0)
|
||||||
|
|
||||||
|
Since version 1.0, Garage supports server-side encryption with customer keys
|
||||||
|
(SSE-C). In this mode, Garage is responsible for encrypting and decrypting
|
||||||
|
objects, but it does not store the encryption key itself. The encryption key
|
||||||
|
should be provided by Nextcloud upon each request. This mode of operation is
|
||||||
|
supported by Nextcloud and it has successfully been tested together with
|
||||||
|
Garage.
|
||||||
|
|
||||||
|
To enable SSE-C encryption:
|
||||||
|
|
||||||
|
1. Make sure your Garage server is accessible via SSL through a reverse proxy
|
||||||
|
such as Nginx, and that it is using a valid public certificate (Nextcloud
|
||||||
|
might be able to connect to an S3 server that is using a self-signed
|
||||||
|
certificate, but you will lose many hours while trying, so don't).
|
||||||
|
Configure values for `use_ssl` and `port` accordingly in your `config.php`
|
||||||
|
file.
|
||||||
|
|
||||||
|
2. Generate an encryption key using the following command:
|
||||||
|
|
||||||
|
```
|
||||||
|
openssl rand -base64 32
|
||||||
|
```
|
||||||
|
|
||||||
|
Make sure to keep this key **secret**!
|
||||||
|
|
||||||
|
3. Add the encryption key in your `config.php` file as follows:
|
||||||
|
|
||||||
|
|
||||||
|
```php
|
||||||
|
<?php
|
||||||
|
$CONFIG = array(
|
||||||
|
'objectstore' => [
|
||||||
|
'class' => '\\OC\\Files\\ObjectStore\\S3',
|
||||||
|
'arguments' => [
|
||||||
|
...
|
||||||
|
'sse_c_key' => 'exampleencryptionkeyLbU+5fKYQcVoqnn+RaIOXgo=',
|
||||||
|
...
|
||||||
|
],
|
||||||
|
],
|
||||||
|
```
|
||||||
|
|
||||||
|
Nextcloud will now make Garage encrypt files at rest in the storage bucket.
|
||||||
|
These files will not be readable by an S3 client that has credentials to the
|
||||||
|
bucket but doesn't also know the secret encryption key.
|
||||||
|
|
||||||
|
|
||||||
### External Storage
|
### External Storage
|
||||||
|
|
||||||
**From the GUI.** Activate the "External storage support" app from the "Applications" page (click on your account icon on the top right corner of your screen to display the menu). Go to your parameters page (also located below your account icon). Click on external storage (or the corresponding translation in your language).
|
**From the GUI.** Activate the "External storage support" app from the "Applications" page (click on your account icon on the top right corner of your screen to display the menu). Go to your parameters page (also located below your account icon). Click on external storage (or the corresponding translation in your language).
|
||||||
|
@@ -245,7 +292,7 @@ with average object size ranging from 50 KB to 150 KB.
 As such, your Garage cluster should be configured appropriately for good performance:
 
 - use Garage v0.8.0 or higher with the [LMDB database engine](@documentation/reference-manual/configuration.md#db-engine-since-v0-8-0).
-  With the default Sled database engine, your database could quickly end up taking tens of GB of disk space.
+  Older versions of Garage used the Sled database engine which had issues, such as databases quickly ending up taking tens of GB of disk space.
 - the Garage database should be stored on a SSD
 
 ### Creating your bucket
@@ -288,6 +335,7 @@ From the [official Mastodon documentation](https://docs.joinmastodon.org/admin/tootctl/)
 
 ```bash
 $ RAILS_ENV=production bin/tootctl media remove --days 3
+$ RAILS_ENV=production bin/tootctl media remove --days 15 --prune-profiles
 $ RAILS_ENV=production bin/tootctl media remove-orphans
 $ RAILS_ENV=production bin/tootctl preview_cards remove --days 15
 ```

@@ -306,8 +354,6 @@ Imports: 1.7 KB
 Settings: 0 Bytes
 ```
 
-Unfortunately, [old avatars and headers cannot currently be cleaned up](https://github.com/mastodon/mastodon/issues/9567).
-
 ### Migrating your data
 
 Data migration should be done with an efficient S3 client.
@@ -55,8 +55,8 @@ Create your key and bucket:
 
 ```bash
 garage key create my-key
-garage bucket create backup
-garage bucket allow backup --read --write --key my-key
+garage bucket create backups
+garage bucket allow backups --read --write --key my-key
 ```
 
 Then register your Key ID and Secret key in your environment:
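If this bucket is used with an S3-capable backup tool such as restic, the key and bucket created above can be wired up as follows; the endpoint, credentials, and repository password below are placeholders for your own cluster.

```bash
# Placeholder endpoint and credentials: substitute your own values.
export AWS_ACCESS_KEY_ID="GKyourapikey"
export AWS_SECRET_ACCESS_KEY="abcd[...]1234"
export RESTIC_PASSWORD="a-strong-repository-password"

# Initialize the repository once, then back up a directory into it.
restic -r s3:https://s3.garage.example.org/backups init
restic -r s3:https://s3.garage.example.org/backups backup /home/user/documents
```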
@@ -259,7 +259,7 @@ duck --delete garage:/my-files/an-object.txt
 
 ## WinSCP (libs3) {#winscp}
 
-*You can find instructions on how to use the GUI in french [in our wiki](https://wiki.deuxfleurs.fr/fr/Guide/Garage/WinSCP).*
+*You can find instructions on how to use the GUI in french [in our wiki](https://guide.deuxfleurs.fr/prise_en_main/winscp/).*
 
 How to use `winscp.com`, the CLI interface of WinSCP:
 
@@ -53,20 +53,43 @@ and that's also why your nodes have super long identifiers.
 
 Adding TLS support built into Garage is not currently planned.
 
-## Garage stores data in plain text on the filesystem
+## Garage stores data in plain text on the filesystem or encrypted using customer keys (SSE-C)
 
-Garage does not handle data encryption at rest by itself, and instead delegates
-to the user to add encryption, either at the storage layer (LUKS, etc) or on
-the client side (or both). There are no current plans to add data encryption
-directly in Garage.
-
-Implementing data encryption directly in Garage might make things simpler for
-end users, but also raises many more questions, especially around key
-management: for encryption of data, where could Garage get the encryption keys
-from ? If we encrypt data but keep the keys in a plaintext file next to them,
-it's useless. We probably don't want to have to manage secrets in garage as it
-would be very hard to do in a secure way. Maybe integrate with an external
-system such as Hashicorp Vault?
+For standard S3 API requests, Garage does not encrypt data at rest by itself.
+For the most generic at rest encryption of data, we recommend setting up your
+storage partitions on encrypted LUKS devices.
+
+If you are developping your own client software that makes use of S3 storage,
+we recommend implementing data encryption directly on the client side and never
+transmitting plaintext data to Garage. This makes it easy to use an external
+untrusted storage provider if necessary.
+
+Garage does support [SSE-C
+encryption](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html),
+an encryption mode of Amazon S3 where data is encrypted at rest using
+encryption keys given by the client. The encryption keys are passed to the
+server in a header in each request, to encrypt or decrypt data at the moment of
+reading or writing. The server discards the key as soon as it has finished
+using it for the request. This mode allows the data to be encrypted at rest by
+Garage itself, but it requires support in the client software. It is also not
+adapted to a model where the server is not trusted or assumed to be
+compromised, as the server can easily know the encryption keys. Note however
+that when using SSE-C encryption, the only Garage node that knows the
+encryption key passed in a given request is the node to which the request is
+directed (which can be a gateway node), so it is easy to have untrusted nodes
+in the cluster as long as S3 API requests containing SSE-C encryption keys are
+not directed to them.
+
+Implementing automatic data encryption directly in Garage without client-side
+management of keys (something like
+[SSE-S3](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingServerSideEncryption.html))
+could make things simpler for end users that don't want to setup LUKS, but also
+raises many more questions, especially around key management: for encryption of
+data, where could Garage get the encryption keys from? If we encrypt data but
+keep the keys in a plaintext file next to them, it's useless. We probably don't
+want to have to manage secrets in Garage as it would be very hard to do in a
+secure way. At the time of speaking, there are no plans to implement this in
+Garage.
 
 # Adding data encryption using external tools
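As an illustration of the SSE-C mode described above, an external S3 tool such as the AWS CLI can supply the customer key on each request. This is a sketch, not a definitive recipe: the endpoint and bucket names are placeholders, and the way a given client expects the 256-bit key to be provided may differ.

```bash
# Placeholder endpoint and bucket; the key is 32 random bytes kept in a file.
ENDPOINT=https://s3.garage.example.org
openssl rand 32 > sse.key

# Upload an object encrypted with the customer-provided key:
aws --endpoint-url "$ENDPOINT" s3 cp ./file.txt s3://my-bucket/file.txt \
  --sse-c AES256 --sse-c-key fileb://sse.key

# Reading it back requires presenting the same key on the GET request;
# without it, the request is refused:
aws --endpoint-url "$ENDPOINT" s3 cp s3://my-bucket/file.txt ./file.copy \
  --sse-c AES256 --sse-c-key fileb://sse.key
```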
@@ -90,6 +90,6 @@ The following feature flags are available in v0.8.0:
 | `kubernetes-discovery` | optional | Enable automatic registration and discovery<br>of cluster nodes through the Kubernetes API |
 | `metrics` | *by default* | Enable collection of metrics in Prometheus format on the admin API |
 | `telemetry-otlp` | optional | Enable collection of execution traces using OpenTelemetry |
-| `sled` | *by default* | Enable using Sled to store Garage's metadata |
-| `lmdb` | optional | Enable using LMDB to store Garage's metadata |
-| `sqlite` | optional | Enable using Sqlite3 to store Garage's metadata |
+| `syslog` | optional | Enable logging to Syslog |
+| `lmdb` | *by default* | Enable using LMDB to store Garage's metadata |
+| `sqlite` | *by default* | Enable using Sqlite3 to store Garage's metadata |
@@ -18,7 +18,7 @@ api_bind_addr = "0.0.0.0:3903"
 ```
 
 This will allow anyone to scrape Prometheus metrics by fetching
-`http://localhost:3093/metrics`. If you want to restrict access
+`http://localhost:3903/metrics`. If you want to restrict access
 to the exported metrics, set the `metrics_token` configuration value
 to a bearer token to be used when fetching the metrics endpoint.
 
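For instance, the endpoint can be checked by hand as follows; the token value is whatever you configured as `metrics_token`.

```bash
# Without metrics_token configured, the endpoint is readable by anyone:
curl http://localhost:3903/metrics

# With metrics_token set, the same value must be sent as a bearer token:
curl -H "Authorization: Bearer $METRICS_TOKEN" http://localhost:3903/metrics
```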
@@ -53,9 +53,9 @@ to store 2 TB of data in total.
 
 ### Best practices
 
-- If you have fast dedicated networking between all your nodes, and are planing to store
-  very large files, bump the `block_size` configuration parameter to 10 MB
-  (`block_size = 10485760`).
+- If you have reasonably fast networking between all your nodes, and are planing to store
+  mostly large files, bump the `block_size` configuration parameter to 10 MB
+  (`block_size = "10M"`).
 
 - Garage stores its files in two locations: it uses a metadata directory to store frequently-accessed
   small metadata items, and a data directory to store data blocks of uploaded objects.

@@ -68,31 +68,42 @@ to store 2 TB of data in total.
   EXT4 is not recommended as it has more strict limitations on the number of inodes,
   which might cause issues with Garage when large numbers of objects are stored.
 
-- If you only have an HDD and no SSD, it's fine to put your metadata alongside the data
-  on the same drive. Having lots of RAM for your kernel to cache the metadata will
-  help a lot with performance. Make sure to use the LMDB database engine,
-  instead of Sled, which suffers from quite bad performance degradation on HDDs.
-  Sled is still the default for legacy reasons, but is not recommended anymore.
-
-- For the metadata storage, Garage does not do checksumming and integrity
-  verification on its own. If you are afraid of bitrot/data corruption,
-  put your metadata directory on a ZFS or BTRFS partition. Otherwise, just use regular
-  EXT4 or XFS.
-
 - Servers with multiple HDDs are supported natively by Garage without resorting
   to RAID, see [our dedicated documentation page](@/documentation/operations/multi-hdd.md).
 
+- For the metadata storage, Garage does not do checksumming and integrity
+  verification on its own, so it is better to use a robust filesystem such as
+  BTRFS or ZFS. Users have reported that when using the LMDB database engine
+  (the default), database files have a tendency of becoming corrupted after an
+  unclean shutdown (e.g. a power outage), so you should take regular snapshots
+  to be able to recover from such a situation. This can be done using Garage's
+  built-in automatic snapshotting (since v0.9.4), or by using filesystem level
+  snapshots. If you cannot do so, you might want to switch to Sqlite which is
+  more robust.
+
+- LMDB is the fastest and most tested database engine, but it has the following
+  weaknesses: 1/ data files are not architecture-independent, you cannot simply
+  move a Garage metadata directory between nodes running different architectures,
+  and 2/ LMDB is not suited for 32-bit platforms. Sqlite is a viable alternative
+  if any of these are of concern.
+
+- If you only have an HDD and no SSD, it's fine to put your metadata alongside
+  the data on the same drive, but then consider your filesystem choice wisely
+  (see above). Having lots of RAM for your kernel to cache the metadata will
+  help a lot with performance. The default LMDB database engine is the most
+  tested and has good performance.
+
 ## Get a Docker image
 
 Our docker image is currently named `dxflrs/garage` and is stored on the [Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).
-We encourage you to use a fixed tag (eg. `v0.9.1`) and not the `latest` tag.
-For this example, we will use the latest published version at the time of the writing which is `v0.9.1` but it's up to you
+We encourage you to use a fixed tag (eg. `v1.0.1`) and not the `latest` tag.
+For this example, we will use the latest published version at the time of the writing which is `v1.0.1` but it's up to you
 to check [the most recent versions on the Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).
 
 For example:
 
 ```
-sudo docker pull dxflrs/garage:v0.9.1
+sudo docker pull dxflrs/garage:v1.0.1
 ```
 
 ## Deploying and configuring Garage
@@ -115,8 +126,9 @@ A valid `/etc/garage.toml` for our cluster would look as follows:
 metadata_dir = "/var/lib/garage/meta"
 data_dir = "/var/lib/garage/data"
 db_engine = "lmdb"
+metadata_auto_snapshot_interval = "6h"
 
-replication_mode = "3"
+replication_factor = 3
 
 compression_level = 2
 

@@ -140,6 +152,8 @@ Check the following for your configuration files:
 - Make sure `rpc_public_addr` contains the public IP address of the node you are configuring.
   This parameter is optional but recommended: if your nodes have trouble communicating with
   one another, consider adding it.
+  Alternatively, you can also set `rpc_public_addr_subnet`, which can filter
+  the addresses announced to other peers to a specific subnet.
 
 - Make sure `rpc_secret` is the same value on all nodes. It should be a 32-bytes hex-encoded secret key.
   You can generate such a key with `openssl rand -hex 32`.
|
||||||
-v /etc/garage.toml:/etc/garage.toml \
|
-v /etc/garage.toml:/etc/garage.toml \
|
||||||
-v /var/lib/garage/meta:/var/lib/garage/meta \
|
-v /var/lib/garage/meta:/var/lib/garage/meta \
|
||||||
-v /var/lib/garage/data:/var/lib/garage/data \
|
-v /var/lib/garage/data:/var/lib/garage/data \
|
||||||
dxflrs/garage:v0.9.1
|
dxflrs/garage:v1.0.1
|
||||||
```
|
```
|
||||||
|
|
||||||
With this command line, Garage should be started automatically at each boot.
|
With this command line, Garage should be started automatically at each boot.
|
||||||
|
@@ -171,7 +185,7 @@ If you want to use `docker-compose`, you may use the following `docker-compose.yml`:
 version: "3"
 services:
   garage:
-    image: dxflrs/garage:v0.9.1
+    image: dxflrs/garage:v1.0.1
     network_mode: "host"
     restart: unless-stopped
     volumes:
@@ -187,7 +201,7 @@ upgrades. With the containerized setup proposed here, the upgrade process
 will require stopping and removing the existing container, and re-creating it
 with the upgraded version.
 
-## Controling the daemon
+## Controlling the daemon
 
 The `garage` binary has two purposes:
   - it acts as a daemon when launched with `garage server`
@@ -245,7 +259,7 @@ You can then instruct nodes to connect to one another as follows:
 Venus$ garage node connect 563e1ac825ee3323aa441e72c26d1030d6d4414aeb3dd25287c531e7fc2bc95d@[fc00:1::1]:3901
 ```
 
-You don't nead to instruct all node to connect to all other nodes:
+You don't need to instruct all node to connect to all other nodes:
 nodes will discover one another transitively.
 
 Now if your run `garage status` on any node, you should have an output that looks as follows:
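As a reminder of where the long identifier used above comes from, it can be printed on the target node itself; the sketch below reuses the example identifier and address from the documentation.

```bash
# On the node you want to connect to, print its full identifier and RPC address:
garage node id

# On any other node, paste that output into the connect command:
garage node connect 563e1ac825ee3323aa441e72c26d1030d6d4414aeb3dd25287c531e7fc2bc95d@[fc00:1::1]:3901
```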
@@ -472,3 +472,32 @@ https:// {
 
 More information on how this endpoint is implemented in Garage is available
 in the [Admin API Reference](@/documentation/reference-manual/admin-api.md) page.
+
+### Fileserver browser
+
+Caddy's built-in
+[file_server](https://caddyserver.com/docs/caddyfile/directives/file_server)
+browser functionality can be extended with the
+[caddy-fs-s3](https://github.com/sagikazarmark/caddy-fs-s3) module.
+
+This can be configured to use Garage as a backend with the following
+configuration:
+
+```caddy
+browse.garage.tld {
+	file_server {
+		fs s3 {
+			bucket test-bucket
+			region garage
+
+			endpoint https://s3.garage.tld
+			use_path_style
+		}
+
+		browse
+	}
+}
+```
+
+Caddy must also be configured with the required `AWS_ACCESS_KEY_ID` and
+`AWS_SECRET_ACCESS_KEY` environment variables to access the bucket.
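One way to provide those variables is simply to export them in the environment that starts Caddy; the credentials below are placeholders in the same style as the earlier examples.

```bash
# Placeholder credentials for the bucket used by caddy-fs-s3:
export AWS_ACCESS_KEY_ID="GKyourapikey"
export AWS_SECRET_ACCESS_KEY="abcd[...]1234"

caddy run --config /etc/caddy/Caddyfile
```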
@@ -48,7 +48,22 @@ locations. They use Garage themselves for the following tasks:
 
 - As a backup target using `rclone` and `restic`
 
-- In the Drone continuous integration platform to store task logs
-
 The Deuxfleurs Garage cluster is a multi-site cluster currently composed of
 9 nodes in 3 physical locations.
+
+### Triplebit
+
+[Triplebit](https://www.triplebit.org) is a non-profit hosting provider and
+ISP focused on improving access to privacy-related services. They use
+Garage themselves for the following tasks:
+
+- Hosting of their homepage, [privacyguides.org](https://www.privacyguides.org/), and various other static sites
+
+- As a Mastodon object storage backend for [mstdn.party](https://mstdn.party/) and [mstdn.plus](https://mstdn.plus/)
+
+- As a PeerTube storage backend for [neat.tube](https://neat.tube/)
+
+- As a [Matrix media backend](https://github.com/matrix-org/synapse-s3-storage-provider)
+
+Triplebit's Garage cluster is a multi-site cluster currently composed of
+10 nodes in 3 physical locations.
@@ -97,7 +97,7 @@ delete a tombstone, the following condition has to be met:
   superseeded by the tombstone. This ensures that deleting the tombstone is
   safe and that no deleted value will come back in the system.
 
-Garage makes use of Sled's atomic operations (such as compare-and-swap and
+Garage uses atomic database operations (such as compare-and-swap and
 transactions) to ensure that only tombstones that have been correctly
 propagated to other nodes are ever deleted from the local entry tree.
 
@@ -67,7 +67,7 @@ Pithos has been abandonned and should probably not used yet, in the following we
 Pithos was relying as a S3 proxy in front of Cassandra (and was working with Scylla DB too).
 From its designers' mouth, storing data in Cassandra has shown its limitations justifying the project abandonment.
 They built a closed-source version 2 that does not store blobs in the database (only metadata) but did not communicate further on it.
-We considered there v2's design but concluded that it does not fit both our *Self-contained & lightweight* and *Simple* properties. It makes the development, the deployment and the operations more complicated while reducing the flexibility.
+We considered their v2's design but concluded that it does not fit both our *Self-contained & lightweight* and *Simple* properties. It makes the development, the deployment and the operations more complicated while reducing the flexibility.
 
 **[Riak CS](https://docs.riak.com/riak/cs/2.1.1/index.html):**
 *Not written yet*
@ -80,7 +80,7 @@ nix-build \
|
||||||
--git_version $(git rev-parse HEAD)
|
--git_version $(git rev-parse HEAD)
|
||||||
```
|
```
|
||||||
|
|
||||||
*The result is located in `result/bin`. You can pass arguments to cross compile: check `.drone.yml` for examples.*
|
*The result is located in `result/bin`. You can pass arguments to cross compile: check `.woodpecker/release.yml` for examples.*
|
||||||
|
|
||||||
If you modify a `Cargo.toml` or regenerate any `Cargo.lock`, you must run `cargo2nix`:
|
If you modify a `Cargo.toml` or regenerate any `Cargo.lock`, you must run `cargo2nix`:
|
||||||
|
|
||||||
|
|
|
@ -81,12 +81,9 @@ Our cache will be checked.
|
||||||
- http://www.lpenz.org/articles/nixchannel/index.html
|
- http://www.lpenz.org/articles/nixchannel/index.html
|
||||||
|
|
||||||
|
|
||||||
## Drone
|
## Woodpecker
|
||||||
|
|
||||||
Do not try to set a build as trusted from the interface or the CLI tool,
|
Woodpecker can do parallelism both at the step and the pipeline level. At the step level, parallelism is restricted to the same runner.
|
||||||
your request would be ignored. Instead, directly edit the database (table `repos`, column `repo_trusted`).
|
|
||||||
|
|
||||||
Drone can do parallelism both at the step and the pipeline level. At the step level, parallelism is restricted to the same runner.
|
|
||||||
|
|
||||||
## Building Docker containers
|
## Building Docker containers
|
||||||
|
|
||||||
|
@ -99,3 +96,4 @@ We were:
|
||||||
- Unable to use the kaniko container provided by Google as we can't run arbitrary logic: we need to put our secret in .docker/config.json.
|
- Unable to use the kaniko container provided by Google as we can't run arbitrary logic: we need to put our secret in .docker/config.json.
|
||||||
|
|
||||||
Finally we chose to build kaniko through nix and use it in a `nix-shell`.
|
Finally we chose to build kaniko through nix and use it in a `nix-shell`.
|
||||||
|
We then switched to using kaniko from nixpkgs when it was packaged.
|
||||||
|
|
|
@ -42,7 +42,7 @@ and the docker containers on Docker Hub.
|
||||||
|
|
||||||
## Automation
|
## Automation
|
||||||
|
|
||||||
We automated our release process with Nix and Drone to make it more reliable.
|
We automated our release process with Nix and Woodpecker to make it more reliable.
|
||||||
Here we describe how it is done, in case you want to debug or improve it.
|
Here we describe how it is done, in case you want to debug or improve it.
|
||||||
|
|
||||||
### Caching build steps
|
### Caching build steps
|
||||||
|
@ -62,52 +62,31 @@ Sending to the cache is done through `nix copy`, for example:
|
||||||
nix copy --to 's3://nix?endpoint=garage.deuxfleurs.fr&region=garage&secret-key=/etc/nix/signing-key.sec' result
|
nix copy --to 's3://nix?endpoint=garage.deuxfleurs.fr&region=garage&secret-key=/etc/nix/signing-key.sec' result
|
||||||
```
|
```
|
||||||
|
|
||||||
*Note that you need the signing key. In our case, it is stored as a secret in Drone.*
|
*The signing key possessed by the Garage maintainers is required to update the Nix cache.*
|
||||||
|
|
||||||
The previous command will only send the built packet and not its dependencies.
|
The previous command will only send the built package and not its dependencies.
|
||||||
To send its dependency, a tool named `nix-copy-closure` has been created but it is not compatible with the S3 protocol.
|
In the case of our CI pipeline, we want to cache all intermediate build steps
|
||||||
|
as well. This can be done using this quite involved command (here as an example
|
||||||
Instead, you can use the following commands to list all the runtime dependencies:
|
for the `pkgs.amd64.release` package):
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
nix copy \
|
nix copy -j8 \
|
||||||
--to 's3://nix?endpoint=garage.deuxfleurs.fr&region=garage&secret-key=/etc/nix/signing-key.sec' \
|
--to 's3://nix?endpoint=garage.deuxfleurs.fr&region=garage&secret-key=/etc/nix/nix-signing-key.sec' \
|
||||||
$(nix-store -qR result/)
|
$(nix path-info pkgs.amd64.release --file default.nix --derivation --recursive | sed 's/\.drv$/.drv^*/')
|
||||||
```
|
```
|
||||||
|
|
||||||
*We could also write this expression with xargs but this tool is not available in our container.*
|
This command will simultaneously build all of the required Nix paths (using at
|
||||||
|
most 8 parallel Nix builder jobs) and send the resulting objects to the cache.
|
||||||
|
|
||||||
But in certain cases, we want to cache compile time dependencies also.
|
This can be run for all the Garage packages we build using the following command:
|
||||||
For example, the Nix project does not provide binaries for cross compiling to i686 and thus we need to compile gcc on our own.
|
|
||||||
We do not want to compile gcc each time, so even if it is a compile time dependency, we want to cache it.
|
|
||||||
|
|
||||||
This time, the command is a bit more involved:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
nix copy --to \
|
|
||||||
's3://nix?endpoint=garage.deuxfleurs.fr&region=garage&secret-key=/etc/nix/signing-key.sec' \
|
|
||||||
$(nix-store -qR --include-outputs \
|
|
||||||
$(nix-instantiate))
|
|
||||||
```
|
|
||||||
|
|
||||||
This is the command we use in our CI as we expect the final binary to change, so we mainly focus on
|
|
||||||
caching our development dependencies.
|
|
||||||
|
|
||||||
*Currently there is no automatic garbage collection of the cache: we should monitor its growth.
|
|
||||||
Hopefully, we can erase it totally without breaking any build, the next build will only be slower.*
|
|
||||||
|
|
||||||
In practise, we concluded that we do not want to cache all the compilation dependencies.
|
|
||||||
Instead, we want to cache the toolchain we use to build Garage each time we change it.
|
|
||||||
So we removed from Drone any automatic update of the cache and instead handle them manually with:
|
|
||||||
|
|
||||||
```
|
```
|
||||||
source ~/.awsrc
|
source ~/.awsrc
|
||||||
nix-shell --run 'refresh_toolchain'
|
nix-shell --attr cache --run 'refresh_cache'
|
||||||
```
|
```
|
||||||
|
|
||||||
Internally, it will run `nix-build` on `nix/toolchain.nix` and send the output plus its depedencies to the cache.
|
We don't automate this step at each CI build, as *there is currently no automatic garbage collection of the cache.*
|
||||||
|
This means we should also monitor the cache's size; if it ever becomes too big we can erase it with:
|
||||||
To erase the cache:
|
|
||||||
|
|
||||||
```
|
```
|
||||||
mc rm --recursive --force 'garage/nix/'
|
mc rm --recursive --force 'garage/nix/'
|
||||||
|
@ -157,9 +136,9 @@ nix-shell --run refresh_index
|
||||||
|
|
||||||
If you want to compile for different architectures, you will need to repeat all these commands for each architecture.
|
If you want to compile for different architectures, you will need to repeat all these commands for each architecture.
|
||||||
|
|
||||||
**In practise, and except for debugging, you will never directly run these commands. Release is handled by drone**
|
**In practice, and except for debugging, you will never directly run these commands. Release is handled by Woodpecker.**
|
||||||
|
|
||||||
### Drone
|
### Drone (obsolete)
|
||||||
|
|
||||||
Our instance is available at [https://drone.deuxfleurs.fr](https://drone.deuxfleurs.fr).
|
Our instance is available at [https://drone.deuxfleurs.fr](https://drone.deuxfleurs.fr).
|
||||||
You need an account on [https://git.deuxfleurs.fr](https://git.deuxfleurs.fr) to use it.
|
You need an account on [https://git.deuxfleurs.fr](https://git.deuxfleurs.fr) to use it.
|
||||||
|
|
|
@ -19,7 +19,7 @@ connecting to. To run on all nodes, add the `-a` flag as follows:
|
||||||
|
|
||||||
# Data block operations
|
# Data block operations
|
||||||
|
|
||||||
## Data store scrub
|
## Data store scrub {#scrub}
|
||||||
|
|
||||||
Scrubbing the data store means examining each individual data block to check that
|
Scrubbing the data store means examining each individual data block to check that
|
||||||
their content is correct, by verifying their hash. Any block found to be corrupted
|
their content is correct, by verifying their hash. Any block found to be corrupted
|
||||||
|
@ -49,7 +49,7 @@ verifications. Of course, scrubbing the entire data store will also take longer.
|
||||||
## Block check and resync
|
## Block check and resync
|
||||||
|
|
||||||
In some cases, nodes hold a reference to a block but do not actually have the block
|
In some cases, nodes hold a reference to a block but do not actually have the block
|
||||||
stored on disk. Conversely, they may also have on disk blocks that are not referenced
|
stored on disk. Conversely, they may also have on-disk blocks that are not referenced
|
||||||
any more. To fix both cases, a block repair may be run with `garage repair blocks`.
|
any more. To fix both cases, a block repair may be run with `garage repair blocks`.
|
||||||
This will scan the entire block reference counter table to check that the blocks
|
This will scan the entire block reference counter table to check that the blocks
|
||||||
exist on disk, and will scan the entire disk store to check that stored blocks
|
exist on disk, and will scan the entire disk store to check that stored blocks
|
||||||
|
@ -95,7 +95,7 @@ using the `garage block purge` command.
|
||||||
|
|
||||||
In [multi-HDD setups](@/documentation/operations/multi-hdd.md), to ensure that
|
In [multi-HDD setups](@/documentation/operations/multi-hdd.md), to ensure that
|
||||||
data blocks are well balanced between storage locations, you may run a
|
data blocks are well balanced between storage locations, you may run a
|
||||||
rebalance operation using `garage repair rebalance`. This is usefull when
|
rebalance operation using `garage repair rebalance`. This is useful when
|
||||||
adding storage locations or when capacities of the storage locations have been
|
adding storage locations or when capacities of the storage locations have been
|
||||||
changed. Once this is finished, Garage will know for each block of a single
|
changed. Once this is finished, Garage will know for each block of a single
|
||||||
possible location where it can be, which can increase access speed. This
|
possible location where it can be, which can increase access speed. This
|
||||||
|
@ -104,6 +104,24 @@ operation will also move out all data from locations marked as read-only.
|
||||||
|
|
||||||
# Metadata operations
|
# Metadata operations
|
||||||
|
|
||||||
|
## Metadata snapshotting
|
||||||
|
|
||||||
|
It is good practice to setup automatic snapshotting of your metadata database
|
||||||
|
file, to recover from situations where it becomes corrupted on disk. This can
|
||||||
|
be done at the filesystem level if you are using ZFS or BTRFS.
|
||||||
|
|
||||||
|
Since Garage v0.9.4, Garage is able to take snapshots of the metadata database
|
||||||
|
itself. This basically amounts to copying the database file, except that it can
|
||||||
|
be run live while Garage is running without the risk of corruption or
|
||||||
|
inconsistencies. This can be setup to run automatically on a schedule using
|
||||||
|
[`metadata_auto_snapshot_interval`](@/documentation/reference-manual/configuration.md#metadata_auto_snapshot_interval).
|
||||||
|
A snapshot can also be triggered manually using the `garage meta snapshot`
|
||||||
|
command. Note that taking a snapshot using this method is very intensive as it
|
||||||
|
requires making a full copy of the database file, so you might prefer using
|
||||||
|
filesystem-level snapshots if possible. To recover a corrupted node from such a
|
||||||
|
snapshot, read the instructions
|
||||||
|
[here](@/documentation/operations/recovering.md#corrupted_meta).
|
||||||
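For reference, here is a minimal sketch combining both approaches; the `6h` interval is only an example value, and enabling it in `garage.toml` is shown as a comment:

```bash
# Trigger a manual snapshot of the local node's metadata database
# (written under <metadata_dir>/snapshots/):
garage meta snapshot

# Or let Garage do it on a schedule by adding, for example, the following
# line to garage.toml and restarting the daemon:
#   metadata_auto_snapshot_interval = "6h"
```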
|
|
||||||
## Metadata table resync
|
## Metadata table resync
|
||||||
|
|
||||||
Garage automatically resyncs all entries stored in the metadata tables every hour,
|
Garage automatically resyncs all entries stored in the metadata tables every hour,
|
||||||
|
@ -123,4 +141,7 @@ blocks may still be held by Garage. If you suspect that such corruption has occu
|
||||||
in your cluster, you can run one of the following repair procedures:
|
in your cluster, you can run one of the following repair procedures:
|
||||||
|
|
||||||
- `garage repair versions`: checks that all versions belong to a non-deleted object, and purges any orphan version
|
- `garage repair versions`: checks that all versions belong to a non-deleted object, and purges any orphan version
|
||||||
- `garage repair block_refs`: checks that all block references belong to a non-deleted object version, and purges any orphan block reference (this will then allow the blocks to be garbage-collected)
|
|
||||||
|
- `garage repair block-refs`: checks that all block references belong to a non-deleted object version, and purges any orphan block reference (this will then allow the blocks to be garbage-collected)
|
||||||
|
|
||||||
|
- `garage repair block-rc`: checks that the reference counters for blocks are in sync with the actual number of non-deleted entries in the block reference table
|
||||||
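For example, here is a sketch of how these procedures could be run cluster-wide, reusing the `-a` and `--yes` flags used for `garage repair tables` elsewhere in this documentation:

```bash
# Run each repair procedure on all nodes (-a) without interactive
# confirmation (--yes), in the order listed above:
garage repair -a --yes versions
garage repair -a --yes block-refs
garage repair -a --yes block-rc
```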
|
|
|
@ -12,8 +12,8 @@ An introduction to building cluster layouts can be found in the [production depl
|
||||||
In Garage, all of the data that can be stored in a given cluster is divided
|
In Garage, all of the data that can be stored in a given cluster is divided
|
||||||
into slices which we call *partitions*. Each partition is stored by
|
into slices which we call *partitions*. Each partition is stored by
|
||||||
one or several nodes in the cluster
|
one or several nodes in the cluster
|
||||||
(see [`replication_mode`](@/documentation/reference-manual/configuration.md#replication_mode)).
|
(see [`replication_factor`](@/documentation/reference-manual/configuration.md#replication_factor)).
|
||||||
The layout determines the correspondence between these partition,
|
The layout determines the correspondence between these partitions,
|
||||||
which exist on a logical level, and actual storage nodes.
|
which exist on a logical level, and actual storage nodes.
|
||||||
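As a reminder of how a layout is edited in practice, here is a sketch using the `garage layout` commands; the node ID, zone name and capacity below are made-up example values:

```bash
# Stage a role change for a node (zone "dc1" and capacity 1 TB are examples)
garage layout assign -z dc1 -c 1T 563e1ac825ee3323
# Review the resulting partition assignments before committing
garage layout show
# Commit the staged changes as a new layout version
garage layout apply --version 2
```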
|
|
||||||
## How cluster layouts work in Garage
|
## How cluster layouts work in Garage
|
||||||
|
@ -94,10 +94,10 @@ follow the following recommendations:
|
||||||
## Understanding unexpected layout calculations
|
## Understanding unexpected layout calculations
|
||||||
|
|
||||||
When adding, removing or modifying nodes in a cluster layout, sometimes
|
When adding, removing or modifying nodes in a cluster layout, sometimes
|
||||||
unexpected assigntations of partitions to node can occur. These assignations
|
unexpected assignments of partitions to nodes can occur. These assignments
|
||||||
are in fact normal and logical, given the objectives of the algorihtm. Indeed,
|
are in fact normal and logical, given the objectives of the algorithm. Indeed,
|
||||||
**the layout algorithm prioritizes moving less data between nodes over the fact
|
**the layout algorithm prioritizes moving less data between nodes over
|
||||||
of achieving equal distribution of load. It also tries to use all links between
|
achieving equal distribution of load. It also tries to use all links between
|
||||||
pairs of nodes in equal proportions when moving data.** This section presents
|
pairs of nodes in equal proportions when moving data.** This section presents
|
||||||
two examples and illustrates how one can control Garage's behavior to obtain
|
two examples and illustrates how one can control Garage's behavior to obtain
|
||||||
the desired results.
|
the desired results.
|
||||||
|
@ -270,5 +270,5 @@ that is moved to node1).
|
||||||
This illustrates the second principle of the layout computation: **if there is
|
This illustrates the second principle of the layout computation: **if there is
|
||||||
a choice in moving data out of some nodes, then all links between pairs of
|
a choice in moving data out of some nodes, then all links between pairs of
|
||||||
nodes are used in equal proportions** (this is approximately true, there is
|
nodes are used in equal proportions** (this is approximately true, there is
|
||||||
randomness in the algorihtm to achieve this so there might be some small
|
randomness in the algorithm to achieve this so there might be some small
|
||||||
fluctuations, as we see above).
|
fluctuations, as we see above).
|
||||||
|
|
|
@ -5,7 +5,7 @@ weight = 40
|
||||||
|
|
||||||
Garage is meant to work on old, second-hand hardware.
|
Garage is meant to work on old, second-hand hardware.
|
||||||
In particular, this makes it likely that some of your drives will fail, and some manual intervention will be needed.
|
In particular, this makes it likely that some of your drives will fail, and some manual intervention will be needed.
|
||||||
Fear not! For Garage is fully equipped to handle drive failures, in most common cases.
|
Fear not! Garage is fully equipped to handle drive failures in most common cases.
|
||||||
|
|
||||||
## A note on availability of Garage
|
## A note on availability of Garage
|
||||||
|
|
||||||
|
@ -108,3 +108,57 @@ garage layout apply # once satisfied, apply the changes
|
||||||
|
|
||||||
Garage will then start synchronizing all required data on the new node.
|
Garage will then start synchronizing all required data on the new node.
|
||||||
This process can be monitored using the `garage stats -a` command.
|
This process can be monitored using the `garage stats -a` command.
|
||||||
|
|
||||||
|
## Replacement scenario 3: corrupted metadata {#corrupted_meta}
|
||||||
|
|
||||||
|
In some cases, your metadata DB file might become corrupted, for instance if
|
||||||
|
your node suffered a power outage and did not shut down properly. In this case,
|
||||||
|
you can recover without having to change the node ID and rebuild the cluster
|
||||||
|
layout. This means that data blocks will not need to be shuffled around; you
|
||||||
|
simply need to find a way to repair the metadata file. The best way is generally
|
||||||
|
to discard the corrupted file and recover it from another source.
|
||||||
|
|
||||||
|
First of all, start by locating the database file in your metadata directory,
|
||||||
|
which [depends on your `db_engine`
|
||||||
|
choice](@/documentation/reference-manual/configuration.md#db_engine). Then,
|
||||||
|
your recovery options are as follows:
|
||||||
|
|
||||||
|
- **Option 1: resyncing from other nodes.** In case your cluster is replicated
|
||||||
|
with two or three copies, you can simply delete the database file, and Garage
|
||||||
|
will resync from other nodes. To do so, stop Garage, delete the database file
|
||||||
|
or directory, and restart Garage. Then, do a full table repair by calling
|
||||||
|
`garage repair -a --yes tables`. This will take a bit of time to complete as
|
||||||
|
the new node will need to receive copies of the metadata tables from the
|
||||||
|
network.
|
||||||
|
|
||||||
|
- **Option 2: restoring a snapshot taken by Garage.** Since v0.9.4, Garage can
|
||||||
|
[automatically take regular
|
||||||
|
snapshots](@/documentation/reference-manual/configuration.md#metadata_auto_snapshot_interval)
|
||||||
|
of your metadata DB file. This file or directory should be located under
|
||||||
|
`<metadata_dir>/snapshots`, and is named according to the UTC time at which it
|
||||||
|
was taken. Stop Garage, discard the database file/directory and replace it by the
|
||||||
|
snapshot you want to use. For instance, in the case of LMDB:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd $METADATA_DIR
|
||||||
|
mv db.lmdb db.lmdb.bak
|
||||||
|
cp -r snapshots/2024-03-15T12:13:52Z db.lmdb
|
||||||
|
```
|
||||||
|
|
||||||
|
And for Sqlite:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd $METADATA_DIR
|
||||||
|
mv db.sqlite db.sqlite.bak
|
||||||
|
cp snapshots/2024-03-15T12:13:52Z db.sqlite
|
||||||
|
```
|
||||||
|
|
||||||
|
Then, restart Garage and run a full table repair by calling `garage repair -a
|
||||||
|
--yes tables`. This should run relatively fast as only the changes that
|
||||||
|
occurred since the snapshot was taken will need to be resynchronized. Of
|
||||||
|
course, if your cluster is not replicated, you will lose all changes that
|
||||||
|
occurred since the snapshot was taken.
|
||||||
|
|
||||||
|
- **Option 3: restoring a filesystem-level snapshot.** If you are using ZFS or
|
||||||
|
BTRFS to snapshot your metadata partition, refer to their specific
|
||||||
|
documentation on rolling back or copying files from an old snapshot.
|
||||||
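For instance, here is a sketch of Option 1 on a node managed by systemd; the service name, metadata path and LMDB engine are assumptions to adapt to your setup:

```bash
systemctl stop garage                                   # stop the local Garage daemon
mv /var/lib/garage/meta/db.lmdb /var/lib/garage/meta/db.lmdb.corrupted   # discard the corrupted DB
systemctl start garage                                  # restart with an empty metadata DB
garage repair -a --yes tables                           # full table repair across the cluster
```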
|
|
|
@ -9,7 +9,7 @@ On a new version release, there are 2 possibilities:
|
||||||
- protocols and data structures remained the same ➡️ this is a **minor upgrade**
|
- protocols and data structures remained the same ➡️ this is a **minor upgrade**
|
||||||
- protocols or data structures changed ➡️ this is a **major upgrade**
|
- protocols or data structures changed ➡️ this is a **major upgrade**
|
||||||
|
|
||||||
You can quickly now what type of update you will have to operate by looking at the version identifier:
|
You can quickly know what type of upgrade you will have to perform by looking at the version identifier:
|
||||||
when we require our users to do a major upgrade, we will always bump the first nonzero component of the version identifier
|
when we require our users to do a major upgrade, we will always bump the first nonzero component of the version identifier
|
||||||
(e.g. from v0.7.2 to v0.8.0).
|
(e.g. from v0.7.2 to v0.8.0).
|
||||||
Conversely, for versions that only require a minor upgrade, the first nonzero component will always stay the same (e.g. from v0.8.0 to v0.8.1).
|
Conversely, for versions that only require a minor upgrade, the first nonzero component will always stay the same (e.g. from v0.8.0 to v0.8.1).
|
||||||
|
@ -73,6 +73,18 @@ The entire procedure would look something like this:
|
||||||
You can do all of the nodes in a single zone at once as that won't impact global cluster availability.
|
You can do all of the nodes in a single zone at once as that won't impact global cluster availability.
|
||||||
Do not try to make a backup of the metadata folder of a running node.
|
Do not try to make a backup of the metadata folder of a running node.
|
||||||
|
|
||||||
|
**Since Garage v0.9.4,** you can use the `garage meta snapshot --all` command
|
||||||
|
to take a simultaneous snapshot of the metadata database files of all your
|
||||||
|
nodes. This avoids the tedious process of having to take them down one by
|
||||||
|
one before upgrading. Be aware that if automatic snapshotting is enabled,
|
||||||
|
Garage only keeps the last two snapshots and deletes older ones, so you might
|
||||||
|
want to disable automatic snapshotting in your upgraded configuration file
|
||||||
|
until you have confirmed that the upgrade ran successfully. In addition to
|
||||||
|
snapshotting the metadata databases of your nodes, you should back up at
|
||||||
|
least the `cluster_layout` file of one of your Garage instances (this file
|
||||||
|
should be the same on all nodes and you can copy it safely while Garage is
|
||||||
|
running).
|
||||||
|
|
||||||
3. Prepare your binaries and configuration files for the new Garage version
|
3. Prepare your binaries and configuration files for the new Garage version
|
||||||
|
|
||||||
4. Restart all nodes simultaneously in the new version
|
4. Restart all nodes simultaneously in the new version
|
||||||
|
|
|
@ -42,6 +42,13 @@ If a binary of the last version is not available for your architecture,
|
||||||
or if you want a build customized for your system,
|
or if you want a build customized for your system,
|
||||||
you can [build Garage from source](@/documentation/cookbook/from-source.md).
|
you can [build Garage from source](@/documentation/cookbook/from-source.md).
|
||||||
|
|
||||||
|
If none of these options work for you, you can also run Garage in a Docker
|
||||||
|
container. When using Docker, the commands used in this guide will not work
|
||||||
|
anymore. We recommend reading the tutorial on [configuring a
|
||||||
|
multi-node cluster](@/documentation/cookbook/real-world.md) to learn about
|
||||||
|
using Garage as a Docker container. For simplicity, a minimal command to launch
|
||||||
|
Garage using Docker is provided in this quick start guide as well.
|
||||||
|
|
||||||
|
|
||||||
## Configuring and starting Garage
|
## Configuring and starting Garage
|
||||||
|
|
||||||
|
@ -57,9 +64,9 @@ to generate unique and private secrets for security reasons:
|
||||||
cat > garage.toml <<EOF
|
cat > garage.toml <<EOF
|
||||||
metadata_dir = "/tmp/meta"
|
metadata_dir = "/tmp/meta"
|
||||||
data_dir = "/tmp/data"
|
data_dir = "/tmp/data"
|
||||||
db_engine = "lmdb"
|
db_engine = "sqlite"
|
||||||
|
|
||||||
replication_mode = "none"
|
replication_factor = 1
|
||||||
|
|
||||||
rpc_bind_addr = "[::]:3901"
|
rpc_bind_addr = "[::]:3901"
|
||||||
rpc_public_addr = "127.0.0.1:3901"
|
rpc_public_addr = "127.0.0.1:3901"
|
||||||
|
@ -79,11 +86,15 @@ index = "index.html"
|
||||||
api_bind_addr = "[::]:3904"
|
api_bind_addr = "[::]:3904"
|
||||||
|
|
||||||
[admin]
|
[admin]
|
||||||
api_bind_addr = "0.0.0.0:3903"
|
api_bind_addr = "[::]:3903"
|
||||||
admin_token = "$(openssl rand -base64 32)"
|
admin_token = "$(openssl rand -base64 32)"
|
||||||
|
metrics_token = "$(openssl rand -base64 32)"
|
||||||
EOF
|
EOF
|
||||||
```
|
```
|
||||||
|
|
||||||
|
See the [Configuration file format](https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/)
|
||||||
|
for complete options and values.
|
||||||
|
|
||||||
Now that your configuration file has been created, you may save it to the directory of your choice.
|
Now that your configuration file has been created, you may save it to the directory of your choice.
|
||||||
By default, Garage looks for **`/etc/garage.toml`.**
|
By default, Garage looks for **`/etc/garage.toml`.**
|
||||||
You can also store it somewhere else, but you will have to specify `-c path/to/garage.toml`
|
You can also store it somewhere else, but you will have to specify `-c path/to/garage.toml`
|
||||||
|
@ -110,10 +121,31 @@ garage -c path/to/garage.toml server
|
||||||
|
|
||||||
If you have placed the `garage.toml` file in `/etc` (its default location), you can simply run `garage server`.
|
If you have placed the `garage.toml` file in `/etc` (its default location), you can simply run `garage server`.
|
||||||
|
|
||||||
You can tune Garage's verbosity as follows (from less verbose to more verbose):
|
Alternatively, if you cannot or do not wish to run the Garage binary directly,
|
||||||
|
you may use Docker to run Garage in a container using the following command:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker run \
|
||||||
|
-d \
|
||||||
|
--name garaged \
|
||||||
|
-p 3900:3900 -p 3901:3901 -p 3902:3902 -p 3903:3903 \
|
||||||
|
-v /etc/garage.toml:/etc/garage.toml \
|
||||||
|
-v /var/lib/garage/meta:/var/lib/garage/meta \
|
||||||
|
-v /var/lib/garage/data:/var/lib/garage/data \
|
||||||
|
dxflrs/garage:v0.9.4
|
||||||
```
|
```
|
||||||
RUST_LOG=garage=info garage server
|
|
||||||
|
Under Linux, you can substitute `--network host` for `-p 3900:3900 -p 3901:3901 -p 3902:3902 -p 3903:3903`
|
||||||
|
|
||||||
|
#### Troubleshooting
|
||||||
|
|
||||||
|
Ensure your configuration file, `metadata_dir` and `data_dir` are readable by the user running the `garage` server or Docker.
|
||||||
|
|
||||||
|
You can tune Garage's verbosity by setting the `RUST_LOG=` environment variable. \
|
||||||
|
Available log levels are (from less verbose to more verbose): `error`, `warn`, `info` *(default)*, `debug` and `trace`.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
RUST_LOG=garage=info garage server # default
|
||||||
RUST_LOG=garage=debug garage server
|
RUST_LOG=garage=debug garage server
|
||||||
RUST_LOG=garage=trace garage server
|
RUST_LOG=garage=trace garage server
|
||||||
```
|
```
|
||||||
|
@ -129,6 +161,9 @@ It uses values from the TOML configuration file to find the Garage daemon runnin
|
||||||
local node, therefore if your configuration file is not at `/etc/garage.toml` you will
|
local node, therefore if your configuration file is not at `/etc/garage.toml` you will
|
||||||
again have to specify `-c path/to/garage.toml` at each invocation.
|
again have to specify `-c path/to/garage.toml` at each invocation.
|
||||||
|
|
||||||
|
If you are running Garage in a Docker container, you can set `alias garage="docker exec -ti <container name> /garage"`
|
||||||
|
to use the Garage binary inside your container.
|
||||||
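For example, a minimal sketch reusing the container name from the Docker command earlier in this guide:

```bash
# Assumes the container was started with --name garaged as shown above
alias garage="docker exec -ti garaged /garage"
garage status   # now runs the CLI inside the container
```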
|
|
||||||
If the `garage` CLI is able to correctly detect the parameters of your local Garage node,
|
If the `garage` CLI is able to correctly detect the parameters of your local Garage node,
|
||||||
the following command should be enough to show the status of your cluster:
|
the following command should be enough to show the status of your cluster:
|
||||||
|
|
||||||
|
@ -249,7 +284,7 @@ garage bucket info nextcloud-bucket
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
## Uploading and downlading from Garage
|
## Uploading and downloading from Garage
|
||||||
|
|
||||||
To download and upload files on Garage, we can use a third-party tool named `awscli`.
|
To download and upload files on Garage, we can use a third-party tool named `awscli`.
|
||||||
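As a preview, here is a hedged sketch of what this looks like; the credentials are placeholders to be replaced with a key created via the `garage key` commands, and the bucket name reuses the `nextcloud-bucket` example from above:

```bash
export AWS_ACCESS_KEY_ID='<your key ID>'          # placeholder
export AWS_SECRET_ACCESS_KEY='<your secret key>'  # placeholder
# Point awscli at the local S3 API endpoint of Garage
aws --endpoint-url http://localhost:3900 --region garage \
    s3 cp ./example.txt s3://nextcloud-bucket/
aws --endpoint-url http://localhost:3900 --region garage \
    s3 ls s3://nextcloud-bucket/
```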
|
|
||||||
|
|
|
@ -8,8 +8,8 @@ listen address is specified in the `[admin]` section of the configuration
|
||||||
file (see [configuration file
|
file (see [configuration file
|
||||||
reference](@/documentation/reference-manual/configuration.md))
|
reference](@/documentation/reference-manual/configuration.md))
|
||||||
|
|
||||||
**WARNING.** At this point, there is no comittement to stability of the APIs described in this document.
|
**WARNING.** At this point, there is no commitment to the stability of the APIs described in this document.
|
||||||
We will bump the version numbers prefixed to each API endpoint at each time the syntax
|
We will bump the version numbers prefixed to each API endpoint each time the syntax
|
||||||
or semantics change, meaning that code that relies on these endpoints will break
|
or semantics change, meaning that code that relies on these endpoints will break
|
||||||
when changes are introduced.
|
when changes are introduced.
|
||||||
|
|
||||||
|
@ -22,7 +22,7 @@ Versions:
|
||||||
|
|
||||||
## Access control
|
## Access control
|
||||||
|
|
||||||
The admin API uses two different tokens for acces control, that are specified in the config file's `[admin]` section:
|
The admin API uses two different tokens for access control, which are specified in the config file's `[admin]` section:
|
||||||
|
|
||||||
- `metrics_token`: the token for accessing the Metrics endpoint (if this token
|
- `metrics_token`: the token for accessing the Metrics endpoint (if this token
|
||||||
is not set in the config file, the Metrics endpoint can be accessed without
|
is not set in the config file, the Metrics endpoint can be accessed without
|
||||||
|
@ -88,8 +88,8 @@ Consult the full health check API endpoint at /v0/health for more details
|
||||||
|
|
||||||
### On-demand TLS `GET /check`
|
### On-demand TLS `GET /check`
|
||||||
|
|
||||||
To prevent abuses for on-demand TLS, Caddy developpers have specified an endpoint that can be queried by the reverse proxy
|
To prevent abuse of on-demand TLS, Caddy developers have specified an endpoint that can be queried by the reverse proxy
|
||||||
to know if a given domain is allowed to get a certificate. Garage implements this endpoints to tell if a given domain is handled by Garage or is garbage.
|
to know if a given domain is allowed to get a certificate. Garage implements this endpoint to tell if a given domain is handled by Garage or is garbage.
|
||||||
|
|
||||||
Garage responds with the following logic:
|
Garage responds with the following logic:
|
||||||
- If the domain matches the pattern `<bucket-name>.<s3_api.root_domain>`, returns 200 OK
|
- If the domain matches the pattern `<bucket-name>.<s3_api.root_domain>`, returns 200 OK
|
||||||
|
@ -102,7 +102,7 @@ You must manually declare the domain in your reverse-proxy. Idem for K2V.*
|
||||||
|
|
||||||
*Note 2: buckets in a user's namespace are not yet supported by this endpoint; this is a current limitation.*
|
*Note 2: buckets in a user's namespace are not yet supported by this endpoint; this is a current limitation.*
|
||||||
|
|
||||||
**Example:** Suppose a Garage instance configured with `s3_api.root_domain = .s3.garage.localhost` and `s3_web.root_domain = .web.garage.localhost`.
|
**Example:** Suppose a Garage instance is configured with `s3_api.root_domain = .s3.garage.localhost` and `s3_web.root_domain = .web.garage.localhost`.
|
||||||
|
|
||||||
With a private `media` bucket (name in the global namespace, website is disabled), the endpoint will feature the following behavior:
|
With a private `media` bucket (name in the global namespace, website is disabled), the endpoint will feature the following behavior:
|
||||||
|
|
||||||
|
|
|
@ -8,30 +8,39 @@ weight = 20
|
||||||
Here is an example `garage.toml` configuration file that illustrates all of the possible options:
|
Here is an example `garage.toml` configuration file that illustrates all of the possible options:
|
||||||
|
|
||||||
```toml
|
```toml
|
||||||
replication_mode = "3"
|
replication_factor = 3
|
||||||
|
consistency_mode = "consistent"
|
||||||
|
|
||||||
metadata_dir = "/var/lib/garage/meta"
|
metadata_dir = "/var/lib/garage/meta"
|
||||||
data_dir = "/var/lib/garage/data"
|
data_dir = "/var/lib/garage/data"
|
||||||
metadata_fsync = true
|
metadata_fsync = true
|
||||||
data_fsync = false
|
data_fsync = false
|
||||||
|
disable_scrub = false
|
||||||
|
use_local_tz = false
|
||||||
|
metadata_auto_snapshot_interval = "6h"
|
||||||
|
|
||||||
db_engine = "lmdb"
|
db_engine = "lmdb"
|
||||||
|
|
||||||
block_size = 1048576
|
block_size = "1M"
|
||||||
|
block_ram_buffer_max = "256MiB"
|
||||||
|
|
||||||
sled_cache_capacity = "128MiB"
|
|
||||||
sled_flush_every_ms = 2000
|
|
||||||
lmdb_map_size = "1T"
|
lmdb_map_size = "1T"
|
||||||
|
|
||||||
compression_level = 1
|
compression_level = 1
|
||||||
|
|
||||||
rpc_secret = "4425f5c26c5e11581d3223904324dcb5b5d5dfb14e5e7f35e38c595424f5f1e6"
|
rpc_secret = "4425f5c26c5e11581d3223904324dcb5b5d5dfb14e5e7f35e38c595424f5f1e6"
|
||||||
rpc_bind_addr = "[::]:3901"
|
rpc_bind_addr = "[::]:3901"
|
||||||
|
rpc_bind_outgoing = false
|
||||||
rpc_public_addr = "[fc00:1::1]:3901"
|
rpc_public_addr = "[fc00:1::1]:3901"
|
||||||
|
# or set rpc_public_addr_subnet to filter down autodiscovery to a subnet:
|
||||||
|
# rpc_public_addr_subnet = "2001:0db8:f00:b00::/64"
|
||||||
|
|
||||||
|
|
||||||
|
allow_world_readable_secrets = false
|
||||||
|
|
||||||
bootstrap_peers = [
|
bootstrap_peers = [
|
||||||
"563e1ac825ee3323aa441e72c26d1030d6d4414aeb3dd25287c531e7fc2bc95d@[fc00:1::1]:3901",
|
"563e1ac825ee3323aa441e72c26d1030d6d4414aeb3dd25287c531e7fc2bc95d@[fc00:1::1]:3901",
|
||||||
"86f0f26ae4afbd59aaf9cfb059eefac844951efd5b8caeec0d53f4ed6c85f332[fc00:1::2]:3901",
|
"86f0f26ae4afbd59aaf9cfb059eefac844951efd5b8caeec0d53f4ed6c85f332@[fc00:1::2]:3901",
|
||||||
"681456ab91350f92242e80a531a3ec9392cb7c974f72640112f90a600d7921a4@[fc00:B::1]:3901",
|
"681456ab91350f92242e80a531a3ec9392cb7c974f72640112f90a600d7921a4@[fc00:B::1]:3901",
|
||||||
"212fd62eeaca72c122b45a7f4fa0f55e012aa5e24ac384a72a3016413fa724ff@[fc00:F::1]:3901",
|
"212fd62eeaca72c122b45a7f4fa0f55e012aa5e24ac384a72a3016413fa724ff@[fc00:F::1]:3901",
|
||||||
]
|
]
|
||||||
|
@ -68,8 +77,8 @@ root_domain = ".web.garage"
|
||||||
|
|
||||||
[admin]
|
[admin]
|
||||||
api_bind_addr = "0.0.0.0:3903"
|
api_bind_addr = "0.0.0.0:3903"
|
||||||
metrics_token = "cacce0b2de4bc2d9f5b5fdff551e01ac1496055aed248202d415398987e35f81"
|
metrics_token = "BCAdFjoa9G0KJR0WXnHHm7fs1ZAbfpI8iIZ+Z/a2NgI="
|
||||||
admin_token = "ae8cb40ea7368bbdbb6430af11cca7da833d3458a5f52086f4e805a570fb5c2a"
|
admin_token = "UkLeGWEvHnXBqnueR3ISEMWpOnm40jH2tM2HnnL/0F4="
|
||||||
trace_sink = "http://localhost:4317"
|
trace_sink = "http://localhost:4317"
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -79,33 +88,40 @@ The following gives details about each available configuration option.
|
||||||
|
|
||||||
### Index
|
### Index
|
||||||
|
|
||||||
|
[Environment variables](#env_variables).
|
||||||
|
|
||||||
Top-level configuration options:
|
Top-level configuration options:
|
||||||
|
[`allow_world_readable_secrets`](#allow_world_readable_secrets),
|
||||||
|
[`block_ram_buffer_max`](#block_ram_buffer_max),
|
||||||
[`block_size`](#block_size),
|
[`block_size`](#block_size),
|
||||||
[`bootstrap_peers`](#bootstrap_peers),
|
[`bootstrap_peers`](#bootstrap_peers),
|
||||||
[`compression_level`](#compression_level),
|
[`compression_level`](#compression_level),
|
||||||
[`data_dir`](#metadata_dir),
|
[`data_dir`](#data_dir),
|
||||||
[`data_fsync`](#data_fsync),
|
[`data_fsync`](#data_fsync),
|
||||||
[`db_engine`](#db_engine),
|
[`db_engine`](#db_engine),
|
||||||
|
[`disable_scrub`](#disable_scrub),
|
||||||
|
[`use_local_tz`](#use_local_tz),
|
||||||
[`lmdb_map_size`](#lmdb_map_size),
|
[`lmdb_map_size`](#lmdb_map_size),
|
||||||
|
[`metadata_auto_snapshot_interval`](#metadata_auto_snapshot_interval),
|
||||||
[`metadata_dir`](#metadata_dir),
|
[`metadata_dir`](#metadata_dir),
|
||||||
[`metadata_fsync`](#metadata_fsync),
|
[`metadata_fsync`](#metadata_fsync),
|
||||||
[`replication_mode`](#replication_mode),
|
[`replication_factor`](#replication_factor),
|
||||||
|
[`consistency_mode`](#consistency_mode),
|
||||||
[`rpc_bind_addr`](#rpc_bind_addr),
|
[`rpc_bind_addr`](#rpc_bind_addr),
|
||||||
|
[`rpc_bind_outgoing`](#rpc_bind_outgoing),
|
||||||
[`rpc_public_addr`](#rpc_public_addr),
|
[`rpc_public_addr`](#rpc_public_addr),
|
||||||
[`rpc_secret`](#rpc_secret),
|
[`rpc_public_addr_subnet`](#rpc_public_addr_subnet),
|
||||||
[`rpc_secret_file`](#rpc_secret),
|
[`rpc_secret`/`rpc_secret_file`](#rpc_secret).
|
||||||
[`sled_cache_capacity`](#sled_cache_capacity),
|
|
||||||
[`sled_flush_every_ms`](#sled_flush_every_ms).
|
|
||||||
|
|
||||||
The `[consul_discovery]` section:
|
The `[consul_discovery]` section:
|
||||||
[`api`](#consul_api),
|
[`api`](#consul_api),
|
||||||
[`ca_cert`](#consul_ca_cert),
|
[`ca_cert`](#consul_ca_cert),
|
||||||
[`client_cert`](#consul_client_cert),
|
[`client_cert`](#consul_client_cert_and_key),
|
||||||
[`client_key`](#consul_client_cert),
|
[`client_key`](#consul_client_cert_and_key),
|
||||||
[`consul_http_addr`](#consul_http_addr),
|
[`consul_http_addr`](#consul_http_addr),
|
||||||
[`meta`](#consul_tags),
|
[`meta`](#consul_tags_and_meta),
|
||||||
[`service_name`](#consul_service_name),
|
[`service_name`](#consul_service_name),
|
||||||
[`tags`](#consul_tags),
|
[`tags`](#consul_tags_and_meta),
|
||||||
[`tls_skip_verify`](#consul_tls_skip_verify),
|
[`tls_skip_verify`](#consul_tls_skip_verify),
|
||||||
[`token`](#consul_token).
|
[`token`](#consul_token).
|
||||||
|
|
||||||
|
@ -125,20 +141,36 @@ The `[s3_web]` section:
|
||||||
|
|
||||||
The `[admin]` section:
|
The `[admin]` section:
|
||||||
[`api_bind_addr`](#admin_api_bind_addr),
|
[`api_bind_addr`](#admin_api_bind_addr),
|
||||||
[`metrics_token`](#admin_metrics_token),
|
[`metrics_token`/`metrics_token_file`](#admin_metrics_token),
|
||||||
[`metrics_token_file`](#admin_metrics_token),
|
[`admin_token`/`admin_token_file`](#admin_token),
|
||||||
[`admin_token`](#admin_token),
|
|
||||||
[`admin_token_file`](#admin_token),
|
|
||||||
[`trace_sink`](#admin_trace_sink),
|
[`trace_sink`](#admin_trace_sink),
|
||||||
|
|
||||||
|
### Environment variables {#env_variables}
|
||||||
|
|
||||||
|
The following configuration parameter must be specified as an environment
|
||||||
|
variable; it does not exist in the configuration file:
|
||||||
|
|
||||||
|
- `GARAGE_LOG_TO_SYSLOG` (since v0.9.4): set this to `1` or `true` to make the
|
||||||
|
Garage daemon send its logs to `syslog` (using the libc `syslog` function)
|
||||||
|
instead of printing to stderr.
|
||||||
|
|
||||||
|
The following environment variables can be used to override the corresponding
|
||||||
|
values in the configuration file:
|
||||||
|
|
||||||
|
- [`GARAGE_ALLOW_WORLD_READABLE_SECRETS`](#allow_world_readable_secrets)
|
||||||
|
- [`GARAGE_RPC_SECRET` and `GARAGE_RPC_SECRET_FILE`](#rpc_secret)
|
||||||
|
- [`GARAGE_ADMIN_TOKEN` and `GARAGE_ADMIN_TOKEN_FILE`](#admin_token)
|
||||||
|
- [`GARAGE_METRICS_TOKEN` and `GARAGE_METRICS_TOKEN_FILE`](#admin_metrics_token)
|
||||||
|
|
||||||
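For example, a sketch of launching Garage with some of these variables set; the secret file path is an assumption for illustration:

```bash
# Log to syslog and read the RPC secret from a file instead of garage.toml
GARAGE_LOG_TO_SYSLOG=1 \
GARAGE_RPC_SECRET_FILE=/run/secrets/garage_rpc_secret \
garage -c /etc/garage.toml server
```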
|
|
||||||
### Top-level configuration options
|
### Top-level configuration options
|
||||||
|
|
||||||
#### `replication_mode` {#replication_mode}
|
#### `replication_factor` {#replication_factor}
|
||||||
|
|
||||||
Garage supports the following replication modes:
|
The replication factor can be any positive integer smaller than or equal to the node count in your cluster.
|
||||||
|
The chosen replication factor has a big impact on the cluster's failure tolerance and performance characteristics.
|
||||||
|
|
||||||
- `none` or `1`: data stored on Garage is stored on a single node. There is no
|
- `1`: data stored on Garage is stored on a single node. There is no
|
||||||
redundancy, and data will be unavailable as soon as one node fails or its
|
redundancy, and data will be unavailable as soon as one node fails or its
|
||||||
network is disconnected. Do not use this for anything else than test
|
network is disconnected. Do not use this for anything else than test
|
||||||
deployments.
|
deployments.
|
||||||
|
@ -149,17 +181,6 @@ Garage supports the following replication modes:
|
||||||
before losing data. Data remains available in read-only mode when one node is
|
before losing data. Data remains available in read-only mode when one node is
|
||||||
down, but write operations will fail.
|
down, but write operations will fail.
|
||||||
|
|
||||||
- `2-dangerous`: a variant of mode `2`, where written objects are written to
|
|
||||||
the second replica asynchronously. This means that Garage will return `200
|
|
||||||
OK` to a PutObject request before the second copy is fully written (or even
|
|
||||||
before it even starts being written). This means that data can more easily
|
|
||||||
be lost if the node crashes before a second copy can be completed. This
|
|
||||||
also means that written objects might not be visible immediately in read
|
|
||||||
operations. In other words, this mode severely breaks the consistency and
|
|
||||||
durability guarantees of standard Garage cluster operation. Benefits of
|
|
||||||
this mode: you can still write to your cluster when one node is
|
|
||||||
unavailable.
|
|
||||||
|
|
||||||
- `3`: data stored on Garage will be stored on three different nodes, if
|
- `3`: data stored on Garage will be stored on three different nodes, if
|
||||||
possible each in a different zone. Garage tolerates two node failures, or
|
possible each in a different zone. Garage tolerates two node failures, or
|
||||||
several node failures but in no more than two zones (in a deployment with at
|
several node failures but in no more than two zones (in a deployment with at
|
||||||
|
@ -167,55 +188,84 @@ Garage supports the following replication modes:
|
||||||
or node failures are only in a single zone, reading and writing data to
|
or node failures are only in a single zone, reading and writing data to
|
||||||
Garage can continue normally.
|
Garage can continue normally.
|
||||||
|
|
||||||
- `3-degraded`: a variant of replication mode `3`, that lowers the read
|
- `5`, `7`, ...: When setting the replication factor above 3, it is most useful to
|
||||||
quorum to `1`, to allow you to read data from your cluster when several
|
choose an odd value, since for every two copies added, one more node can fail
|
||||||
nodes (or nodes in several zones) are unavailable. In this mode, Garage
|
before losing the ability to read and write to the cluster.
|
||||||
does not provide read-after-write consistency anymore. The write quorum is
|
|
||||||
still 2, ensuring that data successfully written to Garage is stored on at
|
|
||||||
least two nodes.
|
|
||||||
|
|
||||||
- `3-dangerous`: a variant of replication mode `3` that lowers both the read
|
|
||||||
and write quorums to `1`, to allow you to both read and write to your
|
|
||||||
cluster when several nodes (or nodes in several zones) are unavailable. It
|
|
||||||
is the least consistent mode of operation proposed by Garage, and also one
|
|
||||||
that should probably never be used.
|
|
||||||
|
|
||||||
Note that with replication factors `2` and `3`,
|
Note that with replication factors `2` and `3`,
|
||||||
if at least the same number of zones are available, an arbitrary number of failures in
|
if at least the same number of zones are available, an arbitrary number of failures in
|
||||||
any given zone is tolerated as copies of data will be spread over several zones.
|
any given zone is tolerated as copies of data will be spread over several zones.
|
||||||
|
|
||||||
**Make sure `replication_mode` is the same in the configuration files of all nodes.
|
**Make sure `replication_factor` is the same in the configuration files of all nodes.
|
||||||
Never run a Garage cluster where that is not the case.**
|
Never run a Garage cluster where that is not the case.**
|
||||||
|
|
||||||
|
It is technically possible to change the replication factor although it's a
|
||||||
|
dangerous operation that is not officially supported. This requires you to
|
||||||
|
delete the existing cluster layout and create a new layout from scratch,
|
||||||
|
meaning that a full rebalancing of your cluster's data will be needed. To do
|
||||||
|
it, shut down your cluster entirely, delete the `cluster_layout` files in the
|
||||||
|
meta directories of all your nodes, update all your configuration files with
|
||||||
|
the new `replication_factor` parameter, restart your cluster, and then create a
|
||||||
|
new layout with all the nodes you want to keep. Rebalancing data will take
|
||||||
|
some time, and data might temporarily appear unavailable to your users.
|
||||||
|
It is recommended to shut down public access to the cluster while rebalancing
|
||||||
|
is in progress. In theory, no data should be lost as rebalancing is a
|
||||||
|
routine operation for Garage, although we cannot guarantee you that everything
|
||||||
|
will go right in such an extreme scenario.
|
||||||
|
|
||||||
|
#### `consistency_mode` {#consistency_mode}
|
||||||
|
|
||||||
|
The consistency mode setting determines the read and write behaviour of your cluster.
|
||||||
|
|
||||||
|
- `consistent`: The default setting. This is what the paragraph above describes.
|
||||||
|
The read and write quorum will be determined so that read-after-write consistency
|
||||||
|
is guaranteed.
|
||||||
|
- `degraded`: Lowers the read
|
||||||
|
quorum to `1`, to allow you to read data from your cluster when several
|
||||||
|
nodes (or nodes in several zones) are unavailable. In this mode, Garage
|
||||||
|
does not provide read-after-write consistency anymore.
|
||||||
|
The write quorum stays the same as in the `consistent` mode, ensuring that
|
||||||
|
data successfully written to Garage is stored on multiple nodes (depending
|
||||||
|
on the replication factor).
|
||||||
|
- `dangerous`: This mode lowers both the read
|
||||||
|
and write quorums to `1`, to allow you to both read and write to your
|
||||||
|
cluster when several nodes (or nodes in several zones) are unavailable. It
|
||||||
|
is the least consistent mode of operation proposed by Garage, and also one
|
||||||
|
that should probably never be used.
|
||||||
|
|
||||||
|
Changing the `consistency_mode` between modes while leaving the `replication_factor` untouched
|
||||||
|
(e.g. setting your node's `consistency_mode` to `degraded` when it was previously unset, or from
|
||||||
|
`dangerous` to `consistent`), can be done easily by just changing the `consistency_mode`
|
||||||
|
parameter in your config files and restarting all your Garage nodes.
|
||||||
|
|
||||||
|
The consistency mode can be used together with various replication factors, to achieve
|
||||||
|
a wide range of read and write characteristics. Some examples:
|
||||||
|
|
||||||
|
- Replication factor `2`, consistency mode `degraded`: While this mode
|
||||||
|
technically exists, its properties are the same as with consistency mode `consistent`,
|
||||||
|
since the read quorum with replication factor `2`, consistency mode `consistent` is already 1.
|
||||||
|
|
||||||
|
- Replication factor `2`, consistency mode `dangerous`: written objects are written to
|
||||||
|
the second replica asynchronously. This means that Garage will return `200
|
||||||
|
OK` to a PutObject request before the second copy is fully written (or even
|
||||||
|
before it even starts being written). This means that data can more easily
|
||||||
|
be lost if the node crashes before a second copy can be completed. This
|
||||||
|
also means that written objects might not be visible immediately in read
|
||||||
|
operations. In other words, this configuration severely breaks the consistency and
|
||||||
|
durability guarantees of standard Garage cluster operation. Benefits of
|
||||||
|
this configuration: you can still write to your cluster when one node is
|
||||||
|
unavailable.
|
||||||
|
|
||||||
The quorums associated with each combination of consistency mode and replication factor are described below:
|
The quorums associated with each combination of consistency mode and replication factor are described below:
|
||||||
|
|
||||||
| `replication_mode` | Number of replicas | Write quorum | Read quorum | Read-after-write consistency? |
|
| `consistency_mode` | `replication_factor` | Write quorum | Read quorum | Read-after-write consistency? |
|
||||||
| ------------------ | ------------------ | ------------ | ----------- | ----------------------------- |
|
| ------------------ | -------------------- | ------------ | ----------- | ----------------------------- |
|
||||||
| `none` or `1` | 1 | 1 | 1 | yes |
|
| `consistent` | 1 | 1 | 1 | yes |
|
||||||
| `2` | 2 | 2 | 1 | yes |
|
| `consistent` | 2 | 2 | 1 | yes |
|
||||||
| `2-dangerous` | 2 | 1 | 1 | NO |
|
| `dangerous` | 2 | 1 | 1 | NO |
|
||||||
| `3` | 3 | 2 | 2 | yes |
|
| `consistent` | 3 | 2 | 2 | yes |
|
||||||
| `3-degraded` | 3 | 2 | 1 | NO |
|
| `degraded` | 3 | 2 | 1 | NO |
|
||||||
| `3-dangerous` | 3 | 1 | 1 | NO |
|
| `dangerous` | 3 | 1 | 1 | NO |
|
||||||
|
|
||||||
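As a concrete illustration, here is a sketch of how the two settings combine in `garage.toml` (the values below are examples only), using the same heredoc style as the quick start guide:

```bash
# Example only: a 3-node cluster that keeps a write quorum of 2 but accepts
# reads from a single replica (giving up read-after-write consistency).
cat >> garage.toml <<EOF
replication_factor = 3
consistency_mode = "degraded"
EOF
```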
Changing the `replication_mode` between modes with the same number of replicas
|
|
||||||
(e.g. from `3` to `3-degraded`, or from `2-dangerous` to `2`), can be done easily by
|
|
||||||
just changing the `replication_mode` parameter in your config files and restarting all your
|
|
||||||
Garage nodes.
|
|
||||||
|
|
||||||
It is also technically possible to change the replication mode to a mode with a
|
|
||||||
different numbers of replicas, although it's a dangerous operation that is not
|
|
||||||
officially supported. This requires you to delete the existing cluster layout
|
|
||||||
and create a new layout from scratch, meaning that a full rebalancing of your
|
|
||||||
cluster's data will be needed. To do it, shut down your cluster entirely,
|
|
||||||
delete the `custer_layout` files in the meta directories of all your nodes,
|
|
||||||
update all your configuration files with the new `replication_mode` parameter,
|
|
||||||
restart your cluster, and then create a new layout with all the nodes you want
|
|
||||||
to keep. Rebalancing data will take some time, and data might temporarily
|
|
||||||
appear unavailable to your users. It is recommended to shut down public access
|
|
||||||
to the cluster while rebalancing is in progress. In theory, no data should be
|
|
||||||
lost as rebalancing is a routine operation for Garage, although we cannot
|
|
||||||
guarantee you that everything will go right in such an extreme scenario.
|
|
||||||
|
|
||||||
#### `metadata_dir` {#metadata_dir}
|
#### `metadata_dir` {#metadata_dir}
|
||||||
|
|
||||||
|
@ -251,32 +301,43 @@ Since `v0.8.0`, Garage can use alternative storage backends as follows:
|
||||||
|
|
||||||
| DB engine | `db_engine` value | Database path |
|
| DB engine | `db_engine` value | Database path |
|
||||||
| --------- | ----------------- | ------------- |
|
| --------- | ----------------- | ------------- |
|
||||||
| [LMDB](https://www.lmdb.tech) (default since `v0.9.0`) | `"lmdb"` | `<metadata_dir>/db.lmdb/` |
|
| [LMDB](https://www.symas.com/lmdb) (since `v0.8.0`, default since `v0.9.0`) | `"lmdb"` | `<metadata_dir>/db.lmdb/` |
|
||||||
| [Sled](https://sled.rs) (default up to `v0.8.0`) | `"sled"` | `<metadata_dir>/db/` |
|
| [Sqlite](https://sqlite.org) (since `v0.8.0`) | `"sqlite"` | `<metadata_dir>/db.sqlite` |
|
||||||
| [Sqlite](https://sqlite.org) | `"sqlite"` | `<metadata_dir>/db.sqlite` |
|
| [Sled](https://sled.rs) (old default, removed since `v1.0`) | `"sled"` | `<metadata_dir>/db/` |
|
||||||
|
|
||||||
Sled was the only database engine up to Garage v0.7.0. Performance issues and
|
Sled was supported until Garage v0.9.x, and was removed in Garage v1.0.
|
||||||
API limitations of Sled prompted the addition of alternative engines in v0.8.0.
|
You can still use an older binary of Garage (e.g. v0.9.4) to migrate
|
||||||
Since v0.9.0, LMDB is the default engine instead of Sled, and Sled is
|
old Sled metadata databases to another engine.
|
||||||
deprecated. We plan to remove Sled in Garage v1.0.
|
|
||||||
|
|
||||||
Performance characteristics of the different DB engines are as follows:
|
Performance characteristics of the different DB engines are as follows:
|
||||||
|
|
||||||
- Sled: tends to produce large data files and also has performance issues,
|
- LMDB: the recommended database engine for high-performance distributed clusters.
|
||||||
especially when the metadata folder is on a traditional HDD and not on SSD.
|
LMDB works very well, but is known to have the following limitations:
|
||||||
|
|
||||||
- LMDB: the recommended database engine on 64-bit systems, much more
|
- The data format of LMDB is not portable between architectures, so for
|
||||||
space-efficient and slightly faster. Note that the data format of LMDB is not
|
instance the Garage database of an x86-64 node cannot be moved to an ARM64
|
||||||
portable between architectures, so for instance the Garage database of an
|
node.
|
||||||
x86-64 node cannot be moved to an ARM64 node. Also note that, while LMDB can
|
|
||||||
technically be used on 32-bit systems, this will limit your node to very
|
- While LMDB can technically be used on 32-bit systems, this will limit your
|
||||||
small database sizes due to how LMDB works; it is therefore not recommended.
|
node to very small database sizes due to how LMDB works; it is therefore
|
||||||
|
not recommended.
|
||||||
|
|
||||||
|
- Several users have reported corrupted LMDB database files after an unclean
|
||||||
|
shutdown (e.g. a power outage). This situation can generally be recovered
|
||||||
|
from if your cluster is geo-replicated (by rebuilding your metadata db from
|
||||||
|
other nodes), or if you have saved regular snapshots at the filesystem
|
||||||
|
level.
|
||||||
|
|
||||||
|
- Keys in LMDB are limited to 511 bytes. This limit translates to limits on
|
||||||
|
object keys in S3 and sort keys in K2V that are limited to 479 bytes.
|
||||||
|
|
||||||
- Sqlite: Garage supports Sqlite as an alternative storage backend for
|
- Sqlite: Garage supports Sqlite as an alternative storage backend for
|
||||||
metadata, and although it has not been tested as much, it is expected to work
|
metadata, which does not have the issues listed above for LMDB.
|
||||||
satisfactorily. Since Garage v0.9.0, performance issues have largely been
|
On versions 0.8.x and earlier, Sqlite should be avoided due to abysmal
|
||||||
fixed by allowing for a no-fsync mode (see `metadata_fsync`). Sqlite does not
|
performance, which was fixed with the addition of `metadata_fsync`.
|
||||||
have the database size limitation of LMDB on 32-bit systems.
|
Sqlite is still probably slower than LMDB due to the way we use it,
|
||||||
|
so it is not the best choice for high-performance storage clusters,
|
||||||
|
but it should work fine in many cases.
|
||||||
|
|
||||||

It is possible to convert Garage's metadata directory from one format to another
using the `garage convert-db` command, which should be used as follows:
@ -313,7 +374,6 @@ Here is how this option impacts the different database engines:

| Database | `metadata_fsync = false` (default) | `metadata_fsync = true` |
|----------|------------------------------------|-------------------------------|
| Sqlite   | `PRAGMA synchronous = OFF`         | `PRAGMA synchronous = NORMAL` |
| LMDB     | `MDB_NOMETASYNC` + `MDB_NOSYNC`    | `MDB_NOMETASYNC`              |
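
For illustration, enabling the synchronous behaviour summarized in the table above
is a single line in the configuration file (the value shown is an assumption, pick
what matches your durability requirements):

```toml
# Force metadata to be written durably to disk on each write,
# trading write performance for crash safety of the metadata DB.
metadata_fsync = true
```
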
@ -332,6 +392,50 @@ at the cost of a moderate drop in write performance.

Similarly to `metadata_fsync`, this is likely not necessary
if geographical replication is used.

#### `metadata_auto_snapshot_interval` (since Garage v0.9.4) {#metadata_auto_snapshot_interval}

If this value is set, Garage will automatically take a snapshot of the metadata
DB file at a regular interval and save it in the metadata directory.
This parameter can take any duration string that can be parsed by
the [`parse_duration`](https://docs.rs/parse_duration/latest/parse_duration/#syntax) crate.

Snapshots allow you to recover from situations where the metadata DB file is
corrupted, for instance after an unclean shutdown. See [this
page](@/documentation/operations/recovering.md#corrupted_meta) for details.
Garage keeps only the two most recent snapshots of the metadata DB and deletes
older ones automatically.

Note that taking a metadata snapshot is a relatively intensive operation as the
entire data file is copied. A snapshot being taken might have performance
impacts on the Garage node while it is running. If the cluster is under heavy
write load when a snapshot operation is running, this might also cause the
database file to grow in size significantly as pages cannot be recycled easily.
For this reason, it might be better to use filesystem-level snapshots instead
if possible.
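
As a minimal sketch, enabling automatic snapshots could look like the following;
the `"6h"` interval is only an illustrative value, any duration string accepted by
`parse_duration` should work:

```toml
# Take a snapshot of the metadata DB every 6 hours (illustrative value)
# and keep it in the metadata directory.
metadata_auto_snapshot_interval = "6h"
```
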
#### `disable_scrub` {#disable_scrub}

By default, Garage runs a scrub of the data directory approximately once per
month, with a random delay to avoid all nodes running at the same time. When
it scrubs the data directory, Garage will read all of the data files stored on
disk to check their integrity, and will rebuild any data files that it finds
corrupted, using the remaining valid copies stored on other nodes.
See [this page](@/documentation/operations/durability-repairs.md#scrub) for details.

Set the `disable_scrub` configuration value to `true` if you don't need Garage
to scrub the data directory, for instance if you are already scrubbing at the
filesystem level. Note that in this case, if you find a corrupted data file,
you should delete it from the data directory and then call `garage repair
blocks` on the node to ensure that it re-obtains a copy from another node on
the network.

#### `use_local_tz` {#use_local_tz}

By default, Garage runs the lifecycle worker every day at midnight in UTC. Set the
`use_local_tz` configuration value to `true` if you want Garage to run the
lifecycle worker at midnight in your local timezone. If you have multiple nodes,
you should also ensure that each node has the same timezone configuration.

#### `block_size` {#block_size}

Garage splits stored objects in consecutive chunks of size `block_size`
@ -347,20 +451,36 @@ files will remain available. This however means that chunks from existing files
will not be deduplicated with chunks from newly uploaded files, meaning you
might use more storage space than is optimally possible.
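
For illustration, the chunk size is a single configuration parameter; the value
below is an example only, and the exact size syntax accepted may depend on your
Garage version:

```toml
# Size of the chunks that objects are split into (illustrative value).
block_size = "10M"
```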

#### `block_ram_buffer_max` (since v0.9.4) {#block_ram_buffer_max}

A limit on the total size of data blocks kept in RAM by S3 API nodes while they
are waiting to be sent to storage nodes asynchronously.

Explanation: since Garage wants to tolerate node failures, it uses quorum
writes to send data blocks to storage nodes: try to write the block to three
nodes, and return ok as soon as two writes complete. So even if all three nodes
are online, the third write always completes asynchronously. In general, there
are not many writes to a cluster, and the third asynchronous write can
terminate early enough so as to not cause unbounded RAM growth. However, if
the S3 API node is continuously receiving large quantities of data and the
third node is never able to catch up, many data blocks will be kept buffered in
RAM as they are awaiting transfer to the third node.

The `block_ram_buffer_max` parameter sets a limit on the size of the buffers that
can be kept in RAM in this process. When the limit is reached, backpressure is
applied back to the S3 client.

Note that this only counts buffers that have reached a certain stage of
processing (received from the client + encrypted and/or compressed as
necessary) and are ready to send to the storage nodes. Many other buffers will
not be counted and this is not a hard limit on RAM consumption. In particular,
if many clients send requests simultaneously with large objects, the RAM
consumption will always grow linearly with the number of concurrent requests,
as each request will use a few buffers of size `block_size` for receiving and
intermediate processing before even trying to send the data to the storage
node.

The default value is 256MiB.
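
A hedged sketch of what raising this limit could look like on a gateway node with
RAM to spare; the value is illustrative and the exact size syntax accepted may
depend on your Garage version:

```toml
# Allow more blocks to be buffered in RAM before applying backpressure
# to S3 clients (default is 256MiB).
block_ram_buffer_max = "1G"
```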

#### `lmdb_map_size` {#lmdb_map_size}
@ -418,6 +538,17 @@ the node, even in the case of a NAT: the NAT should be configured to forward the
port number to the same internal port number. This means that if you have several nodes running
behind a NAT, they should each use a different RPC port number.

#### `rpc_bind_outgoing` (since v0.9.2) {#rpc_bind_outgoing}

If enabled, pre-bind all sockets for outgoing connections to the same IP address
used for listening (the IP address specified in `rpc_bind_addr`) before
trying to connect to remote nodes.
This can be necessary if a node has multiple IP addresses,
but only one is allowed or able to reach the other nodes,
for instance due to firewall rules or specific routing configuration.

Disabled by default.
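
A minimal sketch for a node with several IP addresses, where only the address used
for `rpc_bind_addr` can reach the other nodes (the address below is hypothetical):

```toml
rpc_bind_addr = "10.0.0.1:3901"   # hypothetical address reachable by other nodes
# Also bind outgoing RPC connections to the address above.
rpc_bind_outgoing = true
```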

#### `rpc_public_addr` {#rpc_public_addr}

The address and port that other nodes need to use to contact this node for
@ -425,6 +556,14 @@ RPC calls. **This parameter is optional but recommended.** In case you have
a NAT that binds the RPC port to a port that is different on your public IP,
this field might help make it work.

#### `rpc_public_addr_subnet` {#rpc_public_addr_subnet}

In case `rpc_public_addr` is not set, but autodiscovery is used, this allows
filtering the list of automatically discovered IPs to a specific subnet.

For example, if nodes should pick *their* IP inside a specific subnet, but you
don't want to explicitly write the IP down (as it's dynamic, or you want to
share configs across nodes), you can use this option.
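
A minimal sketch, assuming nodes should advertise whichever of their auto-discovered
IPs falls inside a given (hypothetical) subnet:

```toml
# Only keep auto-discovered IPs inside this subnet when advertising this node.
rpc_public_addr_subnet = "10.0.0.0/24"
```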

#### `bootstrap_peers` {#bootstrap_peers}

A list of peer identifiers on which to contact other Garage peers of this cluster.
@ -441,7 +580,7 @@ be obtained by running `garage node id` and then included directly in the
key will be returned by `garage node id` and you will have to add the IP
yourself.

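
For illustration, a `bootstrap_peers` entry combines the public key printed by
`garage node id` with the node's RPC address; the keys and addresses below are
placeholders:

```toml
bootstrap_peers = [
  "<node 1 public key from `garage node id`>@10.0.0.1:3901",
  "<node 2 public key from `garage node id`>@10.0.0.2:3901",
]
```
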
### `allow_world_readable_secrets` or `GARAGE_ALLOW_WORLD_READABLE_SECRETS` (env) {#allow_world_readable_secrets}

Garage checks the permissions of your secret files to make sure they're not
world-readable. In some cases, the check might fail and consider your files as
@ -474,7 +613,7 @@ the `/v1/catalog` endpoints, enabling mTLS if `client_cert` and `client_key` are

`service_name` should be set to the service name under which Garage's
RPC ports are announced.

#### `client_cert`, `client_key` {#consul_client_cert_and_key}

TLS client certificate and client key to use when communicating with Consul over TLS. Both are mandatory when doing so.
Only available when `api = "catalog"`.
@ -508,7 +647,7 @@ node_prefix "" {
}
```

#### `tags` and `meta` {#consul_tags_and_meta}

Additional list of tags and map of service meta to add during service registration.

@ -602,7 +741,7 @@ the socket will have 0220 mode. Make sure to set user and group permissions acco

The token for accessing the Metrics endpoint. If this token is not set, the
Metrics endpoint can be accessed without access control.

You can use any random string for this value. We recommend generating a random token with `openssl rand -base64 32`.

`metrics_token` was introduced in Garage `v0.7.2`.
`metrics_token_file` and the `GARAGE_METRICS_TOKEN` environment variable are supported since Garage `v0.8.2`.

@ -614,7 +753,7 @@ You can use any random string for this value. We recommend generating a random t

The token for accessing all of the other administration endpoints. If this
token is not set, access to these endpoints is disabled entirely.

You can use any random string for this value. We recommend generating a random token with `openssl rand -base64 32`.

`admin_token` was introduced in Garage `v0.7.2`.
`admin_token_file` and the `GARAGE_ADMIN_TOKEN` environment variable are supported since Garage `v0.8.2`.
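
Putting the two tokens together, the `[admin]` section of the configuration file
could look like the following sketch; the bind address is illustrative and the
token values are placeholders to be replaced by your own random strings:

```toml
[admin]
api_bind_addr = "0.0.0.0:3903"                  # illustrative
metrics_token = "<output of openssl rand -base64 32>"
admin_token = "<another random string>"
```
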
@ -37,6 +37,21 @@ A Garage cluster can very easily evolve over time, as storage nodes are added or

Garage will automatically rebalance data between nodes as needed to ensure the desired number of copies.
Read about cluster layout management [here](@/documentation/operations/layout.md).

### Several replication modes

Garage supports a variety of replication modes, with configurable replica count,
and with various levels of consistency, in order to adapt to a variety of usage scenarios.
Read our reference page on [supported replication modes](@/documentation/reference-manual/configuration.md#replication_factor)
to select the replication mode best suited to your use case (hint: in most cases, `replication_factor = 3` is what you want).

### Compression and deduplication

All data stored in Garage is deduplicated, and optionally compressed using
Zstd. Objects uploaded to Garage are chunked in blocks of constant size (see
[`block_size`](@/documentation/reference-manual/configuration.md#block_size)),
and the hashes of individual blocks are used to dispatch them to storage nodes
and to deduplicate them.

### No RAFT slowing you down

It might seem strange to tout the absence of something as a desirable feature,
@ -48,13 +63,6 @@ As a consequence, requests can be handled much faster, even in cases where laten
between cluster nodes is important (see our [benchmarks](@/documentation/design/benchmarks/index.md) for data on this).
This is particularly useful when nodes are far from one another and talk to one another through standard Internet connections.

### Web server for static websites

A storage bucket can easily be configured to be served directly by Garage as a static web site.
@ -27,6 +27,112 @@ Exposes the Garage replication factor configured on the node
garage_replication_factor 3
```

#### `garage_local_disk_avail` and `garage_local_disk_total` (gauge)

Reports the available and total disk space on each node, for data and metadata separately.

```
garage_local_disk_avail{volume="data"} 540341960704
garage_local_disk_avail{volume="metadata"} 540341960704
garage_local_disk_total{volume="data"} 763063566336
garage_local_disk_total{volume="metadata"} 763063566336
```

### Cluster health status metrics

#### `cluster_healthy` (gauge)

Whether all storage nodes are connected (0 or 1)

```
cluster_healthy 0
```

#### `cluster_available` (gauge)

Whether all requests can be served, even if some storage nodes are disconnected

```
cluster_available 1
```

#### `cluster_connected_nodes` (gauge)

Number of nodes currently connected

```
cluster_connected_nodes 3
```

#### `cluster_known_nodes` (gauge)

Number of nodes already seen once in the cluster

```
cluster_known_nodes 3
```

#### `cluster_layout_node_connected` (gauge)

Connection status for individual nodes of the cluster layout

```
cluster_layout_node_connected{id="62b218d848e86a64",role_capacity="1000000000",role_gateway="0",role_zone="dc1"} 1
cluster_layout_node_connected{id="a11c7cf18af29737",role_capacity="1000000000",role_gateway="0",role_zone="dc1"} 0
cluster_layout_node_connected{id="a235ac7695e0c54d",role_capacity="1000000000",role_gateway="0",role_zone="dc1"} 1
cluster_layout_node_connected{id="b10c110e4e854e5a",role_capacity="1000000000",role_gateway="0",role_zone="dc1"} 1
```

#### `cluster_layout_node_disconnected_time` (gauge)

Time (in seconds) since last connection to individual nodes of the cluster layout

```
cluster_layout_node_disconnected_time{id="62b218d848e86a64",role_capacity="1000000000",role_gateway="0",role_zone="dc1"} 0
cluster_layout_node_disconnected_time{id="a235ac7695e0c54d",role_capacity="1000000000",role_gateway="0",role_zone="dc1"} 0
cluster_layout_node_disconnected_time{id="b10c110e4e854e5a",role_capacity="1000000000",role_gateway="0",role_zone="dc1"} 0
```

#### `cluster_storage_nodes` (gauge)

Number of storage nodes declared in the current layout

```
cluster_storage_nodes 4
```

#### `cluster_storage_nodes_ok` (gauge)

Number of storage nodes currently connected

```
cluster_storage_nodes_ok 3
```

#### `cluster_partitions` (gauge)

Number of partitions in the layout (this is always 256)

```
cluster_partitions 256
```

#### `cluster_partitions_all_ok` (gauge)

Number of partitions for which all storage nodes are connected

```
cluster_partitions_all_ok 64
```

#### `cluster_partitions_quorum` (gauge)

Number of partitions for which we have a quorum of connected nodes and all requests can be served

```
cluster_partitions_quorum 256
```

### Metrics of the API endpoints

#### `api_admin_request_counter` (counter)

@ -119,6 +225,17 @@ block_bytes_read 120586322022
block_bytes_written 3386618077
```

#### `block_ram_buffer_free_kb` (gauge)

Kibibytes available for buffering blocks that have to be sent to remote nodes.
When clients send too much data to this node and a storage node is not receiving
data fast enough due to slower network conditions, this will decrease down to
zero and backpressure will be applied.

```
block_ram_buffer_free_kb 219829
```

#### `block_compression_level` (counter)

Exposes the block compression level configured for the Garage node.

@ -33,6 +33,7 @@ Feel free to open a PR to suggest fixes this table. Minio is missing because the

| [URL path-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#path-style-access) (eg. `host.tld/bucket/key`) | ✅ Implemented | ✅ | ✅ | ❓| ✅ |
| [URL vhost-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#virtual-hosted-style-access) URL (eg. `bucket.host.tld/key`) | ✅ Implemented | ❌| ✅| ✅ | ✅ |
| [Presigned URLs](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ShareObjectPreSignedURL.html) | ✅ Implemented | ❌| ✅ | ✅ | ✅(❓) |
| [SSE-C encryption](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html) | ✅ Implemented | ❓ | ✅ | ❌ | ✅ |

*Note:* OpenIO does not say if it supports presigned URLs. Because it is part
of signature v4 and they claim they support it without additional detail,
doc/book/working-documents/migration-1.md (new file, 77 lines)
@ -0,0 +1,77 @@

+++
title = "Migrating from 0.9 to 1.0"
weight = 11
+++

**This guide explains how to migrate to 1.0 if you have an existing 0.9 cluster.
We don't recommend trying to migrate to 1.0 directly from 0.8 or older.**

This migration procedure has been tested on several clusters without issues.
However, it is still a *critical procedure* that might cause issues.
**Make sure to back up all your data before attempting it!**

You might also want to read our [general documentation on upgrading Garage](@/documentation/operations/upgrading.md).

## Changes introduced in v1.0

The following are **breaking changes** in Garage v1.0 that require your attention when migrating:

- The Sled metadata db engine has been **removed**. If your cluster was still
  using Sled, you will need to **use a Garage v0.9.x binary** to convert the
  database using the `garage convert-db` subcommand. See
  [here](@/documentation/reference-manual/configuration.md#db_engine) for the
  details of the procedure.

The following syntax changes have been made to the configuration file:

- The `replication_mode` parameter has been split into two parameters:
  [`replication_factor`](@/documentation/reference-manual/configuration.md#replication_factor)
  and
  [`consistency_mode`](@/documentation/reference-manual/configuration.md#consistency_mode).
  The old syntax using `replication_mode` is still supported for legacy
  reasons and can still be used (see the sketch after this list).

- The parameters `sled_cache_capacity` and `sled_flush_every_ms` have been removed.
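
A sketch of the syntax change for a typical 3-replica cluster; the values shown
are illustrative, adjust them to your own layout:

```toml
# Old syntax (still accepted for backward compatibility):
# replication_mode = "3"

# New syntax since v1.0:
replication_factor = 3
consistency_mode = "consistent"   # default consistency mode
```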

## Migration procedure

The migration to Garage v1.0 can be done with almost no downtime,
by restarting all nodes at once in the new version.

The migration steps are as follows:

1. Do a `garage repair --all-nodes --yes tables`, check the logs and check that
   all data seems to be synced correctly between nodes. If you have time, do
   additional `garage repair` procedures (`blocks`, `versions`, `block_refs`,
   etc.)

2. Ensure you have a snapshot of your Garage installation that you can restore
   to in case the upgrade goes wrong:

   - If you are running Garage v0.9.4 or later, use the `garage meta snapshot
     --all` command to make a backup snapshot of the metadata directories of
     your nodes, and save a copy of the following files in the
     metadata directories of your nodes: `cluster_layout`, `data_layout`,
     `node_key`, `node_key.pub`.

   - If you are running a filesystem such as ZFS or BTRFS that supports
     snapshotting, you can create a filesystem-level snapshot to be used as a
     restoration point if needed.

   - In other cases, make a backup using the old procedure: turn off each node
     individually; back up its metadata folder (for instance, use the following
     command if your metadata directory is `/var/lib/garage/meta`: `cd
     /var/lib/garage ; tar -acf meta-v0.9.tar.zst meta/`); turn it back on
     again. This will allow you to take a backup of all nodes without
     impacting global cluster availability. You can do all nodes of a single
     zone at once as this does not impact the availability of Garage.

3. Prepare your updated binaries and configuration files for Garage v1.0.

4. Shut down all v0.9 nodes simultaneously, and restart them all simultaneously
   in v1.0. Use your favorite deployment tool (Ansible, Kubernetes, Nomad) to
   achieve this as fast as possible. Garage v1.0 should be in a working state
   as soon as enough nodes have started.

5. Monitor your cluster in the following hours to see if it works well under
   your production load.
@ -8,9 +8,9 @@ listen address is specified in the `[admin]` section of the configuration

file (see [configuration file
reference](@/documentation/reference-manual/configuration.md))

**WARNING.** At this point, there is no commitment to the stability of the APIs described in this document.
We will bump the version numbers prefixed to each API endpoint each time the syntax
or semantics change, meaning that code that relies on these endpoints will break
when changes are introduced.

The Garage administration API was introduced in version 0.7.2; this document
@ -19,7 +19,7 @@ does not apply to older versions of Garage.

## Access control

The admin API uses two different tokens for access control, that are specified in the config file's `[admin]` section:

- `metrics_token`: the token for accessing the Metrics endpoint (if this token
  is not set in the config file, the Metrics endpoint can be accessed without
@ -69,11 +69,10 @@ Example response body:

```json
{
  "node": "b10c110e4e854e5aa3f4637681befac755154b20059ec163254ddbfae86b09df",
  "garageVersion": "v1.0.1",
  "garageFeatures": [
    "k2v",
    "lmdb",
    "sqlite",
    "metrics",
@ -81,83 +80,92 @@ Example response body:
  ],
  "rustVersion": "1.68.0",
  "dbEngine": "LMDB (using Heed crate)",
  "layoutVersion": 5,
  "nodes": [
    {
      "id": "62b218d848e86a64f7fe1909735f29a4350547b54c4b204f91246a14eb0a1a8c",
      "role": {
        "id": "62b218d848e86a64f7fe1909735f29a4350547b54c4b204f91246a14eb0a1a8c",
        "zone": "dc1",
        "capacity": 100000000000,
        "tags": []
      },
      "addr": "10.0.0.3:3901",
      "hostname": "node3",
      "isUp": true,
      "lastSeenSecsAgo": 12,
      "draining": false,
      "dataPartition": {
        "available": 660270088192,
        "total": 873862266880
      },
      "metadataPartition": {
        "available": 660270088192,
        "total": 873862266880
      }
    },
    {
      "id": "a11c7cf18af297379eff8688360155fe68d9061654449ba0ce239252f5a7487f",
      "role": null,
      "addr": "10.0.0.2:3901",
      "hostname": "node2",
      "isUp": true,
      "lastSeenSecsAgo": 11,
      "draining": true,
      "dataPartition": {
        "available": 660270088192,
        "total": 873862266880
      },
      "metadataPartition": {
        "available": 660270088192,
        "total": 873862266880
      }
    },
    {
      "id": "a235ac7695e0c54d7b403943025f57504d500fdcc5c3e42c71c5212faca040a2",
      "role": {
        "id": "a235ac7695e0c54d7b403943025f57504d500fdcc5c3e42c71c5212faca040a2",
        "zone": "dc1",
        "capacity": 100000000000,
        "tags": []
      },
      "addr": "127.0.0.1:3904",
      "hostname": "lindy",
      "isUp": true,
      "lastSeenSecsAgo": 2,
      "draining": false,
      "dataPartition": {
        "available": 660270088192,
        "total": 873862266880
      },
      "metadataPartition": {
        "available": 660270088192,
        "total": 873862266880
      }
    },
    {
      "id": "b10c110e4e854e5aa3f4637681befac755154b20059ec163254ddbfae86b09df",
      "role": {
        "id": "b10c110e4e854e5aa3f4637681befac755154b20059ec163254ddbfae86b09df",
        "zone": "dc1",
        "capacity": 100000000000,
        "tags": []
      },
      "addr": "10.0.0.1:3901",
      "hostname": "node1",
      "isUp": true,
      "lastSeenSecsAgo": 3,
      "draining": false,
      "dataPartition": {
        "available": 660270088192,
        "total": 873862266880
      },
      "metadataPartition": {
        "available": 660270088192,
        "total": 873862266880
      }
    }
  ]
}
```

@ -146,7 +146,7 @@ in a bucket, as the partition key becomes the sort key in the index.

How indexing works:

- Each node keeps a local count of how many items it stores for each partition,
  in a local database tree that is updated atomically when an item is modified.
- These local counters are asynchronously stored in the index table which is
  a regular Garage table spread in the network. Counters are stored as LWW values,
  so basically the final table will have the following structure:

@ -1,39 +0,0 @@

### (fr) Garage, un système de stockage de données géo-distribué léger et robuste

Garage est un système de stockage de données léger, géo-distribué, qui
implémente le protocole de stockage S3 de Amazon. Garage est destiné
principalement à l'auto-hébergement sur du matériel courant d'occasion. À ce
titre, il doit tolérer un grand nombre de pannes: coupures de courant, coupures
de connexion Internet, pannes de machines, ... Il doit également être facile à
déployer et à maintenir, afin de pouvoir être facilement utilisé par des
amateurs ou des petites organisations.

Cette présentation vous proposera un aperçu de Garage et du choix technique
principal qui rend un système comme Garage possible: le refus d'utiliser des
algorithmes de consensus, remplacés avantageusement par des méthodes à
cohérence faible. Notre modèle est fortement inspiré de la base de données
Dynamo (DeCandia et al, 2007), et fait usage des types de données CRDT (Shapiro
et al, 2011). Nous explorerons comment ces méthodes s'appliquent à la
construction de l'abstraction "stockage objet" dans un système distribué, et
quelles autres abstractions peuvent ou ne peuvent pas être construites dans ce
modèle.

### (en) Garage, a lightweight and robust geo-distributed data storage system

Garage is a lightweight geo-distributed data store that implements the Amazon
S3 object storage protocol. Garage is meant primarily for self-hosting at home
on second-hand commodity hardware, meaning it has to tolerate a wide variety of
failure scenarios such as power cuts, Internet disconnections and machine
crashes. It also has to be easy to deploy and maintain, so that hobbyists and
small organizations can use it without trouble.

This talk will present Garage and the key technical choice that made Garage
possible: refusing to use consensus algorithms and using instead weak
consistency methods, with a model that is loosely based on that of the Dynamo
database (DeCandia et al, 2007) and that makes heavy use of conflict-free
replicated data types (Shapiro et al, 2011). We will explore how these methods
are suited to building the "object store" abstraction in a distributed system,
and what other abstractions are possible or impossible to build in this model.
|
@ -1,5 +1,5 @@
\nonstopmode
\documentclass[aspectratio=169,xcolor={svgnames}]{beamer}
\usepackage[utf8]{inputenc}
% \usepackage[frenchb]{babel}
\usepackage{amsmath}
||||||
|
@ -176,7 +176,12 @@
|
||||||
|
|
||||||
\begin{frame}
|
\begin{frame}
|
||||||
\frametitle{CRDTs / weak consistency instead of consensus}
|
\frametitle{CRDTs / weak consistency instead of consensus}
|
||||||
Consensus can be implemented reasonably well in practice, so why avoid it?
|
|
||||||
|
\underline{Internally, Garage uses only CRDTs} (conflict-free replicated data types)
|
||||||
|
|
||||||
|
\vspace{2em}
|
||||||
|
Why not Raft, Paxos, ...? Issues of consensus algorithms:
|
||||||
|
|
||||||
\vspace{1em}
|
\vspace{1em}
|
||||||
\begin{itemize}
|
\begin{itemize}
|
||||||
\item<2-> \textbf{Software complexity}
|
\item<2-> \textbf{Software complexity}
|
||||||
|
@ -191,8 +196,6 @@
|
||||||
\item<6-> \textbf{Takes time to reconverge} when disrupted (e.g. node going down)
|
\item<6-> \textbf{Takes time to reconverge} when disrupted (e.g. node going down)
|
||||||
\end{itemize}
|
\end{itemize}
|
||||||
\end{itemize}
|
\end{itemize}
|
||||||
\vspace{2em}
|
|
||||||
\visible<7->{\underline{Internally, Garage uses only CRDTs} (conflict-free replicated data types)}
|
|
||||||
\end{frame}
|
\end{frame}
|
||||||
|
|
||||||
\begin{frame}
|
\begin{frame}
|
||||||
|
@ -263,11 +266,9 @@
|
||||||
\vspace{1em}
|
\vspace{1em}
|
||||||
\item Replication modes with 1 or 2 copies / weaker consistency
|
\item Replication modes with 1 or 2 copies / weaker consistency
|
||||||
\vspace{1em}
|
\vspace{1em}
|
||||||
\item Kubernetes integration
|
\item Kubernetes integration for node discovery
|
||||||
\vspace{1em}
|
\vspace{1em}
|
||||||
\item Admin API (v0.7.2)
|
\item Admin API (v0.7.2)
|
||||||
\vspace{1em}
|
|
||||||
\item Experimental K2V API (v0.7.2)
|
|
||||||
\end{itemize}
|
\end{itemize}
|
||||||
\end{frame}
|
\end{frame}
|
||||||
|
|
||||||
|
@ -323,7 +324,8 @@
|
||||||
\end{itemize}
|
\end{itemize}
|
||||||
|
|
||||||
\vspace{2em}
|
\vspace{2em}
|
||||||
\textbf{LMDB:} very stable, good performance, file size is reasonable\\
\textbf{Sqlite} also available as a second choice
|
||||||
|
|
||||||
\vspace{1em}
|
\vspace{1em}
|
||||||
Sled will be removed in Garage v1.0
|
Sled will be removed in Garage v1.0
|
||||||
|
@ -417,15 +419,15 @@
|
||||||
\textbf{Partition} & \textbf{Node 1} & \textbf{Node 2} & \textbf{Node 3} \\
|
\textbf{Partition} & \textbf{Node 1} & \textbf{Node 2} & \textbf{Node 3} \\
|
||||||
\hline
|
\hline
|
||||||
\hline
|
\hline
|
||||||
Partition 0 & Io (jupiter) & Drosera (atuin) & Courgette (neptune) \\
|
Partition 0 & df-ymk (bespin) & Abricot (scorpio) & Courgette (neptune) \\
|
||||||
\hline
|
\hline
|
||||||
Partition 1 & Datura (atuin) & Courgette (neptune) & Io (jupiter) \\
|
Partition 1 & Ananas (scorpio) & Courgette (neptune) & df-ykl (bespin) \\
|
||||||
\hline
|
\hline
|
||||||
Partition 2 & Io(jupiter) & Celeri (neptune) & Drosera (atuin) \\
|
Partition 2 & df-ymf (bespin) & Celeri (neptune) & Abricot (scorpio) \\
|
||||||
\hline
|
\hline
|
||||||
\hspace{1em}$\vdots$ & \hspace{1em}$\vdots$ & \hspace{1em}$\vdots$ & \hspace{1em}$\vdots$ \\
|
\hspace{1em}$\vdots$ & \hspace{1em}$\vdots$ & \hspace{1em}$\vdots$ & \hspace{1em}$\vdots$ \\
|
||||||
\hline
|
\hline
|
||||||
Partition 255 & Concombre (neptune) & Io (jupiter) & Drosera (atuin) \\
|
Partition 255 & Concombre (neptune) & df-ykl (bespin) & Abricot (scorpio) \\
|
||||||
\hline
|
\hline
|
||||||
\end{tabular}
|
\end{tabular}
|
||||||
\end{center}
|
\end{center}
|
||||||
|
@ -484,9 +486,9 @@
|
||||||
|
|
||||||
\vspace{1em}
|
\vspace{1em}
|
||||||
{\small
|
{\small
|
||||||
\textbf{Property:} If node $A$ did an operation $write(x)$ and received an OK response,\\
|
\textbf{Property:} If client 1 did an operation $write(x)$ and received an OK response,\\
|
||||||
\hspace{2cm} and node $B$ starts an operation $read()$ after $A$ received OK,\\
|
\hspace{2cm} and client 2 starts an operation $read()$ after client 1 received OK,\\
|
||||||
\hspace{2cm} then $B$ will read a value $x' \sqsupseteq x$.
|
\hspace{2cm} then client 2 will read a value $x' \sqsupseteq x$.
|
||||||
}
|
}
|
||||||
|
|
||||||
\vspace{1.5em}
|
\vspace{1.5em}
|
||||||
|
@ -539,10 +541,53 @@
|
||||||
\item We rely on quorums $k > n/2$ within each partition:\\
|
\item We rely on quorums $k > n/2$ within each partition:\\
|
||||||
$$n=3,~~~~~~~k\ge 2$$
|
$$n=3,~~~~~~~k\ge 2$$
|
||||||
\item<2-> When rebalancing, the set of nodes responsible for a partition can change:\\
|
\item<2-> When rebalancing, the set of nodes responsible for a partition can change:\\
|
||||||
$$\{A, B, C\} \to \{A, D, E\}$$
|
|
||||||
\vspace{.01em}
|
\vspace{1em}
|
||||||
\item<3-> During the rebalancing, $D$ and $E$ don't yet have the data,\\
|
\begin{minipage}{.04\linewidth}~
|
||||||
~~~~~~~~~~~~~~~~~~~and $B$ and $C$ want to get rid of the data to free up space\\
|
\end{minipage}
|
||||||
|
\begin{minipage}{.40\linewidth}
|
||||||
|
{\tiny
|
||||||
|
\begin{tabular}{|l|l|l|l|}
|
||||||
|
\hline
|
||||||
|
\textbf{Partition} & \textbf{Node 1} & \textbf{Node 2} & \textbf{Node 3} \\
|
||||||
|
\hline
|
||||||
|
\hline
|
||||||
|
Partition 0 & \textcolor{Crimson}{df-ymk} & Abricot & \textcolor{Crimson}{Courgette} \\
|
||||||
|
\hline
|
||||||
|
Partition 1 & Ananas & \textcolor{Crimson}{Courgette} & \textcolor{Crimson}{df-ykl} \\
|
||||||
|
\hline
|
||||||
|
Partition 2 & \textcolor{Crimson}{df-ymf} & \textcolor{Crimson}{Celeri} & Abricot \\
|
||||||
|
\hline
|
||||||
|
\hspace{1em}$\dots$ & \hspace{1em}$\dots$ & \hspace{1em}$\dots$ & \hspace{1em}$\dots$ \\
|
||||||
|
\hline
|
||||||
|
\end{tabular}
|
||||||
|
}
|
||||||
|
\end{minipage}
|
||||||
|
\begin{minipage}{.04\linewidth}
|
||||||
|
$\to$
|
||||||
|
\end{minipage}
|
||||||
|
\begin{minipage}{.40\linewidth}
|
||||||
|
{\tiny
|
||||||
|
\begin{tabular}{|l|l|l|l|}
|
||||||
|
\hline
|
||||||
|
\textbf{Partition} & \textbf{Node 1} & \textbf{Node 2} & \textbf{Node 3} \\
|
||||||
|
\hline
|
||||||
|
\hline
|
||||||
|
Partition 0 & \textcolor{ForestGreen}{Dahlia} & Abricot & \textcolor{ForestGreen}{Eucalyptus} \\
|
||||||
|
\hline
|
||||||
|
Partition 1 & Ananas & \textcolor{ForestGreen}{Euphorbe} & \textcolor{ForestGreen}{Doradille} \\
|
||||||
|
\hline
|
||||||
|
Partition 2 & \textcolor{ForestGreen}{Dahlia} & \textcolor{ForestGreen}{Echinops} & Abricot \\
|
||||||
|
\hline
|
||||||
|
\hspace{1em}$\dots$ & \hspace{1em}$\dots$ & \hspace{1em}$\dots$ & \hspace{1em}$\dots$ \\
|
||||||
|
\hline
|
||||||
|
\end{tabular}
|
||||||
|
}
|
||||||
|
\end{minipage}
|
||||||
|
|
||||||
|
\vspace{2em}
|
||||||
|
\item<3-> During the rebalancing, new nodes don't yet have the data,\\
|
||||||
|
~~~~~~~~~~~~~~~~~~~and old nodes want to get rid of the data to free up space\\
|
||||||
\vspace{1.2em}
|
\vspace{1.2em}
|
||||||
$\to$ risk of inconsistency, \textbf{how to coordinate?}
|
$\to$ risk of inconsistency, \textbf{how to coordinate?}
|
||||||
\end{itemize}
|
\end{itemize}
|
||||||
|
@ -589,7 +634,7 @@
|
||||||
\end{frame}
|
\end{frame}
|
||||||
|
|
||||||
\begin{frame}
|
\begin{frame}
|
||||||
\frametitle{Towards v1.0}
|
\frametitle{Towards v1.0...}
|
||||||
Focus on \underline{security \& stability}
|
Focus on \underline{security \& stability}
|
||||||
\vspace{2em}
|
\vspace{2em}
|
||||||
\begin{itemize}
|
\begin{itemize}
|
||||||
|
@ -603,6 +648,13 @@
|
||||||
\end{itemize}
|
\end{itemize}
|
||||||
\end{frame}
|
\end{frame}
|
||||||
|
|
||||||
|
\begin{frame}
|
||||||
|
\frametitle{...and beyond!}
|
||||||
|
\begin{center}
|
||||||
|
\includegraphics[width=.6\linewidth]{../assets/survey_requested_features.png}
|
||||||
|
\end{center}
|
||||||
|
\end{frame}
|
||||||
|
|
||||||
% ======================================== OPERATING
|
% ======================================== OPERATING
|
||||||
% ======================================== OPERATING
|
% ======================================== OPERATING
|
||||||
% ======================================== OPERATING
|
% ======================================== OPERATING
|
||||||
|
@ -684,7 +736,7 @@
|
||||||
\end{itemize}
|
\end{itemize}
|
||||||
\vspace{.5em}
|
\vspace{.5em}
|
||||||
\end{itemize}
|
\end{itemize}
|
||||||
Our deployments: $< 10$ TB. Some people have done more!
|
||||||
\end{frame}
|
\end{frame}
|
||||||
|
|
||||||
|
|
||||||
|
|
17
doc/talks/2024-02-29-capitoul/.gitignore
vendored
Normal file
|
@ -0,0 +1,17 @@
|
||||||
|
*
|
||||||
|
|
||||||
|
!*.txt
|
||||||
|
!*.md
|
||||||
|
|
||||||
|
!assets
|
||||||
|
|
||||||
|
!.gitignore
|
||||||
|
!*.svg
|
||||||
|
!*.png
|
||||||
|
!*.jpg
|
||||||
|
!*.tex
|
||||||
|
!Makefile
|
||||||
|
!.gitignore
|
||||||
|
!assets/*.drawio.pdf
|
||||||
|
|
||||||
|
!talk.pdf
|
10
doc/talks/2024-02-29-capitoul/Makefile
Normal file
|
@ -0,0 +1,10 @@
|
||||||
|
ASSETS=../assets/logos/deuxfleurs.pdf
|
||||||
|
|
||||||
|
talk.pdf: talk.tex $(ASSETS)
|
||||||
|
pdflatex talk.tex
|
||||||
|
|
||||||
|
%.pdf: %.svg
|
||||||
|
inkscape -D -z --file=$^ --export-pdf=$@
|
||||||
|
|
||||||
|
%.pdf_tex: %.svg
|
||||||
|
inkscape -D -z --file=$^ --export-pdf=$@ --export-latex
|
BIN
doc/talks/2024-02-29-capitoul/talk.pdf
Normal file
543
doc/talks/2024-02-29-capitoul/talk.tex
Normal file
|
@ -0,0 +1,543 @@
|
||||||
|
\nonstopmode
|
||||||
|
\documentclass[aspectratio=169,xcolor={svgnames}]{beamer}
|
||||||
|
\usepackage[utf8]{inputenc}
|
||||||
|
% \usepackage[frenchb]{babel}
|
||||||
|
\usepackage{amsmath}
|
||||||
|
\usepackage{mathtools}
|
||||||
|
\usepackage{breqn}
|
||||||
|
\usepackage{multirow}
|
||||||
|
\usetheme{boxes}
|
||||||
|
\usepackage{graphicx}
|
||||||
|
\usepackage{import}
|
||||||
|
\usepackage{adjustbox}
|
||||||
|
\usepackage[absolute,overlay]{textpos}
|
||||||
|
%\useoutertheme[footline=authortitle,subsection=false]{miniframes}
|
||||||
|
%\useoutertheme[footline=authorinstitute,subsection=false]{miniframes}
|
||||||
|
\useoutertheme{infolines}
|
||||||
|
\setbeamertemplate{headline}{}
|
||||||
|
|
||||||
|
\beamertemplatenavigationsymbolsempty
|
||||||
|
|
||||||
|
\definecolor{TitleOrange}{RGB}{255,137,0}
|
||||||
|
\setbeamercolor{title}{fg=TitleOrange}
|
||||||
|
\setbeamercolor{frametitle}{fg=TitleOrange}
|
||||||
|
|
||||||
|
\definecolor{ListOrange}{RGB}{255,145,5}
|
||||||
|
\setbeamertemplate{itemize item}{\color{ListOrange}$\blacktriangleright$}
|
||||||
|
|
||||||
|
\definecolor{verygrey}{RGB}{70,70,70}
|
||||||
|
\setbeamercolor{normal text}{fg=verygrey}
|
||||||
|
|
||||||
|
|
||||||
|
\usepackage{tabu}
|
||||||
|
\usepackage{multicol}
|
||||||
|
\usepackage{vwcol}
|
||||||
|
\usepackage{stmaryrd}
|
||||||
|
\usepackage{graphicx}
|
||||||
|
|
||||||
|
\usepackage[normalem]{ulem}
|
||||||
|
|
||||||
|
\AtBeginSection[]{
|
||||||
|
\begin{frame}
|
||||||
|
\vfill
|
||||||
|
\centering
|
||||||
|
\begin{beamercolorbox}[sep=8pt,center,shadow=true,rounded=true]{title}
|
||||||
|
\usebeamerfont{title}\insertsectionhead\par%
|
||||||
|
\end{beamercolorbox}
|
||||||
|
\vfill
|
||||||
|
\end{frame}
|
||||||
|
}
|
||||||
|
|
||||||
|
\title{Garage}
|
||||||
|
\author{Alex Auvolat, Deuxfleurs}
|
||||||
|
\date{Capitoul, 2024-02-29}
|
||||||
|
|
||||||
|
\begin{document}
|
||||||
|
|
||||||
|
\begin{frame}
|
||||||
|
\centering
|
||||||
|
\includegraphics[width=.3\linewidth]{../../sticker/Garage.png}
|
||||||
|
\vspace{1em}
|
||||||
|
|
||||||
|
{\large\bf Alex Auvolat, Deuxfleurs Association}
|
||||||
|
\vspace{1em}
|
||||||
|
|
||||||
|
\url{https://garagehq.deuxfleurs.fr/}
|
||||||
|
|
||||||
|
Matrix channel: \texttt{\#garage:deuxfleurs.fr}
|
||||||
|
\end{frame}
|
||||||
|
|
||||||
|
\begin{frame}
|
||||||
|
\frametitle{Who I am}
|
||||||
|
\begin{columns}[t]
|
||||||
|
\begin{column}{.2\textwidth}
|
||||||
|
\centering
|
||||||
|
\adjincludegraphics[width=.4\linewidth, valign=t]{../assets/alex.jpg}
|
||||||
|
\end{column}
|
||||||
|
\begin{column}{.6\textwidth}
|
||||||
|
\textbf{Alex Auvolat}\\
|
||||||
|
PhD; co-founder of Deuxfleurs
|
||||||
|
\end{column}
|
||||||
|
\begin{column}{.2\textwidth}
|
||||||
|
~
|
||||||
|
\end{column}
|
||||||
|
\end{columns}
|
||||||
|
\vspace{2em}
|
||||||
|
|
||||||
|
\begin{columns}[t]
|
||||||
|
\begin{column}{.2\textwidth}
|
||||||
|
\centering
|
||||||
|
\adjincludegraphics[width=.5\linewidth, valign=t]{../assets/logos/deuxfleurs.pdf}
|
||||||
|
\end{column}
|
||||||
|
\begin{column}{.6\textwidth}
|
||||||
|
\textbf{Deuxfleurs}\\
|
||||||
|
A non-profit self-hosting collective,\\
|
||||||
|
member of the CHATONS network
|
||||||
|
\end{column}
|
||||||
|
\begin{column}{.2\textwidth}
|
||||||
|
\centering
|
||||||
|
\adjincludegraphics[width=.7\linewidth, valign=t]{../assets/logos/logo_chatons.png}
|
||||||
|
\end{column}
|
||||||
|
\end{columns}
|
||||||
|
|
||||||
|
\end{frame}
|
||||||
|
|
||||||
|
\begin{frame}
|
||||||
|
\frametitle{Our objective at Deuxfleurs}
|
||||||
|
|
||||||
|
\begin{center}
|
||||||
|
\textbf{Promote self-hosting and small-scale hosting\\
|
||||||
|
as an alternative to large cloud providers}
|
||||||
|
\end{center}
|
||||||
|
\vspace{2em}
|
||||||
|
\visible<2->{
|
||||||
|
Why is it hard?
|
||||||
|
\vspace{2em}
|
||||||
|
\begin{center}
|
||||||
|
\textbf{\underline{Resilience}}\\
|
||||||
|
{\footnotesize we want good uptime/availability with low supervision}
|
||||||
|
\end{center}
|
||||||
|
}
|
||||||
|
\end{frame}
|
||||||
|
|
||||||
|
\begin{frame}
|
||||||
|
\frametitle{Our very low-tech infrastructure}
|
||||||
|
|
||||||
|
\only<1,3-6>{
|
||||||
|
\begin{itemize}
|
||||||
|
\item \textcolor<4->{gray}{Commodity hardware (e.g. old desktop PCs)\\
|
||||||
|
\vspace{.5em}
|
||||||
|
\visible<3->{{\footnotesize (can die at any time)}}}
|
||||||
|
\vspace{1.5em}
|
||||||
|
\item<4-> \textcolor<6->{gray}{Regular Internet (e.g. FTTB, FTTH) and power grid connections\\
|
||||||
|
\vspace{.5em}
|
||||||
|
\visible<5->{{\footnotesize (can be unavailable randomly)}}}
|
||||||
|
\vspace{1.5em}
|
||||||
|
\item<6-> \textbf{Geographical redundancy} (multi-site replication)
|
||||||
|
\end{itemize}
|
||||||
|
}
|
||||||
|
\only<2>{
|
||||||
|
\begin{center}
|
||||||
|
\includegraphics[width=.8\linewidth]{../assets/neptune.jpg}
|
||||||
|
\end{center}
|
||||||
|
}
|
||||||
|
\only<7>{
|
||||||
|
\begin{center}
|
||||||
|
\includegraphics[width=.8\linewidth]{../assets/inframap_jdll2023.pdf}
|
||||||
|
\end{center}
|
||||||
|
}
|
||||||
|
\end{frame}
|
||||||
|
|
||||||
|
\begin{frame}
|
||||||
|
\frametitle{How to make this happen}
|
||||||
|
\begin{center}
|
||||||
|
\only<1>{\includegraphics[width=.8\linewidth]{../assets/intro/slide1.png}}%
|
||||||
|
\only<2>{\includegraphics[width=.8\linewidth]{../assets/intro/slide2.png}}%
|
||||||
|
\only<3>{\includegraphics[width=.8\linewidth]{../assets/intro/slide3.png}}%
|
||||||
|
\end{center}
|
||||||
|
\end{frame}
|
||||||
|
|
||||||
|
\begin{frame}
|
||||||
|
\frametitle{Distributed file systems are slow}
|
||||||
|
File systems are complex, for example:
|
||||||
|
\vspace{1em}
|
||||||
|
\begin{itemize}
|
||||||
|
\item Concurrent modification by several processes
|
||||||
|
\vspace{1em}
|
||||||
|
\item Folder hierarchies
|
||||||
|
\vspace{1em}
|
||||||
|
\item Other requirements of the POSIX spec (e.g.~locks)
|
||||||
|
\end{itemize}
|
||||||
|
\vspace{1em}
|
||||||
|
Coordination in a distributed system is costly
|
||||||
|
|
||||||
|
\vspace{1em}
|
||||||
|
Costs explode with commodity hardware / Internet connections\\
|
||||||
|
{\small (we experienced this!)}
|
||||||
|
\end{frame}
|
||||||
|
|
||||||
|
\begin{frame}
|
||||||
|
\frametitle{A simpler solution: object storage}
|
||||||
|
Only two operations:
|
||||||
|
\vspace{1em}
|
||||||
|
\begin{itemize}
|
||||||
|
\item Put an object at a key
|
||||||
|
\vspace{1em}
|
||||||
|
\item Retrieve an object from its key
|
||||||
|
\end{itemize}
|
||||||
|
\vspace{1em}
|
||||||
|
{\footnotesize (and a few others)}
|
||||||
|
|
||||||
|
\vspace{1em}
|
||||||
|
Sufficient for many applications!
|
||||||
|
\end{frame}
|
||||||
|
|
||||||
|
\begin{frame}
|
||||||
|
\frametitle{A simpler solution: object storage}
|
||||||
|
\begin{center}
|
||||||
|
\includegraphics[height=6em]{../assets/logos/Amazon-S3.jpg}
|
||||||
|
\hspace{3em}
|
||||||
|
\visible<2->{\includegraphics[height=5em]{../assets/logos/minio.png}}
|
||||||
|
\hspace{3em}
|
||||||
|
\visible<3>{\includegraphics[height=6em]{../../logo/garage_hires_crop.png}}
|
||||||
|
\end{center}
|
||||||
|
\vspace{1em}
|
||||||
|
S3: a de-facto standard, many compatible applications
|
||||||
|
|
||||||
|
\vspace{1em}
|
||||||
|
\visible<2->{MinIO is self-hostable but not suited for geo-distributed deployments}
|
||||||
|
|
||||||
|
\vspace{1em}
|
||||||
|
\visible<3->{\textbf{Garage is a self-hosted drop-in replacement for the Amazon S3 object store}}
|
||||||
|
\end{frame}

% --------- BASED ON CRDTS ----------

\section{Principle 1: based on CRDTs}

\begin{frame}
\frametitle{CRDTs / weak consistency instead of consensus}

\underline{Internally, Garage uses only CRDTs} (conflict-free replicated data types)

\vspace{2em}
Why not Raft, Paxos, ...? Issues with consensus algorithms:

\vspace{1em}
\begin{itemize}
    \item<2-> \textbf{Software complexity}
    \vspace{1em}
    \item<3-> \textbf{Performance issues:}
    \vspace{.5em}
    \begin{itemize}
        \item<4-> The leader is a \textbf{bottleneck} for all requests\\
        \vspace{.5em}
        \item<5-> \textbf{Sensitive to higher latency} between nodes
        \vspace{.5em}
        \item<6-> \textbf{Takes time to reconverge} when disrupted (e.g. node going down)
    \end{itemize}
\end{itemize}
\end{frame}
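
% Side note (not part of the slides): the CRDT idea in its simplest form, a
% last-writer-wins register. merge() is commutative, associative and idempotent, so
% replicas can receive updates in any order, any number of times, and still converge to
% the same state, with no leader and no consensus round. This is a generic sketch, not
% Garage's actual internal types.
%
%   #[derive(Clone, Debug, PartialEq)]
%   struct LwwRegister<T> {
%       timestamp: u64, // timestamp of the last write (logical or wall-clock)
%       value: T,
%   }
%
%   impl<T: Clone> LwwRegister<T> {
%       fn merge(&mut self, other: &LwwRegister<T>) {
%           // Keep whichever write carries the highest timestamp; real implementations
%           // break ties deterministically, e.g. by comparing node IDs.
%           if other.timestamp > self.timestamp {
%               self.timestamp = other.timestamp;
%               self.value = other.value.clone();
%           }
%       }
%   }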

\begin{frame}
\frametitle{The data model of object storage}
Object storage is basically a \textbf{key-value store}:
\vspace{.5em}

{\scriptsize
    \begin{center}
    \begin{tabular}{|l|p{7cm}|}
        \hline
        \textbf{Key: file path + name} & \textbf{Value: file data + metadata} \\
        \hline
        \hline
        \texttt{index.html} &
            \texttt{Content-Type: text/html; charset=utf-8} \newline
            \texttt{Content-Length: 24929} \newline
            \texttt{<binary blob>} \\
        \hline
        \texttt{img/logo.svg} &
            \texttt{Content-Type: image/svg+xml} \newline
            \texttt{Content-Length: 13429} \newline
            \texttt{<binary blob>} \\
        \hline
        \texttt{download/index.html} &
            \texttt{Content-Type: text/html; charset=utf-8} \newline
            \texttt{Content-Length: 26563} \newline
            \texttt{<binary blob>} \\
        \hline
    \end{tabular}
    \end{center}
}

\vspace{.5em}
\begin{itemize}
    \item<2-> Maps well to CRDT data types
    \item<3> Read-after-write consistency with quorums
\end{itemize}
\end{frame}
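
% Side note (not part of the slides): how quorums give the read-after-write consistency
% mentioned in the last bullet without consensus. With 3 replicas per item, a write waits
% for 2 acknowledgements and a read stops after 2 answers; since 2 + 2 > 3, every read
% quorum overlaps the most recent write quorum, and any divergent versions it sees are
% reconciled with a CRDT merge such as the one sketched above. The Node type and its
% store()/fetch() helpers are assumed for the example; error handling and parallel
% requests are omitted.
%
%   const N: usize = 3; // replicas per item
%   const W: usize = 2; // write quorum
%   const R: usize = 2; // read quorum
%
%   fn quorum_write(nodes: &[Node; N], key: &str, value: &LwwRegister<Vec<u8>>) -> bool {
%       let acks = nodes.iter().filter(|n| n.store(key, value)).count();
%       acks >= W // report success only once a write quorum has acknowledged
%   }
%
%   fn quorum_read(nodes: &[Node; N], key: &str) -> Option<LwwRegister<Vec<u8>>> {
%       let mut merged: Option<LwwRegister<Vec<u8>>> = None;
%       let mut answers = 0;
%       for node in nodes {
%           if let Some(version) = node.fetch(key) {
%               merged = match merged.take() {
%                   Some(mut m) => { m.merge(&version); Some(m) }
%                   None => Some(version),
%               };
%               answers += 1;
%               if answers >= R {
%                   break; // a read quorum has been reached
%               }
%           }
%       }
%       merged
%   }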

\begin{frame}
\frametitle{Performance gains in practice}
\begin{center}
    \includegraphics[width=.8\linewidth]{../assets/perf/endpoint_latency_0.7_0.8_minio.png}
\end{center}
\end{frame}

% --------- GEO-DISTRIBUTED MODEL ----------

\section{Principle 2: geo-distributed data model}

\begin{frame}
\frametitle{Key-value stores, upgraded: the Dynamo model}
\textbf{Two keys:}
\begin{itemize}
    \item Partition key: used to divide data into partitions {\small (a.k.a.~shards)}
    \item Sort key: used to identify items inside a partition
\end{itemize}

\vspace{1em}

\begin{center}
\begin{tabular}{|l|l|p{3cm}|}
    \hline
    \textbf{Partition key: bucket} & \textbf{Sort key: filename} & \textbf{Value} \\
    \hline
    \hline
    \texttt{website} & \texttt{index.html} & (file data) \\
    \hline
    \texttt{website} & \texttt{img/logo.svg} & (file data) \\
    \hline
    \texttt{website} & \texttt{download/index.html} & (file data) \\
    \hline
    \hline
    \texttt{backup} & \texttt{borg/index.2822} & (file data) \\
    \hline
    \texttt{backup} & \texttt{borg/data/2/2329} & (file data) \\
    \hline
    \texttt{backup} & \texttt{borg/data/2/2680} & (file data) \\
    \hline
    \hline
    \texttt{private} & \texttt{qq3a2nbe1qjq0ebbvo6ocsp6co} & (file data) \\
    \hline
\end{tabular}
\end{center}
\end{frame}
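
% Side note (not part of the slides): one way to picture the two-key model shown in the
% table above. Storing entries under the composite key (partition key, sort key) in an
% ordered map keeps all items of a bucket contiguous, so listing a bucket is a range scan
% ordered by sort key. Generic sketch, not Garage's actual table code.
%
%   use std::collections::BTreeMap;
%
%   type Key = (String, String); // (partition key, sort key)
%
%   /// Return every entry whose partition key equals `pk`, in sort-key order.
%   fn list_partition(table: &BTreeMap<Key, Vec<u8>>, pk: &str) -> Vec<(Key, Vec<u8>)> {
%       let start = (pk.to_string(), String::new());
%       table
%           .range(start..)                             // jump to the first entry of `pk`
%           .take_while(|((p, _), _)| p.as_str() == pk) // stop at the next partition key
%           .map(|(k, v)| (k.clone(), v.clone()))
%           .collect()
%   }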

\begin{frame}
\frametitle{Layout computation}
\begin{overprint}
\onslide<1>
    \begin{center}
        \includegraphics[width=\linewidth, trim=0 0 0 -4cm]{../assets/screenshots/garage_status_0.9_prod_zonehl.png}
    \end{center}
\onslide<2>
    \begin{center}
        \includegraphics[width=.7\linewidth]{../assets/map.png}
    \end{center}
\end{overprint}
\vspace{1em}
Garage stores replicas in different zones when possible
\end{frame}

\begin{frame}
\frametitle{What a ``layout'' is}
\textbf{A layout is a precomputed index table:}
\vspace{1em}

{\footnotesize
    \begin{center}
    \begin{tabular}{|l|l|l|l|}
        \hline
        \textbf{Partition} & \textbf{Node 1} & \textbf{Node 2} & \textbf{Node 3} \\
        \hline
        \hline
        Partition 0 & df-ymk (bespin) & Abricot (scorpio) & Courgette (neptune) \\
        \hline
        Partition 1 & Ananas (scorpio) & Courgette (neptune) & df-ykl (bespin) \\
        \hline
        Partition 2 & df-ymf (bespin) & Celeri (neptune) & Abricot (scorpio) \\
        \hline
        \hspace{1em}$\vdots$ & \hspace{1em}$\vdots$ & \hspace{1em}$\vdots$ & \hspace{1em}$\vdots$ \\
        \hline
        Partition 255 & Concombre (neptune) & df-ykl (bespin) & Abricot (scorpio) \\
        \hline
    \end{tabular}
    \end{center}
}

\vspace{2em}
\visible<2->{
    The index table is built centrally using an optimal algorithm,\\
    then propagated to all nodes
}

\vspace{1em}
\visible<3->{
    \footnotesize
    Oulamara, M., \& Auvolat, A. (2023). \emph{An algorithm for geo-distributed and redundant storage in Garage}.\\ arXiv preprint arXiv:2302.13798.
}
\end{frame}

\begin{frame}
\frametitle{The relationship between \emph{partition} and \emph{partition key}}
\begin{center}
\begin{tabular}{|l|l|l|l|}
    \hline
    \textbf{Partition key} & \textbf{Partition} & \textbf{Sort key} & \textbf{Value} \\
    \hline
    \hline
    \texttt{website} & Partition 12 & \texttt{index.html} & (file data) \\
    \hline
    \texttt{website} & Partition 12 & \texttt{img/logo.svg} & (file data) \\
    \hline
    \texttt{website} & Partition 12 & \texttt{download/index.html} & (file data) \\
    \hline
    \hline
    \texttt{backup} & Partition 42 & \texttt{borg/index.2822} & (file data) \\
    \hline
    \texttt{backup} & Partition 42 & \texttt{borg/data/2/2329} & (file data) \\
    \hline
    \texttt{backup} & Partition 42 & \texttt{borg/data/2/2680} & (file data) \\
    \hline
    \hline
    \texttt{private} & Partition 42 & \texttt{qq3a2nbe1qjq0ebbvo6ocsp6co} & (file data) \\
    \hline
\end{tabular}
\end{center}
\vspace{1em}
\textbf{To read or write an item:} hash partition key
\\ \hspace{5cm} $\to$ determine partition number (first 8 bits)
\\ \hspace{5cm} $\to$ find associated nodes
\end{frame}
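
% Side note (not part of the slides): the read/write path of this slide as a sketch.
% The layout is the 256-row table from the earlier slide (one row per partition, three
% nodes per row); the first 8 bits of a hash of the partition key select the row, and
% that row lists the nodes holding the item's replicas. The hash function below is a
% stand-in for the example, not necessarily the one Garage actually uses.
%
%   use std::collections::hash_map::DefaultHasher;
%   use std::hash::{Hash, Hasher};
%
%   #[derive(Clone, Copy, PartialEq, Eq, Debug)]
%   struct NodeId(u64);
%
%   struct Layout {
%       // partitions[p] = the 3 nodes storing the replicas of partition p
%       partitions: [[NodeId; 3]; 256],
%   }
%
%   fn partition_of(partition_key: &str) -> u8 {
%       let mut hasher = DefaultHasher::new();
%       partition_key.hash(&mut hasher);
%       (hasher.finish() >> 56) as u8 // keep the first 8 bits of the 64-bit hash
%   }
%
%   fn nodes_for<'a>(layout: &'a Layout, partition_key: &str) -> &'a [NodeId; 3] {
%       &layout.partitions[partition_of(partition_key) as usize]
%   }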

\begin{frame}
\frametitle{Garage's internal data structures}
\centering
\includegraphics[width=.75\columnwidth]{../assets/garage_tables.pdf}
\end{frame}

% ---------- OPERATING GARAGE ---------

\section{Operating Garage clusters}

\begin{frame}
\frametitle{Operating Garage}
\begin{center}
    \only<1-2>{
        \includegraphics[width=.9\linewidth]{../assets/screenshots/garage_status_0.10.png}
        \\\vspace{1em}
        \visible<2>{\includegraphics[width=.9\linewidth]{../assets/screenshots/garage_status_unhealthy_0.10.png}}
    }
\end{center}
\end{frame}

\begin{frame}
\frametitle{Background synchronization}
\begin{center}
    \includegraphics[width=.6\linewidth]{../assets/garage_sync.drawio.pdf}
\end{center}
\end{frame}

\begin{frame}
\frametitle{Digging deeper}
\begin{center}
    \only<1>{\includegraphics[width=.9\linewidth]{../assets/screenshots/garage_stats_0.10.png}}
    \only<2>{\includegraphics[width=.5\linewidth]{../assets/screenshots/garage_worker_list_0.10.png}}
    \only<3>{\includegraphics[width=.6\linewidth]{../assets/screenshots/garage_worker_param_0.10.png}}
\end{center}
\end{frame}

\begin{frame}
\frametitle{Monitoring with Prometheus + Grafana}
\begin{center}
    \includegraphics[width=.9\linewidth]{../assets/screenshots/grafana_dashboard.png}
\end{center}
\end{frame}

\begin{frame}
\frametitle{Debugging with traces}
\begin{center}
    \includegraphics[width=.8\linewidth]{../assets/screenshots/jaeger_listobjects.png}
\end{center}
\end{frame}

% ---------- SCALING GARAGE ---------

\section{Scaling Garage clusters}

\begin{frame}
\frametitle{Potential limitations and bottlenecks}
\begin{itemize}
    \item Global:
    \begin{itemize}
        \item Max. $\sim$100 nodes per cluster (excluding gateways)
    \end{itemize}
    \vspace{1em}
    \item Metadata:
    \begin{itemize}
        \item One big bucket = bottleneck, object list on 3 nodes only
    \end{itemize}
    \vspace{1em}
    \item Block manager:
    \begin{itemize}
        \item Lots of small files on disk
        \item Processing the resync queue can be slow
    \end{itemize}
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{Deployment advice for very large clusters}
\begin{itemize}
    \item Metadata storage:
    \begin{itemize}
        \item ZFS mirror (x2) on fast NVMe
        \item Use the LMDB storage engine
    \end{itemize}
    \vspace{.5em}
    \item Data block storage:
    \begin{itemize}
        \item Use Garage's native multi-HDD support
        \item XFS on individual drives
        \item Increase block size (1MB $\to$ 10MB, requires more RAM and good networking)
        \item Tune \texttt{resync-tranquility} and \texttt{resync-worker-count} dynamically
    \end{itemize}
    \vspace{.5em}
    \item Other:
    \begin{itemize}
        \item Split data over several buckets
        \item Use fewer than 100 storage nodes
        \item Use gateway nodes
    \end{itemize}
    \vspace{.5em}
\end{itemize}
Our deployments: $< 10$ TB. Some people have done more!
\end{frame}

% ======================================== END
% ======================================== END
% ======================================== END

\begin{frame}
\frametitle{Where to find us}
\begin{center}
    \includegraphics[width=.25\linewidth]{../../logo/garage_hires.png}\\
    \vspace{-1em}
    \url{https://garagehq.deuxfleurs.fr/}\\
    \url{mailto:garagehq@deuxfleurs.fr}\\
    \texttt{\#garage:deuxfleurs.fr} on Matrix

    \vspace{1.5em}
    \includegraphics[width=.06\linewidth]{../assets/logos/rust_logo.png}
    \includegraphics[width=.13\linewidth]{../assets/logos/AGPLv3_Logo.png}
\end{center}
\end{frame}

\end{document}

%% vim: set ts=4 sw=4 tw=0 noet spelllang=en :
|
BIN   doc/talks/assets/garage2a.drawio.pdf (new file)
BIN   doc/talks/assets/garage2b.drawio.pdf (new file)
BIN   doc/talks/assets/garage_tables.pdf (new file)
BIN   doc/talks/assets/intro/slide1.png (new file, 87 KiB)
BIN   doc/talks/assets/intro/slide2.png (new file, 81 KiB)
BIN   doc/talks/assets/intro/slide3.png (new file, 124 KiB)
BIN   doc/talks/assets/intro/slideB1.png (new file, 84 KiB)
BIN   doc/talks/assets/intro/slideB2.png (new file, 81 KiB)
BIN   doc/talks/assets/intro/slideB3.png (new file, 81 KiB)
4326  doc/talks/assets/intro/slides.svg (new file, 315 KiB)
444   doc/talks/assets/intro/slidesB.svg (new file, 286 KiB)
(modified image: 458 KiB before, 394 KiB after)
BIN   doc/talks/assets/survey_requested_features.png (new file, 79 KiB)
84
flake.lock
|
@ -28,11 +28,11 @@
|
||||||
},
|
},
|
||||||
"flake-compat": {
|
"flake-compat": {
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1688025799,
|
"lastModified": 1717312683,
|
||||||
"narHash": "sha256-ktpB4dRtnksm9F5WawoIkEneh1nrEvuxb5lJFt1iOyw=",
|
"narHash": "sha256-FrlieJH50AuvagamEvWMIE6D2OAnERuDboFDYAED/dE=",
|
||||||
"owner": "nix-community",
|
"owner": "nix-community",
|
||||||
"repo": "flake-compat",
|
"repo": "flake-compat",
|
||||||
"rev": "8bf105319d44f6b9f0d764efa4fdef9f1cc9ba1c",
|
"rev": "38fd3954cf65ce6faf3d0d45cd26059e059f07ea",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
@ -42,33 +42,12 @@
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"flake-utils": {
|
"flake-utils": {
|
||||||
"inputs": {
|
|
||||||
"systems": "systems"
|
|
||||||
},
|
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1681202837,
|
"lastModified": 1659877975,
|
||||||
"narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
|
"narHash": "sha256-zllb8aq3YO3h8B/U0/J1WBgAL8EX5yWf5pMj3G0NAmc=",
|
||||||
"owner": "numtide",
|
"owner": "numtide",
|
||||||
"repo": "flake-utils",
|
"repo": "flake-utils",
|
||||||
"rev": "cfacdce06f30d2b68473a46042957675eebb3401",
|
"rev": "c0e246b9b83f637f4681389ecabcb2681b4f3af0",
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "numtide",
|
|
||||||
"repo": "flake-utils",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"flake-utils_2": {
|
|
||||||
"inputs": {
|
|
||||||
"systems": "systems_2"
|
|
||||||
},
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1681202837,
|
|
||||||
"narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
|
|
||||||
"owner": "numtide",
|
|
||||||
"repo": "flake-utils",
|
|
||||||
"rev": "cfacdce06f30d2b68473a46042957675eebb3401",
|
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
@ -79,11 +58,11 @@
|
||||||
},
|
},
|
||||||
"nixpkgs": {
|
"nixpkgs": {
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1682109806,
|
"lastModified": 1724395761,
|
||||||
"narHash": "sha256-d9g7RKNShMLboTWwukM+RObDWWpHKaqTYXB48clBWXI=",
|
"narHash": "sha256-zRkDV/nbrnp3Y8oCADf5ETl1sDrdmAW6/bBVJ8EbIdQ=",
|
||||||
"owner": "NixOS",
|
"owner": "NixOS",
|
||||||
"repo": "nixpkgs",
|
"repo": "nixpkgs",
|
||||||
"rev": "2362848adf8def2866fabbffc50462e929d7fffb",
|
"rev": "ae815cee91b417be55d43781eb4b73ae1ecc396c",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
@ -95,17 +74,17 @@
|
||||||
},
|
},
|
||||||
"nixpkgs_2": {
|
"nixpkgs_2": {
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1682423271,
|
"lastModified": 1724681257,
|
||||||
"narHash": "sha256-WHhl1GiOij1ob4cTLL+yhqr+vFOUH8E5wAX8Ir8fvjE=",
|
"narHash": "sha256-EJRuc5Qp7yfXko5ZNeEMYAs4DzAvkCyALuJ/tGllhN4=",
|
||||||
"owner": "NixOS",
|
"owner": "NixOS",
|
||||||
"repo": "nixpkgs",
|
"repo": "nixpkgs",
|
||||||
"rev": "94517a501434a627c5d9e72ac6e7f26174b978d3",
|
"rev": "0239aeb2f82ea27ccd6b61582b8f7fb8750eeada",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
"owner": "NixOS",
|
"owner": "NixOS",
|
||||||
"repo": "nixpkgs",
|
"repo": "nixpkgs",
|
||||||
"rev": "94517a501434a627c5d9e72ac6e7f26174b978d3",
|
"rev": "0239aeb2f82ea27ccd6b61582b8f7fb8750eeada",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
@ -122,15 +101,14 @@
|
||||||
},
|
},
|
||||||
"rust-overlay": {
|
"rust-overlay": {
|
||||||
"inputs": {
|
"inputs": {
|
||||||
"flake-utils": "flake-utils_2",
|
|
||||||
"nixpkgs": "nixpkgs"
|
"nixpkgs": "nixpkgs"
|
||||||
},
|
},
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1682389182,
|
"lastModified": 1724638882,
|
||||||
"narHash": "sha256-8t2nmFnH+8V48+IJsf8AK51ebXNlVbOSVYOpiqJKvJE=",
|
"narHash": "sha256-ap2jIQi/FuUHR6HCht6ASWhoz8EiB99XmI8Esot38VE=",
|
||||||
"owner": "oxalica",
|
"owner": "oxalica",
|
||||||
"repo": "rust-overlay",
|
"repo": "rust-overlay",
|
||||||
"rev": "74f1a64dd28faeeb85ef081f32cad2989850322c",
|
"rev": "19b70f147b9c67a759e35824b241f1ed92e46694",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
@ -138,36 +116,6 @@
|
||||||
"repo": "rust-overlay",
|
"repo": "rust-overlay",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
}
|
}
|
||||||
},
|
|
||||||
"systems": {
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1681028828,
|
|
||||||
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
|
||||||
"owner": "nix-systems",
|
|
||||||
"repo": "default",
|
|
||||||
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "nix-systems",
|
|
||||||
"repo": "default",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"systems_2": {
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1681028828,
|
|
||||||
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
|
||||||
"owner": "nix-systems",
|
|
||||||
"repo": "default",
|
|
||||||
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "nix-systems",
|
|
||||||
"repo": "default",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"root": "root",
|
"root": "root",
|
||||||
|
|
81
flake.nix
|
@ -2,9 +2,9 @@
|
||||||
description =
|
description =
|
||||||
"Garage, an S3-compatible distributed object store for self-hosted deployments";
|
"Garage, an S3-compatible distributed object store for self-hosted deployments";
|
||||||
|
|
||||||
# Nixpkgs unstable as of 2023-04-25, has rustc v1.68
|
# Nixpkgs 24.05 as of 2024-08-26 has rustc v1.77
|
||||||
inputs.nixpkgs.url =
|
inputs.nixpkgs.url =
|
||||||
"github:NixOS/nixpkgs/94517a501434a627c5d9e72ac6e7f26174b978d3";
|
"github:NixOS/nixpkgs/0239aeb2f82ea27ccd6b61582b8f7fb8750eeada";
|
||||||
|
|
||||||
inputs.flake-compat.url = "github:nix-community/flake-compat";
|
inputs.flake-compat.url = "github:nix-community/flake-compat";
|
||||||
|
|
||||||
|
@ -17,9 +17,9 @@
|
||||||
# - rustc v1.66
|
# - rustc v1.66
|
||||||
# url = "github:cargo2nix/cargo2nix/8fb57a670f7993bfc24099c33eb9c5abb51f29a2";
|
# url = "github:cargo2nix/cargo2nix/8fb57a670f7993bfc24099c33eb9c5abb51f29a2";
|
||||||
|
|
||||||
# Rust overlay as of 2023-04-25
|
# Rust overlay as of 2024-08-26
|
||||||
inputs.rust-overlay.url =
|
inputs.rust-overlay.url =
|
||||||
"github:oxalica/rust-overlay/74f1a64dd28faeeb85ef081f32cad2989850322c";
|
"github:oxalica/rust-overlay/19b70f147b9c67a759e35824b241f1ed92e46694";
|
||||||
|
|
||||||
inputs.nixpkgs.follows = "nixpkgs";
|
inputs.nixpkgs.follows = "nixpkgs";
|
||||||
inputs.flake-compat.follows = "flake-compat";
|
inputs.flake-compat.follows = "flake-compat";
|
||||||
|
@ -33,25 +33,58 @@
|
||||||
compile = import ./nix/compile.nix;
|
compile = import ./nix/compile.nix;
|
||||||
in
|
in
|
||||||
flake-utils.lib.eachDefaultSystem (system:
|
flake-utils.lib.eachDefaultSystem (system:
|
||||||
let pkgs = nixpkgs.legacyPackages.${system};
|
let
|
||||||
in {
|
pkgs = nixpkgs.legacyPackages.${system};
|
||||||
packages = {
|
in
|
||||||
default = (compile {
|
{
|
||||||
inherit system git_version;
|
packages =
|
||||||
pkgsSrc = nixpkgs;
|
let
|
||||||
cargo2nixOverlay = cargo2nix.overlays.default;
|
packageFor = target: (compile {
|
||||||
release = true;
|
inherit system git_version target;
|
||||||
}).workspace.garage { compileMode = "build"; };
|
pkgsSrc = nixpkgs;
|
||||||
};
|
cargo2nixOverlay = cargo2nix.overlays.default;
|
||||||
devShell = (compile {
|
release = true;
|
||||||
inherit system git_version;
|
}).workspace.garage { compileMode = "build"; };
|
||||||
pkgsSrc = nixpkgs;
|
in
|
||||||
cargo2nixOverlay = cargo2nix.overlays.default;
|
{
|
||||||
release = false;
|
# default = native release build
|
||||||
}).workspaceShell { packages = with pkgs; [
|
default = packageFor null;
|
||||||
rustfmt
|
# other = cross-compiled, statically-linked builds
|
||||||
clang
|
amd64 = packageFor "x86_64-unknown-linux-musl";
|
||||||
mold
|
i386 = packageFor "i686-unknown-linux-musl";
|
||||||
]; };
|
arm64 = packageFor "aarch64-unknown-linux-musl";
|
||||||
|
arm = packageFor "armv6l-unknown-linux-musl";
|
||||||
|
};
|
||||||
|
|
||||||
|
# ---- developpment shell, for making native builds only ----
|
||||||
|
devShells =
|
||||||
|
let
|
||||||
|
shellWithPackages = (packages: (compile {
|
||||||
|
inherit system git_version;
|
||||||
|
pkgsSrc = nixpkgs;
|
||||||
|
cargo2nixOverlay = cargo2nix.overlays.default;
|
||||||
|
}).workspaceShell { inherit packages; });
|
||||||
|
in
|
||||||
|
{
|
||||||
|
default = shellWithPackages
|
||||||
|
(with pkgs; [
|
||||||
|
rustfmt
|
||||||
|
clang
|
||||||
|
mold
|
||||||
|
]);
|
||||||
|
|
||||||
|
# import the full shell using `nix develop .#full`
|
||||||
|
full = shellWithPackages (with pkgs; [
|
||||||
|
rustfmt
|
||||||
|
rust-analyzer
|
||||||
|
clang
|
||||||
|
mold
|
||||||
|
# ---- extra packages for dev tasks ----
|
||||||
|
cargo-audit
|
||||||
|
cargo-outdated
|
||||||
|
cargo-machete
|
||||||
|
nixpkgs-fmt
|
||||||
|
]);
|
||||||
|
};
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
158
k2v_test.py
|
@ -1,158 +0,0 @@
|
||||||
#!/usr/bin/env python
|
|
||||||
|
|
||||||
import os
|
|
||||||
import requests
|
|
||||||
from datetime import datetime
|
|
||||||
|
|
||||||
# let's talk to our AWS Elasticsearch cluster
|
|
||||||
#from requests_aws4auth import AWS4Auth
|
|
||||||
#auth = AWS4Auth('GK31c2f218a2e44f485b94239e',
|
|
||||||
# 'b892c0665f0ada8a4755dae98baa3b133590e11dae3bcc1f9d769d67f16c3835',
|
|
||||||
# 'us-east-1',
|
|
||||||
# 's3')
|
|
||||||
|
|
||||||
from aws_requests_auth.aws_auth import AWSRequestsAuth
|
|
||||||
auth = AWSRequestsAuth(aws_access_key='GK31c2f218a2e44f485b94239e',
|
|
||||||
aws_secret_access_key='b892c0665f0ada8a4755dae98baa3b133590e11dae3bcc1f9d769d67f16c3835',
|
|
||||||
aws_host='localhost:3812',
|
|
||||||
aws_region='us-east-1',
|
|
||||||
aws_service='k2v')
|
|
||||||
|
|
||||||
|
|
||||||
print("-- ReadIndex")
|
|
||||||
response = requests.get('http://localhost:3812/alex',
|
|
||||||
auth=auth)
|
|
||||||
print(response.headers)
|
|
||||||
print(response.text)
|
|
||||||
|
|
||||||
|
|
||||||
sort_keys = ["a", "b", "c", "d"]
|
|
||||||
|
|
||||||
for sk in sort_keys:
|
|
||||||
print("-- (%s) Put initial (no CT)"%sk)
|
|
||||||
response = requests.put('http://localhost:3812/alex/root?sort_key=%s'%sk,
|
|
||||||
auth=auth,
|
|
||||||
data='{}: Hello, world!'.format(datetime.timestamp(datetime.now())))
|
|
||||||
print(response.headers)
|
|
||||||
print(response.text)
|
|
||||||
|
|
||||||
print("-- Get")
|
|
||||||
response = requests.get('http://localhost:3812/alex/root?sort_key=%s'%sk,
|
|
||||||
auth=auth)
|
|
||||||
print(response.headers)
|
|
||||||
print(response.text)
|
|
||||||
ct = response.headers["x-garage-causality-token"]
|
|
||||||
|
|
||||||
print("-- ReadIndex")
|
|
||||||
response = requests.get('http://localhost:3812/alex',
|
|
||||||
auth=auth)
|
|
||||||
print(response.headers)
|
|
||||||
print(response.text)
|
|
||||||
|
|
||||||
print("-- Put with CT")
|
|
||||||
response = requests.put('http://localhost:3812/alex/root?sort_key=%s'%sk,
|
|
||||||
auth=auth,
|
|
||||||
headers={'x-garage-causality-token': ct},
|
|
||||||
data='{}: Good bye, world!'.format(datetime.timestamp(datetime.now())))
|
|
||||||
print(response.headers)
|
|
||||||
print(response.text)
|
|
||||||
|
|
||||||
print("-- Get")
|
|
||||||
response = requests.get('http://localhost:3812/alex/root?sort_key=%s'%sk,
|
|
||||||
auth=auth)
|
|
||||||
print(response.headers)
|
|
||||||
print(response.text)
|
|
||||||
|
|
||||||
print("-- Put again with same CT (concurrent)")
|
|
||||||
response = requests.put('http://localhost:3812/alex/root?sort_key=%s'%sk,
|
|
||||||
auth=auth,
|
|
||||||
headers={'x-garage-causality-token': ct},
|
|
||||||
data='{}: Concurrent value, oops'.format(datetime.timestamp(datetime.now())))
|
|
||||||
print(response.headers)
|
|
||||||
print(response.text)
|
|
||||||
|
|
||||||
for sk in sort_keys:
|
|
||||||
print("-- (%s) Get"%sk)
|
|
||||||
response = requests.get('http://localhost:3812/alex/root?sort_key=%s'%sk,
|
|
||||||
auth=auth)
|
|
||||||
print(response.headers)
|
|
||||||
print(response.text)
|
|
||||||
ct = response.headers["x-garage-causality-token"]
|
|
||||||
|
|
||||||
print("-- Delete")
|
|
||||||
response = requests.delete('http://localhost:3812/alex/root?sort_key=%s'%sk,
|
|
||||||
headers={'x-garage-causality-token': ct},
|
|
||||||
auth=auth)
|
|
||||||
print(response.headers)
|
|
||||||
print(response.text)
|
|
||||||
|
|
||||||
print("-- ReadIndex")
|
|
||||||
response = requests.get('http://localhost:3812/alex',
|
|
||||||
auth=auth)
|
|
||||||
print(response.headers)
|
|
||||||
print(response.text)
|
|
||||||
|
|
||||||
print("-- InsertBatch")
|
|
||||||
response = requests.post('http://localhost:3812/alex',
|
|
||||||
auth=auth,
|
|
||||||
data='''
|
|
||||||
[
|
|
||||||
{"pk": "root", "sk": "a", "ct": null, "v": "aW5pdGlhbCB0ZXN0Cg=="},
|
|
||||||
{"pk": "root", "sk": "b", "ct": null, "v": "aW5pdGlhbCB0ZXN1Cg=="},
|
|
||||||
{"pk": "root", "sk": "c", "ct": null, "v": "aW5pdGlhbCB0ZXN2Cg=="}
|
|
||||||
]
|
|
||||||
''')
|
|
||||||
print(response.headers)
|
|
||||||
print(response.text)
|
|
||||||
|
|
||||||
print("-- ReadIndex")
|
|
||||||
response = requests.get('http://localhost:3812/alex',
|
|
||||||
auth=auth)
|
|
||||||
print(response.headers)
|
|
||||||
print(response.text)
|
|
||||||
|
|
||||||
for sk in sort_keys:
|
|
||||||
print("-- (%s) Get"%sk)
|
|
||||||
response = requests.get('http://localhost:3812/alex/root?sort_key=%s'%sk,
|
|
||||||
auth=auth)
|
|
||||||
print(response.headers)
|
|
||||||
print(response.text)
|
|
||||||
ct = response.headers["x-garage-causality-token"]
|
|
||||||
|
|
||||||
print("-- ReadBatch")
|
|
||||||
response = requests.post('http://localhost:3812/alex?search',
|
|
||||||
auth=auth,
|
|
||||||
data='''
|
|
||||||
[
|
|
||||||
{"partitionKey": "root"},
|
|
||||||
{"partitionKey": "root", "tombstones": true},
|
|
||||||
{"partitionKey": "root", "tombstones": true, "limit": 2},
|
|
||||||
{"partitionKey": "root", "start": "c", "singleItem": true},
|
|
||||||
{"partitionKey": "root", "start": "b", "end": "d", "tombstones": true}
|
|
||||||
]
|
|
||||||
''')
|
|
||||||
print(response.headers)
|
|
||||||
print(response.text)
|
|
||||||
|
|
||||||
|
|
||||||
print("-- DeleteBatch")
|
|
||||||
response = requests.post('http://localhost:3812/alex?delete',
|
|
||||||
auth=auth,
|
|
||||||
data='''
|
|
||||||
[
|
|
||||||
{"partitionKey": "root", "start": "b", "end": "c"}
|
|
||||||
]
|
|
||||||
''')
|
|
||||||
print(response.headers)
|
|
||||||
print(response.text)
|
|
||||||
|
|
||||||
print("-- ReadBatch")
|
|
||||||
response = requests.post('http://localhost:3812/alex?search',
|
|
||||||
auth=auth,
|
|
||||||
data='''
|
|
||||||
[
|
|
||||||
{"partitionKey": "root"}
|
|
||||||
]
|
|
||||||
''')
|
|
||||||
print(response.headers)
|
|
||||||
print(response.text)
|
|
|
@ -14,4 +14,5 @@ rec {
|
||||||
pkgsSrc = flake.defaultNix.inputs.nixpkgs;
|
pkgsSrc = flake.defaultNix.inputs.nixpkgs;
|
||||||
cargo2nix = flake.defaultNix.inputs.cargo2nix;
|
cargo2nix = flake.defaultNix.inputs.cargo2nix;
|
||||||
cargo2nixOverlay = cargo2nix.overlays.default;
|
cargo2nixOverlay = cargo2nix.overlays.default;
|
||||||
|
devShells = builtins.getAttr builtins.currentSystem flake.defaultNix.devShells;
|
||||||
}
|
}
|
||||||
|
|
|
@ -19,30 +19,9 @@ let
|
||||||
overlays = [ cargo2nixOverlay ];
|
overlays = [ cargo2nixOverlay ];
|
||||||
};
|
};
|
||||||
|
|
||||||
/* Cargo2nix is built for rustOverlay which installs Rust from Mozilla releases.
|
toolchainOptions = {
|
||||||
This is fine for 64-bit platforms, but for 32-bit platforms, we need our own Rust
|
rustVersion = "1.77.0";
|
||||||
to avoid incompatibilities with time_t between different versions of musl
|
|
||||||
(>= 1.2.0 shipped by NixOS, < 1.2.0 with which rustc was built), which lead to compilation breakage.
|
|
||||||
So we want a Rust release that is bound to our Nix repository to avoid these problems.
|
|
||||||
See here for more info: https://musl.libc.org/time64.html
|
|
||||||
Because Cargo2nix does not support the Rust environment shipped by NixOS,
|
|
||||||
we emulate the structure of the Rust object created by rustOverlay.
|
|
||||||
In practise, rustOverlay ships rustc+cargo in a single derivation while
|
|
||||||
NixOS ships them in separate ones. We reunite them with symlinkJoin.
|
|
||||||
*/
|
|
||||||
toolchainOptions = if target == null || target == "x86_64-unknown-linux-musl"
|
|
||||||
|| target == "aarch64-unknown-linux-musl" then {
|
|
||||||
rustVersion = "1.68.0";
|
|
||||||
extraRustComponents = [ "clippy" ];
|
extraRustComponents = [ "clippy" ];
|
||||||
} else {
|
|
||||||
rustToolchain = pkgs.symlinkJoin {
|
|
||||||
name = "rust-static-toolchain-${target}";
|
|
||||||
paths = [
|
|
||||||
pkgs.rustPlatform.rust.cargo
|
|
||||||
pkgs.rustPlatform.rust.rustc
|
|
||||||
# clippy not needed, it only runs on amd64
|
|
||||||
];
|
|
||||||
};
|
|
||||||
};
|
};
|
||||||
|
|
||||||
buildEnv = (drv:
|
buildEnv = (drv:
|
||||||
|
@ -189,13 +168,12 @@ let
|
||||||
rootFeatures = if features != null then
|
rootFeatures = if features != null then
|
||||||
features
|
features
|
||||||
else
|
else
|
||||||
([ "garage/bundled-libs" "garage/sled" "garage/lmdb" "garage/k2v" ] ++ (if release then [
|
([ "garage/bundled-libs" "garage/lmdb" "garage/sqlite" "garage/k2v" ] ++ (if release then [
|
||||||
"garage/consul-discovery"
|
"garage/consul-discovery"
|
||||||
"garage/kubernetes-discovery"
|
"garage/kubernetes-discovery"
|
||||||
"garage/metrics"
|
"garage/metrics"
|
||||||
"garage/telemetry-otlp"
|
"garage/telemetry-otlp"
|
||||||
"garage/lmdb"
|
"garage/syslog"
|
||||||
"garage/sqlite"
|
|
||||||
] else
|
] else
|
||||||
[ ]));
|
[ ]));
|
||||||
|
|
||||||
|
|
|
@ -1,24 +0,0 @@
|
||||||
pkgs:
|
|
||||||
pkgs.buildGoModule rec {
|
|
||||||
pname = "kaniko";
|
|
||||||
version = "1.9.2";
|
|
||||||
|
|
||||||
src = pkgs.fetchFromGitHub {
|
|
||||||
owner = "GoogleContainerTools";
|
|
||||||
repo = "kaniko";
|
|
||||||
rev = "v${version}";
|
|
||||||
sha256 = "dXQ0/o1qISv+sjNVIpfF85bkbM9sGOGwqVbWZpMWfMY=";
|
|
||||||
};
|
|
||||||
|
|
||||||
vendorSha256 = null;
|
|
||||||
|
|
||||||
checkPhase = "true";
|
|
||||||
|
|
||||||
meta = with pkgs.lib; {
|
|
||||||
description =
|
|
||||||
"kaniko is a tool to build container images from a Dockerfile, inside a container or Kubernetes cluster.";
|
|
||||||
homepage = "https://github.com/GoogleContainerTools/kaniko";
|
|
||||||
license = licenses.asl20;
|
|
||||||
platforms = platforms.linux;
|
|
||||||
};
|
|
||||||
}
|
|
|
@ -1,24 +0,0 @@
|
||||||
pkgs:
|
|
||||||
pkgs.buildGoModule rec {
|
|
||||||
pname = "manifest-tool";
|
|
||||||
version = "2.0.5";
|
|
||||||
|
|
||||||
src = pkgs.fetchFromGitHub {
|
|
||||||
owner = "estesp";
|
|
||||||
repo = "manifest-tool";
|
|
||||||
rev = "v${version}";
|
|
||||||
sha256 = "hjCGKnE0yrlnF/VIzOwcDzmQX3Wft+21KCny/opqdLg=";
|
|
||||||
} + "/v2";
|
|
||||||
|
|
||||||
vendorSha256 = null;
|
|
||||||
|
|
||||||
checkPhase = "true";
|
|
||||||
|
|
||||||
meta = with pkgs.lib; {
|
|
||||||
description =
|
|
||||||
"Command line tool to create and query container image manifest list/indexes";
|
|
||||||
homepage = "https://github.com/estesp/manifest-tool";
|
|
||||||
license = licenses.asl20;
|
|
||||||
platforms = platforms.linux;
|
|
||||||
};
|
|
||||||
}
|
|
|
@ -1,31 +0,0 @@
|
||||||
{ system ? builtins.currentSystem, }:
|
|
||||||
|
|
||||||
with import ./common.nix;
|
|
||||||
|
|
||||||
let
|
|
||||||
platforms = [
|
|
||||||
#"x86_64-unknown-linux-musl"
|
|
||||||
"i686-unknown-linux-musl"
|
|
||||||
#"aarch64-unknown-linux-musl"
|
|
||||||
"armv6l-unknown-linux-musleabihf"
|
|
||||||
];
|
|
||||||
pkgsList = builtins.map (target:
|
|
||||||
import pkgsSrc {
|
|
||||||
inherit system;
|
|
||||||
crossSystem = {
|
|
||||||
config = target;
|
|
||||||
isStatic = true;
|
|
||||||
};
|
|
||||||
overlays = [ cargo2nixOverlay ];
|
|
||||||
}) platforms;
|
|
||||||
pkgsHost = import pkgsSrc { };
|
|
||||||
lib = pkgsHost.lib;
|
|
||||||
kaniko = (import ./kaniko.nix) pkgsHost;
|
|
||||||
winscp = (import ./winscp.nix) pkgsHost;
|
|
||||||
manifestTool = (import ./manifest-tool.nix) pkgsHost;
|
|
||||||
in lib.flatten (builtins.map (pkgs: [
|
|
||||||
pkgs.rustPlatform.rust.rustc
|
|
||||||
pkgs.rustPlatform.rust.cargo
|
|
||||||
pkgs.buildPackages.stdenv.cc
|
|
||||||
]) pkgsList) ++ [ kaniko winscp manifestTool ]
|
|
||||||
|
|
|
@ -15,10 +15,10 @@ type: application
|
||||||
# This is the chart version. This version number should be incremented each time you make changes
|
# This is the chart version. This version number should be incremented each time you make changes
|
||||||
# to the chart and its templates, including the app version.
|
# to the chart and its templates, including the app version.
|
||||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||||
version: 0.4.1
|
version: 0.5.1
|
||||||
|
|
||||||
# This is the version number of the application being deployed. This version number should be
|
# This is the version number of the application being deployed. This version number should be
|
||||||
# incremented each time you make changes to the application. Versions are not expected to
|
# incremented each time you make changes to the application. Versions are not expected to
|
||||||
# follow Semantic Versioning. They should reflect the version the application is using.
|
# follow Semantic Versioning. They should reflect the version the application is using.
|
||||||
# It is recommended to use it with quotes.
|
# It is recommended to use it with quotes.
|
||||||
appVersion: "v0.9.1"
|
appVersion: "v1.0.1"
|
||||||
|
|
|
@ -11,6 +11,7 @@ spec:
|
||||||
{{- if eq .Values.deployment.kind "StatefulSet" }}
|
{{- if eq .Values.deployment.kind "StatefulSet" }}
|
||||||
replicas: {{ .Values.deployment.replicaCount }}
|
replicas: {{ .Values.deployment.replicaCount }}
|
||||||
serviceName: {{ include "garage.fullname" . }}
|
serviceName: {{ include "garage.fullname" . }}
|
||||||
|
podManagementPolicy: {{ .Values.deployment.podManagementPolicy }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
template:
|
template:
|
||||||
metadata:
|
metadata:
|
||||||
|
@ -63,6 +64,10 @@ spec:
|
||||||
name: web-api
|
name: web-api
|
||||||
- containerPort: 3903
|
- containerPort: 3903
|
||||||
name: admin
|
name: admin
|
||||||
|
{{- with .Values.environment }}
|
||||||
|
env:
|
||||||
|
{{- toYaml . | nindent 12 }}
|
||||||
|
{{- end }}
|
||||||
volumeMounts:
|
volumeMounts:
|
||||||
- name: meta
|
- name: meta
|
||||||
mountPath: /mnt/meta
|
mountPath: /mnt/meta
|
||||||
|
@ -71,6 +76,9 @@ spec:
|
||||||
- name: etc
|
- name: etc
|
||||||
mountPath: /etc/garage.toml
|
mountPath: /etc/garage.toml
|
||||||
subPath: garage.toml
|
subPath: garage.toml
|
||||||
|
{{- with .Values.extraVolumeMounts }}
|
||||||
|
{{- toYaml . | nindent 12 }}
|
||||||
|
{{- end }}
|
||||||
# TODO
|
# TODO
|
||||||
# livenessProbe:
|
# livenessProbe:
|
||||||
# httpGet:
|
# httpGet:
|
||||||
|
@ -105,6 +113,9 @@ spec:
|
||||||
- name: data
|
- name: data
|
||||||
emptyDir: {}
|
emptyDir: {}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
{{- with .Values.extraVolumes }}
|
||||||
|
{{- toYaml . | nindent 8 }}
|
||||||
|
{{- end }}
|
||||||
{{- with .Values.nodeSelector }}
|
{{- with .Values.nodeSelector }}
|
||||||
nodeSelector:
|
nodeSelector:
|
||||||
{{- toYaml . | nindent 8 }}
|
{{- toYaml . | nindent 8 }}
|
||||||
|
|
|
@ -6,18 +6,13 @@
|
||||||
garage:
|
garage:
|
||||||
# Can be changed for better performance on certain systems
|
# Can be changed for better performance on certain systems
|
||||||
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db-engine-since-v0-8-0
|
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db-engine-since-v0-8-0
|
||||||
dbEngine: "sled"
|
dbEngine: "lmdb"
|
||||||
|
|
||||||
# Defaults is 1MB
|
# Defaults is 1MB
|
||||||
# An increase can result in better performance in certain scenarios
|
# An increase can result in better performance in certain scenarios
|
||||||
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#block-size
|
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#block-size
|
||||||
blockSize: "1048576"
|
blockSize: "1048576"
|
||||||
|
|
||||||
# Tuning parameters for the sled DB engine
|
|
||||||
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#sled-cache-capacity
|
|
||||||
sledCacheCapacity: "134217728"
|
|
||||||
sledFlushEveryMs: "2000"
|
|
||||||
|
|
||||||
# Default to 3 replicas, see the replication_mode section at
|
# Default to 3 replicas, see the replication_mode section at
|
||||||
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication-mode
|
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication-mode
|
||||||
replicationMode: "3"
|
replicationMode: "3"
|
||||||
|
@ -50,11 +45,6 @@ garage:
|
||||||
|
|
||||||
block_size = {{ .Values.garage.blockSize }}
|
block_size = {{ .Values.garage.blockSize }}
|
||||||
|
|
||||||
{{- if eq .Values.garage.dbEngine "sled"}}
|
|
||||||
sled_cache_capacity = {{ .Values.garage.sledCacheCapacity }}
|
|
||||||
sled_flush_every_ms = {{ .Values.garage.sledFlushEveryMs }}
|
|
||||||
{{- end }}
|
|
||||||
|
|
||||||
replication_mode = "{{ .Values.garage.replicationMode }}"
|
replication_mode = "{{ .Values.garage.replicationMode }}"
|
||||||
|
|
||||||
compression_level = {{ .Values.garage.compressionLevel }}
|
compression_level = {{ .Values.garage.compressionLevel }}
|
||||||
|
@ -106,6 +96,8 @@ deployment:
|
||||||
kind: StatefulSet
|
kind: StatefulSet
|
||||||
# Number of StatefulSet replicas/garage nodes to start
|
# Number of StatefulSet replicas/garage nodes to start
|
||||||
replicaCount: 3
|
replicaCount: 3
|
||||||
|
# If using statefulset, allow Parallel or OrderedReady (default)
|
||||||
|
podManagementPolicy: OrderedReady
|
||||||
|
|
||||||
image:
|
image:
|
||||||
repository: dxflrs/amd64_garage
|
repository: dxflrs/amd64_garage
|
||||||
|
@ -224,6 +216,12 @@ tolerations: []
|
||||||
|
|
||||||
affinity: {}
|
affinity: {}
|
||||||
|
|
||||||
|
environment: {}
|
||||||
|
|
||||||
|
extraVolumes: {}
|
||||||
|
|
||||||
|
extraVolumeMounts: {}
|
||||||
|
|
||||||
monitoring:
|
monitoring:
|
||||||
metrics:
|
metrics:
|
||||||
# If true, a service for monitoring is created with a prometheus.io/scrape annotation
|
# If true, a service for monitoring is created with a prometheus.io/scrape annotation
|
||||||
|
|
14
script/jepsen.garage/Vagrantfile
vendored
|
@ -30,11 +30,11 @@ Vagrant.configure("2") do |config|
|
||||||
config.vm.define "n6" do |config| vm(config, "n6", "192.168.56.26") end
|
config.vm.define "n6" do |config| vm(config, "n6", "192.168.56.26") end
|
||||||
config.vm.define "n7" do |config| vm(config, "n7", "192.168.56.27") end
|
config.vm.define "n7" do |config| vm(config, "n7", "192.168.56.27") end
|
||||||
|
|
||||||
config.vm.define "n8" do |config| vm(config, "n8", "192.168.56.28") end
|
#config.vm.define "n8" do |config| vm(config, "n8", "192.168.56.28") end
|
||||||
config.vm.define "n9" do |config| vm(config, "n9", "192.168.56.29") end
|
#config.vm.define "n9" do |config| vm(config, "n9", "192.168.56.29") end
|
||||||
config.vm.define "n10" do |config| vm(config, "n10", "192.168.56.30") end
|
#config.vm.define "n10" do |config| vm(config, "n10", "192.168.56.30") end
|
||||||
config.vm.define "n11" do |config| vm(config, "n11", "192.168.56.31") end
|
#config.vm.define "n11" do |config| vm(config, "n11", "192.168.56.31") end
|
||||||
config.vm.define "n12" do |config| vm(config, "n12", "192.168.56.32") end
|
#config.vm.define "n12" do |config| vm(config, "n12", "192.168.56.32") end
|
||||||
config.vm.define "n13" do |config| vm(config, "n13", "192.168.56.33") end
|
#config.vm.define "n13" do |config| vm(config, "n13", "192.168.56.33") end
|
||||||
config.vm.define "n14" do |config| vm(config, "n14", "192.168.56.34") end
|
#config.vm.define "n14" do |config| vm(config, "n14", "192.168.56.34") end
|
||||||
end
|
end
|
||||||
|
|
|
@ -3,11 +3,10 @@
|
||||||
set -x
|
set -x
|
||||||
|
|
||||||
#for ppatch in task3c task3a tsfix2; do
|
#for ppatch in task3c task3a tsfix2; do
|
||||||
for ppatch in tsfix2; do
|
for ppatch in v093 v1rc1; do
|
||||||
#for psc in c cp cdp r pr cpr dpr; do
|
#for psc in c cp cdp r pr cpr dpr; do
|
||||||
for psc in cdp r pr cpr dpr; do
|
for ptsk in reg2 set2; do
|
||||||
#for ptsk in reg2 set1 set2; do
|
for psc in c cp cdp r pr cpr dpr; do
|
||||||
for ptsk in set1; do
|
|
||||||
for irun in $(seq 10); do
|
for irun in $(seq 10); do
|
||||||
lein run test --nodes-file nodes.vagrant \
|
lein run test --nodes-file nodes.vagrant \
|
||||||
--time-limit 60 --rate 100 --concurrency 100 --ops-per-key 100 \
|
--time-limit 60 --rate 100 --concurrency 100 --ops-per-key 100 \
|
||||||
|
|
|
@ -38,7 +38,9 @@
|
||||||
"tsfix2" "c82d91c6bccf307186332b6c5c6fc0b128b1b2b1"
|
"tsfix2" "c82d91c6bccf307186332b6c5c6fc0b128b1b2b1"
|
||||||
"task3a" "707442f5de416fdbed4681a33b739f0a787b7834"
|
"task3a" "707442f5de416fdbed4681a33b739f0a787b7834"
|
||||||
"task3b" "431b28e0cfdc9cac6c649193cf602108a8b02997"
|
"task3b" "431b28e0cfdc9cac6c649193cf602108a8b02997"
|
||||||
"task3c" "0041b013a473e3ae72f50209d8f79db75a72848b"})
|
"task3c" "0041b013a473e3ae72f50209d8f79db75a72848b"
|
||||||
|
"v093" "v0.9.3"
|
||||||
|
"v1rc1" "v1.0.0-rc1"})
|
||||||
|
|
||||||
(def cli-opts
|
(def cli-opts
|
||||||
"Additional command line options."
|
"Additional command line options."
|
||||||
|
|
|
@ -43,7 +43,7 @@
|
||||||
"rpc_bind_addr = \"0.0.0.0:3901\"\n"
|
"rpc_bind_addr = \"0.0.0.0:3901\"\n"
|
||||||
"rpc_public_addr = \"" node ":3901\"\n"
|
"rpc_public_addr = \"" node ":3901\"\n"
|
||||||
"db_engine = \"lmdb\"\n"
|
"db_engine = \"lmdb\"\n"
|
||||||
"replication_mode = \"2\"\n"
|
"replication_mode = \"3\"\n"
|
||||||
"data_dir = \"" data-dir "\"\n"
|
"data_dir = \"" data-dir "\"\n"
|
||||||
"metadata_dir = \"" meta-dir "\"\n"
|
"metadata_dir = \"" meta-dir "\"\n"
|
||||||
"[s3_api]\n"
|
"[s3_api]\n"
|
||||||
|
|
|
@ -81,10 +81,21 @@ if [ -z "$SKIP_AWS" ]; then
|
||||||
echo "Invalid multipart upload"
|
echo "Invalid multipart upload"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
fi
|
aws s3api delete-object --bucket eprouvette --key upload
|
||||||
|
|
||||||
echo "OK!!"
|
echo "🛠️ Test SSE-C with awscli (aws s3)"
|
||||||
exit 0
|
SSEC_KEY="u8zCfnEyt5Imo/krN+sxA1DQXxLWtPJavU6T6gOVj1Y="
|
||||||
|
SSEC_KEY_MD5="jMGbs3GyZkYjJUP6q5jA7g=="
|
||||||
|
echo "$SSEC_KEY" | base64 -d > /tmp/garage.ssec-key
|
||||||
|
for idx in {1,2}.rnd; do
|
||||||
|
aws s3 cp --sse-c AES256 --sse-c-key fileb:///tmp/garage.ssec-key \
|
||||||
|
"/tmp/garage.$idx" "s3://eprouvette/garage.$idx.aws.sse-c"
|
||||||
|
aws s3 cp --sse-c AES256 --sse-c-key fileb:///tmp/garage.ssec-key \
|
||||||
|
"s3://eprouvette/garage.$idx.aws.sse-c" "/tmp/garage.$idx.dl.sse-c"
|
||||||
|
diff "/tmp/garage.$idx" "/tmp/garage.$idx.dl.sse-c"
|
||||||
|
aws s3api delete-object --bucket eprouvette --key "garage.$idx.aws.sse-c"
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
|
||||||
# S3CMD
|
# S3CMD
|
||||||
if [ -z "$SKIP_S3CMD" ]; then
|
if [ -z "$SKIP_S3CMD" ]; then
|
||||||
|
|
122
shell.nix
|
@ -5,101 +5,49 @@ with import ./nix/common.nix;
|
||||||
let
|
let
|
||||||
pkgs = import pkgsSrc {
|
pkgs = import pkgsSrc {
|
||||||
inherit system;
|
inherit system;
|
||||||
overlays = [ cargo2nixOverlay ];
|
|
||||||
};
|
};
|
||||||
kaniko = (import ./nix/kaniko.nix) pkgs;
|
|
||||||
manifest-tool = (import ./nix/manifest-tool.nix) pkgs;
|
|
||||||
winscp = (import ./nix/winscp.nix) pkgs;
|
winscp = (import ./nix/winscp.nix) pkgs;
|
||||||
|
in
|
||||||
|
{
|
||||||
|
# --- Dev shell inherited from flake.nix ---
|
||||||
|
devShell = devShells.default;
|
||||||
|
devShellFull = devShells.full;
|
||||||
|
|
||||||
in {
|
# --- Continuous integration shell ---
|
||||||
# --- Rust Shell ---
|
# The shell used for all CI jobs (along with devShell)
|
||||||
# Use it to compile Garage
|
ci = pkgs.mkShell {
|
||||||
rust = pkgs.mkShell {
|
|
||||||
nativeBuildInputs = with pkgs; [
|
nativeBuildInputs = with pkgs; [
|
||||||
#rustPlatform.rust.rustc
|
|
||||||
rustPlatform.rust.cargo
|
|
||||||
clang
|
|
||||||
mold
|
|
||||||
#clippy
|
|
||||||
rustfmt
|
|
||||||
#perl
|
|
||||||
#protobuf
|
|
||||||
#pkg-config
|
|
||||||
#openssl
|
|
||||||
file
|
|
||||||
#cargo2nix.packages.x86_64-linux.cargo2nix
|
|
||||||
];
|
|
||||||
};
|
|
||||||
|
|
||||||
# --- Integration shell ---
|
|
||||||
# Use it to test Garage with common S3 clients
|
|
||||||
integration = pkgs.mkShell {
|
|
||||||
nativeBuildInputs = [
|
|
||||||
winscp
|
winscp
|
||||||
pkgs.s3cmd
|
|
||||||
pkgs.awscli2
|
kaniko
|
||||||
pkgs.minio-client
|
manifest-tool
|
||||||
pkgs.rclone
|
awscli2
|
||||||
pkgs.socat
|
file
|
||||||
pkgs.psmisc
|
s3cmd
|
||||||
pkgs.which
|
minio-client
|
||||||
pkgs.openssl
|
rclone
|
||||||
pkgs.curl
|
socat
|
||||||
pkgs.jq
|
psmisc
|
||||||
|
which
|
||||||
|
openssl
|
||||||
|
curl
|
||||||
|
jq
|
||||||
];
|
];
|
||||||
};
|
|
||||||
|
|
||||||
# --- Release shell ---
|
|
||||||
# A shell built to make releasing easier
|
|
||||||
release = pkgs.mkShell {
|
|
||||||
shellHook = ''
|
shellHook = ''
|
||||||
function refresh_toolchain {
|
|
||||||
pass show deuxfleurs/nix_priv_key > /tmp/nix-signing-key.sec
|
|
||||||
nix copy \
|
|
||||||
--to 's3://nix?endpoint=garage.deuxfleurs.fr®ion=garage&secret-key=/tmp/nix-signing-key.sec' \
|
|
||||||
$(nix-store -qR \
|
|
||||||
$(nix-build --no-build-output --no-out-link nix/toolchain.nix))
|
|
||||||
rm /tmp/nix-signing-key.sec
|
|
||||||
}
|
|
||||||
|
|
||||||
function refresh_cache {
|
|
||||||
pass show deuxfleurs/nix_priv_key > /tmp/nix-signing-key.sec
|
|
||||||
for attr in clippy.amd64 test.amd64 pkgs.{amd64,i386,arm,arm64}.{debug,release}; do
|
|
||||||
echo "Updating cache for ''${attr}"
|
|
||||||
derivation=$(nix-instantiate --attr ''${attr})
|
|
||||||
nix copy -j8 \
|
|
||||||
--to 's3://nix?endpoint=garage.deuxfleurs.fr®ion=garage&secret-key=/tmp/nix-signing-key.sec' \
|
|
||||||
$(nix-store -qR ''${derivation%\!bin})
|
|
||||||
done
|
|
||||||
rm /tmp/nix-signing-key.sec
|
|
||||||
}
|
|
||||||
|
|
||||||
function refresh_flake_cache {
|
|
||||||
pass show deuxfleurs/nix_priv_key > /tmp/nix-signing-key.sec
|
|
||||||
for attr in packages.x86_64-linux.default devShell.x86_64-linux; do
|
|
||||||
echo "Updating cache for ''${attr}"
|
|
||||||
derivation=$(nix path-info --derivation ".#''${attr}")
|
|
||||||
nix copy -j8 \
|
|
||||||
--to 's3://nix?endpoint=garage.deuxfleurs.fr®ion=garage&secret-key=/tmp/nix-signing-key.sec' \
|
|
||||||
$(nix-store -qR ''${derivation})
|
|
||||||
done
|
|
||||||
rm /tmp/nix-signing-key.sec
|
|
||||||
}
|
|
||||||
|
|
||||||
function to_s3 {
|
function to_s3 {
|
||||||
aws \
|
aws \
|
||||||
--endpoint-url https://garage.deuxfleurs.fr \
|
--endpoint-url https://garage.deuxfleurs.fr \
|
||||||
--region garage \
|
--region garage \
|
||||||
s3 cp \
|
s3 cp \
|
||||||
./result-bin/bin/garage \
|
./result-bin/bin/garage \
|
||||||
s3://garagehq.deuxfleurs.fr/_releases/''${DRONE_TAG:-$DRONE_COMMIT}/''${TARGET}/garage
|
s3://garagehq.deuxfleurs.fr/_releases/''${CI_COMMIT_TAG:-$CI_COMMIT_SHA}/''${TARGET}/garage
|
||||||
}
|
}
|
||||||
|
|
||||||
function to_docker {
|
function to_docker {
|
||||||
executor \
|
executor \
|
||||||
--force \
|
--force \
|
||||||
--customPlatform="''${DOCKER_PLATFORM}" \
|
--customPlatform="$(echo "''${DOCKER_PLATFORM}" | sed 's/i386/386/')" \
|
||||||
--destination "''${CONTAINER_NAME}:''${CONTAINER_TAG}" \
|
--destination "$(echo "''${CONTAINER_NAME}" | sed 's/i386/386/'):''${CONTAINER_TAG}" \
|
||||||
--context dir://`pwd` \
|
--context dir://`pwd` \
|
||||||
--verbosity=debug
|
--verbosity=debug
|
||||||
}
|
}
|
||||||
|
@ -158,7 +106,25 @@ in {
|
||||||
s3://garagehq.deuxfleurs.fr/
|
s3://garagehq.deuxfleurs.fr/
|
||||||
}
|
}
|
||||||
'';
|
'';
|
||||||
nativeBuildInputs = [ pkgs.awscli2 kaniko manifest-tool ];
|
|
||||||
|
};
|
||||||
|
|
||||||
|
# --- Cache shell ---
|
||||||
|
# A shell for refreshing caches
|
||||||
|
cache = pkgs.mkShell {
|
||||||
|
shellHook = ''
|
||||||
|
function refresh_cache {
|
||||||
|
pass show deuxfleurs/nix_priv_key > /tmp/nix-signing-key.sec
|
||||||
|
for attr in clippy.amd64 test.amd64 pkgs.{amd64,i386,arm,arm64}.release; do
|
||||||
|
echo "Updating cache for ''${attr}"
|
||||||
|
nix copy -j8 \
|
||||||
|
--to 's3://nix?endpoint=garage.deuxfleurs.fr®ion=garage&secret-key=/tmp/nix-signing-key.sec' \
|
||||||
|
$(nix path-info ''${attr} --file default.nix --derivation --recursive | sed 's/\.drv$/.drv^*/')
|
||||||
|
|
||||||
|
done
|
||||||
|
rm /tmp/nix-signing-key.sec
|
||||||
|
}
|
||||||
|
'';
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
[package]
|
[package]
|
||||||
name = "garage_api"
|
name = "garage_api"
|
||||||
version = "0.9.1"
|
version = "1.0.1"
|
||||||
authors = ["Alex Auvolat <alex@adnab.me>"]
|
authors = ["Alex Auvolat <alex@adnab.me>"]
|
||||||
edition = "2018"
|
edition = "2018"
|
||||||
license = "AGPL-3.0"
|
license = "AGPL-3.0"
|
||||||
|
@ -17,47 +17,57 @@ path = "lib.rs"
|
||||||
garage_model.workspace = true
|
garage_model.workspace = true
|
||||||
garage_table.workspace = true
|
garage_table.workspace = true
|
||||||
garage_block.workspace = true
|
garage_block.workspace = true
|
||||||
|
garage_net.workspace = true
|
||||||
garage_util.workspace = true
|
garage_util.workspace = true
|
||||||
garage_rpc.workspace = true
|
garage_rpc.workspace = true
|
||||||
|
|
||||||
async-trait = "0.1.7"
|
aes-gcm.workspace = true
|
||||||
base64 = "0.21"
|
argon2.workspace = true
|
||||||
bytes = "1.0"
|
async-compression.workspace = true
|
||||||
chrono = "0.4"
|
async-trait.workspace = true
|
||||||
crypto-common = "0.1"
|
base64.workspace = true
|
||||||
err-derive = "0.3"
|
bytes.workspace = true
|
||||||
hex = "0.4"
|
chrono.workspace = true
|
||||||
hmac = "0.12"
|
crc32fast.workspace = true
|
||||||
idna = "0.4"
|
crc32c.workspace = true
|
||||||
tracing = "0.1"
|
crypto-common.workspace = true
|
||||||
md-5 = "0.10"
|
err-derive.workspace = true
|
||||||
nom = "7.1"
|
hex.workspace = true
|
||||||
sha2 = "0.10"
|
hmac.workspace = true
|
||||||
|
idna.workspace = true
|
||||||
|
tracing.workspace = true
|
||||||
|
md-5.workspace = true
|
||||||
|
nom.workspace = true
|
||||||
|
pin-project.workspace = true
|
||||||
|
sha1.workspace = true
|
||||||
|
sha2.workspace = true
|
||||||
|
|
||||||
futures = "0.3"
|
futures.workspace = true
|
||||||
futures-util = "0.3"
|
futures-util.workspace = true
|
||||||
pin-project = "1.0.12"
|
tokio.workspace = true
|
||||||
tokio = { version = "1.0", default-features = false, features = ["rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] }
|
tokio-stream.workspace = true
|
||||||
tokio-stream = "0.1"
|
tokio-util.workspace = true
|
||||||
|
|
||||||
form_urlencoded = "1.0.0"
|
form_urlencoded.workspace = true
|
||||||
http = "0.2"
|
http.workspace = true
|
||||||
httpdate = "1.0"
|
httpdate.workspace = true
|
||||||
http-range = "0.1"
|
http-range.workspace = true
|
||||||
hyper = { version = "0.14", features = ["server", "http1", "runtime", "tcp", "stream"] }
|
http-body-util.workspace = true
|
||||||
hyperlocal = { version = "0.8.0", default-features = false, features = ["server"] }
|
hyper = { workspace = true, default-features = false, features = ["server", "http1"] }
|
||||||
multer = "2.0"
|
hyper-util.workspace = true
|
||||||
percent-encoding = "2.1.0"
|
multer.workspace = true
|
||||||
roxmltree = "0.18"
|
percent-encoding.workspace = true
|
||||||
serde = { version = "1.0", features = ["derive"] }
|
roxmltree.workspace = true
|
||||||
serde_bytes = "0.11"
|
url.workspace = true
|
||||||
serde_json = "1.0"
|
|
||||||
quick-xml = { version = "0.26", features = [ "serialize" ] }
|
|
||||||
url = "2.3"
|
|
||||||
|
|
||||||
opentelemetry = "0.17"
|
serde.workspace = true
|
||||||
opentelemetry-prometheus = { version = "0.10", optional = true }
|
serde_bytes.workspace = true
|
||||||
prometheus = { version = "0.13", optional = true }
|
serde_json.workspace = true
|
||||||
|
quick-xml.workspace = true
|
||||||
|
|
||||||
|
opentelemetry.workspace = true
|
||||||
|
opentelemetry-prometheus = { workspace = true, optional = true }
|
||||||
|
prometheus = { workspace = true, optional = true }
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
k2v = [ "garage_util/k2v", "garage_model/k2v" ]
|
k2v = [ "garage_util/k2v", "garage_model/k2v" ]
|
||||||
|
|
|
@@ -1,11 +1,12 @@
 use std::collections::HashMap;
 use std::sync::Arc;

+use argon2::password_hash::PasswordHash;
 use async_trait::async_trait;

-use futures::future::Future;
 use http::header::{ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN, ALLOW};
-use hyper::{Body, Request, Response, StatusCode};
+use hyper::{body::Incoming as IncomingBody, Request, Response, StatusCode};
+use tokio::sync::watch;

 use opentelemetry::trace::SpanRef;

@@ -27,7 +28,9 @@ use crate::admin::error::*;
 use crate::admin::key::*;
 use crate::admin::router_v0;
 use crate::admin::router_v1::{Authorization, Endpoint};
-use crate::helpers::host_to_bucket;
+use crate::helpers::*;

+pub type ResBody = BoxBody<Error>;
+
 pub struct AdminApiServer {
 	garage: Arc<Garage>,
@@ -43,14 +46,8 @@ impl AdminApiServer {
 		#[cfg(feature = "metrics")] exporter: PrometheusExporter,
 	) -> Self {
 		let cfg = &garage.config.admin;
-		let metrics_token = cfg
-			.metrics_token
-			.as_ref()
-			.map(|tok| format!("Bearer {}", tok));
-		let admin_token = cfg
-			.admin_token
-			.as_ref()
-			.map(|tok| format!("Bearer {}", tok));
+		let metrics_token = cfg.metrics_token.as_deref().map(hash_bearer_token);
+		let admin_token = cfg.admin_token.as_deref().map(hash_bearer_token);
 		Self {
 			garage,
 			#[cfg(feature = "metrics")]
@@ -63,24 +60,27 @@ impl AdminApiServer {
 	pub async fn run(
 		self,
 		bind_addr: UnixOrTCPSocketAddress,
-		shutdown_signal: impl Future<Output = ()>,
+		must_exit: watch::Receiver<bool>,
 	) -> Result<(), GarageError> {
 		let region = self.garage.config.s3_api.s3_region.clone();
 		ApiServer::new(region, self)
-			.run_server(bind_addr, Some(0o220), shutdown_signal)
+			.run_server(bind_addr, Some(0o220), must_exit)
 			.await
 	}

-	fn handle_options(&self, _req: &Request<Body>) -> Result<Response<Body>, Error> {
+	fn handle_options(&self, _req: &Request<IncomingBody>) -> Result<Response<ResBody>, Error> {
 		Ok(Response::builder()
 			.status(StatusCode::NO_CONTENT)
 			.header(ALLOW, "OPTIONS, GET, POST")
 			.header(ACCESS_CONTROL_ALLOW_METHODS, "OPTIONS, GET, POST")
 			.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
-			.body(Body::empty())?)
+			.body(empty_body())?)
 	}

-	async fn handle_check_domain(&self, req: Request<Body>) -> Result<Response<Body>, Error> {
+	async fn handle_check_domain(
+		&self,
+		req: Request<IncomingBody>,
+	) -> Result<Response<ResBody>, Error> {
 		let query_params: HashMap<String, String> = req
 			.uri()
 			.query()
@@ -104,7 +104,7 @@ impl AdminApiServer {
 		if self.check_domain(domain).await? {
 			Ok(Response::builder()
 				.status(StatusCode::OK)
-				.body(Body::from(format!(
+				.body(string_body(format!(
 					"Domain '{domain}' is managed by Garage"
 				)))?)
 		} else {
@@ -167,7 +167,7 @@ impl AdminApiServer {
 		}
 	}

-	fn handle_health(&self) -> Result<Response<Body>, Error> {
+	fn handle_health(&self) -> Result<Response<ResBody>, Error> {
 		let health = self.garage.system.health();

 		let (status, status_str) = match health.status {
@@ -189,10 +189,10 @@ impl AdminApiServer {
 		Ok(Response::builder()
 			.status(status)
 			.header(http::header::CONTENT_TYPE, "text/plain")
-			.body(Body::from(status_str))?)
+			.body(string_body(status_str))?)
 	}

-	fn handle_metrics(&self) -> Result<Response<Body>, Error> {
+	fn handle_metrics(&self) -> Result<Response<ResBody>, Error> {
 		#[cfg(feature = "metrics")]
 		{
 			use opentelemetry::trace::Tracer;
@@ -212,7 +212,7 @@ impl AdminApiServer {
 			Ok(Response::builder()
 				.status(StatusCode::OK)
 				.header(http::header::CONTENT_TYPE, encoder.format_type())
-				.body(Body::from(buffer))?)
+				.body(bytes_body(buffer.into()))?)
 		}
 		#[cfg(not(feature = "metrics"))]
 		Err(Error::bad_request(
@@ -229,7 +229,7 @@ impl ApiHandler for AdminApiServer {
 	type Endpoint = Endpoint;
 	type Error = Error;

-	fn parse_endpoint(&self, req: &Request<Body>) -> Result<Endpoint, Error> {
+	fn parse_endpoint(&self, req: &Request<IncomingBody>) -> Result<Endpoint, Error> {
 		if req.uri().path().starts_with("/v0/") {
 			let endpoint_v0 = router_v0::Endpoint::from_request(req)?;
 			Endpoint::from_v0(endpoint_v0)
@@ -240,14 +240,14 @@ impl ApiHandler for AdminApiServer {

 	async fn handle(
 		&self,
-		req: Request<Body>,
+		req: Request<IncomingBody>,
 		endpoint: Endpoint,
-	) -> Result<Response<Body>, Error> {
-		let expected_auth_header =
+	) -> Result<Response<ResBody>, Error> {
+		let required_auth_hash =
 			match endpoint.authorization_type() {
 				Authorization::None => None,
-				Authorization::MetricsToken => self.metrics_token.as_ref(),
-				Authorization::AdminToken => match &self.admin_token {
+				Authorization::MetricsToken => self.metrics_token.as_deref(),
+				Authorization::AdminToken => match self.admin_token.as_deref() {
					None => return Err(Error::forbidden(
						"Admin token isn't configured, admin API access is disabled for security.",
					)),
@@ -255,14 +255,11 @@ impl ApiHandler for AdminApiServer {
				},
			};

-		if let Some(h) = expected_auth_header {
+		if let Some(password_hash) = required_auth_hash {
			match req.headers().get("Authorization") {
				None => return Err(Error::forbidden("Authorization token must be provided")),
-				Some(v) => {
-					let authorized = v.to_str().map(|hv| hv.trim() == h).unwrap_or(false);
-					if !authorized {
-						return Err(Error::forbidden("Invalid authorization token provided"));
-					}
+				Some(authorization) => {
+					verify_bearer_token(&authorization, password_hash)?;
				}
			}
		}
@@ -279,7 +276,7 @@ impl ApiHandler for AdminApiServer {
			Endpoint::GetClusterLayout => handle_get_cluster_layout(&self.garage).await,
			Endpoint::UpdateClusterLayout => handle_update_cluster_layout(&self.garage, req).await,
			Endpoint::ApplyClusterLayout => handle_apply_cluster_layout(&self.garage, req).await,
-			Endpoint::RevertClusterLayout => handle_revert_cluster_layout(&self.garage, req).await,
+			Endpoint::RevertClusterLayout => handle_revert_cluster_layout(&self.garage).await,
			// Keys
			Endpoint::ListKeys => handle_list_keys(&self.garage).await,
			Endpoint::GetKeyInfo {
@@ -337,3 +334,35 @@ impl ApiEndpoint for Endpoint {

 	fn add_span_attributes(&self, _span: SpanRef<'_>) {}
 }
+
+fn hash_bearer_token(token: &str) -> String {
+	use argon2::{
+		password_hash::{rand_core::OsRng, PasswordHasher, SaltString},
+		Argon2,
+	};
+
+	let salt = SaltString::generate(&mut OsRng);
+	let argon2 = Argon2::default();
+	argon2
+		.hash_password(token.trim().as_bytes(), &salt)
+		.expect("could not hash API token")
+		.to_string()
+}
+
+fn verify_bearer_token(token: &hyper::http::HeaderValue, password_hash: &str) -> Result<(), Error> {
+	use argon2::{password_hash::PasswordVerifier, Argon2};
+
+	let parsed_hash = PasswordHash::new(&password_hash).unwrap();
+
+	token
+		.to_str()?
+		.strip_prefix("Bearer ")
+		.and_then(|token| {
+			Argon2::default()
+				.verify_password(token.trim().as_bytes(), &parsed_hash)
+				.ok()
+		})
+		.ok_or_else(|| Error::forbidden("Invalid authorization token"))?;
+
+	Ok(())
+}

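Note on the token change above: the admin and metrics bearer tokens are no longer stored and compared as plain "Bearer {token}" strings; they are hashed with argon2 at startup (hash_bearer_token) and verified on each request (verify_bearer_token). Below is a minimal, self-contained sketch of the same hash-then-verify round trip, using only the argon2 crate APIs that appear in the diff; the token value and function layout are illustrative, not part of the Garage codebase.

use argon2::{
	password_hash::{rand_core::OsRng, PasswordHash, PasswordHasher, PasswordVerifier, SaltString},
	Argon2,
};

fn main() {
	let token = "s3cret-admin-token";

	// Hash the configured token once, with a fresh random salt (PHC string format).
	let salt = SaltString::generate(&mut OsRng);
	let hash = Argon2::default()
		.hash_password(token.trim().as_bytes(), &salt)
		.expect("could not hash token")
		.to_string();

	// Later, verify a presented token against the stored hash.
	let parsed = PasswordHash::new(&hash).expect("invalid hash string");
	assert!(Argon2::default()
		.verify_password(token.trim().as_bytes(), &parsed)
		.is_ok());
}
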
@@ -1,7 +1,7 @@
 use std::collections::HashMap;
 use std::sync::Arc;

-use hyper::{Body, Request, Response, StatusCode};
+use hyper::{body::Incoming as IncomingBody, Request, Response, StatusCode};
 use serde::{Deserialize, Serialize};

 use garage_util::crdt::*;
@@ -17,12 +17,13 @@ use garage_model::permission::*;
 use garage_model::s3::mpu_table;
 use garage_model::s3::object_table::*;

+use crate::admin::api_server::ResBody;
 use crate::admin::error::*;
 use crate::admin::key::ApiBucketKeyPerm;
 use crate::common_error::CommonError;
-use crate::helpers::{json_ok_response, parse_json_body};
+use crate::helpers::*;

-pub async fn handle_list_buckets(garage: &Arc<Garage>) -> Result<Response<Body>, Error> {
+pub async fn handle_list_buckets(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
 	let buckets = garage
 		.bucket_table
 		.get_range(
@@ -90,7 +91,7 @@ pub async fn handle_get_bucket_info(
 	garage: &Arc<Garage>,
 	id: Option<String>,
 	global_alias: Option<String>,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
 	let bucket_id = match (id, global_alias) {
 		(Some(id), None) => parse_bucket_id(&id)?,
 		(None, Some(ga)) => garage
@@ -111,7 +112,7 @@ pub async fn handle_get_bucket_info(
 async fn bucket_info_results(
 	garage: &Arc<Garage>,
 	bucket_id: Uuid,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
 	let bucket = garage
 		.bucket_helper()
 		.get_existing_bucket(bucket_id)
@@ -122,7 +123,7 @@ async fn bucket_info_results(
 		.table
 		.get(&bucket_id, &EmptyKey)
 		.await?
-		.map(|x| x.filtered_values(&garage.system.ring.borrow()))
+		.map(|x| x.filtered_values(&garage.system.cluster_layout()))
 		.unwrap_or_default();

 	let mpu_counters = garage
@@ -130,7 +131,7 @@ async fn bucket_info_results(
 		.table
 		.get(&bucket_id, &EmptyKey)
 		.await?
-		.map(|x| x.filtered_values(&garage.system.ring.borrow()))
+		.map(|x| x.filtered_values(&garage.system.cluster_layout()))
 		.unwrap_or_default();

 	let mut relevant_keys = HashMap::new();
@@ -268,9 +269,11 @@ struct GetBucketInfoKey {

 pub async fn handle_create_bucket(
 	garage: &Arc<Garage>,
-	req: Request<Body>,
-) -> Result<Response<Body>, Error> {
-	let req = parse_json_body::<CreateBucketRequest>(req).await?;
+	req: Request<IncomingBody>,
+) -> Result<Response<ResBody>, Error> {
+	let req = parse_json_body::<CreateBucketRequest, _, Error>(req).await?;

+	let helper = garage.locked_helper().await;
+
 	if let Some(ga) = &req.global_alias {
 		if !is_valid_bucket_name(ga) {
@@ -295,10 +298,7 @@ pub async fn handle_create_bucket(
 			)));
 		}

-		let key = garage
-			.key_helper()
-			.get_existing_key(&la.access_key_id)
-			.await?;
+		let key = helper.key().get_existing_key(&la.access_key_id).await?;
 		let state = key.state.as_option().unwrap();
 		if matches!(state.local_aliases.get(&la.alias), Some(_)) {
 			return Err(Error::bad_request("Local alias already exists"));
@@ -309,21 +309,16 @@ pub async fn handle_create_bucket(
 	garage.bucket_table.insert(&bucket).await?;

 	if let Some(ga) = &req.global_alias {
-		garage
-			.bucket_helper()
-			.set_global_bucket_alias(bucket.id, ga)
-			.await?;
+		helper.set_global_bucket_alias(bucket.id, ga).await?;
 	}

 	if let Some(la) = &req.local_alias {
-		garage
-			.bucket_helper()
+		helper
 			.set_local_bucket_alias(bucket.id, &la.access_key_id, &la.alias)
 			.await?;

 		if la.allow.read || la.allow.write || la.allow.owner {
-			garage
-				.bucket_helper()
+			helper
 				.set_bucket_key_permissions(
 					bucket.id,
 					&la.access_key_id,
@@ -360,16 +355,16 @@ struct CreateBucketLocalAlias {
 pub async fn handle_delete_bucket(
 	garage: &Arc<Garage>,
 	id: String,
-) -> Result<Response<Body>, Error> {
-	let helper = garage.bucket_helper();
+) -> Result<Response<ResBody>, Error> {
+	let helper = garage.locked_helper().await;

 	let bucket_id = parse_bucket_id(&id)?;

-	let mut bucket = helper.get_existing_bucket(bucket_id).await?;
+	let mut bucket = helper.bucket().get_existing_bucket(bucket_id).await?;
 	let state = bucket.state.as_option().unwrap();

 	// Check bucket is empty
-	if !helper.is_bucket_empty(bucket_id).await? {
+	if !helper.bucket().is_bucket_empty(bucket_id).await? {
 		return Err(CommonError::BucketNotEmpty.into());
 	}

@@ -403,15 +398,15 @@ pub async fn handle_delete_bucket(

 	Ok(Response::builder()
 		.status(StatusCode::NO_CONTENT)
-		.body(Body::empty())?)
+		.body(empty_body())?)
 }

 pub async fn handle_update_bucket(
 	garage: &Arc<Garage>,
 	id: String,
-	req: Request<Body>,
-) -> Result<Response<Body>, Error> {
-	let req = parse_json_body::<UpdateBucketRequest>(req).await?;
+	req: Request<IncomingBody>,
+) -> Result<Response<ResBody>, Error> {
+	let req = parse_json_body::<UpdateBucketRequest, _, Error>(req).await?;
 	let bucket_id = parse_bucket_id(&id)?;

 	let mut bucket = garage
@@ -470,23 +465,19 @@ struct UpdateBucketWebsiteAccess {

 pub async fn handle_bucket_change_key_perm(
 	garage: &Arc<Garage>,
-	req: Request<Body>,
+	req: Request<IncomingBody>,
 	new_perm_flag: bool,
-) -> Result<Response<Body>, Error> {
-	let req = parse_json_body::<BucketKeyPermChangeRequest>(req).await?;
+) -> Result<Response<ResBody>, Error> {
+	let req = parse_json_body::<BucketKeyPermChangeRequest, _, Error>(req).await?;

+	let helper = garage.locked_helper().await;
+
 	let bucket_id = parse_bucket_id(&req.bucket_id)?;

-	let bucket = garage
-		.bucket_helper()
-		.get_existing_bucket(bucket_id)
-		.await?;
+	let bucket = helper.bucket().get_existing_bucket(bucket_id).await?;
 	let state = bucket.state.as_option().unwrap();

-	let key = garage
-		.key_helper()
-		.get_existing_key(&req.access_key_id)
-		.await?;
+	let key = helper.key().get_existing_key(&req.access_key_id).await?;

 	let mut perm = state
 		.authorized_keys
@@ -504,8 +495,7 @@ pub async fn handle_bucket_change_key_perm(
 		perm.allow_owner = new_perm_flag;
 	}

-	garage
-		.bucket_helper()
+	helper
 		.set_bucket_key_permissions(bucket.id, &key.key_id, perm)
 		.await?;

@@ -526,13 +516,12 @@ pub async fn handle_global_alias_bucket(
 	garage: &Arc<Garage>,
 	bucket_id: String,
 	alias: String,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
 	let bucket_id = parse_bucket_id(&bucket_id)?;

-	garage
-		.bucket_helper()
-		.set_global_bucket_alias(bucket_id, &alias)
-		.await?;
+	let helper = garage.locked_helper().await;
+
+	helper.set_global_bucket_alias(bucket_id, &alias).await?;

 	bucket_info_results(garage, bucket_id).await
 }
@@ -541,13 +530,12 @@ pub async fn handle_global_unalias_bucket(
 	garage: &Arc<Garage>,
 	bucket_id: String,
 	alias: String,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
 	let bucket_id = parse_bucket_id(&bucket_id)?;

-	garage
-		.bucket_helper()
-		.unset_global_bucket_alias(bucket_id, &alias)
-		.await?;
+	let helper = garage.locked_helper().await;
+
+	helper.unset_global_bucket_alias(bucket_id, &alias).await?;

 	bucket_info_results(garage, bucket_id).await
 }
@@ -557,11 +545,12 @@ pub async fn handle_local_alias_bucket(
 	bucket_id: String,
 	access_key_id: String,
 	alias: String,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
 	let bucket_id = parse_bucket_id(&bucket_id)?;

-	garage
-		.bucket_helper()
+	let helper = garage.locked_helper().await;
+
+	helper
 		.set_local_bucket_alias(bucket_id, &access_key_id, &alias)
 		.await?;

@@ -573,11 +562,12 @@ pub async fn handle_local_unalias_bucket(
 	bucket_id: String,
 	access_key_id: String,
 	alias: String,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
 	let bucket_id = parse_bucket_id(&bucket_id)?;

-	garage
-		.bucket_helper()
+	let helper = garage.locked_helper().await;
+
+	helper
 		.unset_local_bucket_alias(bucket_id, &access_key_id, &alias)
 		.await?;

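Throughout the bucket handlers above, parse_json_body gains extra type parameters (e.g. parse_json_body::<CreateBucketRequest, _, Error>) because request bodies are now hyper 1.x's streaming hyper::body::Incoming rather than the old hyper::Body, so the helper is presumably generic over the body and error types. A hedged sketch of what such a helper can look like with http-body-util; this illustrates the pattern only and is not Garage's actual helper or error handling:

use http_body_util::BodyExt;
use hyper::Request;
use serde::de::DeserializeOwned;

// Illustrative signature and error type; Garage's helper uses its own Error type.
async fn parse_json_body<T, B>(req: Request<B>) -> Result<T, Box<dyn std::error::Error>>
where
	T: DeserializeOwned,
	B: hyper::body::Body,
	B::Error: std::error::Error + Send + Sync + 'static,
{
	// Collect the streaming body into contiguous bytes, then deserialize.
	let bytes = req.into_body().collect().await?.to_bytes();
	Ok(serde_json::from_slice::<T>(&bytes)?)
}
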
@@ -1,7 +1,8 @@
+use std::collections::HashMap;
 use std::net::SocketAddr;
 use std::sync::Arc;

-use hyper::{Body, Request, Response};
+use hyper::{body::Incoming as IncomingBody, Request, Response};
 use serde::{Deserialize, Serialize};

 use garage_util::crdt::*;
@@ -11,35 +12,110 @@ use garage_rpc::layout;

 use garage_model::garage::Garage;

+use crate::admin::api_server::ResBody;
 use crate::admin::error::*;
 use crate::helpers::{json_ok_response, parse_json_body};

-pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<Body>, Error> {
+pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
+	let layout = garage.system.cluster_layout();
+	let mut nodes = garage
+		.system
+		.get_known_nodes()
+		.into_iter()
+		.map(|i| {
+			(
+				i.id,
+				NodeResp {
+					id: hex::encode(i.id),
+					addr: i.addr,
+					hostname: i.status.hostname,
+					is_up: i.is_up,
+					last_seen_secs_ago: i.last_seen_secs_ago,
+					data_partition: i
+						.status
+						.data_disk_avail
+						.map(|(avail, total)| FreeSpaceResp {
+							available: avail,
+							total,
+						}),
+					metadata_partition: i.status.meta_disk_avail.map(|(avail, total)| {
+						FreeSpaceResp {
+							available: avail,
+							total,
+						}
+					}),
+					..Default::default()
+				},
+			)
+		})
+		.collect::<HashMap<_, _>>();
+
+	for (id, _, role) in layout.current().roles.items().iter() {
+		if let layout::NodeRoleV(Some(r)) = role {
+			let role = NodeRoleResp {
+				id: hex::encode(id),
+				zone: r.zone.to_string(),
+				capacity: r.capacity,
+				tags: r.tags.clone(),
+			};
+			match nodes.get_mut(id) {
+				None => {
+					nodes.insert(
+						*id,
+						NodeResp {
+							id: hex::encode(id),
+							role: Some(role),
+							..Default::default()
+						},
+					);
+				}
+				Some(n) => {
+					n.role = Some(role);
+				}
+			}
+		}
+	}
+
+	for ver in layout.versions().iter().rev().skip(1) {
+		for (id, _, role) in ver.roles.items().iter() {
+			if let layout::NodeRoleV(Some(r)) = role {
+				if r.capacity.is_some() {
+					if let Some(n) = nodes.get_mut(id) {
+						if n.role.is_none() {
+							n.draining = true;
+						}
+					} else {
+						nodes.insert(
+							*id,
+							NodeResp {
+								id: hex::encode(id),
+								draining: true,
+								..Default::default()
+							},
+						);
+					}
+				}
+			}
+		}
+	}
+
+	let mut nodes = nodes.into_values().collect::<Vec<_>>();
+	nodes.sort_by(|x, y| x.id.cmp(&y.id));
+
 	let res = GetClusterStatusResponse {
 		node: hex::encode(garage.system.id),
 		garage_version: garage_util::version::garage_version(),
 		garage_features: garage_util::version::garage_features(),
 		rust_version: garage_util::version::rust_version(),
 		db_engine: garage.db.engine(),
-		known_nodes: garage
-			.system
-			.get_known_nodes()
-			.into_iter()
-			.map(|i| KnownNodeResp {
-				id: hex::encode(i.id),
-				addr: i.addr,
-				is_up: i.is_up,
-				last_seen_secs_ago: i.last_seen_secs_ago,
-				hostname: i.status.hostname,
-			})
-			.collect(),
-		layout: format_cluster_layout(&garage.system.get_cluster_layout()),
+		layout_version: layout.current().version,
+		nodes,
 	};

 	Ok(json_ok_response(&res)?)
 }

-pub async fn handle_get_cluster_health(garage: &Arc<Garage>) -> Result<Response<Body>, Error> {
+pub async fn handle_get_cluster_health(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
 	use garage_rpc::system::ClusterHealthStatus;
 	let health = garage.system.health();
 	let health = ClusterHealth {
@@ -61,9 +137,9 @@ pub async fn handle_get_cluster_health(garage: &Arc<Garage>) -> Result<Response<

 pub async fn handle_connect_cluster_nodes(
 	garage: &Arc<Garage>,
-	req: Request<Body>,
-) -> Result<Response<Body>, Error> {
-	let req = parse_json_body::<Vec<String>>(req).await?;
+	req: Request<IncomingBody>,
+) -> Result<Response<ResBody>, Error> {
+	let req = parse_json_body::<Vec<String>, _, Error>(req).await?;

 	let res = futures::future::join_all(req.iter().map(|node| garage.system.connect(node)))
 		.await
@@ -83,14 +159,15 @@ pub async fn handle_connect_cluster_nodes(
 	Ok(json_ok_response(&res)?)
 }

-pub async fn handle_get_cluster_layout(garage: &Arc<Garage>) -> Result<Response<Body>, Error> {
-	let res = format_cluster_layout(&garage.system.get_cluster_layout());
+pub async fn handle_get_cluster_layout(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
+	let res = format_cluster_layout(garage.system.cluster_layout().inner());

 	Ok(json_ok_response(&res)?)
 }

-fn format_cluster_layout(layout: &layout::ClusterLayout) -> GetClusterLayoutResponse {
+fn format_cluster_layout(layout: &layout::LayoutHistory) -> GetClusterLayoutResponse {
 	let roles = layout
+		.current()
 		.roles
 		.items()
 		.iter()
@@ -104,10 +181,12 @@ fn format_cluster_layout(layout: &layout::ClusterLayout) -> GetClusterLayoutResp
 		.collect::<Vec<_>>();

 	let staged_role_changes = layout
-		.staging_roles
+		.staging
+		.get()
+		.roles
 		.items()
 		.iter()
-		.filter(|(k, _, v)| layout.roles.get(k) != Some(v))
+		.filter(|(k, _, v)| layout.current().roles.get(k) != Some(v))
 		.map(|(k, _, v)| match &v.0 {
 			None => NodeRoleChange {
 				id: hex::encode(k),
@@ -125,7 +204,7 @@ fn format_cluster_layout(layout: &layout::ClusterLayout) -> GetClusterLayoutResp
 		.collect::<Vec<_>>();

 	GetClusterLayoutResponse {
-		version: layout.version,
+		version: layout.current().version,
 		roles,
 		staged_role_changes,
 	}
@@ -154,8 +233,8 @@ struct GetClusterStatusResponse {
 	garage_features: Option<&'static [&'static str]>,
 	rust_version: &'static str,
 	db_engine: String,
-	known_nodes: Vec<KnownNodeResp>,
-	layout: GetClusterLayoutResponse,
+	layout_version: u64,
+	nodes: Vec<NodeResp>,
 }

 #[derive(Serialize)]
@@ -189,28 +268,41 @@ struct NodeRoleResp {
 	tags: Vec<String>,
 }

-#[derive(Serialize)]
+#[derive(Serialize, Default)]
 #[serde(rename_all = "camelCase")]
-struct KnownNodeResp {
+struct FreeSpaceResp {
+	available: u64,
+	total: u64,
+}
+
+#[derive(Serialize, Default)]
+#[serde(rename_all = "camelCase")]
+struct NodeResp {
 	id: String,
-	addr: SocketAddr,
+	role: Option<NodeRoleResp>,
+	addr: Option<SocketAddr>,
+	hostname: Option<String>,
 	is_up: bool,
 	last_seen_secs_ago: Option<u64>,
-	hostname: String,
+	draining: bool,
+	#[serde(skip_serializing_if = "Option::is_none")]
+	data_partition: Option<FreeSpaceResp>,
+	#[serde(skip_serializing_if = "Option::is_none")]
+	metadata_partition: Option<FreeSpaceResp>,
 }

 // ---- update functions ----

 pub async fn handle_update_cluster_layout(
 	garage: &Arc<Garage>,
-	req: Request<Body>,
-) -> Result<Response<Body>, Error> {
-	let updates = parse_json_body::<UpdateClusterLayoutRequest>(req).await?;
+	req: Request<IncomingBody>,
+) -> Result<Response<ResBody>, Error> {
+	let updates = parse_json_body::<UpdateClusterLayoutRequest, _, Error>(req).await?;

-	let mut layout = garage.system.get_cluster_layout();
+	let mut layout = garage.system.cluster_layout().inner().clone();

-	let mut roles = layout.roles.clone();
-	roles.merge(&layout.staging_roles);
+	let mut roles = layout.current().roles.clone();
+	roles.merge(&layout.staging.get().roles);

 	for change in updates {
 		let node = hex::decode(&change.id).ok_or_bad_request("Invalid node identifier")?;
@@ -231,11 +323,17 @@ pub async fn handle_update_cluster_layout(
 		};

 		layout
-			.staging_roles
+			.staging
+			.get_mut()
+			.roles
 			.merge(&roles.update_mutator(node, layout::NodeRoleV(new_role)));
 	}

-	garage.system.update_cluster_layout(&layout).await?;
+	garage
+		.system
+		.layout_manager
+		.update_cluster_layout(&layout)
+		.await?;

 	let res = format_cluster_layout(&layout);
 	Ok(json_ok_response(&res)?)
@@ -243,14 +341,18 @@ pub async fn handle_update_cluster_layout(

 pub async fn handle_apply_cluster_layout(
 	garage: &Arc<Garage>,
-	req: Request<Body>,
-) -> Result<Response<Body>, Error> {
-	let param = parse_json_body::<ApplyRevertLayoutRequest>(req).await?;
+	req: Request<IncomingBody>,
+) -> Result<Response<ResBody>, Error> {
+	let param = parse_json_body::<ApplyLayoutRequest, _, Error>(req).await?;

-	let layout = garage.system.get_cluster_layout();
+	let layout = garage.system.cluster_layout().inner().clone();
 	let (layout, msg) = layout.apply_staged_changes(Some(param.version))?;

-	garage.system.update_cluster_layout(&layout).await?;
+	garage
+		.system
+		.layout_manager
+		.update_cluster_layout(&layout)
+		.await?;

 	let res = ApplyClusterLayoutResponse {
 		message: msg,
@@ -261,13 +363,14 @@ pub async fn handle_apply_cluster_layout(

 pub async fn handle_revert_cluster_layout(
 	garage: &Arc<Garage>,
-	req: Request<Body>,
-) -> Result<Response<Body>, Error> {
-	let param = parse_json_body::<ApplyRevertLayoutRequest>(req).await?;
-
-	let layout = garage.system.get_cluster_layout();
-	let layout = layout.revert_staged_changes(Some(param.version))?;
-	garage.system.update_cluster_layout(&layout).await?;
+) -> Result<Response<ResBody>, Error> {
+	let layout = garage.system.cluster_layout().inner().clone();
+	let layout = layout.revert_staged_changes()?;
+	garage
+		.system
+		.layout_manager
+		.update_cluster_layout(&layout)
+		.await?;

 	let res = format_cluster_layout(&layout);
 	Ok(json_ok_response(&res)?)
@@ -279,7 +382,7 @@ type UpdateClusterLayoutRequest = Vec<NodeRoleChange>;

 #[derive(Deserialize)]
 #[serde(rename_all = "camelCase")]
-struct ApplyRevertLayoutRequest {
+struct ApplyLayoutRequest {
 	version: u64,
 }

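The new GetClusterStatus response above merges known nodes and layout roles into a single NodeResp list, relying on #[derive(Default)] for struct-update syntax (..Default::default()) and on skip_serializing_if to omit absent partition information. A small self-contained sketch of that serde pattern; the struct and field names mirror the diff but are reduced for illustration and are not the full Garage types:

use serde::Serialize;

#[derive(Serialize, Default)]
#[serde(rename_all = "camelCase")]
struct FreeSpaceResp {
	available: u64,
	total: u64,
}

#[derive(Serialize, Default)]
#[serde(rename_all = "camelCase")]
struct NodeResp {
	id: String,
	is_up: bool,
	draining: bool,
	#[serde(skip_serializing_if = "Option::is_none")]
	data_partition: Option<FreeSpaceResp>,
}

fn main() {
	// Fields not set explicitly fall back to Default; None fields are omitted entirely.
	let node = NodeResp {
		id: "deadbeef".into(),
		is_up: true,
		..Default::default()
	};
	println!("{}", serde_json::to_string(&node).unwrap());
	// -> {"id":"deadbeef","isUp":true,"draining":false}
}
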
@@ -1,13 +1,13 @@
 use err_derive::Error;
 use hyper::header::HeaderValue;
-use hyper::{Body, HeaderMap, StatusCode};
+use hyper::{HeaderMap, StatusCode};

 pub use garage_model::helper::error::Error as HelperError;

 use crate::common_error::CommonError;
 pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInternalError};
 use crate::generic_server::ApiError;
-use crate::helpers::CustomApiErrorBody;
+use crate::helpers::*;

 /// Errors of this crate
 #[derive(Debug, Error)]
@@ -40,18 +40,6 @@ where

 impl CommonErrorDerivative for Error {}

-impl From<HelperError> for Error {
-	fn from(err: HelperError) -> Self {
-		match err {
-			HelperError::Internal(i) => Self::Common(CommonError::InternalError(i)),
-			HelperError::BadRequest(b) => Self::Common(CommonError::BadRequest(b)),
-			HelperError::InvalidBucketName(n) => Self::Common(CommonError::InvalidBucketName(n)),
-			HelperError::NoSuchBucket(n) => Self::Common(CommonError::NoSuchBucket(n)),
-			HelperError::NoSuchAccessKey(n) => Self::NoSuchAccessKey(n),
-		}
-	}
-}
-
 impl Error {
 	fn code(&self) -> &'static str {
 		match self {
@@ -77,14 +65,14 @@ impl ApiError for Error {
 		header_map.append(header::CONTENT_TYPE, "application/json".parse().unwrap());
 	}

-	fn http_body(&self, garage_region: &str, path: &str) -> Body {
+	fn http_body(&self, garage_region: &str, path: &str) -> ErrorBody {
 		let error = CustomApiErrorBody {
 			code: self.code().to_string(),
 			message: format!("{}", self),
 			path: path.to_string(),
 			region: garage_region.to_string(),
 		};
-		Body::from(serde_json::to_string_pretty(&error).unwrap_or_else(|_| {
+		let error_str = serde_json::to_string_pretty(&error).unwrap_or_else(|_| {
 			r#"
 {
 	"code": "InternalError",
@@ -92,6 +80,7 @@ impl ApiError for Error {
 }
 			"#
 			.into()
-		}))
+		});
+		error_body(error_str)
 	}
 }

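In the error module above, http_body(...) now returns an ErrorBody instead of hyper::Body, and helpers such as empty_body, string_body, bytes_body and error_body used across the handlers build hyper 1.x bodies. A hedged sketch of how such helpers can be written with http-body-util; the helper names follow the diff, but these exact definitions are illustrative and not Garage's helpers module:

use bytes::Bytes;
use http_body_util::{combinators::BoxBody, BodyExt, Full};

// A boxed body whose error type is chosen by the caller, as in the ResBody alias above.
fn bytes_body<E: 'static>(b: Bytes) -> BoxBody<Bytes, E> {
	// Full<Bytes> never fails, so its Infallible error can be mapped into any error type.
	Full::new(b).map_err(|never| match never {}).boxed()
}

fn empty_body<E: 'static>() -> BoxBody<Bytes, E> {
	bytes_body(Bytes::new())
}

fn string_body<E: 'static>(s: String) -> BoxBody<Bytes, E> {
	bytes_body(Bytes::from(s))
}
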
@@ -1,7 +1,7 @@
 use std::collections::HashMap;
 use std::sync::Arc;

-use hyper::{Body, Request, Response, StatusCode};
+use hyper::{body::Incoming as IncomingBody, Request, Response, StatusCode};
 use serde::{Deserialize, Serialize};

 use garage_table::*;
@@ -9,10 +9,11 @@ use garage_table::*;
 use garage_model::garage::Garage;
 use garage_model::key_table::*;

+use crate::admin::api_server::ResBody;
 use crate::admin::error::*;
-use crate::helpers::{is_default, json_ok_response, parse_json_body};
+use crate::helpers::*;

-pub async fn handle_list_keys(garage: &Arc<Garage>) -> Result<Response<Body>, Error> {
+pub async fn handle_list_keys(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
 	let res = garage
 		.key_table
 		.get_range(
@@ -45,7 +46,7 @@ pub async fn handle_get_key_info(
 	id: Option<String>,
 	search: Option<String>,
 	show_secret_key: bool,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
 	let key = if let Some(id) = id {
 		garage.key_helper().get_existing_key(&id).await?
 	} else if let Some(search) = search {
@@ -62,9 +63,9 @@ pub async fn handle_get_key_info(

 pub async fn handle_create_key(
 	garage: &Arc<Garage>,
-	req: Request<Body>,
-) -> Result<Response<Body>, Error> {
-	let req = parse_json_body::<CreateKeyRequest>(req).await?;
+	req: Request<IncomingBody>,
+) -> Result<Response<ResBody>, Error> {
+	let req = parse_json_body::<CreateKeyRequest, _, Error>(req).await?;

 	let key = Key::new(req.name.as_deref().unwrap_or("Unnamed key"));
 	garage.key_table.insert(&key).await?;
@@ -80,9 +81,9 @@ struct CreateKeyRequest {

 pub async fn handle_import_key(
 	garage: &Arc<Garage>,
-	req: Request<Body>,
-) -> Result<Response<Body>, Error> {
-	let req = parse_json_body::<ImportKeyRequest>(req).await?;
+	req: Request<IncomingBody>,
+) -> Result<Response<ResBody>, Error> {
+	let req = parse_json_body::<ImportKeyRequest, _, Error>(req).await?;

 	let prev_key = garage.key_table.get(&EmptyKey, &req.access_key_id).await?;
 	if prev_key.is_some() {
@@ -111,9 +112,9 @@ struct ImportKeyRequest {
 pub async fn handle_update_key(
 	garage: &Arc<Garage>,
 	id: String,
-	req: Request<Body>,
-) -> Result<Response<Body>, Error> {
-	let req = parse_json_body::<UpdateKeyRequest>(req).await?;
+	req: Request<IncomingBody>,
+) -> Result<Response<ResBody>, Error> {
+	let req = parse_json_body::<UpdateKeyRequest, _, Error>(req).await?;

 	let mut key = garage.key_helper().get_existing_key(&id).await?;

@@ -146,23 +147,26 @@ struct UpdateKeyRequest {
 	deny: Option<KeyPerm>,
 }

-pub async fn handle_delete_key(garage: &Arc<Garage>, id: String) -> Result<Response<Body>, Error> {
-	let mut key = garage.key_helper().get_existing_key(&id).await?;
+pub async fn handle_delete_key(
+	garage: &Arc<Garage>,
+	id: String,
+) -> Result<Response<ResBody>, Error> {
+	let helper = garage.locked_helper().await;

-	key.state.as_option().unwrap();
+	let mut key = helper.key().get_existing_key(&id).await?;

-	garage.key_helper().delete_key(&mut key).await?;
+	helper.delete_key(&mut key).await?;

 	Ok(Response::builder()
 		.status(StatusCode::NO_CONTENT)
-		.body(Body::empty())?)
+		.body(empty_body())?)
 }

 async fn key_info_results(
 	garage: &Arc<Garage>,
 	key: Key,
 	show_secret: bool,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
 	let mut relevant_buckets = HashMap::new();

 	let key_state = key.state.as_option().unwrap();

@@ -3,6 +3,8 @@ use hyper::StatusCode;

 use garage_util::error::Error as GarageError;

+use garage_model::helper::error::Error as HelperError;
+
 /// Errors of this crate
 #[derive(Debug, Error)]
 pub enum CommonError {
@@ -28,6 +30,10 @@ pub enum CommonError {
 	#[error(display = "Bad request: {}", _0)]
 	BadRequest(String),

+	/// The client sent a header with invalid value
+	#[error(display = "Invalid header value: {}", _0)]
+	InvalidHeader(#[error(source)] hyper::header::ToStrError),
+
 	// ---- SPECIFIC ERROR CONDITIONS ----
 	// These have to be error codes referenced in the S3 spec here:
 	// https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList
@@ -53,9 +59,7 @@ impl CommonError {
 	pub fn http_status_code(&self) -> StatusCode {
 		match self {
 			CommonError::InternalError(
-				GarageError::Timeout
-				| GarageError::RemoteError(_)
-				| GarageError::Quorum(_, _, _, _),
+				GarageError::Timeout | GarageError::RemoteError(_) | GarageError::Quorum(..),
 			) => StatusCode::SERVICE_UNAVAILABLE,
 			CommonError::InternalError(_) | CommonError::Hyper(_) | CommonError::Http(_) => {
 				StatusCode::INTERNAL_SERVER_ERROR
@@ -64,7 +68,9 @@ impl CommonError {
 			CommonError::Forbidden(_) => StatusCode::FORBIDDEN,
 			CommonError::NoSuchBucket(_) => StatusCode::NOT_FOUND,
 			CommonError::BucketNotEmpty | CommonError::BucketAlreadyExists => StatusCode::CONFLICT,
-			CommonError::InvalidBucketName(_) => StatusCode::BAD_REQUEST,
+			CommonError::InvalidBucketName(_) | CommonError::InvalidHeader(_) => {
+				StatusCode::BAD_REQUEST
+			}
 		}
 	}

@@ -72,9 +78,7 @@ impl CommonError {
 		match self {
 			CommonError::Forbidden(_) => "AccessDenied",
 			CommonError::InternalError(
-				GarageError::Timeout
-				| GarageError::RemoteError(_)
-				| GarageError::Quorum(_, _, _, _),
+				GarageError::Timeout | GarageError::RemoteError(_) | GarageError::Quorum(..),
 			) => "ServiceUnavailable",
 			CommonError::InternalError(_) | CommonError::Hyper(_) | CommonError::Http(_) => {
 				"InternalError"
@@ -84,6 +88,7 @@ impl CommonError {
 			CommonError::BucketAlreadyExists => "BucketAlreadyExists",
 			CommonError::BucketNotEmpty => "BucketNotEmpty",
 			CommonError::InvalidBucketName(_) => "InvalidBucketName",
+			CommonError::InvalidHeader(_) => "InvalidHeaderValue",
 		}
 	}

@@ -92,6 +97,18 @@ impl CommonError {
 	}
 }

+impl From<HelperError> for CommonError {
+	fn from(err: HelperError) -> Self {
+		match err {
+			HelperError::Internal(i) => Self::InternalError(i),
+			HelperError::BadRequest(b) => Self::BadRequest(b),
+			HelperError::InvalidBucketName(n) => Self::InvalidBucketName(n),
+			HelperError::NoSuchBucket(n) => Self::NoSuchBucket(n),
+			e => Self::bad_request(format!("{}", e)),
+		}
+	}
+}
+
 pub trait CommonErrorDerivative: From<CommonError> {
 	fn internal_error<M: ToString>(msg: M) -> Self {
 		Self::from(CommonError::InternalError(GarageError::Message(

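The generic server changes that follow drop hyper's built-in Server and with_graceful_shutdown in favour of a hand-rolled accept loop (server_loop) driven by a watch::Receiver<bool> named must_exit. A minimal sketch of that shutdown-signal pattern in isolation; the per-connection handler and the simulated signal here are hypothetical and not the Garage API:

use std::time::Duration;
use tokio::net::TcpListener;
use tokio::sync::watch;

#[tokio::main]
async fn main() -> std::io::Result<()> {
	let (exit_tx, mut must_exit) = watch::channel(false);

	// In Garage this flag is flipped by the daemon's signal handling; simulate it here.
	tokio::spawn(async move {
		tokio::time::sleep(Duration::from_millis(200)).await;
		let _ = exit_tx.send(true);
	});

	let listener = TcpListener::bind("127.0.0.1:0").await?;

	loop {
		tokio::select! {
			// Keep accepting connections while we are not shutting down.
			accepted = listener.accept() => {
				let (socket, addr) = accepted?;
				tokio::spawn(async move {
					// a hypothetical per-connection handler would run here
					let _ = (socket, addr);
				});
			}
			// Stop accepting as soon as must_exit flips to true.
			_ = must_exit.changed() => {
				if *must_exit.borrow() {
					break;
				}
			}
		}
	}

	// A real server would now wait for in-flight connections, as server_loop does with its deadline.
	Ok(())
}
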
@@ -1,20 +1,26 @@
+use std::convert::Infallible;
 use std::fs::{self, Permissions};
 use std::os::unix::fs::PermissionsExt;
 use std::sync::Arc;
+use std::time::Duration;

 use async_trait::async_trait;

 use futures::future::Future;
+use futures::stream::{futures_unordered::FuturesUnordered, StreamExt};

+use http_body_util::BodyExt;
 use hyper::header::HeaderValue;
-use hyper::server::conn::AddrStream;
-use hyper::service::{make_service_fn, service_fn};
-use hyper::{Body, Request, Response, Server};
+use hyper::server::conn::http1;
+use hyper::service::service_fn;
+use hyper::{body::Incoming as IncomingBody, Request, Response};
 use hyper::{HeaderMap, StatusCode};
+use hyper_util::rt::TokioIo;

-use hyperlocal::UnixServerExt;
-use tokio::net::UnixStream;
+use tokio::io::{AsyncRead, AsyncWrite};
+use tokio::net::{TcpListener, TcpStream, UnixListener, UnixStream};
+use tokio::sync::watch;
+use tokio::time::{sleep_until, Instant};

 use opentelemetry::{
 	global,
@@ -28,6 +34,8 @@ use garage_util::forwarded_headers;
 use garage_util::metrics::{gen_trace_id, RecordDuration};
 use garage_util::socket_address::UnixOrTCPSocketAddress;

+use crate::helpers::{BoxBody, ErrorBody};
+
 pub(crate) trait ApiEndpoint: Send + Sync + 'static {
 	fn name(&self) -> &'static str;
 	fn add_span_attributes(&self, span: SpanRef<'_>);
@@ -36,7 +44,7 @@ pub(crate) trait ApiEndpoint: Send + Sync + 'static {
 pub trait ApiError: std::error::Error + Send + Sync + 'static {
 	fn http_status_code(&self) -> StatusCode;
 	fn add_http_headers(&self, header_map: &mut HeaderMap<HeaderValue>);
-	fn http_body(&self, garage_region: &str, path: &str) -> Body;
+	fn http_body(&self, garage_region: &str, path: &str) -> ErrorBody;
 }

 #[async_trait]
@@ -47,12 +55,12 @@ pub(crate) trait ApiHandler: Send + Sync + 'static {
 	type Endpoint: ApiEndpoint;
 	type Error: ApiError;

-	fn parse_endpoint(&self, r: &Request<Body>) -> Result<Self::Endpoint, Self::Error>;
+	fn parse_endpoint(&self, r: &Request<IncomingBody>) -> Result<Self::Endpoint, Self::Error>;
 	async fn handle(
 		&self,
-		req: Request<Body>,
+		req: Request<IncomingBody>,
 		endpoint: Self::Endpoint,
-	) -> Result<Response<Body>, Self::Error>;
+	) -> Result<Response<BoxBody<Self::Error>>, Self::Error>;
 }

 pub(crate) struct ApiServer<A: ApiHandler> {
@@ -99,74 +107,42 @@ impl<A: ApiHandler> ApiServer<A> {
 		self: Arc<Self>,
 		bind_addr: UnixOrTCPSocketAddress,
 		unix_bind_addr_mode: Option<u32>,
-		shutdown_signal: impl Future<Output = ()>,
+		must_exit: watch::Receiver<bool>,
 	) -> Result<(), GarageError> {
-		let tcp_service = make_service_fn(|conn: &AddrStream| {
-			let this = self.clone();
-
-			let client_addr = conn.remote_addr();
-			async move {
-				Ok::<_, GarageError>(service_fn(move |req: Request<Body>| {
-					let this = this.clone();
-
-					this.handler(req, client_addr.to_string())
-				}))
-			}
-		});
-
-		let unix_service = make_service_fn(|_: &UnixStream| {
-			let this = self.clone();
-
-			let path = bind_addr.to_string();
-			async move {
-				Ok::<_, GarageError>(service_fn(move |req: Request<Body>| {
-					let this = this.clone();
-
-					this.handler(req, path.clone())
-				}))
-			}
-		});
-
-		info!(
-			"{} API server listening on {}",
-			A::API_NAME_DISPLAY,
-			bind_addr
-		);
+		let server_name = format!("{} API", A::API_NAME_DISPLAY);
+		info!("{} server listening on {}", server_name, bind_addr);

 		match bind_addr {
 			UnixOrTCPSocketAddress::TCPSocket(addr) => {
-				Server::bind(&addr)
-					.serve(tcp_service)
-					.with_graceful_shutdown(shutdown_signal)
-					.await?
+				let listener = TcpListener::bind(addr).await?;
+
+				let handler = move |request, socketaddr| self.clone().handler(request, socketaddr);
+				server_loop(server_name, listener, handler, must_exit).await
 			}
 			UnixOrTCPSocketAddress::UnixSocket(ref path) => {
 				if path.exists() {
 					fs::remove_file(path)?
 				}

-				let bound = Server::bind_unix(path)?;
+				let listener = UnixListener::bind(path)?;
+				let listener = UnixListenerOn(listener, path.display().to_string());

 				fs::set_permissions(
 					path,
 					Permissions::from_mode(unix_bind_addr_mode.unwrap_or(0o222)),
 				)?;

-				bound
-					.serve(unix_service)
-					.with_graceful_shutdown(shutdown_signal)
-					.await?;
+				let handler = move |request, socketaddr| self.clone().handler(request, socketaddr);
+				server_loop(server_name, listener, handler, must_exit).await
 			}
-		};
-
-		Ok(())
+		}
 	}

 	async fn handler(
 		self: Arc<Self>,
-		req: Request<Body>,
+		req: Request<IncomingBody>,
 		addr: String,
-	) -> Result<Response<Body>, GarageError> {
+	) -> Result<Response<BoxBody<A::Error>>, http::Error> {
 		let uri = req.uri().clone();

 		if let Ok(forwarded_for_ip_addr) =
@@ -205,7 +181,7 @@ impl<A: ApiHandler> ApiServer<A> {
 				Ok(x)
 			}
 			Err(e) => {
-				let body: Body = e.http_body(&self.region, uri.path());
+				let body = e.http_body(&self.region, uri.path());
 				let mut http_error_builder = Response::builder().status(e.http_status_code());

 				if let Some(header_map) = http_error_builder.headers_mut() {
@@ -219,12 +195,16 @@ impl<A: ApiHandler> ApiServer<A> {
 				} else {
 					info!("Response: error {}, {}", e.http_status_code(), e);
 				}
-				Ok(http_error)
+				Ok(http_error
+					.map(|body| BoxBody::new(body.map_err(|_: Infallible| unreachable!()))))
 			}
 		}
 	}

-	async fn handler_stage2(&self, req: Request<Body>) -> Result<Response<Body>, A::Error> {
+	async fn handler_stage2(
+		&self,
+		req: Request<IncomingBody>,
+	) -> Result<Response<BoxBody<A::Error>>, A::Error> {
 		let endpoint = self.api_handler.parse_endpoint(&req)?;
 		debug!("Endpoint: {}", endpoint.name());

@@ -265,3 +245,134 @@ impl<A: ApiHandler> ApiServer<A> {
 		res
 	}
 }
+
+// ==== helper functions ====
+
+#[async_trait]
+pub trait Accept: Send + Sync + 'static {
+	type Stream: AsyncRead + AsyncWrite + Send + Sync + 'static;
+	async fn accept(&self) -> std::io::Result<(Self::Stream, String)>;
+}
+
+#[async_trait]
+impl Accept for TcpListener {
+	type Stream = TcpStream;
+	async fn accept(&self) -> std::io::Result<(Self::Stream, String)> {
+		self.accept()
+			.await
+			.map(|(stream, addr)| (stream, addr.to_string()))
+	}
+}
+
+pub struct UnixListenerOn(pub UnixListener, pub String);
+
+#[async_trait]
+impl Accept for UnixListenerOn {
+	type Stream = UnixStream;
+	async fn accept(&self) -> std::io::Result<(Self::Stream, String)> {
+		self.0
+			.accept()
+			.await
+			.map(|(stream, _addr)| (stream, self.1.clone()))
+	}
+}
+
+pub async fn server_loop<A, H, F, E>(
+	server_name: String,
+	listener: A,
+	handler: H,
+	mut must_exit: watch::Receiver<bool>,
+) -> Result<(), GarageError>
+where
+	A: Accept,
+	H: Fn(Request<IncomingBody>, String) -> F + Send + Sync + Clone + 'static,
+	F: Future<Output = Result<Response<BoxBody<E>>, http::Error>> + Send + 'static,
+	E: Send + Sync + std::error::Error + 'static,
+{
+	let (conn_in, mut conn_out) = tokio::sync::mpsc::unbounded_channel();
+	let connection_collector = tokio::spawn({
+		let server_name = server_name.clone();
+		async move {
+			let mut connections = FuturesUnordered::<tokio::task::JoinHandle<()>>::new();
+			loop {
+				let collect_next = async {
+					if connections.is_empty() {
+						futures::future::pending().await
+					} else {
+						connections.next().await
+					}
+				};
+				tokio::select! {
+					result = collect_next => {
+						trace!("{} server: HTTP connection finished: {:?}", server_name, result);
+					}
+					new_fut = conn_out.recv() => {
+						match new_fut {
+							Some(f) => connections.push(f),
+							None => break,
+						}
+					}
+				}
+			}
+			let deadline = Instant::now() + Duration::from_secs(10);
+			while !connections.is_empty() {
+				info!(
+					"{} server: {} connections still open, deadline in {:.2}s",
+					server_name,
+					connections.len(),
+					(deadline - Instant::now()).as_secs_f32(),
+				);
+				tokio::select! {
+					conn_res = connections.next() => {
+						trace!(
+							"{} server: HTTP connection finished: {:?}",
+							server_name,
+							conn_res.unwrap(),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
_ = sleep_until(deadline) => {
|
||||||
|
warn!("{} server: exit deadline reached with {} connections still open, killing them now",
|
||||||
|
server_name,
|
||||||
|
connections.len());
|
||||||
|
for conn in connections.iter() {
|
||||||
|
conn.abort();
|
||||||
|
}
|
||||||
|
for conn in connections {
|
||||||
|
assert!(conn.await.unwrap_err().is_cancelled());
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
while !*must_exit.borrow() {
|
||||||
|
let (stream, client_addr) = tokio::select! {
|
||||||
|
acc = listener.accept() => acc?,
|
||||||
|
_ = must_exit.changed() => continue,
|
||||||
|
};
|
||||||
|
|
||||||
|
let io = TokioIo::new(stream);
|
||||||
|
|
||||||
|
let handler = handler.clone();
|
||||||
|
let serve = move |req: Request<IncomingBody>| handler(req, client_addr.clone());
|
||||||
|
|
||||||
|
let fut = tokio::task::spawn(async move {
|
||||||
|
let io = Box::pin(io);
|
||||||
|
if let Err(e) = http1::Builder::new()
|
||||||
|
.serve_connection(io, service_fn(serve))
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
debug!("Error handling HTTP connection: {}", e);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
conn_in.send(fut)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
info!("{} server exiting", server_name);
|
||||||
|
drop(conn_in);
|
||||||
|
connection_collector.await?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
|
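For reference, below is a minimal, self-contained sketch (not Garage's code; the function name and per-connection work are made up for illustration) of the accept-and-drain pattern that the new `server_loop` above implements: accept connections until a `watch` channel flips, then give in-flight tasks a fixed deadline before aborting them.

```rust
use std::time::Duration;

use futures::stream::{FuturesUnordered, StreamExt};
use tokio::net::TcpListener;
use tokio::sync::watch;
use tokio::time::{sleep_until, Instant};

// Accept connections until `must_exit` becomes true, then give the remaining
// connection tasks a deadline to finish before aborting them.
async fn toy_server_loop(
	listener: TcpListener,
	mut must_exit: watch::Receiver<bool>,
) -> std::io::Result<()> {
	let mut connections = FuturesUnordered::new();

	while !*must_exit.borrow() {
		let (mut socket, addr) = tokio::select! {
			acc = listener.accept() => acc?,
			_ = must_exit.changed() => continue,
		};
		connections.push(tokio::spawn(async move {
			// Illustrative per-connection work; a real server would hand the
			// socket to an HTTP connection handler here.
			let _ = tokio::io::copy(&mut socket, &mut tokio::io::sink()).await;
			addr
		}));
	}

	// Drain whatever is still running, but not forever.
	let deadline = Instant::now() + Duration::from_secs(10);
	while !connections.is_empty() {
		tokio::select! {
			done = connections.next() => {
				if let Some(Ok(addr)) = done {
					eprintln!("connection from {} finished", addr);
				}
			}
			_ = sleep_until(deadline) => {
				for conn in connections.iter() {
					conn.abort();
				}
				break;
			}
		}
	}
	Ok(())
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
	let listener = TcpListener::bind("127.0.0.1:0").await?;
	let (tx, rx) = watch::channel(false);
	// Ask the loop to stop after one second, then run it.
	tokio::spawn(async move {
		tokio::time::sleep(Duration::from_secs(1)).await;
		let _ = tx.send(true);
	});
	toy_server_loop(listener, rx).await
}
```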
@@ -1,7 +1,22 @@
-use hyper::{Body, Request, Response};
+use std::convert::Infallible;
+use std::sync::Arc;
+
+use futures::{Stream, StreamExt, TryStreamExt};
+
+use http_body_util::{BodyExt, Full as FullBody};
+use hyper::{
+	body::{Body, Bytes},
+	Request, Response,
+};
 use idna::domain_to_unicode;
 use serde::{Deserialize, Serialize};
 
+use garage_model::bucket_table::BucketParams;
+use garage_model::garage::Garage;
+use garage_model::key_table::Key;
+use garage_util::data::Uuid;
+use garage_util::error::Error as GarageError;
+
 use crate::common_error::{CommonError as Error, *};
 
 /// What kind of authorization is required to perform a given action
@@ -17,6 +32,15 @@ pub enum Authorization {
 	Owner,
 }
 
+/// The values which are known for each request related to a bucket
+pub struct ReqCtx {
+	pub garage: Arc<Garage>,
+	pub bucket_id: Uuid,
+	pub bucket_name: String,
+	pub bucket_params: BucketParams,
+	pub api_key: Key,
+}
+
 /// Host to bucket
 ///
 /// Convert a host, like "bucket.garage-site.tld" to the corresponding bucket "bucket",
@@ -138,18 +162,64 @@ pub fn key_after_prefix(pfx: &str) -> Option<String> {
 	None
 }
 
-pub async fn parse_json_body<T: for<'de> Deserialize<'de>>(req: Request<Body>) -> Result<T, Error> {
-	let body = hyper::body::to_bytes(req.into_body()).await?;
+// =============== body helpers =================
+
+pub type EmptyBody = http_body_util::Empty<bytes::Bytes>;
+pub type ErrorBody = FullBody<bytes::Bytes>;
+pub type BoxBody<E> = http_body_util::combinators::BoxBody<bytes::Bytes, E>;
+
+pub fn string_body<E>(s: String) -> BoxBody<E> {
+	bytes_body(bytes::Bytes::from(s.into_bytes()))
+}
+pub fn bytes_body<E>(b: bytes::Bytes) -> BoxBody<E> {
+	BoxBody::new(FullBody::new(b).map_err(|_: Infallible| unreachable!()))
+}
+pub fn empty_body<E>() -> BoxBody<E> {
+	BoxBody::new(http_body_util::Empty::new().map_err(|_: Infallible| unreachable!()))
+}
+pub fn error_body(s: String) -> ErrorBody {
+	ErrorBody::from(bytes::Bytes::from(s.into_bytes()))
+}
+
+pub async fn parse_json_body<T, B, E>(req: Request<B>) -> Result<T, E>
+where
+	T: for<'de> Deserialize<'de>,
+	B: Body,
+	E: From<<B as Body>::Error> + From<Error>,
+{
+	let body = req.into_body().collect().await?.to_bytes();
 	let resp: T = serde_json::from_slice(&body).ok_or_bad_request("Invalid JSON")?;
 	Ok(resp)
 }
 
-pub fn json_ok_response<T: Serialize>(res: &T) -> Result<Response<Body>, Error> {
-	let resp_json = serde_json::to_string_pretty(res).map_err(garage_util::error::Error::from)?;
+pub fn json_ok_response<E, T: Serialize>(res: &T) -> Result<Response<BoxBody<E>>, E>
+where
+	E: From<Error>,
+{
+	let resp_json = serde_json::to_string_pretty(res)
+		.map_err(GarageError::from)
+		.map_err(Error::from)?;
 	Ok(Response::builder()
 		.status(hyper::StatusCode::OK)
 		.header(http::header::CONTENT_TYPE, "application/json")
-		.body(Body::from(resp_json))?)
+		.body(string_body(resp_json))
+		.unwrap())
+}
+
+pub fn body_stream<B, E>(body: B) -> impl Stream<Item = Result<Bytes, E>>
+where
+	B: Body<Data = Bytes>,
+	<B as Body>::Error: Into<E>,
+	E: From<Error>,
+{
+	let stream = http_body_util::BodyStream::new(body);
+	let stream = TryStreamExt::map_err(stream, Into::into);
+	stream.map(|x| {
+		x.and_then(|f| {
+			f.into_data()
+				.map_err(|_| E::from(Error::bad_request("non-data frame")))
+		})
+	})
 }
 
 pub fn is_default<T: Default + PartialEq>(v: &T) -> bool {
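The `BoxBody` alias and the `string_body`/`empty_body` helpers added above exist so that handlers producing different concrete body types can share one response type. A standalone sketch of the same idea, using only `bytes` and `http_body_util` (the helper names mirror the ones above, but this is not the Garage code itself):

```rust
use std::convert::Infallible;

use bytes::Bytes;
use http_body_util::{combinators::BoxBody, BodyExt, Empty, Full};

// A Full or Empty body can never fail (its error type is Infallible), so it
// can be mapped to any error type E before being boxed.
fn string_body<E>(s: String) -> BoxBody<Bytes, E> {
	BoxBody::new(Full::new(Bytes::from(s.into_bytes())).map_err(|_: Infallible| unreachable!()))
}

fn empty_body<E>() -> BoxBody<Bytes, E> {
	BoxBody::new(Empty::new().map_err(|_: Infallible| unreachable!()))
}

fn main() {
	// Both helpers erase to the same boxed type, so a handler can return
	// either a payload or an empty body behind one signature.
	let with_payload: BoxBody<Bytes, std::io::Error> = string_body("hello".to_string());
	let without_payload: BoxBody<Bytes, std::io::Error> = empty_body();
	let _ = (with_payload, without_payload);
}
```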
@@ -2,8 +2,8 @@ use std::sync::Arc;
 
 use async_trait::async_trait;
 
-use futures::future::Future;
-use hyper::{Body, Method, Request, Response};
+use hyper::{body::Incoming as IncomingBody, Method, Request, Response};
+use tokio::sync::watch;
 
 use opentelemetry::{trace::SpanRef, KeyValue};
 
@@ -15,8 +15,7 @@ use garage_model::garage::Garage;
 use crate::generic_server::*;
 use crate::k2v::error::*;
 
-use crate::signature::payload::check_payload_signature;
-use crate::signature::streaming::*;
+use crate::signature::verify_request;
 
 use crate::helpers::*;
 use crate::k2v::batch::*;
@@ -25,6 +24,9 @@ use crate::k2v::item::*;
 use crate::k2v::router::Endpoint;
 use crate::s3::cors::*;
 
+pub use crate::signature::streaming::ReqBody;
+pub type ResBody = BoxBody<Error>;
+
 pub struct K2VApiServer {
 	garage: Arc<Garage>,
 }
@@ -39,10 +41,10 @@ impl K2VApiServer {
 		garage: Arc<Garage>,
 		bind_addr: UnixOrTCPSocketAddress,
 		s3_region: String,
-		shutdown_signal: impl Future<Output = ()>,
+		must_exit: watch::Receiver<bool>,
 	) -> Result<(), GarageError> {
 		ApiServer::new(s3_region, K2VApiServer { garage })
-			.run_server(bind_addr, None, shutdown_signal)
+			.run_server(bind_addr, None, must_exit)
 			.await
 	}
 }
@@ -55,7 +57,7 @@ impl ApiHandler for K2VApiServer {
 	type Endpoint = K2VApiEndpoint;
 	type Error = Error;
 
-	fn parse_endpoint(&self, req: &Request<Body>) -> Result<K2VApiEndpoint, Error> {
+	fn parse_endpoint(&self, req: &Request<IncomingBody>) -> Result<K2VApiEndpoint, Error> {
 		let (endpoint, bucket_name) = Endpoint::from_request(req)?;
 
 		Ok(K2VApiEndpoint {
@@ -66,9 +68,9 @@ impl ApiHandler for K2VApiServer {
 
 	async fn handle(
 		&self,
-		req: Request<Body>,
+		req: Request<IncomingBody>,
 		endpoint: K2VApiEndpoint,
-	) -> Result<Response<Body>, Error> {
+	) -> Result<Response<ResBody>, Error> {
 		let K2VApiEndpoint {
 			bucket_name,
 			endpoint,
@@ -77,22 +79,13 @@ impl ApiHandler for K2VApiServer {
 
 		// The OPTIONS method is procesed early, before we even check for an API key
 		if let Endpoint::Options = endpoint {
-			return Ok(handle_options_s3api(garage, &req, Some(bucket_name))
+			let options_res = handle_options_api(garage, &req, Some(bucket_name))
 				.await
-				.ok_or_bad_request("Error handling OPTIONS")?);
+				.ok_or_bad_request("Error handling OPTIONS")?;
+			return Ok(options_res.map(|_empty_body: EmptyBody| empty_body()));
 		}
 
-		let (api_key, mut content_sha256) = check_payload_signature(&garage, "k2v", &req).await?;
-		let api_key = api_key
-			.ok_or_else(|| Error::forbidden("Garage does not support anonymous access yet"))?;
-
-		let req = parse_streaming_body(
-			&api_key,
-			req,
-			&mut content_sha256,
-			&garage.config.s3_api.s3_region,
-			"k2v",
-		)?;
+		let (req, api_key, _content_sha256) = verify_request(&garage, req, "k2v").await?;
 
 		let bucket_id = garage
 			.bucket_helper()
@@ -102,6 +95,7 @@ impl ApiHandler for K2VApiServer {
 			.bucket_helper()
 			.get_existing_bucket(bucket_id)
 			.await?;
+		let bucket_params = bucket.state.into_option().unwrap();
 
 		let allowed = match endpoint.authorization_type() {
 			Authorization::Read => api_key.allow_read(&bucket_id),
@@ -119,40 +113,42 @@ impl ApiHandler for K2VApiServer {
 		// are always preflighted, i.e. the browser should make
 		// an OPTIONS call before to check it is allowed
 		let matching_cors_rule = match *req.method() {
-			Method::GET | Method::HEAD | Method::POST => find_matching_cors_rule(&bucket, &req)
-				.ok_or_internal_error("Error looking up CORS rule")?,
+			Method::GET | Method::HEAD | Method::POST => {
+				find_matching_cors_rule(&bucket_params, &req)
+					.ok_or_internal_error("Error looking up CORS rule")?
+					.cloned()
+			}
 			_ => None,
 		};
 
+		let ctx = ReqCtx {
+			garage,
+			bucket_id,
+			bucket_name,
+			bucket_params,
+			api_key,
+		};
+
 		let resp = match endpoint {
 			Endpoint::DeleteItem {
 				partition_key,
 				sort_key,
-			} => handle_delete_item(garage, req, bucket_id, &partition_key, &sort_key).await,
+			} => handle_delete_item(ctx, req, &partition_key, &sort_key).await,
 			Endpoint::InsertItem {
 				partition_key,
 				sort_key,
-			} => handle_insert_item(garage, req, bucket_id, &partition_key, &sort_key).await,
+			} => handle_insert_item(ctx, req, &partition_key, &sort_key).await,
 			Endpoint::ReadItem {
 				partition_key,
 				sort_key,
-			} => handle_read_item(garage, &req, bucket_id, &partition_key, &sort_key).await,
+			} => handle_read_item(ctx, &req, &partition_key, &sort_key).await,
 			Endpoint::PollItem {
 				partition_key,
 				sort_key,
 				causality_token,
 				timeout,
 			} => {
-				handle_poll_item(
-					garage,
-					&req,
-					bucket_id,
-					partition_key,
-					sort_key,
-					causality_token,
-					timeout,
-				)
-				.await
+				handle_poll_item(ctx, &req, partition_key, sort_key, causality_token, timeout).await
 			}
 			Endpoint::ReadIndex {
 				prefix,
@@ -160,12 +156,12 @@ impl ApiHandler for K2VApiServer {
 				end,
 				limit,
 				reverse,
-			} => handle_read_index(garage, bucket_id, prefix, start, end, limit, reverse).await,
-			Endpoint::InsertBatch {} => handle_insert_batch(garage, bucket_id, req).await,
-			Endpoint::ReadBatch {} => handle_read_batch(garage, bucket_id, req).await,
-			Endpoint::DeleteBatch {} => handle_delete_batch(garage, bucket_id, req).await,
+			} => handle_read_index(ctx, prefix, start, end, limit, reverse).await,
+			Endpoint::InsertBatch {} => handle_insert_batch(ctx, req).await,
+			Endpoint::ReadBatch {} => handle_read_batch(ctx, req).await,
+			Endpoint::DeleteBatch {} => handle_delete_batch(ctx, req).await,
 			Endpoint::PollRange { partition_key } => {
-				handle_poll_range(garage, bucket_id, &partition_key, req).await
+				handle_poll_range(ctx, &partition_key, req).await
 			}
 			Endpoint::Options => unreachable!(),
 		};
@@ -174,7 +170,7 @@ impl ApiHandler for K2VApiServer {
 		// add the corresponding CORS headers to the response
 		let mut resp_ok = resp?;
 		if let Some(rule) = matching_cors_rule {
-			add_cors_headers(&mut resp_ok, rule)
+			add_cors_headers(&mut resp_ok, &rule)
 				.ok_or_internal_error("Invalid bucket CORS configuration")?;
 		}
 
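Most of the handler-signature changes in this file and the following ones follow one pattern: the former `garage`/`bucket_id`/`bucket_name` parameters now travel together in a single `ReqCtx` value that each handler destructures. A small illustrative sketch of that pattern with stand-in types (not the real `ReqCtx`):

```rust
// Stand-ins for the real types carried by the request context.
type Uuid = u128;

struct ReqCtx {
	bucket_id: Uuid,
	bucket_name: String,
	api_key: String,
}

fn handle_read_index(ctx: &ReqCtx) -> String {
	// Handlers pull out only the fields they need and ignore the rest.
	let ReqCtx {
		bucket_id,
		bucket_name,
		..
	} = ctx;
	format!("reading index of bucket {} ({:032x})", bucket_name, bucket_id)
}

fn main() {
	let ctx = ReqCtx {
		bucket_id: 0x42,
		bucket_name: "example-bucket".to_string(),
		api_key: "dummy-key".to_string(),
	};
	println!("{}", handle_read_index(&ctx));
	// Other handlers can use the remaining fields, e.g. for permission checks.
	assert!(!ctx.api_key.is_empty());
}
```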
@@ -1,27 +1,25 @@
-use std::sync::Arc;
-
 use base64::prelude::*;
-use hyper::{Body, Request, Response, StatusCode};
+use hyper::{Request, Response, StatusCode};
 use serde::{Deserialize, Serialize};
 
-use garage_util::data::*;
-
 use garage_table::{EnumerationOrder, TableSchema};
 
-use garage_model::garage::Garage;
 use garage_model::k2v::causality::*;
 use garage_model::k2v::item_table::*;
 
 use crate::helpers::*;
+use crate::k2v::api_server::{ReqBody, ResBody};
 use crate::k2v::error::*;
 use crate::k2v::range::read_range;
 
 pub async fn handle_insert_batch(
-	garage: Arc<Garage>,
-	bucket_id: Uuid,
-	req: Request<Body>,
-) -> Result<Response<Body>, Error> {
-	let items = parse_json_body::<Vec<InsertBatchItem>>(req).await?;
+	ctx: ReqCtx,
+	req: Request<ReqBody>,
+) -> Result<Response<ResBody>, Error> {
+	let ReqCtx {
+		garage, bucket_id, ..
+	} = &ctx;
+	let items = parse_json_body::<Vec<InsertBatchItem>, _, Error>(req).await?;
 
 	let mut items2 = vec![];
 	for it in items {
@@ -37,24 +35,23 @@ pub async fn handle_insert_batch(
 		items2.push((it.pk, it.sk, ct, v));
 	}
 
-	garage.k2v.rpc.insert_batch(bucket_id, items2).await?;
+	garage.k2v.rpc.insert_batch(*bucket_id, items2).await?;
 
 	Ok(Response::builder()
 		.status(StatusCode::NO_CONTENT)
-		.body(Body::empty())?)
+		.body(empty_body())?)
 }
 
 pub async fn handle_read_batch(
-	garage: Arc<Garage>,
-	bucket_id: Uuid,
-	req: Request<Body>,
-) -> Result<Response<Body>, Error> {
-	let queries = parse_json_body::<Vec<ReadBatchQuery>>(req).await?;
+	ctx: ReqCtx,
+	req: Request<ReqBody>,
+) -> Result<Response<ResBody>, Error> {
+	let queries = parse_json_body::<Vec<ReadBatchQuery>, _, Error>(req).await?;
 
 	let resp_results = futures::future::join_all(
 		queries
 			.into_iter()
-			.map(|q| handle_read_batch_query(&garage, bucket_id, q)),
+			.map(|q| handle_read_batch_query(&ctx, q)),
 	)
 	.await;
 
@@ -67,12 +64,15 @@ pub async fn handle_read_batch(
 }
 
 async fn handle_read_batch_query(
-	garage: &Arc<Garage>,
-	bucket_id: Uuid,
+	ctx: &ReqCtx,
 	query: ReadBatchQuery,
 ) -> Result<ReadBatchResponse, Error> {
+	let ReqCtx {
+		garage, bucket_id, ..
+	} = ctx;
+
 	let partition = K2VItemPartition {
-		bucket_id,
+		bucket_id: *bucket_id,
 		partition_key: query.partition_key.clone(),
 	};
 
@@ -137,16 +137,15 @@ async fn handle_read_batch_query(
 }
 
 pub async fn handle_delete_batch(
-	garage: Arc<Garage>,
-	bucket_id: Uuid,
-	req: Request<Body>,
-) -> Result<Response<Body>, Error> {
-	let queries = parse_json_body::<Vec<DeleteBatchQuery>>(req).await?;
+	ctx: ReqCtx,
+	req: Request<ReqBody>,
+) -> Result<Response<ResBody>, Error> {
+	let queries = parse_json_body::<Vec<DeleteBatchQuery>, _, Error>(req).await?;
 
 	let resp_results = futures::future::join_all(
 		queries
 			.into_iter()
-			.map(|q| handle_delete_batch_query(&garage, bucket_id, q)),
+			.map(|q| handle_delete_batch_query(&ctx, q)),
 	)
 	.await;
 
@@ -159,12 +158,15 @@ pub async fn handle_delete_batch(
 }
 
 async fn handle_delete_batch_query(
-	garage: &Arc<Garage>,
-	bucket_id: Uuid,
+	ctx: &ReqCtx,
 	query: DeleteBatchQuery,
 ) -> Result<DeleteBatchResponse, Error> {
+	let ReqCtx {
+		garage, bucket_id, ..
+	} = &ctx;
+
 	let partition = K2VItemPartition {
-		bucket_id,
+		bucket_id: *bucket_id,
 		partition_key: query.partition_key.clone(),
 	};
 
@@ -194,7 +196,7 @@ async fn handle_delete_batch_query(
 			.k2v
 			.rpc
 			.insert(
-				bucket_id,
+				*bucket_id,
 				i.partition.partition_key,
 				i.sort_key,
 				Some(cc),
@@ -234,7 +236,7 @@ async fn handle_delete_batch_query(
 			.collect::<Vec<_>>();
 		let n = items.len();
 
-		garage.k2v.rpc.insert_batch(bucket_id, items).await?;
+		garage.k2v.rpc.insert_batch(*bucket_id, items).await?;
 
 		n
 	};
@@ -250,14 +252,16 @@ async fn handle_delete_batch_query(
 }
 
 pub(crate) async fn handle_poll_range(
-	garage: Arc<Garage>,
-	bucket_id: Uuid,
+	ctx: ReqCtx,
 	partition_key: &str,
-	req: Request<Body>,
-) -> Result<Response<Body>, Error> {
+	req: Request<ReqBody>,
+) -> Result<Response<ResBody>, Error> {
+	let ReqCtx {
+		garage, bucket_id, ..
+	} = ctx;
 	use garage_model::k2v::sub::PollRange;
 
-	let query = parse_json_body::<PollRangeQuery>(req).await?;
+	let query = parse_json_body::<PollRangeQuery, _, Error>(req).await?;
 
 	let timeout_msec = query.timeout.unwrap_or(300).clamp(1, 600) * 1000;
 
@@ -292,7 +296,7 @@ pub(crate) async fn handle_poll_range(
 	} else {
 		Ok(Response::builder()
 			.status(StatusCode::NOT_MODIFIED)
-			.body(Body::empty())?)
+			.body(empty_body())?)
 	}
 }
 
@@ -1,13 +1,11 @@
 use err_derive::Error;
 use hyper::header::HeaderValue;
-use hyper::{Body, HeaderMap, StatusCode};
+use hyper::{HeaderMap, StatusCode};
 
-use garage_model::helper::error::Error as HelperError;
-
 use crate::common_error::CommonError;
 pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInternalError};
 use crate::generic_server::ApiError;
-use crate::helpers::CustomApiErrorBody;
+use crate::helpers::*;
 use crate::signature::error::Error as SignatureError;
 
 /// Errors of this crate
@@ -30,10 +28,6 @@ pub enum Error {
 	#[error(display = "Invalid base64: {}", _0)]
 	InvalidBase64(#[error(source)] base64::DecodeError),
 
-	/// The client sent a header with invalid value
-	#[error(display = "Invalid header value: {}", _0)]
-	InvalidHeader(#[error(source)] hyper::header::ToStrError),
-
 	/// The client asked for an invalid return format (invalid Accept header)
 	#[error(display = "Not acceptable: {}", _0)]
 	NotAcceptable(String),
@@ -54,18 +48,6 @@ where
 
 impl CommonErrorDerivative for Error {}
 
-impl From<HelperError> for Error {
-	fn from(err: HelperError) -> Self {
-		match err {
-			HelperError::Internal(i) => Self::Common(CommonError::InternalError(i)),
-			HelperError::BadRequest(b) => Self::Common(CommonError::BadRequest(b)),
-			HelperError::InvalidBucketName(n) => Self::Common(CommonError::InvalidBucketName(n)),
-			HelperError::NoSuchBucket(n) => Self::Common(CommonError::NoSuchBucket(n)),
-			e => Self::Common(CommonError::BadRequest(format!("{}", e))),
-		}
-	}
-}
-
 impl From<SignatureError> for Error {
 	fn from(err: SignatureError) -> Self {
 		match err {
@@ -74,7 +56,6 @@ impl From<SignatureError> for Error {
 				Self::AuthorizationHeaderMalformed(c)
 			}
 			SignatureError::InvalidUtf8Str(i) => Self::InvalidUtf8Str(i),
-			SignatureError::InvalidHeader(h) => Self::InvalidHeader(h),
 		}
 	}
 }
@@ -90,7 +71,6 @@ impl Error {
 			Error::NotAcceptable(_) => "NotAcceptable",
 			Error::AuthorizationHeaderMalformed(_) => "AuthorizationHeaderMalformed",
 			Error::InvalidBase64(_) => "InvalidBase64",
-			Error::InvalidHeader(_) => "InvalidHeaderValue",
 			Error::InvalidUtf8Str(_) => "InvalidUtf8String",
 		}
 	}
@@ -105,7 +85,6 @@ impl ApiError for Error {
 			Error::NotAcceptable(_) => StatusCode::NOT_ACCEPTABLE,
 			Error::AuthorizationHeaderMalformed(_)
 			| Error::InvalidBase64(_)
-			| Error::InvalidHeader(_)
 			| Error::InvalidUtf8Str(_) => StatusCode::BAD_REQUEST,
 		}
 	}
@@ -115,14 +94,14 @@ impl ApiError for Error {
 		header_map.append(header::CONTENT_TYPE, "application/json".parse().unwrap());
 	}
 
-	fn http_body(&self, garage_region: &str, path: &str) -> Body {
+	fn http_body(&self, garage_region: &str, path: &str) -> ErrorBody {
 		let error = CustomApiErrorBody {
 			code: self.code().to_string(),
 			message: format!("{}", self),
 			path: path.to_string(),
 			region: garage_region.to_string(),
 		};
-		Body::from(serde_json::to_string_pretty(&error).unwrap_or_else(|_| {
+		let error_str = serde_json::to_string_pretty(&error).unwrap_or_else(|_| {
 			r#"
 {
 	"code": "InternalError",
@@ -130,6 +109,7 @@ impl ApiError for Error {
 }
 			"#
 			.into()
-		}))
+		});
+		error_body(error_str)
 	}
 }
@@ -1,32 +1,34 @@
-use std::sync::Arc;
+use hyper::Response;
 
-use hyper::{Body, Response};
 use serde::Serialize;
 
-use garage_util::data::*;
-
-use garage_rpc::ring::Ring;
 use garage_table::util::*;
 
-use garage_model::garage::Garage;
 use garage_model::k2v::item_table::{BYTES, CONFLICTS, ENTRIES, VALUES};
 
 use crate::helpers::*;
+use crate::k2v::api_server::ResBody;
 use crate::k2v::error::*;
 use crate::k2v::range::read_range;
 
 pub async fn handle_read_index(
-	garage: Arc<Garage>,
-	bucket_id: Uuid,
+	ctx: ReqCtx,
 	prefix: Option<String>,
 	start: Option<String>,
 	end: Option<String>,
 	limit: Option<u64>,
 	reverse: Option<bool>,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
+	let ReqCtx {
+		garage, bucket_id, ..
+	} = &ctx;
+
 	let reverse = reverse.unwrap_or(false);
 
-	let ring: Arc<Ring> = garage.system.ring.borrow().clone();
+	let node_id_vec = garage
+		.system
+		.cluster_layout()
+		.all_nongateway_nodes()
+		.to_vec();
 
 	let (partition_keys, more, next_start) = read_range(
 		&garage.k2v.counter_table.table,
@@ -35,7 +37,7 @@ pub async fn handle_read_index(
 		&start,
 		&end,
 		limit,
-		Some((DeletedFilter::NotDeleted, ring.layout.node_id_vec.clone())),
+		Some((DeletedFilter::NotDeleted, node_id_vec)),
 		EnumerationOrder::from_reverse(reverse),
 	)
 	.await?;
@@ -54,7 +56,7 @@ pub async fn handle_read_index(
 		partition_keys: partition_keys
 			.into_iter()
 			.map(|part| {
-				let vals = part.filtered_values(&ring);
+				let vals = part.filtered_values(&garage.system.cluster_layout());
 				ReadIndexResponseEntry {
 					pk: part.sk,
 					entries: *vals.get(&s_entries).unwrap_or(&0),
@@ -68,7 +70,7 @@ pub async fn handle_read_index(
 		next_start,
 	};
 
-	Ok(json_ok_response(&resp)?)
+	json_ok_response::<Error, _>(&resp)
 }
 
 #[derive(Serialize)]
@@ -1,16 +1,13 @@
-use std::sync::Arc;
-
 use base64::prelude::*;
 use http::header;
 
-use hyper::{Body, Request, Response, StatusCode};
+use hyper::{Request, Response, StatusCode};
 
-use garage_util::data::*;
-
-use garage_model::garage::Garage;
 use garage_model::k2v::causality::*;
 use garage_model::k2v::item_table::*;
 
+use crate::helpers::*;
+use crate::k2v::api_server::{ReqBody, ResBody};
 use crate::k2v::error::*;
 
 pub const X_GARAGE_CAUSALITY_TOKEN: &str = "X-Garage-Causality-Token";
@@ -22,7 +19,7 @@ pub enum ReturnFormat {
 }
 
 impl ReturnFormat {
-	pub fn from(req: &Request<Body>) -> Result<Self, Error> {
+	pub fn from(req: &Request<ReqBody>) -> Result<Self, Error> {
 		let accept = match req.headers().get(header::ACCEPT) {
 			Some(a) => a.to_str()?,
 			None => return Ok(Self::Json),
@@ -40,7 +37,7 @@ impl ReturnFormat {
 		}
 	}
 
-	pub fn make_response(&self, item: &K2VItem) -> Result<Response<Body>, Error> {
+	pub fn make_response(&self, item: &K2VItem) -> Result<Response<ResBody>, Error> {
 		let vals = item.values();
 
 		if vals.is_empty() {
@@ -52,7 +49,7 @@ impl ReturnFormat {
 			Self::Binary if vals.len() > 1 => Ok(Response::builder()
 				.header(X_GARAGE_CAUSALITY_TOKEN, ct)
 				.status(StatusCode::CONFLICT)
-				.body(Body::empty())?),
+				.body(empty_body())?),
 			Self::Binary => {
 				assert!(vals.len() == 1);
 				Self::make_binary_response(ct, vals[0])
@@ -62,22 +59,22 @@ impl ReturnFormat {
 		}
 	}
 
-	fn make_binary_response(ct: String, v: &DvvsValue) -> Result<Response<Body>, Error> {
+	fn make_binary_response(ct: String, v: &DvvsValue) -> Result<Response<ResBody>, Error> {
 		match v {
 			DvvsValue::Deleted => Ok(Response::builder()
 				.header(X_GARAGE_CAUSALITY_TOKEN, ct)
 				.header(header::CONTENT_TYPE, "application/octet-stream")
 				.status(StatusCode::NO_CONTENT)
-				.body(Body::empty())?),
+				.body(empty_body())?),
 			DvvsValue::Value(v) => Ok(Response::builder()
 				.header(X_GARAGE_CAUSALITY_TOKEN, ct)
 				.header(header::CONTENT_TYPE, "application/octet-stream")
 				.status(StatusCode::OK)
-				.body(Body::from(v.to_vec()))?),
+				.body(bytes_body(v.to_vec().into()))?),
 		}
 	}
 
-	fn make_json_response(ct: String, v: &[&DvvsValue]) -> Result<Response<Body>, Error> {
+	fn make_json_response(ct: String, v: &[&DvvsValue]) -> Result<Response<ResBody>, Error> {
 		let items = v
 			.iter()
 			.map(|v| match v {
@@ -91,19 +88,22 @@ impl ReturnFormat {
 			.header(X_GARAGE_CAUSALITY_TOKEN, ct)
 			.header(header::CONTENT_TYPE, "application/json")
 			.status(StatusCode::OK)
-			.body(Body::from(json_body))?)
+			.body(string_body(json_body))?)
 	}
 }
 
 /// Handle ReadItem request
 #[allow(clippy::ptr_arg)]
 pub async fn handle_read_item(
-	garage: Arc<Garage>,
-	req: &Request<Body>,
-	bucket_id: Uuid,
+	ctx: ReqCtx,
+	req: &Request<ReqBody>,
 	partition_key: &str,
 	sort_key: &String,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
+	let ReqCtx {
+		garage, bucket_id, ..
+	} = &ctx;
+
 	let format = ReturnFormat::from(req)?;
 
 	let item = garage
@@ -111,7 +111,7 @@ pub async fn handle_read_item(
 		.item_table
 		.get(
 			&K2VItemPartition {
-				bucket_id,
+				bucket_id: *bucket_id,
 				partition_key: partition_key.to_string(),
 			},
 			sort_key,
@@ -123,12 +123,14 @@ pub async fn handle_read_item(
 }
 
 pub async fn handle_insert_item(
-	garage: Arc<Garage>,
-	req: Request<Body>,
-	bucket_id: Uuid,
+	ctx: ReqCtx,
+	req: Request<ReqBody>,
 	partition_key: &str,
 	sort_key: &str,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
+	let ReqCtx {
+		garage, bucket_id, ..
+	} = &ctx;
 	let causal_context = req
 		.headers()
 		.get(X_GARAGE_CAUSALITY_TOKEN)
@@ -137,14 +139,17 @@ pub async fn handle_insert_item(
 		.map(CausalContext::parse_helper)
 		.transpose()?;
 
-	let body = hyper::body::to_bytes(req.into_body()).await?;
+	let body = http_body_util::BodyExt::collect(req.into_body())
+		.await?
+		.to_bytes();
+
 	let value = DvvsValue::Value(body.to_vec());
 
 	garage
 		.k2v
 		.rpc
 		.insert(
-			bucket_id,
+			*bucket_id,
 			partition_key.to_string(),
 			sort_key.to_string(),
 			causal_context,
@@ -154,16 +159,18 @@ pub async fn handle_insert_item(
 
 	Ok(Response::builder()
 		.status(StatusCode::NO_CONTENT)
-		.body(Body::empty())?)
+		.body(empty_body())?)
 }
 
 pub async fn handle_delete_item(
-	garage: Arc<Garage>,
-	req: Request<Body>,
-	bucket_id: Uuid,
+	ctx: ReqCtx,
+	req: Request<ReqBody>,
 	partition_key: &str,
 	sort_key: &str,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
+	let ReqCtx {
+		garage, bucket_id, ..
+	} = &ctx;
 	let causal_context = req
 		.headers()
 		.get(X_GARAGE_CAUSALITY_TOKEN)
@@ -178,7 +185,7 @@ pub async fn handle_delete_item(
 		.k2v
 		.rpc
 		.insert(
-			bucket_id,
+			*bucket_id,
 			partition_key.to_string(),
 			sort_key.to_string(),
 			causal_context,
@@ -188,20 +195,22 @@ pub async fn handle_delete_item(
 
 	Ok(Response::builder()
 		.status(StatusCode::NO_CONTENT)
-		.body(Body::empty())?)
+		.body(empty_body())?)
 }
 
 /// Handle ReadItem request
 #[allow(clippy::ptr_arg)]
 pub async fn handle_poll_item(
-	garage: Arc<Garage>,
-	req: &Request<Body>,
-	bucket_id: Uuid,
+	ctx: ReqCtx,
+	req: &Request<ReqBody>,
 	partition_key: String,
 	sort_key: String,
 	causality_token: String,
 	timeout_secs: Option<u64>,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
+	let ReqCtx {
+		garage, bucket_id, ..
+	} = &ctx;
 	let format = ReturnFormat::from(req)?;
 
 	let causal_context =
@@ -213,7 +222,7 @@ pub async fn handle_poll_item(
 		.k2v
 		.rpc
 		.poll_item(
-			bucket_id,
+			*bucket_id,
 			partition_key,
 			sort_key,
 			causal_context,
@@ -226,6 +235,6 @@ pub async fn handle_poll_item(
 	} else {
 		Ok(Response::builder()
 			.status(StatusCode::NOT_MODIFIED)
-			.body(Body::empty())?)
+			.body(empty_body())?)
 	}
 }
 
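The body-reading change in `handle_insert_item` above is the general hyper 0.14 → 1.x recipe: `hyper::body::to_bytes(...)` becomes `collect().await?.to_bytes()` from `http_body_util::BodyExt`. A minimal runnable sketch, with a `Full<Bytes>` body standing in for an incoming request body:

```rust
use bytes::Bytes;
use http_body_util::{BodyExt, Full};

#[tokio::main]
async fn main() -> Result<(), std::convert::Infallible> {
	// Stand-in for a request body; an incoming hyper body is collected the
	// same way, only with a different error type.
	let body = Full::new(Bytes::from_static(b"hello world"));
	let bytes = body.collect().await?.to_bytes();
	assert_eq!(&bytes[..], b"hello world");
	Ok(())
}
```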
@ -2,9 +2,9 @@ use std::sync::Arc;
|
||||||
|
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
|
|
||||||
use futures::future::Future;
|
|
||||||
use hyper::header;
|
use hyper::header;
|
||||||
use hyper::{Body, Request, Response};
|
use hyper::{body::Incoming as IncomingBody, Request, Response};
|
||||||
|
use tokio::sync::watch;
|
||||||
|
|
||||||
use opentelemetry::{trace::SpanRef, KeyValue};
|
use opentelemetry::{trace::SpanRef, KeyValue};
|
||||||
|
|
||||||
|
@ -17,8 +17,7 @@ use garage_model::key_table::Key;
|
||||||
use crate::generic_server::*;
|
use crate::generic_server::*;
|
||||||
use crate::s3::error::*;
|
use crate::s3::error::*;
|
||||||
|
|
||||||
use crate::signature::payload::check_payload_signature;
|
use crate::signature::verify_request;
|
||||||
use crate::signature::streaming::*;
|
|
||||||
|
|
||||||
use crate::helpers::*;
|
use crate::helpers::*;
|
||||||
use crate::s3::bucket::*;
|
use crate::s3::bucket::*;
|
||||||
|
@ -34,6 +33,9 @@ use crate::s3::put::*;
|
||||||
use crate::s3::router::Endpoint;
|
use crate::s3::router::Endpoint;
|
||||||
use crate::s3::website::*;
|
use crate::s3::website::*;
|
||||||
|
|
||||||
|
pub use crate::signature::streaming::ReqBody;
|
||||||
|
pub type ResBody = BoxBody<Error>;
|
||||||
|
|
||||||
pub struct S3ApiServer {
|
pub struct S3ApiServer {
|
||||||
garage: Arc<Garage>,
|
garage: Arc<Garage>,
|
||||||
}
|
}
|
||||||
|
@ -48,19 +50,19 @@ impl S3ApiServer {
|
||||||
garage: Arc<Garage>,
|
garage: Arc<Garage>,
|
||||||
addr: UnixOrTCPSocketAddress,
|
addr: UnixOrTCPSocketAddress,
|
||||||
s3_region: String,
|
s3_region: String,
|
||||||
shutdown_signal: impl Future<Output = ()>,
|
must_exit: watch::Receiver<bool>,
|
||||||
) -> Result<(), GarageError> {
|
) -> Result<(), GarageError> {
|
||||||
ApiServer::new(s3_region, S3ApiServer { garage })
|
ApiServer::new(s3_region, S3ApiServer { garage })
|
||||||
.run_server(addr, None, shutdown_signal)
|
.run_server(addr, None, must_exit)
|
||||||
.await
|
.await
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn handle_request_without_bucket(
|
async fn handle_request_without_bucket(
|
||||||
&self,
|
&self,
|
||||||
_req: Request<Body>,
|
_req: Request<ReqBody>,
|
||||||
api_key: Key,
|
api_key: Key,
|
||||||
endpoint: Endpoint,
|
endpoint: Endpoint,
|
||||||
) -> Result<Response<Body>, Error> {
|
) -> Result<Response<ResBody>, Error> {
|
||||||
match endpoint {
|
match endpoint {
|
||||||
Endpoint::ListBuckets => handle_list_buckets(&self.garage, &api_key).await,
|
Endpoint::ListBuckets => handle_list_buckets(&self.garage, &api_key).await,
|
||||||
endpoint => Err(Error::NotImplemented(endpoint.name().to_owned())),
|
endpoint => Err(Error::NotImplemented(endpoint.name().to_owned())),
|
||||||
|
@ -76,7 +78,7 @@ impl ApiHandler for S3ApiServer {
|
||||||
type Endpoint = S3ApiEndpoint;
|
type Endpoint = S3ApiEndpoint;
|
||||||
type Error = Error;
|
type Error = Error;
|
||||||
|
|
||||||
fn parse_endpoint(&self, req: &Request<Body>) -> Result<S3ApiEndpoint, Error> {
|
fn parse_endpoint(&self, req: &Request<IncomingBody>) -> Result<S3ApiEndpoint, Error> {
|
||||||
let authority = req
|
let authority = req
|
||||||
.headers()
|
.headers()
|
||||||
.get(header::HOST)
|
.get(header::HOST)
|
||||||
|
@ -104,9 +106,9 @@ impl ApiHandler for S3ApiServer {
|
||||||
|
|
||||||
async fn handle(
|
async fn handle(
|
||||||
&self,
|
&self,
|
||||||
req: Request<Body>,
|
req: Request<IncomingBody>,
|
||||||
endpoint: S3ApiEndpoint,
|
endpoint: S3ApiEndpoint,
|
||||||
) -> Result<Response<Body>, Error> {
|
) -> Result<Response<ResBody>, Error> {
|
||||||
let S3ApiEndpoint {
|
let S3ApiEndpoint {
|
||||||
bucket_name,
|
bucket_name,
|
||||||
endpoint,
|
endpoint,
|
||||||
|
@ -118,20 +120,11 @@ impl ApiHandler for S3ApiServer {
|
||||||
return handle_post_object(garage, req, bucket_name.unwrap()).await;
|
return handle_post_object(garage, req, bucket_name.unwrap()).await;
|
||||||
}
|
}
|
||||||
if let Endpoint::Options = endpoint {
|
if let Endpoint::Options = endpoint {
|
||||||
return handle_options_s3api(garage, &req, bucket_name).await;
|
let options_res = handle_options_api(garage, &req, bucket_name).await?;
|
||||||
|
return Ok(options_res.map(|_empty_body: EmptyBody| empty_body()));
|
||||||
}
|
}
|
||||||
|
|
||||||
let (api_key, mut content_sha256) = check_payload_signature(&garage, "s3", &req).await?;
|
let (req, api_key, content_sha256) = verify_request(&garage, req, "s3").await?;
|
||||||
let api_key = api_key
|
|
||||||
.ok_or_else(|| Error::forbidden("Garage does not support anonymous access yet"))?;
|
|
||||||
|
|
||||||
let req = parse_streaming_body(
|
|
||||||
&api_key,
|
|
||||||
req,
|
|
||||||
&mut content_sha256,
|
|
||||||
&garage.config.s3_api.s3_region,
|
|
||||||
"s3",
|
|
||||||
)?;
|
|
||||||
|
|
||||||
let bucket_name = match bucket_name {
|
let bucket_name = match bucket_name {
|
||||||
None => {
|
None => {
|
||||||
|
@ -144,7 +137,14 @@ impl ApiHandler for S3ApiServer {
|
||||||
|
|
||||||
// Special code path for CreateBucket API endpoint
|
// Special code path for CreateBucket API endpoint
|
||||||
if let Endpoint::CreateBucket {} = endpoint {
|
if let Endpoint::CreateBucket {} = endpoint {
|
||||||
return handle_create_bucket(&garage, req, content_sha256, api_key, bucket_name).await;
|
return handle_create_bucket(
|
||||||
|
&garage,
|
||||||
|
req,
|
||||||
|
content_sha256,
|
||||||
|
&api_key.key_id,
|
||||||
|
bucket_name,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
}
|
}
|
||||||
|
|
||||||
let bucket_id = garage
|
let bucket_id = garage
|
||||||
|
@ -155,6 +155,7 @@ impl ApiHandler for S3ApiServer {
|
||||||
.bucket_helper()
|
.bucket_helper()
|
||||||
.get_existing_bucket(bucket_id)
|
.get_existing_bucket(bucket_id)
|
||||||
.await?;
|
.await?;
|
||||||
|
let bucket_params = bucket.state.into_option().unwrap();
|
||||||
|
|
||||||
let allowed = match endpoint.authorization_type() {
|
let allowed = match endpoint.authorization_type() {
|
||||||
Authorization::Read => api_key.allow_read(&bucket_id),
|
Authorization::Read => api_key.allow_read(&bucket_id),
|
||||||
|
@ -167,82 +168,70 @@ impl ApiHandler for S3ApiServer {
|
||||||
return Err(Error::forbidden("Operation is not allowed for this key."));
|
return Err(Error::forbidden("Operation is not allowed for this key."));
|
||||||
}
|
}
|
||||||
|
|
||||||
let matching_cors_rule = find_matching_cors_rule(&bucket, &req)?;
|
let matching_cors_rule = find_matching_cors_rule(&bucket_params, &req)?.cloned();
|
||||||
|
|
||||||
|
let ctx = ReqCtx {
|
||||||
|
garage,
|
||||||
|
bucket_id,
|
||||||
|
bucket_name,
|
||||||
|
bucket_params,
|
||||||
|
api_key,
|
||||||
|
};
|
||||||
|
|
||||||
let resp = match endpoint {
|
let resp = match endpoint {
|
||||||
Endpoint::HeadObject {
|
Endpoint::HeadObject {
|
||||||
key, part_number, ..
|
key, part_number, ..
|
||||||
} => handle_head(garage, &req, bucket_id, &key, part_number).await,
|
} => handle_head(ctx, &req, &key, part_number).await,
|
||||||
Endpoint::GetObject {
|
Endpoint::GetObject {
|
||||||
key, part_number, ..
|
key,
|
||||||
} => handle_get(garage, &req, bucket_id, &key, part_number).await,
|
part_number,
|
||||||
|
response_cache_control,
|
||||||
|
response_content_disposition,
|
||||||
|
response_content_encoding,
|
||||||
|
response_content_language,
|
||||||
|
response_content_type,
|
||||||
|
response_expires,
|
||||||
|
..
|
||||||
|
} => {
|
||||||
|
let overrides = GetObjectOverrides {
|
||||||
|
response_cache_control,
|
||||||
|
response_content_disposition,
|
||||||
|
response_content_encoding,
|
||||||
|
response_content_language,
|
||||||
|
response_content_type,
|
||||||
|
response_expires,
|
||||||
|
};
|
||||||
|
handle_get(ctx, &req, &key, part_number, overrides).await
|
||||||
|
}
|
||||||
Endpoint::UploadPart {
|
Endpoint::UploadPart {
|
||||||
key,
|
key,
|
||||||
part_number,
|
part_number,
|
||||||
upload_id,
|
upload_id,
|
||||||
} => {
|
} => handle_put_part(ctx, req, &key, part_number, &upload_id, content_sha256).await,
|
||||||
handle_put_part(
|
Endpoint::CopyObject { key } => handle_copy(ctx, &req, &key).await,
|
||||||
garage,
|
|
||||||
req,
|
|
||||||
bucket_id,
|
|
||||||
&key,
|
|
||||||
part_number,
|
|
||||||
&upload_id,
|
|
||||||
content_sha256,
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
}
|
|
||||||
Endpoint::CopyObject { key } => {
|
|
||||||
handle_copy(garage, &api_key, &req, bucket_id, &key).await
|
|
||||||
}
|
|
||||||
Endpoint::UploadPartCopy {
|
Endpoint::UploadPartCopy {
|
||||||
key,
|
key,
|
||||||
part_number,
|
part_number,
|
||||||
upload_id,
|
upload_id,
|
||||||
} => {
|
} => handle_upload_part_copy(ctx, &req, &key, part_number, &upload_id).await,
|
||||||
handle_upload_part_copy(
|
Endpoint::PutObject { key } => handle_put(ctx, req, &key, content_sha256).await,
|
||||||
garage,
|
|
||||||
&api_key,
|
|
||||||
&req,
|
|
||||||
bucket_id,
|
|
||||||
&key,
|
|
||||||
part_number,
|
|
||||||
&upload_id,
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
}
|
|
||||||
Endpoint::PutObject { key } => {
|
|
||||||
handle_put(garage, req, &bucket, &key, content_sha256).await
|
|
||||||
}
|
|
||||||
Endpoint::AbortMultipartUpload { key, upload_id } => {
|
Endpoint::AbortMultipartUpload { key, upload_id } => {
|
||||||
handle_abort_multipart_upload(garage, bucket_id, &key, &upload_id).await
|
handle_abort_multipart_upload(ctx, &key, &upload_id).await
|
||||||
}
|
}
|
||||||
Endpoint::DeleteObject { key, .. } => handle_delete(garage, bucket_id, &key).await,
|
Endpoint::DeleteObject { key, .. } => handle_delete(ctx, &key).await,
|
||||||
Endpoint::CreateMultipartUpload { key } => {
|
Endpoint::CreateMultipartUpload { key } => {
|
||||||
handle_create_multipart_upload(garage, &req, &bucket_name, bucket_id, &key).await
|
handle_create_multipart_upload(ctx, &req, &key).await
|
||||||
}
|
}
|
||||||
Endpoint::CompleteMultipartUpload { key, upload_id } => {
|
Endpoint::CompleteMultipartUpload { key, upload_id } => {
|
||||||
handle_complete_multipart_upload(
|
handle_complete_multipart_upload(ctx, req, &key, &upload_id, content_sha256).await
|
||||||
garage,
|
|
||||||
req,
|
|
||||||
&bucket_name,
|
|
||||||
&bucket,
|
|
||||||
&key,
|
|
||||||
&upload_id,
|
|
||||||
content_sha256,
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
}
|
}
|
||||||
Endpoint::CreateBucket {} => unreachable!(),
|
Endpoint::CreateBucket {} => unreachable!(),
|
||||||
Endpoint::HeadBucket {} => {
|
Endpoint::HeadBucket {} => {
|
||||||
let empty_body: Body = Body::from(vec![]);
|
let response = Response::builder().body(empty_body()).unwrap();
|
||||||
let response = Response::builder().body(empty_body).unwrap();
|
|
||||||
Ok(response)
|
Ok(response)
|
||||||
}
|
}
|
||||||
Endpoint::DeleteBucket {} => {
|
Endpoint::DeleteBucket {} => handle_delete_bucket(ctx).await,
|
||||||
handle_delete_bucket(&garage, bucket_id, bucket_name, api_key).await
|
Endpoint::GetBucketLocation {} => handle_get_bucket_location(ctx),
|
||||||
}
|
|
||||||
Endpoint::GetBucketLocation {} => handle_get_bucket_location(garage),
|
|
||||||
Endpoint::GetBucketVersioning {} => handle_get_bucket_versioning(),
|
Endpoint::GetBucketVersioning {} => handle_get_bucket_versioning(),
|
 		Endpoint::ListObjects {
 			delimiter,
@@ -251,24 +240,21 @@ impl ApiHandler for S3ApiServer {
 			max_keys,
 			prefix,
 		} => {
-			handle_list(
-				garage,
-				&ListObjectsQuery {
-					common: ListQueryCommon { bucket_name, bucket_id,
-						delimiter: delimiter.map(|d| d.to_string()),
-						page_size: max_keys.unwrap_or(1000).clamp(1, 1000),
-						prefix: prefix.unwrap_or_default(),
-						urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false) },
-					is_v2: false, marker, continuation_token: None, start_after: None,
-				},
-			)
-			.await
+			let query = ListObjectsQuery {
+				common: ListQueryCommon {
+					bucket_name: ctx.bucket_name.clone(),
+					bucket_id,
+					delimiter,
+					page_size: max_keys.unwrap_or(1000).clamp(1, 1000),
+					prefix: prefix.unwrap_or_default(),
+					urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false),
+				},
+				is_v2: false,
+				marker,
+				continuation_token: None,
+				start_after: None,
+			};
+			handle_list(ctx, &query).await
 		}
 		Endpoint::ListObjectsV2 {
 			delimiter,
@@ -281,24 +267,21 @@ impl ApiHandler for S3ApiServer {
 			..
 		} => {
 			if list_type == "2" {
-				handle_list(
-					garage,
-					&ListObjectsQuery {
-						common: ListQueryCommon { bucket_name, bucket_id,
-							delimiter: delimiter.map(|d| d.to_string()),
-							page_size: max_keys.unwrap_or(1000).clamp(1, 1000),
-							urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false),
-							prefix: prefix.unwrap_or_default() },
-						is_v2: true, marker: None, continuation_token, start_after,
-					},
-				)
-				.await
+				let query = ListObjectsQuery {
+					common: ListQueryCommon {
+						bucket_name: ctx.bucket_name.clone(),
+						bucket_id,
+						delimiter,
+						page_size: max_keys.unwrap_or(1000).clamp(1, 1000),
+						urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false),
+						prefix: prefix.unwrap_or_default(),
+					},
+					is_v2: true,
+					marker: None,
+					continuation_token,
+					start_after,
+				};
+				handle_list(ctx, &query).await
 			} else {
 				Err(Error::bad_request(format!(
 					"Invalid endpoint: list-type={}",
@@ -314,22 +297,19 @@ impl ApiHandler for S3ApiServer {
 			prefix,
 			upload_id_marker,
 		} => {
-			handle_list_multipart_upload(
-				garage,
-				&ListMultipartUploadsQuery {
-					common: ListQueryCommon { bucket_name, bucket_id,
-						delimiter: delimiter.map(|d| d.to_string()),
-						page_size: max_uploads.unwrap_or(1000).clamp(1, 1000),
-						prefix: prefix.unwrap_or_default(),
-						urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false) },
-					key_marker, upload_id_marker,
-				},
-			)
-			.await
+			let query = ListMultipartUploadsQuery {
+				common: ListQueryCommon {
+					bucket_name: ctx.bucket_name.clone(),
+					bucket_id,
+					delimiter,
+					page_size: max_uploads.unwrap_or(1000).clamp(1, 1000),
+					prefix: prefix.unwrap_or_default(),
+					urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false),
+				},
+				key_marker,
+				upload_id_marker,
+			};
+			handle_list_multipart_upload(ctx, &query).await
 		}
 		Endpoint::ListParts {
 			key,
@@ -337,39 +317,28 @@ impl ApiHandler for S3ApiServer {
 			part_number_marker,
 			upload_id,
 		} => {
-			handle_list_parts(
-				garage,
-				&ListPartsQuery {
-					bucket_name, bucket_id, key, upload_id,
-					part_number_marker: part_number_marker.map(|p| p.min(10000)),
-					max_parts: max_parts.unwrap_or(1000).clamp(1, 1000),
-				},
-			)
-			.await
+			let query = ListPartsQuery {
+				bucket_name: ctx.bucket_name.clone(),
+				bucket_id,
+				key,
+				upload_id,
+				part_number_marker: part_number_marker.map(|p| p.min(10000)),
+				max_parts: max_parts.unwrap_or(1000).clamp(1, 1000),
+			};
+			handle_list_parts(ctx, req, &query).await
 		}
-		Endpoint::DeleteObjects {} => {
-			handle_delete_objects(garage, bucket_id, req, content_sha256).await
-		}
-		Endpoint::GetBucketWebsite {} => handle_get_website(&bucket).await,
-		Endpoint::PutBucketWebsite {} => {
-			handle_put_website(garage, bucket.clone(), req, content_sha256).await
-		}
-		Endpoint::DeleteBucketWebsite {} => handle_delete_website(garage, bucket.clone()).await,
-		Endpoint::GetBucketCors {} => handle_get_cors(&bucket).await,
-		Endpoint::PutBucketCors {} => {
-			handle_put_cors(garage, bucket.clone(), req, content_sha256).await
-		}
-		Endpoint::DeleteBucketCors {} => handle_delete_cors(garage, bucket.clone()).await,
-		Endpoint::GetBucketLifecycleConfiguration {} => handle_get_lifecycle(&bucket).await,
+		Endpoint::DeleteObjects {} => handle_delete_objects(ctx, req, content_sha256).await,
+		Endpoint::GetBucketWebsite {} => handle_get_website(ctx).await,
+		Endpoint::PutBucketWebsite {} => handle_put_website(ctx, req, content_sha256).await,
+		Endpoint::DeleteBucketWebsite {} => handle_delete_website(ctx).await,
+		Endpoint::GetBucketCors {} => handle_get_cors(ctx).await,
+		Endpoint::PutBucketCors {} => handle_put_cors(ctx, req, content_sha256).await,
+		Endpoint::DeleteBucketCors {} => handle_delete_cors(ctx).await,
+		Endpoint::GetBucketLifecycleConfiguration {} => handle_get_lifecycle(ctx).await,
 		Endpoint::PutBucketLifecycleConfiguration {} => {
-			handle_put_lifecycle(garage, bucket.clone(), req, content_sha256).await
+			handle_put_lifecycle(ctx, req, content_sha256).await
 		}
-		Endpoint::DeleteBucketLifecycle {} => {
-			handle_delete_lifecycle(garage, bucket.clone()).await
-		}
+		Endpoint::DeleteBucketLifecycle {} => handle_delete_lifecycle(ctx).await,
 		endpoint => Err(Error::NotImplemented(endpoint.name().to_owned())),
 	};
@@ -377,7 +346,7 @@ impl ApiHandler for S3ApiServer {
 	// add the corresponding CORS headers to the response
 	let mut resp_ok = resp?;
 	if let Some(rule) = matching_cors_rule {
-		add_cors_headers(&mut resp_ok, rule)
+		add_cors_headers(&mut resp_ok, &rule)
 			.ok_or_internal_error("Invalid bucket CORS configuration")?;
 	}
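All of the list arms normalize the client-supplied paging parameters in the same way: `max_keys` / `max_uploads` default to 1000 and are clamped to the 1..=1000 range, and `encoding-type=url` toggles URL-encoding of the response. A small self-contained illustration of that normalization (function names are local to this example):

fn normalize_page_size(max_keys: Option<u64>) -> u64 {
	// Default to 1000 entries per page and never allow 0 or more than 1000,
	// mirroring `max_keys.unwrap_or(1000).clamp(1, 1000)` in the handlers.
	max_keys.unwrap_or(1000).clamp(1, 1000)
}

fn urlencode_resp(encoding_type: Option<&str>) -> bool {
	encoding_type.map(|e| e == "url").unwrap_or(false)
}

fn main() {
	assert_eq!(normalize_page_size(None), 1000);
	assert_eq!(normalize_page_size(Some(0)), 1);
	assert_eq!(normalize_page_size(Some(50_000)), 1000);
	assert!(urlencode_resp(Some("url")));
	assert!(!urlencode_resp(None));
}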
@@ -1,7 +1,7 @@
 use std::collections::HashMap;
-use std::sync::Arc;
 
-use hyper::{Body, Request, Response, StatusCode};
+use http_body_util::BodyExt;
+use hyper::{Request, Response, StatusCode};
 
 use garage_model::bucket_alias_table::*;
 use garage_model::bucket_table::Bucket;
@@ -14,11 +14,14 @@ use garage_util::data::*;
 use garage_util::time::*;
 
 use crate::common_error::CommonError;
+use crate::helpers::*;
+use crate::s3::api_server::{ReqBody, ResBody};
 use crate::s3::error::*;
 use crate::s3::xml as s3_xml;
 use crate::signature::verify_signed_content;
 
-pub fn handle_get_bucket_location(garage: Arc<Garage>) -> Result<Response<Body>, Error> {
+pub fn handle_get_bucket_location(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
+	let ReqCtx { garage, .. } = ctx;
 	let loc = s3_xml::LocationConstraint {
 		xmlns: (),
 		region: garage.config.s3_api.s3_region.to_string(),
@@ -27,10 +30,10 @@ pub fn handle_get_bucket_location(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
 
 	Ok(Response::builder()
 		.header("Content-Type", "application/xml")
-		.body(Body::from(xml.into_bytes()))?)
+		.body(string_body(xml))?)
 }
 
-pub fn handle_get_bucket_versioning() -> Result<Response<Body>, Error> {
+pub fn handle_get_bucket_versioning() -> Result<Response<ResBody>, Error> {
 	let versioning = s3_xml::VersioningConfiguration {
 		xmlns: (),
 		status: None,
@@ -40,10 +43,13 @@ pub fn handle_get_bucket_versioning() -> Result<Response<ResBody>, Error> {
 
 	Ok(Response::builder()
 		.header("Content-Type", "application/xml")
-		.body(Body::from(xml.into_bytes()))?)
+		.body(string_body(xml))?)
 }
 
-pub async fn handle_list_buckets(garage: &Garage, api_key: &Key) -> Result<Response<Body>, Error> {
+pub async fn handle_list_buckets(
+	garage: &Garage,
+	api_key: &Key,
+) -> Result<Response<ResBody>, Error> {
 	let key_p = api_key.params().ok_or_internal_error(
 		"Key should not be in deleted state at this point (in handle_list_buckets)",
 	)?;
@@ -109,17 +115,17 @@ pub async fn handle_list_buckets(
 
 	Ok(Response::builder()
 		.header("Content-Type", "application/xml")
-		.body(Body::from(xml))?)
+		.body(string_body(xml))?)
 }
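Responses in this file are now built with the `string_body` / `empty_body` helpers and request bodies are drained with `BodyExt::collect` instead of `hyper::body::to_bytes`, following the hyper 1.x / http-body 1.x API split. The helpers' exact definitions are not shown in this diff; a plausible stand-alone equivalent using `http_body_util::Full` could look like the sketch below (assumed for illustration, not the project's actual code):

use bytes::Bytes;
use http_body_util::{BodyExt, Full};

// Hypothetical equivalents of the empty_body()/string_body() helpers used
// above; the real helpers live in crate::helpers and may differ.
fn empty_body() -> Full<Bytes> {
	Full::new(Bytes::new())
}

fn string_body(s: String) -> Full<Bytes> {
	Full::new(Bytes::from(s.into_bytes()))
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
	// Draining a body with the http-body 1.x API, as handle_create_bucket does:
	let body = string_body("<xml/>".to_string());
	let bytes = BodyExt::collect(body).await?.to_bytes();
	assert_eq!(&bytes[..], b"<xml/>");
	let _ = empty_body();
	Ok(())
}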
 pub async fn handle_create_bucket(
 	garage: &Garage,
-	req: Request<Body>,
+	req: Request<ReqBody>,
 	content_sha256: Option<Hash>,
-	api_key: Key,
+	api_key_id: &String,
 	bucket_name: String,
-) -> Result<Response<Body>, Error> {
-	let body = hyper::body::to_bytes(req.into_body()).await?;
+) -> Result<Response<ResBody>, Error> {
+	let body = BodyExt::collect(req.into_body()).await?.to_bytes();
 
 	if let Some(content_sha256) = content_sha256 {
 		verify_signed_content(content_sha256, &body[..])?;
@@ -138,16 +144,18 @@ pub async fn handle_create_bucket(
 		}
 	}
 
-	let key_params = api_key
-		.params()
-		.ok_or_internal_error("Key should not be deleted at this point")?;
+	let helper = garage.locked_helper().await;
+
+	// refetch API key after taking lock to ensure up-to-date data
+	let api_key = helper.key().get_existing_key(api_key_id).await?;
+	let key_params = api_key.params().unwrap();
 
 	let existing_bucket = if let Some(Some(bucket_id)) = key_params.local_aliases.get(&bucket_name)
 	{
 		Some(*bucket_id)
 	} else {
-		garage
-			.bucket_helper()
+		helper
+			.bucket()
 			.resolve_global_bucket_name(&bucket_name)
 			.await?
 	};
@@ -181,40 +189,35 @@ pub async fn handle_create_bucket(
 		let bucket = Bucket::new();
 		garage.bucket_table.insert(&bucket).await?;
 
-		garage
-			.bucket_helper()
+		helper
 			.set_bucket_key_permissions(bucket.id, &api_key.key_id, BucketKeyPerm::ALL_PERMISSIONS)
 			.await?;
 
-		garage
-			.bucket_helper()
+		helper
 			.set_local_bucket_alias(bucket.id, &api_key.key_id, &bucket_name)
 			.await?;
 	}
 
 	Ok(Response::builder()
 		.header("Location", format!("/{}", bucket_name))
-		.body(Body::empty())
+		.body(empty_body())
 		.unwrap())
 }
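handle_create_bucket now takes only the key ID and re-fetches the key after acquiring the locked helper, so the alias and permission checks operate on state that cannot have changed between authentication and the critical section. Below is a minimal sketch of that re-read-under-lock idea using a tokio mutex; the store and helper types are stand-ins invented for the example, not Garage's actual helper API.

use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::Mutex;

#[derive(Clone)]
struct ApiKey {
	local_aliases: HashMap<String, String>,
}

// The shared data that must stay consistent while buckets/aliases are created.
struct Locked {
	keys: HashMap<String, ApiKey>,
}

async fn create_bucket(shared: Arc<Mutex<Locked>>, api_key_id: &str, bucket_name: &str) -> Result<(), String> {
	// Take the lock first...
	let mut locked = shared.lock().await;
	// ...then re-fetch the key, so we never act on a stale copy that was
	// read before the lock was held (same idea as get_existing_key above).
	let key = locked
		.keys
		.get(api_key_id)
		.cloned()
		.ok_or_else(|| "no such key".to_string())?;
	if key.local_aliases.contains_key(bucket_name) {
		return Err("alias already exists".into());
	}
	locked
		.keys
		.get_mut(api_key_id)
		.unwrap()
		.local_aliases
		.insert(bucket_name.to_string(), "new-bucket-id".to_string());
	Ok(())
}

#[tokio::main]
async fn main() {
	let shared = Arc::new(Mutex::new(Locked {
		keys: HashMap::from([("GK1".to_string(), ApiKey { local_aliases: HashMap::new() })]),
	}));
	create_bucket(shared.clone(), "GK1", "my-bucket").await.unwrap();
	assert!(create_bucket(shared, "GK1", "my-bucket").await.is_err());
}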
-pub async fn handle_delete_bucket(
-	garage: &Garage,
-	bucket_id: Uuid,
-	bucket_name: String,
-	api_key: Key,
-) -> Result<Response<Body>, Error> {
-	let key_params = api_key
-		.params()
-		.ok_or_internal_error("Key should not be deleted at this point")?;
+pub async fn handle_delete_bucket(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
+	let ReqCtx {
+		garage,
+		bucket_id,
+		bucket_name,
+		bucket_params: bucket_state,
+		api_key,
+		..
+	} = &ctx;
+	let helper = garage.locked_helper().await;
 
-	let is_local_alias = matches!(key_params.local_aliases.get(&bucket_name), Some(Some(_)));
+	let key_params = api_key.params().unwrap();
 
-	let mut bucket = garage
-		.bucket_helper()
-		.get_existing_bucket(bucket_id)
-		.await?;
-	let bucket_state = bucket.state.as_option().unwrap();
+	let is_local_alias = matches!(key_params.local_aliases.get(bucket_name), Some(Some(_)));
 
 	// If the bucket has no other aliases, this is a true deletion.
 	// Otherwise, it is just an alias removal.
@@ -224,65 +227,63 @@ pub async fn handle_delete_bucket(
 		.items()
 		.iter()
 		.filter(|(_, _, active)| *active)
-		.any(|(n, _, _)| is_local_alias || (*n != bucket_name));
+		.any(|(n, _, _)| is_local_alias || (*n != *bucket_name));
 
 	let has_other_local_aliases = bucket_state
 		.local_aliases
 		.items()
 		.iter()
 		.filter(|(_, _, active)| *active)
-		.any(|((k, n), _, _)| !is_local_alias || *n != bucket_name || *k != api_key.key_id);
+		.any(|((k, n), _, _)| !is_local_alias || *n != *bucket_name || *k != api_key.key_id);
 
 	if !has_other_global_aliases && !has_other_local_aliases {
 		// Delete bucket
 
 		// Check bucket is empty
-		if !garage.bucket_helper().is_bucket_empty(bucket_id).await? {
+		if !helper.bucket().is_bucket_empty(*bucket_id).await? {
 			return Err(CommonError::BucketNotEmpty.into());
 		}
 
 		// --- done checking, now commit ---
 		// 1. delete bucket alias
 		if is_local_alias {
-			garage
-				.bucket_helper()
-				.unset_local_bucket_alias(bucket_id, &api_key.key_id, &bucket_name)
+			helper
+				.unset_local_bucket_alias(*bucket_id, &api_key.key_id, bucket_name)
 				.await?;
 		} else {
-			garage
-				.bucket_helper()
-				.unset_global_bucket_alias(bucket_id, &bucket_name)
+			helper
+				.unset_global_bucket_alias(*bucket_id, bucket_name)
 				.await?;
 		}
 
 		// 2. delete authorization from keys that had access
-		for (key_id, _) in bucket.authorized_keys() {
-			garage
-				.bucket_helper()
-				.set_bucket_key_permissions(bucket.id, key_id, BucketKeyPerm::NO_PERMISSIONS)
+		for (key_id, _) in bucket_state.authorized_keys.items() {
+			helper
+				.set_bucket_key_permissions(*bucket_id, key_id, BucketKeyPerm::NO_PERMISSIONS)
 				.await?;
 		}
 
+		let bucket = Bucket {
+			id: *bucket_id,
+			state: Deletable::delete(),
+		};
 		// 3. delete bucket
-		bucket.state = Deletable::delete();
 		garage.bucket_table.insert(&bucket).await?;
 	} else if is_local_alias {
 		// Just unalias
-		garage
-			.bucket_helper()
-			.unset_local_bucket_alias(bucket_id, &api_key.key_id, &bucket_name)
+		helper
			.unset_local_bucket_alias(*bucket_id, &api_key.key_id, bucket_name)
 			.await?;
 	} else {
 		// Just unalias (but from global namespace)
-		garage
-			.bucket_helper()
-			.unset_global_bucket_alias(bucket_id, &bucket_name)
+		helper
+			.unset_global_bucket_alias(*bucket_id, bucket_name)
 			.await?;
 	}
 
 	Ok(Response::builder()
 		.status(StatusCode::NO_CONTENT)
-		.body(Body::empty())?)
+		.body(empty_body())?)
 }
 
 fn parse_create_bucket_xml(xml_bytes: &[u8]) -> Option<Option<String>> {
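The deletion path above hinges on whether any alias other than the one being removed still points at the bucket: if none does, the bucket is truly deleted (after checking that it is empty), otherwise only the alias is dropped. A compact, self-contained restatement of that decision, with plain slices standing in for Garage's alias tables (illustrative only):

// Global aliases: bucket names in the global namespace.
// Local aliases: (key_id, bucket name) pairs private to an access key.
fn bucket_action(
	global_aliases: &[&str],
	local_aliases: &[(&str, &str)],
	requesting_key: &str,
	bucket_name: &str,
	is_local_alias: bool,
) -> &'static str {
	let has_other_global_aliases = global_aliases
		.iter()
		.any(|n| is_local_alias || *n != bucket_name);
	let has_other_local_aliases = local_aliases
		.iter()
		.any(|(k, n)| !is_local_alias || *n != bucket_name || *k != requesting_key);
	if !has_other_global_aliases && !has_other_local_aliases {
		"delete bucket (true deletion)"
	} else {
		"remove alias only"
	}
}

fn main() {
	// Only one global alias exists and it is the one being removed: delete.
	assert_eq!(bucket_action(&["b1"], &[], "GK1", "b1", false), "delete bucket (true deletion)");
	// Another key still holds a local alias: only unalias.
	assert_eq!(bucket_action(&["b1"], &[("GK2", "other")], "GK1", "b1", false), "remove alias only");
}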
src/api/s3/checksum.rs (new file, 406 lines)
@@ -0,0 +1,406 @@
use std::convert::{TryFrom, TryInto};
use std::hash::Hasher;

use base64::prelude::*;
use crc32c::Crc32cHasher as Crc32c;
use crc32fast::Hasher as Crc32;
use md5::{Digest, Md5};
use sha1::Sha1;
use sha2::Sha256;

use http::{HeaderMap, HeaderName, HeaderValue};

use garage_util::data::*;
use garage_util::error::OkOrMessage;

use garage_model::s3::object_table::*;

use crate::s3::error::*;

pub const X_AMZ_CHECKSUM_ALGORITHM: HeaderName =
	HeaderName::from_static("x-amz-checksum-algorithm");
pub const X_AMZ_CHECKSUM_MODE: HeaderName = HeaderName::from_static("x-amz-checksum-mode");
pub const X_AMZ_CHECKSUM_CRC32: HeaderName = HeaderName::from_static("x-amz-checksum-crc32");
pub const X_AMZ_CHECKSUM_CRC32C: HeaderName = HeaderName::from_static("x-amz-checksum-crc32c");
pub const X_AMZ_CHECKSUM_SHA1: HeaderName = HeaderName::from_static("x-amz-checksum-sha1");
pub const X_AMZ_CHECKSUM_SHA256: HeaderName = HeaderName::from_static("x-amz-checksum-sha256");

pub type Crc32Checksum = [u8; 4];
pub type Crc32cChecksum = [u8; 4];
pub type Md5Checksum = [u8; 16];
pub type Sha1Checksum = [u8; 20];
pub type Sha256Checksum = [u8; 32];

#[derive(Debug, Default)]
pub(crate) struct ExpectedChecksums {
	// base64-encoded md5 (content-md5 header)
	pub md5: Option<String>,
	// content_sha256 (as a Hash / FixedBytes32)
	pub sha256: Option<Hash>,
	// extra x-amz-checksum-* header
	pub extra: Option<ChecksumValue>,
}

pub(crate) struct Checksummer {
	pub crc32: Option<Crc32>,
	pub crc32c: Option<Crc32c>,
	pub md5: Option<Md5>,
	pub sha1: Option<Sha1>,
	pub sha256: Option<Sha256>,
}

#[derive(Default)]
pub(crate) struct Checksums {
	pub crc32: Option<Crc32Checksum>,
	pub crc32c: Option<Crc32cChecksum>,
	pub md5: Option<Md5Checksum>,
	pub sha1: Option<Sha1Checksum>,
	pub sha256: Option<Sha256Checksum>,
}

impl Checksummer {
	pub(crate) fn init(expected: &ExpectedChecksums, require_md5: bool) -> Self {
		let mut ret = Self {
			crc32: None,
			crc32c: None,
			md5: None,
			sha1: None,
			sha256: None,
		};

		if expected.md5.is_some() || require_md5 {
			ret.md5 = Some(Md5::new());
		}
		if expected.sha256.is_some() || matches!(&expected.extra, Some(ChecksumValue::Sha256(_))) {
			ret.sha256 = Some(Sha256::new());
		}
		if matches!(&expected.extra, Some(ChecksumValue::Crc32(_))) {
			ret.crc32 = Some(Crc32::new());
		}
		if matches!(&expected.extra, Some(ChecksumValue::Crc32c(_))) {
			ret.crc32c = Some(Crc32c::default());
		}
		if matches!(&expected.extra, Some(ChecksumValue::Sha1(_))) {
			ret.sha1 = Some(Sha1::new());
		}
		ret
	}

	pub(crate) fn add(mut self, algo: Option<ChecksumAlgorithm>) -> Self {
		match algo {
			Some(ChecksumAlgorithm::Crc32) => {
				self.crc32 = Some(Crc32::new());
			}
			Some(ChecksumAlgorithm::Crc32c) => {
				self.crc32c = Some(Crc32c::default());
			}
			Some(ChecksumAlgorithm::Sha1) => {
				self.sha1 = Some(Sha1::new());
			}
			Some(ChecksumAlgorithm::Sha256) => {
				self.sha256 = Some(Sha256::new());
			}
			None => (),
		}
		self
	}

	pub(crate) fn update(&mut self, bytes: &[u8]) {
		if let Some(crc32) = &mut self.crc32 {
			crc32.update(bytes);
		}
		if let Some(crc32c) = &mut self.crc32c {
			crc32c.write(bytes);
		}
		if let Some(md5) = &mut self.md5 {
			md5.update(bytes);
		}
		if let Some(sha1) = &mut self.sha1 {
			sha1.update(bytes);
		}
		if let Some(sha256) = &mut self.sha256 {
			sha256.update(bytes);
		}
	}

	pub(crate) fn finalize(self) -> Checksums {
		Checksums {
			crc32: self.crc32.map(|x| u32::to_be_bytes(x.finalize())),
			crc32c: self
				.crc32c
				.map(|x| u32::to_be_bytes(u32::try_from(x.finish()).unwrap())),
			md5: self.md5.map(|x| x.finalize()[..].try_into().unwrap()),
			sha1: self.sha1.map(|x| x.finalize()[..].try_into().unwrap()),
			sha256: self.sha256.map(|x| x.finalize()[..].try_into().unwrap()),
		}
	}
}

impl Checksums {
	pub fn verify(&self, expected: &ExpectedChecksums) -> Result<(), Error> {
		if let Some(expected_md5) = &expected.md5 {
			match self.md5 {
				Some(md5) if BASE64_STANDARD.encode(&md5) == expected_md5.trim_matches('"') => (),
				_ => {
					return Err(Error::InvalidDigest(
						"MD5 checksum verification failed (from content-md5)".into(),
					))
				}
			}
		}
		if let Some(expected_sha256) = &expected.sha256 {
			match self.sha256 {
				Some(sha256) if &sha256[..] == expected_sha256.as_slice() => (),
				_ => {
					return Err(Error::InvalidDigest(
						"SHA256 checksum verification failed (from x-amz-content-sha256)".into(),
					))
				}
			}
		}
		if let Some(extra) = expected.extra {
			let algo = extra.algorithm();
			if self.extract(Some(algo)) != Some(extra) {
				return Err(Error::InvalidDigest(format!(
					"Failed to validate checksum for algorithm {:?}",
					algo
				)));
			}
		}
		Ok(())
	}

	pub fn extract(&self, algo: Option<ChecksumAlgorithm>) -> Option<ChecksumValue> {
		match algo {
			None => None,
			Some(ChecksumAlgorithm::Crc32) => Some(ChecksumValue::Crc32(self.crc32.unwrap())),
			Some(ChecksumAlgorithm::Crc32c) => Some(ChecksumValue::Crc32c(self.crc32c.unwrap())),
			Some(ChecksumAlgorithm::Sha1) => Some(ChecksumValue::Sha1(self.sha1.unwrap())),
			Some(ChecksumAlgorithm::Sha256) => Some(ChecksumValue::Sha256(self.sha256.unwrap())),
		}
	}
}

// ----

#[derive(Default)]
pub(crate) struct MultipartChecksummer {
	pub md5: Md5,
	pub extra: Option<MultipartExtraChecksummer>,
}

pub(crate) enum MultipartExtraChecksummer {
	Crc32(Crc32),
	Crc32c(Crc32c),
	Sha1(Sha1),
	Sha256(Sha256),
}

impl MultipartChecksummer {
	pub(crate) fn init(algo: Option<ChecksumAlgorithm>) -> Self {
		Self {
			md5: Md5::new(),
			extra: match algo {
				None => None,
				Some(ChecksumAlgorithm::Crc32) => {
					Some(MultipartExtraChecksummer::Crc32(Crc32::new()))
				}
				Some(ChecksumAlgorithm::Crc32c) => {
					Some(MultipartExtraChecksummer::Crc32c(Crc32c::default()))
				}
				Some(ChecksumAlgorithm::Sha1) => Some(MultipartExtraChecksummer::Sha1(Sha1::new())),
				Some(ChecksumAlgorithm::Sha256) => {
					Some(MultipartExtraChecksummer::Sha256(Sha256::new()))
				}
			},
		}
	}

	pub(crate) fn update(
		&mut self,
		etag: &str,
		checksum: Option<ChecksumValue>,
	) -> Result<(), Error> {
		self.md5
			.update(&hex::decode(&etag).ok_or_message("invalid etag hex")?);
		match (&mut self.extra, checksum) {
			(None, _) => (),
			(
				Some(MultipartExtraChecksummer::Crc32(ref mut crc32)),
				Some(ChecksumValue::Crc32(x)),
			) => {
				crc32.update(&x);
			}
			(
				Some(MultipartExtraChecksummer::Crc32c(ref mut crc32c)),
				Some(ChecksumValue::Crc32c(x)),
			) => {
				crc32c.write(&x);
			}
			(Some(MultipartExtraChecksummer::Sha1(ref mut sha1)), Some(ChecksumValue::Sha1(x))) => {
				sha1.update(&x);
			}
			(
				Some(MultipartExtraChecksummer::Sha256(ref mut sha256)),
				Some(ChecksumValue::Sha256(x)),
			) => {
				sha256.update(&x);
			}
			(Some(_), b) => {
				return Err(Error::internal_error(format!(
					"part checksum was not computed correctly, got: {:?}",
					b
				)))
			}
		}
		Ok(())
	}

	pub(crate) fn finalize(self) -> (Md5Checksum, Option<ChecksumValue>) {
		let md5 = self.md5.finalize()[..].try_into().unwrap();
		let extra = match self.extra {
			None => None,
			Some(MultipartExtraChecksummer::Crc32(crc32)) => {
				Some(ChecksumValue::Crc32(u32::to_be_bytes(crc32.finalize())))
			}
			Some(MultipartExtraChecksummer::Crc32c(crc32c)) => Some(ChecksumValue::Crc32c(
				u32::to_be_bytes(u32::try_from(crc32c.finish()).unwrap()),
			)),
			Some(MultipartExtraChecksummer::Sha1(sha1)) => {
				Some(ChecksumValue::Sha1(sha1.finalize()[..].try_into().unwrap()))
			}
			Some(MultipartExtraChecksummer::Sha256(sha256)) => Some(ChecksumValue::Sha256(
				sha256.finalize()[..].try_into().unwrap(),
			)),
		};
		(md5, extra)
	}
}

// ----

/// Extract the value of the x-amz-checksum-algorithm header
pub(crate) fn request_checksum_algorithm(
	headers: &HeaderMap<HeaderValue>,
) -> Result<Option<ChecksumAlgorithm>, Error> {
	match headers.get(X_AMZ_CHECKSUM_ALGORITHM) {
		None => Ok(None),
		Some(x) if x == "CRC32" => Ok(Some(ChecksumAlgorithm::Crc32)),
		Some(x) if x == "CRC32C" => Ok(Some(ChecksumAlgorithm::Crc32c)),
		Some(x) if x == "SHA1" => Ok(Some(ChecksumAlgorithm::Sha1)),
		Some(x) if x == "SHA256" => Ok(Some(ChecksumAlgorithm::Sha256)),
		_ => Err(Error::bad_request("invalid checksum algorithm")),
	}
}

/// Extract the value of any of the x-amz-checksum-* headers
pub(crate) fn request_checksum_value(
	headers: &HeaderMap<HeaderValue>,
) -> Result<Option<ChecksumValue>, Error> {
	let mut ret = vec![];

	if let Some(crc32_str) = headers.get(X_AMZ_CHECKSUM_CRC32) {
		let crc32 = BASE64_STANDARD
			.decode(&crc32_str)
			.ok()
			.and_then(|x| x.try_into().ok())
			.ok_or_bad_request("invalid x-amz-checksum-crc32 header")?;
		ret.push(ChecksumValue::Crc32(crc32))
	}
	if let Some(crc32c_str) = headers.get(X_AMZ_CHECKSUM_CRC32C) {
		let crc32c = BASE64_STANDARD
			.decode(&crc32c_str)
			.ok()
			.and_then(|x| x.try_into().ok())
			.ok_or_bad_request("invalid x-amz-checksum-crc32c header")?;
		ret.push(ChecksumValue::Crc32c(crc32c))
	}
	if let Some(sha1_str) = headers.get(X_AMZ_CHECKSUM_SHA1) {
		let sha1 = BASE64_STANDARD
			.decode(&sha1_str)
			.ok()
			.and_then(|x| x.try_into().ok())
			.ok_or_bad_request("invalid x-amz-checksum-sha1 header")?;
		ret.push(ChecksumValue::Sha1(sha1))
	}
	if let Some(sha256_str) = headers.get(X_AMZ_CHECKSUM_SHA256) {
		let sha256 = BASE64_STANDARD
			.decode(&sha256_str)
			.ok()
			.and_then(|x| x.try_into().ok())
			.ok_or_bad_request("invalid x-amz-checksum-sha256 header")?;
		ret.push(ChecksumValue::Sha256(sha256))
	}

	if ret.len() > 1 {
		return Err(Error::bad_request(
			"multiple x-amz-checksum-* headers given",
		));
	}
	Ok(ret.pop())
}

/// Checks for the presence of x-amz-checksum-algorithm
/// if so extract the corresponding x-amz-checksum-* value
pub(crate) fn request_checksum_algorithm_value(
	headers: &HeaderMap<HeaderValue>,
) -> Result<Option<ChecksumValue>, Error> {
	match headers.get(X_AMZ_CHECKSUM_ALGORITHM) {
		Some(x) if x == "CRC32" => {
			let crc32 = headers
				.get(X_AMZ_CHECKSUM_CRC32)
				.and_then(|x| BASE64_STANDARD.decode(&x).ok())
				.and_then(|x| x.try_into().ok())
				.ok_or_bad_request("invalid x-amz-checksum-crc32 header")?;
			Ok(Some(ChecksumValue::Crc32(crc32)))
		}
		Some(x) if x == "CRC32C" => {
			let crc32c = headers
				.get(X_AMZ_CHECKSUM_CRC32C)
				.and_then(|x| BASE64_STANDARD.decode(&x).ok())
				.and_then(|x| x.try_into().ok())
				.ok_or_bad_request("invalid x-amz-checksum-crc32c header")?;
			Ok(Some(ChecksumValue::Crc32c(crc32c)))
		}
		Some(x) if x == "SHA1" => {
			let sha1 = headers
				.get(X_AMZ_CHECKSUM_SHA1)
				.and_then(|x| BASE64_STANDARD.decode(&x).ok())
				.and_then(|x| x.try_into().ok())
				.ok_or_bad_request("invalid x-amz-checksum-sha1 header")?;
			Ok(Some(ChecksumValue::Sha1(sha1)))
		}
		Some(x) if x == "SHA256" => {
			let sha256 = headers
				.get(X_AMZ_CHECKSUM_SHA256)
				.and_then(|x| BASE64_STANDARD.decode(&x).ok())
				.and_then(|x| x.try_into().ok())
				.ok_or_bad_request("invalid x-amz-checksum-sha256 header")?;
			Ok(Some(ChecksumValue::Sha256(sha256)))
		}
		Some(_) => Err(Error::bad_request("invalid x-amz-checksum-algorithm")),
		None => Ok(None),
	}
}

pub(crate) fn add_checksum_response_headers(
	checksum: &Option<ChecksumValue>,
	mut resp: http::response::Builder,
) -> http::response::Builder {
	match checksum {
		Some(ChecksumValue::Crc32(crc32)) => {
			resp = resp.header(X_AMZ_CHECKSUM_CRC32, BASE64_STANDARD.encode(&crc32));
		}
		Some(ChecksumValue::Crc32c(crc32c)) => {
			resp = resp.header(X_AMZ_CHECKSUM_CRC32C, BASE64_STANDARD.encode(&crc32c));
		}
		Some(ChecksumValue::Sha1(sha1)) => {
			resp = resp.header(X_AMZ_CHECKSUM_SHA1, BASE64_STANDARD.encode(&sha1));
		}
		Some(ChecksumValue::Sha256(sha256)) => {
			resp = resp.header(X_AMZ_CHECKSUM_SHA256, BASE64_STANDARD.encode(&sha256));
		}
		None => (),
	}
	resp
}
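Taken together, the new checksum module forms a small pipeline: decide which hashers to run (init/add), feed them data (update), then finalize into a set of digests that can be compared against what the client announced in its headers. The sketch below is a simplified, self-contained mirror of that flow using only two algorithms and the same crates (crc32fast, sha2, base64); it is illustrative and deliberately omits the MD5/SHA1/CRC32C variants and the verify step.

use base64::prelude::*;
use crc32fast::Hasher as Crc32;
use sha2::{Digest, Sha256};

// Simplified mirror of Checksummer/Checksums with just two algorithms,
// showing the init -> update -> finalize flow used above.
struct MiniChecksummer {
	crc32: Option<Crc32>,
	sha256: Option<Sha256>,
}

struct MiniChecksums {
	crc32: Option<[u8; 4]>,
	sha256: Option<[u8; 32]>,
}

impl MiniChecksummer {
	fn init(want_crc32: bool, want_sha256: bool) -> Self {
		Self {
			crc32: want_crc32.then(Crc32::new),
			sha256: want_sha256.then(Sha256::new),
		}
	}
	fn update(&mut self, bytes: &[u8]) {
		if let Some(c) = &mut self.crc32 {
			c.update(bytes);
		}
		if let Some(s) = &mut self.sha256 {
			s.update(bytes);
		}
	}
	fn finalize(self) -> MiniChecksums {
		MiniChecksums {
			crc32: self.crc32.map(|c| u32::to_be_bytes(c.finalize())),
			sha256: self.sha256.map(|s| s.finalize()[..].try_into().unwrap()),
		}
	}
}

fn main() {
	let mut sums = MiniChecksummer::init(true, true);
	sums.update(b"hello ");
	sums.update(b"world");
	let out = sums.finalize();
	// x-amz-checksum-* headers carry the base64 of the raw digest bytes.
	println!("crc32:  {}", BASE64_STANDARD.encode(out.crc32.unwrap()));
	println!("sha256: {}", BASE64_STANDARD.encode(out.sha256.unwrap()));
}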
@@ -1,43 +1,47 @@
 use std::pin::Pin;
-use std::sync::Arc;
 use std::time::{Duration, SystemTime, UNIX_EPOCH};
 
-use futures::{stream, stream::Stream, StreamExt};
-use md5::{Digest as Md5Digest, Md5};
+use futures::{stream, stream::Stream, StreamExt, TryStreamExt};
 
 use bytes::Bytes;
-use hyper::{Body, Request, Response};
+use hyper::{Request, Response};
 use serde::Serialize;
 
-use garage_rpc::netapp::bytes_buf::BytesBuf;
+use garage_net::bytes_buf::BytesBuf;
+use garage_net::stream::read_stream_to_end;
 use garage_rpc::rpc_helper::OrderTag;
 use garage_table::*;
 use garage_util::data::*;
+use garage_util::error::Error as GarageError;
 use garage_util::time::*;
 
-use garage_model::garage::Garage;
-use garage_model::key_table::Key;
 use garage_model::s3::block_ref_table::*;
 use garage_model::s3::mpu_table::*;
 use garage_model::s3::object_table::*;
 use garage_model::s3::version_table::*;
 
-use crate::helpers::parse_bucket_key;
+use crate::helpers::*;
+use crate::s3::api_server::{ReqBody, ResBody};
+use crate::s3::checksum::*;
+use crate::s3::encryption::EncryptionParams;
 use crate::s3::error::*;
+use crate::s3::get::full_object_byte_stream;
 use crate::s3::multipart;
-use crate::s3::put::get_headers;
+use crate::s3::put::{get_headers, save_stream, ChecksumMode, SaveStreamResult};
 use crate::s3::xml::{self as s3_xml, xmlns_tag};
 
+// -------- CopyObject ---------
+
 pub async fn handle_copy(
-	garage: Arc<Garage>,
-	api_key: &Key,
-	req: &Request<Body>,
-	dest_bucket_id: Uuid,
+	ctx: ReqCtx,
+	req: &Request<ReqBody>,
 	dest_key: &str,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
 	let copy_precondition = CopyPreconditionHeaders::parse(req)?;
 
-	let source_object = get_copy_source(&garage, api_key, req).await?;
+	let checksum_algorithm = request_checksum_algorithm(req.headers())?;
+
+	let source_object = get_copy_source(&ctx, req).await?;
 
 	let (source_version, source_version_data, source_version_meta) =
 		extract_source_info(&source_object)?;
@@ -45,26 +49,150 @@ pub async fn handle_copy(
 	// Check precondition, e.g. x-amz-copy-source-if-match
 	copy_precondition.check(source_version, &source_version_meta.etag)?;
 
+	// Determine encryption parameters
+	let (source_encryption, source_object_meta_inner) =
+		EncryptionParams::check_decrypt_for_copy_source(
+			&ctx.garage,
+			req.headers(),
+			&source_version_meta.encryption,
+		)?;
+	let dest_encryption = EncryptionParams::new_from_headers(&ctx.garage, req.headers())?;
+
+	// Extract source checksum info before source_object_meta_inner is consumed
+	let source_checksum = source_object_meta_inner.checksum;
+	let source_checksum_algorithm = source_checksum.map(|x| x.algorithm());
+
+	// If source object has a checksum, the destination object must as well.
+	// The x-amz-checksum-algorithm header allows to change that algorithm,
+	// but if it is absent, we must use the same as before
+	let checksum_algorithm = checksum_algorithm.or(source_checksum_algorithm);
+
+	// Determine metadata of destination object
+	let was_multipart = source_version_meta.etag.contains('-');
+	let dest_object_meta = ObjectVersionMetaInner {
+		headers: match req.headers().get("x-amz-metadata-directive") {
+			Some(v) if v == hyper::header::HeaderValue::from_static("REPLACE") => {
+				get_headers(req.headers())?
+			}
+			_ => source_object_meta_inner.into_owned().headers,
+		},
+		checksum: source_checksum,
+	};
+
+	// Do actual object copying
+	//
+	// In any of the following scenarios, we need to read the whole object
+	// data and re-write it again:
+	//
+	// - the data needs to be decrypted or encrypted
+	// - the requested checksum algorithm requires us to recompute a checksum
+	// - the original object was a multipart upload and a checksum algorithm
+	//   is defined (AWS specifies that in this case, we must recompute the
+	//   checksum from scratch as if this was a single big object and not
+	//   a multipart object, as the checksums are not computed in the same way)
+	//
+	// In other cases, we can just copy the metadata and reference the same blocks.
+	//
+	// See: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+
+	let must_recopy = !EncryptionParams::is_same(&source_encryption, &dest_encryption)
+		|| source_checksum_algorithm != checksum_algorithm
+		|| (was_multipart && checksum_algorithm.is_some());
+
+	let res = if !must_recopy {
+		// In most cases, we can just copy the metadata and link blocks of the
+		// old object from the new object.
+		handle_copy_metaonly(
+			ctx,
+			dest_key,
+			dest_object_meta,
+			dest_encryption,
+			source_version,
+			source_version_data,
+			source_version_meta,
+		)
+		.await?
+	} else {
+		let expected_checksum = ExpectedChecksums {
+			md5: None,
+			sha256: None,
+			extra: source_checksum,
+		};
+		let checksum_mode = if was_multipart || source_checksum_algorithm != checksum_algorithm {
+			ChecksumMode::Calculate(checksum_algorithm)
+		} else {
+			ChecksumMode::Verify(&expected_checksum)
+		};
+		// If source and dest encryption use different keys,
+		// we must decrypt content and re-encrypt, so rewrite all data blocks.
+		handle_copy_reencrypt(
+			ctx,
+			dest_key,
+			dest_object_meta,
+			dest_encryption,
+			source_version,
+			source_version_data,
+			source_encryption,
+			checksum_mode,
+		)
+		.await?
+	};
+
+	let last_modified = msec_to_rfc3339(res.version_timestamp);
+	let result = CopyObjectResult {
+		last_modified: s3_xml::Value(last_modified),
+		etag: s3_xml::Value(format!("\"{}\"", res.etag)),
+	};
+	let xml = s3_xml::to_xml_with_header(&result)?;
+
+	let mut resp = Response::builder()
+		.header("Content-Type", "application/xml")
+		.header("x-amz-version-id", hex::encode(res.version_uuid))
+		.header(
+			"x-amz-copy-source-version-id",
+			hex::encode(source_version.uuid),
+		);
+	dest_encryption.add_response_headers(&mut resp);
+	Ok(resp.body(string_body(xml))?)
+}
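The fast-path versus re-write decision above can be stated as a pure function of three facts: whether the encryption parameters match, whether the checksum algorithm changes, and whether the source was a multipart upload while a checksum is requested. A tiny self-contained restatement of that predicate (the enums are stand-ins for the real EncryptionParams and ChecksumAlgorithm types):

#[derive(PartialEq, Clone, Copy)]
enum Encryption {
	Plaintext,
	SseC { key_fingerprint: u64 },
}

#[derive(PartialEq, Clone, Copy, Debug)]
enum Algo {
	Crc32,
	Sha256,
}

// Mirrors the `must_recopy` expression in handle_copy: any of these
// conditions forces reading and re-writing all object data.
fn must_recopy(
	source_enc: Encryption,
	dest_enc: Encryption,
	source_algo: Option<Algo>,
	dest_algo: Option<Algo>,
	was_multipart: bool,
) -> bool {
	source_enc != dest_enc
		|| source_algo != dest_algo
		|| (was_multipart && dest_algo.is_some())
}

fn main() {
	// Same keys, same checksum algorithm, single-part source: metadata-only copy.
	assert!(!must_recopy(Encryption::Plaintext, Encryption::Plaintext, None, None, false));
	// Re-encrypting with a different key forces a full re-copy.
	assert!(must_recopy(
		Encryption::Plaintext,
		Encryption::SseC { key_fingerprint: 42 },
		None,
		None,
		false
	));
	// Multipart source plus a requested checksum: recompute from scratch.
	assert!(must_recopy(Encryption::Plaintext, Encryption::Plaintext, Some(Algo::Sha256), Some(Algo::Sha256), true));
}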
+async fn handle_copy_metaonly(
+	ctx: ReqCtx,
+	dest_key: &str,
+	dest_object_meta: ObjectVersionMetaInner,
+	dest_encryption: EncryptionParams,
+	source_version: &ObjectVersion,
+	source_version_data: &ObjectVersionData,
+	source_version_meta: &ObjectVersionMeta,
+) -> Result<SaveStreamResult, Error> {
+	let ReqCtx {
+		garage,
+		bucket_id: dest_bucket_id,
+		..
+	} = ctx;
+
 	// Generate parameters for copied object
 	let new_uuid = gen_uuid();
 	let new_timestamp = now_msec();
 
-	// Implement x-amz-metadata-directive: REPLACE
-	let new_meta = match req.headers().get("x-amz-metadata-directive") {
-		Some(v) if v == hyper::header::HeaderValue::from_static("REPLACE") => ObjectVersionMeta {
-			headers: get_headers(req.headers())?,
-			size: source_version_meta.size,
-			etag: source_version_meta.etag.clone(),
-		},
-		_ => source_version_meta.clone(),
-	};
+	let new_meta = ObjectVersionMeta {
+		encryption: dest_encryption.encrypt_meta(dest_object_meta)?,
+		size: source_version_meta.size,
+		etag: source_version_meta.etag.clone(),
+	};
 
-	let etag = new_meta.etag.to_string();
+	let res = SaveStreamResult {
+		version_uuid: new_uuid,
+		version_timestamp: new_timestamp,
+		etag: new_meta.etag.clone(),
+	};
 
 	// Save object copy
 	match source_version_data {
 		ObjectVersionData::DeleteMarker => unreachable!(),
 		ObjectVersionData::Inline(_meta, bytes) => {
+			// bytes is either plaintext before&after or encrypted with the
+			// same keys, so it's ok to just copy it as is
 			let dest_object_version = ObjectVersion {
 				uuid: new_uuid,
 				timestamp: new_timestamp,
@@ -95,7 +223,8 @@ handle_copy_metaonly
 				uuid: new_uuid,
 				timestamp: new_timestamp,
 				state: ObjectVersionState::Uploading {
-					headers: new_meta.headers.clone(),
+					encryption: new_meta.encryption.clone(),
+					checksum_algorithm: None,
 					multipart: false,
 				},
 			};
@@ -162,48 +291,85 @@ handle_copy_metaonly
 		}
 	}
 
-	let last_modified = msec_to_rfc3339(new_timestamp);
-	let result = CopyObjectResult {
-		last_modified: s3_xml::Value(last_modified),
-		etag: s3_xml::Value(format!("\"{}\"", etag)),
-	};
-	let xml = s3_xml::to_xml_with_header(&result)?;
-
-	Ok(Response::builder()
-		.header("Content-Type", "application/xml")
-		.header("x-amz-version-id", hex::encode(new_uuid))
-		.header(
-			"x-amz-copy-source-version-id",
-			hex::encode(source_version.uuid),
-		)
-		.body(Body::from(xml))?)
+	Ok(res)
 }
+
+async fn handle_copy_reencrypt(
+	ctx: ReqCtx,
+	dest_key: &str,
+	dest_object_meta: ObjectVersionMetaInner,
+	dest_encryption: EncryptionParams,
+	source_version: &ObjectVersion,
+	source_version_data: &ObjectVersionData,
+	source_encryption: EncryptionParams,
+	checksum_mode: ChecksumMode<'_>,
+) -> Result<SaveStreamResult, Error> {
+	// basically we will read the source data (decrypt if necessary)
+	// and save that in a new object (encrypt if necessary),
+	// by combining the code used in getobject and putobject
+	let source_stream = full_object_byte_stream(
+		ctx.garage.clone(),
+		source_version,
+		source_version_data,
+		source_encryption,
+	);
+
+	save_stream(
+		&ctx,
+		dest_object_meta,
+		dest_encryption,
+		source_stream.map_err(|e| Error::from(GarageError::from(e))),
+		&dest_key.to_string(),
+		checksum_mode,
+	)
+	.await
+}
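handle_copy_reencrypt is essentially a GET piped into a PUT: a stream of decrypted bytes from the source is handed to the generic save path, with stream errors converted into the API error type via map_err. The following is a minimal stand-alone sketch of that composition using futures::stream; the error and saver types here are invented for the example and do not correspond to Garage's actual save_stream.

use futures::stream::{self, TryStreamExt};

#[derive(Debug)]
struct ApiError(String);

// Pretend "save path": consumes a fallible stream of chunks and returns how
// many bytes were written, in the spirit of save_stream's SaveStreamResult.
async fn save_stream<S>(mut body: S) -> Result<usize, ApiError>
where
	S: futures::Stream<Item = Result<Vec<u8>, ApiError>> + Unpin,
{
	let mut total = 0;
	while let Some(chunk) = body.try_next().await? {
		total += chunk.len();
	}
	Ok(total)
}

#[tokio::main]
async fn main() {
	// Pretend "get path": the source object as a stream of chunks whose
	// error type differs from the API error (here: a plain String).
	let source = stream::iter(vec![
		Ok::<_, String>(b"hello ".to_vec()),
		Ok(b"world".to_vec()),
	]);

	// Same shape as `source_stream.map_err(|e| Error::from(...))` above.
	let adapted = source.map_err(ApiError);
	let written = save_stream(adapted).await.unwrap();
	assert_eq!(written, 11);
}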
||||||
|
// -------- UploadPartCopy ---------
|
||||||
|
|
||||||
pub async fn handle_upload_part_copy(
|
pub async fn handle_upload_part_copy(
|
||||||
garage: Arc<Garage>,
|
ctx: ReqCtx,
|
||||||
api_key: &Key,
|
req: &Request<ReqBody>,
|
||||||
req: &Request<Body>,
|
|
||||||
dest_bucket_id: Uuid,
|
|
||||||
dest_key: &str,
|
dest_key: &str,
|
||||||
part_number: u64,
|
part_number: u64,
|
||||||
upload_id: &str,
|
upload_id: &str,
|
||||||
) -> Result<Response<Body>, Error> {
|
) -> Result<Response<ResBody>, Error> {
|
||||||
let copy_precondition = CopyPreconditionHeaders::parse(req)?;
|
let copy_precondition = CopyPreconditionHeaders::parse(req)?;
|
||||||
|
|
||||||
let dest_upload_id = multipart::decode_upload_id(upload_id)?;
|
let dest_upload_id = multipart::decode_upload_id(upload_id)?;
|
||||||
|
|
||||||
let dest_key = dest_key.to_string();
|
let dest_key = dest_key.to_string();
|
||||||
let (source_object, (_, _, mut dest_mpu)) = futures::try_join!(
|
let (source_object, (_, dest_version, mut dest_mpu)) = futures::try_join!(
|
||||||
get_copy_source(&garage, api_key, req),
|
get_copy_source(&ctx, req),
|
||||||
multipart::get_upload(&garage, &dest_bucket_id, &dest_key, &dest_upload_id)
|
multipart::get_upload(&ctx, &dest_key, &dest_upload_id)
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
|
let ReqCtx { garage, .. } = ctx;
|
||||||
|
|
||||||
let (source_object_version, source_version_data, source_version_meta) =
|
let (source_object_version, source_version_data, source_version_meta) =
|
||||||
extract_source_info(&source_object)?;
|
extract_source_info(&source_object)?;
|
||||||
|
|
||||||
// Check precondition on source, e.g. x-amz-copy-source-if-match
|
// Check precondition on source, e.g. x-amz-copy-source-if-match
|
||||||
copy_precondition.check(source_object_version, &source_version_meta.etag)?;
|
copy_precondition.check(source_object_version, &source_version_meta.etag)?;
|
||||||
|
|
||||||
|
// Determine encryption parameters
|
||||||
|
let (source_encryption, _) = EncryptionParams::check_decrypt_for_copy_source(
|
||||||
|
&garage,
|
||||||
|
req.headers(),
|
||||||
|
&source_version_meta.encryption,
|
||||||
|
)?;
|
||||||
|
let (dest_object_encryption, dest_object_checksum_algorithm) = match dest_version.state {
|
||||||
|
ObjectVersionState::Uploading {
|
||||||
|
encryption,
|
||||||
|
checksum_algorithm,
|
||||||
|
..
|
||||||
|
} => (encryption, checksum_algorithm),
|
||||||
|
_ => unreachable!(),
|
||||||
|
};
|
||||||
|
let (dest_encryption, _) =
|
||||||
|
EncryptionParams::check_decrypt(&garage, req.headers(), &dest_object_encryption)?;
|
||||||
|
let same_encryption = EncryptionParams::is_same(&source_encryption, &dest_encryption);
|
||||||
|
|
||||||
// Check source range is valid
|
// Check source range is valid
|
||||||
let source_range = match req.headers().get("x-amz-copy-source-range") {
|
let source_range = match req.headers().get("x-amz-copy-source-range") {
|
||||||
Some(range) => {
|
Some(range) => {
|
||||||
|
@ -225,21 +391,16 @@ pub async fn handle_upload_part_copy(
|
||||||
};
|
};
|
||||||
|
|
||||||
// Check source version is not inlined
|
// Check source version is not inlined
|
||||||
match source_version_data {
|
if matches!(source_version_data, ObjectVersionData::Inline(_, _)) {
|
||||||
ObjectVersionData::DeleteMarker => unreachable!(),
|
// This is only for small files, we don't bother handling this.
|
||||||
ObjectVersionData::Inline(_meta, _bytes) => {
|
// (in AWS UploadPartCopy works for parts at least 5MB which
|
||||||
// This is only for small files, we don't bother handling this.
|
// is never the case of an inline object)
|
||||||
// (in AWS UploadPartCopy works for parts at least 5MB which
|
return Err(Error::bad_request(
|
||||||
// is never the case of an inline object)
|
"Source object is too small (minimum part size is 5Mb)",
|
||||||
return Err(Error::bad_request(
|
));
|
||||||
"Source object is too small (minimum part size is 5Mb)",
|
}
|
||||||
));
|
|
||||||
}
|
|
||||||
ObjectVersionData::FirstBlock(_meta, _first_block_hash) => (),
|
|
||||||
};
|
|
||||||
|
|
||||||
// Fetch source versin with its block list,
|
// Fetch source version with its block list
|
||||||
// and destination version to check part hasn't yet been uploaded
|
|
||||||
let source_version = garage
|
let source_version = garage
|
||||||
.version_table
|
.version_table
|
||||||
.get(&source_object_version.uuid, &EmptyKey)
|
.get(&source_object_version.uuid, &EmptyKey)
|
||||||
|
@ -249,7 +410,9 @@ pub async fn handle_upload_part_copy(
|
||||||
// We want to reuse blocks from the source version as much as possible.
|
// We want to reuse blocks from the source version as much as possible.
|
||||||
// However, we still need to get the data from these blocks
|
// However, we still need to get the data from these blocks
|
||||||
// because we need to know it to calculate the MD5sum of the part
|
// because we need to know it to calculate the MD5sum of the part
|
||||||
// which is used as its ETag.
|
// which is used as its ETag. For encrypted sources or destinations,
|
||||||
|
// we must always read(+decrypt) and then write(+encrypt), so we
|
||||||
|
// can never reuse data blocks as is.
|
||||||
|
|
||||||
// First, calculate what blocks we want to keep,
|
// First, calculate what blocks we want to keep,
|
||||||
// and the subrange of the block to take, if the bounds of the
|
// and the subrange of the block to take, if the bounds of the
|
||||||
|
@ -298,7 +461,9 @@ pub async fn handle_upload_part_copy(
|
||||||
dest_mpu_part_key,
|
dest_mpu_part_key,
|
||||||
MpuPart {
|
MpuPart {
|
||||||
version: dest_version_id,
|
version: dest_version_id,
|
||||||
|
// These are all filled in later (bottom of this function)
|
||||||
etag: None,
|
etag: None,
|
||||||
|
checksum: None,
|
||||||
size: None,
|
size: None,
|
||||||
},
|
},
|
||||||
);
|
);
|
||||||
|
@ -311,32 +476,55 @@ pub async fn handle_upload_part_copy(
|
||||||
},
|
},
|
||||||
false,
|
false,
|
||||||
);
|
);
|
||||||
|
// write an empty version now to be the parent of the block_ref entries
|
||||||
|
garage.version_table.insert(&dest_version).await?;
|
||||||
|
|
||||||
// Now, actually copy the blocks
|
// Now, actually copy the blocks
|
||||||
let mut md5hasher = Md5::new();
|
let mut checksummer = Checksummer::init(&Default::default(), !dest_encryption.is_encrypted())
|
||||||
|
.add(dest_object_checksum_algorithm);
|
||||||
|
|
||||||
// First, create a stream that is able to read the source blocks
|
// First, create a stream that is able to read the source blocks
|
||||||
// and extract the subrange if necessary.
|
// and extract the subrange if necessary.
|
||||||
// The second returned value is an Option<Hash>, that is Some
|
// The second returned value is an Option<Hash>, that is Some
|
||||||
// if and only if the block returned is a block that already existed
|
// if and only if the block returned is a block that already existed
|
||||||
// in the Garage data store (thus we don't need to save it again).
|
// in the Garage data store and can be reused as-is instead of having
|
||||||
|
// to save it again. This excludes encrypted source blocks that we had
|
||||||
|
// to decrypt.
|
||||||
let garage2 = garage.clone();
|
let garage2 = garage.clone();
|
||||||
let order_stream = OrderTag::stream();
|
let order_stream = OrderTag::stream();
|
||||||
let source_blocks = stream::iter(blocks_to_copy)
|
let source_blocks = stream::iter(blocks_to_copy)
|
||||||
.enumerate()
|
.enumerate()
|
||||||
.flat_map(|(i, (block_hash, range_to_copy))| {
|
.map(|(i, (block_hash, range_to_copy))| {
|
||||||
let garage3 = garage2.clone();
|
let garage3 = garage2.clone();
|
||||||
stream::once(async move {
|
async move {
|
||||||
let data = garage3
|
let stream = source_encryption
|
||||||
.block_manager
|
.get_block(&garage3, &block_hash, Some(order_stream.order(i as u64)))
|
||||||
.rpc_get_block(&block_hash, Some(order_stream.order(i as u64)))
|
|
||||||
.await?;
|
.await?;
|
||||||
|
let data = read_stream_to_end(stream).await?.into_bytes();
|
||||||
|
// For each item, we return a tuple of:
|
||||||
|
// 1. the full data block (decrypted)
|
||||||
|
// 2. an Option<Hash> that indicates the hash of the block in the block store,
|
||||||
|
// only if it can be re-used as-is in the copied object
|
||||||
match range_to_copy {
|
match range_to_copy {
|
||||||
Some(r) => Ok((data.slice(r), None)),
|
Some(r) => {
|
||||||
None => Ok((data, Some(block_hash))),
|
// If we are taking a subslice of the data, we cannot reuse the block as-is
|
||||||
|
Ok((data.slice(r), None))
|
||||||
|
}
|
||||||
|
None if same_encryption => {
|
||||||
|
// If the data is unencrypted before & after, or if we are using
|
||||||
|
// the same encryption key, we can reuse the stored block, no need
|
||||||
|
// to re-send it to storage nodes.
|
||||||
|
Ok((data, Some(block_hash)))
|
||||||
|
}
|
||||||
|
None => {
|
||||||
|
// If we are decrypting / (re)encrypting with different keys,
|
||||||
|
// we cannot reuse the block as-is
|
||||||
|
Ok((data, None))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
})
|
}
|
||||||
})
|
})
|
||||||
|
.buffered(2)
|
||||||
.peekable();
|
.peekable();
|
||||||
|
|
||||||
// The defragmenter is a custom stream (defined below) that concatenates
|
// The defragmenter is a custom stream (defined below) that concatenates
|
||||||
|
@@ -344,22 +532,39 @@ pub async fn handle_upload_part_copy(
	// It returns a series of (Vec<u8>, Option<Hash>).
	// When it is done, it returns an empty vec.
	// Same as the previous iterator, the Option is Some(_) if and only if
-	// it's an existing block of the Garage data store.
+	// it's an existing block of the Garage data store that can be reused.
	let mut defragmenter = Defragmenter::new(garage.config.block_size, Box::pin(source_blocks));

	let mut current_offset = 0;
	let mut next_block = defragmenter.next().await?;

+	// TODO this could be optimized similarly to read_and_put_blocks
+	// low priority because uploadpartcopy is rarely used
	loop {
		let (data, existing_block_hash) = next_block;
		if data.is_empty() {
			break;
		}

-		md5hasher.update(&data[..]);
+		let data_len = data.len() as u64;

-		let must_upload = existing_block_hash.is_none();
-		let final_hash = existing_block_hash.unwrap_or_else(|| blake2sum(&data[..]));
+		let (checksummer_updated, (data_to_upload, final_hash)) =
+			tokio::task::spawn_blocking(move || {
+				checksummer.update(&data[..]);
+
+				let tup = match existing_block_hash {
+					Some(hash) if same_encryption => (None, hash),
+					_ => {
+						let data_enc = dest_encryption.encrypt_block(data)?;
+						let hash = blake2sum(&data_enc);
+						(Some(data_enc), hash)
+					}
+				};
+				Ok::<_, Error>((checksummer, tup))
+			})
+			.await
+			.unwrap()?;
+		checksummer = checksummer_updated;

		dest_version.blocks.clear();
		dest_version.blocks.put(
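The spawn_blocking call above moves the checksummer and the data onto a blocking thread and hands the checksummer back when done, so the async loop can keep updating it. A generic sketch of that move-in / move-out pattern, using sha2 as a stand-in for the patch's own Checksummer type:

	use sha2::{Digest, Sha256};

	async fn hash_chunk(mut hasher: Sha256, chunk: Vec<u8>) -> Sha256 {
		tokio::task::spawn_blocking(move || {
			// CPU-heavy work runs off the async executor threads
			hasher.update(&chunk);
			hasher // ownership returns to the caller through the JoinHandle
		})
		.await
		.expect("blocking task panicked")
	}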
@@ -369,10 +574,10 @@ pub async fn handle_upload_part_copy(
			},
			VersionBlock {
				hash: final_hash,
-				size: data.len() as u64,
+				size: data_len,
			},
		);
-		current_offset += data.len() as u64;
+		current_offset += data_len;

		let block_ref = BlockRef {
			block: final_hash,
@@ -380,33 +585,34 @@ pub async fn handle_upload_part_copy(
			deleted: false.into(),
		};

-		let garage2 = garage.clone();
-		let res = futures::try_join!(
+		let (_, _, _, next) = futures::try_join!(
			// Thing 1: if the block is not exactly a block that existed before,
			// we need to insert that data as a new block.
-			async move {
-				if must_upload {
-					garage2.block_manager.rpc_put_block(final_hash, data).await
+			async {
+				if let Some(final_data) = data_to_upload {
+					garage
+						.block_manager
+						.rpc_put_block(final_hash, final_data, dest_encryption.is_encrypted(), None)
+						.await
				} else {
					Ok(())
				}
			},
-			async {
-				// Thing 2: we need to insert the block in the version
-				garage.version_table.insert(&dest_version).await?;
-				// Thing 3: we need to add a block reference
-				garage.block_ref_table.insert(&block_ref).await
-			},
-			// Thing 4: we need to prefetch the next block
+			// Thing 2: we need to insert the block in the version
+			garage.version_table.insert(&dest_version),
+			// Thing 3: we need to add a block reference
+			garage.block_ref_table.insert(&block_ref),
+			// Thing 4: we need to read the next block
			defragmenter.next(),
		)?;
-		next_block = res.2;
+		next_block = next;
	}

	assert_eq!(current_offset, source_range.length);

-	let data_md5sum = md5hasher.finalize();
-	let etag = hex::encode(data_md5sum);
+	let checksums = checksummer.finalize();
+	let etag = dest_encryption.etag_from_md5(&checksums.md5);
+	let checksum = checksums.extract(dest_object_checksum_algorithm);

	// Put the part's ETag in the Versiontable
	dest_mpu.parts.put(
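For readers unfamiliar with futures::try_join!: all four futures above run concurrently, the first Err aborts the whole join, and on success the results come back as a tuple, which is why the new code binds (_, _, _, next). A tiny self-contained illustration, with String as a stand-in error type:

	use std::time::Duration;

	async fn ok_after(ms: u64, v: u32) -> Result<u32, String> {
		tokio::time::sleep(Duration::from_millis(ms)).await;
		Ok(v)
	}

	#[tokio::main]
	async fn main() -> Result<(), String> {
		// the three futures make progress concurrently; errors short-circuit
		let (a, b, c) = futures::try_join!(ok_after(5, 1), ok_after(1, 2), ok_after(3, 3))?;
		assert_eq!((a, b, c), (1, 2, 3));
		Ok(())
	}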
@@ -414,6 +620,7 @@ pub async fn handle_upload_part_copy(
		MpuPart {
			version: dest_version_id,
			etag: Some(etag.clone()),
+			checksum,
			size: Some(current_offset),
		},
	);
@@ -426,20 +633,21 @@ pub async fn handle_upload_part_copy(
		last_modified: s3_xml::Value(msec_to_rfc3339(source_object_version.timestamp)),
	})?;

-	Ok(Response::builder()
+	let mut resp = Response::builder()
		.header("Content-Type", "application/xml")
		.header(
			"x-amz-copy-source-version-id",
			hex::encode(source_object_version.uuid),
-		)
-		.body(Body::from(resp_xml))?)
+		);
+	dest_encryption.add_response_headers(&mut resp);
+	Ok(resp.body(string_body(resp_xml))?)
}

-async fn get_copy_source(
-	garage: &Garage,
-	api_key: &Key,
-	req: &Request<Body>,
-) -> Result<Object, Error> {
+async fn get_copy_source(ctx: &ReqCtx, req: &Request<ReqBody>) -> Result<Object, Error> {
+	let ReqCtx {
+		garage, api_key, ..
+	} = ctx;
+
	let copy_source = req.headers().get("x-amz-copy-source").unwrap().to_str()?;
	let copy_source = percent_encoding::percent_decode_str(copy_source).decode_utf8()?;
@@ -501,7 +709,7 @@ struct CopyPreconditionHeaders {
}

impl CopyPreconditionHeaders {
-	fn parse(req: &Request<Body>) -> Result<Self, Error> {
+	fn parse(req: &Request<ReqBody>) -> Result<Self, Error> {
		Ok(Self {
			copy_source_if_match: req
				.headers()
src/api/s3/cors.rs

@@ -5,24 +5,29 @@ use http::header::{
	ACCESS_CONTROL_ALLOW_HEADERS, ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN,
	ACCESS_CONTROL_EXPOSE_HEADERS, ACCESS_CONTROL_REQUEST_HEADERS, ACCESS_CONTROL_REQUEST_METHOD,
};
-use hyper::{header::HeaderName, Body, Method, Request, Response, StatusCode};
+use hyper::{
+	body::Body, body::Incoming as IncomingBody, header::HeaderName, Method, Request, Response,
+	StatusCode,
+};
+
+use http_body_util::BodyExt;

use serde::{Deserialize, Serialize};

+use crate::common_error::CommonError;
+use crate::helpers::*;
+use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::error::*;
use crate::s3::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
use crate::signature::verify_signed_content;

-use garage_model::bucket_table::{Bucket, CorsRule as GarageCorsRule};
+use garage_model::bucket_table::{Bucket, BucketParams, CorsRule as GarageCorsRule};
use garage_model::garage::Garage;
use garage_util::data::*;

-pub async fn handle_get_cors(bucket: &Bucket) -> Result<Response<Body>, Error> {
-	let param = bucket
-		.params()
-		.ok_or_internal_error("Bucket should not be deleted at this point")?;
-
-	if let Some(cors) = param.cors_config.get() {
+pub async fn handle_get_cors(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
+	let ReqCtx { bucket_params, .. } = ctx;
+	if let Some(cors) = bucket_params.cors_config.get() {
		let wc = CorsConfiguration {
			xmlns: (),
			cors_rules: cors

@@ -34,64 +39,71 @@ pub async fn handle_get_cors(bucket: &Bucket) -> Result<Response<Body>, Error> {
		Ok(Response::builder()
			.status(StatusCode::OK)
			.header(http::header::CONTENT_TYPE, "application/xml")
-			.body(Body::from(xml))?)
+			.body(string_body(xml))?)
	} else {
		Ok(Response::builder()
			.status(StatusCode::NO_CONTENT)
-			.body(Body::empty())?)
+			.body(empty_body())?)
	}
}

-pub async fn handle_delete_cors(
-	garage: Arc<Garage>,
-	mut bucket: Bucket,
-) -> Result<Response<Body>, Error> {
-	let param = bucket
-		.params_mut()
-		.ok_or_internal_error("Bucket should not be deleted at this point")?;
-
-	param.cors_config.update(None);
-	garage.bucket_table.insert(&bucket).await?;
+pub async fn handle_delete_cors(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
+	let ReqCtx {
+		garage,
+		bucket_id,
+		mut bucket_params,
+		..
+	} = ctx;
+	bucket_params.cors_config.update(None);
+	garage
+		.bucket_table
+		.insert(&Bucket::present(bucket_id, bucket_params))
+		.await?;

	Ok(Response::builder()
		.status(StatusCode::NO_CONTENT)
-		.body(Body::empty())?)
+		.body(empty_body())?)
}

pub async fn handle_put_cors(
-	garage: Arc<Garage>,
-	mut bucket: Bucket,
-	req: Request<Body>,
+	ctx: ReqCtx,
+	req: Request<ReqBody>,
	content_sha256: Option<Hash>,
-) -> Result<Response<Body>, Error> {
-	let body = hyper::body::to_bytes(req.into_body()).await?;
+) -> Result<Response<ResBody>, Error> {
+	let ReqCtx {
+		garage,
+		bucket_id,
+		mut bucket_params,
+		..
+	} = ctx;
+
+	let body = BodyExt::collect(req.into_body()).await?.to_bytes();

	if let Some(content_sha256) = content_sha256 {
		verify_signed_content(content_sha256, &body[..])?;
	}

-	let param = bucket
-		.params_mut()
-		.ok_or_internal_error("Bucket should not be deleted at this point")?;
-
	let conf: CorsConfiguration = from_reader(&body as &[u8])?;
	conf.validate()?;

-	param
+	bucket_params
		.cors_config
		.update(Some(conf.into_garage_cors_config()?));
-	garage.bucket_table.insert(&bucket).await?;
+	garage
+		.bucket_table
+		.insert(&Bucket::present(bucket_id, bucket_params))
+		.await?;

	Ok(Response::builder()
		.status(StatusCode::OK)
-		.body(Body::empty())?)
+		.body(empty_body())?)
}

-pub async fn handle_options_s3api(
+pub async fn handle_options_api(
	garage: Arc<Garage>,
-	req: &Request<Body>,
+	req: &Request<IncomingBody>,
	bucket_name: Option<String>,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<EmptyBody>, CommonError> {
	// FIXME: CORS rules of buckets with local aliases are
	// not taken into account.

@@ -107,7 +119,8 @@ pub async fn handle_options_s3api(
		let bucket_id = helper.resolve_global_bucket_name(&bn).await?;
		if let Some(id) = bucket_id {
			let bucket = garage.bucket_helper().get_existing_bucket(id).await?;
-			handle_options_for_bucket(req, &bucket)
+			let bucket_params = bucket.state.into_option().unwrap();
+			handle_options_for_bucket(req, &bucket_params)
		} else {
			// If there is a bucket name in the request, but that name
			// does not correspond to a global alias for a bucket,

@@ -121,7 +134,7 @@ pub async fn handle_options_s3api(
				.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
				.header(ACCESS_CONTROL_ALLOW_METHODS, "*")
				.status(StatusCode::OK)
-				.body(Body::empty())?)
+				.body(EmptyBody::new())?)
		}
	} else {
		// If there is no bucket name in the request,

@@ -131,14 +144,14 @@ pub async fn handle_options_s3api(
			.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
			.header(ACCESS_CONTROL_ALLOW_METHODS, "GET")
			.status(StatusCode::OK)
-			.body(Body::empty())?)
+			.body(EmptyBody::new())?)
	}
}

pub fn handle_options_for_bucket(
-	req: &Request<Body>,
-	bucket: &Bucket,
-) -> Result<Response<Body>, Error> {
+	req: &Request<IncomingBody>,
+	bucket_params: &BucketParams,
+) -> Result<Response<EmptyBody>, CommonError> {
	let origin = req
		.headers()
		.get("Origin")

@@ -154,27 +167,29 @@ pub fn handle_options_for_bucket(
		None => vec![],
	};

-	if let Some(cors_config) = bucket.params().unwrap().cors_config.get() {
+	if let Some(cors_config) = bucket_params.cors_config.get() {
		let matching_rule = cors_config
			.iter()
			.find(|rule| cors_rule_matches(rule, origin, request_method, request_headers.iter()));
		if let Some(rule) = matching_rule {
			let mut resp = Response::builder()
				.status(StatusCode::OK)
-				.body(Body::empty())?;
+				.body(EmptyBody::new())?;
			add_cors_headers(&mut resp, rule).ok_or_internal_error("Invalid CORS configuration")?;
			return Ok(resp);
		}
	}

-	Err(Error::forbidden("This CORS request is not allowed."))
+	Err(CommonError::Forbidden(
+		"This CORS request is not allowed.".into(),
+	))
}

pub fn find_matching_cors_rule<'a>(
-	bucket: &'a Bucket,
-	req: &Request<Body>,
+	bucket_params: &'a BucketParams,
+	req: &Request<impl Body>,
) -> Result<Option<&'a GarageCorsRule>, Error> {
-	if let Some(cors_config) = bucket.params().unwrap().cors_config.get() {
+	if let Some(cors_config) = bucket_params.cors_config.get() {
		if let Some(origin) = req.headers().get("Origin") {
			let origin = origin.to_str()?;
			let request_headers = match req.headers().get(ACCESS_CONTROL_REQUEST_HEADERS) {

@@ -209,7 +224,7 @@ where
}

pub fn add_cors_headers(
-	resp: &mut Response<Body>,
+	resp: &mut Response<impl Body>,
	rule: &GarageCorsRule,
) -> Result<(), http::header::InvalidHeaderValue> {
	let h = resp.headers_mut();
src/api/s3/delete.rs

@@ -1,25 +1,24 @@
-use std::sync::Arc;
-
-use hyper::{Body, Request, Response, StatusCode};
+use http_body_util::BodyExt;
+use hyper::{Request, Response, StatusCode};

use garage_util::data::*;

-use garage_model::garage::Garage;
use garage_model::s3::object_table::*;

+use crate::helpers::*;
+use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::error::*;
use crate::s3::put::next_timestamp;
use crate::s3::xml as s3_xml;
use crate::signature::verify_signed_content;

-async fn handle_delete_internal(
-	garage: &Garage,
-	bucket_id: Uuid,
-	key: &str,
-) -> Result<(Uuid, Uuid), Error> {
+async fn handle_delete_internal(ctx: &ReqCtx, key: &str) -> Result<(Uuid, Uuid), Error> {
+	let ReqCtx {
+		garage, bucket_id, ..
+	} = ctx;
	let object = garage
		.object_table
-		.get(&bucket_id, &key.to_string())
+		.get(bucket_id, &key.to_string())
		.await?
		.ok_or(Error::NoSuchKey)?; // No need to delete

@@ -41,7 +40,7 @@ async fn handle_delete_internal(
	};

	let object = Object::new(
-		bucket_id,
+		*bucket_id,
		key.into(),
		vec![ObjectVersion {
			uuid: del_uuid,

@@ -55,27 +54,22 @@ async fn handle_delete_internal(
	Ok((deleted_version, del_uuid))
}

-pub async fn handle_delete(
-	garage: Arc<Garage>,
-	bucket_id: Uuid,
-	key: &str,
-) -> Result<Response<Body>, Error> {
-	match handle_delete_internal(&garage, bucket_id, key).await {
+pub async fn handle_delete(ctx: ReqCtx, key: &str) -> Result<Response<ResBody>, Error> {
+	match handle_delete_internal(&ctx, key).await {
		Ok(_) | Err(Error::NoSuchKey) => Ok(Response::builder()
			.status(StatusCode::NO_CONTENT)
-			.body(Body::from(vec![]))
+			.body(empty_body())
			.unwrap()),
		Err(e) => Err(e),
	}
}

pub async fn handle_delete_objects(
-	garage: Arc<Garage>,
-	bucket_id: Uuid,
-	req: Request<Body>,
+	ctx: ReqCtx,
+	req: Request<ReqBody>,
	content_sha256: Option<Hash>,
-) -> Result<Response<Body>, Error> {
-	let body = hyper::body::to_bytes(req.into_body()).await?;
+) -> Result<Response<ResBody>, Error> {
+	let body = BodyExt::collect(req.into_body()).await?.to_bytes();

	if let Some(content_sha256) = content_sha256 {
		verify_signed_content(content_sha256, &body[..])?;

@@ -88,7 +82,7 @@ pub async fn handle_delete_objects(
	let mut ret_errors = Vec::new();

	for obj in cmd.objects.iter() {
-		match handle_delete_internal(&garage, bucket_id, &obj.key).await {
+		match handle_delete_internal(&ctx, &obj.key).await {
			Ok((deleted_version, delete_marker_version)) => {
				if cmd.quiet {
					continue;

@@ -118,7 +112,7 @@ pub async fn handle_delete_objects(
	Ok(Response::builder()
		.header("Content-Type", "application/xml")
-		.body(Body::from(xml))?)
+		.body(string_body(xml))?)
}

struct DeleteRequest {
src/api/s3/encryption.rs (new file, 595 lines)

@@ -0,0 +1,595 @@
use std::borrow::Cow;
use std::convert::TryInto;
use std::pin::Pin;

use aes_gcm::{
	aead::stream::{DecryptorLE31, EncryptorLE31, StreamLE31},
	aead::{Aead, AeadCore, KeyInit, OsRng},
	aes::cipher::crypto_common::rand_core::RngCore,
	aes::cipher::typenum::Unsigned,
	Aes256Gcm, Key, Nonce,
};
use base64::prelude::*;
use bytes::Bytes;

use futures::stream::Stream;
use futures::task;
use tokio::io::BufReader;

use http::header::{HeaderMap, HeaderName, HeaderValue};

use garage_net::bytes_buf::BytesBuf;
use garage_net::stream::{stream_asyncread, ByteStream};
use garage_rpc::rpc_helper::OrderTag;
use garage_util::data::Hash;
use garage_util::error::Error as GarageError;
use garage_util::migrate::Migrate;

use garage_model::garage::Garage;
use garage_model::s3::object_table::{ObjectVersionEncryption, ObjectVersionMetaInner};

use crate::common_error::*;
use crate::s3::checksum::Md5Checksum;
use crate::s3::error::Error;

const X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM: HeaderName =
	HeaderName::from_static("x-amz-server-side-encryption-customer-algorithm");
const X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY: HeaderName =
	HeaderName::from_static("x-amz-server-side-encryption-customer-key");
const X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5: HeaderName =
	HeaderName::from_static("x-amz-server-side-encryption-customer-key-md5");

const X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM: HeaderName =
	HeaderName::from_static("x-amz-copy-source-server-side-encryption-customer-algorithm");
const X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY: HeaderName =
	HeaderName::from_static("x-amz-copy-source-server-side-encryption-customer-key");
const X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5: HeaderName =
	HeaderName::from_static("x-amz-copy-source-server-side-encryption-customer-key-md5");

const CUSTOMER_ALGORITHM_AES256: &[u8] = b"AES256";

type Md5Output = md5::digest::Output<md5::Md5Core>;

type StreamNonceSize = aes_gcm::aead::stream::NonceSize<Aes256Gcm, StreamLE31<Aes256Gcm>>;

// Data blocks are encrypted by smaller chunks of size 4096 bytes,
// so that data can be streamed when reading.
// This size has to be known and has to be constant, or data won't be
// readable anymore. DO NOT CHANGE THIS VALUE.
const STREAM_ENC_PLAIN_CHUNK_SIZE: usize = 0x1000; // 4096 bytes
const STREAM_ENC_CYPER_CHUNK_SIZE: usize = STREAM_ENC_PLAIN_CHUNK_SIZE + 16;
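To make the framing concrete, here is a small sketch (not part of the file above) of the on-disk size this chunking implies. The 8-byte stream-nonce length is an assumption derived from the LE31 construction, and plain_len is the block length after the optional zstd compression:

	fn encrypted_block_len(plain_len: usize) -> usize {
		const PLAIN_CHUNK: usize = 4096; // STREAM_ENC_PLAIN_CHUNK_SIZE
		const TAG: usize = 16; // AES-GCM authentication tag appended to each chunk
		const NONCE: usize = 8; // assumed size of the LE31 stream nonce written up front
		let chunks = std::cmp::max(1, plain_len.div_ceil(PLAIN_CHUNK));
		NONCE + plain_len + chunks * TAG
	}

For a full 1 MiB block this gives 256 chunks, i.e. 8 + 1048576 + 256*16 = 1052680 bytes of ciphertext.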
#[derive(Clone, Copy)]
pub enum EncryptionParams {
	Plaintext,
	SseC {
		client_key: Key<Aes256Gcm>,
		client_key_md5: Md5Output,
		compression_level: Option<i32>,
	},
}

impl EncryptionParams {
	pub fn is_encrypted(&self) -> bool {
		!matches!(self, Self::Plaintext)
	}

	pub fn is_same(a: &Self, b: &Self) -> bool {
		let relevant_info = |x: &Self| match x {
			Self::Plaintext => None,
			Self::SseC {
				client_key,
				compression_level,
				..
			} => Some((*client_key, compression_level.is_some())),
		};
		relevant_info(a) == relevant_info(b)
	}

	pub fn new_from_headers(
		garage: &Garage,
		headers: &HeaderMap,
	) -> Result<EncryptionParams, Error> {
		let key = parse_request_headers(
			headers,
			&X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM,
			&X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY,
			&X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5,
		)?;
		match key {
			Some((client_key, client_key_md5)) => Ok(EncryptionParams::SseC {
				client_key,
				client_key_md5,
				compression_level: garage.config.compression_level,
			}),
			None => Ok(EncryptionParams::Plaintext),
		}
	}

	pub fn add_response_headers(&self, resp: &mut http::response::Builder) {
		if let Self::SseC { client_key_md5, .. } = self {
			let md5 = BASE64_STANDARD.encode(&client_key_md5);

			resp.headers_mut().unwrap().insert(
				X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM,
				HeaderValue::from_bytes(CUSTOMER_ALGORITHM_AES256).unwrap(),
			);
			resp.headers_mut().unwrap().insert(
				X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5,
				HeaderValue::from_bytes(md5.as_bytes()).unwrap(),
			);
		}
	}

	pub fn check_decrypt<'a>(
		garage: &Garage,
		headers: &HeaderMap,
		obj_enc: &'a ObjectVersionEncryption,
	) -> Result<(Self, Cow<'a, ObjectVersionMetaInner>), Error> {
		let key = parse_request_headers(
			headers,
			&X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM,
			&X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY,
			&X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5,
		)?;
		Self::check_decrypt_common(garage, key, obj_enc)
	}

	pub fn check_decrypt_for_copy_source<'a>(
		garage: &Garage,
		headers: &HeaderMap,
		obj_enc: &'a ObjectVersionEncryption,
	) -> Result<(Self, Cow<'a, ObjectVersionMetaInner>), Error> {
		let key = parse_request_headers(
			headers,
			&X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM,
			&X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY,
			&X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5,
		)?;
		Self::check_decrypt_common(garage, key, obj_enc)
	}

	fn check_decrypt_common<'a>(
		garage: &Garage,
		key: Option<(Key<Aes256Gcm>, Md5Output)>,
		obj_enc: &'a ObjectVersionEncryption,
	) -> Result<(Self, Cow<'a, ObjectVersionMetaInner>), Error> {
		match (key, &obj_enc) {
			(
				Some((client_key, client_key_md5)),
				ObjectVersionEncryption::SseC { inner, compressed },
			) => {
				let enc = Self::SseC {
					client_key,
					client_key_md5,
					compression_level: if *compressed {
						Some(garage.config.compression_level.unwrap_or(1))
					} else {
						None
					},
				};
				let plaintext = enc.decrypt_blob(&inner)?;
				let inner = ObjectVersionMetaInner::decode(&plaintext)
					.ok_or_internal_error("Could not decode encrypted metadata")?;
				Ok((enc, Cow::Owned(inner)))
			}
			(None, ObjectVersionEncryption::Plaintext { inner }) => {
				Ok((Self::Plaintext, Cow::Borrowed(inner)))
			}
			(_, ObjectVersionEncryption::SseC { .. }) => {
				Err(Error::bad_request("Object is encrypted"))
			}
			(Some(_), _) => {
				// TODO: should this be an OK scenario?
				Err(Error::bad_request("Trying to decrypt a plaintext object"))
			}
		}
	}

	pub fn encrypt_meta(
		&self,
		meta: ObjectVersionMetaInner,
	) -> Result<ObjectVersionEncryption, Error> {
		match self {
			Self::SseC {
				compression_level, ..
			} => {
				let plaintext = meta.encode().map_err(GarageError::from)?;
				let ciphertext = self.encrypt_blob(&plaintext)?;
				Ok(ObjectVersionEncryption::SseC {
					inner: ciphertext.into_owned(),
					compressed: compression_level.is_some(),
				})
			}
			Self::Plaintext => Ok(ObjectVersionEncryption::Plaintext { inner: meta }),
		}
	}

	// ---- generating object Etag values ----
	pub fn etag_from_md5(&self, md5sum: &Option<Md5Checksum>) -> String {
		match self {
			Self::Plaintext => md5sum
				.map(|x| hex::encode(&x[..]))
				.expect("md5 digest should have been computed"),
			Self::SseC { .. } => {
				// AWS specifies that for encrypted objects, the Etag is not
				// the md5sum of the data, but doesn't say what it is.
				// So we just put some random bytes.
				let mut random = [0u8; 16];
				OsRng.fill_bytes(&mut random);
				hex::encode(&random)
			}
		}
	}

	// ---- generic function for encrypting / decrypting blobs ----
	// Prepends a randomly-generated nonce to the encrypted value.
	// This is used for encrypting object metadata and inlined data for small objects.
	// This does not compress anything.

	pub fn encrypt_blob<'a>(&self, blob: &'a [u8]) -> Result<Cow<'a, [u8]>, Error> {
		match self {
			Self::SseC { client_key, .. } => {
				let cipher = Aes256Gcm::new(&client_key);
				let nonce = Aes256Gcm::generate_nonce(&mut OsRng);
				let ciphertext = cipher
					.encrypt(&nonce, blob)
					.ok_or_internal_error("Encryption failed")?;
				Ok(Cow::Owned([nonce.to_vec(), ciphertext].concat()))
			}
			Self::Plaintext => Ok(Cow::Borrowed(blob)),
		}
	}

	pub fn decrypt_blob<'a>(&self, blob: &'a [u8]) -> Result<Cow<'a, [u8]>, Error> {
		match self {
			Self::SseC { client_key, .. } => {
				let cipher = Aes256Gcm::new(&client_key);
				let nonce_size = <Aes256Gcm as AeadCore>::NonceSize::to_usize();
				let nonce = Nonce::from_slice(
					blob.get(..nonce_size)
						.ok_or_internal_error("invalid encrypted data")?,
				);
				let plaintext = cipher
					.decrypt(nonce, &blob[nonce_size..])
					.ok_or_bad_request(
						"Invalid encryption key, could not decrypt object metadata.",
					)?;
				Ok(Cow::Owned(plaintext))
			}
			Self::Plaintext => Ok(Cow::Borrowed(blob)),
		}
	}
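Note on the blob helpers above: the stored value is simply the fresh nonce followed by the AES-256-GCM ciphertext. Assuming the standard 12-byte GCM nonce and 16-byte tag, an encrypted metadata blob is therefore 28 bytes longer than its plaintext, and decrypt_blob() recovers the nonce on its own by splitting off the first nonce_size bytes.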
	// ---- function for encrypting / decrypting byte streams ----

	/// Get a data block from the storage node, and decrypt+decompress it
	/// if necessary. If object is plaintext, just get it without any processing.
	pub async fn get_block(
		&self,
		garage: &Garage,
		hash: &Hash,
		order: Option<OrderTag>,
	) -> Result<ByteStream, GarageError> {
		let raw_block = garage
			.block_manager
			.rpc_get_block_streaming(hash, order)
			.await?;
		Ok(self.decrypt_block_stream(raw_block))
	}

	pub fn decrypt_block_stream(&self, stream: ByteStream) -> ByteStream {
		match self {
			Self::Plaintext => stream,
			Self::SseC {
				client_key,
				compression_level,
				..
			} => {
				let plaintext = DecryptStream::new(stream, *client_key);
				if compression_level.is_some() {
					let reader = stream_asyncread(Box::pin(plaintext));
					let reader = BufReader::new(reader);
					let reader = async_compression::tokio::bufread::ZstdDecoder::new(reader);
					Box::pin(tokio_util::io::ReaderStream::new(reader))
				} else {
					Box::pin(plaintext)
				}
			}
		}
	}

	/// Encrypt a data block if encryption is set, for use before
	/// putting the data blocks into storage
	pub fn encrypt_block(&self, block: Bytes) -> Result<Bytes, Error> {
		match self {
			Self::Plaintext => Ok(block),
			Self::SseC {
				client_key,
				compression_level,
				..
			} => {
				let block = if let Some(level) = compression_level {
					Cow::Owned(
						garage_block::zstd_encode(block.as_ref(), *level)
							.ok_or_internal_error("failed to compress data block")?,
					)
				} else {
					Cow::Borrowed(block.as_ref())
				};

				let mut ret = Vec::with_capacity(block.len() + 32 + block.len() / 64);

				let mut nonce: Nonce<StreamNonceSize> = Default::default();
				OsRng.fill_bytes(&mut nonce);
				ret.extend_from_slice(nonce.as_slice());

				let mut cipher = EncryptorLE31::<Aes256Gcm>::new(&client_key, &nonce);
				let mut iter = block.chunks(STREAM_ENC_PLAIN_CHUNK_SIZE).peekable();

				if iter.peek().is_none() {
					// Empty stream: we encrypt an empty last chunk
					let chunk_enc = cipher
						.encrypt_last(&[][..])
						.ok_or_internal_error("failed to encrypt chunk")?;
					ret.extend_from_slice(&chunk_enc);
				} else {
					loop {
						let chunk = iter.next().unwrap();
						if iter.peek().is_some() {
							let chunk_enc = cipher
								.encrypt_next(chunk)
								.ok_or_internal_error("failed to encrypt chunk")?;
							assert_eq!(chunk.len(), STREAM_ENC_PLAIN_CHUNK_SIZE);
							assert_eq!(chunk_enc.len(), STREAM_ENC_CYPER_CHUNK_SIZE);
							ret.extend_from_slice(&chunk_enc);
						} else {
							// use encrypt_last for the last chunk
							let chunk_enc = cipher
								.encrypt_last(chunk)
								.ok_or_internal_error("failed to encrypt chunk")?;
							ret.extend_from_slice(&chunk_enc);
							break;
						}
					}
				}

				Ok(ret.into())
			}
		}
	}
}

fn parse_request_headers(
	headers: &HeaderMap,
	alg_header: &HeaderName,
	key_header: &HeaderName,
	md5_header: &HeaderName,
) -> Result<Option<(Key<Aes256Gcm>, Md5Output)>, Error> {
	let alg = headers.get(alg_header).map(HeaderValue::as_bytes);
	let key = headers.get(key_header).map(HeaderValue::as_bytes);
	let md5 = headers.get(md5_header).map(HeaderValue::as_bytes);

	match alg {
		Some(CUSTOMER_ALGORITHM_AES256) => {
			use md5::{Digest, Md5};

			let key_b64 =
				key.ok_or_bad_request("Missing server-side-encryption-customer-key header")?;
			let key_bytes: [u8; 32] = BASE64_STANDARD
				.decode(&key_b64)
				.ok_or_bad_request(
					"Invalid server-side-encryption-customer-key header: invalid base64",
				)?
				.try_into()
				.ok()
				.ok_or_bad_request(
					"Invalid server-side-encryption-customer-key header: invalid length",
				)?;

			let md5_b64 =
				md5.ok_or_bad_request("Missing server-side-encryption-customer-key-md5 header")?;
			let md5_bytes = BASE64_STANDARD.decode(&md5_b64).ok_or_bad_request(
				"Invalid server-side-encryption-customer-key-md5 header: invalid base64",
			)?;

			let mut hasher = Md5::new();
			hasher.update(&key_bytes[..]);
			let our_md5 = hasher.finalize();
			if our_md5.as_slice() != md5_bytes.as_slice() {
				return Err(Error::bad_request(
					"Server-side encryption client key MD5 checksum does not match",
				));
			}

			Ok(Some((key_bytes.into(), our_md5)))
		}
		Some(alg) => Err(Error::InvalidEncryptionAlgorithm(
			String::from_utf8_lossy(alg).into_owned(),
		)),
		None => {
			if key.is_some() || md5.is_some() {
				Err(Error::bad_request(
					"Unexpected server-side-encryption-customer-key{,-md5} header(s)",
				))
			} else {
				Ok(None)
			}
		}
	}
}
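For reference, a hedged client-side sketch (not part of the patch) of how a caller would derive the three SSE-C headers that parse_request_headers() validates, for an arbitrary 32-byte key:

	use base64::prelude::*;
	use md5::{Digest, Md5};

	fn ssec_headers(key: &[u8; 32]) -> [(&'static str, String); 3] {
		let key_md5 = Md5::digest(key);
		[
			("x-amz-server-side-encryption-customer-algorithm", "AES256".to_string()),
			("x-amz-server-side-encryption-customer-key", BASE64_STANDARD.encode(key)),
			("x-amz-server-side-encryption-customer-key-md5", BASE64_STANDARD.encode(key_md5.as_slice())),
		]
	}

The copy-source variants documented above take the same three values under the x-amz-copy-source-server-side-encryption-customer-* header names.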
// ---- encrypt & decrypt streams ----

#[pin_project::pin_project]
struct DecryptStream {
	#[pin]
	stream: ByteStream,
	done_reading: bool,
	buf: BytesBuf,
	key: Key<Aes256Gcm>,
	state: DecryptStreamState,
}

enum DecryptStreamState {
	Starting,
	Running(DecryptorLE31<Aes256Gcm>),
	Done,
}

impl DecryptStream {
	fn new(stream: ByteStream, key: Key<Aes256Gcm>) -> Self {
		Self {
			stream,
			done_reading: false,
			buf: BytesBuf::new(),
			key,
			state: DecryptStreamState::Starting,
		}
	}
}

impl Stream for DecryptStream {
	type Item = Result<Bytes, std::io::Error>;

	fn poll_next(
		self: Pin<&mut Self>,
		cx: &mut task::Context<'_>,
	) -> task::Poll<Option<Self::Item>> {
		use std::task::Poll;

		let mut this = self.project();

		// The first bytes of the stream should contain the starting nonce.
		// If we don't have a Running state, it means that we haven't
		// yet read the nonce.
		while matches!(this.state, DecryptStreamState::Starting) {
			let nonce_size = StreamNonceSize::to_usize();
			if let Some(nonce) = this.buf.take_exact(nonce_size) {
				let nonce = Nonce::from_slice(nonce.as_ref());
				*this.state = DecryptStreamState::Running(DecryptorLE31::new(&this.key, nonce));
				break;
			}

			match futures::ready!(this.stream.as_mut().poll_next(cx)) {
				Some(Ok(bytes)) => {
					this.buf.extend(bytes);
				}
				Some(Err(e)) => {
					return Poll::Ready(Some(Err(e)));
				}
				None => {
					return Poll::Ready(Some(Err(std::io::Error::new(
						std::io::ErrorKind::UnexpectedEof,
						"Decrypt: unexpected EOF, could not read nonce",
					))));
				}
			}
		}

		// Read at least one byte more than the encrypted chunk size
		// (if possible), so that we know if we are decrypting the
		// last chunk or not.
		while !*this.done_reading && this.buf.len() <= STREAM_ENC_CYPER_CHUNK_SIZE {
			match futures::ready!(this.stream.as_mut().poll_next(cx)) {
				Some(Ok(bytes)) => {
					this.buf.extend(bytes);
				}
				Some(Err(e)) => {
					return Poll::Ready(Some(Err(e)));
				}
				None => {
					*this.done_reading = true;
					break;
				}
			}
		}

		if matches!(this.state, DecryptStreamState::Done) {
			if !this.buf.is_empty() {
				return Poll::Ready(Some(Err(std::io::Error::new(
					std::io::ErrorKind::Other,
					"Decrypt: unexpected bytes after last encrypted chunk",
				))));
			}
			return Poll::Ready(None);
		}

		let res = if this.buf.len() > STREAM_ENC_CYPER_CHUNK_SIZE {
			// we have strictly more bytes than the encrypted chunk size,
			// so we know this is not the last
			let DecryptStreamState::Running(ref mut cipher) = this.state else {
				unreachable!()
			};
			let chunk = this.buf.take_exact(STREAM_ENC_CYPER_CHUNK_SIZE).unwrap();
			let chunk_dec = cipher.decrypt_next(chunk.as_ref());
			if let Ok(c) = &chunk_dec {
				assert_eq!(c.len(), STREAM_ENC_PLAIN_CHUNK_SIZE);
			}
			chunk_dec
		} else {
			// We have one encrypted chunk size or less, even though we tried
			// to read more, so this is the last chunk. Decrypt using the
			// appropriate decrypt_last() function that then destroys the cipher.
			let state = std::mem::replace(this.state, DecryptStreamState::Done);
			let DecryptStreamState::Running(cipher) = state else {
				unreachable!()
			};
			let chunk = this.buf.take_all();
			cipher.decrypt_last(chunk.as_ref())
		};

		match res {
			Ok(bytes) if bytes.is_empty() => Poll::Ready(None),
			Ok(bytes) => Poll::Ready(Some(Ok(bytes.into()))),
			Err(_) => Poll::Ready(Some(Err(std::io::Error::new(
				std::io::ErrorKind::Other,
				"Decryption failed",
			)))),
		}
	}
}

#[cfg(test)]
mod tests {
	use super::*;

	use futures::stream::StreamExt;
	use garage_net::stream::read_stream_to_end;

	fn stream() -> ByteStream {
		Box::pin(
			futures::stream::iter(16usize..1024)
				.map(|i| Ok(Bytes::from(vec![(i % 256) as u8; (i * 37) % 1024]))),
		)
	}

	async fn test_block_enc(compression_level: Option<i32>) {
		let enc = EncryptionParams::SseC {
			client_key: Aes256Gcm::generate_key(&mut OsRng),
			client_key_md5: Default::default(), // not needed
			compression_level,
		};

		let block_plain = read_stream_to_end(stream()).await.unwrap().into_bytes();

		let block_enc = enc.encrypt_block(block_plain.clone()).unwrap();

		let block_dec =
			enc.decrypt_block_stream(Box::pin(futures::stream::once(async { Ok(block_enc) })));
		let block_dec = read_stream_to_end(block_dec).await.unwrap().into_bytes();

		assert_eq!(block_plain, block_dec);
		assert!(block_dec.len() > 128000);
	}

	#[tokio::test]
	async fn test_encrypt_block() {
		test_block_enc(None).await
	}

	#[tokio::test]
	async fn test_encrypt_block_compressed() {
		test_block_enc(Some(1)).await
	}
}
src/api/s3/error.rs

@@ -2,13 +2,12 @@ use std::convert::TryInto;

use err_derive::Error;
use hyper::header::HeaderValue;
-use hyper::{Body, HeaderMap, StatusCode};
+use hyper::{HeaderMap, StatusCode};

-use garage_model::helper::error::Error as HelperError;
-
use crate::common_error::CommonError;
pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInternalError};
use crate::generic_server::ApiError;
+use crate::helpers::*;
use crate::s3::xml as s3_xml;
use crate::signature::error::Error as SignatureError;

@@ -62,14 +61,18 @@ pub enum Error {
	#[error(display = "Invalid XML: {}", _0)]
	InvalidXml(String),

-	/// The client sent a header with invalid value
-	#[error(display = "Invalid header value: {}", _0)]
-	InvalidHeader(#[error(source)] hyper::header::ToStrError),
-
	/// The client sent a range header with invalid value
	#[error(display = "Invalid HTTP range: {:?}", _0)]
	InvalidRange(#[error(from)] (http_range::HttpRangeParseError, u64)),

+	/// The client sent a range header with invalid value
+	#[error(display = "Invalid encryption algorithm: {:?}, should be AES256", _0)]
+	InvalidEncryptionAlgorithm(String),
+
+	/// The client sent invalid XML data
+	#[error(display = "Invalid digest: {}", _0)]
+	InvalidDigest(String),
+
	/// The client sent a request for an action not supported by garage
	#[error(display = "Unimplemented action: {}", _0)]
	NotImplemented(String),

@@ -86,18 +89,6 @@ where
impl CommonErrorDerivative for Error {}

-impl From<HelperError> for Error {
-	fn from(err: HelperError) -> Self {
-		match err {
-			HelperError::Internal(i) => Self::Common(CommonError::InternalError(i)),
-			HelperError::BadRequest(b) => Self::Common(CommonError::BadRequest(b)),
-			HelperError::InvalidBucketName(n) => Self::Common(CommonError::InvalidBucketName(n)),
-			HelperError::NoSuchBucket(n) => Self::Common(CommonError::NoSuchBucket(n)),
-			e => Self::bad_request(format!("{}", e)),
-		}
-	}
-}
-
impl From<roxmltree::Error> for Error {
	fn from(err: roxmltree::Error) -> Self {
		Self::InvalidXml(format!("{}", err))

@@ -118,7 +109,6 @@ impl From<SignatureError> for Error {
				Self::AuthorizationHeaderMalformed(c)
			}
			SignatureError::InvalidUtf8Str(i) => Self::InvalidUtf8Str(i),
-			SignatureError::InvalidHeader(h) => Self::InvalidHeader(h),
		}
	}
}

@@ -143,9 +133,9 @@ impl Error {
			Error::NotImplemented(_) => "NotImplemented",
			Error::InvalidXml(_) => "MalformedXML",
			Error::InvalidRange(_) => "InvalidRange",
-			Error::InvalidUtf8Str(_) | Error::InvalidUtf8String(_) | Error::InvalidHeader(_) => {
-				"InvalidRequest"
-			}
+			Error::InvalidDigest(_) => "InvalidDigest",
+			Error::InvalidUtf8Str(_) | Error::InvalidUtf8String(_) => "InvalidRequest",
+			Error::InvalidEncryptionAlgorithm(_) => "InvalidEncryptionAlgorithmError",
		}
	}
}

@@ -163,10 +153,11 @@ impl ApiError for Error {
			| Error::InvalidPart
			| Error::InvalidPartOrder
			| Error::EntityTooSmall
+			| Error::InvalidDigest(_)
+			| Error::InvalidEncryptionAlgorithm(_)
			| Error::InvalidXml(_)
			| Error::InvalidUtf8Str(_)
-			| Error::InvalidUtf8String(_)
-			| Error::InvalidHeader(_) => StatusCode::BAD_REQUEST,
+			| Error::InvalidUtf8String(_) => StatusCode::BAD_REQUEST,
		}
	}

@@ -189,22 +180,23 @@ impl ApiError for Error {
		}
	}

-	fn http_body(&self, garage_region: &str, path: &str) -> Body {
+	fn http_body(&self, garage_region: &str, path: &str) -> ErrorBody {
		let error = s3_xml::Error {
			code: s3_xml::Value(self.aws_code().to_string()),
			message: s3_xml::Value(format!("{}", self)),
			resource: Some(s3_xml::Value(path.to_string())),
			region: Some(s3_xml::Value(garage_region.to_string())),
		};
-		Body::from(s3_xml::to_xml_with_header(&error).unwrap_or_else(|_| {
+		let error_str = s3_xml::to_xml_with_header(&error).unwrap_or_else(|_| {
			r#"
<?xml version="1.0" encoding="UTF-8"?>
<Error>
	<Code>InternalError</Code>
	<Message>XML encoding of error failed</Message>
</Error>
			"#
			.into()
-		}))
+		});
+		error_body(error_str)
	}
}
@ -1,17 +1,22 @@
|
||||||
//! Function related to GET and HEAD requests
|
//! Function related to GET and HEAD requests
|
||||||
|
use std::collections::BTreeMap;
|
||||||
|
use std::convert::TryInto;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::time::{Duration, UNIX_EPOCH};
|
use std::time::{Duration, UNIX_EPOCH};
|
||||||
|
|
||||||
|
use bytes::Bytes;
|
||||||
use futures::future;
|
use futures::future;
|
||||||
use futures::stream::{self, StreamExt};
|
use futures::stream::{self, Stream, StreamExt};
|
||||||
use http::header::{
|
use http::header::{
|
||||||
ACCEPT_RANGES, CONTENT_LENGTH, CONTENT_RANGE, CONTENT_TYPE, ETAG, IF_MODIFIED_SINCE,
|
ACCEPT_RANGES, CACHE_CONTROL, CONTENT_DISPOSITION, CONTENT_ENCODING, CONTENT_LANGUAGE,
|
||||||
IF_NONE_MATCH, LAST_MODIFIED, RANGE,
|
CONTENT_LENGTH, CONTENT_RANGE, CONTENT_TYPE, ETAG, EXPIRES, IF_MODIFIED_SINCE, IF_NONE_MATCH,
|
||||||
|
LAST_MODIFIED, RANGE,
|
||||||
};
|
};
|
||||||
use hyper::{Body, Request, Response, StatusCode};
|
use hyper::{body::Body, Request, Response, StatusCode};
|
||||||
use tokio::sync::mpsc;
|
use tokio::sync::mpsc;
|
||||||
|
|
||||||
use garage_rpc::rpc_helper::{netapp::stream::ByteStream, OrderTag};
|
use garage_net::stream::ByteStream;
|
||||||
|
use garage_rpc::rpc_helper::OrderTag;
|
||||||
use garage_table::EmptyKey;
|
use garage_table::EmptyKey;
|
||||||
use garage_util::data::*;
|
use garage_util::data::*;
|
||||||
use garage_util::error::OkOrMessage;
|
use garage_util::error::OkOrMessage;
|
||||||
|
@ -20,13 +25,30 @@ use garage_model::garage::Garage;
|
||||||
use garage_model::s3::object_table::*;
|
use garage_model::s3::object_table::*;
|
||||||
use garage_model::s3::version_table::*;
|
use garage_model::s3::version_table::*;
|
||||||
|
|
||||||
|
use crate::helpers::*;
|
||||||
|
use crate::s3::api_server::ResBody;
|
||||||
|
use crate::s3::checksum::{add_checksum_response_headers, X_AMZ_CHECKSUM_MODE};
|
||||||
|
use crate::s3::encryption::EncryptionParams;
|
||||||
use crate::s3::error::*;
|
use crate::s3::error::*;
|
||||||
|
|
||||||
const X_AMZ_MP_PARTS_COUNT: &str = "x-amz-mp-parts-count";
|
const X_AMZ_MP_PARTS_COUNT: &str = "x-amz-mp-parts-count";
|
||||||
|
|
||||||
+#[derive(Default)]
+pub struct GetObjectOverrides {
+	pub(crate) response_cache_control: Option<String>,
+	pub(crate) response_content_disposition: Option<String>,
+	pub(crate) response_content_encoding: Option<String>,
+	pub(crate) response_content_language: Option<String>,
+	pub(crate) response_content_type: Option<String>,
+	pub(crate) response_expires: Option<String>,
+}
+
 fn object_headers(
 	version: &ObjectVersion,
 	version_meta: &ObjectVersionMeta,
+	meta_inner: &ObjectVersionMetaInner,
+	encryption: EncryptionParams,
+	checksum_mode: ChecksumMode,
 ) -> http::response::Builder {
 	debug!("Version meta: {:?}", version_meta);
 
@@ -34,7 +56,6 @@ fn object_headers(
 	let date_str = httpdate::fmt_http_date(date);
 
 	let mut resp = Response::builder()
-		.header(CONTENT_TYPE, version_meta.headers.content_type.to_string())
 		.header(LAST_MODIFIED, date_str)
 		.header(ACCEPT_RANGES, "bytes".to_string());
 
@@ -42,18 +63,65 @@ fn object_headers(
 		resp = resp.header(ETAG, format!("\"{}\"", version_meta.etag));
 	}
 
-	for (k, v) in version_meta.headers.other.iter() {
-		resp = resp.header(k, v.to_string());
+	// When metadata is retrieved through the REST API, Amazon S3 combines headers that
+	// have the same name (ignoring case) into a comma-delimited list.
+	// See: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingMetadata.html
+	let mut headers_by_name = BTreeMap::new();
+	for (name, value) in meta_inner.headers.iter() {
+		match headers_by_name.get_mut(name) {
+			None => {
+				headers_by_name.insert(name, vec![value.as_str()]);
+			}
+			Some(headers) => {
+				headers.push(value.as_str());
+			}
+		}
 	}
 
+	for (name, values) in headers_by_name {
+		resp = resp.header(name, values.join(","));
+	}
+
+	if checksum_mode.enabled {
+		resp = add_checksum_response_headers(&meta_inner.checksum, resp);
+	}
+
+	encryption.add_response_headers(&mut resp);
+
 	resp
 }
 
+/// Override headers according to specific query parameters, see
+/// section "Overriding response header values through the request" in
+/// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+fn getobject_override_headers(
+	overrides: GetObjectOverrides,
+	resp: &mut http::response::Builder,
+) -> Result<(), Error> {
+	// TODO: this only applies for signed requests, so when we support
+	// anonymous access in the future we will have to do a permission check here
+	let overrides = [
+		(CACHE_CONTROL, overrides.response_cache_control),
+		(CONTENT_DISPOSITION, overrides.response_content_disposition),
+		(CONTENT_ENCODING, overrides.response_content_encoding),
+		(CONTENT_LANGUAGE, overrides.response_content_language),
+		(CONTENT_TYPE, overrides.response_content_type),
+		(EXPIRES, overrides.response_expires),
+	];
+	for (hdr, val_opt) in overrides {
+		if let Some(val) = val_opt {
+			let val = val.try_into().ok_or_bad_request("invalid header value")?;
+			resp.headers_mut().unwrap().insert(hdr, val);
+		}
+	}
+	Ok(())
+}
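
The GetObjectOverrides struct and getobject_override_headers above implement S3's response-* query parameters. As a rough illustration (not part of the diff; the helper and its input are assumptions), such overrides could be collected from already-parsed query pairs like this:

```rust
// Hypothetical sketch: mapping GetObject response-* query parameters onto
// GetObjectOverrides. Unknown parameters are simply ignored here.
fn overrides_from_query<'a>(
	query_pairs: impl Iterator<Item = (&'a str, &'a str)>,
) -> GetObjectOverrides {
	let mut o = GetObjectOverrides::default();
	for (k, v) in query_pairs {
		let slot = match k {
			"response-cache-control" => &mut o.response_cache_control,
			"response-content-disposition" => &mut o.response_content_disposition,
			"response-content-encoding" => &mut o.response_content_encoding,
			"response-content-language" => &mut o.response_content_language,
			"response-content-type" => &mut o.response_content_type,
			"response-expires" => &mut o.response_expires,
			_ => continue,
		};
		*slot = Some(v.to_string());
	}
	o
}
```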
 fn try_answer_cached(
 	version: &ObjectVersion,
 	version_meta: &ObjectVersionMeta,
-	req: &Request<Body>,
-) -> Option<Response<Body>> {
+	req: &Request<impl Body>,
+) -> Option<Response<ResBody>> {
 	// <trinity> It is possible, and is even usually the case, [that both If-None-Match and
 	// If-Modified-Since] are present in a request. In this situation If-None-Match takes
 	// precedence and If-Modified-Since is ignored (as per 6.Precedence from rfc7232). The rational
@@ -80,7 +148,7 @@ fn try_answer_cached(
 		Some(
 			Response::builder()
 				.status(StatusCode::NOT_MODIFIED)
-				.body(Body::empty())
+				.body(empty_body())
 				.unwrap(),
 		)
 	} else {
@@ -90,12 +158,22 @@
 
 /// Handle HEAD request
 pub async fn handle_head(
+	ctx: ReqCtx,
+	req: &Request<impl Body>,
+	key: &str,
+	part_number: Option<u64>,
+) -> Result<Response<ResBody>, Error> {
+	handle_head_without_ctx(ctx.garage, req, ctx.bucket_id, key, part_number).await
+}
+
+/// Handle HEAD request for website
+pub async fn handle_head_without_ctx(
 	garage: Arc<Garage>,
-	req: &Request<Body>,
+	req: &Request<impl Body>,
 	bucket_id: Uuid,
 	key: &str,
 	part_number: Option<u64>,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
 	let object = garage
 		.object_table
 		.get(&bucket_id, &key.to_string())
@@ -124,21 +202,33 @@ pub async fn handle_head(
 		return Ok(cached);
 	}
 
+	let (encryption, headers) =
+		EncryptionParams::check_decrypt(&garage, req.headers(), &version_meta.encryption)?;
+
+	let checksum_mode = checksum_mode(&req);
+
 	if let Some(pn) = part_number {
 		match version_data {
-			ObjectVersionData::Inline(_, bytes) => {
+			ObjectVersionData::Inline(_, _) => {
 				if pn != 1 {
 					return Err(Error::InvalidPart);
 				}
-				Ok(object_headers(object_version, version_meta)
-					.header(CONTENT_LENGTH, format!("{}", bytes.len()))
-					.header(
-						CONTENT_RANGE,
-						format!("bytes 0-{}/{}", bytes.len() - 1, bytes.len()),
-					)
-					.header(X_AMZ_MP_PARTS_COUNT, "1")
-					.status(StatusCode::PARTIAL_CONTENT)
-					.body(Body::empty())?)
+				let bytes_len = version_meta.size;
+				Ok(object_headers(
+					object_version,
+					version_meta,
+					&headers,
+					encryption,
+					checksum_mode,
+				)
+				.header(CONTENT_LENGTH, format!("{}", bytes_len))
+				.header(
+					CONTENT_RANGE,
+					format!("bytes 0-{}/{}", bytes_len - 1, bytes_len),
+				)
+				.header(X_AMZ_MP_PARTS_COUNT, "1")
+				.status(StatusCode::PARTIAL_CONTENT)
+				.body(empty_body())?)
 			}
 			ObjectVersionData::FirstBlock(_, _) => {
 				let version = garage
@@ -150,39 +240,63 @@ pub async fn handle_head(
 				let (part_offset, part_end) =
 					calculate_part_bounds(&version, pn).ok_or(Error::InvalidPart)?;
 
-				Ok(object_headers(object_version, version_meta)
-					.header(CONTENT_LENGTH, format!("{}", part_end - part_offset))
-					.header(
-						CONTENT_RANGE,
-						format!(
-							"bytes {}-{}/{}",
-							part_offset,
-							part_end - 1,
-							version_meta.size
-						),
-					)
-					.header(X_AMZ_MP_PARTS_COUNT, format!("{}", version.n_parts()?))
-					.status(StatusCode::PARTIAL_CONTENT)
-					.body(Body::empty())?)
+				Ok(object_headers(
+					object_version,
+					version_meta,
+					&headers,
+					encryption,
+					checksum_mode,
+				)
+				.header(CONTENT_LENGTH, format!("{}", part_end - part_offset))
+				.header(
+					CONTENT_RANGE,
+					format!(
+						"bytes {}-{}/{}",
+						part_offset,
+						part_end - 1,
+						version_meta.size
+					),
+				)
+				.header(X_AMZ_MP_PARTS_COUNT, format!("{}", version.n_parts()?))
+				.status(StatusCode::PARTIAL_CONTENT)
+				.body(empty_body())?)
 			}
 			_ => unreachable!(),
 		}
 	} else {
-		Ok(object_headers(object_version, version_meta)
-			.header(CONTENT_LENGTH, format!("{}", version_meta.size))
-			.status(StatusCode::OK)
-			.body(Body::empty())?)
+		Ok(object_headers(
+			object_version,
+			version_meta,
+			&headers,
+			encryption,
+			checksum_mode,
+		)
+		.header(CONTENT_LENGTH, format!("{}", version_meta.size))
+		.status(StatusCode::OK)
+		.body(empty_body())?)
 	}
 }
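
try_answer_cached above applies the precedence rule quoted in its comment: If-None-Match wins over If-Modified-Since (RFC 7232, section 6). A standalone sketch of just that rule, with illustrative types (timestamps as plain integers, weak-validator subtleties ignored):

```rust
// Sketch only: returns true when a 304 Not Modified answer is appropriate.
fn not_modified(
	etag: &str,
	last_modified: u64,
	if_none_match: Option<&str>,
	if_modified_since: Option<u64>,
) -> bool {
	if let Some(inm) = if_none_match {
		// If-None-Match takes precedence; it may list several entity tags or be "*".
		inm == "*"
			|| inm
				.split(',')
				.any(|t| t.trim().trim_matches('"') == etag.trim_matches('"'))
	} else if let Some(ims) = if_modified_since {
		// Fall back to If-Modified-Since only when If-None-Match is absent.
		last_modified <= ims
	} else {
		false
	}
}
```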
 /// Handle GET request
 pub async fn handle_get(
+	ctx: ReqCtx,
+	req: &Request<impl Body>,
+	key: &str,
+	part_number: Option<u64>,
+	overrides: GetObjectOverrides,
+) -> Result<Response<ResBody>, Error> {
+	handle_get_without_ctx(ctx.garage, req, ctx.bucket_id, key, part_number, overrides).await
+}
+
+/// Handle GET request
+pub async fn handle_get_without_ctx(
 	garage: Arc<Garage>,
-	req: &Request<Body>,
+	req: &Request<impl Body>,
 	bucket_id: Uuid,
 	key: &str,
 	part_number: Option<u64>,
-) -> Result<Response<Body>, Error> {
+	overrides: GetObjectOverrides,
+) -> Result<Response<ResBody>, Error> {
 	let object = garage
 		.object_table
 		.get(&bucket_id, &key.to_string())
@@ -210,45 +324,107 @@ pub async fn handle_get(
 		return Ok(cached);
 	}
 
+	let (enc, headers) =
+		EncryptionParams::check_decrypt(&garage, req.headers(), &last_v_meta.encryption)?;
+
+	let checksum_mode = checksum_mode(&req);
+
 	match (part_number, parse_range_header(req, last_v_meta.size)?) {
-		(Some(_), Some(_)) => {
-			return Err(Error::bad_request(
-				"Cannot specify both partNumber and Range header",
-			));
-		}
+		(Some(_), Some(_)) => Err(Error::bad_request(
+			"Cannot specify both partNumber and Range header",
+		)),
 		(Some(pn), None) => {
-			return handle_get_part(garage, last_v, last_v_data, last_v_meta, pn).await;
-		}
-		(None, Some(range)) => {
-			return handle_get_range(
+			handle_get_part(
 				garage,
 				last_v,
 				last_v_data,
 				last_v_meta,
+				enc,
+				&headers,
+				pn,
+				checksum_mode,
+			)
+			.await
+		}
+		(None, Some(range)) => {
+			handle_get_range(
+				garage,
+				last_v,
+				last_v_data,
+				last_v_meta,
+				enc,
+				&headers,
 				range.start,
 				range.start + range.length,
+				checksum_mode,
 			)
-			.await;
+			.await
+		}
+		(None, None) => {
+			handle_get_full(
+				garage,
+				last_v,
+				last_v_data,
+				last_v_meta,
+				enc,
+				&headers,
+				overrides,
+				checksum_mode,
+			)
+			.await
 		}
-		(None, None) => (),
 	}
+}
 
-	let resp_builder = object_headers(last_v, last_v_meta)
-		.header(CONTENT_LENGTH, format!("{}", last_v_meta.size))
-		.status(StatusCode::OK);
+async fn handle_get_full(
+	garage: Arc<Garage>,
+	version: &ObjectVersion,
+	version_data: &ObjectVersionData,
+	version_meta: &ObjectVersionMeta,
+	encryption: EncryptionParams,
+	meta_inner: &ObjectVersionMetaInner,
+	overrides: GetObjectOverrides,
+	checksum_mode: ChecksumMode,
+) -> Result<Response<ResBody>, Error> {
+	let mut resp_builder = object_headers(
+		version,
+		version_meta,
+		&meta_inner,
+		encryption,
+		checksum_mode,
+	)
+	.header(CONTENT_LENGTH, format!("{}", version_meta.size))
+	.status(StatusCode::OK);
+	getobject_override_headers(overrides, &mut resp_builder)?;
 
-	match &last_v_data {
+	let stream = full_object_byte_stream(garage, version, version_data, encryption);
+
+	Ok(resp_builder.body(response_body_from_stream(stream))?)
+}
+
+pub fn full_object_byte_stream(
+	garage: Arc<Garage>,
+	version: &ObjectVersion,
+	version_data: &ObjectVersionData,
+	encryption: EncryptionParams,
+) -> ByteStream {
+	match &version_data {
 		ObjectVersionData::DeleteMarker => unreachable!(),
 		ObjectVersionData::Inline(_, bytes) => {
-			let body: Body = Body::from(bytes.to_vec());
-			Ok(resp_builder.body(body)?)
+			let bytes = bytes.to_vec();
+			Box::pin(futures::stream::once(async move {
+				encryption
+					.decrypt_blob(&bytes)
+					.map(|x| Bytes::from(x.to_vec()))
+					.map_err(std_error_from_read_error)
+			}))
 		}
 		ObjectVersionData::FirstBlock(_, first_block_hash) => {
-			let (tx, rx) = mpsc::channel(2);
+			let (tx, rx) = mpsc::channel::<ByteStream>(2);
 
 			let order_stream = OrderTag::stream();
 			let first_block_hash = *first_block_hash;
-			let version_uuid = last_v.uuid;
+			let version_uuid = version.uuid;
 
 			tokio::spawn(async move {
 				match async {
@@ -257,19 +433,18 @@ pub async fn handle_get(
 					garage2.version_table.get(&version_uuid, &EmptyKey).await
 				});
 
-				let stream_block_0 = garage
-					.block_manager
-					.rpc_get_block_streaming(&first_block_hash, Some(order_stream.order(0)))
+				let stream_block_0 = encryption
+					.get_block(&garage, &first_block_hash, Some(order_stream.order(0)))
 					.await?;
 
 				tx.send(stream_block_0)
 					.await
 					.ok_or_message("channel closed")?;
 
 				let version = version_fut.await.unwrap()?.ok_or(Error::NoSuchKey)?;
 				for (i, (_, vb)) in version.blocks.items().iter().enumerate().skip(1) {
-					let stream_block_i = garage
-						.block_manager
-						.rpc_get_block_streaming(&vb.hash, Some(order_stream.order(i as u64)))
+					let stream_block_i = encryption
+						.get_block(&garage, &vb.hash, Some(order_stream.order(i as u64)))
 						.await?;
 					tx.send(stream_block_i)
 						.await
@@ -282,21 +457,12 @@ pub async fn handle_get(
 				{
 					Ok(()) => (),
 					Err(e) => {
-						let err = std::io::Error::new(
-							std::io::ErrorKind::Other,
-							format!("Error while getting object data: {}", e),
-						);
-						let _ = tx
-							.send(Box::pin(stream::once(future::ready(Err(err)))))
-							.await;
+						let _ = tx.send(error_stream_item(e)).await;
 					}
 				}
 			});
 
-			let body_stream = tokio_stream::wrappers::ReceiverStream::new(rx).flatten();
-
-			let body = hyper::body::Body::wrap_stream(body_stream);
-			Ok(resp_builder.body(body)?)
+			Box::pin(tokio_stream::wrappers::ReceiverStream::new(rx).flatten())
 		}
 	}
 }
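
full_object_byte_stream above relies on a producer/consumer pattern: a spawned task pushes one sub-stream per block into a bounded channel, and the response body is the flattened concatenation, so the next block can be prefetched while the current one is being sent. A self-contained sketch of just that wiring, with the block fetch faked:

```rust
// Sketch of the channel-of-streams pattern (assumed dependencies: tokio,
// tokio-stream, futures). Real code would fetch chunks from the block manager.
use futures::stream::{self, StreamExt};
use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;

type Chunk = Result<Vec<u8>, std::io::Error>;

async fn demo_flatten() {
	// Each channel item is itself a stream of chunks for one block.
	let (tx, rx) = mpsc::channel::<stream::Iter<std::vec::IntoIter<Chunk>>>(2);
	tokio::spawn(async move {
		for block in 0u8..3 {
			let one_block = stream::iter(vec![Ok(vec![block; 4])]);
			if tx.send(one_block).await.is_err() {
				break; // receiver dropped, stop producing
			}
		}
	});
	// Flatten the stream of streams into a single byte-chunk stream.
	let mut body = ReceiverStream::new(rx).flatten();
	while let Some(chunk) = body.next().await {
		let _ = chunk; // hand each chunk to the HTTP response body
	}
}
```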
@@ -306,10 +472,16 @@ async fn handle_get_range(
 	version: &ObjectVersion,
 	version_data: &ObjectVersionData,
 	version_meta: &ObjectVersionMeta,
+	encryption: EncryptionParams,
+	meta_inner: &ObjectVersionMetaInner,
 	begin: u64,
 	end: u64,
-) -> Result<Response<Body>, Error> {
-	let resp_builder = object_headers(version, version_meta)
+	checksum_mode: ChecksumMode,
+) -> Result<Response<ResBody>, Error> {
+	// Here we do not use getobject_override_headers because we don't
+	// want to add any overridden headers (those should not be added
+	// when returning PARTIAL_CONTENT)
+	let resp_builder = object_headers(version, version_meta, meta_inner, encryption, checksum_mode)
 		.header(CONTENT_LENGTH, format!("{}", end - begin))
 		.header(
 			CONTENT_RANGE,
@@ -320,8 +492,9 @@ async fn handle_get_range(
 	match &version_data {
 		ObjectVersionData::DeleteMarker => unreachable!(),
 		ObjectVersionData::Inline(_meta, bytes) => {
+			let bytes = encryption.decrypt_blob(&bytes)?;
 			if end as usize <= bytes.len() {
-				let body: Body = Body::from(bytes[begin as usize..end as usize].to_vec());
+				let body = bytes_body(bytes[begin as usize..end as usize].to_vec().into());
 				Ok(resp_builder.body(body)?)
 			} else {
 				Err(Error::internal_error(
@@ -336,7 +509,8 @@ async fn handle_get_range(
 				.await?
 				.ok_or(Error::NoSuchKey)?;
 
-			let body = body_from_blocks_range(garage, version.blocks.items(), begin, end);
+			let body =
+				body_from_blocks_range(garage, encryption, version.blocks.items(), begin, end);
 			Ok(resp_builder.body(body)?)
 		}
 	}
@@ -347,16 +521,28 @@ async fn handle_get_part(
 	object_version: &ObjectVersion,
 	version_data: &ObjectVersionData,
 	version_meta: &ObjectVersionMeta,
+	encryption: EncryptionParams,
+	meta_inner: &ObjectVersionMetaInner,
 	part_number: u64,
-) -> Result<Response<Body>, Error> {
-	let resp_builder =
-		object_headers(object_version, version_meta).status(StatusCode::PARTIAL_CONTENT);
+	checksum_mode: ChecksumMode,
+) -> Result<Response<ResBody>, Error> {
+	// Same as for get_range, no getobject_override_headers
+	let resp_builder = object_headers(
+		object_version,
+		version_meta,
+		meta_inner,
+		encryption,
+		checksum_mode,
+	)
+	.status(StatusCode::PARTIAL_CONTENT);
 
 	match version_data {
 		ObjectVersionData::Inline(_, bytes) => {
 			if part_number != 1 {
 				return Err(Error::InvalidPart);
 			}
+			let bytes = encryption.decrypt_blob(&bytes)?;
+			assert_eq!(bytes.len() as u64, version_meta.size);
 			Ok(resp_builder
 				.header(CONTENT_LENGTH, format!("{}", bytes.len()))
 				.header(
@@ -364,7 +550,7 @@ async fn handle_get_part(
 					format!("bytes {}-{}/{}", 0, bytes.len() - 1, bytes.len()),
 				)
 				.header(X_AMZ_MP_PARTS_COUNT, "1")
-				.body(Body::from(bytes.to_vec()))?)
+				.body(bytes_body(bytes.into_owned().into()))?)
 		}
 		ObjectVersionData::FirstBlock(_, _) => {
 			let version = garage
@@ -376,7 +562,8 @@ async fn handle_get_part(
 			let (begin, end) =
 				calculate_part_bounds(&version, part_number).ok_or(Error::InvalidPart)?;
 
-			let body = body_from_blocks_range(garage, version.blocks.items(), begin, end);
+			let body =
+				body_from_blocks_range(garage, encryption, version.blocks.items(), begin, end);
 
 			Ok(resp_builder
 				.header(CONTENT_LENGTH, format!("{}", end - begin))
@@ -392,7 +579,7 @@ async fn handle_get_part(
 }
 
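handle_get_range and handle_get_part above both work with an exclusive end offset. As a quick worked example of the header arithmetic (helper name is illustrative): for a 10-byte object and `Range: bytes=2-5`, http_range yields start = 2 and length = 4, so Content-Range is "bytes 2-5/10" and Content-Length is 4.

```rust
// Illustrative only: Content-Range / Content-Length values as built above,
// from an http_range::HttpRange-style (start, length) pair.
fn content_range_headers(start: u64, length: u64, total_size: u64) -> (String, String) {
	let begin = start;
	let end = start + length; // exclusive end, as used by handle_get_range
	(
		format!("bytes {}-{}/{}", begin, end - 1, total_size),
		format!("{}", end - begin),
	)
}

// content_range_headers(2, 4, 10) == ("bytes 2-5/10".into(), "4".into())
```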
 fn parse_range_header(
-	req: &Request<Body>,
+	req: &Request<impl Body>,
 	total_size: u64,
 ) -> Result<Option<http_range::HttpRange>, Error> {
 	let range = match req.headers().get(RANGE) {
@@ -429,12 +616,27 @@ fn calculate_part_bounds(v: &Version, part_number: u64) -> Option<(u64, u64)> {
 	None
 }
 
+struct ChecksumMode {
+	enabled: bool,
+}
+
+fn checksum_mode(req: &Request<impl Body>) -> ChecksumMode {
+	ChecksumMode {
+		enabled: req
+			.headers()
+			.get(X_AMZ_CHECKSUM_MODE)
+			.map(|x| x == "ENABLED")
+			.unwrap_or(false),
+	}
+}
+
 fn body_from_blocks_range(
 	garage: Arc<Garage>,
+	encryption: EncryptionParams,
 	all_blocks: &[(VersionBlockKey, VersionBlock)],
 	begin: u64,
 	end: u64,
-) -> Body {
+) -> ResBody {
 	// We will store here the list of blocks that have an intersection with the requested
 	// range, as well as their "true offset", which is their actual offset in the complete
 	// file (whereas block.offset designates the offset of the block WITHIN THE PART
@@ -456,17 +658,16 @@ fn body_from_blocks_range(
 	}
 
 	let order_stream = OrderTag::stream();
-	let body_stream = futures::stream::iter(blocks)
-		.enumerate()
-		.map(move |(i, (block, block_offset))| {
-			let garage = garage.clone();
-			async move {
-				garage
-					.block_manager
-					.rpc_get_block_streaming(&block.hash, Some(order_stream.order(i as u64)))
-					.await
-					.unwrap_or_else(|e| error_stream(i, e))
-					.scan(block_offset, move |chunk_offset, chunk| {
+	let (tx, rx) = mpsc::channel::<ByteStream>(2);
+
+	tokio::spawn(async move {
+		match async {
+			for (i, (block, block_offset)) in blocks.iter().enumerate() {
+				let block_stream = encryption
+					.get_block(&garage, &block.hash, Some(order_stream.order(i as u64)))
+					.await?;
+				let block_stream = block_stream
+					.scan(*block_offset, move |chunk_offset, chunk| {
 						let r = match chunk {
 							Ok(chunk_bytes) => {
 								let chunk_len = chunk_bytes.len() as u64;
@@ -502,20 +703,53 @@ fn body_from_blocks_range(
 						};
 						futures::future::ready(r)
 					})
-					.filter_map(futures::future::ready)
+					.filter_map(futures::future::ready);
+
+				let block_stream: ByteStream = Box::pin(block_stream);
+				tx.send(Box::pin(block_stream))
+					.await
+					.ok_or_message("channel closed")?;
 			}
-		})
-		.buffered(2)
-		.flatten();
 
-	hyper::body::Body::wrap_stream(body_stream)
+			Ok::<(), Error>(())
+		}
+		.await
+		{
+			Ok(()) => (),
+			Err(e) => {
+				let _ = tx.send(error_stream_item(e)).await;
+			}
+		}
+	});
+
+	response_body_from_block_stream(rx)
 }
 
-fn error_stream(i: usize, e: garage_util::error::Error) -> ByteStream {
-	Box::pin(futures::stream::once(async move {
-		Err(std::io::Error::new(
-			std::io::ErrorKind::Other,
-			format!("Could not get block {}: {}", i, e),
-		))
-	}))
+fn response_body_from_block_stream(rx: mpsc::Receiver<ByteStream>) -> ResBody {
+	let body_stream = tokio_stream::wrappers::ReceiverStream::new(rx).flatten();
+	response_body_from_stream(body_stream)
+}
+
+fn response_body_from_stream<S>(stream: S) -> ResBody
+where
+	S: Stream<Item = Result<Bytes, std::io::Error>> + Send + Sync + 'static,
+{
+	let body_stream = stream.map(|x| {
+		x.map(hyper::body::Frame::data)
+			.map_err(|e| Error::from(garage_util::error::Error::from(e)))
+	});
+	ResBody::new(http_body_util::StreamBody::new(body_stream))
+}
+
+fn error_stream_item<E: std::fmt::Display>(e: E) -> ByteStream {
+	Box::pin(stream::once(future::ready(Err(std_error_from_read_error(
+		e,
+	)))))
+}
+
+fn std_error_from_read_error<E: std::fmt::Display>(e: E) -> std::io::Error {
+	std::io::Error::new(
+		std::io::ErrorKind::Other,
+		format!("Error while reading object data: {}", e),
+	)
 }
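
checksum_mode above only turns on checksum response headers when the client opts in. A hedged request-side illustration using the http crate (bucket and key names are made up):

```rust
// The x-amz-checksum-mode header inspected by checksum_mode(); any other
// value, or its absence, leaves checksum response headers disabled.
#[test]
fn checksum_mode_is_opt_in() {
	let req = http::Request::builder()
		.method("HEAD")
		.uri("/example-bucket/example-key")
		.header("x-amz-checksum-mode", "ENABLED")
		.body(())
		.unwrap();
	assert!(req
		.headers()
		.get("x-amz-checksum-mode")
		.map(|v| v == "ENABLED")
		.unwrap_or(false));
}
```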
@@ -1,10 +1,12 @@
 use quick_xml::de::from_reader;
-use std::sync::Arc;
 
-use hyper::{Body, Request, Response, StatusCode};
+use http_body_util::BodyExt;
+use hyper::{Request, Response, StatusCode};
 
 use serde::{Deserialize, Serialize};
 
+use crate::helpers::*;
+use crate::s3::api_server::{ReqBody, ResBody};
 use crate::s3::error::*;
 use crate::s3::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
 use crate::signature::verify_signed_content;
@@ -13,71 +15,75 @@ use garage_model::bucket_table::{
 	parse_lifecycle_date, Bucket, LifecycleExpiration as GarageLifecycleExpiration,
 	LifecycleFilter as GarageLifecycleFilter, LifecycleRule as GarageLifecycleRule,
 };
-use garage_model::garage::Garage;
 use garage_util::data::*;
 
-pub async fn handle_get_lifecycle(bucket: &Bucket) -> Result<Response<Body>, Error> {
-	let param = bucket
-		.params()
-		.ok_or_internal_error("Bucket should not be deleted at this point")?;
+pub async fn handle_get_lifecycle(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
+	let ReqCtx { bucket_params, .. } = ctx;
 
-	if let Some(lifecycle) = param.lifecycle_config.get() {
+	if let Some(lifecycle) = bucket_params.lifecycle_config.get() {
 		let wc = LifecycleConfiguration::from_garage_lifecycle_config(lifecycle);
 		let xml = to_xml_with_header(&wc)?;
 		Ok(Response::builder()
 			.status(StatusCode::OK)
 			.header(http::header::CONTENT_TYPE, "application/xml")
-			.body(Body::from(xml))?)
+			.body(string_body(xml))?)
 	} else {
 		Ok(Response::builder()
 			.status(StatusCode::NO_CONTENT)
-			.body(Body::empty())?)
+			.body(empty_body())?)
 	}
 }
 
-pub async fn handle_delete_lifecycle(
-	garage: Arc<Garage>,
-	mut bucket: Bucket,
-) -> Result<Response<Body>, Error> {
-	let param = bucket
-		.params_mut()
-		.ok_or_internal_error("Bucket should not be deleted at this point")?;
-
-	param.lifecycle_config.update(None);
-	garage.bucket_table.insert(&bucket).await?;
+pub async fn handle_delete_lifecycle(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
+	let ReqCtx {
+		garage,
+		bucket_id,
+		mut bucket_params,
+		..
+	} = ctx;
+	bucket_params.lifecycle_config.update(None);
+	garage
+		.bucket_table
+		.insert(&Bucket::present(bucket_id, bucket_params))
+		.await?;
 
 	Ok(Response::builder()
 		.status(StatusCode::NO_CONTENT)
-		.body(Body::empty())?)
+		.body(empty_body())?)
 }
 
 pub async fn handle_put_lifecycle(
-	garage: Arc<Garage>,
-	mut bucket: Bucket,
-	req: Request<Body>,
+	ctx: ReqCtx,
+	req: Request<ReqBody>,
 	content_sha256: Option<Hash>,
-) -> Result<Response<Body>, Error> {
-	let body = hyper::body::to_bytes(req.into_body()).await?;
+) -> Result<Response<ResBody>, Error> {
+	let ReqCtx {
+		garage,
+		bucket_id,
+		mut bucket_params,
+		..
+	} = ctx;
+
+	let body = BodyExt::collect(req.into_body()).await?.to_bytes();
 
 	if let Some(content_sha256) = content_sha256 {
 		verify_signed_content(content_sha256, &body[..])?;
 	}
 
-	let param = bucket
-		.params_mut()
-		.ok_or_internal_error("Bucket should not be deleted at this point")?;
-
 	let conf: LifecycleConfiguration = from_reader(&body as &[u8])?;
 	let config = conf
 		.validate_into_garage_lifecycle_config()
 		.ok_or_bad_request("Invalid lifecycle configuration")?;
 
-	param.lifecycle_config.update(Some(config));
-	garage.bucket_table.insert(&bucket).await?;
+	bucket_params.lifecycle_config.update(Some(config));
+	garage
+		.bucket_table
+		.insert(&Bucket::present(bucket_id, bucket_params))
+		.await?;
 
 	Ok(Response::builder()
 		.status(StatusCode::OK)
-		.body(Body::empty())?)
+		.body(empty_body())?)
 }
 
 // ---- SERIALIZATION AND DESERIALIZATION TO/FROM S3 XML ----
@@ -1,22 +1,22 @@
 use std::collections::{BTreeMap, BTreeSet};
 use std::iter::{Iterator, Peekable};
-use std::sync::Arc;
 
 use base64::prelude::*;
-use hyper::{Body, Response};
+use hyper::{Request, Response};
 
 use garage_util::data::*;
 use garage_util::error::Error as GarageError;
 use garage_util::time::*;
 
-use garage_model::garage::Garage;
 use garage_model::s3::mpu_table::*;
 use garage_model::s3::object_table::*;
 
 use garage_table::EnumerationOrder;
 
 use crate::encoding::*;
-use crate::helpers::key_after_prefix;
+use crate::helpers::*;
+use crate::s3::api_server::{ReqBody, ResBody};
+use crate::s3::encryption::EncryptionParams;
 use crate::s3::error::*;
 use crate::s3::multipart as s3_multipart;
 use crate::s3::xml as s3_xml;
@@ -61,9 +61,10 @@ pub struct ListPartsQuery {
 }
 
 pub async fn handle_list(
-	garage: Arc<Garage>,
+	ctx: ReqCtx,
 	query: &ListObjectsQuery,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
+	let ReqCtx { garage, .. } = &ctx;
 	let io = |bucket, key, count| {
 		let t = &garage.object_table;
 		async move {
@@ -162,13 +163,15 @@ pub async fn handle_list(
 	let xml = s3_xml::to_xml_with_header(&result)?;
 	Ok(Response::builder()
 		.header("Content-Type", "application/xml")
-		.body(Body::from(xml.into_bytes()))?)
+		.body(string_body(xml))?)
 }
 
 pub async fn handle_list_multipart_upload(
-	garage: Arc<Garage>,
+	ctx: ReqCtx,
 	query: &ListMultipartUploadsQuery,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
+	let ReqCtx { garage, .. } = &ctx;
+
 	let io = |bucket, key, count| {
 		let t = &garage.object_table;
 		async move {
@@ -264,19 +267,26 @@ pub async fn handle_list_multipart_upload(
 
 	Ok(Response::builder()
 		.header("Content-Type", "application/xml")
-		.body(Body::from(xml.into_bytes()))?)
+		.body(string_body(xml))?)
 }
 
 pub async fn handle_list_parts(
-	garage: Arc<Garage>,
+	ctx: ReqCtx,
+	req: Request<ReqBody>,
 	query: &ListPartsQuery,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
 	debug!("ListParts {:?}", query);
 
 	let upload_id = s3_multipart::decode_upload_id(&query.upload_id)?;
 
-	let (_, _, mpu) =
-		s3_multipart::get_upload(&garage, &query.bucket_id, &query.key, &upload_id).await?;
+	let (_, object_version, mpu) = s3_multipart::get_upload(&ctx, &query.key, &upload_id).await?;
+
+	let object_encryption = match object_version.state {
+		ObjectVersionState::Uploading { encryption, .. } => encryption,
+		_ => unreachable!(),
+	};
+	let encryption_res =
+		EncryptionParams::check_decrypt(&ctx.garage, req.headers(), &object_encryption);
 
 	let (info, next) = fetch_part_info(query, &mpu)?;
 
@@ -295,11 +305,40 @@ pub async fn handle_list_parts(
 		is_truncated: s3_xml::Value(format!("{}", next.is_some())),
 		parts: info
 			.iter()
-			.map(|part| s3_xml::PartItem {
-				etag: s3_xml::Value(format!("\"{}\"", part.etag)),
-				last_modified: s3_xml::Value(msec_to_rfc3339(part.timestamp)),
-				part_number: s3_xml::IntValue(part.part_number as i64),
-				size: s3_xml::IntValue(part.size as i64),
+			.map(|part| {
+				// hide checksum if object is encrypted and the decryption
+				// keys are not provided
+				let checksum = part.checksum.filter(|_| encryption_res.is_ok());
+				s3_xml::PartItem {
+					etag: s3_xml::Value(format!("\"{}\"", part.etag)),
+					last_modified: s3_xml::Value(msec_to_rfc3339(part.timestamp)),
+					part_number: s3_xml::IntValue(part.part_number as i64),
+					size: s3_xml::IntValue(part.size as i64),
+					checksum_crc32: match &checksum {
+						Some(ChecksumValue::Crc32(x)) => {
+							Some(s3_xml::Value(BASE64_STANDARD.encode(&x)))
+						}
+						_ => None,
+					},
+					checksum_crc32c: match &checksum {
+						Some(ChecksumValue::Crc32c(x)) => {
+							Some(s3_xml::Value(BASE64_STANDARD.encode(&x)))
+						}
+						_ => None,
+					},
+					checksum_sha1: match &checksum {
+						Some(ChecksumValue::Sha1(x)) => {
+							Some(s3_xml::Value(BASE64_STANDARD.encode(&x)))
+						}
+						_ => None,
+					},
+					checksum_sha256: match &checksum {
+						Some(ChecksumValue::Sha256(x)) => {
+							Some(s3_xml::Value(BASE64_STANDARD.encode(&x)))
						}
+						_ => None,
+					},
+				}
 			})
 			.collect(),
 
@@ -319,7 +358,7 @@ pub async fn handle_list_parts(
 	Ok(Response::builder()
 		.header("Content-Type", "application/xml")
-		.body(Body::from(xml.into_bytes()))?)
+		.body(string_body(xml))?)
 }
 
 /*
@@ -345,6 +384,7 @@ struct PartInfo<'a> {
 	timestamp: u64,
 	part_number: u64,
 	size: u64,
+	checksum: Option<ChecksumValue>,
 }
 
 enum ExtractionResult {
@@ -485,6 +525,7 @@ fn fetch_part_info<'a>(
 				timestamp: pk.timestamp,
 				etag,
 				size,
+				checksum: p.checksum,
 			};
 			match parts.last_mut() {
 				Some(lastpart) if lastpart.part_number == pk.part_number => {
@@ -943,10 +984,13 @@ mod tests {
 			timestamp: TS,
 			state: ObjectVersionState::Uploading {
 				multipart: true,
-				headers: ObjectVersionHeaders {
-					content_type: "text/plain".to_string(),
-					other: BTreeMap::<String, String>::new(),
+				encryption: ObjectVersionEncryption::Plaintext {
+					inner: ObjectVersionMetaInner {
+						headers: vec![],
+						checksum: None,
+					},
 				},
+				checksum_algorithm: None,
 			},
 		}
 	}
@@ -1135,6 +1179,7 @@ mod tests {
 					version: uuid,
 					size: Some(3),
 					etag: Some("etag1".into()),
+					checksum: None,
 				},
 			),
 			(
@@ -1146,6 +1191,7 @@ mod tests {
 					version: uuid,
 					size: None,
 					etag: None,
+					checksum: None,
 				},
 			),
 			(
@@ -1157,6 +1203,7 @@ mod tests {
 					version: uuid,
 					size: Some(10),
 					etag: Some("etag2".into()),
+					checksum: None,
 				},
 			),
 			(
@@ -1168,6 +1215,7 @@ mod tests {
 					version: uuid,
 					size: Some(7),
 					etag: Some("etag3".into()),
+					checksum: None,
 				},
 			),
 			(
@@ -1179,6 +1227,7 @@ mod tests {
 					version: uuid,
 					size: Some(5),
 					etag: Some("etag4".into()),
+					checksum: None,
 				},
 			),
 		];
@@ -1217,12 +1266,14 @@ mod tests {
 				PartInfo {
 					etag: "etag1",
 					timestamp: TS,
 					part_number: 1,
-					size: 3
+					size: 3,
+					checksum: None,
 				},
 				PartInfo {
 					etag: "etag2",
 					timestamp: TS,
 					part_number: 3,
+					checksum: None,
 					size: 10
 				},
 			]
@@ -1238,12 +1289,14 @@ mod tests {
 				PartInfo {
 					etag: "etag3",
 					timestamp: TS,
+					checksum: None,
 					part_number: 5,
 					size: 7
 				},
 				PartInfo {
 					etag: "etag4",
 					timestamp: TS,
+					checksum: None,
 					part_number: 8,
 					size: 5
 				},
@@ -1267,24 +1320,28 @@ mod tests {
 				PartInfo {
 					etag: "etag1",
 					timestamp: TS,
+					checksum: None,
 					part_number: 1,
 					size: 3
 				},
 				PartInfo {
 					etag: "etag2",
 					timestamp: TS,
+					checksum: None,
 					part_number: 3,
 					size: 10
 				},
 				PartInfo {
 					etag: "etag3",
 					timestamp: TS,
+					checksum: None,
 					part_number: 5,
 					size: 7
 				},
 				PartInfo {
 					etag: "etag4",
 					timestamp: TS,
+					checksum: None,
 					part_number: 8,
 					size: 5
 				},