Compare commits
31 commits
Author | SHA1 | Date | |
---|---|---|---|
e8a52192d0 | |||
8394f32d16 | |||
cd5fa90c68 | |||
d47af7b173 | |||
1cc0de40f0 | |||
40a140bd20 | |||
71d4cf42f1 | |||
ac98769009 | |||
4279ca95eb | |||
90a57fbc7e | |||
b54a938724 | |||
ff06d3f082 | |||
93eab8eaa3 | |||
43ddc933f9 | |||
9f303f6308 | |||
3be43f3372 | |||
2da448b43f | |||
b2a2d3859f | |||
382e74c798 | |||
64c193e3db | |||
c692f55d5c | |||
7b474855e3 | |||
176715c5b2 | |||
5768bf3622 | |||
def78c5e6f | |||
277a20ec44 | |||
c9ef3e461b | |||
c93008d333 | |||
e5341ca47b | |||
a4f9f19ac3 | |||
|
47e57518ec |
133 changed files with 12456 additions and 3000 deletions
344
.drone.yml
344
.drone.yml
|
@ -2,68 +2,26 @@
|
||||||
kind: pipeline
|
kind: pipeline
|
||||||
name: default
|
name: default
|
||||||
|
|
||||||
workspace:
|
node:
|
||||||
base: /drone/garage
|
nix-daemon: 1
|
||||||
|
|
||||||
volumes:
|
|
||||||
- name: nix_store
|
|
||||||
host:
|
|
||||||
path: /var/lib/drone/nix
|
|
||||||
- name: nix_config
|
|
||||||
temp: {}
|
|
||||||
|
|
||||||
environment:
|
|
||||||
HOME: /drone/garage
|
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: setup nix
|
- name: check formatting
|
||||||
image: nixpkgs/nix:nixos-21.05
|
image: nixpkgs/nix:nixos-22.05
|
||||||
volumes:
|
|
||||||
- name: nix_store
|
|
||||||
path: /nix
|
|
||||||
- name: nix_config
|
|
||||||
path: /etc/nix
|
|
||||||
commands:
|
commands:
|
||||||
- cp nix/nix.conf /etc/nix/nix.conf
|
- nix-shell --attr rust --run "cargo fmt -- --check"
|
||||||
- nix-build --no-build-output --no-out-link shell.nix --arg release false -A inputDerivation
|
|
||||||
|
|
||||||
- name: code quality
|
|
||||||
image: nixpkgs/nix:nixos-21.05
|
|
||||||
volumes:
|
|
||||||
- name: nix_store
|
|
||||||
path: /nix
|
|
||||||
- name: nix_config
|
|
||||||
path: /etc/nix
|
|
||||||
commands:
|
|
||||||
- nix-shell --arg release false --run "cargo fmt -- --check"
|
|
||||||
- nix-shell --arg release false --run "cargo clippy -- --deny warnings"
|
|
||||||
|
|
||||||
- name: build
|
- name: build
|
||||||
image: nixpkgs/nix:nixos-21.05
|
image: nixpkgs/nix:nixos-22.05
|
||||||
volumes:
|
|
||||||
- name: nix_store
|
|
||||||
path: /nix
|
|
||||||
- name: nix_config
|
|
||||||
path: /etc/nix
|
|
||||||
commands:
|
commands:
|
||||||
- nix-build --no-build-output --option log-lines 100 --argstr target x86_64-unknown-linux-musl --arg release false --argstr git_version $DRONE_COMMIT
|
- nix-build --no-build-output --attr clippy.amd64 --argstr git_version ${DRONE_TAG:-$DRONE_COMMIT}
|
||||||
|
|
||||||
- name: unit + func tests
|
- name: unit + func tests
|
||||||
image: nixpkgs/nix:nixos-21.05
|
image: nixpkgs/nix:nixos-22.05
|
||||||
environment:
|
environment:
|
||||||
GARAGE_TEST_INTEGRATION_EXE: result/bin/garage
|
GARAGE_TEST_INTEGRATION_EXE: result/bin/garage
|
||||||
volumes:
|
|
||||||
- name: nix_store
|
|
||||||
path: /nix
|
|
||||||
- name: nix_config
|
|
||||||
path: /etc/nix
|
|
||||||
commands:
|
commands:
|
||||||
- |
|
- nix-build --no-build-output --attr test.amd64
|
||||||
nix-build \
|
|
||||||
--no-build-output \
|
|
||||||
--option log-lines 100 \
|
|
||||||
--argstr target x86_64-unknown-linux-musl \
|
|
||||||
--argstr compileMode test
|
|
||||||
- ./result/bin/garage_api-*
|
- ./result/bin/garage_api-*
|
||||||
- ./result/bin/garage_model-*
|
- ./result/bin/garage_model-*
|
||||||
- ./result/bin/garage_rpc-*
|
- ./result/bin/garage_rpc-*
|
||||||
|
@ -73,16 +31,11 @@ steps:
|
||||||
- ./result/bin/garage-*
|
- ./result/bin/garage-*
|
||||||
- ./result/bin/integration-*
|
- ./result/bin/integration-*
|
||||||
|
|
||||||
- name: smoke-test
|
- name: integration tests
|
||||||
image: nixpkgs/nix:nixos-21.05
|
image: nixpkgs/nix:nixos-22.05
|
||||||
volumes:
|
|
||||||
- name: nix_store
|
|
||||||
path: /nix
|
|
||||||
- name: nix_config
|
|
||||||
path: /etc/nix
|
|
||||||
commands:
|
commands:
|
||||||
- nix-build --no-build-output --argstr target x86_64-unknown-linux-musl --arg release false --argstr git_version $DRONE_COMMIT
|
- nix-build --no-build-output --attr clippy.amd64 --argstr git_version ${DRONE_TAG:-$DRONE_COMMIT}
|
||||||
- nix-shell --arg release false --run ./script/test-smoke.sh || (cat /tmp/garage.log; false)
|
- nix-shell --attr integration --run ./script/test-smoke.sh || (cat /tmp/garage.log; false)
|
||||||
|
|
||||||
trigger:
|
trigger:
|
||||||
event:
|
event:
|
||||||
|
@ -92,78 +45,39 @@ trigger:
|
||||||
- tag
|
- tag
|
||||||
- cron
|
- cron
|
||||||
|
|
||||||
node:
|
|
||||||
nix: 1
|
|
||||||
|
|
||||||
---
|
---
|
||||||
kind: pipeline
|
kind: pipeline
|
||||||
type: docker
|
type: docker
|
||||||
name: release-linux-x86_64
|
name: release-linux-amd64
|
||||||
|
|
||||||
volumes:
|
node:
|
||||||
- name: nix_store
|
nix-daemon: 1
|
||||||
host:
|
|
||||||
path: /var/lib/drone/nix
|
|
||||||
- name: nix_config
|
|
||||||
temp: {}
|
|
||||||
|
|
||||||
environment:
|
|
||||||
TARGET: x86_64-unknown-linux-musl
|
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: setup nix
|
|
||||||
image: nixpkgs/nix:nixos-21.05
|
|
||||||
volumes:
|
|
||||||
- name: nix_store
|
|
||||||
path: /nix
|
|
||||||
- name: nix_config
|
|
||||||
path: /etc/nix
|
|
||||||
commands:
|
|
||||||
- cp nix/nix.conf /etc/nix/nix.conf
|
|
||||||
- nix-build --no-build-output --no-out-link shell.nix -A inputDerivation
|
|
||||||
|
|
||||||
- name: build
|
- name: build
|
||||||
image: nixpkgs/nix:nixos-21.05
|
image: nixpkgs/nix:nixos-22.05
|
||||||
volumes:
|
|
||||||
- name: nix_store
|
|
||||||
path: /nix
|
|
||||||
- name: nix_config
|
|
||||||
path: /etc/nix
|
|
||||||
commands:
|
commands:
|
||||||
- nix-build --no-build-output --argstr target $TARGET --arg release true --argstr git_version $DRONE_COMMIT
|
- nix-build --no-build-output --attr pkgs.amd64.release --argstr git_version ${DRONE_TAG:-$DRONE_COMMIT}
|
||||||
|
- nix-shell --attr rust --run "./script/not-dynamic.sh result/bin/garage"
|
||||||
|
|
||||||
- name: integration
|
- name: integration
|
||||||
image: nixpkgs/nix:nixos-21.05
|
image: nixpkgs/nix:nixos-22.05
|
||||||
volumes:
|
|
||||||
- name: nix_store
|
|
||||||
path: /nix
|
|
||||||
- name: nix_config
|
|
||||||
path: /etc/nix
|
|
||||||
commands:
|
commands:
|
||||||
- nix-shell --run ./script/test-smoke.sh || (cat /tmp/garage.log; false)
|
- nix-shell --attr integration --run ./script/test-smoke.sh || (cat /tmp/garage.log; false)
|
||||||
|
|
||||||
- name: push static binary
|
- name: push static binary
|
||||||
image: nixpkgs/nix:nixos-21.05
|
image: nixpkgs/nix:nixos-22.05
|
||||||
volumes:
|
|
||||||
- name: nix_store
|
|
||||||
path: /nix
|
|
||||||
- name: nix_config
|
|
||||||
path: /etc/nix
|
|
||||||
environment:
|
environment:
|
||||||
AWS_ACCESS_KEY_ID:
|
AWS_ACCESS_KEY_ID:
|
||||||
from_secret: garagehq_aws_access_key_id
|
from_secret: garagehq_aws_access_key_id
|
||||||
AWS_SECRET_ACCESS_KEY:
|
AWS_SECRET_ACCESS_KEY:
|
||||||
from_secret: garagehq_aws_secret_access_key
|
from_secret: garagehq_aws_secret_access_key
|
||||||
|
TARGET: "x86_64-unknown-linux-musl"
|
||||||
commands:
|
commands:
|
||||||
- nix-shell --arg rust false --arg integration false --run "to_s3"
|
- nix-shell --attr release --run "to_s3"
|
||||||
|
|
||||||
- name: docker build and publish
|
- name: docker build and publish
|
||||||
image: nixpkgs/nix:nixos-21.05
|
image: nixpkgs/nix:nixos-22.05
|
||||||
volumes:
|
|
||||||
- name: nix_store
|
|
||||||
path: /nix
|
|
||||||
- name: nix_config
|
|
||||||
path: /etc/nix
|
|
||||||
environment:
|
environment:
|
||||||
DOCKER_AUTH:
|
DOCKER_AUTH:
|
||||||
from_secret: docker_auth
|
from_secret: docker_auth
|
||||||
|
@ -174,7 +88,7 @@ steps:
|
||||||
- mkdir -p /kaniko/.docker
|
- mkdir -p /kaniko/.docker
|
||||||
- echo $DOCKER_AUTH > /kaniko/.docker/config.json
|
- echo $DOCKER_AUTH > /kaniko/.docker/config.json
|
||||||
- export CONTAINER_TAG=${DRONE_TAG:-$DRONE_COMMIT}
|
- export CONTAINER_TAG=${DRONE_TAG:-$DRONE_COMMIT}
|
||||||
- nix-shell --arg rust false --arg integration false --run "to_docker"
|
- nix-shell --attr release --run "to_docker"
|
||||||
|
|
||||||
|
|
||||||
trigger:
|
trigger:
|
||||||
|
@ -182,78 +96,39 @@ trigger:
|
||||||
- promote
|
- promote
|
||||||
- cron
|
- cron
|
||||||
|
|
||||||
node:
|
|
||||||
nix: 1
|
|
||||||
|
|
||||||
---
|
---
|
||||||
kind: pipeline
|
kind: pipeline
|
||||||
type: docker
|
type: docker
|
||||||
name: release-linux-i686
|
name: release-linux-i386
|
||||||
|
|
||||||
volumes:
|
node:
|
||||||
- name: nix_store
|
nix-daemon: 1
|
||||||
host:
|
|
||||||
path: /var/lib/drone/nix
|
|
||||||
- name: nix_config
|
|
||||||
temp: {}
|
|
||||||
|
|
||||||
environment:
|
|
||||||
TARGET: i686-unknown-linux-musl
|
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: setup nix
|
|
||||||
image: nixpkgs/nix:nixos-21.05
|
|
||||||
volumes:
|
|
||||||
- name: nix_store
|
|
||||||
path: /nix
|
|
||||||
- name: nix_config
|
|
||||||
path: /etc/nix
|
|
||||||
commands:
|
|
||||||
- cp nix/nix.conf /etc/nix/nix.conf
|
|
||||||
- nix-build --no-build-output --no-out-link shell.nix -A inputDerivation
|
|
||||||
|
|
||||||
- name: build
|
- name: build
|
||||||
image: nixpkgs/nix:nixos-21.05
|
image: nixpkgs/nix:nixos-22.05
|
||||||
volumes:
|
|
||||||
- name: nix_store
|
|
||||||
path: /nix
|
|
||||||
- name: nix_config
|
|
||||||
path: /etc/nix
|
|
||||||
commands:
|
commands:
|
||||||
- nix-build --no-build-output --argstr target $TARGET --arg release true --argstr git_version $DRONE_COMMIT
|
- nix-build --no-build-output --attr pkgs.i386.release --argstr git_version ${DRONE_TAG:-$DRONE_COMMIT}
|
||||||
|
- nix-shell --attr rust --run "./script/not-dynamic.sh result/bin/garage"
|
||||||
|
|
||||||
- name: integration
|
- name: integration
|
||||||
image: nixpkgs/nix:nixos-21.05
|
image: nixpkgs/nix:nixos-22.05
|
||||||
volumes:
|
|
||||||
- name: nix_store
|
|
||||||
path: /nix
|
|
||||||
- name: nix_config
|
|
||||||
path: /etc/nix
|
|
||||||
commands:
|
commands:
|
||||||
- nix-shell --run ./script/test-smoke.sh || (cat /tmp/garage.log; false)
|
- nix-shell --attr integration --run ./script/test-smoke.sh || (cat /tmp/garage.log; false)
|
||||||
|
|
||||||
- name: push static binary
|
- name: push static binary
|
||||||
image: nixpkgs/nix:nixos-21.05
|
image: nixpkgs/nix:nixos-22.05
|
||||||
volumes:
|
|
||||||
- name: nix_store
|
|
||||||
path: /nix
|
|
||||||
- name: nix_config
|
|
||||||
path: /etc/nix
|
|
||||||
environment:
|
environment:
|
||||||
AWS_ACCESS_KEY_ID:
|
AWS_ACCESS_KEY_ID:
|
||||||
from_secret: garagehq_aws_access_key_id
|
from_secret: garagehq_aws_access_key_id
|
||||||
AWS_SECRET_ACCESS_KEY:
|
AWS_SECRET_ACCESS_KEY:
|
||||||
from_secret: garagehq_aws_secret_access_key
|
from_secret: garagehq_aws_secret_access_key
|
||||||
|
TARGET: "i686-unknown-linux-musl"
|
||||||
commands:
|
commands:
|
||||||
- nix-shell --arg rust false --arg integration false --run "to_s3"
|
- nix-shell --attr release --run "to_s3"
|
||||||
|
|
||||||
- name: docker build and publish
|
- name: docker build and publish
|
||||||
image: nixpkgs/nix:nixos-21.05
|
image: nixpkgs/nix:nixos-22.05
|
||||||
volumes:
|
|
||||||
- name: nix_store
|
|
||||||
path: /nix
|
|
||||||
- name: nix_config
|
|
||||||
path: /etc/nix
|
|
||||||
environment:
|
environment:
|
||||||
DOCKER_AUTH:
|
DOCKER_AUTH:
|
||||||
from_secret: docker_auth
|
from_secret: docker_auth
|
||||||
|
@ -264,75 +139,41 @@ steps:
|
||||||
- mkdir -p /kaniko/.docker
|
- mkdir -p /kaniko/.docker
|
||||||
- echo $DOCKER_AUTH > /kaniko/.docker/config.json
|
- echo $DOCKER_AUTH > /kaniko/.docker/config.json
|
||||||
- export CONTAINER_TAG=${DRONE_TAG:-$DRONE_COMMIT}
|
- export CONTAINER_TAG=${DRONE_TAG:-$DRONE_COMMIT}
|
||||||
- nix-shell --arg rust false --arg integration false --run "to_docker"
|
- nix-shell --attr release --run "to_docker"
|
||||||
|
|
||||||
trigger:
|
trigger:
|
||||||
event:
|
event:
|
||||||
- promote
|
- promote
|
||||||
- cron
|
- cron
|
||||||
|
|
||||||
node:
|
|
||||||
nix: 1
|
|
||||||
|
|
||||||
---
|
---
|
||||||
kind: pipeline
|
kind: pipeline
|
||||||
type: docker
|
type: docker
|
||||||
name: release-linux-aarch64
|
name: release-linux-arm64
|
||||||
|
|
||||||
volumes:
|
node:
|
||||||
- name: nix_store
|
nix-daemon: 1
|
||||||
host:
|
|
||||||
path: /var/lib/drone/nix
|
|
||||||
- name: nix_config
|
|
||||||
temp: {}
|
|
||||||
|
|
||||||
environment:
|
|
||||||
TARGET: aarch64-unknown-linux-musl
|
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: setup nix
|
|
||||||
image: nixpkgs/nix:nixos-21.05
|
|
||||||
volumes:
|
|
||||||
- name: nix_store
|
|
||||||
path: /nix
|
|
||||||
- name: nix_config
|
|
||||||
path: /etc/nix
|
|
||||||
commands:
|
|
||||||
- cp nix/nix.conf /etc/nix/nix.conf
|
|
||||||
- nix-build --no-build-output --no-out-link ./shell.nix --arg rust false --arg integration false -A inputDerivation
|
|
||||||
|
|
||||||
- name: build
|
- name: build
|
||||||
image: nixpkgs/nix:nixos-21.05
|
image: nixpkgs/nix:nixos-22.05
|
||||||
volumes:
|
|
||||||
- name: nix_store
|
|
||||||
path: /nix
|
|
||||||
- name: nix_config
|
|
||||||
path: /etc/nix
|
|
||||||
commands:
|
commands:
|
||||||
- nix-build --no-build-output --argstr target $TARGET --arg release true --argstr git_version $DRONE_COMMIT
|
- nix-build --no-build-output --attr pkgs.arm64.release --argstr git_version ${DRONE_TAG:-$DRONE_COMMIT}
|
||||||
|
- nix-shell --attr rust --run "./script/not-dynamic.sh result/bin/garage"
|
||||||
|
|
||||||
- name: push static binary
|
- name: push static binary
|
||||||
image: nixpkgs/nix:nixos-21.05
|
image: nixpkgs/nix:nixos-22.05
|
||||||
volumes:
|
|
||||||
- name: nix_store
|
|
||||||
path: /nix
|
|
||||||
- name: nix_config
|
|
||||||
path: /etc/nix
|
|
||||||
environment:
|
environment:
|
||||||
AWS_ACCESS_KEY_ID:
|
AWS_ACCESS_KEY_ID:
|
||||||
from_secret: garagehq_aws_access_key_id
|
from_secret: garagehq_aws_access_key_id
|
||||||
AWS_SECRET_ACCESS_KEY:
|
AWS_SECRET_ACCESS_KEY:
|
||||||
from_secret: garagehq_aws_secret_access_key
|
from_secret: garagehq_aws_secret_access_key
|
||||||
|
TARGET: "aarch64-unknown-linux-musl"
|
||||||
commands:
|
commands:
|
||||||
- nix-shell --arg rust false --arg integration false --run "to_s3"
|
- nix-shell --attr release --run "to_s3"
|
||||||
|
|
||||||
- name: docker build and publish
|
- name: docker build and publish
|
||||||
image: nixpkgs/nix:nixos-21.05
|
image: nixpkgs/nix:nixos-22.05
|
||||||
volumes:
|
|
||||||
- name: nix_store
|
|
||||||
path: /nix
|
|
||||||
- name: nix_config
|
|
||||||
path: /etc/nix
|
|
||||||
environment:
|
environment:
|
||||||
DOCKER_AUTH:
|
DOCKER_AUTH:
|
||||||
from_secret: docker_auth
|
from_secret: docker_auth
|
||||||
|
@ -343,75 +184,41 @@ steps:
|
||||||
- mkdir -p /kaniko/.docker
|
- mkdir -p /kaniko/.docker
|
||||||
- echo $DOCKER_AUTH > /kaniko/.docker/config.json
|
- echo $DOCKER_AUTH > /kaniko/.docker/config.json
|
||||||
- export CONTAINER_TAG=${DRONE_TAG:-$DRONE_COMMIT}
|
- export CONTAINER_TAG=${DRONE_TAG:-$DRONE_COMMIT}
|
||||||
- nix-shell --arg rust false --arg integration false --run "to_docker"
|
- nix-shell --attr release --run "to_docker"
|
||||||
|
|
||||||
trigger:
|
trigger:
|
||||||
event:
|
event:
|
||||||
- promote
|
- promote
|
||||||
- cron
|
- cron
|
||||||
|
|
||||||
node:
|
|
||||||
nix: 1
|
|
||||||
|
|
||||||
---
|
---
|
||||||
kind: pipeline
|
kind: pipeline
|
||||||
type: docker
|
type: docker
|
||||||
name: release-linux-armv6l
|
name: release-linux-arm
|
||||||
|
|
||||||
volumes:
|
node:
|
||||||
- name: nix_store
|
nix-daemon: 1
|
||||||
host:
|
|
||||||
path: /var/lib/drone/nix
|
|
||||||
- name: nix_config
|
|
||||||
temp: {}
|
|
||||||
|
|
||||||
environment:
|
|
||||||
TARGET: armv6l-unknown-linux-musleabihf
|
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: setup nix
|
|
||||||
image: nixpkgs/nix:nixos-21.05
|
|
||||||
volumes:
|
|
||||||
- name: nix_store
|
|
||||||
path: /nix
|
|
||||||
- name: nix_config
|
|
||||||
path: /etc/nix
|
|
||||||
commands:
|
|
||||||
- cp nix/nix.conf /etc/nix/nix.conf
|
|
||||||
- nix-build --no-build-output --no-out-link --arg rust false --arg integration false -A inputDerivation
|
|
||||||
|
|
||||||
- name: build
|
- name: build
|
||||||
image: nixpkgs/nix:nixos-21.05
|
image: nixpkgs/nix:nixos-22.05
|
||||||
volumes:
|
|
||||||
- name: nix_store
|
|
||||||
path: /nix
|
|
||||||
- name: nix_config
|
|
||||||
path: /etc/nix
|
|
||||||
commands:
|
commands:
|
||||||
- nix-build --no-build-output --argstr target $TARGET --arg release true --argstr git_version $DRONE_COMMIT
|
- nix-build --no-build-output --attr pkgs.arm.release --argstr git_version ${DRONE_TAG:-$DRONE_COMMIT}
|
||||||
|
- nix-shell --attr rust --run "./script/not-dynamic.sh result/bin/garage"
|
||||||
|
|
||||||
- name: push static binary
|
- name: push static binary
|
||||||
image: nixpkgs/nix:nixos-21.05
|
image: nixpkgs/nix:nixos-22.05
|
||||||
volumes:
|
|
||||||
- name: nix_store
|
|
||||||
path: /nix
|
|
||||||
- name: nix_config
|
|
||||||
path: /etc/nix
|
|
||||||
environment:
|
environment:
|
||||||
AWS_ACCESS_KEY_ID:
|
AWS_ACCESS_KEY_ID:
|
||||||
from_secret: garagehq_aws_access_key_id
|
from_secret: garagehq_aws_access_key_id
|
||||||
AWS_SECRET_ACCESS_KEY:
|
AWS_SECRET_ACCESS_KEY:
|
||||||
from_secret: garagehq_aws_secret_access_key
|
from_secret: garagehq_aws_secret_access_key
|
||||||
|
TARGET: "armv6l-unknown-linux-musleabihf"
|
||||||
commands:
|
commands:
|
||||||
- nix-shell --arg integration false --arg rust false --run "to_s3"
|
- nix-shell --attr release --run "to_s3"
|
||||||
|
|
||||||
- name: docker build and publish
|
- name: docker build and publish
|
||||||
image: nixpkgs/nix:nixos-21.05
|
image: nixpkgs/nix:nixos-22.05
|
||||||
volumes:
|
|
||||||
- name: nix_store
|
|
||||||
path: /nix
|
|
||||||
- name: nix_config
|
|
||||||
path: /etc/nix
|
|
||||||
environment:
|
environment:
|
||||||
DOCKER_AUTH:
|
DOCKER_AUTH:
|
||||||
from_secret: docker_auth
|
from_secret: docker_auth
|
||||||
|
@ -422,32 +229,24 @@ steps:
|
||||||
- mkdir -p /kaniko/.docker
|
- mkdir -p /kaniko/.docker
|
||||||
- echo $DOCKER_AUTH > /kaniko/.docker/config.json
|
- echo $DOCKER_AUTH > /kaniko/.docker/config.json
|
||||||
- export CONTAINER_TAG=${DRONE_TAG:-$DRONE_COMMIT}
|
- export CONTAINER_TAG=${DRONE_TAG:-$DRONE_COMMIT}
|
||||||
- nix-shell --arg rust false --arg integration false --run "to_docker"
|
- nix-shell --attr release --run "to_docker"
|
||||||
|
|
||||||
trigger:
|
trigger:
|
||||||
event:
|
event:
|
||||||
- promote
|
- promote
|
||||||
- cron
|
- cron
|
||||||
|
|
||||||
node:
|
|
||||||
nix: 1
|
|
||||||
|
|
||||||
---
|
---
|
||||||
kind: pipeline
|
kind: pipeline
|
||||||
type: docker
|
type: docker
|
||||||
name: refresh-release-page
|
name: refresh-release-page
|
||||||
|
|
||||||
volumes:
|
node:
|
||||||
- name: nix_store
|
nix-daemon: 1
|
||||||
host:
|
|
||||||
path: /var/lib/drone/nix
|
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: refresh-index
|
- name: refresh-index
|
||||||
image: nixpkgs/nix:nixos-21.05
|
image: nixpkgs/nix:nixos-22.05
|
||||||
volumes:
|
|
||||||
- name: nix_store
|
|
||||||
path: /nix
|
|
||||||
environment:
|
environment:
|
||||||
AWS_ACCESS_KEY_ID:
|
AWS_ACCESS_KEY_ID:
|
||||||
from_secret: garagehq_aws_access_key_id
|
from_secret: garagehq_aws_access_key_id
|
||||||
|
@ -455,24 +254,21 @@ steps:
|
||||||
from_secret: garagehq_aws_secret_access_key
|
from_secret: garagehq_aws_secret_access_key
|
||||||
commands:
|
commands:
|
||||||
- mkdir -p /etc/nix && cp nix/nix.conf /etc/nix/nix.conf
|
- mkdir -p /etc/nix && cp nix/nix.conf /etc/nix/nix.conf
|
||||||
- nix-shell --arg integration false --arg rust false --run "refresh_index"
|
- nix-shell --attr release --run "refresh_index"
|
||||||
|
|
||||||
depends_on:
|
depends_on:
|
||||||
- release-linux-x86_64
|
- release-linux-amd64
|
||||||
- release-linux-i686
|
- release-linux-i386
|
||||||
- release-linux-aarch64
|
- release-linux-arm64
|
||||||
- release-linux-armv6l
|
- release-linux-arm
|
||||||
|
|
||||||
trigger:
|
trigger:
|
||||||
event:
|
event:
|
||||||
- promote
|
- promote
|
||||||
- cron
|
- cron
|
||||||
|
|
||||||
node:
|
|
||||||
nix: 1
|
|
||||||
|
|
||||||
---
|
---
|
||||||
kind: signature
|
kind: signature
|
||||||
hmac: 3fc19d6f9a3555519c8405e3281b2e74289bb802f644740d5481d53df3a01fa4
|
hmac: fa1f98f327abf88486c0c54984287285a4b951efa3776af9dd33b4d782b50815
|
||||||
|
|
||||||
...
|
...
|
||||||
|
|
325
Cargo.lock
generated
325
Cargo.lock
generated
|
@ -29,6 +29,16 @@ version = "0.5.2"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b"
|
checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "assert-json-diff"
|
||||||
|
version = "2.0.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "50f1c3703dd33532d7f0ca049168930e9099ecac238e23cf932f3a69c42f06da"
|
||||||
|
dependencies = [
|
||||||
|
"serde",
|
||||||
|
"serde_json",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "async-stream"
|
name = "async-stream"
|
||||||
version = "0.3.3"
|
version = "0.3.3"
|
||||||
|
@ -393,10 +403,49 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c"
|
checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"bitflags",
|
"bitflags",
|
||||||
"textwrap",
|
"textwrap 0.11.0",
|
||||||
"unicode-width",
|
"unicode-width",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "clap"
|
||||||
|
version = "3.1.18"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "d2dbdf4bdacb33466e854ce889eee8dfd5729abf7ccd7664d0a2d60cd384440b"
|
||||||
|
dependencies = [
|
||||||
|
"atty",
|
||||||
|
"bitflags",
|
||||||
|
"clap_derive",
|
||||||
|
"clap_lex",
|
||||||
|
"indexmap",
|
||||||
|
"lazy_static",
|
||||||
|
"strsim",
|
||||||
|
"termcolor",
|
||||||
|
"textwrap 0.15.0",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "clap_derive"
|
||||||
|
version = "3.1.18"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "25320346e922cffe59c0bbc5410c8d8784509efb321488971081313cb1e1a33c"
|
||||||
|
dependencies = [
|
||||||
|
"heck 0.4.0",
|
||||||
|
"proc-macro-error",
|
||||||
|
"proc-macro2",
|
||||||
|
"quote",
|
||||||
|
"syn",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "clap_lex"
|
||||||
|
version = "0.2.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "a37c35f1112dad5e6e0b1adaff798507497a18fceeb30cceb3bae7d1427b9213"
|
||||||
|
dependencies = [
|
||||||
|
"os_str_bytes",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "cloudabi"
|
name = "cloudabi"
|
||||||
version = "0.0.3"
|
version = "0.0.3"
|
||||||
|
@ -494,6 +543,16 @@ dependencies = [
|
||||||
"subtle",
|
"subtle",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "crypto-mac"
|
||||||
|
version = "0.11.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714"
|
||||||
|
dependencies = [
|
||||||
|
"generic-array",
|
||||||
|
"subtle",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "ct-logs"
|
name = "ct-logs"
|
||||||
version = "0.8.0"
|
version = "0.8.0"
|
||||||
|
@ -819,33 +878,38 @@ dependencies = [
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "garage"
|
name = "garage"
|
||||||
version = "0.7.0"
|
version = "0.7.3"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
|
"assert-json-diff",
|
||||||
"async-trait",
|
"async-trait",
|
||||||
"aws-sdk-s3",
|
"aws-sdk-s3",
|
||||||
|
"base64",
|
||||||
"bytes 1.1.0",
|
"bytes 1.1.0",
|
||||||
"chrono",
|
"chrono",
|
||||||
"futures",
|
"futures",
|
||||||
"futures-util",
|
"futures-util",
|
||||||
"garage_admin",
|
|
||||||
"garage_api",
|
"garage_api",
|
||||||
"garage_model 0.7.0",
|
"garage_model 0.7.3",
|
||||||
"garage_rpc 0.7.0",
|
"garage_rpc 0.7.3",
|
||||||
"garage_table 0.7.0",
|
"garage_table 0.7.3",
|
||||||
"garage_util 0.7.0",
|
"garage_util 0.7.3",
|
||||||
"garage_web",
|
"garage_web",
|
||||||
"git-version",
|
|
||||||
"hex",
|
"hex",
|
||||||
"hmac",
|
"hmac 0.10.1",
|
||||||
"http",
|
"http",
|
||||||
"hyper",
|
"hyper",
|
||||||
"kuska-sodiumoxide",
|
"kuska-sodiumoxide",
|
||||||
"netapp 0.4.2",
|
"netapp 0.4.4",
|
||||||
|
"opentelemetry",
|
||||||
|
"opentelemetry-otlp",
|
||||||
|
"opentelemetry-prometheus",
|
||||||
"pretty_env_logger",
|
"pretty_env_logger",
|
||||||
|
"prometheus",
|
||||||
"rand 0.8.5",
|
"rand 0.8.5",
|
||||||
"rmp-serde 0.15.5",
|
"rmp-serde 0.15.5",
|
||||||
"serde",
|
"serde",
|
||||||
"serde_bytes",
|
"serde_bytes",
|
||||||
|
"serde_json",
|
||||||
"sha2",
|
"sha2",
|
||||||
"sled",
|
"sled",
|
||||||
"static_init",
|
"static_init",
|
||||||
|
@ -855,27 +919,11 @@ dependencies = [
|
||||||
"tracing",
|
"tracing",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "garage_admin"
|
|
||||||
version = "0.7.0"
|
|
||||||
dependencies = [
|
|
||||||
"futures",
|
|
||||||
"futures-util",
|
|
||||||
"garage_util 0.7.0",
|
|
||||||
"hex",
|
|
||||||
"http",
|
|
||||||
"hyper",
|
|
||||||
"opentelemetry",
|
|
||||||
"opentelemetry-otlp",
|
|
||||||
"opentelemetry-prometheus",
|
|
||||||
"prometheus",
|
|
||||||
"tracing",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "garage_api"
|
name = "garage_api"
|
||||||
version = "0.7.0"
|
version = "0.7.3"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
|
"async-trait",
|
||||||
"base64",
|
"base64",
|
||||||
"bytes 1.1.0",
|
"bytes 1.1.0",
|
||||||
"chrono",
|
"chrono",
|
||||||
|
@ -885,11 +933,12 @@ dependencies = [
|
||||||
"futures",
|
"futures",
|
||||||
"futures-util",
|
"futures-util",
|
||||||
"garage_block",
|
"garage_block",
|
||||||
"garage_model 0.7.0",
|
"garage_model 0.7.3",
|
||||||
"garage_table 0.7.0",
|
"garage_rpc 0.7.3",
|
||||||
"garage_util 0.7.0",
|
"garage_table 0.7.3",
|
||||||
|
"garage_util 0.7.3",
|
||||||
"hex",
|
"hex",
|
||||||
"hmac",
|
"hmac 0.10.1",
|
||||||
"http",
|
"http",
|
||||||
"http-range",
|
"http-range",
|
||||||
"httpdate 0.3.2",
|
"httpdate 0.3.2",
|
||||||
|
@ -899,8 +948,11 @@ dependencies = [
|
||||||
"multer",
|
"multer",
|
||||||
"nom",
|
"nom",
|
||||||
"opentelemetry",
|
"opentelemetry",
|
||||||
|
"opentelemetry-otlp",
|
||||||
|
"opentelemetry-prometheus",
|
||||||
"percent-encoding",
|
"percent-encoding",
|
||||||
"pin-project 1.0.10",
|
"pin-project 1.0.10",
|
||||||
|
"prometheus",
|
||||||
"quick-xml",
|
"quick-xml",
|
||||||
"roxmltree",
|
"roxmltree",
|
||||||
"serde",
|
"serde",
|
||||||
|
@ -914,15 +966,15 @@ dependencies = [
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "garage_block"
|
name = "garage_block"
|
||||||
version = "0.7.0"
|
version = "0.7.3"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"async-trait",
|
"async-trait",
|
||||||
"bytes 1.1.0",
|
"bytes 1.1.0",
|
||||||
"futures",
|
"futures",
|
||||||
"futures-util",
|
"futures-util",
|
||||||
"garage_rpc 0.7.0",
|
"garage_rpc 0.7.3",
|
||||||
"garage_table 0.7.0",
|
"garage_table 0.7.3",
|
||||||
"garage_util 0.7.0",
|
"garage_util 0.7.3",
|
||||||
"hex",
|
"hex",
|
||||||
"opentelemetry",
|
"opentelemetry",
|
||||||
"rand 0.8.5",
|
"rand 0.8.5",
|
||||||
|
@ -962,20 +1014,22 @@ dependencies = [
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "garage_model"
|
name = "garage_model"
|
||||||
version = "0.7.0"
|
version = "0.7.3"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"arc-swap",
|
"arc-swap",
|
||||||
"async-trait",
|
"async-trait",
|
||||||
|
"base64",
|
||||||
|
"blake2",
|
||||||
"err-derive 0.3.1",
|
"err-derive 0.3.1",
|
||||||
"futures",
|
"futures",
|
||||||
"futures-util",
|
"futures-util",
|
||||||
"garage_block",
|
"garage_block",
|
||||||
"garage_model 0.5.1",
|
"garage_model 0.5.1",
|
||||||
"garage_rpc 0.7.0",
|
"garage_rpc 0.7.3",
|
||||||
"garage_table 0.7.0",
|
"garage_table 0.7.3",
|
||||||
"garage_util 0.7.0",
|
"garage_util 0.7.3",
|
||||||
"hex",
|
"hex",
|
||||||
"netapp 0.4.2",
|
"netapp 0.4.4",
|
||||||
"opentelemetry",
|
"opentelemetry",
|
||||||
"rand 0.8.5",
|
"rand 0.8.5",
|
||||||
"rmp-serde 0.15.5",
|
"rmp-serde 0.15.5",
|
||||||
|
@ -1016,22 +1070,21 @@ dependencies = [
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "garage_rpc"
|
name = "garage_rpc"
|
||||||
version = "0.7.0"
|
version = "0.7.3"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"arc-swap",
|
"arc-swap",
|
||||||
"async-trait",
|
"async-trait",
|
||||||
"bytes 1.1.0",
|
"bytes 1.1.0",
|
||||||
"futures",
|
"futures",
|
||||||
"futures-util",
|
"futures-util",
|
||||||
"garage_admin",
|
"garage_util 0.7.3",
|
||||||
"garage_util 0.7.0",
|
|
||||||
"gethostname",
|
"gethostname",
|
||||||
"hex",
|
"hex",
|
||||||
"hyper",
|
"hyper",
|
||||||
"k8s-openapi",
|
"k8s-openapi",
|
||||||
"kube",
|
"kube",
|
||||||
"kuska-sodiumoxide",
|
"kuska-sodiumoxide",
|
||||||
"netapp 0.4.2",
|
"netapp 0.4.4",
|
||||||
"openssl",
|
"openssl",
|
||||||
"opentelemetry",
|
"opentelemetry",
|
||||||
"pnet_datalink",
|
"pnet_datalink",
|
||||||
|
@ -1070,14 +1123,14 @@ dependencies = [
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "garage_table"
|
name = "garage_table"
|
||||||
version = "0.7.0"
|
version = "0.7.3"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"async-trait",
|
"async-trait",
|
||||||
"bytes 1.1.0",
|
"bytes 1.1.0",
|
||||||
"futures",
|
"futures",
|
||||||
"futures-util",
|
"futures-util",
|
||||||
"garage_rpc 0.7.0",
|
"garage_rpc 0.7.3",
|
||||||
"garage_util 0.7.0",
|
"garage_util 0.7.3",
|
||||||
"hexdump",
|
"hexdump",
|
||||||
"opentelemetry",
|
"opentelemetry",
|
||||||
"rand 0.8.5",
|
"rand 0.8.5",
|
||||||
|
@ -1117,16 +1170,17 @@ dependencies = [
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "garage_util"
|
name = "garage_util"
|
||||||
version = "0.7.0"
|
version = "0.7.3"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"blake2",
|
"blake2",
|
||||||
"chrono",
|
"chrono",
|
||||||
"err-derive 0.3.1",
|
"err-derive 0.3.1",
|
||||||
"futures",
|
"futures",
|
||||||
|
"git-version",
|
||||||
"hex",
|
"hex",
|
||||||
"http",
|
"http",
|
||||||
"hyper",
|
"hyper",
|
||||||
"netapp 0.4.2",
|
"netapp 0.4.4",
|
||||||
"opentelemetry",
|
"opentelemetry",
|
||||||
"rand 0.8.5",
|
"rand 0.8.5",
|
||||||
"rmp-serde 0.15.5",
|
"rmp-serde 0.15.5",
|
||||||
|
@ -1142,14 +1196,14 @@ dependencies = [
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "garage_web"
|
name = "garage_web"
|
||||||
version = "0.7.0"
|
version = "0.7.3"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"err-derive 0.3.1",
|
"err-derive 0.3.1",
|
||||||
"futures",
|
"futures",
|
||||||
"garage_api",
|
"garage_api",
|
||||||
"garage_model 0.7.0",
|
"garage_model 0.7.3",
|
||||||
"garage_table 0.7.0",
|
"garage_table 0.7.3",
|
||||||
"garage_util 0.7.0",
|
"garage_util 0.7.3",
|
||||||
"http",
|
"http",
|
||||||
"hyper",
|
"hyper",
|
||||||
"opentelemetry",
|
"opentelemetry",
|
||||||
|
@ -1244,6 +1298,12 @@ dependencies = [
|
||||||
"unicode-segmentation",
|
"unicode-segmentation",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "heck"
|
||||||
|
version = "0.4.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "hermit-abi"
|
name = "hermit-abi"
|
||||||
version = "0.1.19"
|
version = "0.1.19"
|
||||||
|
@ -1279,6 +1339,16 @@ dependencies = [
|
||||||
"digest",
|
"digest",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "hmac"
|
||||||
|
version = "0.11.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b"
|
||||||
|
dependencies = [
|
||||||
|
"crypto-mac 0.11.1",
|
||||||
|
"digest",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "http"
|
name = "http"
|
||||||
version = "0.2.6"
|
version = "0.2.6"
|
||||||
|
@ -1506,6 +1576,24 @@ dependencies = [
|
||||||
"serde_json",
|
"serde_json",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "k2v-client"
|
||||||
|
version = "0.1.0"
|
||||||
|
dependencies = [
|
||||||
|
"base64",
|
||||||
|
"clap 3.1.18",
|
||||||
|
"garage_util 0.7.3",
|
||||||
|
"http",
|
||||||
|
"log",
|
||||||
|
"rusoto_core",
|
||||||
|
"rusoto_credential",
|
||||||
|
"rusoto_signature",
|
||||||
|
"serde",
|
||||||
|
"serde_json",
|
||||||
|
"thiserror",
|
||||||
|
"tokio",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "k8s-openapi"
|
name = "k8s-openapi"
|
||||||
version = "0.13.1"
|
version = "0.13.1"
|
||||||
|
@ -1833,9 +1921,9 @@ dependencies = [
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "netapp"
|
name = "netapp"
|
||||||
version = "0.4.2"
|
version = "0.4.4"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "d1a19af9ad24e6cdb166e2c5882a7ff9326cab8d45c9d82379fa638c0cbc84df"
|
checksum = "c6419a4b836774192e13fedb05c0e5f414ee8df9ca0c467456a0bacde06c29ee"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"arc-swap",
|
"arc-swap",
|
||||||
"async-trait",
|
"async-trait",
|
||||||
|
@ -2040,6 +2128,12 @@ dependencies = [
|
||||||
"num-traits",
|
"num-traits",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "os_str_bytes"
|
||||||
|
version = "6.0.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "029d8d0b2f198229de29dca79676f2738ff952edf3fde542eb8bf94d8c21b435"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "parking_lot"
|
name = "parking_lot"
|
||||||
version = "0.11.2"
|
version = "0.11.2"
|
||||||
|
@ -2289,7 +2383,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "62941722fb675d463659e49c4f3fe1fe792ff24fe5bbaa9c08cd3b98a1c354f5"
|
checksum = "62941722fb675d463659e49c4f3fe1fe792ff24fe5bbaa9c08cd3b98a1c354f5"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"bytes 1.1.0",
|
"bytes 1.1.0",
|
||||||
"heck",
|
"heck 0.3.3",
|
||||||
"itertools 0.10.3",
|
"itertools 0.10.3",
|
||||||
"lazy_static",
|
"lazy_static",
|
||||||
"log",
|
"log",
|
||||||
|
@ -2516,6 +2610,75 @@ dependencies = [
|
||||||
"xmlparser",
|
"xmlparser",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "rusoto_core"
|
||||||
|
version = "0.48.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "1db30db44ea73551326269adcf7a2169428a054f14faf9e1768f2163494f2fa2"
|
||||||
|
dependencies = [
|
||||||
|
"async-trait",
|
||||||
|
"base64",
|
||||||
|
"bytes 1.1.0",
|
||||||
|
"crc32fast",
|
||||||
|
"futures",
|
||||||
|
"http",
|
||||||
|
"hyper",
|
||||||
|
"hyper-tls",
|
||||||
|
"lazy_static",
|
||||||
|
"log",
|
||||||
|
"rusoto_credential",
|
||||||
|
"rusoto_signature",
|
||||||
|
"rustc_version",
|
||||||
|
"serde",
|
||||||
|
"serde_json",
|
||||||
|
"tokio",
|
||||||
|
"xml-rs",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "rusoto_credential"
|
||||||
|
version = "0.48.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "ee0a6c13db5aad6047b6a44ef023dbbc21a056b6dab5be3b79ce4283d5c02d05"
|
||||||
|
dependencies = [
|
||||||
|
"async-trait",
|
||||||
|
"chrono",
|
||||||
|
"dirs-next",
|
||||||
|
"futures",
|
||||||
|
"hyper",
|
||||||
|
"serde",
|
||||||
|
"serde_json",
|
||||||
|
"shlex",
|
||||||
|
"tokio",
|
||||||
|
"zeroize",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "rusoto_signature"
|
||||||
|
version = "0.48.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "a5ae95491c8b4847931e291b151127eccd6ff8ca13f33603eb3d0035ecb05272"
|
||||||
|
dependencies = [
|
||||||
|
"base64",
|
||||||
|
"bytes 1.1.0",
|
||||||
|
"chrono",
|
||||||
|
"digest",
|
||||||
|
"futures",
|
||||||
|
"hex",
|
||||||
|
"hmac 0.11.0",
|
||||||
|
"http",
|
||||||
|
"hyper",
|
||||||
|
"log",
|
||||||
|
"md-5",
|
||||||
|
"percent-encoding",
|
||||||
|
"pin-project-lite",
|
||||||
|
"rusoto_credential",
|
||||||
|
"rustc_version",
|
||||||
|
"serde",
|
||||||
|
"sha2",
|
||||||
|
"tokio",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "rustc_version"
|
name = "rustc_version"
|
||||||
version = "0.4.0"
|
version = "0.4.0"
|
||||||
|
@ -2652,9 +2815,9 @@ checksum = "a4a3381e03edd24287172047536f20cabde766e2cd3e65e6b00fb3af51c4f38d"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "serde"
|
name = "serde"
|
||||||
version = "1.0.136"
|
version = "1.0.137"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "ce31e24b01e1e524df96f1c2fdd054405f8d7376249a5110886fb4b658484789"
|
checksum = "61ea8d54c77f8315140a05f4c7237403bf38b72704d031543aa1d16abbf517d1"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"serde_derive",
|
"serde_derive",
|
||||||
]
|
]
|
||||||
|
@ -2680,9 +2843,9 @@ dependencies = [
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "serde_derive"
|
name = "serde_derive"
|
||||||
version = "1.0.136"
|
version = "1.0.137"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "08597e7152fcd306f41838ed3e37be9eaeed2b61c42e2117266a554fab4662f9"
|
checksum = "1f26faba0c3959972377d3b2d306ee9f71faee9714294e41bb777f83f88578be"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
|
@ -2702,9 +2865,9 @@ dependencies = [
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "serde_json"
|
name = "serde_json"
|
||||||
version = "1.0.79"
|
version = "1.0.81"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "8e8d9fa5c3b304765ce1fd9c4c8a3de2c8db365a5b91be52f186efc675681d95"
|
checksum = "9b7ce2b32a1aed03c558dc61a5cd328f15aff2dbc17daad8fb8af04d2100e15c"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"indexmap",
|
"indexmap",
|
||||||
"itoa",
|
"itoa",
|
||||||
|
@ -2737,6 +2900,12 @@ dependencies = [
|
||||||
"opaque-debug",
|
"opaque-debug",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "shlex"
|
||||||
|
version = "1.1.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "signal-hook-registry"
|
name = "signal-hook-registry"
|
||||||
version = "1.4.0"
|
version = "1.4.0"
|
||||||
|
@ -2859,7 +3028,7 @@ version = "0.3.26"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "0c6b5c64445ba8094a6ab0c3cd2ad323e07171012d9c98b0b15651daf1787a10"
|
checksum = "0c6b5c64445ba8094a6ab0c3cd2ad323e07171012d9c98b0b15651daf1787a10"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"clap",
|
"clap 2.34.0",
|
||||||
"lazy_static",
|
"lazy_static",
|
||||||
"structopt-derive",
|
"structopt-derive",
|
||||||
]
|
]
|
||||||
|
@ -2870,7 +3039,7 @@ version = "0.4.18"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0"
|
checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"heck",
|
"heck 0.3.3",
|
||||||
"proc-macro-error",
|
"proc-macro-error",
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
|
@ -2885,9 +3054,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "syn"
|
name = "syn"
|
||||||
version = "1.0.89"
|
version = "1.0.94"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "ea297be220d52398dcc07ce15a209fce436d361735ac1db700cab3b6cdfb9f54"
|
checksum = "a07e33e919ebcd69113d5be0e4d70c5707004ff45188910106854f38b960df4a"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
|
@ -2939,19 +3108,25 @@ dependencies = [
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "thiserror"
|
name = "textwrap"
|
||||||
version = "1.0.30"
|
version = "0.15.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417"
|
checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "thiserror"
|
||||||
|
version = "1.0.31"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "bd829fe32373d27f76265620b5309d0340cb8550f523c1dda251d6298069069a"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"thiserror-impl",
|
"thiserror-impl",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "thiserror-impl"
|
name = "thiserror-impl"
|
||||||
version = "1.0.30"
|
version = "1.0.31"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b"
|
checksum = "0396bc89e626244658bef819e22d0cc459e795a5ebe878e6ec336d1674a8d79a"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
|
@ -3518,6 +3693,12 @@ version = "0.32.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "504a2476202769977a040c6364301a3f65d0cc9e3fb08600b2bda150a0488316"
|
checksum = "504a2476202769977a040c6364301a3f65d0cc9e3fb08600b2bda150a0488316"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "xml-rs"
|
||||||
|
version = "0.8.4"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "d2d7d3948613f75c98fd9328cfdcc45acc4d360655289d0a7d4ec931392200a3"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "xmlparser"
|
name = "xmlparser"
|
||||||
version = "0.13.3"
|
version = "0.13.3"
|
||||||
|
|
|
@ -5,12 +5,14 @@ members = [
|
||||||
"src/table",
|
"src/table",
|
||||||
"src/block",
|
"src/block",
|
||||||
"src/model",
|
"src/model",
|
||||||
"src/admin",
|
|
||||||
"src/api",
|
"src/api",
|
||||||
"src/web",
|
"src/web",
|
||||||
"src/garage"
|
"src/garage",
|
||||||
|
"src/k2v-client",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
default-members = ["src/garage"]
|
||||||
|
|
||||||
[profile.dev]
|
[profile.dev]
|
||||||
lto = "off"
|
lto = "off"
|
||||||
|
|
||||||
|
|
2
Makefile
2
Makefile
|
@ -1,7 +1,7 @@
|
||||||
.PHONY: doc all release shell
|
.PHONY: doc all release shell
|
||||||
|
|
||||||
all:
|
all:
|
||||||
clear; cargo build
|
clear; cargo build --all-features
|
||||||
|
|
||||||
doc:
|
doc:
|
||||||
cd doc/book; mdbook build
|
cd doc/book; mdbook build
|
||||||
|
|
156
default.nix
156
default.nix
|
@ -1,147 +1,33 @@
|
||||||
{
|
{
|
||||||
system ? builtins.currentSystem,
|
system ? builtins.currentSystem,
|
||||||
release ? false,
|
|
||||||
target ? "x86_64-unknown-linux-musl",
|
|
||||||
compileMode ? null,
|
|
||||||
git_version ? null,
|
git_version ? null,
|
||||||
}:
|
}:
|
||||||
|
|
||||||
with import ./nix/common.nix;
|
with import ./nix/common.nix;
|
||||||
|
|
||||||
let
|
let
|
||||||
crossSystem = { config = target; };
|
pkgs = import pkgsSrc { };
|
||||||
in let
|
compile = import ./nix/compile.nix;
|
||||||
log = v: builtins.trace v v;
|
build_debug_and_release = (target: {
|
||||||
|
debug = (compile { inherit target git_version; release = false; }).workspace.garage { compileMode = "build"; };
|
||||||
|
release = (compile { inherit target git_version; release = true; }).workspace.garage { compileMode = "build"; };
|
||||||
|
});
|
||||||
|
test = (rustPkgs: pkgs.symlinkJoin {
|
||||||
|
name ="garage-tests";
|
||||||
|
paths = builtins.map (key: rustPkgs.workspace.${key} { compileMode = "test"; }) (builtins.attrNames rustPkgs.workspace);
|
||||||
|
});
|
||||||
|
|
||||||
pkgs = import pkgsSrc {
|
in {
|
||||||
inherit system crossSystem;
|
pkgs = {
|
||||||
overlays = [ cargo2nixOverlay ];
|
amd64 = build_debug_and_release "x86_64-unknown-linux-musl";
|
||||||
|
i386 = build_debug_and_release "i686-unknown-linux-musl";
|
||||||
|
arm64 = build_debug_and_release "aarch64-unknown-linux-musl";
|
||||||
|
arm = build_debug_and_release "armv6l-unknown-linux-musleabihf";
|
||||||
};
|
};
|
||||||
|
test = {
|
||||||
|
amd64 = test (compile { inherit git_version; target = "x86_64-unknown-linux-musl"; });
|
||||||
/*
|
|
||||||
Rust and Nix triples are not the same. Cargo2nix has a dedicated library
|
|
||||||
to convert Nix triples to Rust ones. We need this conversion as we want to
|
|
||||||
set later options linked to our (rust) target in a generic way. Not only
|
|
||||||
the triple terminology is different, but also the "roles" are named differently.
|
|
||||||
Nix uses a build/host/target terminology where Nix's "host" maps to Cargo's "target".
|
|
||||||
*/
|
|
||||||
rustTarget = log (pkgs.rustBuilder.rustLib.rustTriple pkgs.stdenv.hostPlatform);
|
|
||||||
|
|
||||||
/*
|
|
||||||
Cargo2nix is built for rustOverlay which installs Rust from Mozilla releases.
|
|
||||||
We want our own Rust to avoid incompatibilities, like we had with musl 1.2.0.
|
|
||||||
rustc was built with musl < 1.2.0 and nix shipped musl >= 1.2.0 which lead to compilation breakage.
|
|
||||||
So we want a Rust release that is bound to our Nix repository to avoid these problems.
|
|
||||||
See here for more info: https://musl.libc.org/time64.html
|
|
||||||
Because Cargo2nix does not support the Rust environment shipped by NixOS,
|
|
||||||
we emulate the structure of the Rust object created by rustOverlay.
|
|
||||||
In practise, rustOverlay ships rustc+cargo in a single derivation while
|
|
||||||
NixOS ships them in separate ones. We reunite them with symlinkJoin.
|
|
||||||
*/
|
|
||||||
rustChannel = pkgs.symlinkJoin {
|
|
||||||
name ="rust-channel";
|
|
||||||
paths = [
|
|
||||||
pkgs.rustPlatform.rust.rustc
|
|
||||||
pkgs.rustPlatform.rust.cargo
|
|
||||||
];
|
|
||||||
};
|
};
|
||||||
|
clippy = {
|
||||||
/*
|
amd64 = (compile { inherit git_version; compiler = "clippy"; }).workspace.garage { compileMode = "build"; } ;
|
||||||
Cargo2nix provides many overrides by default, you can take inspiration from them:
|
|
||||||
https://github.com/cargo2nix/cargo2nix/blob/master/overlay/overrides.nix
|
|
||||||
|
|
||||||
You can have a complete list of the available options by looking at the overriden object, mkcrate:
|
|
||||||
https://github.com/cargo2nix/cargo2nix/blob/master/overlay/mkcrate.nix
|
|
||||||
*/
|
|
||||||
overrides = pkgs.rustBuilder.overrides.all ++ [
|
|
||||||
/*
|
|
||||||
[1] We need to alter Nix hardening to be able to statically compile: PIE,
|
|
||||||
Position Independent Executables seems to be supported only on amd64. Having
|
|
||||||
this flags set either make our executables crash or compile as dynamic on many platforms.
|
|
||||||
In the following section codegenOpts, we reactive it for the supported targets
|
|
||||||
(only amd64 curently) through the `-static-pie` flag. PIE is a feature used
|
|
||||||
by ASLR, which helps mitigate security issues.
|
|
||||||
Learn more about Nix Hardening: https://github.com/NixOS/nixpkgs/blob/master/pkgs/build-support/cc-wrapper/add-hardening.sh
|
|
||||||
|
|
||||||
[2] We want to inject the git version while keeping the build deterministic.
|
|
||||||
As we do not want to consider the .git folder as part of the input source,
|
|
||||||
we ask the user (the CI often) to pass the value to Nix.
|
|
||||||
*/
|
|
||||||
(pkgs.rustBuilder.rustLib.makeOverride {
|
|
||||||
name = "garage";
|
|
||||||
overrideAttrs = drv:
|
|
||||||
/* [1] */ { hardeningDisable = [ "pie" ]; }
|
|
||||||
//
|
|
||||||
/* [2] */ (if git_version != null then {
|
|
||||||
preConfigure = ''
|
|
||||||
${drv.preConfigure or ""}
|
|
||||||
export GIT_VERSION="${git_version}"
|
|
||||||
'';
|
|
||||||
} else {});
|
|
||||||
})
|
|
||||||
|
|
||||||
/*
|
|
||||||
We ship some parts of the code disabled by default by putting them behind a flag.
|
|
||||||
It speeds up the compilation (when the feature is not required) and released crates have less dependency by default (less attack surface, disk space, etc.).
|
|
||||||
But we want to ship these additional features when we release Garage.
|
|
||||||
In the end, we chose to exclude all features from debug builds while putting (all of) them in the release builds.
|
|
||||||
Currently, the only feature of Garage is kubernetes-discovery from the garage_rpc crate.
|
|
||||||
*/
|
|
||||||
(pkgs.rustBuilder.rustLib.makeOverride {
|
|
||||||
name = "garage_rpc";
|
|
||||||
overrideArgs = old:
|
|
||||||
{
|
|
||||||
features = if release then [ "kubernetes-discovery" ] else [];
|
|
||||||
};
|
|
||||||
})
|
|
||||||
];
|
|
||||||
|
|
||||||
packageFun = import ./Cargo.nix;
|
|
||||||
|
|
||||||
/*
|
|
||||||
We compile fully static binaries with musl to simplify deployment on most systems.
|
|
||||||
When possible, we reactivate PIE hardening (see above).
|
|
||||||
|
|
||||||
Also, if you set the RUSTFLAGS environment variable, the following parameters will
|
|
||||||
be ignored.
|
|
||||||
|
|
||||||
For more information on static builds, please refer to Rust's RFC 1721.
|
|
||||||
https://rust-lang.github.io/rfcs/1721-crt-static.html#specifying-dynamicstatic-c-runtime-linkage
|
|
||||||
*/
|
|
||||||
|
|
||||||
codegenOpts = {
|
|
||||||
"armv6l-unknown-linux-musleabihf" = [ "target-feature=+crt-static" "link-arg=-static" ]; /* compile as dynamic with static-pie */
|
|
||||||
"aarch64-unknown-linux-musl" = [ "target-feature=+crt-static" "link-arg=-static" ]; /* segfault with static-pie */
|
|
||||||
"i686-unknown-linux-musl" = [ "target-feature=+crt-static" "link-arg=-static" ]; /* segfault with static-pie */
|
|
||||||
"x86_64-unknown-linux-musl" = [ "target-feature=+crt-static" "link-arg=-static-pie" ];
|
|
||||||
};
|
};
|
||||||
|
}
|
||||||
/*
|
|
||||||
The following definition is not elegant as we use a low level function of Cargo2nix
|
|
||||||
that enables us to pass our custom rustChannel object. We need this low level definition
|
|
||||||
to pass Nix's Rust toolchains instead of Mozilla's one.
|
|
||||||
|
|
||||||
target is mandatory but must be kept to null to allow cargo2nix to set it to the appropriate value
|
|
||||||
for each crate.
|
|
||||||
*/
|
|
||||||
rustPkgs = pkgs.rustBuilder.makePackageSet {
|
|
||||||
inherit packageFun rustChannel release codegenOpts;
|
|
||||||
packageOverrides = overrides;
|
|
||||||
target = null;
|
|
||||||
|
|
||||||
buildRustPackages = pkgs.buildPackages.rustBuilder.makePackageSet {
|
|
||||||
inherit rustChannel packageFun codegenOpts;
|
|
||||||
packageOverrides = overrides;
|
|
||||||
target = null;
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
|
|
||||||
in
|
|
||||||
if compileMode == "test"
|
|
||||||
then pkgs.symlinkJoin {
|
|
||||||
name ="garage-tests";
|
|
||||||
paths = builtins.map (key: rustPkgs.workspace.${key} { inherit compileMode; }) (builtins.attrNames rustPkgs.workspace);
|
|
||||||
}
|
|
||||||
else rustPkgs.workspace.garage { inherit compileMode; }
|
|
||||||
|
|
|
@ -17,6 +17,61 @@ If you still want to use Borg, you can use it with `rclone mount`.
|
||||||
|
|
||||||
## Restic
|
## Restic
|
||||||
|
|
||||||
|
Create your key and bucket:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
garage key new my-key
|
||||||
|
garage bucket create backup
|
||||||
|
garage bucket allow backup --read --write --key my-key
|
||||||
|
```
|
||||||
|
|
||||||
|
Then register your Key ID and Secret key in your environment:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export AWS_ACCESS_KEY_ID=GKxxx
|
||||||
|
export AWS_SECRET_ACCESS_KEY=xxxx
|
||||||
|
```
|
||||||
|
|
||||||
|
Configure restic from environment too:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export RESTIC_REPOSITORY="s3:http://localhost:3900/backups"
|
||||||
|
|
||||||
|
echo "Generated password (save it safely): $(openssl rand -base64 32)"
|
||||||
|
export RESTIC_PASSWORD=xxx # copy paste your generated password here
|
||||||
|
```
|
||||||
|
|
||||||
|
Do not forget to save your password safely (in your password manager or print it). It will be needed to decrypt your backups.
|
||||||
|
|
||||||
|
Now you can use restic:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Initialize the bucket, must be run once
|
||||||
|
restic init
|
||||||
|
|
||||||
|
# Backup your PostgreSQL database
|
||||||
|
# (We suppose your PostgreSQL daemon is stopped for all commands)
|
||||||
|
restic backup /var/lib/postgresql
|
||||||
|
|
||||||
|
# Show backup history
|
||||||
|
restic snapshots
|
||||||
|
|
||||||
|
# Backup again your PostgreSQL database, it will be faster as only changes will be uploaded
|
||||||
|
restic backup /var/lib/postgresql
|
||||||
|
|
||||||
|
# Show backup history (again)
|
||||||
|
restic snapshots
|
||||||
|
|
||||||
|
# Restore a backup
|
||||||
|
# (79766175 is the ID of the snapshot you want to restore)
|
||||||
|
mv /var/lib/postgresql /var/lib/postgresql.broken
|
||||||
|
restic restore 79766175 --target /var/lib/postgresql
|
||||||
|
```
|
||||||
|
|
||||||
|
Restic has way more features than the ones presented here.
|
||||||
|
You can discover all of them by accessing its documentation from the link below.
|
||||||
|
|
||||||
|
|
||||||
*External links:* [Restic Documentation > Amazon S3](https://restic.readthedocs.io/en/stable/030_preparing_a_new_repo.html#amazon-s3)
|
*External links:* [Restic Documentation > Amazon S3](https://restic.readthedocs.io/en/stable/030_preparing_a_new_repo.html#amazon-s3)
|
||||||
|
|
||||||
## Duplicity
|
## Duplicity
|
||||||
|
@ -50,3 +105,24 @@ Click `Test connection` and then no when asked `The bucket name should start wit
|
||||||
|
|
||||||
*External links:* [Kopia Documentation > Repositories](https://kopia.io/docs/repositories/#amazon-s3)
|
*External links:* [Kopia Documentation > Repositories](https://kopia.io/docs/repositories/#amazon-s3)
|
||||||
|
|
||||||
|
To create the Kopia repository, you need to specify the region, the HTTP(S) endpoint, the bucket name and the access keys.
|
||||||
|
For instance, if you have an instance of garage running on `https://garage.example.com`:
|
||||||
|
|
||||||
|
```
|
||||||
|
kopia repository create s3 --region=garage --bucket=mybackups --access-key=KEY_ID --secret-access-key=SECRET_KEY --endpoint=garage.example.com
|
||||||
|
```
|
||||||
|
|
||||||
|
Or if you have an instance running on localhost, without TLS:
|
||||||
|
|
||||||
|
```
|
||||||
|
kopia repository create s3 --region=garage --bucket=mybackups --access-key=KEY_ID --secret-access-key=SECRET_KEY --endpoint=localhost:3900 --disable-tls
|
||||||
|
```
|
||||||
|
|
||||||
|
After the repository has been created, check that everything works as expected:
|
||||||
|
|
||||||
|
```
|
||||||
|
kopia repository validate-provider
|
||||||
|
```
|
||||||
|
|
||||||
|
You can then run all the standard kopia commands: `kopia snapshot create`, `kopia mount`...
|
||||||
|
Everything should work out-of-the-box.
|
||||||
|
|
626
doc/book/reference-manual/admin-api.md
Normal file
626
doc/book/reference-manual/admin-api.md
Normal file
|
@ -0,0 +1,626 @@
|
||||||
|
+++
|
||||||
|
title = "Administration API"
|
||||||
|
weight = 16
|
||||||
|
+++
|
||||||
|
|
||||||
|
The Garage administration API is accessible through a dedicated server whose
|
||||||
|
listen address is specified in the `[admin]` section of the configuration
|
||||||
|
file (see [configuration file
|
||||||
|
reference](@/documentation/reference-manual/configuration.md))
|
||||||
|
|
||||||
|
**WARNING.** At this point, there is no comittement to stability of the APIs described in this document.
|
||||||
|
We will bump the version numbers prefixed to each API endpoint at each time the syntax
|
||||||
|
or semantics change, meaning that code that relies on these endpoint will break
|
||||||
|
when changes are introduced.
|
||||||
|
|
||||||
|
The Garage administration API was introduced in version 0.7.2, this document
|
||||||
|
does not apply to older versions of Garage.
|
||||||
|
|
||||||
|
|
||||||
|
## Access control
|
||||||
|
|
||||||
|
The admin API uses two different tokens for acces control, that are specified in the config file's `[admin]` section:
|
||||||
|
|
||||||
|
- `metrics_token`: the token for accessing the Metrics endpoint (if this token
|
||||||
|
is not set in the config file, the Metrics endpoint can be accessed without
|
||||||
|
access control);
|
||||||
|
|
||||||
|
- `admin_token`: the token for accessing all of the other administration
|
||||||
|
endpoints (if this token is not set in the config file, access to these
|
||||||
|
endpoints is disabled entirely).
|
||||||
|
|
||||||
|
These tokens are used as simple HTTP bearer tokens. In other words, to
|
||||||
|
authenticate access to an admin API endpoint, add the following HTTP header
|
||||||
|
to your request:
|
||||||
|
|
||||||
|
```
|
||||||
|
Authorization: Bearer <token>
|
||||||
|
```
|
||||||
|
|
||||||
|
## Administration API endpoints
|
||||||
|
|
||||||
|
### Metrics-related endpoints
|
||||||
|
|
||||||
|
#### Metrics `GET /metrics`
|
||||||
|
|
||||||
|
Returns internal Garage metrics in Prometheus format.
|
||||||
|
|
||||||
|
### Cluster operations
|
||||||
|
|
||||||
|
#### GetClusterStatus `GET /v0/status`
|
||||||
|
|
||||||
|
Returns the cluster's current status in JSON, including:
|
||||||
|
|
||||||
|
- ID of the node being queried and its version of the Garage daemon
|
||||||
|
- Live nodes
|
||||||
|
- Currently configured cluster layout
|
||||||
|
- Staged changes to the cluster layout
|
||||||
|
|
||||||
|
Example response body:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"node": "ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f",
|
||||||
|
"garage_version": "git:v0.8.0",
|
||||||
|
"knownNodes": {
|
||||||
|
"ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f": {
|
||||||
|
"addr": "10.0.0.11:3901",
|
||||||
|
"is_up": true,
|
||||||
|
"last_seen_secs_ago": 9,
|
||||||
|
"hostname": "node1"
|
||||||
|
},
|
||||||
|
"4a6ae5a1d0d33bf895f5bb4f0a418b7dc94c47c0dd2eb108d1158f3c8f60b0ff": {
|
||||||
|
"addr": "10.0.0.12:3901",
|
||||||
|
"is_up": true,
|
||||||
|
"last_seen_secs_ago": 1,
|
||||||
|
"hostname": "node2"
|
||||||
|
},
|
||||||
|
"23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27": {
|
||||||
|
"addr": "10.0.0.21:3901",
|
||||||
|
"is_up": true,
|
||||||
|
"last_seen_secs_ago": 7,
|
||||||
|
"hostname": "node3"
|
||||||
|
},
|
||||||
|
"e2ee7984ee65b260682086ec70026165903c86e601a4a5a501c1900afe28d84b": {
|
||||||
|
"addr": "10.0.0.22:3901",
|
||||||
|
"is_up": true,
|
||||||
|
"last_seen_secs_ago": 1,
|
||||||
|
"hostname": "node4"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"layout": {
|
||||||
|
"version": 12,
|
||||||
|
"roles": {
|
||||||
|
"ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f": {
|
||||||
|
"zone": "dc1",
|
||||||
|
"capacity": 4,
|
||||||
|
"tags": [
|
||||||
|
"node1"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"4a6ae5a1d0d33bf895f5bb4f0a418b7dc94c47c0dd2eb108d1158f3c8f60b0ff": {
|
||||||
|
"zone": "dc1",
|
||||||
|
"capacity": 6,
|
||||||
|
"tags": [
|
||||||
|
"node2"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27": {
|
||||||
|
"zone": "dc2",
|
||||||
|
"capacity": 10,
|
||||||
|
"tags": [
|
||||||
|
"node3"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"stagedRoleChanges": {
|
||||||
|
"e2ee7984ee65b260682086ec70026165903c86e601a4a5a501c1900afe28d84b": {
|
||||||
|
"zone": "dc2",
|
||||||
|
"capacity": 5,
|
||||||
|
"tags": [
|
||||||
|
"node4"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### ConnectClusterNodes `POST /v0/connect`
|
||||||
|
|
||||||
|
Instructs this Garage node to connect to other Garage nodes at specified addresses.
|
||||||
|
|
||||||
|
Example request body:
|
||||||
|
|
||||||
|
```json
|
||||||
|
[
|
||||||
|
"ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f@10.0.0.11:3901",
|
||||||
|
"4a6ae5a1d0d33bf895f5bb4f0a418b7dc94c47c0dd2eb108d1158f3c8f60b0ff@10.0.0.12:3901"
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
The format of the string for a node to connect to is: `<node ID>@<ip address>:<port>`, same as in the `garage node connect` CLI call.
|
||||||
|
|
||||||
|
Example response:
|
||||||
|
|
||||||
|
```json
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"success": true,
|
||||||
|
"error": null
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"success": false,
|
||||||
|
"error": "Handshake error"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
#### GetClusterLayout `GET /v0/layout`
|
||||||
|
|
||||||
|
Returns the cluster's current layout in JSON, including:
|
||||||
|
|
||||||
|
- Currently configured cluster layout
|
||||||
|
- Staged changes to the cluster layout
|
||||||
|
|
||||||
|
(the info returned by this endpoint is a subset of the info returned by GetClusterStatus)
|
||||||
|
|
||||||
|
Example response body:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"version": 12,
|
||||||
|
"roles": {
|
||||||
|
"ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f": {
|
||||||
|
"zone": "dc1",
|
||||||
|
"capacity": 4,
|
||||||
|
"tags": [
|
||||||
|
"node1"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"4a6ae5a1d0d33bf895f5bb4f0a418b7dc94c47c0dd2eb108d1158f3c8f60b0ff": {
|
||||||
|
"zone": "dc1",
|
||||||
|
"capacity": 6,
|
||||||
|
"tags": [
|
||||||
|
"node2"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27": {
|
||||||
|
"zone": "dc2",
|
||||||
|
"capacity": 10,
|
||||||
|
"tags": [
|
||||||
|
"node3"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"stagedRoleChanges": {
|
||||||
|
"e2ee7984ee65b260682086ec70026165903c86e601a4a5a501c1900afe28d84b": {
|
||||||
|
"zone": "dc2",
|
||||||
|
"capacity": 5,
|
||||||
|
"tags": [
|
||||||
|
"node4"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### UpdateClusterLayout `POST /v0/layout`
|
||||||
|
|
||||||
|
Send modifications to the cluster layout. These modifications will
|
||||||
|
be included in the staged role changes, visible in subsequent calls
|
||||||
|
of `GetClusterLayout`. Once the set of staged changes is satisfactory,
|
||||||
|
the user may call `ApplyClusterLayout` to apply the changed changes,
|
||||||
|
or `RevertClusterLayout` to clear all of the staged changes in
|
||||||
|
the layout.
|
||||||
|
|
||||||
|
Request body format:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
<node_id>: {
|
||||||
|
"capacity": <new_capacity>,
|
||||||
|
"zone": <new_zone>,
|
||||||
|
"tags": [
|
||||||
|
<new_tag>,
|
||||||
|
...
|
||||||
|
]
|
||||||
|
},
|
||||||
|
<node_id_to_remove>: null,
|
||||||
|
...
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Contrary to the CLI that may update only a subset of the fields
|
||||||
|
`capacity`, `zone` and `tags`, when calling this API all of these
|
||||||
|
values must be specified.
|
||||||
|
|
||||||
|
|
||||||
|
#### ApplyClusterLayout `POST /v0/layout/apply`
|
||||||
|
|
||||||
|
Applies to the cluster the layout changes currently registered as
|
||||||
|
staged layout changes.
|
||||||
|
|
||||||
|
Request body format:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"version": 13
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Similarly to the CLI, the body must include the version of the new layout
|
||||||
|
that will be created, which MUST be 1 + the value of the currently
|
||||||
|
existing layout in the cluster.
|
||||||
|
|
||||||
|
#### RevertClusterLayout `POST /v0/layout/revert`
|
||||||
|
|
||||||
|
Clears all of the staged layout changes.
|
||||||
|
|
||||||
|
Request body format:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"version": 13
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Reverting the staged changes is done by incrementing the version number
|
||||||
|
and clearing the contents of the staged change list.
|
||||||
|
Similarly to the CLI, the body must include the incremented
|
||||||
|
version number, which MUST be 1 + the value of the currently
|
||||||
|
existing layout in the cluster.
|
||||||
|
|
||||||
|
|
||||||
|
### Access key operations
|
||||||
|
|
||||||
|
#### ListKeys `GET /v0/key`
|
||||||
|
|
||||||
|
Returns all API access keys in the cluster.
|
||||||
|
|
||||||
|
Example response:
|
||||||
|
|
||||||
|
```json
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"id": "GK31c2f218a2e44f485b94239e",
|
||||||
|
"name": "test"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "GKe10061ac9c2921f09e4c5540",
|
||||||
|
"name": "test2"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
#### CreateKey `POST /v0/key`
|
||||||
|
|
||||||
|
Creates a new API access key.
|
||||||
|
|
||||||
|
Request body format:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"name": "NameOfMyKey"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### ImportKey `POST /v0/key/import`
|
||||||
|
|
||||||
|
Imports an existing API key.
|
||||||
|
|
||||||
|
Request body format:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"accessKeyId": "GK31c2f218a2e44f485b94239e",
|
||||||
|
"secretAccessKey": "b892c0665f0ada8a4755dae98baa3b133590e11dae3bcc1f9d769d67f16c3835",
|
||||||
|
"name": "NameOfMyKey"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### GetKeyInfo `GET /v0/key?id=<access key id>`
|
||||||
|
#### GetKeyInfo `GET /v0/key?search=<pattern>`
|
||||||
|
|
||||||
|
Returns information about the requested API access key.
|
||||||
|
|
||||||
|
If `id` is set, the key is looked up using its exact identifier (faster).
|
||||||
|
If `search` is set, the key is looked up using its name or prefix
|
||||||
|
of identifier (slower, all keys are enumerated to do this).
|
||||||
|
|
||||||
|
Example response:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"name": "test",
|
||||||
|
"accessKeyId": "GK31c2f218a2e44f485b94239e",
|
||||||
|
"secretAccessKey": "b892c0665f0ada8a4755dae98baa3b133590e11dae3bcc1f9d769d67f16c3835",
|
||||||
|
"permissions": {
|
||||||
|
"createBucket": false
|
||||||
|
},
|
||||||
|
"buckets": [
|
||||||
|
{
|
||||||
|
"id": "70dc3bed7fe83a75e46b66e7ddef7d56e65f3c02f9f80b6749fb97eccb5e1033",
|
||||||
|
"globalAliases": [
|
||||||
|
"test2"
|
||||||
|
],
|
||||||
|
"localAliases": [],
|
||||||
|
"permissions": {
|
||||||
|
"read": true,
|
||||||
|
"write": true,
|
||||||
|
"owner": false
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "d7452a935e663fc1914f3a5515163a6d3724010ce8dfd9e4743ca8be5974f995",
|
||||||
|
"globalAliases": [
|
||||||
|
"test3"
|
||||||
|
],
|
||||||
|
"localAliases": [],
|
||||||
|
"permissions": {
|
||||||
|
"read": true,
|
||||||
|
"write": true,
|
||||||
|
"owner": false
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b",
|
||||||
|
"globalAliases": [],
|
||||||
|
"localAliases": [
|
||||||
|
"test"
|
||||||
|
],
|
||||||
|
"permissions": {
|
||||||
|
"read": true,
|
||||||
|
"write": true,
|
||||||
|
"owner": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "96470e0df00ec28807138daf01915cfda2bee8eccc91dea9558c0b4855b5bf95",
|
||||||
|
"globalAliases": [
|
||||||
|
"alex"
|
||||||
|
],
|
||||||
|
"localAliases": [],
|
||||||
|
"permissions": {
|
||||||
|
"read": true,
|
||||||
|
"write": true,
|
||||||
|
"owner": true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### DeleteKey `DELETE /v0/key?id=<access key id>`
|
||||||
|
|
||||||
|
Deletes an API access key.
|
||||||
|
|
||||||
|
#### UpdateKey `POST /v0/key?id=<access key id>`
|
||||||
|
|
||||||
|
Updates information about the specified API access key.
|
||||||
|
|
||||||
|
Request body format:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"name": "NameOfMyKey",
|
||||||
|
"allow": {
|
||||||
|
"createBucket": true,
|
||||||
|
},
|
||||||
|
"deny": {}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
All fields (`name`, `allow` and `deny`) are optional.
|
||||||
|
If they are present, the corresponding modifications are applied to the key, otherwise nothing is changed.
|
||||||
|
The possible flags in `allow` and `deny` are: `createBucket`.
|
||||||
|
|
||||||
|
|
||||||
|
### Bucket operations
|
||||||
|
|
||||||
|
#### ListBuckets `GET /v0/bucket`
|
||||||
|
|
||||||
|
Returns all storage buckets in the cluster.
|
||||||
|
|
||||||
|
Example response:
|
||||||
|
|
||||||
|
```json
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"id": "70dc3bed7fe83a75e46b66e7ddef7d56e65f3c02f9f80b6749fb97eccb5e1033",
|
||||||
|
"globalAliases": [
|
||||||
|
"test2"
|
||||||
|
],
|
||||||
|
"localAliases": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "96470e0df00ec28807138daf01915cfda2bee8eccc91dea9558c0b4855b5bf95",
|
||||||
|
"globalAliases": [
|
||||||
|
"alex"
|
||||||
|
],
|
||||||
|
"localAliases": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "d7452a935e663fc1914f3a5515163a6d3724010ce8dfd9e4743ca8be5974f995",
|
||||||
|
"globalAliases": [
|
||||||
|
"test3"
|
||||||
|
],
|
||||||
|
"localAliases": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b",
|
||||||
|
"globalAliases": [],
|
||||||
|
"localAliases": [
|
||||||
|
{
|
||||||
|
"accessKeyId": "GK31c2f218a2e44f485b94239e",
|
||||||
|
"alias": "test"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
#### GetBucketInfo `GET /v0/bucket?id=<bucket id>`
|
||||||
|
#### GetBucketInfo `GET /v0/bucket?globalAlias=<alias>`
|
||||||
|
|
||||||
|
Returns information about the requested storage bucket.
|
||||||
|
|
||||||
|
If `id` is set, the bucket is looked up using its exact identifier.
|
||||||
|
If `globalAlias` is set, the bucket is looked up using its global alias.
|
||||||
|
(both are fast)
|
||||||
|
|
||||||
|
Example response:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"id": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b",
|
||||||
|
"globalAliases": [
|
||||||
|
"alex"
|
||||||
|
],
|
||||||
|
"keys": [
|
||||||
|
{
|
||||||
|
"accessKeyId": "GK31c2f218a2e44f485b94239e",
|
||||||
|
"name": "alex",
|
||||||
|
"permissions": {
|
||||||
|
"read": true,
|
||||||
|
"write": true,
|
||||||
|
"owner": true
|
||||||
|
},
|
||||||
|
"bucketLocalAliases": [
|
||||||
|
"test"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### CreateBucket `POST /v0/bucket`
|
||||||
|
|
||||||
|
Creates a new storage bucket.
|
||||||
|
|
||||||
|
Request body format:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"globalAlias": "NameOfMyBucket"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
OR
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"localAlias": {
|
||||||
|
"accessKeyId": "GK31c2f218a2e44f485b94239e",
|
||||||
|
"alias": "NameOfMyBucket",
|
||||||
|
"allow": {
|
||||||
|
"read": true,
|
||||||
|
"write": true,
|
||||||
|
"owner": false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
OR
|
||||||
|
|
||||||
|
```json
|
||||||
|
{}
|
||||||
|
```
|
||||||
|
|
||||||
|
Creates a new bucket, either with a global alias, a local one,
|
||||||
|
or no alias at all.
|
||||||
|
|
||||||
|
Technically, you can also specify both `globalAlias` and `localAlias` and that would create
|
||||||
|
two aliases, but I don't see why you would want to do that.
|
||||||
|
|
||||||
|
#### DeleteBucket `DELETE /v0/bucket?id=<bucket id>`
|
||||||
|
|
||||||
|
Deletes a storage bucket. A bucket cannot be deleted if it is not empty.
|
||||||
|
|
||||||
|
Warning: this will delete all aliases associated with the bucket!
|
||||||
|
|
||||||
|
#### PutBucketWebsite `PUT /v0/bucket/website?id=<bucket id>`
|
||||||
|
|
||||||
|
Sets the website configuration for a bucket (this also enables website access for this bucket).
|
||||||
|
|
||||||
|
Request body format:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"indexDocument": "index.html",
|
||||||
|
"errorDocument": "404.html"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
The field `errorDocument` is optional, if no error document is set a generic error message is displayed when errors happen.
|
||||||
|
|
||||||
|
|
||||||
|
#### DeleteBucketWebsite `DELETE /v0/bucket/website?id=<bucket id>`
|
||||||
|
|
||||||
|
Deletes the website configuration for a bucket (disables website access for this bucket).
|
||||||
|
|
||||||
|
|
||||||
|
### Operations on permissions for keys on buckets
|
||||||
|
|
||||||
|
#### BucketAllowKey `POST /v0/bucket/allow`
|
||||||
|
|
||||||
|
Allows a key to do read/write/owner operations on a bucket.
|
||||||
|
|
||||||
|
Request body format:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"bucketId": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b",
|
||||||
|
"accessKeyId": "GK31c2f218a2e44f485b94239e",
|
||||||
|
"permissions": {
|
||||||
|
"read": true,
|
||||||
|
"write": true,
|
||||||
|
"owner": true
|
||||||
|
},
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Flags in `permissions` which have the value `true` will be activated.
|
||||||
|
Other flags will remain unchanged.
|
||||||
|
|
||||||
|
#### BucketDenyKey `POST /v0/bucket/deny`
|
||||||
|
|
||||||
|
Denies a key from doing read/write/owner operations on a bucket.
|
||||||
|
|
||||||
|
Request body format:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"bucketId": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b",
|
||||||
|
"accessKeyId": "GK31c2f218a2e44f485b94239e",
|
||||||
|
"permissions": {
|
||||||
|
"read": false,
|
||||||
|
"write": false,
|
||||||
|
"owner": true
|
||||||
|
},
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Flags in `permissions` which have the value `true` will be deactivated.
|
||||||
|
Other flags will remain unchanged.
|
||||||
|
|
||||||
|
|
||||||
|
### Operations on bucket aliases
|
||||||
|
|
||||||
|
#### GlobalAliasBucket `PUT /v0/bucket/alias/global?id=<bucket id>&alias=<global alias>`
|
||||||
|
|
||||||
|
Empty body. Creates a global alias for a bucket.
|
||||||
|
|
||||||
|
#### GlobalUnaliasBucket `DELETE /v0/bucket/alias/global?id=<bucket id>&alias=<global alias>`
|
||||||
|
|
||||||
|
Removes a global alias for a bucket.
|
||||||
|
|
||||||
|
#### LocalAliasBucket `PUT /v0/bucket/alias/local?id=<bucket id>&accessKeyId=<access key ID>&alias=<local alias>`
|
||||||
|
|
||||||
|
Empty body. Creates a local alias for a bucket in the namespace of a specific access key.
|
||||||
|
|
||||||
|
#### LocalUnaliasBucket `DELETE /v0/bucket/alias/local?id=<bucket id>&accessKeyId=<access key ID>&alias=<local alias>`
|
||||||
|
|
||||||
|
Removes a local alias for a bucket in the namespace of a specific access key.
|
||||||
|
|
|
@ -10,6 +10,7 @@ metadata_dir = "/var/lib/garage/meta"
|
||||||
data_dir = "/var/lib/garage/data"
|
data_dir = "/var/lib/garage/data"
|
||||||
|
|
||||||
block_size = 1048576
|
block_size = 1048576
|
||||||
|
block_manager_background_tranquility = 2
|
||||||
|
|
||||||
replication_mode = "3"
|
replication_mode = "3"
|
||||||
|
|
||||||
|
@ -47,6 +48,8 @@ root_domain = ".web.garage"
|
||||||
|
|
||||||
[admin]
|
[admin]
|
||||||
api_bind_addr = "0.0.0.0:3903"
|
api_bind_addr = "0.0.0.0:3903"
|
||||||
|
metrics_token = "cacce0b2de4bc2d9f5b5fdff551e01ac1496055aed248202d415398987e35f81"
|
||||||
|
admin_token = "ae8cb40ea7368bbdbb6430af11cca7da833d3458a5f52086f4e805a570fb5c2a"
|
||||||
trace_sink = "http://localhost:4317"
|
trace_sink = "http://localhost:4317"
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -84,6 +87,17 @@ files will remain available. This however means that chunks from existing files
|
||||||
will not be deduplicated with chunks from newly uploaded files, meaning you
|
will not be deduplicated with chunks from newly uploaded files, meaning you
|
||||||
might use more storage space than is optimally possible.
|
might use more storage space than is optimally possible.
|
||||||
|
|
||||||
|
### `block_manager_background_tranquility`
|
||||||
|
|
||||||
|
This parameter tunes the activity of the background worker responsible for
|
||||||
|
resyncing data blocks between nodes. The higher the tranquility value is set,
|
||||||
|
the more the background worker will wait between iterations, meaning the load
|
||||||
|
on the system (including network usage between nodes) will be reduced. The
|
||||||
|
minimal value for this parameter is `0`, where the background worker will
|
||||||
|
always work at maximal throughput to resynchronize blocks. The default value
|
||||||
|
is `2`, where the background worker will try to spend at most 1/3 of its time
|
||||||
|
working, and 2/3 sleeping in order to reduce system load.
|
||||||
|
|
||||||
### `replication_mode`
|
### `replication_mode`
|
||||||
|
|
||||||
Garage supports the following replication modes:
|
Garage supports the following replication modes:
|
||||||
|
@ -326,10 +340,24 @@ Garage has a few administration capabilities, in particular to allow remote moni
|
||||||
### `api_bind_addr`
|
### `api_bind_addr`
|
||||||
|
|
||||||
If specified, Garage will bind an HTTP server to this port and address, on
|
If specified, Garage will bind an HTTP server to this port and address, on
|
||||||
which it will listen to requests for administration features. Currently,
|
which it will listen to requests for administration features.
|
||||||
this endpoint only exposes Garage metrics in the Prometheus format at
|
See [administration API reference](@/documentation/reference-manual/admin-api.md) to learn more about these features.
|
||||||
`/metrics`. This endpoint is not authenticated. In the future, bucket and
|
|
||||||
access key management might be possible by REST calls to this endpoint.
|
### `metrics_token` (since version 0.7.2)
|
||||||
|
|
||||||
|
The token for accessing the Metrics endpoint. If this token is not set in
|
||||||
|
the config file, the Metrics endpoint can be accessed without access
|
||||||
|
control.
|
||||||
|
|
||||||
|
You can use any random string for this value. We recommend generating a random token with `openssl rand -hex 32`.
|
||||||
|
|
||||||
|
### `admin_token` (since version 0.7.2)
|
||||||
|
|
||||||
|
The token for accessing all of the other administration endpoints. If this
|
||||||
|
token is not set in the config file, access to these endpoints is disabled
|
||||||
|
entirely.
|
||||||
|
|
||||||
|
You can use any random string for this value. We recommend generating a random token with `openssl rand -hex 32`.
|
||||||
|
|
||||||
### `trace_sink`
|
### `trace_sink`
|
||||||
|
|
||||||
|
|
58
doc/book/reference-manual/k2v.md
Normal file
58
doc/book/reference-manual/k2v.md
Normal file
|
@ -0,0 +1,58 @@
|
||||||
|
+++
|
||||||
|
title = "K2V"
|
||||||
|
weight = 30
|
||||||
|
+++
|
||||||
|
|
||||||
|
Starting with version 0.7.2, Garage introduces an optional feature, K2V,
|
||||||
|
which is an alternative storage API designed to help efficiently store
|
||||||
|
many small values in buckets (in opposition to S3 which is more designed
|
||||||
|
to store large blobs).
|
||||||
|
|
||||||
|
K2V is currently disabled at compile time in all builds, as the
|
||||||
|
specification is still subject to changes. To build a Garage version with
|
||||||
|
K2V, the Cargo feature flag `k2v` must be activated. Special builds with
|
||||||
|
the `k2v` feature flag enabled can be obtained from our download page under
|
||||||
|
"Extra builds": such builds can be identified easily as their tag name ends
|
||||||
|
with `-k2v` (example: `v0.7.2-k2v`).
|
||||||
|
|
||||||
|
The specification of the K2V API can be found
|
||||||
|
[here](https://git.deuxfleurs.fr/Deuxfleurs/garage/src/branch/k2v/doc/drafts/k2v-spec.md).
|
||||||
|
This document also includes a high-level overview of K2V's design.
|
||||||
|
|
||||||
|
The K2V API uses AWSv4 signatures for authentication, same as the S3 API.
|
||||||
|
The AWS region used for signature calculation is always the same as the one
|
||||||
|
defined for the S3 API in the config file.
|
||||||
|
|
||||||
|
## Enabling and using K2V
|
||||||
|
|
||||||
|
To enable K2V, download and run a build that has the `k2v` feature flag
|
||||||
|
enabled, or produce one yourself. Then, add the following section to your
|
||||||
|
configuration file:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[k2v_api]
|
||||||
|
api_bind_addr = "<ip>:<port>"
|
||||||
|
```
|
||||||
|
|
||||||
|
Please select a port number that is not already in use by another API
|
||||||
|
endpoint (S3 api, admin API) or by the RPC server.
|
||||||
|
|
||||||
|
We provide an early-stage K2V client library for Rust which can be imported by adding the following to your `Cargo.toml` file:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
k2v-client = { git = "https://git.deuxfleurs.fr/Deuxfleurs/garage.git" }
|
||||||
|
```
|
||||||
|
|
||||||
|
There is also a simple CLI utility which can be built from source in the
|
||||||
|
following way:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
git clone https://git.deuxfleurs.fr/Deuxfleurs/garage.git
|
||||||
|
cd garage/src/k2v-client
|
||||||
|
cargo build --features cli --bin k2v-cli
|
||||||
|
```
|
||||||
|
|
||||||
|
The CLI utility is self-documented, run `k2v-cli --help` to learn how to use
|
||||||
|
it. There is also a short README.md in the `src/k2v-client` folder with some
|
||||||
|
instructions.
|
||||||
|
|
|
@ -3,24 +3,46 @@ title = "S3 Compatibility status"
|
||||||
weight = 20
|
weight = 20
|
||||||
+++
|
+++
|
||||||
|
|
||||||
## Endpoint implementation
|
## DISCLAIMER
|
||||||
|
|
||||||
All APIs that are missing on Garage will return a 501 Not Implemented.
|
**The compatibility list for other platforms is given only for informational
|
||||||
Some `x-amz-` headers are not implemented.
|
purposes and based on available documentation.** They are sometimes completed,
|
||||||
|
in a best effort approach, with the source code and inputs from maintainers
|
||||||
|
when documentation is lacking. We are not proactively monitoring new versions
|
||||||
|
of each software: check the modification history to know when the page has been
|
||||||
|
updated for the last time. Some entries will be inexact or outdated. For any
|
||||||
|
serious decision, you must make your own tests.
|
||||||
|
**The official documentation of each project can be accessed by clicking on the
|
||||||
|
project name in the column header.**
|
||||||
|
|
||||||
*The compatibility list for other platforms is given only for information purposes and based on available documentation. Some entries might be inexact. Feel free to open a PR to fix this table. Minio is missing because they do not provide a public S3 compatibility list.*
|
Feel free to open a PR to suggest fixes to this table. Minio is missing because they do not provide a public S3 compatibility list.
|
||||||
|
|
||||||
### Features
|
## Update history
|
||||||
|
|
||||||
|
- 2022-02-07 - First version of this page
|
||||||
|
- 2022-05-25 - Many Ceph S3 endpoints are not documented but implemented. Following a notification from the Ceph community, we added them.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
## High-level features
|
||||||
|
|
||||||
| Feature | Garage | [Openstack Swift](https://docs.openstack.org/swift/latest/s3_compat.html) | [Ceph Object Gateway](https://docs.ceph.com/en/latest/radosgw/s3/) | [Riak CS](https://docs.riak.com/riak/cs/2.1.1/references/apis/storage/s3/index.html) | [OpenIO](https://docs.openio.io/latest/source/arch-design/s3_compliancy.html) |
|
| Feature | Garage | [Openstack Swift](https://docs.openstack.org/swift/latest/s3_compat.html) | [Ceph Object Gateway](https://docs.ceph.com/en/latest/radosgw/s3/) | [Riak CS](https://docs.riak.com/riak/cs/2.1.1/references/apis/storage/s3/index.html) | [OpenIO](https://docs.openio.io/latest/source/arch-design/s3_compliancy.html) |
|
||||||
|------------------------------|----------------------------------|-----------------|---------------|---------|-----|
|
|------------------------------|----------------------------------|-----------------|---------------|---------|-----|
|
||||||
| [signature v2](https://docs.aws.amazon.com/general/latest/gr/signature-version-2.html) (deprecated) | ❌ Missing | ✅ | ❌ | ✅ | ✅ |
|
| [signature v2](https://docs.aws.amazon.com/general/latest/gr/signature-version-2.html) (deprecated) | ❌ Missing | ✅ | ✅ | ✅ | ✅ |
|
||||||
| [signature v4](https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) | ✅ Implemented | ✅ | ✅ | ❌ | ✅ |
|
| [signature v4](https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) | ✅ Implemented | ✅ | ✅ | ❌ | ✅ |
|
||||||
| [URL path-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#path-style-access) (eg. `host.tld/bucket/key`) | ✅ Implemented | ✅ | ✅ | ❓| ✅ |
|
| [URL path-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#path-style-access) (eg. `host.tld/bucket/key`) | ✅ Implemented | ✅ | ✅ | ❓| ✅ |
|
||||||
| [URL vhost-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#virtual-hosted-style-access) URL (eg. `bucket.host.tld/key`) | ✅ Implemented | ❌| ✅| ✅ | ✅ |
|
| [URL vhost-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#virtual-hosted-style-access) URL (eg. `bucket.host.tld/key`) | ✅ Implemented | ❌| ✅| ✅ | ✅ |
|
||||||
| [Presigned URLs](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ShareObjectPreSignedURL.html) | ✅ Implemented | ❌| ✅ | ✅ | ✅(❓) |
|
| [Presigned URLs](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ShareObjectPreSignedURL.html) | ✅ Implemented | ❌| ✅ | ✅ | ✅(❓) |
|
||||||
|
|
||||||
*Note:* OpenIO does not say if it supports presigned URLs. Because it is part of signature v4 and they claim they support it without additional precisions, we suppose that OpenIO supports presigned URLs.
|
*Note:* OpenIO does not say if it supports presigned URLs. Because it is part
|
||||||
|
of signature v4 and they claim they support it without additional precisions,
|
||||||
|
we suppose that OpenIO supports presigned URLs.
|
||||||
|
|
||||||
|
|
||||||
|
## Endpoint implementation
|
||||||
|
|
||||||
|
All endpoints that are missing on Garage will return a 501 Not Implemented.
|
||||||
|
Some `x-amz-` headers are not implemented.
|
||||||
|
|
||||||
### Core endpoints
|
### Core endpoints
|
||||||
|
|
||||||
|
@ -37,13 +59,17 @@ Some `x-amz-` headers are not implemented.
|
||||||
| [DeleteObjects](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html) | ✅ Implemented | ✅ | ✅ | ✅ | ✅ |
|
| [DeleteObjects](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html) | ✅ Implemented | ✅ | ✅ | ✅ | ✅ |
|
||||||
| [GetObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) | ✅ Implemented | ✅ | ✅ | ✅ | ✅ |
|
| [GetObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) | ✅ Implemented | ✅ | ✅ | ✅ | ✅ |
|
||||||
| [ListObjects](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) | ✅ Implemented (see details below) | ✅ | ✅ | ✅ | ❌|
|
| [ListObjects](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) | ✅ Implemented (see details below) | ✅ | ✅ | ✅ | ❌|
|
||||||
| [ListObjectsV2](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) | ✅ Implemented | ❌| ❌| ❌| ✅ |
|
| [ListObjectsV2](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) | ✅ Implemented | ❌| ✅ | ❌| ✅ |
|
||||||
| [PostObject](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPOST.html) (compatibility API) | ❌ Missing | ❌| ✅ | ❌| ❌|
|
| [PostObject](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPOST.html) | ✅ Implemented | ❌| ✅ | ❌| ❌|
|
||||||
| [PutObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) | ✅ Implemented | ✅ | ✅ | ✅ | ✅ |
|
| [PutObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) | ✅ Implemented | ✅ | ✅ | ✅ | ✅ |
|
||||||
|
|
||||||
**ListObjects:** Implemented, but there isn't a very good specification of what `encoding-type=url` covers so there might be some encoding bugs. In our implementation the url-encoded fields are in the same in ListObjects as they are in ListObjectsV2.
|
**ListObjects:** Implemented, but there isn't a very good specification of what
|
||||||
|
`encoding-type=url` covers so there might be some encoding bugs. In our
|
||||||
|
implementation the url-encoded fields are in the same in ListObjects as they
|
||||||
|
are in ListObjectsV2.
|
||||||
|
|
||||||
*Note: Ceph API documentation is incomplete and miss at least HeadBucket and UploadPartCopy, but these endpoints are documented in [Red Hat Ceph Storage - Chapter 2. Ceph Object Gateway and the S3 API](https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/4/html/developer_guide/ceph-object-gateway-and-the-s3-api)*
|
*Note: Ceph API documentation is incomplete and lacks at least HeadBucket and UploadPartCopy,
|
||||||
|
but these endpoints are documented in [Red Hat Ceph Storage - Chapter 2. Ceph Object Gateway and the S3 API](https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/4/html/developer_guide/ceph-object-gateway-and-the-s3-api)*
|
||||||
|
|
||||||
### Multipart Upload endpoints
|
### Multipart Upload endpoints
|
||||||
|
|
||||||
|
@ -67,13 +93,13 @@ For more information, please refer to our [issue tracker](https://git.deuxfleurs
|
||||||
| [DeleteBucketWebsite](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html) | ✅ Implemented | ❌| ❌| ❌| ❌|
|
| [DeleteBucketWebsite](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html) | ✅ Implemented | ❌| ❌| ❌| ❌|
|
||||||
| [GetBucketWebsite](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html) | ✅ Implemented | ❌ | ❌| ❌| ❌|
|
| [GetBucketWebsite](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html) | ✅ Implemented | ❌ | ❌| ❌| ❌|
|
||||||
| [PutBucketWebsite](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html) | ⚠ Partially implemented (see below)| ❌| ❌| ❌| ❌|
|
| [PutBucketWebsite](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html) | ⚠ Partially implemented (see below)| ❌| ❌| ❌| ❌|
|
||||||
| [DeleteBucketCors](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) | ✅ Implemented | ❌| ❌| ❌| ✅ |
|
| [DeleteBucketCors](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) | ✅ Implemented | ❌| ✅ | ❌| ✅ |
|
||||||
| [GetBucketCors](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html) | ✅ Implemented | ❌ | ❌| ❌| ✅ |
|
| [GetBucketCors](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html) | ✅ Implemented | ❌ | ✅ | ❌| ✅ |
|
||||||
| [PutBucketCors](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) | ✅ Implemented | ❌| ❌| ❌| ✅ |
|
| [PutBucketCors](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) | ✅ Implemented | ❌| ✅ | ❌| ✅ |
|
||||||
|
|
||||||
**PutBucketWebsite:** Implemented, but only stores the index document suffix and the error document path. Redirects are not supported.
|
**PutBucketWebsite:** Implemented, but only stores the index document suffix and the error document path. Redirects are not supported.
|
||||||
|
|
||||||
*Note: Ceph radosgw has some support for static websites but it is different from Amazon one plus it does not implement its configuration endpoints.*
|
*Note: Ceph radosgw has some support for static websites but it is different from the Amazon one. It also does not implement its configuration endpoints.*
|
||||||
|
|
||||||
### ACL, Policies endpoints
|
### ACL, Policies endpoints
|
||||||
|
|
||||||
|
@ -83,27 +109,27 @@ See Garage CLI reference manual to learn how to use Garage's permission system.
|
||||||
|
|
||||||
| Endpoint | Garage | [Openstack Swift](https://docs.openstack.org/swift/latest/s3_compat.html) | [Ceph Object Gateway](https://docs.ceph.com/en/latest/radosgw/s3/) | [Riak CS](https://docs.riak.com/riak/cs/2.1.1/references/apis/storage/s3/index.html) | [OpenIO](https://docs.openio.io/latest/source/arch-design/s3_compliancy.html) |
|
| Endpoint | Garage | [Openstack Swift](https://docs.openstack.org/swift/latest/s3_compat.html) | [Ceph Object Gateway](https://docs.ceph.com/en/latest/radosgw/s3/) | [Riak CS](https://docs.riak.com/riak/cs/2.1.1/references/apis/storage/s3/index.html) | [OpenIO](https://docs.openio.io/latest/source/arch-design/s3_compliancy.html) |
|
||||||
|------------------------------|----------------------------------|-----------------|---------------|---------|-----|
|
|------------------------------|----------------------------------|-----------------|---------------|---------|-----|
|
||||||
| [DeleteBucketPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketPolicy.html) | ❌ Missing | ❌| ❌| ✅ | ❌|
|
| [DeleteBucketPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketPolicy.html) | ❌ Missing | ❌| ✅ | ✅ | ❌|
|
||||||
| [GetBucketPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicy.html) | ❌ Missing | ❌| ❌| ⚠ | ❌|
|
| [GetBucketPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicy.html) | ❌ Missing | ❌| ✅ | ⚠ | ❌|
|
||||||
| [GetBucketPolicyStatus](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) | ❌ Missing | ❌| ❌| ❌| ❌|
|
| [GetBucketPolicyStatus](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) | ❌ Missing | ❌| ✅ | ❌| ❌|
|
||||||
| [PutBucketPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketPolicy.html) | ❌ Missing | ❌| ❌| ⚠ | ❌|
|
| [PutBucketPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketPolicy.html) | ❌ Missing | ❌| ✅ | ⚠ | ❌|
|
||||||
| [GetBucketAcl](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAcl.html) | ❌ Missing | ✅ | ✅ | ✅ | ✅ |
|
| [GetBucketAcl](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAcl.html) | ❌ Missing | ✅ | ✅ | ✅ | ✅ |
|
||||||
| [PutBucketAcl](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAcl.html) | ❌ Missing | ✅ | ✅ | ✅ | ✅ |
|
| [PutBucketAcl](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAcl.html) | ❌ Missing | ✅ | ✅ | ✅ | ✅ |
|
||||||
| [GetObjectAcl](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) | ❌ Missing | ✅ | ✅ | ✅ | ✅ |
|
| [GetObjectAcl](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) | ❌ Missing | ✅ | ✅ | ✅ | ✅ |
|
||||||
| [PutObjectAcl](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectAcl.html) | ❌ Missing | ✅ | ✅ | ✅ | ✅ |
|
| [PutObjectAcl](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectAcl.html) | ❌ Missing | ✅ | ✅ | ✅ | ✅ |
|
||||||
|
|
||||||
*Notes:* Ceph claims that it supports bucket policies but does not implement any Policy endpoints. They probably refer to their own permission system. Riak CS only supports a subset of the policy configuration.
|
*Notes:* Riak CS only supports a subset of the policy configuration.
|
||||||
|
|
||||||
### Versioning, Lifecycle endpoints
|
### Versioning, Lifecycle endpoints
|
||||||
|
|
||||||
Garage does not support (yet) object versioning.
|
Garage does not (yet) support object versioning.
|
||||||
If you need this feature, please [share your use case in our dedicated issue](https://git.deuxfleurs.fr/Deuxfleurs/garage/issues/166).
|
If you need this feature, please [share your use case in our dedicated issue](https://git.deuxfleurs.fr/Deuxfleurs/garage/issues/166).
|
||||||
|
|
||||||
| Endpoint | Garage | [Openstack Swift](https://docs.openstack.org/swift/latest/s3_compat.html) | [Ceph Object Gateway](https://docs.ceph.com/en/latest/radosgw/s3/) | [Riak CS](https://docs.riak.com/riak/cs/2.1.1/references/apis/storage/s3/index.html) | [OpenIO](https://docs.openio.io/latest/source/arch-design/s3_compliancy.html) |
|
| Endpoint | Garage | [Openstack Swift](https://docs.openstack.org/swift/latest/s3_compat.html) | [Ceph Object Gateway](https://docs.ceph.com/en/latest/radosgw/s3/) | [Riak CS](https://docs.riak.com/riak/cs/2.1.1/references/apis/storage/s3/index.html) | [OpenIO](https://docs.openio.io/latest/source/arch-design/s3_compliancy.html) |
|
||||||
|------------------------------|----------------------------------|-----------------|---------------|---------|-----|
|
|------------------------------|----------------------------------|-----------------|---------------|---------|-----|
|
||||||
| [DeleteBucketLifecycle](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) | ❌ Missing | ❌| ✅| ❌| ✅|
|
| [DeleteBucketLifecycle](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) | ❌ Missing | ❌| ✅| ❌| ✅|
|
||||||
| [GetBucketLifecycleConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) | ❌ Missing | ❌| ⚠ | ❌| ✅|
|
| [GetBucketLifecycleConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) | ❌ Missing | ❌| ✅ | ❌| ✅|
|
||||||
| [PutBucketLifecycleConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) | ❌ Missing | ❌| ⚠ | ❌| ✅|
|
| [PutBucketLifecycleConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) | ❌ Missing | ❌| ✅ | ❌| ✅|
|
||||||
| [GetBucketVersioning](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) | ❌ Stub (see below) | ✅| ✅ | ❌| ✅|
|
| [GetBucketVersioning](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) | ❌ Stub (see below) | ✅| ✅ | ❌| ✅|
|
||||||
| [ListObjectVersions](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectVersions.html) | ❌ Missing | ❌| ✅ | ❌| ✅|
|
| [ListObjectVersions](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectVersions.html) | ❌ Missing | ❌| ✅ | ❌| ✅|
|
||||||
| [PutBucketVersioning](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html) | ❌ Missing | ❌| ✅| ❌| ✅|
|
| [PutBucketVersioning](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html) | ❌ Missing | ❌| ✅| ❌| ✅|
|
||||||
|
@ -111,8 +137,6 @@ If you need this feature, please [share your use case in our dedicated issue](ht
|
||||||
|
|
||||||
**GetBucketVersioning:** Stub implementation (Garage does not yet support versioning so this always returns "versioning not enabled").
|
**GetBucketVersioning:** Stub implementation (Garage does not yet support versioning so this always returns "versioning not enabled").
|
||||||
|
|
||||||
*Note: Ceph only supports `Expiration`, `NoncurrentVersionExpiration` and `AbortIncompleteMultipartUpload` on its Lifecycle endpoints.*
|
|
||||||
|
|
||||||
### Replication endpoints
|
### Replication endpoints
|
||||||
|
|
||||||
Please open an issue if you have a use case for replication.
|
Please open an issue if you have a use case for replication.
|
||||||
|
@ -123,7 +147,10 @@ Please open an issue if you have a use case for replication.
|
||||||
| [GetBucketReplication](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html) | ❌ Missing | ❌| ✅ | ❌| ❌|
|
| [GetBucketReplication](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html) | ❌ Missing | ❌| ✅ | ❌| ❌|
|
||||||
| [PutBucketReplication](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html) | ❌ Missing | ❌| ⚠ | ❌| ❌|
|
| [PutBucketReplication](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html) | ❌ Missing | ❌| ⚠ | ❌| ❌|
|
||||||
|
|
||||||
*Note: Ceph documentation briefly says that Ceph supports [replication through the S3 API](https://docs.ceph.com/en/latest/radosgw/multisite-sync-policy/#s3-replication-api) but with some limitations. Additionally, replication endpoints are not documented in the S3 compatibility page so I don't know what kind of support we can expect.*
|
*Note: Ceph documentation briefly says that Ceph supports
|
||||||
|
[replication through the S3 API](https://docs.ceph.com/en/latest/radosgw/multisite-sync-policy/#s3-replication-api)
|
||||||
|
but with some limitations.
|
||||||
|
Additionally, replication endpoints are not documented in the S3 compatibility page so I don't know what kind of support we can expect.*
|
||||||
|
|
||||||
### Locking objects
|
### Locking objects
|
||||||
|
|
||||||
|
@ -135,8 +162,8 @@ Amazon defines a concept of [object locking](https://docs.aws.amazon.com/AmazonS
|
||||||
| [PutObjectLegalHold](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectLegalHold.html) | ❌ Missing | ❌| ✅ | ❌| ❌|
|
| [PutObjectLegalHold](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectLegalHold.html) | ❌ Missing | ❌| ✅ | ❌| ❌|
|
||||||
| [GetObjectRetention](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectRetention.html) | ❌ Missing | ❌| ✅ | ❌| ❌|
|
| [GetObjectRetention](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectRetention.html) | ❌ Missing | ❌| ✅ | ❌| ❌|
|
||||||
| [PutObjectRetention](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectRetention.html) | ❌ Missing | ❌| ✅ | ❌| ❌|
|
| [PutObjectRetention](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectRetention.html) | ❌ Missing | ❌| ✅ | ❌| ❌|
|
||||||
| [GetObjectLockConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLockConfiguration.html) | ❌ Missing | ❌| ❌| ❌| ❌|
|
| [GetObjectLockConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLockConfiguration.html) | ❌ Missing | ❌| ✅ | ❌| ❌|
|
||||||
| [PutObjectLockConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectLockConfiguration.html) | ❌ Missing | ❌| ❌| ❌| ❌|
|
| [PutObjectLockConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectLockConfiguration.html) | ❌ Missing | ❌| ✅ | ❌| ❌|
|
||||||
|
|
||||||
### (Server-side) encryption
|
### (Server-side) encryption
|
||||||
|
|
||||||
|
@ -145,9 +172,9 @@ Please open an issue if you have a use case.
|
||||||
|
|
||||||
| Endpoint | Garage | [Openstack Swift](https://docs.openstack.org/swift/latest/s3_compat.html) | [Ceph Object Gateway](https://docs.ceph.com/en/latest/radosgw/s3/) | [Riak CS](https://docs.riak.com/riak/cs/2.1.1/references/apis/storage/s3/index.html) | [OpenIO](https://docs.openio.io/latest/source/arch-design/s3_compliancy.html) |
|
| Endpoint | Garage | [Openstack Swift](https://docs.openstack.org/swift/latest/s3_compat.html) | [Ceph Object Gateway](https://docs.ceph.com/en/latest/radosgw/s3/) | [Riak CS](https://docs.riak.com/riak/cs/2.1.1/references/apis/storage/s3/index.html) | [OpenIO](https://docs.openio.io/latest/source/arch-design/s3_compliancy.html) |
|
||||||
|------------------------------|----------------------------------|-----------------|---------------|---------|-----|
|
|------------------------------|----------------------------------|-----------------|---------------|---------|-----|
|
||||||
| [DeleteBucketEncryption](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html) | ❌ Missing | ❌| ❌| ❌| ❌|
|
| [DeleteBucketEncryption](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html) | ❌ Missing | ❌| ✅ | ❌| ❌|
|
||||||
| [GetBucketEncryption](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html) | ❌ Missing | ❌| ❌| ❌| ❌|
|
| [GetBucketEncryption](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html) | ❌ Missing | ❌| ✅ | ❌| ❌|
|
||||||
| [PutBucketEncryption](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) | ❌ Missing | ❌| ❌| ❌| ❌|
|
| [PutBucketEncryption](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) | ❌ Missing | ❌| ✅ | ❌| ❌|
|
||||||
|
|
||||||
### Misc endpoints
|
### Misc endpoints
|
||||||
|
|
||||||
|
@ -155,13 +182,13 @@ Please open an issue if you have a use case.
|
||||||
|------------------------------|----------------------------------|-----------------|---------------|---------|-----|
|
|------------------------------|----------------------------------|-----------------|---------------|---------|-----|
|
||||||
| [GetBucketNotificationConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) | ❌ Missing | ❌| ✅ | ❌| ❌|
|
| [GetBucketNotificationConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) | ❌ Missing | ❌| ✅ | ❌| ❌|
|
||||||
| [PutBucketNotificationConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotificationConfiguration.html) | ❌ Missing | ❌| ✅ | ❌| ❌|
|
| [PutBucketNotificationConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotificationConfiguration.html) | ❌ Missing | ❌| ✅ | ❌| ❌|
|
||||||
| [DeleteBucketTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) | ❌ Missing | ❌| ❌| ❌| ✅ |
|
| [DeleteBucketTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) | ❌ Missing | ❌| ✅ | ❌| ✅ |
|
||||||
| [GetBucketTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) | ❌ Missing | ❌| ❌| ❌| ✅ |
|
| [GetBucketTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) | ❌ Missing | ❌| ✅ | ❌| ✅ |
|
||||||
| [PutBucketTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) | ❌ Missing | ❌| ❌| ❌| ✅ |
|
| [PutBucketTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) | ❌ Missing | ❌| ✅ | ❌| ✅ |
|
||||||
| [DeleteObjectTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html) | ❌ Missing | ❌| ❌| ❌| ✅ |
|
| [DeleteObjectTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html) | ❌ Missing | ❌| ✅ | ❌| ✅ |
|
||||||
| [GetObjectTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) | ❌ Missing | ❌| ❌| ❌| ✅ |
|
| [GetObjectTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) | ❌ Missing | ❌| ✅ | ❌| ✅ |
|
||||||
| [PutObjectTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) | ❌ Missing | ❌| ❌| ❌| ✅ |
|
| [PutObjectTagging](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) | ❌ Missing | ❌| ✅ | ❌| ✅ |
|
||||||
| [GetObjectTorrent](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTorrent.html) | ❌ Missing | ❌| ❌| ❌| ❌|
|
| [GetObjectTorrent](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTorrent.html) | ❌ Missing | ❌| ✅ | ❌| ❌|
|
||||||
|
|
||||||
### Vendor specific endpoints
|
### Vendor specific endpoints
|
||||||
|
|
||||||
|
|
717
doc/drafts/k2v-spec.md
Normal file
717
doc/drafts/k2v-spec.md
Normal file
|
@ -0,0 +1,717 @@
|
||||||
|
# Specification of the Garage K2V API (K2V = Key/Key/Value)
|
||||||
|
|
||||||
|
- We are storing triplets of the form `(partition key, sort key, value)` -> no
|
||||||
|
user-defined fields, the client is responsible for writing whatever they want
|
||||||
|
in the value (typically an encrypted blob). Values are binary blobs, which
|
||||||
|
are always represented as their base64 encoding in the JSON API. Partition
|
||||||
|
keys and sort keys are utf8 strings.
|
||||||
|
|
||||||
|
- Triplets are stored in buckets; each bucket stores a separate set of triplets
|
||||||
|
|
||||||
|
- Bucket names and access keys are the same as for accessing the S3 API
|
||||||
|
|
||||||
|
- K2V triplets exist separately from S3 objects. K2V triplets don't exist for
|
||||||
|
the S3 API, and S3 objects don't exist for the K2V API.
|
||||||
|
|
||||||
|
- Values stored for triplets have associated causality information, that enables
|
||||||
|
Garage to detect concurrent writes. In case of concurrent writes, Garage
|
||||||
|
keeps the concurrent values until a further write supersedes the concurrent
|
||||||
|
values. This is the same method as Riak KV implements. The method used is
|
||||||
|
based on DVVS (dotted version vector sets), described in the paper "Scalable
|
||||||
|
and Accurate Causality Tracking for Eventually Consistent Data Stores", as
|
||||||
|
well as [here](https://github.com/ricardobcl/Dotted-Version-Vectors)
|
||||||
|
|
||||||
|
|
||||||
|
## Data format
|
||||||
|
|
||||||
|
### Triple format
|
||||||
|
|
||||||
|
Triples in K2V are constituted of three fields:
|
||||||
|
|
||||||
|
- a partition key (`pk`), an utf8 string that defines in what partition the
|
||||||
|
triplet is stored; triplets in different partitions cannot be listed together
|
||||||
|
in a ReadBatch command, or deleted together in a DeleteBatch command: a
|
||||||
|
separate command must be included in the ReadBatch/DeleteBatch call for each
|
||||||
|
partition key in which the client wants to read/delete lists of items
|
||||||
|
|
||||||
|
- a sort key (`sk`), an utf8 string that defines the index of the triplet inside its
|
||||||
|
partition; triplets are uniquely identified by their partition key + sort key
|
||||||
|
|
||||||
|
- a value (`v`), an opaque binary blob associated to the partition key + sort key;
|
||||||
|
they are transmitted as binary when possible but in most cases in the JSON API
|
||||||
|
they will be represented as strings using base64 encoding; a value can also
|
||||||
|
be `null` to indicate a deleted triplet (a `null` value is called a tombstone)
|
||||||
|
|
||||||
|
### Causality information
|
||||||
|
|
||||||
|
K2V supports storing several concurrent values associated to a pk+sk, in the
|
||||||
|
case where insertion or deletion operations are detected to be concurrent (i.e.
|
||||||
|
there is not one that was aware of the other, they are not causally dependent
|
||||||
|
one on the other). In practice, it even looks more like the opposite: to
|
||||||
|
overwrite a previously existing value, the client must give a "causality token"
|
||||||
|
that "proves" (not in a cryptographic sense) that it had seen a previous value.
|
||||||
|
Otherwise, the value written will not overwrite an existing value, it will just
|
||||||
|
create a new concurrent value.
|
||||||
|
|
||||||
|
The causality token is a binary/b64-encoded representation of a context,
|
||||||
|
specified below.
|
||||||
|
|
||||||
|
A set of concurrent values looks like this:
|
||||||
|
|
||||||
|
```
|
||||||
|
(node1, tdiscard1, (v1, t1), (v2, t2)) ; tdiscard1 < t1 < t2
|
||||||
|
(node2, tdiscard2, (v3, t3) ; tdiscard2 < t3
|
||||||
|
```
|
||||||
|
|
||||||
|
`tdiscard` for a node `i` means that all values inserted by node `i` with times
|
||||||
|
`<= tdiscard` are obsoleted, i.e. have been read by a client that overwrote it
|
||||||
|
afterwards.
|
||||||
|
|
||||||
|
The associated context would be the following: `[(node1, t2), (node2, t3)]`,
|
||||||
|
i.e. if a node reads this set of values and inserts a new value, we will now
|
||||||
|
have `tdiscard1 = t2` and `tdiscard2 = t3`, to indicate that values v1, v2 and v3
|
||||||
|
are obsoleted by the new write.
|
||||||
|
|
||||||
|
**Basic insertion.** To insert a new value `v4` with context `[(node1, t2), (node2, t3)]`, in a
|
||||||
|
simple case where there was no insertion in-between reading the value
|
||||||
|
mentioned above and writing `v4`, and supposing that node2 receives the
|
||||||
|
InsertItem query:
|
||||||
|
|
||||||
|
- `node2` generates a timestamp `t4` such that `t4 > t3`.
|
||||||
|
- the new state is as follows:
|
||||||
|
|
||||||
|
```
|
||||||
|
(node1, tdiscard1', ()) ; tdiscard1' = t2
|
||||||
|
(node2, tdiscard2', (v4, t4)) ; tdiscard2' = t3
|
||||||
|
```
|
||||||
|
|
||||||
|
**A more complex insertion example.** In the general case, other intermediate values could have
|
||||||
|
been written before `v4` with context `[(node1, t2), (node2, t3)]` is sent to the system.
|
||||||
|
For instance, here is a possible sequence of events:
|
||||||
|
|
||||||
|
1. First we have the set of values v1, v2 and v3 described above.
|
||||||
|
A node reads it, it obtains values v1, v2 and v3 with context `[(node1, t2), (node2, t3)]`.
|
||||||
|
|
||||||
|
2. A node writes a value `v5` with context `[(node1, t1)]`, i.e. `v5` is only a
|
||||||
|
successor of v1 but not of v2 or v3. Suppose node1 receives the write, it
|
||||||
|
will generate a new timestamp `t5` larger than all of the timestamps it
|
||||||
|
knows of, i.e. `t5 > t2`. We will now have:
|
||||||
|
|
||||||
|
```
|
||||||
|
(node1, tdiscard1'', (v2, t2), (v5, t5)) ; tdiscard1'' = t1 < t2 < t5
|
||||||
|
(node2, tdiscard2, (v3, t3) ; tdiscard2 < t3
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Now `v4` is written with context `[(node1, t2), (node2, t3)]`, and node2
|
||||||
|
processes the query. It will generate `t4 > t3` and the state will become:
|
||||||
|
|
||||||
|
```
|
||||||
|
(node1, tdiscard1', (v5, t5)) ; tdiscard1' = t2 < t5
|
||||||
|
(node2, tdiscard2', (v4, t4)) ; tdiscard2' = t3
|
||||||
|
```
|
||||||
|
|
||||||
|
**Generic algorithm for handling insertions:** A certain node n handles the
|
||||||
|
InsertItem and is responsible for the correctness of this procedure.
|
||||||
|
|
||||||
|
1. Lock the key (or the whole table?) at this node to prevent concurrent updates of the value that would mess things up
|
||||||
|
2. Read current set of values
|
||||||
|
3. Generate a new timestamp that is larger than the largest timestamp for node n
|
||||||
|
4. Add the inserted value in the list of values of node n
|
||||||
|
5. Update the discard times to be the times set in the context, and accordingly discard overwritten values
|
||||||
|
6. Release lock
|
||||||
|
7. Propagate updated value to other nodes
|
||||||
|
8. Return to user when propagation achieved the write quorum (propagation to other nodes continues asynchronously)
|
||||||
|
|
||||||
|
**Encoding of contexts:**
|
||||||
|
|
||||||
|
Contexts consist of a list of (node id, timestamp) pairs.
|
||||||
|
They are encoded in binary as follows:
|
||||||
|
|
||||||
|
```
|
||||||
|
checksum: u64, [ node: u64, timestamp: u64 ]*
|
||||||
|
```
|
||||||
|
|
||||||
|
The checksum is just the XOR of all of the node IDs and timestamps.
|
||||||
|
|
||||||
|
Once encoded in binary, contexts are written and transmitted in base64.
|
||||||
|
|
||||||
|
|
||||||
|
### Indexing
|
||||||
|
|
||||||
|
K2V keeps an index, a secondary data structure that is updated asynchronously,
|
||||||
|
that keeps track of the number of triplets stored for each partition key.
|
||||||
|
This allows easy listing of all of the partition keys for which triplets exist
|
||||||
|
in a bucket, as the partition key becomes the sort key in the index.
|
||||||
|
|
||||||
|
How indexing works:
|
||||||
|
|
||||||
|
- Each node keeps a local count of how many items it stores for each partition,
|
||||||
|
in a local Sled tree that is updated atomically when an item is modified.
|
||||||
|
- These local counters are asynchronously stored in the index table which is
|
||||||
|
a regular Garage table spread in the network. Counters are stored as LWW values,
|
||||||
|
so basically the final table will have the following structure:
|
||||||
|
|
||||||
|
```
|
||||||
|
- pk: bucket
|
||||||
|
- sk: partition key for which we are counting
|
||||||
|
- v: lwwmap (node id -> number of items)
|
||||||
|
```
|
||||||
|
|
||||||
|
The final number of items present in the partition can be estimated by taking
|
||||||
|
the maximum of the values (i.e. the value for the node that announces having
|
||||||
|
the most items for that partition). In most cases the values for different node
|
||||||
|
IDs should all be the same; more precisely, three node IDs should map to the
|
||||||
|
same non-zero value, and all other node IDs that are present are tombstones
|
||||||
|
that map to zeroes. Note that we need to filter out values from nodes that are
|
||||||
|
no longer part of the cluster layout, as when nodes are removed they won't
|
||||||
|
necessarily have had the time to set their counters to zero.
|
||||||
|
|
||||||
|
## Important details
|
||||||
|
|
||||||
|
**THIS SECTION CONTAINS A FEW WARNINGS ON THE K2V API WHICH ARE IMPORTANT
|
||||||
|
TO UNDERSTAND IN ORDER TO USE IT CORRECTLY.**
|
||||||
|
|
||||||
|
- **Internal server errors on updates do not mean that the update isn't stored.**
|
||||||
|
K2V will return an internal server error when it cannot reach a quorum of nodes on
|
||||||
|
which to save an updated value. However the value may still be stored on just one
|
||||||
|
node, which will then propagate it to other nodes asynchronously via anti-entropy.
|
||||||
|
|
||||||
|
- **Batch operations are not transactions.** When calling InsertBatch or DeleteBatch,
|
||||||
|
items may appear partially inserted/deleted while the operation is being processed.
|
||||||
|
More importantly, if InsertBatch or DeleteBatch returns an internal server error,
|
||||||
|
some of the items to be inserted/deleted might end up inserted/deleted on the server,
|
||||||
|
while others may still have their old value.
|
||||||
|
|
||||||
|
- **Concurrent values are deduplicated.** When inserting a value for a key,
|
||||||
|
Garage might internally end up
|
||||||
|
storing the value several times if there are network errors. These values will end up as
|
||||||
|
concurrent values for a key, with the same byte string (or `null` for a deletion).
|
||||||
|
Garage fixes this by deduplicating concurrent values when they are returned to the
|
||||||
|
user on read operations. Importantly, *Garage does not differentiate between duplicate
|
||||||
|
concurrent values due to the user making the same call twice, or Garage having to
|
||||||
|
do an internal retry*. This means that all duplicate concurrent values are deduplicated
|
||||||
|
when an item is read: if the user inserts twice concurrently the same value, they will
|
||||||
|
only read it once.
|
||||||
|
|
||||||
|
## API Endpoints
|
||||||
|
|
||||||
|
**Remark.** Example queries and responses here are given in JSON5 format
|
||||||
|
for clarity. However the actual K2V API uses basic JSON so all examples
|
||||||
|
and responses need to be translated.
|
||||||
|
|
||||||
|
### Operations on single items
|
||||||
|
|
||||||
|
**ReadItem: `GET /<bucket>/<partition key>?sort_key=<sort key>`**
|
||||||
|
|
||||||
|
|
||||||
|
Query parameters:
|
||||||
|
|
||||||
|
| name | default value | meaning |
|
||||||
|
| - | - | - |
|
||||||
|
| `sort_key` | **mandatory** | The sort key of the item to read |
|
||||||
|
|
||||||
|
Returns the item with specified partition key and sort key. Values can be
|
||||||
|
returned in either of two ways:
|
||||||
|
|
||||||
|
1. a JSON array of base64-encoded values, or `null`'s for tombstones, with
|
||||||
|
header `Content-Type: application/json`
|
||||||
|
|
||||||
|
2. in the case where there are no concurrent values, the single present value
|
||||||
|
can be returned directly as the response body (or an HTTP 204 NO CONTENT for
|
||||||
|
a tombstone), with header `Content-Type: application/octet-stream`
|
||||||
|
|
||||||
|
The choice between return formats 1 and 2 is directed by the `Accept` HTTP header:
|
||||||
|
|
||||||
|
- if the `Accept` header is not present, format 1 is always used
|
||||||
|
|
||||||
|
- if `Accept` contains `application/json` but not `application/octet-stream`,
|
||||||
|
format 1 is always used
|
||||||
|
|
||||||
|
- if `Accept` contains `application/octet-stream` but not `application/json`,
|
||||||
|
format 2 is used when there is a single value, and an HTTP error 409 (HTTP
|
||||||
|
409 CONFLICT) is returned in the case of multiple concurrent values
|
||||||
|
(including concurrent tombstones)
|
||||||
|
|
||||||
|
- if `Accept` contains both, format 2 is used when there is a single value, and
|
||||||
|
format 1 is used as a fallback in case of concurrent values
|
||||||
|
|
||||||
|
- if `Accept` contains neither, an HTTP 406 NOT ACCEPTABLE error is raised
|
||||||
|
|
||||||
|
Example query:
|
||||||
|
|
||||||
|
```
|
||||||
|
GET /my_bucket/mailboxes?sort_key=INBOX HTTP/1.1
|
||||||
|
```
|
||||||
|
|
||||||
|
Example response:
|
||||||
|
|
||||||
|
```json
|
||||||
|
HTTP/1.1 200 OK
|
||||||
|
X-Garage-Causality-Token: opaquetoken123
|
||||||
|
Content-Type: application/json
|
||||||
|
|
||||||
|
[
|
||||||
|
"b64cryptoblob123",
|
||||||
|
"b64cryptoblob'123"
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
Example response in case the item is a tombstone:
|
||||||
|
|
||||||
|
```
|
||||||
|
HTTP/1.1 200 OK
|
||||||
|
X-Garage-Causality-Token: opaquetoken999
|
||||||
|
Content-Type: application/json
|
||||||
|
|
||||||
|
[
|
||||||
|
null
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
Example query 2:
|
||||||
|
|
||||||
|
```
|
||||||
|
GET /my_bucket/mailboxes?sort_key=INBOX HTTP/1.1
|
||||||
|
Accept: application/octet-stream
|
||||||
|
```
|
||||||
|
|
||||||
|
Example response if multiple concurrent versions exist:
|
||||||
|
|
||||||
|
```
|
||||||
|
HTTP/1.1 409 CONFLICT
|
||||||
|
X-Garage-Causality-Token: opaquetoken123
|
||||||
|
Content-Type: application/octet-stream
|
||||||
|
```
|
||||||
|
|
||||||
|
Example response in case of single value:
|
||||||
|
|
||||||
|
```
|
||||||
|
HTTP/1.1 200 OK
|
||||||
|
X-Garage-Causality-Token: opaquetoken123
|
||||||
|
Content-Type: application/octet-stream
|
||||||
|
|
||||||
|
cryptoblob123
|
||||||
|
```
|
||||||
|
|
||||||
|
Example response in case of a single value that is a tombstone:
|
||||||
|
|
||||||
|
```
|
||||||
|
HTTP/1.1 204 NO CONTENT
|
||||||
|
X-Garage-Causality-Token: opaquetoken123
|
||||||
|
Content-Type: application/octet-stream
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
**PollItem: `GET /<bucket>/<partition key>?sort_key=<sort key>&causality_token=<causality token>`**
|
||||||
|
|
||||||
|
This endpoint will block until a new value is written to a key.
|
||||||
|
|
||||||
|
The GET parameter `causality_token` should be set to the causality
|
||||||
|
token returned with the last read of the key, so that K2V knows
|
||||||
|
what values are concurrent or newer than the ones that the
|
||||||
|
client previously knew.
|
||||||
|
|
||||||
|
This endpoint returns the new value in the same format as ReadItem.
|
||||||
|
If no new value is written and the timeout elapses,
|
||||||
|
an HTTP 304 NOT MODIFIED is returned.
|
||||||
|
|
||||||
|
Query parameters:
|
||||||
|
|
||||||
|
| name | default value | meaning |
|
||||||
|
| - | - | - |
|
||||||
|
| `sort_key` | **mandatory** | The sort key of the item to read |
|
||||||
|
| `causality_token` | **mandatory** | The causality token of the last known value or set of values |
|
||||||
|
| `timeout` | 300 | The timeout before 304 NOT MODIFIED is returned if the value isn't updated |
|
||||||
|
|
||||||
|
The timeout can be set to any number of seconds, with a maximum of 600 seconds (10 minutes).
|
||||||
|
|
||||||
|
|
||||||
|
**InsertItem: `PUT /<bucket>/<partition key>?sort_key=<sort_key>`**
|
||||||
|
|
||||||
|
Inserts a single item. This request does not use JSON, the body is sent directly as a binary blob.
|
||||||
|
|
||||||
|
To supersede previous values, the HTTP header `X-Garage-Causality-Token` should
|
||||||
|
be set to the causality token returned by a previous read on this key. This
|
||||||
|
header can be omitted for the first writes to the key.
|
||||||
|
|
||||||
|
Example query:
|
||||||
|
|
||||||
|
```
|
||||||
|
PUT /my_bucket/mailboxes?sort_key=INBOX HTTP/1.1
|
||||||
|
X-Garage-Causality-Token: opaquetoken123
|
||||||
|
|
||||||
|
myblobblahblahblah
|
||||||
|
```
|
||||||
|
|
||||||
|
Example response:
|
||||||
|
|
||||||
|
```
|
||||||
|
HTTP/1.1 200 OK
|
||||||
|
```
|
||||||
|
|
||||||
|
**DeleteItem: `DELETE /<bucket>/<partition key>?sort_key=<sort_key>`**
|
||||||
|
|
||||||
|
Deletes a single item. The HTTP header `X-Garage-Causality-Token` must be set
|
||||||
|
to the causality token returned by a previous read on this key, to indicate
|
||||||
|
which versions of the value should be deleted. The request will not process if
|
||||||
|
`X-Garage-Causality-Token` is not set.
|
||||||
|
|
||||||
|
Example query:
|
||||||
|
|
||||||
|
```
|
||||||
|
DELETE /my_bucket/mailboxes?sort_key=INBOX HTTP/1.1
|
||||||
|
X-Garage-Causality-Token: opaquetoken123
|
||||||
|
```
|
||||||
|
|
||||||
|
Example response:
|
||||||
|
|
||||||
|
```
|
||||||
|
HTTP/1.1 204 NO CONTENT
|
||||||
|
```
|
||||||
|
|
||||||
|
### Operations on index
|
||||||
|
|
||||||
|
**ReadIndex: `GET /<bucket>?start=<start>&end=<end>&limit=<limit>`**
|
||||||
|
|
||||||
|
Lists all partition keys in the bucket for which some triplets exist, and gives
|
||||||
|
for each the number of triplets, total number of values (which might be bigger
|
||||||
|
than the number of triplets in case of conflicts), total number of bytes of
|
||||||
|
these values, and number of triplets that are in a state of conflict.
|
||||||
|
The values returned are an approximation of the true counts in the bucket,
|
||||||
|
as these values are asynchronously updated, and thus eventually consistent.
|
||||||
|
|
||||||
|
Query parameters:
|
||||||
|
|
||||||
|
| name | default value | meaning |
|
||||||
|
| - | - | - |
|
||||||
|
| `prefix` | `null` | Restrict listing to partition keys that start with this prefix |
|
||||||
|
| `start` | `null` | First partition key to list, in lexicographical order |
|
||||||
|
| `end` | `null` | Last partition key to list (excluded) |
|
||||||
|
| `limit` | `null` | Maximum number of partition keys to list |
|
||||||
|
| `reverse` | `false` | Iterate in reverse lexicographical order |
|
||||||
|
|
||||||
|
The response consists of a JSON object that repeats the parameters of the query and gives the result (see below).
|
||||||
|
|
||||||
|
The listing starts at partition key `start`, or if not specified at the
|
||||||
|
smallest partition key that exists. It returns partition keys in increasing
|
||||||
|
order, or decreasing order if `reverse` is set to `true`,
|
||||||
|
and stops when either of the following conditions is met:
|
||||||
|
|
||||||
|
1. if `end` is specified, the partition key `end` is reached or surpassed (if it
|
||||||
|
is reached exactly, it is not included in the result)
|
||||||
|
|
||||||
|
2. if `limit` is specified, `limit` partition keys have been listed
|
||||||
|
|
||||||
|
3. no more partition keys are available to list
|
||||||
|
|
||||||
|
In case 2, and if there are more partition keys to list before condition 1
|
||||||
|
triggers, then in the result `more` is set to `true` and `nextStart` is set to
|
||||||
|
the first partition key that couldn't be listed due to the limit. In the first
|
||||||
|
case (if the listing stopped because of the `end` parameter), `more` is not set
|
||||||
|
and the `nextStart` key is not specified.
|
||||||
|
|
||||||
|
Note that if `reverse` is set to `true`, `start` is the highest key
|
||||||
|
(in lexicographical order) for which values are returned.
|
||||||
|
This means that if an `end` is specified, it must be smaller than `start`,
|
||||||
|
otherwise no values will be returned.
|
||||||
|
|
||||||
|
Example query:
|
||||||
|
|
||||||
|
```
|
||||||
|
GET /my_bucket HTTP/1.1
|
||||||
|
```
|
||||||
|
|
||||||
|
Example response:
|
||||||
|
|
||||||
|
```json
|
||||||
|
HTTP/1.1 200 OK
|
||||||
|
|
||||||
|
{
|
||||||
|
prefix: null,
|
||||||
|
start: null,
|
||||||
|
end: null,
|
||||||
|
limit: null,
|
||||||
|
reverse: false,
|
||||||
|
partitionKeys: [
|
||||||
|
{
|
||||||
|
pk: "keys",
|
||||||
|
entries: 3043,
|
||||||
|
conflicts: 0,
|
||||||
|
values: 3043,
|
||||||
|
bytes: 121720,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
pk: "mailbox:INBOX",
|
||||||
|
entries: 42,
|
||||||
|
conflicts: 1,
|
||||||
|
values: 43,
|
||||||
|
bytes: 142029,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
pk: "mailbox:Junk",
|
||||||
|
entries: 2991,
|
||||||
|
conflicts: 0,
|
||||||
|
values: 2991,
|
||||||
|
bytes: 12019322,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
pk: "mailbox:Trash",
|
||||||
|
entries: 10,
|
||||||
|
conflicts: 0,
|
||||||
|
values: 10,
|
||||||
|
bytes: 32401,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
pk: "mailboxes",
|
||||||
|
entries: 3,
|
||||||
|
conflicts: 0,
|
||||||
|
values: 3,
|
||||||
|
bytes: 3019,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
more: false,
|
||||||
|
nextStart: null,
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
### Operations on batches of items
|
||||||
|
|
||||||
|
**InsertBatch: `POST /<bucket>`**
|
||||||
|
|
||||||
|
Simple insertion and deletion of triplets. The body is just a list of items to
|
||||||
|
insert in the following format:
|
||||||
|
`{ pk: "<partition key>", sk: "<sort key>", ct: "<causality token>"|null, v: "<value>"|null }`.
|
||||||
|
|
||||||
|
The causality token should be the one returned in a previous read request (e.g.
|
||||||
|
by ReadItem or ReadBatch), to indicate that this write takes into account the
|
||||||
|
values that were returned from these reads, and supersedes them causally. If
|
||||||
|
the triplet is inserted for the first time, the causality token should be set to
|
||||||
|
`null`.
|
||||||
|
|
||||||
|
The value is expected to be a base64-encoded binary blob. The value `null` can
|
||||||
|
also be used to delete the triplet while preserving causality information: this
|
||||||
|
allows one to know if a delete has happened concurrently with an insert, in which
|
||||||
|
case both are preserved and returned on reads (see below).
|
||||||
|
|
||||||
|
Partition keys and sort keys are utf8 strings which are stored sorted by
|
||||||
|
lexicographical ordering of their binary representation.
|
||||||
|
|
||||||
|
Example query:
|
||||||
|
|
||||||
|
```json
|
||||||
|
POST /my_bucket HTTP/1.1
|
||||||
|
|
||||||
|
[
|
||||||
|
{ pk: "mailbox:INBOX", sk: "001892831", ct: "opaquetoken321", v: "b64cryptoblob321updated" },
|
||||||
|
{ pk: "mailbox:INBOX", sk: "001892912", ct: null, v: "b64cryptoblob444" },
|
||||||
|
{ pk: "mailbox:INBOX", sk: "001892932", ct: "opaquetoken654", v: null },
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
Example response:
|
||||||
|
|
||||||
|
```
|
||||||
|
HTTP/1.1 200 OK
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
**ReadBatch: `POST /<bucket>?search`**, or alternatively<br/>
|
||||||
|
**ReadBatch: `SEARCH /<bucket>`**
|
||||||
|
|
||||||
|
Batch read of triplets in a bucket.
|
||||||
|
|
||||||
|
The request body is a JSON list of searches, that each specify a range of
|
||||||
|
items to get (to get single items, set `singleItem` to `true`). A search is a
|
||||||
|
JSON struct with the following fields:
|
||||||
|
|
||||||
|
| name | default value | meaning |
|
||||||
|
| - | - | - |
|
||||||
|
| `partitionKey` | **mandatory** | The partition key in which to search |
|
||||||
|
| `prefix` | `null` | Restrict items to list to those whose sort keys start with this prefix |
|
||||||
|
| `start` | `null` | The sort key of the first item to read |
|
||||||
|
| `end` | `null` | The sort key of the last item to read (excluded) |
|
||||||
|
| `limit` | `null` | The maximum number of items to return |
|
||||||
|
| `reverse` | `false` | Iterate in reverse lexicographical order on sort keys |
|
||||||
|
| `singleItem` | `false` | Whether to return only the item with sort key `start` |
|
||||||
|
| `conflictsOnly` | `false` | Whether to return only items that have several concurrent values |
|
||||||
|
| `tombstones` | `false` | Whether or not to return tombstone lines to indicate the presence of old deleted items |
|
||||||
|
|
||||||
|
|
||||||
|
For each of the searches, triplets are listed and returned separately. The
|
||||||
|
semantics of `prefix`, `start`, `end`, `limit` and `reverse` are the same as for ReadIndex. The
|
||||||
|
additional parameter `singleItem` allows retrieving a single item, whose sort key
|
||||||
|
is the one given in `start`. Parameters `conflictsOnly` and `tombstones`
|
||||||
|
control additional filters on the items that are returned.
|
||||||
|
|
||||||
|
The result is a list of length the number of searches, that consists in for
|
||||||
|
each search a JSON object specified similarly to the result of ReadIndex, but
|
||||||
|
that lists triplets within a partition key.
|
||||||
|
|
||||||
|
The format of returned tuples is as follows: `{ sk: "<sort key>", ct: "<causality
|
||||||
|
token>", v: ["<value1>", ...] }`, with the following fields:
|
||||||
|
|
||||||
|
- `sk` (sort key): any unicode string used as a sort key
|
||||||
|
|
||||||
|
- `ct` (causality token): an opaque token served by the server (generally
|
||||||
|
base64-encoded) to be used in subsequent writes to this key
|
||||||
|
|
||||||
|
- `v` (list of values): each value is a binary blob, always base64-encoded;
|
||||||
|
contains multiple items when concurrent values exist
|
||||||
|
|
||||||
|
- in case of concurrent update and deletion, a `null` is added to the list of concurrent values
|
||||||
|
|
||||||
|
- if the `tombstones` query parameter is set to `true`, tombstones are returned
|
||||||
|
for items that have been deleted (this can be useful for inserting after an
|
||||||
|
item that has been deleted, so that the insert is not considered
|
||||||
|
concurrent with the delete). Tombstones are returned as tuples in the
|
||||||
|
same format with only `null` values
|
||||||
|
|
||||||
|
Example query:
|
||||||
|
|
||||||
|
```json
|
||||||
|
POST /my_bucket?search HTTP/1.1
|
||||||
|
|
||||||
|
[
|
||||||
|
{
|
||||||
|
partitionKey: "mailboxes",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
partitionKey: "mailbox:INBOX",
|
||||||
|
start: "001892831",
|
||||||
|
limit: 3,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
partitionKey: "keys",
|
||||||
|
start: "0",
|
||||||
|
singleItem: true,
|
||||||
|
},
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
Example associated response body:
|
||||||
|
|
||||||
|
```json
|
||||||
|
HTTP/1.1 200 OK
|
||||||
|
|
||||||
|
[
|
||||||
|
{
|
||||||
|
partitionKey: "mailboxes",
|
||||||
|
prefix: null,
|
||||||
|
start: null,
|
||||||
|
end: null,
|
||||||
|
limit: null,
|
||||||
|
reverse: false,
|
||||||
|
conflictsOnly: false,
|
||||||
|
tombstones: false,
|
||||||
|
singleItem: false,
|
||||||
|
items: [
|
||||||
|
{ sk: "INBOX", ct: "opaquetoken123", v: ["b64cryptoblob123", "b64cryptoblob'123"] },
|
||||||
|
{ sk: "Trash", ct: "opaquetoken456", v: ["b64cryptoblob456"] },
|
||||||
|
{ sk: "Junk", ct: "opaquetoken789", v: ["b64cryptoblob789"] },
|
||||||
|
],
|
||||||
|
more: false,
|
||||||
|
nextStart: null,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
partitionKey: "mailbox:INBOX",
|
||||||
|
prefix: null,
|
||||||
|
start: "001892831",
|
||||||
|
end: null,
|
||||||
|
limit: 3,
|
||||||
|
reverse: false,
|
||||||
|
conflictsOnly: false,
|
||||||
|
tombstones: false,
|
||||||
|
singleItem: false,
|
||||||
|
items: [
|
||||||
|
{ sk: "001892831", ct: "opaquetoken321", v: ["b64cryptoblob321"] },
|
||||||
|
{ sk: "001892832", ct: "opaquetoken654", v: ["b64cryptoblob654"] },
|
||||||
|
{ sk: "001892874", ct: "opaquetoken987", v: ["b64cryptoblob987"] },
|
||||||
|
],
|
||||||
|
more: true,
|
||||||
|
nextStart: "001892898",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
partitionKey: "keys",
|
||||||
|
prefix: null,
|
||||||
|
start: "0",
|
||||||
|
end: null,
|
||||||
|
conflictsOnly: false,
|
||||||
|
tombstones: false,
|
||||||
|
limit: null,
|
||||||
|
reverse: false,
|
||||||
|
singleItem: true,
|
||||||
|
items: [
|
||||||
|
{ sk: "0", ct: "opaquetoken999", v: ["b64binarystuff999"] },
|
||||||
|
],
|
||||||
|
more: false,
|
||||||
|
nextStart: null,
|
||||||
|
},
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
**DeleteBatch: `POST /<bucket>?delete`**
|
||||||
|
|
||||||
|
Batch deletion of triplets. The request format is the same as for `POST
|
||||||
|
/<bucket>?search` to indicate items or range of items, except that here they
|
||||||
|
are deleted instead of returned, but only the fields `partitionKey`, `prefix`, `start`,
|
||||||
|
`end`, and `singleItem` are supported. Causality information is not given by
|
||||||
|
the user: this request will internally list all triplets and write deletion
|
||||||
|
markers that supersede all of the versions that have been read.
|
||||||
|
|
||||||
|
This request returns for each series of items to be deleted, the number of
|
||||||
|
matching items that have been found and deleted.
|
||||||
|
|
||||||
|
Example query:
|
||||||
|
|
||||||
|
```json
|
||||||
|
POST /my_bucket?delete HTTP/1.1
|
||||||
|
|
||||||
|
[
|
||||||
|
{
|
||||||
|
partitionKey: "mailbox:OldMailbox",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
partitionKey: "mailbox:INBOX",
|
||||||
|
start: "0018928321",
|
||||||
|
singleItem: true,
|
||||||
|
},
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
Example response:
|
||||||
|
|
||||||
|
```
|
||||||
|
HTTP/1.1 200 OK
|
||||||
|
|
||||||
|
[
|
||||||
|
{
|
||||||
|
partitionKey: "mailbox:OldMailbox",
|
||||||
|
prefix: null,
|
||||||
|
start: null,
|
||||||
|
end: null,
|
||||||
|
singleItem: false,
|
||||||
|
deletedItems: 35,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
partitionKey: "mailbox:INBOX",
|
||||||
|
prefix: null,
|
||||||
|
start: "0018928321",
|
||||||
|
end: null,
|
||||||
|
singleItem: true,
|
||||||
|
deletedItems: 1,
|
||||||
|
},
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## Internals: causality tokens
|
||||||
|
|
||||||
|
The method used is based on DVVS (dotted version vector sets). See:
|
||||||
|
|
||||||
|
- the paper "Scalable and Accurate Causality Tracking for Eventually Consistent Data Stores"
|
||||||
|
- <https://github.com/ricardobcl/Dotted-Version-Vectors>
|
||||||
|
|
||||||
|
For DVVS to work, write operations (at each node) must take a lock on the data table.
|
158
k2v_test.py
Executable file
158
k2v_test.py
Executable file
|
@ -0,0 +1,158 @@
|
||||||
|
#!/usr/bin/env python
|
||||||
|
|
||||||
|
import os
|
||||||
|
import requests
|
||||||
|
from datetime import datetime
|
||||||
|
|
||||||
|
# let's talk to our AWS Elasticsearch cluster
|
||||||
|
#from requests_aws4auth import AWS4Auth
|
||||||
|
#auth = AWS4Auth('GK31c2f218a2e44f485b94239e',
|
||||||
|
# 'b892c0665f0ada8a4755dae98baa3b133590e11dae3bcc1f9d769d67f16c3835',
|
||||||
|
# 'us-east-1',
|
||||||
|
# 's3')
|
||||||
|
|
||||||
|
from aws_requests_auth.aws_auth import AWSRequestsAuth
|
||||||
|
auth = AWSRequestsAuth(aws_access_key='GK31c2f218a2e44f485b94239e',
|
||||||
|
aws_secret_access_key='b892c0665f0ada8a4755dae98baa3b133590e11dae3bcc1f9d769d67f16c3835',
|
||||||
|
aws_host='localhost:3812',
|
||||||
|
aws_region='us-east-1',
|
||||||
|
aws_service='k2v')
|
||||||
|
|
||||||
|
|
||||||
|
print("-- ReadIndex")
|
||||||
|
response = requests.get('http://localhost:3812/alex',
|
||||||
|
auth=auth)
|
||||||
|
print(response.headers)
|
||||||
|
print(response.text)
|
||||||
|
|
||||||
|
|
||||||
|
sort_keys = ["a", "b", "c", "d"]
|
||||||
|
|
||||||
|
for sk in sort_keys:
|
||||||
|
print("-- (%s) Put initial (no CT)"%sk)
|
||||||
|
response = requests.put('http://localhost:3812/alex/root?sort_key=%s'%sk,
|
||||||
|
auth=auth,
|
||||||
|
data='{}: Hello, world!'.format(datetime.timestamp(datetime.now())))
|
||||||
|
print(response.headers)
|
||||||
|
print(response.text)
|
||||||
|
|
||||||
|
print("-- Get")
|
||||||
|
response = requests.get('http://localhost:3812/alex/root?sort_key=%s'%sk,
|
||||||
|
auth=auth)
|
||||||
|
print(response.headers)
|
||||||
|
print(response.text)
|
||||||
|
ct = response.headers["x-garage-causality-token"]
|
||||||
|
|
||||||
|
print("-- ReadIndex")
|
||||||
|
response = requests.get('http://localhost:3812/alex',
|
||||||
|
auth=auth)
|
||||||
|
print(response.headers)
|
||||||
|
print(response.text)
|
||||||
|
|
||||||
|
print("-- Put with CT")
|
||||||
|
response = requests.put('http://localhost:3812/alex/root?sort_key=%s'%sk,
|
||||||
|
auth=auth,
|
||||||
|
headers={'x-garage-causality-token': ct},
|
||||||
|
data='{}: Good bye, world!'.format(datetime.timestamp(datetime.now())))
|
||||||
|
print(response.headers)
|
||||||
|
print(response.text)
|
||||||
|
|
||||||
|
print("-- Get")
|
||||||
|
response = requests.get('http://localhost:3812/alex/root?sort_key=%s'%sk,
|
||||||
|
auth=auth)
|
||||||
|
print(response.headers)
|
||||||
|
print(response.text)
|
||||||
|
|
||||||
|
print("-- Put again with same CT (concurrent)")
|
||||||
|
response = requests.put('http://localhost:3812/alex/root?sort_key=%s'%sk,
|
||||||
|
auth=auth,
|
||||||
|
headers={'x-garage-causality-token': ct},
|
||||||
|
data='{}: Concurrent value, oops'.format(datetime.timestamp(datetime.now())))
|
||||||
|
print(response.headers)
|
||||||
|
print(response.text)
|
||||||
|
|
||||||
|
for sk in sort_keys:
|
||||||
|
print("-- (%s) Get"%sk)
|
||||||
|
response = requests.get('http://localhost:3812/alex/root?sort_key=%s'%sk,
|
||||||
|
auth=auth)
|
||||||
|
print(response.headers)
|
||||||
|
print(response.text)
|
||||||
|
ct = response.headers["x-garage-causality-token"]
|
||||||
|
|
||||||
|
print("-- Delete")
|
||||||
|
response = requests.delete('http://localhost:3812/alex/root?sort_key=%s'%sk,
|
||||||
|
headers={'x-garage-causality-token': ct},
|
||||||
|
auth=auth)
|
||||||
|
print(response.headers)
|
||||||
|
print(response.text)
|
||||||
|
|
||||||
|
print("-- ReadIndex")
|
||||||
|
response = requests.get('http://localhost:3812/alex',
|
||||||
|
auth=auth)
|
||||||
|
print(response.headers)
|
||||||
|
print(response.text)
|
||||||
|
|
||||||
|
print("-- InsertBatch")
|
||||||
|
response = requests.post('http://localhost:3812/alex',
|
||||||
|
auth=auth,
|
||||||
|
data='''
|
||||||
|
[
|
||||||
|
{"pk": "root", "sk": "a", "ct": null, "v": "aW5pdGlhbCB0ZXN0Cg=="},
|
||||||
|
{"pk": "root", "sk": "b", "ct": null, "v": "aW5pdGlhbCB0ZXN1Cg=="},
|
||||||
|
{"pk": "root", "sk": "c", "ct": null, "v": "aW5pdGlhbCB0ZXN2Cg=="}
|
||||||
|
]
|
||||||
|
''')
|
||||||
|
print(response.headers)
|
||||||
|
print(response.text)
|
||||||
|
|
||||||
|
print("-- ReadIndex")
|
||||||
|
response = requests.get('http://localhost:3812/alex',
|
||||||
|
auth=auth)
|
||||||
|
print(response.headers)
|
||||||
|
print(response.text)
|
||||||
|
|
||||||
|
for sk in sort_keys:
|
||||||
|
print("-- (%s) Get"%sk)
|
||||||
|
response = requests.get('http://localhost:3812/alex/root?sort_key=%s'%sk,
|
||||||
|
auth=auth)
|
||||||
|
print(response.headers)
|
||||||
|
print(response.text)
|
||||||
|
ct = response.headers["x-garage-causality-token"]
|
||||||
|
|
||||||
|
print("-- ReadBatch")
|
||||||
|
response = requests.post('http://localhost:3812/alex?search',
|
||||||
|
auth=auth,
|
||||||
|
data='''
|
||||||
|
[
|
||||||
|
{"partitionKey": "root"},
|
||||||
|
{"partitionKey": "root", "tombstones": true},
|
||||||
|
{"partitionKey": "root", "tombstones": true, "limit": 2},
|
||||||
|
{"partitionKey": "root", "start": "c", "singleItem": true},
|
||||||
|
{"partitionKey": "root", "start": "b", "end": "d", "tombstones": true}
|
||||||
|
]
|
||||||
|
''')
|
||||||
|
print(response.headers)
|
||||||
|
print(response.text)
|
||||||
|
|
||||||
|
|
||||||
|
print("-- DeleteBatch")
|
||||||
|
response = requests.post('http://localhost:3812/alex?delete',
|
||||||
|
auth=auth,
|
||||||
|
data='''
|
||||||
|
[
|
||||||
|
{"partitionKey": "root", "start": "b", "end": "c"}
|
||||||
|
]
|
||||||
|
''')
|
||||||
|
print(response.headers)
|
||||||
|
print(response.text)
|
||||||
|
|
||||||
|
print("-- ReadBatch")
|
||||||
|
response = requests.post('http://localhost:3812/alex?search',
|
||||||
|
auth=auth,
|
||||||
|
data='''
|
||||||
|
[
|
||||||
|
{"partitionKey": "root"}
|
||||||
|
]
|
||||||
|
''')
|
||||||
|
print(response.headers)
|
||||||
|
print(response.text)
|
|
@ -4,18 +4,16 @@ rec {
|
||||||
*/
|
*/
|
||||||
pkgsSrc = fetchTarball {
|
pkgsSrc = fetchTarball {
|
||||||
# As of 2021-10-04
|
# As of 2021-10-04
|
||||||
url ="https://github.com/NixOS/nixpkgs/archive/b27d18a412b071f5d7991d1648cfe78ee7afe68a.tar.gz";
|
url = "https://github.com/NixOS/nixpkgs/archive/b27d18a412b071f5d7991d1648cfe78ee7afe68a.tar.gz";
|
||||||
sha256 = "1xy9zpypqfxs5gcq5dcla4bfkhxmh5nzn9dyqkr03lqycm9wg5cr";
|
sha256 = "1xy9zpypqfxs5gcq5dcla4bfkhxmh5nzn9dyqkr03lqycm9wg5cr";
|
||||||
};
|
};
|
||||||
cargo2nixSrc = fetchGit {
|
cargo2nixSrc = fetchGit {
|
||||||
# As of 2022-03-17
|
# As of 2022-03-17
|
||||||
url = "https://github.com/superboum/cargo2nix";
|
url = "https://github.com/superboum/cargo2nix";
|
||||||
ref = "main";
|
ref = "dedup_propagate";
|
||||||
rev = "bcbf3ba99e9e01a61eb83a24624419c2dd9dec64";
|
rev = "486675c67249e735dd7eb68e1b9feac9db102be7";
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Shared objects
|
* Shared objects
|
||||||
*/
|
*/
|
||||||
|
|
226
nix/compile.nix
Normal file
226
nix/compile.nix
Normal file
|
@ -0,0 +1,226 @@
|
||||||
|
{
|
||||||
|
system ? builtins.currentSystem,
|
||||||
|
target ? null,
|
||||||
|
compiler ? "rustc",
|
||||||
|
release ? false,
|
||||||
|
git_version ? null,
|
||||||
|
}:
|
||||||
|
|
||||||
|
with import ./common.nix;
|
||||||
|
|
||||||
|
let
|
||||||
|
log = v: builtins.trace v v;
|
||||||
|
|
||||||
|
pkgs = import pkgsSrc {
|
||||||
|
inherit system;
|
||||||
|
${ if target == null then null else "crossSystem" } = { config = target; };
|
||||||
|
overlays = [ cargo2nixOverlay ];
|
||||||
|
};
|
||||||
|
|
||||||
|
/*
|
||||||
|
Rust and Nix triples are not the same. Cargo2nix has a dedicated library
|
||||||
|
to convert Nix triples to Rust ones. We need this conversion as we want to
|
||||||
|
set later options linked to our (rust) target in a generic way. Not only
|
||||||
|
the triple terminology is different, but also the "roles" are named differently.
|
||||||
|
Nix uses a build/host/target terminology where Nix's "host" maps to Cargo's "target".
|
||||||
|
*/
|
||||||
|
rustTarget = log (pkgs.rustBuilder.rustLib.rustTriple pkgs.stdenv.hostPlatform);
|
||||||
|
|
||||||
|
/*
|
||||||
|
Cargo2nix is built for rustOverlay which installs Rust from Mozilla releases.
|
||||||
|
We want our own Rust to avoid incompatibilities, like we had with musl 1.2.0.
|
||||||
|
rustc was built with musl < 1.2.0 and nix shipped musl >= 1.2.0 which lead to compilation breakage.
|
||||||
|
So we want a Rust release that is bound to our Nix repository to avoid these problems.
|
||||||
|
See here for more info: https://musl.libc.org/time64.html
|
||||||
|
Because Cargo2nix does not support the Rust environment shipped by NixOS,
|
||||||
|
we emulate the structure of the Rust object created by rustOverlay.
|
||||||
|
In practice, rustOverlay ships rustc+cargo in a single derivation while
|
||||||
|
NixOS ships them in separate ones. We reunite them with symlinkJoin.
|
||||||
|
*/
|
||||||
|
rustChannel = {
|
||||||
|
rustc = pkgs.symlinkJoin {
|
||||||
|
name = "rust-channel";
|
||||||
|
paths = [
|
||||||
|
pkgs.rustPlatform.rust.cargo
|
||||||
|
pkgs.rustPlatform.rust.rustc
|
||||||
|
];
|
||||||
|
};
|
||||||
|
clippy = pkgs.symlinkJoin {
|
||||||
|
name = "clippy-channel";
|
||||||
|
paths = [
|
||||||
|
pkgs.rustPlatform.rust.cargo
|
||||||
|
pkgs.rustPlatform.rust.rustc
|
||||||
|
pkgs.clippy
|
||||||
|
];
|
||||||
|
};
|
||||||
|
}.${compiler};
|
||||||
|
|
||||||
|
clippyBuilder = pkgs.writeScriptBin "clippy" ''
|
||||||
|
#!${pkgs.stdenv.shell}
|
||||||
|
. ${cargo2nixSrc + "/overlay/utils.sh"}
|
||||||
|
isBuildScript=
|
||||||
|
args=("$@")
|
||||||
|
for i in "''${!args[@]}"; do
|
||||||
|
if [ "xmetadata=" = "x''${args[$i]::9}" ]; then
|
||||||
|
args[$i]=metadata=$NIX_RUST_METADATA
|
||||||
|
elif [ "x--crate-name" = "x''${args[$i]}" ] && [ "xbuild_script_" = "x''${args[$i+1]::13}" ]; then
|
||||||
|
isBuildScript=1
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
if [ "$isBuildScript" ]; then
|
||||||
|
args+=($NIX_RUST_BUILD_LINK_FLAGS)
|
||||||
|
else
|
||||||
|
args+=($NIX_RUST_LINK_FLAGS)
|
||||||
|
fi
|
||||||
|
touch invoke.log
|
||||||
|
echo "''${args[@]}" >>invoke.log
|
||||||
|
|
||||||
|
exec ${rustChannel}/bin/clippy-driver --deny warnings "''${args[@]}"
|
||||||
|
'';
|
||||||
|
|
||||||
|
buildEnv = (drv: {
|
||||||
|
rustc = drv.setBuildEnv;
|
||||||
|
clippy = ''
|
||||||
|
${drv.setBuildEnv or "" }
|
||||||
|
echo
|
||||||
|
echo --- BUILDING WITH CLIPPY ---
|
||||||
|
echo
|
||||||
|
|
||||||
|
export RUSTC=${clippyBuilder}/bin/clippy
|
||||||
|
'';
|
||||||
|
}.${compiler});
|
||||||
|
|
||||||
|
/*
|
||||||
|
Cargo2nix provides many overrides by default, you can take inspiration from them:
|
||||||
|
https://github.com/cargo2nix/cargo2nix/blob/master/overlay/overrides.nix
|
||||||
|
|
||||||
|
You can have a complete list of the available options by looking at the overriden object, mkcrate:
|
||||||
|
https://github.com/cargo2nix/cargo2nix/blob/master/overlay/mkcrate.nix
|
||||||
|
*/
|
||||||
|
overrides = pkgs.rustBuilder.overrides.all ++ [
|
||||||
|
/*
|
||||||
|
[1] We add some logic to compile our crates with clippy, it provides us many additional lints
|
||||||
|
|
||||||
|
[2] We need to alter Nix hardening to make static binaries: PIE,
|
||||||
|
Position Independent Executables seems to be supported only on amd64. Having
|
||||||
|
this flag set either 1. make our executables crash or 2. compile as dynamic on some platforms.
|
||||||
|
Here, we deactivate it. Later (find `codegenOpts`), we reactivate it for supported targets
|
||||||
|
(only amd64 currently) through the `-static-pie` flag.
|
||||||
|
PIE is a feature used by ASLR, which helps mitigate security issues.
|
||||||
|
Learn more about Nix Hardening at: https://github.com/NixOS/nixpkgs/blob/master/pkgs/build-support/cc-wrapper/add-hardening.sh
|
||||||
|
|
||||||
|
[3] We want to inject the git version while keeping the build deterministic.
|
||||||
|
As we do not want to consider the .git folder as part of the input source,
|
||||||
|
we ask the user (the CI often) to pass the value to Nix.
|
||||||
|
|
||||||
|
[4] We ship some parts of the code disabled by default by putting them behind a flag.
|
||||||
|
It speeds up the compilation (when the feature is not required) and released crates have less dependency by default (less attack surface, disk space, etc.).
|
||||||
|
But we want to ship these additional features when we release Garage.
|
||||||
|
In the end, we chose to exclude all features from debug builds while putting (all of) them in the release builds.
|
||||||
|
Currently, the only feature of Garage is kubernetes-discovery from the garage_rpc crate.
|
||||||
|
*/
|
||||||
|
(pkgs.rustBuilder.rustLib.makeOverride {
|
||||||
|
name = "garage";
|
||||||
|
overrideAttrs = drv: {
|
||||||
|
/* [1] */ setBuildEnv = (buildEnv drv);
|
||||||
|
/* [2] */ hardeningDisable = [ "pie" ];
|
||||||
|
};
|
||||||
|
})
|
||||||
|
|
||||||
|
(pkgs.rustBuilder.rustLib.makeOverride {
|
||||||
|
name = "garage_rpc";
|
||||||
|
overrideAttrs = drv: { /* [1] */ setBuildEnv = (buildEnv drv); };
|
||||||
|
overrideArgs = old: {
|
||||||
|
/* [4] */ features = if release then [ "kubernetes-discovery" ] else [];
|
||||||
|
};
|
||||||
|
})
|
||||||
|
|
||||||
|
(pkgs.rustBuilder.rustLib.makeOverride {
|
||||||
|
name = "garage_db";
|
||||||
|
overrideAttrs = drv: { /* [1] */ setBuildEnv = (buildEnv drv); };
|
||||||
|
})
|
||||||
|
|
||||||
|
(pkgs.rustBuilder.rustLib.makeOverride {
|
||||||
|
name = "garage_util";
|
||||||
|
overrideAttrs = drv:
|
||||||
|
(if git_version != null then {
|
||||||
|
/* [3] */ preConfigure = ''
|
||||||
|
${drv.preConfigure or ""}
|
||||||
|
export GIT_VERSION="${git_version}"
|
||||||
|
'';
|
||||||
|
} else {})
|
||||||
|
//
|
||||||
|
{ /* [1] */ setBuildEnv = (buildEnv drv); };
|
||||||
|
})
|
||||||
|
|
||||||
|
(pkgs.rustBuilder.rustLib.makeOverride {
|
||||||
|
name = "garage_table";
|
||||||
|
overrideAttrs = drv: { /* [1] */ setBuildEnv = (buildEnv drv); };
|
||||||
|
})
|
||||||
|
|
||||||
|
(pkgs.rustBuilder.rustLib.makeOverride {
|
||||||
|
name = "garage_block";
|
||||||
|
overrideAttrs = drv: { /* [1] */ setBuildEnv = (buildEnv drv); };
|
||||||
|
})
|
||||||
|
|
||||||
|
(pkgs.rustBuilder.rustLib.makeOverride {
|
||||||
|
name = "garage_model";
|
||||||
|
overrideAttrs = drv: { /* [1] */ setBuildEnv = (buildEnv drv); };
|
||||||
|
})
|
||||||
|
|
||||||
|
(pkgs.rustBuilder.rustLib.makeOverride {
|
||||||
|
name = "garage_api";
|
||||||
|
overrideAttrs = drv: { /* [1] */ setBuildEnv = (buildEnv drv); };
|
||||||
|
})
|
||||||
|
|
||||||
|
(pkgs.rustBuilder.rustLib.makeOverride {
|
||||||
|
name = "garage_web";
|
||||||
|
overrideAttrs = drv: { /* [1] */ setBuildEnv = (buildEnv drv); };
|
||||||
|
})
|
||||||
|
|
||||||
|
(pkgs.rustBuilder.rustLib.makeOverride {
|
||||||
|
name = "k2v-client";
|
||||||
|
overrideAttrs = drv: { /* [1] */ setBuildEnv = (buildEnv drv); };
|
||||||
|
})
|
||||||
|
];
|
||||||
|
|
||||||
|
packageFun = import ../Cargo.nix;
|
||||||
|
|
||||||
|
/*
|
||||||
|
We compile fully static binaries with musl to simplify deployment on most systems.
|
||||||
|
When possible, we reactivate PIE hardening (see above).
|
||||||
|
|
||||||
|
Also, if you set the RUSTFLAGS environment variable, the following parameters will
|
||||||
|
be ignored.
|
||||||
|
|
||||||
|
For more information on static builds, please refer to Rust's RFC 1721.
|
||||||
|
https://rust-lang.github.io/rfcs/1721-crt-static.html#specifying-dynamicstatic-c-runtime-linkage
|
||||||
|
*/
|
||||||
|
|
||||||
|
codegenOpts = {
|
||||||
|
"armv6l-unknown-linux-musleabihf" = [ "target-feature=+crt-static" "link-arg=-static" ]; /* compile as dynamic with static-pie */
|
||||||
|
"aarch64-unknown-linux-musl" = [ "target-feature=+crt-static" "link-arg=-static" ]; /* segfault with static-pie */
|
||||||
|
"i686-unknown-linux-musl" = [ "target-feature=+crt-static" "link-arg=-static" ]; /* segfault with static-pie */
|
||||||
|
"x86_64-unknown-linux-musl" = [ "target-feature=+crt-static" "link-arg=-static-pie" ];
|
||||||
|
};
|
||||||
|
|
||||||
|
in
|
||||||
|
/*
|
||||||
|
The following definition is not elegant as we use a low level function of Cargo2nix
|
||||||
|
that enables us to pass our custom rustChannel object. We need this low level definition
|
||||||
|
to pass Nix's Rust toolchains instead of Mozilla's one.
|
||||||
|
|
||||||
|
target is mandatory but must be kept to null to allow cargo2nix to set it to the appropriate value
|
||||||
|
for each crate.
|
||||||
|
*/
|
||||||
|
pkgs.rustBuilder.makePackageSet {
|
||||||
|
inherit packageFun rustChannel release codegenOpts;
|
||||||
|
packageOverrides = overrides;
|
||||||
|
target = null;
|
||||||
|
|
||||||
|
buildRustPackages = pkgs.buildPackages.rustBuilder.makePackageSet {
|
||||||
|
inherit rustChannel packageFun codegenOpts;
|
||||||
|
packageOverrides = overrides;
|
||||||
|
target = null;
|
||||||
|
};
|
||||||
|
}
|
|
@ -1,4 +1,9 @@
|
||||||
substituters = https://cache.nixos.org https://nix.web.deuxfleurs.fr
|
substituters = https://cache.nixos.org https://nix.web.deuxfleurs.fr
|
||||||
trusted-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= nix.web.deuxfleurs.fr:eTGL6kvaQn6cDR/F9lDYUIP9nCVR/kkshYfLDJf1yKs=
|
trusted-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= nix.web.deuxfleurs.fr:eTGL6kvaQn6cDR/F9lDYUIP9nCVR/kkshYfLDJf1yKs=
|
||||||
max-jobs = auto
|
max-jobs = auto
|
||||||
cores = 4
|
cores = 0
|
||||||
|
log-lines = 200
|
||||||
|
filter-syscalls = false
|
||||||
|
sandbox = false
|
||||||
|
keep-outputs = true
|
||||||
|
keep-derivations = true
|
||||||
|
|
14
script/not-dynamic.sh
Executable file
14
script/not-dynamic.sh
Executable file
|
@ -0,0 +1,14 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
if [ "$#" -ne 1 ]; then
|
||||||
|
echo "[fail] usage: $0 binary"
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
|
||||||
|
if file $1 | grep 'dynamically linked' 2>&1; then
|
||||||
|
echo "[fail] $1 is dynamic"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
echo "[ok] $1 is probably static"
|
102
shell.nix
102
shell.nix
|
@ -1,8 +1,5 @@
|
||||||
{
|
{
|
||||||
system ? builtins.currentSystem,
|
system ? builtins.currentSystem,
|
||||||
rust ? true,
|
|
||||||
integration ? true,
|
|
||||||
release ? true,
|
|
||||||
}:
|
}:
|
||||||
|
|
||||||
with import ./nix/common.nix;
|
with import ./nix/common.nix;
|
||||||
|
@ -16,9 +13,59 @@ let
|
||||||
winscp = (import ./nix/winscp.nix) pkgs;
|
winscp = (import ./nix/winscp.nix) pkgs;
|
||||||
|
|
||||||
in
|
in
|
||||||
|
{
|
||||||
|
|
||||||
pkgs.mkShell {
|
/* --- Rust Shell ---
|
||||||
shellHook = ''
|
* Use it to compile Garage
|
||||||
|
*/
|
||||||
|
rust = pkgs.mkShell {
|
||||||
|
shellHook = ''
|
||||||
|
function refresh_toolchain {
|
||||||
|
nix copy \
|
||||||
|
--to 's3://nix?endpoint=garage.deuxfleurs.fr®ion=garage&secret-key=/etc/nix/signing-key.sec' \
|
||||||
|
$(nix-store -qR \
|
||||||
|
$(nix-build --quiet --no-build-output --no-out-link nix/toolchain.nix))
|
||||||
|
}
|
||||||
|
'';
|
||||||
|
|
||||||
|
nativeBuildInputs = [
|
||||||
|
#pkgs.rustPlatform.rust.rustc
|
||||||
|
pkgs.rustPlatform.rust.cargo
|
||||||
|
#pkgs.clippy
|
||||||
|
pkgs.rustfmt
|
||||||
|
#pkgs.perl
|
||||||
|
#pkgs.protobuf
|
||||||
|
#pkgs.pkg-config
|
||||||
|
#pkgs.openssl
|
||||||
|
pkgs.file
|
||||||
|
#cargo2nix.packages.x86_64-linux.cargo2nix
|
||||||
|
];
|
||||||
|
};
|
||||||
|
|
||||||
|
/* --- Integration shell ---
|
||||||
|
* Use it to test Garage with common S3 clients
|
||||||
|
*/
|
||||||
|
integration = pkgs.mkShell {
|
||||||
|
nativeBuildInputs = [
|
||||||
|
winscp
|
||||||
|
pkgs.s3cmd
|
||||||
|
pkgs.awscli2
|
||||||
|
pkgs.minio-client
|
||||||
|
pkgs.rclone
|
||||||
|
pkgs.socat
|
||||||
|
pkgs.psmisc
|
||||||
|
pkgs.which
|
||||||
|
pkgs.openssl
|
||||||
|
pkgs.curl
|
||||||
|
pkgs.jq
|
||||||
|
];
|
||||||
|
};
|
||||||
|
|
||||||
|
/* --- Release shell ---
|
||||||
|
* A shell built to make releasing easier
|
||||||
|
*/
|
||||||
|
release = pkgs.mkShell {
|
||||||
|
shellHook = ''
|
||||||
function to_s3 {
|
function to_s3 {
|
||||||
aws \
|
aws \
|
||||||
--endpoint-url https://garage.deuxfleurs.fr \
|
--endpoint-url https://garage.deuxfleurs.fr \
|
||||||
|
@ -62,43 +109,12 @@ function refresh_index {
|
||||||
result/share/_releases.html \
|
result/share/_releases.html \
|
||||||
s3://garagehq.deuxfleurs.fr/
|
s3://garagehq.deuxfleurs.fr/
|
||||||
}
|
}
|
||||||
|
'';
|
||||||
|
nativeBuildInputs = [
|
||||||
|
pkgs.awscli2
|
||||||
|
kaniko
|
||||||
|
];
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
function refresh_toolchain {
|
|
||||||
nix copy \
|
|
||||||
--to 's3://nix?endpoint=garage.deuxfleurs.fr®ion=garage&secret-key=/etc/nix/signing-key.sec' \
|
|
||||||
$(nix-store -qR \
|
|
||||||
$(nix-build --quiet --no-build-output --no-out-link nix/toolchain.nix))
|
|
||||||
}
|
|
||||||
'';
|
|
||||||
|
|
||||||
nativeBuildInputs =
|
|
||||||
(if rust then [
|
|
||||||
pkgs.rustPlatform.rust.rustc
|
|
||||||
pkgs.rustPlatform.rust.cargo
|
|
||||||
pkgs.clippy
|
|
||||||
pkgs.rustfmt
|
|
||||||
pkgs.perl
|
|
||||||
pkgs.protobuf
|
|
||||||
cargo2nix.packages.x86_64-linux.cargo2nix
|
|
||||||
] else [])
|
|
||||||
++
|
|
||||||
(if integration then [
|
|
||||||
winscp
|
|
||||||
pkgs.s3cmd
|
|
||||||
pkgs.awscli2
|
|
||||||
pkgs.minio-client
|
|
||||||
pkgs.rclone
|
|
||||||
pkgs.socat
|
|
||||||
pkgs.psmisc
|
|
||||||
pkgs.which
|
|
||||||
pkgs.openssl
|
|
||||||
pkgs.curl
|
|
||||||
pkgs.jq
|
|
||||||
] else [])
|
|
||||||
++
|
|
||||||
(if release then [
|
|
||||||
pkgs.awscli2
|
|
||||||
kaniko
|
|
||||||
] else [])
|
|
||||||
;
|
|
||||||
}
|
|
||||||
|
|
|
@ -1,29 +0,0 @@
|
||||||
[package]
|
|
||||||
name = "garage_admin"
|
|
||||||
version = "0.7.0"
|
|
||||||
authors = ["Maximilien Richer <code@mricher.fr>"]
|
|
||||||
edition = "2018"
|
|
||||||
license = "AGPL-3.0"
|
|
||||||
description = "Administration and metrics REST HTTP server for Garage"
|
|
||||||
repository = "https://git.deuxfleurs.fr/Deuxfleurs/garage"
|
|
||||||
|
|
||||||
[lib]
|
|
||||||
path = "lib.rs"
|
|
||||||
|
|
||||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
garage_util = { version = "0.7.0", path = "../util" }
|
|
||||||
|
|
||||||
hex = "0.4"
|
|
||||||
|
|
||||||
futures = "0.3"
|
|
||||||
futures-util = "0.3"
|
|
||||||
http = "0.2"
|
|
||||||
hyper = "0.14"
|
|
||||||
tracing = "0.1.30"
|
|
||||||
|
|
||||||
opentelemetry = { version = "0.17", features = [ "rt-tokio" ] }
|
|
||||||
opentelemetry-prometheus = "0.10"
|
|
||||||
opentelemetry-otlp = "0.10"
|
|
||||||
prometheus = "0.13"
|
|
|
@ -1,6 +0,0 @@
|
||||||
//! Crate for handling the admin and metric HTTP APIs
|
|
||||||
#[macro_use]
|
|
||||||
extern crate tracing;
|
|
||||||
|
|
||||||
pub mod metrics;
|
|
||||||
pub mod tracing_setup;
|
|
|
@ -1,146 +0,0 @@
|
||||||
use std::convert::Infallible;
|
|
||||||
use std::net::SocketAddr;
|
|
||||||
use std::sync::Arc;
|
|
||||||
use std::time::SystemTime;
|
|
||||||
|
|
||||||
use futures::future::*;
|
|
||||||
use hyper::{
|
|
||||||
header::CONTENT_TYPE,
|
|
||||||
service::{make_service_fn, service_fn},
|
|
||||||
Body, Method, Request, Response, Server,
|
|
||||||
};
|
|
||||||
|
|
||||||
use opentelemetry::{
|
|
||||||
global,
|
|
||||||
metrics::{BoundCounter, BoundValueRecorder},
|
|
||||||
trace::{FutureExt, TraceContextExt, Tracer},
|
|
||||||
Context,
|
|
||||||
};
|
|
||||||
use opentelemetry_prometheus::PrometheusExporter;
|
|
||||||
|
|
||||||
use prometheus::{Encoder, TextEncoder};
|
|
||||||
|
|
||||||
use garage_util::error::Error as GarageError;
|
|
||||||
use garage_util::metrics::*;
|
|
||||||
|
|
||||||
// serve_req on metric endpoint
|
|
||||||
async fn serve_req(
|
|
||||||
req: Request<Body>,
|
|
||||||
admin_server: Arc<AdminServer>,
|
|
||||||
) -> Result<Response<Body>, hyper::Error> {
|
|
||||||
debug!("Receiving request at path {}", req.uri());
|
|
||||||
let request_start = SystemTime::now();
|
|
||||||
|
|
||||||
admin_server.metrics.http_counter.add(1);
|
|
||||||
|
|
||||||
let response = match (req.method(), req.uri().path()) {
|
|
||||||
(&Method::GET, "/metrics") => {
|
|
||||||
let mut buffer = vec![];
|
|
||||||
let encoder = TextEncoder::new();
|
|
||||||
|
|
||||||
let tracer = opentelemetry::global::tracer("garage");
|
|
||||||
let metric_families = tracer.in_span("admin/gather_metrics", |_| {
|
|
||||||
admin_server.exporter.registry().gather()
|
|
||||||
});
|
|
||||||
|
|
||||||
encoder.encode(&metric_families, &mut buffer).unwrap();
|
|
||||||
admin_server
|
|
||||||
.metrics
|
|
||||||
.http_body_gauge
|
|
||||||
.record(buffer.len() as u64);
|
|
||||||
|
|
||||||
Response::builder()
|
|
||||||
.status(200)
|
|
||||||
.header(CONTENT_TYPE, encoder.format_type())
|
|
||||||
.body(Body::from(buffer))
|
|
||||||
.unwrap()
|
|
||||||
}
|
|
||||||
_ => Response::builder()
|
|
||||||
.status(404)
|
|
||||||
.body(Body::from("Not implemented"))
|
|
||||||
.unwrap(),
|
|
||||||
};
|
|
||||||
|
|
||||||
admin_server
|
|
||||||
.metrics
|
|
||||||
.http_req_histogram
|
|
||||||
.record(request_start.elapsed().map_or(0.0, |d| d.as_secs_f64()));
|
|
||||||
Ok(response)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AdminServer hold the admin server internal admin_server and the metric exporter
|
|
||||||
pub struct AdminServer {
|
|
||||||
exporter: PrometheusExporter,
|
|
||||||
metrics: AdminServerMetrics,
|
|
||||||
}
|
|
||||||
|
|
||||||
// GarageMetricadmin_server holds the metrics counter definition for Garage
|
|
||||||
// FIXME: we would rather have that split up among the different libraries?
|
|
||||||
struct AdminServerMetrics {
|
|
||||||
http_counter: BoundCounter<u64>,
|
|
||||||
http_body_gauge: BoundValueRecorder<u64>,
|
|
||||||
http_req_histogram: BoundValueRecorder<f64>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl AdminServer {
|
|
||||||
/// init initilialize the AdminServer and background metric server
|
|
||||||
pub fn init() -> AdminServer {
|
|
||||||
let exporter = opentelemetry_prometheus::exporter().init();
|
|
||||||
let meter = global::meter("garage/admin_server");
|
|
||||||
AdminServer {
|
|
||||||
exporter,
|
|
||||||
metrics: AdminServerMetrics {
|
|
||||||
http_counter: meter
|
|
||||||
.u64_counter("admin.http_requests_total")
|
|
||||||
.with_description("Total number of HTTP requests made.")
|
|
||||||
.init()
|
|
||||||
.bind(&[]),
|
|
||||||
http_body_gauge: meter
|
|
||||||
.u64_value_recorder("admin.http_response_size_bytes")
|
|
||||||
.with_description("The metrics HTTP response sizes in bytes.")
|
|
||||||
.init()
|
|
||||||
.bind(&[]),
|
|
||||||
http_req_histogram: meter
|
|
||||||
.f64_value_recorder("admin.http_request_duration_seconds")
|
|
||||||
.with_description("The HTTP request latencies in seconds.")
|
|
||||||
.init()
|
|
||||||
.bind(&[]),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
/// run execute the admin server on the designated HTTP port and listen for requests
|
|
||||||
pub async fn run(
|
|
||||||
self,
|
|
||||||
bind_addr: SocketAddr,
|
|
||||||
shutdown_signal: impl Future<Output = ()>,
|
|
||||||
) -> Result<(), GarageError> {
|
|
||||||
let admin_server = Arc::new(self);
|
|
||||||
// For every connection, we must make a `Service` to handle all
|
|
||||||
// incoming HTTP requests on said connection.
|
|
||||||
let make_svc = make_service_fn(move |_conn| {
|
|
||||||
let admin_server = admin_server.clone();
|
|
||||||
// This is the `Service` that will handle the connection.
|
|
||||||
// `service_fn` is a helper to convert a function that
|
|
||||||
// returns a Response into a `Service`.
|
|
||||||
async move {
|
|
||||||
Ok::<_, Infallible>(service_fn(move |req| {
|
|
||||||
let tracer = opentelemetry::global::tracer("garage");
|
|
||||||
let span = tracer
|
|
||||||
.span_builder("admin/request")
|
|
||||||
.with_trace_id(gen_trace_id())
|
|
||||||
.start(&tracer);
|
|
||||||
|
|
||||||
serve_req(req, admin_server.clone())
|
|
||||||
.with_context(Context::current_with_span(span))
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
let server = Server::bind(&bind_addr).serve(make_svc);
|
|
||||||
let graceful = server.with_graceful_shutdown(shutdown_signal);
|
|
||||||
info!("Admin server listening on http://{}", bind_addr);
|
|
||||||
|
|
||||||
graceful.await?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,6 +1,6 @@
|
||||||
[package]
|
[package]
|
||||||
name = "garage_api"
|
name = "garage_api"
|
||||||
version = "0.7.0"
|
version = "0.7.3"
|
||||||
authors = ["Alex Auvolat <alex@adnab.me>"]
|
authors = ["Alex Auvolat <alex@adnab.me>"]
|
||||||
edition = "2018"
|
edition = "2018"
|
||||||
license = "AGPL-3.0"
|
license = "AGPL-3.0"
|
||||||
|
@ -14,11 +14,13 @@ path = "lib.rs"
|
||||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
garage_model = { version = "0.7.0", path = "../model" }
|
garage_model = { version = "0.7.3", path = "../model" }
|
||||||
garage_table = { version = "0.7.0", path = "../table" }
|
garage_table = { version = "0.7.3", path = "../table" }
|
||||||
garage_block = { version = "0.7.0", path = "../block" }
|
garage_block = { version = "0.7.3", path = "../block" }
|
||||||
garage_util = { version = "0.7.0", path = "../util" }
|
garage_util = { version = "0.7.3", path = "../util" }
|
||||||
|
garage_rpc = { version = "0.7.3", path = "../rpc" }
|
||||||
|
|
||||||
|
async-trait = "0.1.7"
|
||||||
base64 = "0.13"
|
base64 = "0.13"
|
||||||
bytes = "1.0"
|
bytes = "1.0"
|
||||||
chrono = "0.4"
|
chrono = "0.4"
|
||||||
|
@ -52,3 +54,9 @@ quick-xml = { version = "0.21", features = [ "serialize" ] }
|
||||||
url = "2.1"
|
url = "2.1"
|
||||||
|
|
||||||
opentelemetry = "0.17"
|
opentelemetry = "0.17"
|
||||||
|
opentelemetry-prometheus = "0.10"
|
||||||
|
opentelemetry-otlp = "0.10"
|
||||||
|
prometheus = "0.13"
|
||||||
|
|
||||||
|
[features]
|
||||||
|
k2v = [ "garage_util/k2v", "garage_model/k2v" ]
|
||||||
|
|
199
src/api/admin/api_server.rs
Normal file
199
src/api/admin/api_server.rs
Normal file
|
@ -0,0 +1,199 @@
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use async_trait::async_trait;
|
||||||
|
|
||||||
|
use futures::future::Future;
|
||||||
|
use http::header::{
|
||||||
|
ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN, ALLOW, CONTENT_TYPE,
|
||||||
|
};
|
||||||
|
use hyper::{Body, Request, Response};
|
||||||
|
|
||||||
|
use opentelemetry::trace::{SpanRef, Tracer};
|
||||||
|
use opentelemetry_prometheus::PrometheusExporter;
|
||||||
|
use prometheus::{Encoder, TextEncoder};
|
||||||
|
|
||||||
|
use garage_model::garage::Garage;
|
||||||
|
use garage_util::error::Error as GarageError;
|
||||||
|
|
||||||
|
use crate::generic_server::*;
|
||||||
|
|
||||||
|
use crate::admin::bucket::*;
|
||||||
|
use crate::admin::cluster::*;
|
||||||
|
use crate::admin::error::*;
|
||||||
|
use crate::admin::key::*;
|
||||||
|
use crate::admin::router::{Authorization, Endpoint};
|
||||||
|
|
||||||
|
pub struct AdminApiServer {
|
||||||
|
garage: Arc<Garage>,
|
||||||
|
exporter: PrometheusExporter,
|
||||||
|
metrics_token: Option<String>,
|
||||||
|
admin_token: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl AdminApiServer {
|
||||||
|
pub fn new(garage: Arc<Garage>) -> Self {
|
||||||
|
let exporter = opentelemetry_prometheus::exporter().init();
|
||||||
|
let cfg = &garage.config.admin;
|
||||||
|
let metrics_token = cfg
|
||||||
|
.metrics_token
|
||||||
|
.as_ref()
|
||||||
|
.map(|tok| format!("Bearer {}", tok));
|
||||||
|
let admin_token = cfg
|
||||||
|
.admin_token
|
||||||
|
.as_ref()
|
||||||
|
.map(|tok| format!("Bearer {}", tok));
|
||||||
|
Self {
|
||||||
|
garage,
|
||||||
|
exporter,
|
||||||
|
metrics_token,
|
||||||
|
admin_token,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn run(self, shutdown_signal: impl Future<Output = ()>) -> Result<(), GarageError> {
|
||||||
|
if let Some(bind_addr) = self.garage.config.admin.api_bind_addr {
|
||||||
|
let region = self.garage.config.s3_api.s3_region.clone();
|
||||||
|
ApiServer::new(region, self)
|
||||||
|
.run_server(bind_addr, shutdown_signal)
|
||||||
|
.await
|
||||||
|
} else {
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn handle_options(&self, _req: &Request<Body>) -> Result<Response<Body>, Error> {
|
||||||
|
Ok(Response::builder()
|
||||||
|
.status(204)
|
||||||
|
.header(ALLOW, "OPTIONS, GET, POST")
|
||||||
|
.header(ACCESS_CONTROL_ALLOW_METHODS, "OPTIONS, GET, POST")
|
||||||
|
.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
|
||||||
|
.body(Body::empty())?)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn handle_metrics(&self) -> Result<Response<Body>, Error> {
|
||||||
|
let mut buffer = vec![];
|
||||||
|
let encoder = TextEncoder::new();
|
||||||
|
|
||||||
|
let tracer = opentelemetry::global::tracer("garage");
|
||||||
|
let metric_families = tracer.in_span("admin/gather_metrics", |_| {
|
||||||
|
self.exporter.registry().gather()
|
||||||
|
});
|
||||||
|
|
||||||
|
encoder
|
||||||
|
.encode(&metric_families, &mut buffer)
|
||||||
|
.ok_or_internal_error("Could not serialize metrics")?;
|
||||||
|
|
||||||
|
Ok(Response::builder()
|
||||||
|
.status(200)
|
||||||
|
.header(CONTENT_TYPE, encoder.format_type())
|
||||||
|
.body(Body::from(buffer))?)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl ApiHandler for AdminApiServer {
|
||||||
|
const API_NAME: &'static str = "admin";
|
||||||
|
const API_NAME_DISPLAY: &'static str = "Admin";
|
||||||
|
|
||||||
|
type Endpoint = Endpoint;
|
||||||
|
type Error = Error;
|
||||||
|
|
||||||
|
fn parse_endpoint(&self, req: &Request<Body>) -> Result<Endpoint, Error> {
|
||||||
|
Endpoint::from_request(req)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handle(
|
||||||
|
&self,
|
||||||
|
req: Request<Body>,
|
||||||
|
endpoint: Endpoint,
|
||||||
|
) -> Result<Response<Body>, Error> {
|
||||||
|
let expected_auth_header =
|
||||||
|
match endpoint.authorization_type() {
|
||||||
|
Authorization::MetricsToken => self.metrics_token.as_ref(),
|
||||||
|
Authorization::AdminToken => match &self.admin_token {
|
||||||
|
None => return Err(Error::forbidden(
|
||||||
|
"Admin token isn't configured, admin API access is disabled for security.",
|
||||||
|
)),
|
||||||
|
Some(t) => Some(t),
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
if let Some(h) = expected_auth_header {
|
||||||
|
match req.headers().get("Authorization") {
|
||||||
|
None => return Err(Error::forbidden("Authorization token must be provided")),
|
||||||
|
Some(v) => {
|
||||||
|
let authorized = v.to_str().map(|hv| hv.trim() == h).unwrap_or(false);
|
||||||
|
if !authorized {
|
||||||
|
return Err(Error::forbidden("Invalid authorization token provided"));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
match endpoint {
|
||||||
|
Endpoint::Options => self.handle_options(&req),
|
||||||
|
Endpoint::Metrics => self.handle_metrics(),
|
||||||
|
Endpoint::GetClusterStatus => handle_get_cluster_status(&self.garage).await,
|
||||||
|
Endpoint::ConnectClusterNodes => handle_connect_cluster_nodes(&self.garage, req).await,
|
||||||
|
// Layout
|
||||||
|
Endpoint::GetClusterLayout => handle_get_cluster_layout(&self.garage).await,
|
||||||
|
Endpoint::UpdateClusterLayout => handle_update_cluster_layout(&self.garage, req).await,
|
||||||
|
Endpoint::ApplyClusterLayout => handle_apply_cluster_layout(&self.garage, req).await,
|
||||||
|
Endpoint::RevertClusterLayout => handle_revert_cluster_layout(&self.garage, req).await,
|
||||||
|
// Keys
|
||||||
|
Endpoint::ListKeys => handle_list_keys(&self.garage).await,
|
||||||
|
Endpoint::GetKeyInfo { id, search } => {
|
||||||
|
handle_get_key_info(&self.garage, id, search).await
|
||||||
|
}
|
||||||
|
Endpoint::CreateKey => handle_create_key(&self.garage, req).await,
|
||||||
|
Endpoint::ImportKey => handle_import_key(&self.garage, req).await,
|
||||||
|
Endpoint::UpdateKey { id } => handle_update_key(&self.garage, id, req).await,
|
||||||
|
Endpoint::DeleteKey { id } => handle_delete_key(&self.garage, id).await,
|
||||||
|
// Buckets
|
||||||
|
Endpoint::ListBuckets => handle_list_buckets(&self.garage).await,
|
||||||
|
Endpoint::GetBucketInfo { id, global_alias } => {
|
||||||
|
handle_get_bucket_info(&self.garage, id, global_alias).await
|
||||||
|
}
|
||||||
|
Endpoint::CreateBucket => handle_create_bucket(&self.garage, req).await,
|
||||||
|
Endpoint::DeleteBucket { id } => handle_delete_bucket(&self.garage, id).await,
|
||||||
|
Endpoint::PutBucketWebsite { id } => {
|
||||||
|
handle_put_bucket_website(&self.garage, id, req).await
|
||||||
|
}
|
||||||
|
Endpoint::DeleteBucketWebsite { id } => {
|
||||||
|
handle_delete_bucket_website(&self.garage, id).await
|
||||||
|
}
|
||||||
|
// Bucket-key permissions
|
||||||
|
Endpoint::BucketAllowKey => {
|
||||||
|
handle_bucket_change_key_perm(&self.garage, req, true).await
|
||||||
|
}
|
||||||
|
Endpoint::BucketDenyKey => {
|
||||||
|
handle_bucket_change_key_perm(&self.garage, req, false).await
|
||||||
|
}
|
||||||
|
// Bucket aliasing
|
||||||
|
Endpoint::GlobalAliasBucket { id, alias } => {
|
||||||
|
handle_global_alias_bucket(&self.garage, id, alias).await
|
||||||
|
}
|
||||||
|
Endpoint::GlobalUnaliasBucket { id, alias } => {
|
||||||
|
handle_global_unalias_bucket(&self.garage, id, alias).await
|
||||||
|
}
|
||||||
|
Endpoint::LocalAliasBucket {
|
||||||
|
id,
|
||||||
|
access_key_id,
|
||||||
|
alias,
|
||||||
|
} => handle_local_alias_bucket(&self.garage, id, access_key_id, alias).await,
|
||||||
|
Endpoint::LocalUnaliasBucket {
|
||||||
|
id,
|
||||||
|
access_key_id,
|
||||||
|
alias,
|
||||||
|
} => handle_local_unalias_bucket(&self.garage, id, access_key_id, alias).await,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ApiEndpoint for Endpoint {
|
||||||
|
fn name(&self) -> &'static str {
|
||||||
|
Endpoint::name(self)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn add_span_attributes(&self, _span: SpanRef<'_>) {}
|
||||||
|
}
|
542
src/api/admin/bucket.rs
Normal file
542
src/api/admin/bucket.rs
Normal file
|
@ -0,0 +1,542 @@
|
||||||
|
use std::collections::HashMap;
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use hyper::{Body, Request, Response, StatusCode};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use garage_util::crdt::*;
|
||||||
|
use garage_util::data::*;
|
||||||
|
use garage_util::time::*;
|
||||||
|
|
||||||
|
use garage_table::*;
|
||||||
|
|
||||||
|
use garage_model::bucket_alias_table::*;
|
||||||
|
use garage_model::bucket_table::*;
|
||||||
|
use garage_model::garage::Garage;
|
||||||
|
use garage_model::permission::*;
|
||||||
|
|
||||||
|
use crate::admin::error::*;
|
||||||
|
use crate::admin::key::ApiBucketKeyPerm;
|
||||||
|
use crate::common_error::CommonError;
|
||||||
|
use crate::helpers::{json_ok_response, parse_json_body};
|
||||||
|
|
||||||
|
pub async fn handle_list_buckets(garage: &Arc<Garage>) -> Result<Response<Body>, Error> {
|
||||||
|
let buckets = garage
|
||||||
|
.bucket_table
|
||||||
|
.get_range(
|
||||||
|
&EmptyKey,
|
||||||
|
None,
|
||||||
|
Some(DeletedFilter::NotDeleted),
|
||||||
|
10000,
|
||||||
|
EnumerationOrder::Forward,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let res = buckets
|
||||||
|
.into_iter()
|
||||||
|
.map(|b| {
|
||||||
|
let state = b.state.as_option().unwrap();
|
||||||
|
ListBucketResultItem {
|
||||||
|
id: hex::encode(b.id),
|
||||||
|
global_aliases: state
|
||||||
|
.aliases
|
||||||
|
.items()
|
||||||
|
.iter()
|
||||||
|
.filter(|(_, _, a)| *a)
|
||||||
|
.map(|(n, _, _)| n.to_string())
|
||||||
|
.collect::<Vec<_>>(),
|
||||||
|
local_aliases: state
|
||||||
|
.local_aliases
|
||||||
|
.items()
|
||||||
|
.iter()
|
||||||
|
.filter(|(_, _, a)| *a)
|
||||||
|
.map(|((k, n), _, _)| BucketLocalAlias {
|
||||||
|
access_key_id: k.to_string(),
|
||||||
|
alias: n.to_string(),
|
||||||
|
})
|
||||||
|
.collect::<Vec<_>>(),
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
|
||||||
|
Ok(json_ok_response(&res)?)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
struct ListBucketResultItem {
|
||||||
|
id: String,
|
||||||
|
global_aliases: Vec<String>,
|
||||||
|
local_aliases: Vec<BucketLocalAlias>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
struct BucketLocalAlias {
|
||||||
|
access_key_id: String,
|
||||||
|
alias: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn handle_get_bucket_info(
|
||||||
|
garage: &Arc<Garage>,
|
||||||
|
id: Option<String>,
|
||||||
|
global_alias: Option<String>,
|
||||||
|
) -> Result<Response<Body>, Error> {
|
||||||
|
let bucket_id = match (id, global_alias) {
|
||||||
|
(Some(id), None) => parse_bucket_id(&id)?,
|
||||||
|
(None, Some(ga)) => garage
|
||||||
|
.bucket_helper()
|
||||||
|
.resolve_global_bucket_name(&ga)
|
||||||
|
.await?
|
||||||
|
.ok_or_else(|| HelperError::NoSuchBucket(ga.to_string()))?,
|
||||||
|
_ => {
|
||||||
|
return Err(Error::bad_request(
|
||||||
|
"Either id or globalAlias must be provided (but not both)",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
bucket_info_results(garage, bucket_id).await
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn bucket_info_results(
|
||||||
|
garage: &Arc<Garage>,
|
||||||
|
bucket_id: Uuid,
|
||||||
|
) -> Result<Response<Body>, Error> {
|
||||||
|
let bucket = garage
|
||||||
|
.bucket_helper()
|
||||||
|
.get_existing_bucket(bucket_id)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let mut relevant_keys = HashMap::new();
|
||||||
|
for (k, _) in bucket
|
||||||
|
.state
|
||||||
|
.as_option()
|
||||||
|
.unwrap()
|
||||||
|
.authorized_keys
|
||||||
|
.items()
|
||||||
|
.iter()
|
||||||
|
{
|
||||||
|
if let Some(key) = garage
|
||||||
|
.key_table
|
||||||
|
.get(&EmptyKey, k)
|
||||||
|
.await?
|
||||||
|
.filter(|k| !k.is_deleted())
|
||||||
|
{
|
||||||
|
if !key.state.is_deleted() {
|
||||||
|
relevant_keys.insert(k.clone(), key);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for ((k, _), _, _) in bucket
|
||||||
|
.state
|
||||||
|
.as_option()
|
||||||
|
.unwrap()
|
||||||
|
.local_aliases
|
||||||
|
.items()
|
||||||
|
.iter()
|
||||||
|
{
|
||||||
|
if relevant_keys.contains_key(k) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if let Some(key) = garage.key_table.get(&EmptyKey, k).await? {
|
||||||
|
if !key.state.is_deleted() {
|
||||||
|
relevant_keys.insert(k.clone(), key);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let state = bucket.state.as_option().unwrap();
|
||||||
|
|
||||||
|
let res =
|
||||||
|
GetBucketInfoResult {
|
||||||
|
id: hex::encode(&bucket.id),
|
||||||
|
global_aliases: state
|
||||||
|
.aliases
|
||||||
|
.items()
|
||||||
|
.iter()
|
||||||
|
.filter(|(_, _, a)| *a)
|
||||||
|
.map(|(n, _, _)| n.to_string())
|
||||||
|
.collect::<Vec<_>>(),
|
||||||
|
website_access: state.website_config.get().is_some(),
|
||||||
|
website_config: state.website_config.get().clone().map(|wsc| {
|
||||||
|
GetBucketInfoWebsiteResult {
|
||||||
|
index_document: wsc.index_document,
|
||||||
|
error_document: wsc.error_document,
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
keys: relevant_keys
|
||||||
|
.into_iter()
|
||||||
|
.map(|(_, key)| {
|
||||||
|
let p = key.state.as_option().unwrap();
|
||||||
|
GetBucketInfoKey {
|
||||||
|
access_key_id: key.key_id,
|
||||||
|
name: p.name.get().to_string(),
|
||||||
|
permissions: p
|
||||||
|
.authorized_buckets
|
||||||
|
.get(&bucket.id)
|
||||||
|
.map(|p| ApiBucketKeyPerm {
|
||||||
|
read: p.allow_read,
|
||||||
|
write: p.allow_write,
|
||||||
|
owner: p.allow_owner,
|
||||||
|
})
|
||||||
|
.unwrap_or_default(),
|
||||||
|
bucket_local_aliases: p
|
||||||
|
.local_aliases
|
||||||
|
.items()
|
||||||
|
.iter()
|
||||||
|
.filter(|(_, _, b)| *b == Some(bucket.id))
|
||||||
|
.map(|(n, _, _)| n.to_string())
|
||||||
|
.collect::<Vec<_>>(),
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.collect::<Vec<_>>(),
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(json_ok_response(&res)?)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
struct GetBucketInfoResult {
|
||||||
|
id: String,
|
||||||
|
global_aliases: Vec<String>,
|
||||||
|
website_access: bool,
|
||||||
|
#[serde(default)]
|
||||||
|
website_config: Option<GetBucketInfoWebsiteResult>,
|
||||||
|
keys: Vec<GetBucketInfoKey>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
struct GetBucketInfoWebsiteResult {
|
||||||
|
index_document: String,
|
||||||
|
error_document: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
struct GetBucketInfoKey {
|
||||||
|
access_key_id: String,
|
||||||
|
name: String,
|
||||||
|
permissions: ApiBucketKeyPerm,
|
||||||
|
bucket_local_aliases: Vec<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn handle_create_bucket(
|
||||||
|
garage: &Arc<Garage>,
|
||||||
|
req: Request<Body>,
|
||||||
|
) -> Result<Response<Body>, Error> {
|
||||||
|
let req = parse_json_body::<CreateBucketRequest>(req).await?;
|
||||||
|
|
||||||
|
if let Some(ga) = &req.global_alias {
|
||||||
|
if !is_valid_bucket_name(ga) {
|
||||||
|
return Err(Error::bad_request(format!(
|
||||||
|
"{}: {}",
|
||||||
|
ga, INVALID_BUCKET_NAME_MESSAGE
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(alias) = garage.bucket_alias_table.get(&EmptyKey, ga).await? {
|
||||||
|
if alias.state.get().is_some() {
|
||||||
|
return Err(CommonError::BucketAlreadyExists.into());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(la) = &req.local_alias {
|
||||||
|
if !is_valid_bucket_name(&la.alias) {
|
||||||
|
return Err(Error::bad_request(format!(
|
||||||
|
"{}: {}",
|
||||||
|
la.alias, INVALID_BUCKET_NAME_MESSAGE
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
|
||||||
|
let key = garage
|
||||||
|
.key_helper()
|
||||||
|
.get_existing_key(&la.access_key_id)
|
||||||
|
.await?;
|
||||||
|
let state = key.state.as_option().unwrap();
|
||||||
|
if matches!(state.local_aliases.get(&la.alias), Some(_)) {
|
||||||
|
return Err(Error::bad_request("Local alias already exists"));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let bucket = Bucket::new();
|
||||||
|
garage.bucket_table.insert(&bucket).await?;
|
||||||
|
|
||||||
|
if let Some(ga) = &req.global_alias {
|
||||||
|
garage
|
||||||
|
.bucket_helper()
|
||||||
|
.set_global_bucket_alias(bucket.id, ga)
|
||||||
|
.await?;
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(la) = &req.local_alias {
|
||||||
|
garage
|
||||||
|
.bucket_helper()
|
||||||
|
.set_local_bucket_alias(bucket.id, &la.access_key_id, &la.alias)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
if la.allow.read || la.allow.write || la.allow.owner {
|
||||||
|
garage
|
||||||
|
.bucket_helper()
|
||||||
|
.set_bucket_key_permissions(
|
||||||
|
bucket.id,
|
||||||
|
&la.access_key_id,
|
||||||
|
BucketKeyPerm {
|
||||||
|
timestamp: now_msec(),
|
||||||
|
allow_read: la.allow.read,
|
||||||
|
allow_write: la.allow.write,
|
||||||
|
allow_owner: la.allow.owner,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
bucket_info_results(garage, bucket.id).await
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
struct CreateBucketRequest {
|
||||||
|
global_alias: Option<String>,
|
||||||
|
local_alias: Option<CreateBucketLocalAlias>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
struct CreateBucketLocalAlias {
|
||||||
|
access_key_id: String,
|
||||||
|
alias: String,
|
||||||
|
#[serde(default)]
|
||||||
|
allow: ApiBucketKeyPerm,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn handle_delete_bucket(
|
||||||
|
garage: &Arc<Garage>,
|
||||||
|
id: String,
|
||||||
|
) -> Result<Response<Body>, Error> {
|
||||||
|
let helper = garage.bucket_helper();
|
||||||
|
|
||||||
|
let bucket_id = parse_bucket_id(&id)?;
|
||||||
|
|
||||||
|
let mut bucket = helper.get_existing_bucket(bucket_id).await?;
|
||||||
|
let state = bucket.state.as_option().unwrap();
|
||||||
|
|
||||||
|
// Check bucket is empty
|
||||||
|
if !helper.is_bucket_empty(bucket_id).await? {
|
||||||
|
return Err(CommonError::BucketNotEmpty.into());
|
||||||
|
}
|
||||||
|
|
||||||
|
// --- done checking, now commit ---
|
||||||
|
// 1. delete authorization from keys that had access
|
||||||
|
for (key_id, perm) in bucket.authorized_keys() {
|
||||||
|
if perm.is_any() {
|
||||||
|
helper
|
||||||
|
.set_bucket_key_permissions(bucket.id, key_id, BucketKeyPerm::NO_PERMISSIONS)
|
||||||
|
.await?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// 2. delete all local aliases
|
||||||
|
for ((key_id, alias), _, active) in state.local_aliases.items().iter() {
|
||||||
|
if *active {
|
||||||
|
helper
|
||||||
|
.unset_local_bucket_alias(bucket.id, key_id, alias)
|
||||||
|
.await?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// 3. delete all global aliases
|
||||||
|
for (alias, _, active) in state.aliases.items().iter() {
|
||||||
|
if *active {
|
||||||
|
helper.purge_global_bucket_alias(bucket.id, alias).await?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// 4. delete bucket
|
||||||
|
bucket.state = Deletable::delete();
|
||||||
|
garage.bucket_table.insert(&bucket).await?;
|
||||||
|
|
||||||
|
Ok(Response::builder()
|
||||||
|
.status(StatusCode::NO_CONTENT)
|
||||||
|
.body(Body::empty())?)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ---- BUCKET WEBSITE CONFIGURATION ----
|
||||||
|
|
||||||
|
pub async fn handle_put_bucket_website(
|
||||||
|
garage: &Arc<Garage>,
|
||||||
|
id: String,
|
||||||
|
req: Request<Body>,
|
||||||
|
) -> Result<Response<Body>, Error> {
|
||||||
|
let req = parse_json_body::<PutBucketWebsiteRequest>(req).await?;
|
||||||
|
let bucket_id = parse_bucket_id(&id)?;
|
||||||
|
|
||||||
|
let mut bucket = garage
|
||||||
|
.bucket_helper()
|
||||||
|
.get_existing_bucket(bucket_id)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let state = bucket.state.as_option_mut().unwrap();
|
||||||
|
state.website_config.update(Some(WebsiteConfig {
|
||||||
|
index_document: req.index_document,
|
||||||
|
error_document: req.error_document,
|
||||||
|
}));
|
||||||
|
|
||||||
|
garage.bucket_table.insert(&bucket).await?;
|
||||||
|
|
||||||
|
bucket_info_results(garage, bucket_id).await
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
struct PutBucketWebsiteRequest {
|
||||||
|
index_document: String,
|
||||||
|
#[serde(default)]
|
||||||
|
error_document: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn handle_delete_bucket_website(
|
||||||
|
garage: &Arc<Garage>,
|
||||||
|
id: String,
|
||||||
|
) -> Result<Response<Body>, Error> {
|
||||||
|
let bucket_id = parse_bucket_id(&id)?;
|
||||||
|
|
||||||
|
let mut bucket = garage
|
||||||
|
.bucket_helper()
|
||||||
|
.get_existing_bucket(bucket_id)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let state = bucket.state.as_option_mut().unwrap();
|
||||||
|
state.website_config.update(None);
|
||||||
|
|
||||||
|
garage.bucket_table.insert(&bucket).await?;
|
||||||
|
|
||||||
|
bucket_info_results(garage, bucket_id).await
|
||||||
|
}
|
||||||
|
|
||||||
|
// ---- BUCKET/KEY PERMISSIONS ----
|
||||||
|
|
||||||
|
pub async fn handle_bucket_change_key_perm(
|
||||||
|
garage: &Arc<Garage>,
|
||||||
|
req: Request<Body>,
|
||||||
|
new_perm_flag: bool,
|
||||||
|
) -> Result<Response<Body>, Error> {
|
||||||
|
let req = parse_json_body::<BucketKeyPermChangeRequest>(req).await?;
|
||||||
|
|
||||||
|
let bucket_id = parse_bucket_id(&req.bucket_id)?;
|
||||||
|
|
||||||
|
let bucket = garage
|
||||||
|
.bucket_helper()
|
||||||
|
.get_existing_bucket(bucket_id)
|
||||||
|
.await?;
|
||||||
|
let state = bucket.state.as_option().unwrap();
|
||||||
|
|
||||||
|
let key = garage
|
||||||
|
.key_helper()
|
||||||
|
.get_existing_key(&req.access_key_id)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let mut perm = state
|
||||||
|
.authorized_keys
|
||||||
|
.get(&key.key_id)
|
||||||
|
.cloned()
|
||||||
|
.unwrap_or(BucketKeyPerm::NO_PERMISSIONS);
|
||||||
|
|
||||||
|
if req.permissions.read {
|
||||||
|
perm.allow_read = new_perm_flag;
|
||||||
|
}
|
||||||
|
if req.permissions.write {
|
||||||
|
perm.allow_write = new_perm_flag;
|
||||||
|
}
|
||||||
|
if req.permissions.owner {
|
||||||
|
perm.allow_owner = new_perm_flag;
|
||||||
|
}
|
||||||
|
|
||||||
|
garage
|
||||||
|
.bucket_helper()
|
||||||
|
.set_bucket_key_permissions(bucket.id, &key.key_id, perm)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
bucket_info_results(garage, bucket.id).await
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
struct BucketKeyPermChangeRequest {
|
||||||
|
bucket_id: String,
|
||||||
|
access_key_id: String,
|
||||||
|
permissions: ApiBucketKeyPerm,
|
||||||
|
}
|
||||||
|
|
||||||
|
// ---- BUCKET ALIASES ----
|
||||||
|
|
||||||
|
pub async fn handle_global_alias_bucket(
|
||||||
|
garage: &Arc<Garage>,
|
||||||
|
bucket_id: String,
|
||||||
|
alias: String,
|
||||||
|
) -> Result<Response<Body>, Error> {
|
||||||
|
let bucket_id = parse_bucket_id(&bucket_id)?;
|
||||||
|
|
||||||
|
garage
|
||||||
|
.bucket_helper()
|
||||||
|
.set_global_bucket_alias(bucket_id, &alias)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
bucket_info_results(garage, bucket_id).await
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn handle_global_unalias_bucket(
|
||||||
|
garage: &Arc<Garage>,
|
||||||
|
bucket_id: String,
|
||||||
|
alias: String,
|
||||||
|
) -> Result<Response<Body>, Error> {
|
||||||
|
let bucket_id = parse_bucket_id(&bucket_id)?;
|
||||||
|
|
||||||
|
garage
|
||||||
|
.bucket_helper()
|
||||||
|
.unset_global_bucket_alias(bucket_id, &alias)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
bucket_info_results(garage, bucket_id).await
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn handle_local_alias_bucket(
|
||||||
|
garage: &Arc<Garage>,
|
||||||
|
bucket_id: String,
|
||||||
|
access_key_id: String,
|
||||||
|
alias: String,
|
||||||
|
) -> Result<Response<Body>, Error> {
|
||||||
|
let bucket_id = parse_bucket_id(&bucket_id)?;
|
||||||
|
|
||||||
|
garage
|
||||||
|
.bucket_helper()
|
||||||
|
.set_local_bucket_alias(bucket_id, &access_key_id, &alias)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
bucket_info_results(garage, bucket_id).await
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn handle_local_unalias_bucket(
|
||||||
|
garage: &Arc<Garage>,
|
||||||
|
bucket_id: String,
|
||||||
|
access_key_id: String,
|
||||||
|
alias: String,
|
||||||
|
) -> Result<Response<Body>, Error> {
|
||||||
|
let bucket_id = parse_bucket_id(&bucket_id)?;
|
||||||
|
|
||||||
|
garage
|
||||||
|
.bucket_helper()
|
||||||
|
.unset_local_bucket_alias(bucket_id, &access_key_id, &alias)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
bucket_info_results(garage, bucket_id).await
|
||||||
|
}
|
||||||
|
|
||||||
|
// ---- HELPER ----
|
||||||
|
|
||||||
|
fn parse_bucket_id(id: &str) -> Result<Uuid, Error> {
|
||||||
|
let id_hex = hex::decode(&id).ok_or_bad_request("Invalid bucket id")?;
|
||||||
|
Ok(Uuid::try_from(&id_hex).ok_or_bad_request("Invalid bucket id")?)
|
||||||
|
}
|
189
src/api/admin/cluster.rs
Normal file
189
src/api/admin/cluster.rs
Normal file
|
@ -0,0 +1,189 @@
|
||||||
|
use std::collections::HashMap;
|
||||||
|
use std::net::SocketAddr;
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use hyper::{Body, Request, Response, StatusCode};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use garage_util::crdt::*;
|
||||||
|
use garage_util::data::*;
|
||||||
|
|
||||||
|
use garage_rpc::layout::*;
|
||||||
|
|
||||||
|
use garage_model::garage::Garage;
|
||||||
|
|
||||||
|
use crate::admin::error::*;
|
||||||
|
use crate::helpers::{json_ok_response, parse_json_body};
|
||||||
|
|
||||||
|
pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<Body>, Error> {
|
||||||
|
let res = GetClusterStatusResponse {
|
||||||
|
node: hex::encode(garage.system.id),
|
||||||
|
garage_version: garage.system.garage_version(),
|
||||||
|
known_nodes: garage
|
||||||
|
.system
|
||||||
|
.get_known_nodes()
|
||||||
|
.into_iter()
|
||||||
|
.map(|i| {
|
||||||
|
(
|
||||||
|
hex::encode(i.id),
|
||||||
|
KnownNodeResp {
|
||||||
|
addr: i.addr,
|
||||||
|
is_up: i.is_up,
|
||||||
|
last_seen_secs_ago: i.last_seen_secs_ago,
|
||||||
|
hostname: i.status.hostname,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
})
|
||||||
|
.collect(),
|
||||||
|
layout: get_cluster_layout(garage),
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(json_ok_response(&res)?)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn handle_connect_cluster_nodes(
|
||||||
|
garage: &Arc<Garage>,
|
||||||
|
req: Request<Body>,
|
||||||
|
) -> Result<Response<Body>, Error> {
|
||||||
|
let req = parse_json_body::<Vec<String>>(req).await?;
|
||||||
|
|
||||||
|
let res = futures::future::join_all(req.iter().map(|node| garage.system.connect(node)))
|
||||||
|
.await
|
||||||
|
.into_iter()
|
||||||
|
.map(|r| match r {
|
||||||
|
Ok(()) => ConnectClusterNodesResponse {
|
||||||
|
success: true,
|
||||||
|
error: None,
|
||||||
|
},
|
||||||
|
Err(e) => ConnectClusterNodesResponse {
|
||||||
|
success: false,
|
||||||
|
error: Some(format!("{}", e)),
|
||||||
|
},
|
||||||
|
})
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
|
||||||
|
Ok(json_ok_response(&res)?)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn handle_get_cluster_layout(garage: &Arc<Garage>) -> Result<Response<Body>, Error> {
|
||||||
|
let res = get_cluster_layout(garage);
|
||||||
|
|
||||||
|
Ok(json_ok_response(&res)?)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_cluster_layout(garage: &Arc<Garage>) -> GetClusterLayoutResponse {
|
||||||
|
let layout = garage.system.get_cluster_layout();
|
||||||
|
|
||||||
|
GetClusterLayoutResponse {
|
||||||
|
version: layout.version,
|
||||||
|
roles: layout
|
||||||
|
.roles
|
||||||
|
.items()
|
||||||
|
.iter()
|
||||||
|
.filter(|(_, _, v)| v.0.is_some())
|
||||||
|
.map(|(k, _, v)| (hex::encode(k), v.0.clone()))
|
||||||
|
.collect(),
|
||||||
|
staged_role_changes: layout
|
||||||
|
.staging
|
||||||
|
.items()
|
||||||
|
.iter()
|
||||||
|
.filter(|(k, _, v)| layout.roles.get(k) != Some(v))
|
||||||
|
.map(|(k, _, v)| (hex::encode(k), v.0.clone()))
|
||||||
|
.collect(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
struct GetClusterStatusResponse {
|
||||||
|
node: String,
|
||||||
|
garage_version: &'static str,
|
||||||
|
known_nodes: HashMap<String, KnownNodeResp>,
|
||||||
|
layout: GetClusterLayoutResponse,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize)]
|
||||||
|
struct ConnectClusterNodesResponse {
|
||||||
|
success: bool,
|
||||||
|
error: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
struct GetClusterLayoutResponse {
|
||||||
|
version: u64,
|
||||||
|
roles: HashMap<String, Option<NodeRole>>,
|
||||||
|
staged_role_changes: HashMap<String, Option<NodeRole>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize)]
|
||||||
|
struct KnownNodeResp {
|
||||||
|
addr: SocketAddr,
|
||||||
|
is_up: bool,
|
||||||
|
last_seen_secs_ago: Option<u64>,
|
||||||
|
hostname: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn handle_update_cluster_layout(
|
||||||
|
garage: &Arc<Garage>,
|
||||||
|
req: Request<Body>,
|
||||||
|
) -> Result<Response<Body>, Error> {
|
||||||
|
let updates = parse_json_body::<UpdateClusterLayoutRequest>(req).await?;
|
||||||
|
|
||||||
|
let mut layout = garage.system.get_cluster_layout();
|
||||||
|
|
||||||
|
let mut roles = layout.roles.clone();
|
||||||
|
roles.merge(&layout.staging);
|
||||||
|
|
||||||
|
for (node, role) in updates {
|
||||||
|
let node = hex::decode(node).ok_or_bad_request("Invalid node identifier")?;
|
||||||
|
let node = Uuid::try_from(&node).ok_or_bad_request("Invalid node identifier")?;
|
||||||
|
|
||||||
|
layout
|
||||||
|
.staging
|
||||||
|
.merge(&roles.update_mutator(node, NodeRoleV(role)));
|
||||||
|
}
|
||||||
|
|
||||||
|
garage.system.update_cluster_layout(&layout).await?;
|
||||||
|
|
||||||
|
Ok(Response::builder()
|
||||||
|
.status(StatusCode::OK)
|
||||||
|
.body(Body::empty())?)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn handle_apply_cluster_layout(
|
||||||
|
garage: &Arc<Garage>,
|
||||||
|
req: Request<Body>,
|
||||||
|
) -> Result<Response<Body>, Error> {
|
||||||
|
let param = parse_json_body::<ApplyRevertLayoutRequest>(req).await?;
|
||||||
|
|
||||||
|
let layout = garage.system.get_cluster_layout();
|
||||||
|
let layout = layout.apply_staged_changes(Some(param.version))?;
|
||||||
|
garage.system.update_cluster_layout(&layout).await?;
|
||||||
|
|
||||||
|
Ok(Response::builder()
|
||||||
|
.status(StatusCode::OK)
|
||||||
|
.body(Body::empty())?)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn handle_revert_cluster_layout(
|
||||||
|
garage: &Arc<Garage>,
|
||||||
|
req: Request<Body>,
|
||||||
|
) -> Result<Response<Body>, Error> {
|
||||||
|
let param = parse_json_body::<ApplyRevertLayoutRequest>(req).await?;
|
||||||
|
|
||||||
|
let layout = garage.system.get_cluster_layout();
|
||||||
|
let layout = layout.revert_staged_changes(Some(param.version))?;
|
||||||
|
garage.system.update_cluster_layout(&layout).await?;
|
||||||
|
|
||||||
|
Ok(Response::builder()
|
||||||
|
.status(StatusCode::OK)
|
||||||
|
.body(Body::empty())?)
|
||||||
|
}
|
||||||
|
|
||||||
|
type UpdateClusterLayoutRequest = HashMap<String, Option<NodeRole>>;
|
||||||
|
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
struct ApplyRevertLayoutRequest {
|
||||||
|
version: u64,
|
||||||
|
}
|
97
src/api/admin/error.rs
Normal file
97
src/api/admin/error.rs
Normal file
|
@ -0,0 +1,97 @@
|
||||||
|
use err_derive::Error;
|
||||||
|
use hyper::header::HeaderValue;
|
||||||
|
use hyper::{Body, HeaderMap, StatusCode};
|
||||||
|
|
||||||
|
pub use garage_model::helper::error::Error as HelperError;
|
||||||
|
|
||||||
|
use crate::common_error::CommonError;
|
||||||
|
pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInternalError};
|
||||||
|
use crate::generic_server::ApiError;
|
||||||
|
use crate::helpers::CustomApiErrorBody;
|
||||||
|
|
||||||
|
/// Errors of this crate
|
||||||
|
#[derive(Debug, Error)]
|
||||||
|
pub enum Error {
|
||||||
|
#[error(display = "{}", _0)]
|
||||||
|
/// Error from common error
|
||||||
|
Common(CommonError),
|
||||||
|
|
||||||
|
// Category: cannot process
|
||||||
|
/// The API access key does not exist
|
||||||
|
#[error(display = "Access key not found: {}", _0)]
|
||||||
|
NoSuchAccessKey(String),
|
||||||
|
|
||||||
|
/// In Import key, the key already exists
|
||||||
|
#[error(
|
||||||
|
display = "Key {} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. Sorry.",
|
||||||
|
_0
|
||||||
|
)]
|
||||||
|
KeyAlreadyExists(String),
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> From<T> for Error
|
||||||
|
where
|
||||||
|
CommonError: From<T>,
|
||||||
|
{
|
||||||
|
fn from(err: T) -> Self {
|
||||||
|
Error::Common(CommonError::from(err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl CommonErrorDerivative for Error {}
|
||||||
|
|
||||||
|
impl From<HelperError> for Error {
|
||||||
|
fn from(err: HelperError) -> Self {
|
||||||
|
match err {
|
||||||
|
HelperError::Internal(i) => Self::Common(CommonError::InternalError(i)),
|
||||||
|
HelperError::BadRequest(b) => Self::Common(CommonError::BadRequest(b)),
|
||||||
|
HelperError::InvalidBucketName(n) => Self::Common(CommonError::InvalidBucketName(n)),
|
||||||
|
HelperError::NoSuchBucket(n) => Self::Common(CommonError::NoSuchBucket(n)),
|
||||||
|
HelperError::NoSuchAccessKey(n) => Self::NoSuchAccessKey(n),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Error {
|
||||||
|
fn code(&self) -> &'static str {
|
||||||
|
match self {
|
||||||
|
Error::Common(c) => c.aws_code(),
|
||||||
|
Error::NoSuchAccessKey(_) => "NoSuchAccessKey",
|
||||||
|
Error::KeyAlreadyExists(_) => "KeyAlreadyExists",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ApiError for Error {
|
||||||
|
/// Get the HTTP status code that best represents the meaning of the error for the client
|
||||||
|
fn http_status_code(&self) -> StatusCode {
|
||||||
|
match self {
|
||||||
|
Error::Common(c) => c.http_status_code(),
|
||||||
|
Error::NoSuchAccessKey(_) => StatusCode::NOT_FOUND,
|
||||||
|
Error::KeyAlreadyExists(_) => StatusCode::CONFLICT,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn add_http_headers(&self, header_map: &mut HeaderMap<HeaderValue>) {
|
||||||
|
use hyper::header;
|
||||||
|
header_map.append(header::CONTENT_TYPE, "application/json".parse().unwrap());
|
||||||
|
}
|
||||||
|
|
||||||
|
fn http_body(&self, garage_region: &str, path: &str) -> Body {
|
||||||
|
let error = CustomApiErrorBody {
|
||||||
|
code: self.code().to_string(),
|
||||||
|
message: format!("{}", self),
|
||||||
|
path: path.to_string(),
|
||||||
|
region: garage_region.to_string(),
|
||||||
|
};
|
||||||
|
Body::from(serde_json::to_string_pretty(&error).unwrap_or_else(|_| {
|
||||||
|
r#"
|
||||||
|
{
|
||||||
|
"code": "InternalError",
|
||||||
|
"message": "JSON encoding of error failed"
|
||||||
|
}
|
||||||
|
"#
|
||||||
|
.into()
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
}
|
256
src/api/admin/key.rs
Normal file
256
src/api/admin/key.rs
Normal file
|
@ -0,0 +1,256 @@
|
||||||
|
use std::collections::HashMap;
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use hyper::{Body, Request, Response, StatusCode};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use garage_table::*;
|
||||||
|
|
||||||
|
use garage_model::garage::Garage;
|
||||||
|
use garage_model::key_table::*;
|
||||||
|
|
||||||
|
use crate::admin::error::*;
|
||||||
|
use crate::helpers::{json_ok_response, parse_json_body};
|
||||||
|
|
||||||
|
pub async fn handle_list_keys(garage: &Arc<Garage>) -> Result<Response<Body>, Error> {
|
||||||
|
let res = garage
|
||||||
|
.key_table
|
||||||
|
.get_range(
|
||||||
|
&EmptyKey,
|
||||||
|
None,
|
||||||
|
Some(KeyFilter::Deleted(DeletedFilter::NotDeleted)),
|
||||||
|
10000,
|
||||||
|
EnumerationOrder::Forward,
|
||||||
|
)
|
||||||
|
.await?
|
||||||
|
.iter()
|
||||||
|
.map(|k| ListKeyResultItem {
|
||||||
|
id: k.key_id.to_string(),
|
||||||
|
name: k.params().unwrap().name.get().clone(),
|
||||||
|
})
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
|
||||||
|
Ok(json_ok_response(&res)?)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize)]
|
||||||
|
struct ListKeyResultItem {
|
||||||
|
id: String,
|
||||||
|
name: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn handle_get_key_info(
|
||||||
|
garage: &Arc<Garage>,
|
||||||
|
id: Option<String>,
|
||||||
|
search: Option<String>,
|
||||||
|
) -> Result<Response<Body>, Error> {
|
||||||
|
let key = if let Some(id) = id {
|
||||||
|
garage.key_helper().get_existing_key(&id).await?
|
||||||
|
} else if let Some(search) = search {
|
||||||
|
garage
|
||||||
|
.key_helper()
|
||||||
|
.get_existing_matching_key(&search)
|
||||||
|
.await?
|
||||||
|
} else {
|
||||||
|
unreachable!();
|
||||||
|
};
|
||||||
|
|
||||||
|
key_info_results(garage, key).await
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn handle_create_key(
|
||||||
|
garage: &Arc<Garage>,
|
||||||
|
req: Request<Body>,
|
||||||
|
) -> Result<Response<Body>, Error> {
|
||||||
|
let req = parse_json_body::<CreateKeyRequest>(req).await?;
|
||||||
|
|
||||||
|
let key = Key::new(&req.name);
|
||||||
|
garage.key_table.insert(&key).await?;
|
||||||
|
|
||||||
|
key_info_results(garage, key).await
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
struct CreateKeyRequest {
|
||||||
|
name: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn handle_import_key(
|
||||||
|
garage: &Arc<Garage>,
|
||||||
|
req: Request<Body>,
|
||||||
|
) -> Result<Response<Body>, Error> {
|
||||||
|
let req = parse_json_body::<ImportKeyRequest>(req).await?;
|
||||||
|
|
||||||
|
let prev_key = garage.key_table.get(&EmptyKey, &req.access_key_id).await?;
|
||||||
|
if prev_key.is_some() {
|
||||||
|
return Err(Error::KeyAlreadyExists(req.access_key_id.to_string()));
|
||||||
|
}
|
||||||
|
|
||||||
|
let imported_key = Key::import(&req.access_key_id, &req.secret_access_key, &req.name);
|
||||||
|
garage.key_table.insert(&imported_key).await?;
|
||||||
|
|
||||||
|
key_info_results(garage, imported_key).await
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
struct ImportKeyRequest {
|
||||||
|
access_key_id: String,
|
||||||
|
secret_access_key: String,
|
||||||
|
name: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn handle_update_key(
|
||||||
|
garage: &Arc<Garage>,
|
||||||
|
id: String,
|
||||||
|
req: Request<Body>,
|
||||||
|
) -> Result<Response<Body>, Error> {
|
||||||
|
let req = parse_json_body::<UpdateKeyRequest>(req).await?;
|
||||||
|
|
||||||
|
let mut key = garage.key_helper().get_existing_key(&id).await?;
|
||||||
|
|
||||||
|
let key_state = key.state.as_option_mut().unwrap();
|
||||||
|
|
||||||
|
if let Some(new_name) = req.name {
|
||||||
|
key_state.name.update(new_name);
|
||||||
|
}
|
||||||
|
if let Some(allow) = req.allow {
|
||||||
|
if allow.create_bucket {
|
||||||
|
key_state.allow_create_bucket.update(true);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if let Some(deny) = req.deny {
|
||||||
|
if deny.create_bucket {
|
||||||
|
key_state.allow_create_bucket.update(false);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
garage.key_table.insert(&key).await?;
|
||||||
|
|
||||||
|
key_info_results(garage, key).await
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
struct UpdateKeyRequest {
|
||||||
|
name: Option<String>,
|
||||||
|
allow: Option<KeyPerm>,
|
||||||
|
deny: Option<KeyPerm>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn handle_delete_key(garage: &Arc<Garage>, id: String) -> Result<Response<Body>, Error> {
|
||||||
|
let mut key = garage.key_helper().get_existing_key(&id).await?;
|
||||||
|
|
||||||
|
key.state.as_option().unwrap();
|
||||||
|
|
||||||
|
garage.key_helper().delete_key(&mut key).await?;
|
||||||
|
|
||||||
|
Ok(Response::builder()
|
||||||
|
.status(StatusCode::NO_CONTENT)
|
||||||
|
.body(Body::empty())?)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn key_info_results(garage: &Arc<Garage>, key: Key) -> Result<Response<Body>, Error> {
|
||||||
|
let mut relevant_buckets = HashMap::new();
|
||||||
|
|
||||||
|
let key_state = key.state.as_option().unwrap();
|
||||||
|
|
||||||
|
for id in key_state
|
||||||
|
.authorized_buckets
|
||||||
|
.items()
|
||||||
|
.iter()
|
||||||
|
.map(|(id, _)| id)
|
||||||
|
.chain(
|
||||||
|
key_state
|
||||||
|
.local_aliases
|
||||||
|
.items()
|
||||||
|
.iter()
|
||||||
|
.filter_map(|(_, _, v)| v.as_ref()),
|
||||||
|
) {
|
||||||
|
if !relevant_buckets.contains_key(id) {
|
||||||
|
if let Some(b) = garage.bucket_table.get(&EmptyKey, id).await? {
|
||||||
|
if b.state.as_option().is_some() {
|
||||||
|
relevant_buckets.insert(*id, b);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let res = GetKeyInfoResult {
|
||||||
|
name: key_state.name.get().clone(),
|
||||||
|
access_key_id: key.key_id.clone(),
|
||||||
|
secret_access_key: key_state.secret_key.clone(),
|
||||||
|
permissions: KeyPerm {
|
||||||
|
create_bucket: *key_state.allow_create_bucket.get(),
|
||||||
|
},
|
||||||
|
buckets: relevant_buckets
|
||||||
|
.into_iter()
|
||||||
|
.map(|(_, bucket)| {
|
||||||
|
let state = bucket.state.as_option().unwrap();
|
||||||
|
KeyInfoBucketResult {
|
||||||
|
id: hex::encode(bucket.id),
|
||||||
|
global_aliases: state
|
||||||
|
.aliases
|
||||||
|
.items()
|
||||||
|
.iter()
|
||||||
|
.filter(|(_, _, a)| *a)
|
||||||
|
.map(|(n, _, _)| n.to_string())
|
||||||
|
.collect::<Vec<_>>(),
|
||||||
|
local_aliases: state
|
||||||
|
.local_aliases
|
||||||
|
.items()
|
||||||
|
.iter()
|
||||||
|
.filter(|((k, _), _, a)| *a && *k == key.key_id)
|
||||||
|
.map(|((_, n), _, _)| n.to_string())
|
||||||
|
.collect::<Vec<_>>(),
|
||||||
|
permissions: key_state
|
||||||
|
.authorized_buckets
|
||||||
|
.get(&bucket.id)
|
||||||
|
.map(|p| ApiBucketKeyPerm {
|
||||||
|
read: p.allow_read,
|
||||||
|
write: p.allow_write,
|
||||||
|
owner: p.allow_owner,
|
||||||
|
})
|
||||||
|
.unwrap_or_default(),
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.collect::<Vec<_>>(),
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(json_ok_response(&res)?)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
struct GetKeyInfoResult {
|
||||||
|
name: String,
|
||||||
|
access_key_id: String,
|
||||||
|
secret_access_key: String,
|
||||||
|
permissions: KeyPerm,
|
||||||
|
buckets: Vec<KeyInfoBucketResult>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize, Deserialize)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
struct KeyPerm {
|
||||||
|
#[serde(default)]
|
||||||
|
create_bucket: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
struct KeyInfoBucketResult {
|
||||||
|
id: String,
|
||||||
|
global_aliases: Vec<String>,
|
||||||
|
local_aliases: Vec<String>,
|
||||||
|
permissions: ApiBucketKeyPerm,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize, Deserialize, Default)]
|
||||||
|
pub(crate) struct ApiBucketKeyPerm {
|
||||||
|
#[serde(default)]
|
||||||
|
pub(crate) read: bool,
|
||||||
|
#[serde(default)]
|
||||||
|
pub(crate) write: bool,
|
||||||
|
#[serde(default)]
|
||||||
|
pub(crate) owner: bool,
|
||||||
|
}
|
7
src/api/admin/mod.rs
Normal file
7
src/api/admin/mod.rs
Normal file
|
@ -0,0 +1,7 @@
|
||||||
|
pub mod api_server;
|
||||||
|
mod error;
|
||||||
|
mod router;
|
||||||
|
|
||||||
|
mod bucket;
|
||||||
|
mod cluster;
|
||||||
|
mod key;
|
149
src/api/admin/router.rs
Normal file
149
src/api/admin/router.rs
Normal file
|
@ -0,0 +1,149 @@
|
||||||
|
use std::borrow::Cow;
|
||||||
|
|
||||||
|
use hyper::{Method, Request};
|
||||||
|
|
||||||
|
use crate::admin::error::*;
|
||||||
|
use crate::router_macros::*;
|
||||||
|
|
||||||
|
pub enum Authorization {
|
||||||
|
MetricsToken,
|
||||||
|
AdminToken,
|
||||||
|
}
|
||||||
|
|
||||||
|
router_match! {@func
|
||||||
|
|
||||||
|
/// List of all Admin API endpoints.
|
||||||
|
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||||
|
pub enum Endpoint {
|
||||||
|
Options,
|
||||||
|
Metrics,
|
||||||
|
GetClusterStatus,
|
||||||
|
ConnectClusterNodes,
|
||||||
|
// Layout
|
||||||
|
GetClusterLayout,
|
||||||
|
UpdateClusterLayout,
|
||||||
|
ApplyClusterLayout,
|
||||||
|
RevertClusterLayout,
|
||||||
|
// Keys
|
||||||
|
ListKeys,
|
||||||
|
CreateKey,
|
||||||
|
ImportKey,
|
||||||
|
GetKeyInfo {
|
||||||
|
id: Option<String>,
|
||||||
|
search: Option<String>,
|
||||||
|
},
|
||||||
|
DeleteKey {
|
||||||
|
id: String,
|
||||||
|
},
|
||||||
|
UpdateKey {
|
||||||
|
id: String,
|
||||||
|
},
|
||||||
|
// Buckets
|
||||||
|
ListBuckets,
|
||||||
|
CreateBucket,
|
||||||
|
GetBucketInfo {
|
||||||
|
id: Option<String>,
|
||||||
|
global_alias: Option<String>,
|
||||||
|
},
|
||||||
|
DeleteBucket {
|
||||||
|
id: String,
|
||||||
|
},
|
||||||
|
PutBucketWebsite {
|
||||||
|
id: String,
|
||||||
|
},
|
||||||
|
DeleteBucketWebsite {
|
||||||
|
id: String,
|
||||||
|
},
|
||||||
|
// Bucket-Key Permissions
|
||||||
|
BucketAllowKey,
|
||||||
|
BucketDenyKey,
|
||||||
|
// Bucket aliases
|
||||||
|
GlobalAliasBucket {
|
||||||
|
id: String,
|
||||||
|
alias: String,
|
||||||
|
},
|
||||||
|
GlobalUnaliasBucket {
|
||||||
|
id: String,
|
||||||
|
alias: String,
|
||||||
|
},
|
||||||
|
LocalAliasBucket {
|
||||||
|
id: String,
|
||||||
|
access_key_id: String,
|
||||||
|
alias: String,
|
||||||
|
},
|
||||||
|
LocalUnaliasBucket {
|
||||||
|
id: String,
|
||||||
|
access_key_id: String,
|
||||||
|
alias: String,
|
||||||
|
},
|
||||||
|
}}
|
||||||
|
|
||||||
|
impl Endpoint {
|
||||||
|
/// Determine which S3 endpoint a request is for using the request, and a bucket which was
|
||||||
|
/// possibly extracted from the Host header.
|
||||||
|
/// Returns Self plus bucket name, if endpoint is not Endpoint::ListBuckets
|
||||||
|
pub fn from_request<T>(req: &Request<T>) -> Result<Self, Error> {
|
||||||
|
let uri = req.uri();
|
||||||
|
let path = uri.path();
|
||||||
|
let query = uri.query();
|
||||||
|
|
||||||
|
let mut query = QueryParameters::from_query(query.unwrap_or_default())?;
|
||||||
|
|
||||||
|
let res = router_match!(@gen_path_parser (req.method(), path, query) [
|
||||||
|
OPTIONS _ => Options,
|
||||||
|
GET "/metrics" => Metrics,
|
||||||
|
GET "/v0/status" => GetClusterStatus,
|
||||||
|
POST "/v0/connect" => ConnectClusterNodes,
|
||||||
|
// Layout endpoints
|
||||||
|
GET "/v0/layout" => GetClusterLayout,
|
||||||
|
POST "/v0/layout" => UpdateClusterLayout,
|
||||||
|
POST "/v0/layout/apply" => ApplyClusterLayout,
|
||||||
|
POST "/v0/layout/revert" => RevertClusterLayout,
|
||||||
|
// API key endpoints
|
||||||
|
GET "/v0/key" if id => GetKeyInfo (query_opt::id, query_opt::search),
|
||||||
|
GET "/v0/key" if search => GetKeyInfo (query_opt::id, query_opt::search),
|
||||||
|
POST "/v0/key" if id => UpdateKey (query::id),
|
||||||
|
POST "/v0/key" => CreateKey,
|
||||||
|
POST "/v0/key/import" => ImportKey,
|
||||||
|
DELETE "/v0/key" if id => DeleteKey (query::id),
|
||||||
|
GET "/v0/key" => ListKeys,
|
||||||
|
// Bucket endpoints
|
||||||
|
GET "/v0/bucket" if id => GetBucketInfo (query_opt::id, query_opt::global_alias),
|
||||||
|
GET "/v0/bucket" if global_alias => GetBucketInfo (query_opt::id, query_opt::global_alias),
|
||||||
|
GET "/v0/bucket" => ListBuckets,
|
||||||
|
POST "/v0/bucket" => CreateBucket,
|
||||||
|
DELETE "/v0/bucket" if id => DeleteBucket (query::id),
|
||||||
|
PUT "/v0/bucket/website" if id => PutBucketWebsite (query::id),
|
||||||
|
DELETE "/v0/bucket/website" if id => DeleteBucketWebsite (query::id),
|
||||||
|
// Bucket-key permissions
|
||||||
|
POST "/v0/bucket/allow" => BucketAllowKey,
|
||||||
|
POST "/v0/bucket/deny" => BucketDenyKey,
|
||||||
|
// Bucket aliases
|
||||||
|
PUT "/v0/bucket/alias/global" => GlobalAliasBucket (query::id, query::alias),
|
||||||
|
DELETE "/v0/bucket/alias/global" => GlobalUnaliasBucket (query::id, query::alias),
|
||||||
|
PUT "/v0/bucket/alias/local" => LocalAliasBucket (query::id, query::access_key_id, query::alias),
|
||||||
|
DELETE "/v0/bucket/alias/local" => LocalUnaliasBucket (query::id, query::access_key_id, query::alias),
|
||||||
|
]);
|
||||||
|
|
||||||
|
if let Some(message) = query.nonempty_message() {
|
||||||
|
debug!("Unused query parameter: {}", message)
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(res)
|
||||||
|
}
|
||||||
|
/// Get the kind of authorization which is required to perform the operation.
|
||||||
|
pub fn authorization_type(&self) -> Authorization {
|
||||||
|
match self {
|
||||||
|
Self::Metrics => Authorization::MetricsToken,
|
||||||
|
_ => Authorization::AdminToken,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
generateQueryParameters! {
|
||||||
|
"id" => id,
|
||||||
|
"search" => search,
|
||||||
|
"globalAlias" => global_alias,
|
||||||
|
"alias" => alias,
|
||||||
|
"accessKeyId" => access_key_id
|
||||||
|
}
|
|
@ -1,645 +0,0 @@
|
||||||
use std::net::SocketAddr;
|
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use chrono::{DateTime, NaiveDateTime, Utc};
|
|
||||||
use futures::future::Future;
|
|
||||||
use futures::prelude::*;
|
|
||||||
use hyper::header;
|
|
||||||
use hyper::server::conn::AddrStream;
|
|
||||||
use hyper::service::{make_service_fn, service_fn};
|
|
||||||
use hyper::{Body, Method, Request, Response, Server};
|
|
||||||
|
|
||||||
use opentelemetry::{
|
|
||||||
global,
|
|
||||||
metrics::{Counter, ValueRecorder},
|
|
||||||
trace::{FutureExt, TraceContextExt, Tracer},
|
|
||||||
Context, KeyValue,
|
|
||||||
};
|
|
||||||
|
|
||||||
use garage_util::data::*;
|
|
||||||
use garage_util::error::Error as GarageError;
|
|
||||||
use garage_util::metrics::{gen_trace_id, RecordDuration};
|
|
||||||
|
|
||||||
use garage_model::garage::Garage;
|
|
||||||
use garage_model::key_table::Key;
|
|
||||||
|
|
||||||
use garage_table::util::*;
|
|
||||||
|
|
||||||
use crate::error::*;
|
|
||||||
use crate::signature::compute_scope;
|
|
||||||
use crate::signature::payload::check_payload_signature;
|
|
||||||
use crate::signature::streaming::SignedPayloadStream;
|
|
||||||
use crate::signature::LONG_DATETIME;
|
|
||||||
|
|
||||||
use crate::helpers::*;
|
|
||||||
use crate::s3_bucket::*;
|
|
||||||
use crate::s3_copy::*;
|
|
||||||
use crate::s3_cors::*;
|
|
||||||
use crate::s3_delete::*;
|
|
||||||
use crate::s3_get::*;
|
|
||||||
use crate::s3_list::*;
|
|
||||||
use crate::s3_post_object::handle_post_object;
|
|
||||||
use crate::s3_put::*;
|
|
||||||
use crate::s3_router::{Authorization, Endpoint};
|
|
||||||
use crate::s3_website::*;
|
|
||||||
|
|
||||||
/// OpenTelemetry instruments recording S3 API activity.
struct ApiMetrics {
	// Total number of API calls (incremented once per request in handler_stage2).
	request_counter: Counter<u64>,
	// Number of API calls whose response had a 4xx/5xx status code.
	error_counter: Counter<u64>,
	// Duration of each API call, recorded around handler_stage3.
	request_duration: ValueRecorder<f64>,
}
|
|
||||||
|
|
||||||
impl ApiMetrics {
	/// Creates the metric instruments on the global "garage/api" meter.
	fn new() -> Self {
		let meter = global::meter("garage/api");
		Self {
			request_counter: meter
				.u64_counter("api.request_counter")
				.with_description("Number of API calls to the various S3 API endpoints")
				.init(),
			error_counter: meter
				.u64_counter("api.error_counter")
				.with_description(
					"Number of API calls to the various S3 API endpoints that resulted in errors",
				)
				.init(),
			request_duration: meter
				.f64_value_recorder("api.request_duration")
				.with_description("Duration of API calls to the various S3 API endpoints")
				.init(),
		}
	}
}
|
|
||||||
|
|
||||||
/// Run the S3 API server
///
/// Binds to the address configured in `s3_api.api_bind_addr` and serves
/// requests until `shutdown_signal` resolves, then shuts down gracefully.
/// Each request is dispatched to [`handler`] together with the shared
/// `Garage` handle, the metrics instruments, and the client address.
pub async fn run_api_server(
	garage: Arc<Garage>,
	shutdown_signal: impl Future<Output = ()>,
) -> Result<(), GarageError> {
	let addr = &garage.config.s3_api.api_bind_addr;

	let metrics = Arc::new(ApiMetrics::new());

	let service = make_service_fn(|conn: &AddrStream| {
		// One clone per connection; the Arc clones below are per request.
		let garage = garage.clone();
		let metrics = metrics.clone();

		let client_addr = conn.remote_addr();
		async move {
			Ok::<_, GarageError>(service_fn(move |req: Request<Body>| {
				let garage = garage.clone();
				let metrics = metrics.clone();

				handler(garage, metrics, req, client_addr)
			}))
		}
	});

	let server = Server::bind(addr).serve(service);

	let graceful = server.with_graceful_shutdown(shutdown_signal);
	info!("API server listening on http://{}", addr);

	graceful.await?;
	Ok(())
}
|
|
||||||
|
|
||||||
/// Per-request entry point: logs the request, opens a tracing span, delegates
/// to [`handler_stage2`], and renders any error as an AWS-style XML response.
///
/// This function itself only returns `Err` when the error response cannot be
/// built; application-level errors are converted into `Ok` HTTP responses.
async fn handler(
	garage: Arc<Garage>,
	metrics: Arc<ApiMetrics>,
	req: Request<Body>,
	addr: SocketAddr,
) -> Result<Response<Body>, GarageError> {
	let uri = req.uri().clone();
	info!("{} {} {}", addr, req.method(), uri);
	debug!("{:?}", req);

	// Placeholder span name; handler_stage2 renames it once the endpoint is known.
	let tracer = opentelemetry::global::tracer("garage");
	let span = tracer
		.span_builder("S3 API call (unknown)")
		.with_trace_id(gen_trace_id())
		.with_attributes(vec![
			KeyValue::new("method", format!("{}", req.method())),
			KeyValue::new("uri", req.uri().to_string()),
		])
		.start(&tracer);

	let res = handler_stage2(garage.clone(), metrics, req)
		.with_context(Context::current_with_span(span))
		.await;

	match res {
		Ok(x) => {
			debug!("{} {:?}", x.status(), x.headers());
			Ok(x)
		}
		Err(e) => {
			// Render the error in the AWS S3 XML error format.
			let body: Body = Body::from(e.aws_xml(&garage.config.s3_api.s3_region, uri.path()));
			let mut http_error_builder = Response::builder()
				.status(e.http_status_code())
				.header("Content-Type", "application/xml");

			// Errors may carry extra headers (e.g. for specific S3 error kinds).
			if let Some(header_map) = http_error_builder.headers_mut() {
				e.add_headers(header_map)
			}

			let http_error = http_error_builder.body(body)?;

			// 5xx are logged louder than client-side 4xx errors.
			if e.http_status_code().is_server_error() {
				warn!("Response: error {}, {}", e.http_status_code(), e);
			} else {
				info!("Response: error {}, {}", e.http_status_code(), e);
			}
			Ok(http_error)
		}
	}
}
|
|
||||||
|
|
||||||
/// Second stage: resolves the endpoint and target bucket, updates the tracing
/// span with the resolved names, and records request/duration/error metrics
/// around the call to [`handler_stage3`].
///
/// The bucket may come from the Host header (vhost-style addressing under the
/// configured root domain) or from the request path.
async fn handler_stage2(
	garage: Arc<Garage>,
	metrics: Arc<ApiMetrics>,
	req: Request<Body>,
) -> Result<Response<Body>, Error> {
	// The Host header is mandatory: it is needed for vhost-style bucket resolution.
	let authority = req
		.headers()
		.get(header::HOST)
		.ok_or_bad_request("Host header required")?
		.to_str()?;

	let host = authority_to_host(authority)?;

	// vhost-style: "<bucket>.<root_domain>" — only if a root domain is configured.
	let bucket_name = garage
		.config
		.s3_api
		.root_domain
		.as_ref()
		.and_then(|root_domain| host_to_bucket(&host, root_domain));

	let (endpoint, bucket_name) = Endpoint::from_request(&req, bucket_name.map(ToOwned::to_owned))?;
	debug!("Endpoint: {:?}", endpoint);

	// Now that the endpoint is known, give the span its real name and attributes.
	let current_context = Context::current();
	let current_span = current_context.span();
	current_span.update_name::<String>(format!("S3 API {}", endpoint.name()));
	current_span.set_attribute(KeyValue::new("endpoint", endpoint.name()));
	current_span.set_attribute(KeyValue::new(
		"bucket",
		bucket_name.clone().unwrap_or_default(),
	));

	let metrics_tags = &[KeyValue::new("api_endpoint", endpoint.name())];

	// Time the actual endpoint handling.
	let res = handler_stage3(garage, req, endpoint, bucket_name)
		.record_duration(&metrics.request_duration, &metrics_tags[..])
		.await;

	metrics.request_counter.add(1, &metrics_tags[..]);

	// Count both 4xx and 5xx responses as errors, tagged with the status code.
	let status_code = match &res {
		Ok(r) => r.status(),
		Err(e) => e.http_status_code(),
	};
	if status_code.is_client_error() || status_code.is_server_error() {
		metrics.error_counter.add(
			1,
			&[
				metrics_tags[0].clone(),
				KeyValue::new("status_code", status_code.as_str().to_string()),
			],
		);
	}

	res
}
|
|
||||||
|
|
||||||
/// Third stage: authenticates the request, optionally wraps the body in a
/// streaming signature verifier, checks the API key's permissions on the
/// target bucket, and dispatches to the per-endpoint handler.
///
/// CORS headers are added to successful responses when a matching rule exists
/// on the bucket. Anonymous access is rejected (not supported yet).
async fn handler_stage3(
	garage: Arc<Garage>,
	req: Request<Body>,
	endpoint: Endpoint,
	bucket_name: Option<String>,
) -> Result<Response<Body>, Error> {
	// Some endpoints are processed early, before we even check for an API key
	if let Endpoint::PostObject = endpoint {
		return handle_post_object(garage, req, bucket_name.unwrap()).await;
	}
	if let Endpoint::Options = endpoint {
		return handle_options_s3api(garage, &req, bucket_name).await;
	}

	let (api_key, mut content_sha256) = check_payload_signature(&garage, &req).await?;
	let api_key = api_key.ok_or_else(|| {
		Error::Forbidden("Garage does not support anonymous access yet".to_string())
	})?;

	// For AWS4 streaming payloads, wrap the body in a stream that verifies
	// each chunk's signature as it is read.
	let req = match req.headers().get("x-amz-content-sha256") {
		Some(header) if header == "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" => {
			// The seed signature was parsed into content_sha256; consume it here.
			let signature = content_sha256
				.take()
				.ok_or_bad_request("No signature provided")?;

			let secret_key = &api_key
				.state
				.as_option()
				.ok_or_internal_error("Deleted key state")?
				.secret_key;

			let date = req
				.headers()
				.get("x-amz-date")
				.ok_or_bad_request("Missing X-Amz-Date field")?
				.to_str()?;
			let date: NaiveDateTime = NaiveDateTime::parse_from_str(date, LONG_DATETIME)
				.ok_or_bad_request("Invalid date")?;
			let date: DateTime<Utc> = DateTime::from_utc(date, Utc);

			let scope = compute_scope(&date, &garage.config.s3_api.s3_region);
			let signing_hmac = crate::signature::signing_hmac(
				&date,
				secret_key,
				&garage.config.s3_api.s3_region,
				"s3",
			)
			.ok_or_internal_error("Unable to build signing HMAC")?;

			req.map(move |body| {
				Body::wrap_stream(
					SignedPayloadStream::new(
						body.map_err(Error::from),
						signing_hmac,
						date,
						&scope,
						signature,
					)
					.map_err(Error::from),
				)
			})
		}
		_ => req,
	};

	// Endpoints that do not target a bucket are handled separately.
	let bucket_name = match bucket_name {
		None => return handle_request_without_bucket(garage, req, api_key, endpoint).await,
		Some(bucket) => bucket.to_string(),
	};

	// Special code path for CreateBucket API endpoint
	if let Endpoint::CreateBucket {} = endpoint {
		return handle_create_bucket(&garage, req, content_sha256, api_key, bucket_name).await;
	}

	let bucket_id = resolve_bucket(&garage, &bucket_name, &api_key).await?;
	let bucket = garage
		.bucket_table
		.get(&EmptyKey, &bucket_id)
		.await?
		.filter(|b| !b.state.is_deleted())
		.ok_or(Error::NoSuchBucket)?;

	// Check the key's permission for this endpoint's access class.
	let allowed = match endpoint.authorization_type() {
		Authorization::Read => api_key.allow_read(&bucket_id),
		Authorization::Write => api_key.allow_write(&bucket_id),
		Authorization::Owner => api_key.allow_owner(&bucket_id),
		// Other authorization types never reach this point (bucket-less
		// endpoints were dispatched above).
		_ => unreachable!(),
	};

	if !allowed {
		return Err(Error::Forbidden(
			"Operation is not allowed for this key.".to_string(),
		));
	}

	// Look up what CORS rule might apply to response.
	// Requests for methods different than GET, HEAD or POST
	// are always preflighted, i.e. the browser should make
	// an OPTIONS call before to check it is allowed
	let matching_cors_rule = match *req.method() {
		Method::GET | Method::HEAD | Method::POST => find_matching_cors_rule(&bucket, &req)?,
		_ => None,
	};

	// Dispatch to the per-endpoint handler.
	let resp = match endpoint {
		Endpoint::HeadObject {
			key, part_number, ..
		} => handle_head(garage, &req, bucket_id, &key, part_number).await,
		Endpoint::GetObject {
			key, part_number, ..
		} => handle_get(garage, &req, bucket_id, &key, part_number).await,
		Endpoint::UploadPart {
			key,
			part_number,
			upload_id,
		} => {
			handle_put_part(
				garage,
				req,
				bucket_id,
				&key,
				part_number,
				&upload_id,
				content_sha256,
			)
			.await
		}
		Endpoint::CopyObject { key } => handle_copy(garage, &api_key, &req, bucket_id, &key).await,
		Endpoint::UploadPartCopy {
			key,
			part_number,
			upload_id,
		} => {
			handle_upload_part_copy(
				garage,
				&api_key,
				&req,
				bucket_id,
				&key,
				part_number,
				&upload_id,
			)
			.await
		}
		Endpoint::PutObject { key } => {
			handle_put(garage, req, bucket_id, &key, content_sha256).await
		}
		Endpoint::AbortMultipartUpload { key, upload_id } => {
			handle_abort_multipart_upload(garage, bucket_id, &key, &upload_id).await
		}
		Endpoint::DeleteObject { key, .. } => handle_delete(garage, bucket_id, &key).await,
		Endpoint::CreateMultipartUpload { key } => {
			handle_create_multipart_upload(garage, &req, &bucket_name, bucket_id, &key).await
		}
		Endpoint::CompleteMultipartUpload { key, upload_id } => {
			handle_complete_multipart_upload(
				garage,
				req,
				&bucket_name,
				bucket_id,
				&key,
				&upload_id,
				content_sha256,
			)
			.await
		}
		// CreateBucket was handled by the early-return above.
		Endpoint::CreateBucket {} => unreachable!(),
		Endpoint::HeadBucket {} => {
			// Reaching this point proves the bucket exists and is accessible:
			// an empty 200 response suffices.
			let empty_body: Body = Body::from(vec![]);
			let response = Response::builder().body(empty_body).unwrap();
			Ok(response)
		}
		Endpoint::DeleteBucket {} => {
			handle_delete_bucket(&garage, bucket_id, bucket_name, api_key).await
		}
		Endpoint::GetBucketLocation {} => handle_get_bucket_location(garage),
		Endpoint::GetBucketVersioning {} => handle_get_bucket_versioning(),
		Endpoint::ListObjects {
			delimiter,
			encoding_type,
			marker,
			max_keys,
			prefix,
		} => {
			handle_list(
				garage,
				&ListObjectsQuery {
					common: ListQueryCommon {
						bucket_name,
						bucket_id,
						delimiter: delimiter.map(|d| d.to_string()),
						// Page size is clamped to the S3 maximum of 1000 keys.
						page_size: max_keys.map(|p| p.clamp(1, 1000)).unwrap_or(1000),
						prefix: prefix.unwrap_or_default(),
						urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false),
					},
					is_v2: false,
					marker,
					continuation_token: None,
					start_after: None,
				},
			)
			.await
		}
		Endpoint::ListObjectsV2 {
			delimiter,
			encoding_type,
			max_keys,
			prefix,
			continuation_token,
			start_after,
			list_type,
			..
		} => {
			if list_type == "2" {
				handle_list(
					garage,
					&ListObjectsQuery {
						common: ListQueryCommon {
							bucket_name,
							bucket_id,
							delimiter: delimiter.map(|d| d.to_string()),
							page_size: max_keys.map(|p| p.clamp(1, 1000)).unwrap_or(1000),
							urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false),
							prefix: prefix.unwrap_or_default(),
						},
						is_v2: true,
						marker: None,
						continuation_token,
						start_after,
					},
				)
				.await
			} else {
				Err(Error::BadRequest(format!(
					"Invalid endpoint: list-type={}",
					list_type
				)))
			}
		}
		Endpoint::ListMultipartUploads {
			delimiter,
			encoding_type,
			key_marker,
			max_uploads,
			prefix,
			upload_id_marker,
		} => {
			handle_list_multipart_upload(
				garage,
				&ListMultipartUploadsQuery {
					common: ListQueryCommon {
						bucket_name,
						bucket_id,
						delimiter: delimiter.map(|d| d.to_string()),
						page_size: max_uploads.map(|p| p.clamp(1, 1000)).unwrap_or(1000),
						prefix: prefix.unwrap_or_default(),
						urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false),
					},
					key_marker,
					upload_id_marker,
				},
			)
			.await
		}
		Endpoint::ListParts {
			key,
			max_parts,
			part_number_marker,
			upload_id,
		} => {
			handle_list_parts(
				garage,
				&ListPartsQuery {
					bucket_name,
					bucket_id,
					key,
					upload_id,
					// S3 part numbers range from 1 to 10000.
					part_number_marker: part_number_marker.map(|p| p.clamp(1, 10000)),
					max_parts: max_parts.map(|p| p.clamp(1, 1000)).unwrap_or(1000),
				},
			)
			.await
		}
		Endpoint::DeleteObjects {} => {
			handle_delete_objects(garage, bucket_id, req, content_sha256).await
		}
		Endpoint::GetBucketWebsite {} => handle_get_website(&bucket).await,
		Endpoint::PutBucketWebsite {} => {
			handle_put_website(garage, bucket_id, req, content_sha256).await
		}
		Endpoint::DeleteBucketWebsite {} => handle_delete_website(garage, bucket_id).await,
		Endpoint::GetBucketCors {} => handle_get_cors(&bucket).await,
		Endpoint::PutBucketCors {} => handle_put_cors(garage, bucket_id, req, content_sha256).await,
		Endpoint::DeleteBucketCors {} => handle_delete_cors(garage, bucket_id).await,
		endpoint => Err(Error::NotImplemented(endpoint.name().to_owned())),
	};

	// If request was a success and we have a CORS rule that applies to it,
	// add the corresponding CORS headers to the response
	let mut resp_ok = resp?;
	if let Some(rule) = matching_cors_rule {
		add_cors_headers(&mut resp_ok, rule)
			.ok_or_internal_error("Invalid bucket CORS configuration")?;
	}

	Ok(resp_ok)
}
|
|
||||||
|
|
||||||
async fn handle_request_without_bucket(
|
|
||||||
garage: Arc<Garage>,
|
|
||||||
_req: Request<Body>,
|
|
||||||
api_key: Key,
|
|
||||||
endpoint: Endpoint,
|
|
||||||
) -> Result<Response<Body>, Error> {
|
|
||||||
match endpoint {
|
|
||||||
Endpoint::ListBuckets => handle_list_buckets(&garage, &api_key).await,
|
|
||||||
endpoint => Err(Error::NotImplemented(endpoint.name().to_owned())),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Resolve a bucket name to its internal id on behalf of the given API key.
///
/// A local alias defined on the key takes precedence over a global bucket
/// alias. Returns `Error::NoSuchBucket` if neither resolves.
// NOTE(review): `&String` (with the clippy allow) appears deliberate —
// presumably to match the alias-map key or a callee signature; confirm
// before changing the parameter to `&str`.
#[allow(clippy::ptr_arg)]
pub async fn resolve_bucket(
	garage: &Garage,
	bucket_name: &String,
	api_key: &Key,
) -> Result<Uuid, Error> {
	let api_key_params = api_key
		.state
		.as_option()
		.ok_or_internal_error("Key should not be deleted at this point")?;

	// First try the key's own (local) aliases, then fall back to global aliases.
	if let Some(Some(bucket_id)) = api_key_params.local_aliases.get(bucket_name) {
		Ok(*bucket_id)
	} else {
		Ok(garage
			.bucket_helper()
			.resolve_global_bucket_name(bucket_name)
			.await?
			.ok_or(Error::NoSuchBucket)?)
	}
}
|
|
||||||
|
|
||||||
/// Extract the bucket name and the key name from an HTTP path and possibly a bucket provided in
|
|
||||||
/// the host header of the request
|
|
||||||
///
|
|
||||||
/// S3 internally manages only buckets and keys. This function splits
|
|
||||||
/// an HTTP path to get the corresponding bucket name and key.
|
|
||||||
pub fn parse_bucket_key<'a>(
|
|
||||||
path: &'a str,
|
|
||||||
host_bucket: Option<&'a str>,
|
|
||||||
) -> Result<(&'a str, Option<&'a str>), Error> {
|
|
||||||
let path = path.trim_start_matches('/');
|
|
||||||
|
|
||||||
if let Some(bucket) = host_bucket {
|
|
||||||
if !path.is_empty() {
|
|
||||||
return Ok((bucket, Some(path)));
|
|
||||||
} else {
|
|
||||||
return Ok((bucket, None));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let (bucket, key) = match path.find('/') {
|
|
||||||
Some(i) => {
|
|
||||||
let key = &path[i + 1..];
|
|
||||||
if !key.is_empty() {
|
|
||||||
(&path[..i], Some(key))
|
|
||||||
} else {
|
|
||||||
(&path[..i], None)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
None => (path, None),
|
|
||||||
};
|
|
||||||
if bucket.is_empty() {
|
|
||||||
return Err(Error::BadRequest("No bucket specified".to_string()));
|
|
||||||
}
|
|
||||||
Ok((bucket, key))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unit tests for parse_bucket_key, covering both path-style and
// vhost-style (Host-header bucket) addressing.
#[cfg(test)]
mod tests {
	use super::*;

	#[test]
	fn parse_bucket_containing_a_key() -> Result<(), Error> {
		let (bucket, key) = parse_bucket_key("/my_bucket/a/super/file.jpg", None)?;
		assert_eq!(bucket, "my_bucket");
		assert_eq!(key.expect("key must be set"), "a/super/file.jpg");
		Ok(())
	}

	#[test]
	fn parse_bucket_containing_no_key() -> Result<(), Error> {
		// Both "bucket/" and "bucket" must yield no key.
		let (bucket, key) = parse_bucket_key("/my_bucket/", None)?;
		assert_eq!(bucket, "my_bucket");
		assert!(key.is_none());
		let (bucket, key) = parse_bucket_key("/my_bucket", None)?;
		assert_eq!(bucket, "my_bucket");
		assert!(key.is_none());
		Ok(())
	}

	#[test]
	fn parse_bucket_containing_no_bucket() {
		// An empty or all-slashes path has no bucket and must error out.
		let parsed = parse_bucket_key("", None);
		assert!(parsed.is_err());
		let parsed = parse_bucket_key("/", None);
		assert!(parsed.is_err());
		let parsed = parse_bucket_key("////", None);
		assert!(parsed.is_err());
	}

	#[test]
	fn parse_bucket_with_vhost_and_key() -> Result<(), Error> {
		let (bucket, key) = parse_bucket_key("/a/super/file.jpg", Some("my-bucket"))?;
		assert_eq!(bucket, "my-bucket");
		assert_eq!(key.expect("key must be set"), "a/super/file.jpg");
		Ok(())
	}

	#[test]
	fn parse_bucket_with_vhost_no_key() -> Result<(), Error> {
		let (bucket, key) = parse_bucket_key("", Some("my-bucket"))?;
		assert_eq!(bucket, "my-bucket");
		assert!(key.is_none());
		let (bucket, key) = parse_bucket_key("/", Some("my-bucket"))?;
		assert_eq!(bucket, "my-bucket");
		assert!(key.is_none());
		Ok(())
	}
}
|
|
177
src/api/common_error.rs
Normal file
177
src/api/common_error.rs
Normal file
|
@ -0,0 +1,177 @@
|
||||||
|
use err_derive::Error;
|
||||||
|
use hyper::StatusCode;
|
||||||
|
|
||||||
|
use garage_util::error::Error as GarageError;
|
||||||
|
|
||||||
|
/// Errors shared by all of Garage's API implementations.
#[derive(Debug, Error)]
pub enum CommonError {
	// ---- INTERNAL ERRORS ----
	/// Error related to deeper parts of Garage
	#[error(display = "Internal error: {}", _0)]
	InternalError(#[error(source)] GarageError),

	/// Error related to Hyper
	#[error(display = "Internal error (Hyper error): {}", _0)]
	Hyper(#[error(source)] hyper::Error),

	/// Error related to HTTP
	#[error(display = "Internal error (HTTP error): {}", _0)]
	Http(#[error(source)] http::Error),

	// ---- GENERIC CLIENT ERRORS ----
	/// Proper authentication was not provided
	#[error(display = "Forbidden: {}", _0)]
	Forbidden(String),

	/// Generic bad request response with custom message
	#[error(display = "Bad request: {}", _0)]
	BadRequest(String),

	// ---- SPECIFIC ERROR CONDITIONS ----
	// These have to be error codes referenced in the S3 spec here:
	// https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList
	/// The requested bucket does not exist
	#[error(display = "Bucket not found: {}", _0)]
	NoSuchBucket(String),

	/// Tried to create a bucket that already exists
	#[error(display = "Bucket already exists")]
	BucketAlreadyExists,

	/// Tried to delete a non-empty bucket
	#[error(display = "Tried to delete a non-empty bucket")]
	BucketNotEmpty,

	// Category: bad request
	/// Bucket name is not valid according to AWS S3 specs
	#[error(display = "Invalid bucket name: {}", _0)]
	InvalidBucketName(String),
}
|
||||||
|
|
||||||
|
impl CommonError {
	/// HTTP status code with which this error should be reported.
	pub fn http_status_code(&self) -> StatusCode {
		match self {
			// Availability-related internal errors map to 503 rather than 500.
			CommonError::InternalError(
				GarageError::Timeout
				| GarageError::RemoteError(_)
				| GarageError::Quorum(_, _, _, _),
			) => StatusCode::SERVICE_UNAVAILABLE,
			CommonError::InternalError(_) | CommonError::Hyper(_) | CommonError::Http(_) => {
				StatusCode::INTERNAL_SERVER_ERROR
			}
			CommonError::BadRequest(_) => StatusCode::BAD_REQUEST,
			CommonError::Forbidden(_) => StatusCode::FORBIDDEN,
			CommonError::NoSuchBucket(_) => StatusCode::NOT_FOUND,
			CommonError::BucketNotEmpty | CommonError::BucketAlreadyExists => StatusCode::CONFLICT,
			CommonError::InvalidBucketName(_) => StatusCode::BAD_REQUEST,
		}
	}

	/// AWS error-code string for this error, as listed in
	/// https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList
	pub fn aws_code(&self) -> &'static str {
		match self {
			CommonError::Forbidden(_) => "AccessDenied",
			CommonError::InternalError(
				GarageError::Timeout
				| GarageError::RemoteError(_)
				| GarageError::Quorum(_, _, _, _),
			) => "ServiceUnavailable",
			CommonError::InternalError(_) | CommonError::Hyper(_) | CommonError::Http(_) => {
				"InternalError"
			}
			CommonError::BadRequest(_) => "InvalidRequest",
			CommonError::NoSuchBucket(_) => "NoSuchBucket",
			CommonError::BucketAlreadyExists => "BucketAlreadyExists",
			CommonError::BucketNotEmpty => "BucketNotEmpty",
			CommonError::InvalidBucketName(_) => "InvalidBucketName",
		}
	}

	/// Convenience constructor for a `BadRequest` error with the given message.
	pub fn bad_request<M: ToString>(msg: M) -> Self {
		CommonError::BadRequest(msg.to_string())
	}
}
|
||||||
|
|
||||||
|
/// Convenience constructors available on any error type that can be built
/// from a [`CommonError`] (i.e. the per-API error enums).
pub trait CommonErrorDerivative: From<CommonError> {
	/// Build an internal-error value carrying `msg`.
	fn internal_error<M: ToString>(msg: M) -> Self {
		Self::from(CommonError::InternalError(GarageError::Message(
			msg.to_string(),
		)))
	}

	/// Build a bad-request error carrying `msg`.
	fn bad_request<M: ToString>(msg: M) -> Self {
		Self::from(CommonError::BadRequest(msg.to_string()))
	}

	/// Build a forbidden (access denied) error carrying `msg`.
	fn forbidden<M: ToString>(msg: M) -> Self {
		Self::from(CommonError::Forbidden(msg.to_string()))
	}
}
|
||||||
|
|
||||||
|
/// Trait to map error to the Bad Request error code
pub trait OkOrBadRequest {
	/// The success value produced when the receiver holds one.
	type S;
	/// Convert a failure/absence into `CommonError::BadRequest` with `reason`.
	fn ok_or_bad_request<M: AsRef<str>>(self, reason: M) -> Result<Self::S, CommonError>;
}
|
||||||
|
|
||||||
|
impl<T, E> OkOrBadRequest for Result<T, E>
|
||||||
|
where
|
||||||
|
E: std::fmt::Display,
|
||||||
|
{
|
||||||
|
type S = T;
|
||||||
|
fn ok_or_bad_request<M: AsRef<str>>(self, reason: M) -> Result<T, CommonError> {
|
||||||
|
match self {
|
||||||
|
Ok(x) => Ok(x),
|
||||||
|
Err(e) => Err(CommonError::BadRequest(format!(
|
||||||
|
"{}: {}",
|
||||||
|
reason.as_ref(),
|
||||||
|
e
|
||||||
|
))),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> OkOrBadRequest for Option<T> {
|
||||||
|
type S = T;
|
||||||
|
fn ok_or_bad_request<M: AsRef<str>>(self, reason: M) -> Result<T, CommonError> {
|
||||||
|
match self {
|
||||||
|
Some(x) => Ok(x),
|
||||||
|
None => Err(CommonError::BadRequest(reason.as_ref().to_string())),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Trait to map an error to an Internal Error code
pub trait OkOrInternalError {
	/// The success value produced when the receiver holds one.
	type S;
	/// Convert a failure/absence into `CommonError::InternalError` with `reason`.
	fn ok_or_internal_error<M: AsRef<str>>(self, reason: M) -> Result<Self::S, CommonError>;
}
|
||||||
|
|
||||||
|
impl<T, E> OkOrInternalError for Result<T, E>
|
||||||
|
where
|
||||||
|
E: std::fmt::Display,
|
||||||
|
{
|
||||||
|
type S = T;
|
||||||
|
fn ok_or_internal_error<M: AsRef<str>>(self, reason: M) -> Result<T, CommonError> {
|
||||||
|
match self {
|
||||||
|
Ok(x) => Ok(x),
|
||||||
|
Err(e) => Err(CommonError::InternalError(GarageError::Message(format!(
|
||||||
|
"{}: {}",
|
||||||
|
reason.as_ref(),
|
||||||
|
e
|
||||||
|
)))),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> OkOrInternalError for Option<T> {
|
||||||
|
type S = T;
|
||||||
|
fn ok_or_internal_error<M: AsRef<str>>(self, reason: M) -> Result<T, CommonError> {
|
||||||
|
match self {
|
||||||
|
Some(x) => Ok(x),
|
||||||
|
None => Err(CommonError::InternalError(GarageError::Message(
|
||||||
|
reason.as_ref().to_string(),
|
||||||
|
))),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
207
src/api/generic_server.rs
Normal file
207
src/api/generic_server.rs
Normal file
|
@ -0,0 +1,207 @@
|
||||||
|
use std::net::SocketAddr;
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use async_trait::async_trait;
|
||||||
|
|
||||||
|
use futures::future::Future;
|
||||||
|
|
||||||
|
use hyper::header::HeaderValue;
|
||||||
|
use hyper::server::conn::AddrStream;
|
||||||
|
use hyper::service::{make_service_fn, service_fn};
|
||||||
|
use hyper::{Body, Request, Response, Server};
|
||||||
|
use hyper::{HeaderMap, StatusCode};
|
||||||
|
|
||||||
|
use opentelemetry::{
|
||||||
|
global,
|
||||||
|
metrics::{Counter, ValueRecorder},
|
||||||
|
trace::{FutureExt, SpanRef, TraceContextExt, Tracer},
|
||||||
|
Context, KeyValue,
|
||||||
|
};
|
||||||
|
|
||||||
|
use garage_util::error::Error as GarageError;
|
||||||
|
use garage_util::metrics::{gen_trace_id, RecordDuration};
|
||||||
|
|
||||||
|
/// A parsed endpoint of one of Garage's HTTP APIs, as consumed by the
/// generic [`ApiServer`].
pub(crate) trait ApiEndpoint: Send + Sync + 'static {
	/// Static endpoint name, used for logging, tracing and metrics labels.
	fn name(&self) -> &'static str;
	/// Attach endpoint-specific attributes to the current tracing span.
	fn add_span_attributes(&self, span: SpanRef<'_>);
}
|
||||||
|
|
||||||
|
/// Error type of an API implementation, which knows how to render itself
/// as an HTTP response.
pub trait ApiError: std::error::Error + Send + Sync + 'static {
	/// HTTP status code of the response for this error.
	fn http_status_code(&self) -> StatusCode;
	/// Add any error-specific headers to the response being built.
	fn add_http_headers(&self, header_map: &mut HeaderMap<HeaderValue>);
	/// Build the response body for this error (given the configured region
	/// and the request path).
	fn http_body(&self, garage_region: &str, path: &str) -> Body;
}
|
||||||
|
|
||||||
|
/// Implementation of one of Garage's HTTP APIs: parses requests into
/// endpoints and handles them. Plugged into [`ApiServer`], which supplies
/// the shared logging / tracing / metrics machinery.
#[async_trait]
pub(crate) trait ApiHandler: Send + Sync + 'static {
	// Short identifier embedded in metric names ("api.<API_NAME>.…").
	const API_NAME: &'static str;
	// Human-readable API name used in logs and span names.
	const API_NAME_DISPLAY: &'static str;

	type Endpoint: ApiEndpoint;
	type Error: ApiError;

	/// Determine which endpoint a request targets.
	fn parse_endpoint(&self, r: &Request<Body>) -> Result<Self::Endpoint, Self::Error>;
	/// Handle a request for the given endpoint.
	async fn handle(
		&self,
		req: Request<Body>,
		endpoint: Self::Endpoint,
	) -> Result<Response<Body>, Self::Error>;
}
|
||||||
|
|
||||||
|
/// Generic HTTP server wrapping an [`ApiHandler`] with shared logging,
/// tracing and per-API metrics.
pub(crate) struct ApiServer<A: ApiHandler> {
	// Region string forwarded to the error renderer (`ApiError::http_body`).
	region: String,
	api_handler: A,

	// Metrics
	request_counter: Counter<u64>,
	error_counter: Counter<u64>,
	request_duration: ValueRecorder<f64>,
}
|
||||||
|
|
||||||
|
impl<A: ApiHandler> ApiServer<A> {
|
||||||
|
	/// Create a server for `api_handler`, registering its per-API metric
	/// instruments (named after `A::API_NAME`) on the global "garage/api" meter.
	pub fn new(region: String, api_handler: A) -> Arc<Self> {
		let meter = global::meter("garage/api");
		Arc::new(Self {
			region,
			api_handler,
			request_counter: meter
				.u64_counter(format!("api.{}.request_counter", A::API_NAME))
				.with_description(format!(
					"Number of API calls to the various {} API endpoints",
					A::API_NAME_DISPLAY
				))
				.init(),
			error_counter: meter
				.u64_counter(format!("api.{}.error_counter", A::API_NAME))
				.with_description(format!(
					"Number of API calls to the various {} API endpoints that resulted in errors",
					A::API_NAME_DISPLAY
				))
				.init(),
			request_duration: meter
				.f64_value_recorder(format!("api.{}.request_duration", A::API_NAME))
				.with_description(format!(
					"Duration of API calls to the various {} API endpoints",
					A::API_NAME_DISPLAY
				))
				.init(),
		})
	}
|
||||||
|
|
||||||
|
	/// Serve the API on `bind_addr` until `shutdown_signal` resolves, then
	/// shut down gracefully. Each request is dispatched to `self.handler`.
	pub async fn run_server(
		self: Arc<Self>,
		bind_addr: SocketAddr,
		shutdown_signal: impl Future<Output = ()>,
	) -> Result<(), GarageError> {
		let service = make_service_fn(|conn: &AddrStream| {
			// One Arc clone per connection; another per request below.
			let this = self.clone();

			let client_addr = conn.remote_addr();
			async move {
				Ok::<_, GarageError>(service_fn(move |req: Request<Body>| {
					let this = this.clone();

					this.handler(req, client_addr)
				}))
			}
		});

		let server = Server::bind(&bind_addr).serve(service);

		let graceful = server.with_graceful_shutdown(shutdown_signal);
		info!(
			"{} API server listening on http://{}",
			A::API_NAME_DISPLAY,
			bind_addr
		);

		graceful.await?;
		Ok(())
	}
|
||||||
|
|
||||||
|
async fn handler(
|
||||||
|
self: Arc<Self>,
|
||||||
|
req: Request<Body>,
|
||||||
|
addr: SocketAddr,
|
||||||
|
) -> Result<Response<Body>, GarageError> {
|
||||||
|
let uri = req.uri().clone();
|
||||||
|
info!("{} {} {}", addr, req.method(), uri);
|
||||||
|
debug!("{:?}", req);
|
||||||
|
|
||||||
|
let tracer = opentelemetry::global::tracer("garage");
|
||||||
|
let span = tracer
|
||||||
|
.span_builder(format!("{} API call (unknown)", A::API_NAME_DISPLAY))
|
||||||
|
.with_trace_id(gen_trace_id())
|
||||||
|
.with_attributes(vec![
|
||||||
|
KeyValue::new("method", format!("{}", req.method())),
|
||||||
|
KeyValue::new("uri", req.uri().to_string()),
|
||||||
|
])
|
||||||
|
.start(&tracer);
|
||||||
|
|
||||||
|
let res = self
|
||||||
|
.handler_stage2(req)
|
||||||
|
.with_context(Context::current_with_span(span))
|
||||||
|
.await;
|
||||||
|
|
||||||
|
match res {
|
||||||
|
Ok(x) => {
|
||||||
|
debug!("{} {:?}", x.status(), x.headers());
|
||||||
|
Ok(x)
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
let body: Body = e.http_body(&self.region, uri.path());
|
||||||
|
let mut http_error_builder = Response::builder().status(e.http_status_code());
|
||||||
|
|
||||||
|
if let Some(header_map) = http_error_builder.headers_mut() {
|
||||||
|
e.add_http_headers(header_map)
|
||||||
|
}
|
||||||
|
|
||||||
|
let http_error = http_error_builder.body(body)?;
|
||||||
|
|
||||||
|
if e.http_status_code().is_server_error() {
|
||||||
|
warn!("Response: error {}, {}", e.http_status_code(), e);
|
||||||
|
} else {
|
||||||
|
info!("Response: error {}, {}", e.http_status_code(), e);
|
||||||
|
}
|
||||||
|
Ok(http_error)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handler_stage2(&self, req: Request<Body>) -> Result<Response<Body>, A::Error> {
|
||||||
|
let endpoint = self.api_handler.parse_endpoint(&req)?;
|
||||||
|
debug!("Endpoint: {}", endpoint.name());
|
||||||
|
|
||||||
|
let current_context = Context::current();
|
||||||
|
let current_span = current_context.span();
|
||||||
|
current_span.update_name::<String>(format!("S3 API {}", endpoint.name()));
|
||||||
|
current_span.set_attribute(KeyValue::new("endpoint", endpoint.name()));
|
||||||
|
endpoint.add_span_attributes(current_span);
|
||||||
|
|
||||||
|
let metrics_tags = &[KeyValue::new("api_endpoint", endpoint.name())];
|
||||||
|
|
||||||
|
let res = self
|
||||||
|
.api_handler
|
||||||
|
.handle(req, endpoint)
|
||||||
|
.record_duration(&self.request_duration, &metrics_tags[..])
|
||||||
|
.await;
|
||||||
|
|
||||||
|
self.request_counter.add(1, &metrics_tags[..]);
|
||||||
|
|
||||||
|
let status_code = match &res {
|
||||||
|
Ok(r) => r.status(),
|
||||||
|
Err(e) => e.http_status_code(),
|
||||||
|
};
|
||||||
|
if status_code.is_client_error() || status_code.is_server_error() {
|
||||||
|
self.error_counter.add(
|
||||||
|
1,
|
||||||
|
&[
|
||||||
|
metrics_tags[0].clone(),
|
||||||
|
KeyValue::new("status_code", status_code.as_str().to_string()),
|
||||||
|
],
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
res
|
||||||
|
}
|
||||||
|
}
|
|
@ -1,5 +1,21 @@
|
||||||
use crate::Error;
|
use hyper::{Body, Request, Response};
|
||||||
use idna::domain_to_unicode;
|
use idna::domain_to_unicode;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use crate::common_error::{CommonError as Error, *};
|
||||||
|
|
||||||
|
/// What kind of authorization is required to perform a given action
|
||||||
|
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||||
|
pub enum Authorization {
|
||||||
|
/// No authorization is required
|
||||||
|
None,
|
||||||
|
/// Having Read permission on bucket
|
||||||
|
Read,
|
||||||
|
/// Having Write permission on bucket
|
||||||
|
Write,
|
||||||
|
/// Having Owner permission on bucket
|
||||||
|
Owner,
|
||||||
|
}
|
||||||
|
|
||||||
/// Host to bucket
|
/// Host to bucket
|
||||||
///
|
///
|
||||||
|
@ -31,7 +47,7 @@ pub fn authority_to_host(authority: &str) -> Result<String, Error> {
|
||||||
let mut iter = authority.chars().enumerate();
|
let mut iter = authority.chars().enumerate();
|
||||||
let (_, first_char) = iter
|
let (_, first_char) = iter
|
||||||
.next()
|
.next()
|
||||||
.ok_or_else(|| Error::BadRequest("Authority is empty".to_string()))?;
|
.ok_or_else(|| Error::bad_request("Authority is empty".to_string()))?;
|
||||||
|
|
||||||
let split = match first_char {
|
let split = match first_char {
|
||||||
'[' => {
|
'[' => {
|
||||||
|
@ -39,7 +55,7 @@ pub fn authority_to_host(authority: &str) -> Result<String, Error> {
|
||||||
match iter.next() {
|
match iter.next() {
|
||||||
Some((_, ']')) => iter.next(),
|
Some((_, ']')) => iter.next(),
|
||||||
_ => {
|
_ => {
|
||||||
return Err(Error::BadRequest(format!(
|
return Err(Error::bad_request(format!(
|
||||||
"Authority {} has an illegal format",
|
"Authority {} has an illegal format",
|
||||||
authority
|
authority
|
||||||
)))
|
)))
|
||||||
|
@ -52,7 +68,7 @@ pub fn authority_to_host(authority: &str) -> Result<String, Error> {
|
||||||
let authority = match split {
|
let authority = match split {
|
||||||
Some((i, ':')) => Ok(&authority[..i]),
|
Some((i, ':')) => Ok(&authority[..i]),
|
||||||
None => Ok(authority),
|
None => Ok(authority),
|
||||||
Some((_, _)) => Err(Error::BadRequest(format!(
|
Some((_, _)) => Err(Error::bad_request(format!(
|
||||||
"Authority {} has an illegal format",
|
"Authority {} has an illegal format",
|
||||||
authority
|
authority
|
||||||
))),
|
))),
|
||||||
|
@ -60,10 +76,134 @@ pub fn authority_to_host(authority: &str) -> Result<String, Error> {
|
||||||
authority.map(|h| domain_to_unicode(h).0)
|
authority.map(|h| domain_to_unicode(h).0)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Extract the bucket name and the key name from an HTTP path and possibly a bucket provided in
|
||||||
|
/// the host header of the request
|
||||||
|
///
|
||||||
|
/// S3 internally manages only buckets and keys. This function splits
|
||||||
|
/// an HTTP path to get the corresponding bucket name and key.
|
||||||
|
pub fn parse_bucket_key<'a>(
|
||||||
|
path: &'a str,
|
||||||
|
host_bucket: Option<&'a str>,
|
||||||
|
) -> Result<(&'a str, Option<&'a str>), Error> {
|
||||||
|
let path = path.trim_start_matches('/');
|
||||||
|
|
||||||
|
if let Some(bucket) = host_bucket {
|
||||||
|
if !path.is_empty() {
|
||||||
|
return Ok((bucket, Some(path)));
|
||||||
|
} else {
|
||||||
|
return Ok((bucket, None));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let (bucket, key) = match path.find('/') {
|
||||||
|
Some(i) => {
|
||||||
|
let key = &path[i + 1..];
|
||||||
|
if !key.is_empty() {
|
||||||
|
(&path[..i], Some(key))
|
||||||
|
} else {
|
||||||
|
(&path[..i], None)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
None => (path, None),
|
||||||
|
};
|
||||||
|
if bucket.is_empty() {
|
||||||
|
return Err(Error::bad_request("No bucket specified"));
|
||||||
|
}
|
||||||
|
Ok((bucket, key))
|
||||||
|
}
|
||||||
|
|
||||||
|
const UTF8_BEFORE_LAST_CHAR: char = '\u{10FFFE}';
|
||||||
|
|
||||||
|
/// Compute the key after the prefix
|
||||||
|
pub fn key_after_prefix(pfx: &str) -> Option<String> {
|
||||||
|
let mut next = pfx.to_string();
|
||||||
|
while !next.is_empty() {
|
||||||
|
let tail = next.pop().unwrap();
|
||||||
|
if tail >= char::MAX {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Circumvent a limitation of RangeFrom that overflow earlier than needed
|
||||||
|
// See: https://doc.rust-lang.org/core/ops/struct.RangeFrom.html
|
||||||
|
let new_tail = if tail == UTF8_BEFORE_LAST_CHAR {
|
||||||
|
char::MAX
|
||||||
|
} else {
|
||||||
|
(tail..).nth(1).unwrap()
|
||||||
|
};
|
||||||
|
|
||||||
|
next.push(new_tail);
|
||||||
|
return Some(next);
|
||||||
|
}
|
||||||
|
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn parse_json_body<T: for<'de> Deserialize<'de>>(req: Request<Body>) -> Result<T, Error> {
|
||||||
|
let body = hyper::body::to_bytes(req.into_body()).await?;
|
||||||
|
let resp: T = serde_json::from_slice(&body).ok_or_bad_request("Invalid JSON")?;
|
||||||
|
Ok(resp)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn json_ok_response<T: Serialize>(res: &T) -> Result<Response<Body>, Error> {
|
||||||
|
let resp_json = serde_json::to_string_pretty(res).map_err(garage_util::error::Error::from)?;
|
||||||
|
Ok(Response::builder()
|
||||||
|
.status(hyper::StatusCode::OK)
|
||||||
|
.header(http::header::CONTENT_TYPE, "application/json")
|
||||||
|
.body(Body::from(resp_json))?)
|
||||||
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use super::*;
|
use super::*;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn parse_bucket_containing_a_key() -> Result<(), Error> {
|
||||||
|
let (bucket, key) = parse_bucket_key("/my_bucket/a/super/file.jpg", None)?;
|
||||||
|
assert_eq!(bucket, "my_bucket");
|
||||||
|
assert_eq!(key.expect("key must be set"), "a/super/file.jpg");
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn parse_bucket_containing_no_key() -> Result<(), Error> {
|
||||||
|
let (bucket, key) = parse_bucket_key("/my_bucket/", None)?;
|
||||||
|
assert_eq!(bucket, "my_bucket");
|
||||||
|
assert!(key.is_none());
|
||||||
|
let (bucket, key) = parse_bucket_key("/my_bucket", None)?;
|
||||||
|
assert_eq!(bucket, "my_bucket");
|
||||||
|
assert!(key.is_none());
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn parse_bucket_containing_no_bucket() {
|
||||||
|
let parsed = parse_bucket_key("", None);
|
||||||
|
assert!(parsed.is_err());
|
||||||
|
let parsed = parse_bucket_key("/", None);
|
||||||
|
assert!(parsed.is_err());
|
||||||
|
let parsed = parse_bucket_key("////", None);
|
||||||
|
assert!(parsed.is_err());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn parse_bucket_with_vhost_and_key() -> Result<(), Error> {
|
||||||
|
let (bucket, key) = parse_bucket_key("/a/super/file.jpg", Some("my-bucket"))?;
|
||||||
|
assert_eq!(bucket, "my-bucket");
|
||||||
|
assert_eq!(key.expect("key must be set"), "a/super/file.jpg");
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn parse_bucket_with_vhost_no_key() -> Result<(), Error> {
|
||||||
|
let (bucket, key) = parse_bucket_key("", Some("my-bucket"))?;
|
||||||
|
assert_eq!(bucket, "my-bucket");
|
||||||
|
assert!(key.is_none());
|
||||||
|
let (bucket, key) = parse_bucket_key("/", Some("my-bucket"))?;
|
||||||
|
assert_eq!(bucket, "my-bucket");
|
||||||
|
assert!(key.is_none());
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn authority_to_host_with_port() -> Result<(), Error> {
|
fn authority_to_host_with_port() -> Result<(), Error> {
|
||||||
let domain = authority_to_host("[::1]:3902")?;
|
let domain = authority_to_host("[::1]:3902")?;
|
||||||
|
@ -111,4 +251,47 @@ mod tests {
|
||||||
assert_eq!(host_to_bucket("not-garage.tld", "garage.tld"), None);
|
assert_eq!(host_to_bucket("not-garage.tld", "garage.tld"), None);
|
||||||
assert_eq!(host_to_bucket("not-garage.tld", ".garage.tld"), None);
|
assert_eq!(host_to_bucket("not-garage.tld", ".garage.tld"), None);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_key_after_prefix() {
|
||||||
|
use std::iter::FromIterator;
|
||||||
|
|
||||||
|
assert_eq!(UTF8_BEFORE_LAST_CHAR as u32, (char::MAX as u32) - 1);
|
||||||
|
assert_eq!(key_after_prefix("a/b/").unwrap().as_str(), "a/b0");
|
||||||
|
assert_eq!(key_after_prefix("€").unwrap().as_str(), "₭");
|
||||||
|
assert_eq!(
|
||||||
|
key_after_prefix("").unwrap().as_str(),
|
||||||
|
String::from(char::from_u32(0x10FFFE).unwrap())
|
||||||
|
);
|
||||||
|
|
||||||
|
// When the last character is the biggest UTF8 char
|
||||||
|
let a = String::from_iter(['a', char::MAX].iter());
|
||||||
|
assert_eq!(key_after_prefix(a.as_str()).unwrap().as_str(), "b");
|
||||||
|
|
||||||
|
// When all characters are the biggest UTF8 char
|
||||||
|
let b = String::from_iter([char::MAX; 3].iter());
|
||||||
|
assert!(key_after_prefix(b.as_str()).is_none());
|
||||||
|
|
||||||
|
// Check utf8 surrogates
|
||||||
|
let c = String::from('\u{D7FF}');
|
||||||
|
assert_eq!(
|
||||||
|
key_after_prefix(c.as_str()).unwrap().as_str(),
|
||||||
|
String::from('\u{E000}')
|
||||||
|
);
|
||||||
|
|
||||||
|
// Check the character before the biggest one
|
||||||
|
let d = String::from('\u{10FFFE}');
|
||||||
|
assert_eq!(
|
||||||
|
key_after_prefix(d.as_str()).unwrap().as_str(),
|
||||||
|
String::from(char::MAX)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize)]
|
||||||
|
pub(crate) struct CustomApiErrorBody {
|
||||||
|
pub(crate) code: String,
|
||||||
|
pub(crate) message: String,
|
||||||
|
pub(crate) region: String,
|
||||||
|
pub(crate) path: String,
|
||||||
}
|
}
|
||||||
|
|
196
src/api/k2v/api_server.rs
Normal file
196
src/api/k2v/api_server.rs
Normal file
|
@ -0,0 +1,196 @@
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use async_trait::async_trait;
|
||||||
|
|
||||||
|
use futures::future::Future;
|
||||||
|
use hyper::{Body, Method, Request, Response};
|
||||||
|
|
||||||
|
use opentelemetry::{trace::SpanRef, KeyValue};
|
||||||
|
|
||||||
|
use garage_util::error::Error as GarageError;
|
||||||
|
|
||||||
|
use garage_model::garage::Garage;
|
||||||
|
|
||||||
|
use crate::generic_server::*;
|
||||||
|
use crate::k2v::error::*;
|
||||||
|
|
||||||
|
use crate::signature::payload::check_payload_signature;
|
||||||
|
use crate::signature::streaming::*;
|
||||||
|
|
||||||
|
use crate::helpers::*;
|
||||||
|
use crate::k2v::batch::*;
|
||||||
|
use crate::k2v::index::*;
|
||||||
|
use crate::k2v::item::*;
|
||||||
|
use crate::k2v::router::Endpoint;
|
||||||
|
use crate::s3::cors::*;
|
||||||
|
|
||||||
|
pub struct K2VApiServer {
|
||||||
|
garage: Arc<Garage>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) struct K2VApiEndpoint {
|
||||||
|
bucket_name: String,
|
||||||
|
endpoint: Endpoint,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl K2VApiServer {
|
||||||
|
pub async fn run(
|
||||||
|
garage: Arc<Garage>,
|
||||||
|
shutdown_signal: impl Future<Output = ()>,
|
||||||
|
) -> Result<(), GarageError> {
|
||||||
|
if let Some(cfg) = &garage.config.k2v_api {
|
||||||
|
let bind_addr = cfg.api_bind_addr;
|
||||||
|
|
||||||
|
ApiServer::new(
|
||||||
|
garage.config.s3_api.s3_region.clone(),
|
||||||
|
K2VApiServer { garage },
|
||||||
|
)
|
||||||
|
.run_server(bind_addr, shutdown_signal)
|
||||||
|
.await
|
||||||
|
} else {
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl ApiHandler for K2VApiServer {
|
||||||
|
const API_NAME: &'static str = "k2v";
|
||||||
|
const API_NAME_DISPLAY: &'static str = "K2V";
|
||||||
|
|
||||||
|
type Endpoint = K2VApiEndpoint;
|
||||||
|
type Error = Error;
|
||||||
|
|
||||||
|
fn parse_endpoint(&self, req: &Request<Body>) -> Result<K2VApiEndpoint, Error> {
|
||||||
|
let (endpoint, bucket_name) = Endpoint::from_request(req)?;
|
||||||
|
|
||||||
|
Ok(K2VApiEndpoint {
|
||||||
|
bucket_name,
|
||||||
|
endpoint,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handle(
|
||||||
|
&self,
|
||||||
|
req: Request<Body>,
|
||||||
|
endpoint: K2VApiEndpoint,
|
||||||
|
) -> Result<Response<Body>, Error> {
|
||||||
|
let K2VApiEndpoint {
|
||||||
|
bucket_name,
|
||||||
|
endpoint,
|
||||||
|
} = endpoint;
|
||||||
|
let garage = self.garage.clone();
|
||||||
|
|
||||||
|
// The OPTIONS method is procesed early, before we even check for an API key
|
||||||
|
if let Endpoint::Options = endpoint {
|
||||||
|
return Ok(handle_options_s3api(garage, &req, Some(bucket_name))
|
||||||
|
.await
|
||||||
|
.ok_or_bad_request("Error handling OPTIONS")?);
|
||||||
|
}
|
||||||
|
|
||||||
|
let (api_key, mut content_sha256) = check_payload_signature(&garage, "k2v", &req).await?;
|
||||||
|
let api_key = api_key
|
||||||
|
.ok_or_else(|| Error::forbidden("Garage does not support anonymous access yet"))?;
|
||||||
|
|
||||||
|
let req = parse_streaming_body(
|
||||||
|
&api_key,
|
||||||
|
req,
|
||||||
|
&mut content_sha256,
|
||||||
|
&garage.config.s3_api.s3_region,
|
||||||
|
"k2v",
|
||||||
|
)?;
|
||||||
|
|
||||||
|
let bucket_id = garage
|
||||||
|
.bucket_helper()
|
||||||
|
.resolve_bucket(&bucket_name, &api_key)
|
||||||
|
.await?;
|
||||||
|
let bucket = garage
|
||||||
|
.bucket_helper()
|
||||||
|
.get_existing_bucket(bucket_id)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let allowed = match endpoint.authorization_type() {
|
||||||
|
Authorization::Read => api_key.allow_read(&bucket_id),
|
||||||
|
Authorization::Write => api_key.allow_write(&bucket_id),
|
||||||
|
Authorization::Owner => api_key.allow_owner(&bucket_id),
|
||||||
|
_ => unreachable!(),
|
||||||
|
};
|
||||||
|
|
||||||
|
if !allowed {
|
||||||
|
return Err(Error::forbidden("Operation is not allowed for this key."));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Look up what CORS rule might apply to response.
|
||||||
|
// Requests for methods different than GET, HEAD or POST
|
||||||
|
// are always preflighted, i.e. the browser should make
|
||||||
|
// an OPTIONS call before to check it is allowed
|
||||||
|
let matching_cors_rule = match *req.method() {
|
||||||
|
Method::GET | Method::HEAD | Method::POST => find_matching_cors_rule(&bucket, &req)
|
||||||
|
.ok_or_internal_error("Error looking up CORS rule")?,
|
||||||
|
_ => None,
|
||||||
|
};
|
||||||
|
|
||||||
|
let resp = match endpoint {
|
||||||
|
Endpoint::DeleteItem {
|
||||||
|
partition_key,
|
||||||
|
sort_key,
|
||||||
|
} => handle_delete_item(garage, req, bucket_id, &partition_key, &sort_key).await,
|
||||||
|
Endpoint::InsertItem {
|
||||||
|
partition_key,
|
||||||
|
sort_key,
|
||||||
|
} => handle_insert_item(garage, req, bucket_id, &partition_key, &sort_key).await,
|
||||||
|
Endpoint::ReadItem {
|
||||||
|
partition_key,
|
||||||
|
sort_key,
|
||||||
|
} => handle_read_item(garage, &req, bucket_id, &partition_key, &sort_key).await,
|
||||||
|
Endpoint::PollItem {
|
||||||
|
partition_key,
|
||||||
|
sort_key,
|
||||||
|
causality_token,
|
||||||
|
timeout,
|
||||||
|
} => {
|
||||||
|
handle_poll_item(
|
||||||
|
garage,
|
||||||
|
&req,
|
||||||
|
bucket_id,
|
||||||
|
partition_key,
|
||||||
|
sort_key,
|
||||||
|
causality_token,
|
||||||
|
timeout,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
Endpoint::ReadIndex {
|
||||||
|
prefix,
|
||||||
|
start,
|
||||||
|
end,
|
||||||
|
limit,
|
||||||
|
reverse,
|
||||||
|
} => handle_read_index(garage, bucket_id, prefix, start, end, limit, reverse).await,
|
||||||
|
Endpoint::InsertBatch {} => handle_insert_batch(garage, bucket_id, req).await,
|
||||||
|
Endpoint::ReadBatch {} => handle_read_batch(garage, bucket_id, req).await,
|
||||||
|
Endpoint::DeleteBatch {} => handle_delete_batch(garage, bucket_id, req).await,
|
||||||
|
Endpoint::Options => unreachable!(),
|
||||||
|
};
|
||||||
|
|
||||||
|
// If request was a success and we have a CORS rule that applies to it,
|
||||||
|
// add the corresponding CORS headers to the response
|
||||||
|
let mut resp_ok = resp?;
|
||||||
|
if let Some(rule) = matching_cors_rule {
|
||||||
|
add_cors_headers(&mut resp_ok, rule)
|
||||||
|
.ok_or_internal_error("Invalid bucket CORS configuration")?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(resp_ok)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ApiEndpoint for K2VApiEndpoint {
|
||||||
|
fn name(&self) -> &'static str {
|
||||||
|
self.endpoint.name()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn add_span_attributes(&self, span: SpanRef<'_>) {
|
||||||
|
span.set_attribute(KeyValue::new("bucket", self.bucket_name.clone()));
|
||||||
|
}
|
||||||
|
}
|
363
src/api/k2v/batch.rs
Normal file
363
src/api/k2v/batch.rs
Normal file
|
@ -0,0 +1,363 @@
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use hyper::{Body, Request, Response, StatusCode};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use garage_util::data::*;
|
||||||
|
use garage_util::error::Error as GarageError;
|
||||||
|
|
||||||
|
use garage_table::{EnumerationOrder, TableSchema};
|
||||||
|
|
||||||
|
use garage_model::garage::Garage;
|
||||||
|
use garage_model::k2v::causality::*;
|
||||||
|
use garage_model::k2v::item_table::*;
|
||||||
|
|
||||||
|
use crate::helpers::*;
|
||||||
|
use crate::k2v::error::*;
|
||||||
|
use crate::k2v::range::read_range;
|
||||||
|
|
||||||
|
pub async fn handle_insert_batch(
|
||||||
|
garage: Arc<Garage>,
|
||||||
|
bucket_id: Uuid,
|
||||||
|
req: Request<Body>,
|
||||||
|
) -> Result<Response<Body>, Error> {
|
||||||
|
let items = parse_json_body::<Vec<InsertBatchItem>>(req).await?;
|
||||||
|
|
||||||
|
let mut items2 = vec![];
|
||||||
|
for it in items {
|
||||||
|
let ct = it
|
||||||
|
.ct
|
||||||
|
.map(|s| CausalContext::parse(&s))
|
||||||
|
.transpose()
|
||||||
|
.ok_or_bad_request("Invalid causality token")?;
|
||||||
|
let v = match it.v {
|
||||||
|
Some(vs) => {
|
||||||
|
DvvsValue::Value(base64::decode(vs).ok_or_bad_request("Invalid base64 value")?)
|
||||||
|
}
|
||||||
|
None => DvvsValue::Deleted,
|
||||||
|
};
|
||||||
|
items2.push((it.pk, it.sk, ct, v));
|
||||||
|
}
|
||||||
|
|
||||||
|
garage.k2v.rpc.insert_batch(bucket_id, items2).await?;
|
||||||
|
|
||||||
|
Ok(Response::builder()
|
||||||
|
.status(StatusCode::OK)
|
||||||
|
.body(Body::empty())?)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn handle_read_batch(
|
||||||
|
garage: Arc<Garage>,
|
||||||
|
bucket_id: Uuid,
|
||||||
|
req: Request<Body>,
|
||||||
|
) -> Result<Response<Body>, Error> {
|
||||||
|
let queries = parse_json_body::<Vec<ReadBatchQuery>>(req).await?;
|
||||||
|
|
||||||
|
let resp_results = futures::future::join_all(
|
||||||
|
queries
|
||||||
|
.into_iter()
|
||||||
|
.map(|q| handle_read_batch_query(&garage, bucket_id, q)),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
|
||||||
|
let mut resps: Vec<ReadBatchResponse> = vec![];
|
||||||
|
for resp in resp_results {
|
||||||
|
resps.push(resp?);
|
||||||
|
}
|
||||||
|
|
||||||
|
let resp_json = serde_json::to_string_pretty(&resps).map_err(GarageError::from)?;
|
||||||
|
Ok(Response::builder()
|
||||||
|
.status(StatusCode::OK)
|
||||||
|
.body(Body::from(resp_json))?)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handle_read_batch_query(
|
||||||
|
garage: &Arc<Garage>,
|
||||||
|
bucket_id: Uuid,
|
||||||
|
query: ReadBatchQuery,
|
||||||
|
) -> Result<ReadBatchResponse, Error> {
|
||||||
|
let partition = K2VItemPartition {
|
||||||
|
bucket_id,
|
||||||
|
partition_key: query.partition_key.clone(),
|
||||||
|
};
|
||||||
|
|
||||||
|
let filter = ItemFilter {
|
||||||
|
exclude_only_tombstones: !query.tombstones,
|
||||||
|
conflicts_only: query.conflicts_only,
|
||||||
|
};
|
||||||
|
|
||||||
|
let (items, more, next_start) = if query.single_item {
|
||||||
|
if query.prefix.is_some() || query.end.is_some() || query.limit.is_some() || query.reverse {
|
||||||
|
return Err(Error::bad_request("Batch query parameters 'prefix', 'end', 'limit' and 'reverse' must not be set when singleItem is true."));
|
||||||
|
}
|
||||||
|
let sk = query
|
||||||
|
.start
|
||||||
|
.as_ref()
|
||||||
|
.ok_or_bad_request("start should be specified if single_item is set")?;
|
||||||
|
let item = garage
|
||||||
|
.k2v
|
||||||
|
.item_table
|
||||||
|
.get(&partition, sk)
|
||||||
|
.await?
|
||||||
|
.filter(|e| K2VItemTable::matches_filter(e, &filter));
|
||||||
|
match item {
|
||||||
|
Some(i) => (vec![ReadBatchResponseItem::from(i)], false, None),
|
||||||
|
None => (vec![], false, None),
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
let (items, more, next_start) = read_range(
|
||||||
|
&garage.k2v.item_table,
|
||||||
|
&partition,
|
||||||
|
&query.prefix,
|
||||||
|
&query.start,
|
||||||
|
&query.end,
|
||||||
|
query.limit,
|
||||||
|
Some(filter),
|
||||||
|
EnumerationOrder::from_reverse(query.reverse),
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let items = items
|
||||||
|
.into_iter()
|
||||||
|
.map(ReadBatchResponseItem::from)
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
|
||||||
|
(items, more, next_start)
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(ReadBatchResponse {
|
||||||
|
partition_key: query.partition_key,
|
||||||
|
prefix: query.prefix,
|
||||||
|
start: query.start,
|
||||||
|
end: query.end,
|
||||||
|
limit: query.limit,
|
||||||
|
reverse: query.reverse,
|
||||||
|
single_item: query.single_item,
|
||||||
|
conflicts_only: query.conflicts_only,
|
||||||
|
tombstones: query.tombstones,
|
||||||
|
items,
|
||||||
|
more,
|
||||||
|
next_start,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn handle_delete_batch(
|
||||||
|
garage: Arc<Garage>,
|
||||||
|
bucket_id: Uuid,
|
||||||
|
req: Request<Body>,
|
||||||
|
) -> Result<Response<Body>, Error> {
|
||||||
|
let queries = parse_json_body::<Vec<DeleteBatchQuery>>(req).await?;
|
||||||
|
|
||||||
|
let resp_results = futures::future::join_all(
|
||||||
|
queries
|
||||||
|
.into_iter()
|
||||||
|
.map(|q| handle_delete_batch_query(&garage, bucket_id, q)),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
|
||||||
|
let mut resps: Vec<DeleteBatchResponse> = vec![];
|
||||||
|
for resp in resp_results {
|
||||||
|
resps.push(resp?);
|
||||||
|
}
|
||||||
|
|
||||||
|
let resp_json = serde_json::to_string_pretty(&resps).map_err(GarageError::from)?;
|
||||||
|
Ok(Response::builder()
|
||||||
|
.status(StatusCode::OK)
|
||||||
|
.body(Body::from(resp_json))?)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handle_delete_batch_query(
|
||||||
|
garage: &Arc<Garage>,
|
||||||
|
bucket_id: Uuid,
|
||||||
|
query: DeleteBatchQuery,
|
||||||
|
) -> Result<DeleteBatchResponse, Error> {
|
||||||
|
let partition = K2VItemPartition {
|
||||||
|
bucket_id,
|
||||||
|
partition_key: query.partition_key.clone(),
|
||||||
|
};
|
||||||
|
|
||||||
|
let filter = ItemFilter {
|
||||||
|
exclude_only_tombstones: true,
|
||||||
|
conflicts_only: false,
|
||||||
|
};
|
||||||
|
|
||||||
|
let deleted_items = if query.single_item {
|
||||||
|
if query.prefix.is_some() || query.end.is_some() {
|
||||||
|
return Err(Error::bad_request("Batch query parameters 'prefix' and 'end' must not be set when singleItem is true."));
|
||||||
|
}
|
||||||
|
let sk = query
|
||||||
|
.start
|
||||||
|
.as_ref()
|
||||||
|
.ok_or_bad_request("start should be specified if single_item is set")?;
|
||||||
|
let item = garage
|
||||||
|
.k2v
|
||||||
|
.item_table
|
||||||
|
.get(&partition, sk)
|
||||||
|
.await?
|
||||||
|
.filter(|e| K2VItemTable::matches_filter(e, &filter));
|
||||||
|
match item {
|
||||||
|
Some(i) => {
|
||||||
|
let cc = i.causal_context();
|
||||||
|
garage
|
||||||
|
.k2v
|
||||||
|
.rpc
|
||||||
|
.insert(
|
||||||
|
bucket_id,
|
||||||
|
i.partition.partition_key,
|
||||||
|
i.sort_key,
|
||||||
|
Some(cc),
|
||||||
|
DvvsValue::Deleted,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
1
|
||||||
|
}
|
||||||
|
None => 0,
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
let (items, more, _next_start) = read_range(
|
||||||
|
&garage.k2v.item_table,
|
||||||
|
&partition,
|
||||||
|
&query.prefix,
|
||||||
|
&query.start,
|
||||||
|
&query.end,
|
||||||
|
None,
|
||||||
|
Some(filter),
|
||||||
|
EnumerationOrder::Forward,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
assert!(!more);
|
||||||
|
|
||||||
|
// TODO delete items
|
||||||
|
let items = items
|
||||||
|
.into_iter()
|
||||||
|
.map(|i| {
|
||||||
|
let cc = i.causal_context();
|
||||||
|
(
|
||||||
|
i.partition.partition_key,
|
||||||
|
i.sort_key,
|
||||||
|
Some(cc),
|
||||||
|
DvvsValue::Deleted,
|
||||||
|
)
|
||||||
|
})
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
let n = items.len();
|
||||||
|
|
||||||
|
garage.k2v.rpc.insert_batch(bucket_id, items).await?;
|
||||||
|
|
||||||
|
n
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(DeleteBatchResponse {
|
||||||
|
partition_key: query.partition_key,
|
||||||
|
prefix: query.prefix,
|
||||||
|
start: query.start,
|
||||||
|
end: query.end,
|
||||||
|
single_item: query.single_item,
|
||||||
|
deleted_items,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
struct InsertBatchItem {
|
||||||
|
pk: String,
|
||||||
|
sk: String,
|
||||||
|
ct: Option<String>,
|
||||||
|
v: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
struct ReadBatchQuery {
|
||||||
|
#[serde(rename = "partitionKey")]
|
||||||
|
partition_key: String,
|
||||||
|
#[serde(default)]
|
||||||
|
prefix: Option<String>,
|
||||||
|
#[serde(default)]
|
||||||
|
start: Option<String>,
|
||||||
|
#[serde(default)]
|
||||||
|
end: Option<String>,
|
||||||
|
#[serde(default)]
|
||||||
|
limit: Option<u64>,
|
||||||
|
#[serde(default)]
|
||||||
|
reverse: bool,
|
||||||
|
#[serde(default, rename = "singleItem")]
|
||||||
|
single_item: bool,
|
||||||
|
#[serde(default, rename = "conflictsOnly")]
|
||||||
|
conflicts_only: bool,
|
||||||
|
#[serde(default)]
|
||||||
|
tombstones: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize)]
|
||||||
|
struct ReadBatchResponse {
|
||||||
|
#[serde(rename = "partitionKey")]
|
||||||
|
partition_key: String,
|
||||||
|
prefix: Option<String>,
|
||||||
|
start: Option<String>,
|
||||||
|
end: Option<String>,
|
||||||
|
limit: Option<u64>,
|
||||||
|
reverse: bool,
|
||||||
|
#[serde(rename = "singleItem")]
|
||||||
|
single_item: bool,
|
||||||
|
#[serde(rename = "conflictsOnly")]
|
||||||
|
conflicts_only: bool,
|
||||||
|
tombstones: bool,
|
||||||
|
|
||||||
|
items: Vec<ReadBatchResponseItem>,
|
||||||
|
more: bool,
|
||||||
|
#[serde(rename = "nextStart")]
|
||||||
|
next_start: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize)]
|
||||||
|
struct ReadBatchResponseItem {
|
||||||
|
sk: String,
|
||||||
|
ct: String,
|
||||||
|
v: Vec<Option<String>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ReadBatchResponseItem {
|
||||||
|
fn from(i: K2VItem) -> Self {
|
||||||
|
let ct = i.causal_context().serialize();
|
||||||
|
let v = i
|
||||||
|
.values()
|
||||||
|
.iter()
|
||||||
|
.map(|v| match v {
|
||||||
|
DvvsValue::Value(x) => Some(base64::encode(x)),
|
||||||
|
DvvsValue::Deleted => None,
|
||||||
|
})
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
Self {
|
||||||
|
sk: i.sort_key,
|
||||||
|
ct,
|
||||||
|
v,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
struct DeleteBatchQuery {
|
||||||
|
#[serde(rename = "partitionKey")]
|
||||||
|
partition_key: String,
|
||||||
|
#[serde(default)]
|
||||||
|
prefix: Option<String>,
|
||||||
|
#[serde(default)]
|
||||||
|
start: Option<String>,
|
||||||
|
#[serde(default)]
|
||||||
|
end: Option<String>,
|
||||||
|
#[serde(default, rename = "singleItem")]
|
||||||
|
single_item: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize)]
|
||||||
|
struct DeleteBatchResponse {
|
||||||
|
#[serde(rename = "partitionKey")]
|
||||||
|
partition_key: String,
|
||||||
|
prefix: Option<String>,
|
||||||
|
start: Option<String>,
|
||||||
|
end: Option<String>,
|
||||||
|
#[serde(rename = "singleItem")]
|
||||||
|
single_item: bool,
|
||||||
|
|
||||||
|
#[serde(rename = "deletedItems")]
|
||||||
|
deleted_items: usize,
|
||||||
|
}
|
135
src/api/k2v/error.rs
Normal file
135
src/api/k2v/error.rs
Normal file
|
@ -0,0 +1,135 @@
|
||||||
|
use err_derive::Error;
|
||||||
|
use hyper::header::HeaderValue;
|
||||||
|
use hyper::{Body, HeaderMap, StatusCode};
|
||||||
|
|
||||||
|
use garage_model::helper::error::Error as HelperError;
|
||||||
|
|
||||||
|
use crate::common_error::CommonError;
|
||||||
|
pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInternalError};
|
||||||
|
use crate::generic_server::ApiError;
|
||||||
|
use crate::helpers::CustomApiErrorBody;
|
||||||
|
use crate::signature::error::Error as SignatureError;
|
||||||
|
|
||||||
|
/// Errors of this crate
#[derive(Debug, Error)]
pub enum Error {
	#[error(display = "{}", _0)]
	/// Error from common error
	Common(CommonError),

	// Category: cannot process
	/// Authorization Header Malformed
	#[error(display = "Authorization header malformed, expected scope: {}", _0)]
	AuthorizationHeaderMalformed(String),

	/// The requested object does not exist
	#[error(display = "Key not found")]
	NoSuchKey,

	/// Some base64 encoded data was badly encoded
	#[error(display = "Invalid base64: {}", _0)]
	InvalidBase64(#[error(source)] base64::DecodeError),

	/// The client sent a header with invalid value
	#[error(display = "Invalid header value: {}", _0)]
	InvalidHeader(#[error(source)] hyper::header::ToStrError),

	/// The client asked for an invalid return format (invalid Accept header)
	#[error(display = "Not acceptable: {}", _0)]
	NotAcceptable(String),

	/// The request contained an invalid UTF-8 sequence in its path or in other parameters
	#[error(display = "Invalid UTF-8: {}", _0)]
	InvalidUtf8Str(#[error(source)] std::str::Utf8Error),
}
|
||||||
|
|
||||||
|
impl<T> From<T> for Error
|
||||||
|
where
|
||||||
|
CommonError: From<T>,
|
||||||
|
{
|
||||||
|
fn from(err: T) -> Self {
|
||||||
|
Error::Common(CommonError::from(err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl CommonErrorDerivative for Error {}
|
||||||
|
|
||||||
|
impl From<HelperError> for Error {
	/// Map model-layer helper errors onto the common API error variants.
	fn from(err: HelperError) -> Self {
		match err {
			HelperError::Internal(i) => Self::Common(CommonError::InternalError(i)),
			HelperError::BadRequest(b) => Self::Common(CommonError::BadRequest(b)),
			HelperError::InvalidBucketName(n) => Self::Common(CommonError::InvalidBucketName(n)),
			HelperError::NoSuchBucket(n) => Self::Common(CommonError::NoSuchBucket(n)),
			// Any helper error not explicitly handled above is reported
			// to the client as a bad request
			e => Self::Common(CommonError::BadRequest(format!("{}", e))),
		}
	}
}
|
||||||
|
|
||||||
|
impl From<SignatureError> for Error {
	/// Lift request-signature errors into this crate's error type,
	/// preserving the specific variant where one exists.
	fn from(err: SignatureError) -> Self {
		match err {
			SignatureError::Common(c) => Self::Common(c),
			SignatureError::AuthorizationHeaderMalformed(c) => {
				Self::AuthorizationHeaderMalformed(c)
			}
			SignatureError::InvalidUtf8Str(i) => Self::InvalidUtf8Str(i),
			SignatureError::InvalidHeader(h) => Self::InvalidHeader(h),
		}
	}
}
|
||||||
|
|
||||||
|
impl Error {
	/// This returns a keyword for the corresponding error.
	/// Here, these keywords are not necessarily those from AWS S3,
	/// as we are building a custom API
	fn code(&self) -> &'static str {
		match self {
			// Common errors reuse the AWS-style code they already define
			Error::Common(c) => c.aws_code(),
			Error::NoSuchKey => "NoSuchKey",
			Error::NotAcceptable(_) => "NotAcceptable",
			Error::AuthorizationHeaderMalformed(_) => "AuthorizationHeaderMalformed",
			Error::InvalidBase64(_) => "InvalidBase64",
			Error::InvalidHeader(_) => "InvalidHeaderValue",
			Error::InvalidUtf8Str(_) => "InvalidUtf8String",
		}
	}
}
|
||||||
|
|
||||||
|
impl ApiError for Error {
	/// Get the HTTP status code that best represents the meaning of the error for the client
	fn http_status_code(&self) -> StatusCode {
		match self {
			Error::Common(c) => c.http_status_code(),
			Error::NoSuchKey => StatusCode::NOT_FOUND,
			Error::NotAcceptable(_) => StatusCode::NOT_ACCEPTABLE,
			// All remaining variants describe malformed client input
			Error::AuthorizationHeaderMalformed(_)
			| Error::InvalidBase64(_)
			| Error::InvalidHeader(_)
			| Error::InvalidUtf8Str(_) => StatusCode::BAD_REQUEST,
		}
	}

	/// All K2V error responses carry a JSON body
	fn add_http_headers(&self, header_map: &mut HeaderMap<HeaderValue>) {
		use hyper::header;
		header_map.append(header::CONTENT_TYPE, "application/json".parse().unwrap());
	}

	/// Build the JSON error body. If JSON encoding of the error itself
	/// fails, fall back to a hard-coded InternalError document.
	fn http_body(&self, garage_region: &str, path: &str) -> Body {
		let error = CustomApiErrorBody {
			code: self.code().to_string(),
			message: format!("{}", self),
			path: path.to_string(),
			region: garage_region.to_string(),
		};
		Body::from(serde_json::to_string_pretty(&error).unwrap_or_else(|_| {
			r#"
{
	"code": "InternalError",
	"message": "JSON encoding of error failed"
}
			"#
			.into()
		}))
	}
}
|
100
src/api/k2v/index.rs
Normal file
100
src/api/k2v/index.rs
Normal file
|
@ -0,0 +1,100 @@
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use hyper::{Body, Response, StatusCode};
|
||||||
|
use serde::Serialize;
|
||||||
|
|
||||||
|
use garage_util::data::*;
|
||||||
|
use garage_util::error::Error as GarageError;
|
||||||
|
|
||||||
|
use garage_rpc::ring::Ring;
|
||||||
|
use garage_table::util::*;
|
||||||
|
|
||||||
|
use garage_model::garage::Garage;
|
||||||
|
use garage_model::k2v::counter_table::{BYTES, CONFLICTS, ENTRIES, VALUES};
|
||||||
|
|
||||||
|
use crate::k2v::error::*;
|
||||||
|
use crate::k2v::range::read_range;
|
||||||
|
|
||||||
|
/// Handle a ReadIndex request: enumerate the partition keys of a bucket
/// together with their aggregated counters (entries, conflicts, values,
/// bytes), honoring the prefix/start/end/limit/reverse range parameters.
pub async fn handle_read_index(
	garage: Arc<Garage>,
	bucket_id: Uuid,
	prefix: Option<String>,
	start: Option<String>,
	end: Option<String>,
	limit: Option<u64>,
	reverse: Option<bool>,
) -> Result<Response<Body>, Error> {
	let reverse = reverse.unwrap_or(false);

	// Snapshot the current ring layout; used below to filter counter values
	let ring: Arc<Ring> = garage.system.ring.borrow().clone();

	// Partition keys are enumerated from the counter table, one counter
	// entry per (bucket, partition key)
	let (partition_keys, more, next_start) = read_range(
		&garage.k2v.counter_table.table,
		&bucket_id,
		&prefix,
		&start,
		&end,
		limit,
		Some((DeletedFilter::NotDeleted, ring.layout.node_id_vec.clone())),
		EnumerationOrder::from_reverse(reverse),
	)
	.await?;

	// Counter names used as lookup keys in the per-partition value maps
	let s_entries = ENTRIES.to_string();
	let s_conflicts = CONFLICTS.to_string();
	let s_values = VALUES.to_string();
	let s_bytes = BYTES.to_string();

	let resp = ReadIndexResponse {
		prefix,
		start,
		end,
		limit,
		reverse,
		partition_keys: partition_keys
			.into_iter()
			.map(|part| {
				// Counter values filtered against the current ring;
				// absent counters default to 0
				let vals = part.filtered_values(&ring);
				ReadIndexResponseEntry {
					pk: part.sk,
					entries: *vals.get(&s_entries).unwrap_or(&0),
					conflicts: *vals.get(&s_conflicts).unwrap_or(&0),
					values: *vals.get(&s_values).unwrap_or(&0),
					bytes: *vals.get(&s_bytes).unwrap_or(&0),
				}
			})
			.collect::<Vec<_>>(),
		more,
		next_start,
	};

	let resp_json = serde_json::to_string_pretty(&resp).map_err(GarageError::from)?;
	Ok(Response::builder()
		.status(StatusCode::OK)
		.body(Body::from(resp_json))?)
}
|
||||||
|
|
||||||
|
/// Response to a ReadIndex call: echoes the range parameters and lists
/// the partition keys of the bucket with their counters.
#[derive(Serialize)]
struct ReadIndexResponse {
	// Range parameters, echoed back to the client
	prefix: Option<String>,
	start: Option<String>,
	end: Option<String>,
	limit: Option<u64>,
	reverse: bool,

	// One entry per enumerated partition key
	#[serde(rename = "partitionKeys")]
	partition_keys: Vec<ReadIndexResponseEntry>,

	// True if the enumeration stopped because of `limit` and more
	// partition keys remain; `next_start` is then the resumption point
	more: bool,
	#[serde(rename = "nextStart")]
	next_start: Option<String>,
}
|
||||||
|
|
||||||
|
/// Per-partition-key counters returned by ReadIndex.
#[derive(Serialize)]
struct ReadIndexResponseEntry {
	// The partition key
	pk: String,
	// Number of items stored under this partition key
	entries: i64,
	// Number of items currently in a conflicting (multi-value) state
	conflicts: i64,
	// Total number of values across all items
	values: i64,
	// Total size in bytes of all values
	bytes: i64,
}
|
230
src/api/k2v/item.rs
Normal file
230
src/api/k2v/item.rs
Normal file
|
@ -0,0 +1,230 @@
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use http::header;
|
||||||
|
|
||||||
|
use hyper::{Body, Request, Response, StatusCode};
|
||||||
|
|
||||||
|
use garage_util::data::*;
|
||||||
|
|
||||||
|
use garage_model::garage::Garage;
|
||||||
|
use garage_model::k2v::causality::*;
|
||||||
|
use garage_model::k2v::item_table::*;
|
||||||
|
|
||||||
|
use crate::k2v::error::*;
|
||||||
|
|
||||||
|
/// Header carrying the causality token of an item, on both reads
/// (returned to the client) and writes (supplied by the client).
pub const X_GARAGE_CAUSALITY_TOKEN: &str = "X-Garage-Causality-Token";

/// Response format negotiated from the request's Accept header.
pub enum ReturnFormat {
	// Always answer in JSON (also the default when no Accept header is sent)
	Json,
	// Answer with the raw value as application/octet-stream
	Binary,
	// Client accepts both; the choice depends on the number of
	// concurrent values (see `make_response`)
	Either,
}
|
||||||
|
|
||||||
|
impl ReturnFormat {
	/// Determine the response format from the request's Accept header.
	/// A missing Accept header defaults to JSON.
	pub fn from(req: &Request<Body>) -> Result<Self, Error> {
		let accept = match req.headers().get(header::ACCEPT) {
			Some(a) => a.to_str()?,
			None => return Ok(Self::Json),
		};

		// Accept is a comma-separated list of media types; `*/*` allows both
		let accept = accept.split(',').map(|s| s.trim()).collect::<Vec<_>>();
		let accept_json = accept.contains(&"application/json") || accept.contains(&"*/*");
		let accept_binary = accept.contains(&"application/octet-stream") || accept.contains(&"*/*");

		match (accept_json, accept_binary) {
			(true, true) => Ok(Self::Either),
			(true, false) => Ok(Self::Json),
			(false, true) => Ok(Self::Binary),
			(false, false) => Err(Error::NotAcceptable("Invalid Accept: header value, must contain either application/json or application/octet-stream (or both)".into())),
		}
	}

	/// Build the HTTP response for an item in this format. The item's
	/// causality token is always returned in the
	/// X-Garage-Causality-Token header.
	pub fn make_response(&self, item: &K2VItem) -> Result<Response<Body>, Error> {
		let vals = item.values();

		// An item with no values does not exist from the client's viewpoint
		if vals.is_empty() {
			return Err(Error::NoSuchKey);
		}

		let ct = item.causal_context().serialize();
		match self {
			// The binary format cannot represent several concurrent values:
			// answer 409 Conflict with only the causality token
			Self::Binary if vals.len() > 1 => Ok(Response::builder()
				.header(X_GARAGE_CAUSALITY_TOKEN, ct)
				.status(StatusCode::CONFLICT)
				.body(Body::empty())?),
			Self::Binary => {
				assert!(vals.len() == 1);
				Self::make_binary_response(ct, vals[0])
			}
			// With exactly one value, binary is preferred when both are accepted
			Self::Either if vals.len() == 1 => Self::make_binary_response(ct, vals[0]),
			_ => Self::make_json_response(ct, &vals[..]),
		}
	}

	/// Single-value binary response: the raw bytes for a value,
	/// or 204 No Content for a tombstone.
	fn make_binary_response(ct: String, v: &DvvsValue) -> Result<Response<Body>, Error> {
		match v {
			DvvsValue::Deleted => Ok(Response::builder()
				.header(X_GARAGE_CAUSALITY_TOKEN, ct)
				.header(header::CONTENT_TYPE, "application/octet-stream")
				.status(StatusCode::NO_CONTENT)
				.body(Body::empty())?),
			DvvsValue::Value(v) => Ok(Response::builder()
				.header(X_GARAGE_CAUSALITY_TOKEN, ct)
				.header(header::CONTENT_TYPE, "application/octet-stream")
				.status(StatusCode::OK)
				.body(Body::from(v.to_vec()))?),
		}
	}

	/// JSON response: an array with one element per concurrent value,
	/// base64-encoded, with `null` for tombstones.
	fn make_json_response(ct: String, v: &[&DvvsValue]) -> Result<Response<Body>, Error> {
		let items = v
			.iter()
			.map(|v| match v {
				DvvsValue::Deleted => serde_json::Value::Null,
				DvvsValue::Value(v) => serde_json::Value::String(base64::encode(v)),
			})
			.collect::<Vec<_>>();
		let json_body =
			serde_json::to_string_pretty(&items).ok_or_internal_error("JSON encoding error")?;
		Ok(Response::builder()
			.header(X_GARAGE_CAUSALITY_TOKEN, ct)
			.header(header::CONTENT_TYPE, "application/json")
			.status(StatusCode::OK)
			.body(Body::from(json_body))?)
	}
}
|
||||||
|
|
||||||
|
/// Handle ReadItem request
// NOTE(review): `sort_key` is `&String` rather than `&str`, presumably
// because `Table::get` below expects a reference to the schema's sort-key
// type — hence the clippy::ptr_arg allow. Confirm before changing.
#[allow(clippy::ptr_arg)]
pub async fn handle_read_item(
	garage: Arc<Garage>,
	req: &Request<Body>,
	bucket_id: Uuid,
	partition_key: &str,
	sort_key: &String,
) -> Result<Response<Body>, Error> {
	// Negotiate the response format before doing any work
	let format = ReturnFormat::from(req)?;

	let item = garage
		.k2v
		.item_table
		.get(
			&K2VItemPartition {
				bucket_id,
				partition_key: partition_key.to_string(),
			},
			sort_key,
		)
		.await?
		.ok_or(Error::NoSuchKey)?;

	format.make_response(&item)
}
|
||||||
|
|
||||||
|
/// Handle an InsertItem request: store the request body as the item's
/// value, under the causality token optionally supplied by the client
/// in the X-Garage-Causality-Token header.
pub async fn handle_insert_item(
	garage: Arc<Garage>,
	req: Request<Body>,
	bucket_id: Uuid,
	partition_key: &str,
	sort_key: &str,
) -> Result<Response<Body>, Error> {
	// Absent header -> None (unconditional insert);
	// present but unparseable header -> 400 Bad Request
	let causal_context = req
		.headers()
		.get(X_GARAGE_CAUSALITY_TOKEN)
		.map(|s| s.to_str())
		.transpose()?
		.map(CausalContext::parse)
		.transpose()
		.ok_or_bad_request("Invalid causality token")?;

	// The raw request body is the value to store
	let body = hyper::body::to_bytes(req.into_body()).await?;
	let value = DvvsValue::Value(body.to_vec());

	garage
		.k2v
		.rpc
		.insert(
			bucket_id,
			partition_key.to_string(),
			sort_key.to_string(),
			causal_context,
			value,
		)
		.await?;

	Ok(Response::builder()
		.status(StatusCode::OK)
		.body(Body::empty())?)
}
|
||||||
|
|
||||||
|
/// Handle a DeleteItem request. Deletion is implemented as the insertion
/// of a tombstone value through the same RPC path as InsertItem, under
/// the causality token optionally supplied by the client.
pub async fn handle_delete_item(
	garage: Arc<Garage>,
	req: Request<Body>,
	bucket_id: Uuid,
	partition_key: &str,
	sort_key: &str,
) -> Result<Response<Body>, Error> {
	// Absent header -> None (unconditional delete);
	// present but unparseable header -> 400 Bad Request
	let causal_context = req
		.headers()
		.get(X_GARAGE_CAUSALITY_TOKEN)
		.map(|s| s.to_str())
		.transpose()?
		.map(CausalContext::parse)
		.transpose()
		.ok_or_bad_request("Invalid causality token")?;

	// A deletion is an insert of the Deleted marker
	let value = DvvsValue::Deleted;

	garage
		.k2v
		.rpc
		.insert(
			bucket_id,
			partition_key.to_string(),
			sort_key.to_string(),
			causal_context,
			value,
		)
		.await?;

	Ok(Response::builder()
		.status(StatusCode::NO_CONTENT)
		.body(Body::empty())?)
}
|
||||||
|
|
||||||
|
/// Handle ReadItem request
|
||||||
|
#[allow(clippy::ptr_arg)]
|
||||||
|
pub async fn handle_poll_item(
|
||||||
|
garage: Arc<Garage>,
|
||||||
|
req: &Request<Body>,
|
||||||
|
bucket_id: Uuid,
|
||||||
|
partition_key: String,
|
||||||
|
sort_key: String,
|
||||||
|
causality_token: String,
|
||||||
|
timeout_secs: Option<u64>,
|
||||||
|
) -> Result<Response<Body>, Error> {
|
||||||
|
let format = ReturnFormat::from(req)?;
|
||||||
|
|
||||||
|
let causal_context =
|
||||||
|
CausalContext::parse(&causality_token).ok_or_bad_request("Invalid causality token")?;
|
||||||
|
|
||||||
|
let item = garage
|
||||||
|
.k2v
|
||||||
|
.rpc
|
||||||
|
.poll(
|
||||||
|
bucket_id,
|
||||||
|
partition_key,
|
||||||
|
sort_key,
|
||||||
|
causal_context,
|
||||||
|
timeout_secs.unwrap_or(300) * 1000,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
if let Some(item) = item {
|
||||||
|
format.make_response(&item)
|
||||||
|
} else {
|
||||||
|
Ok(Response::builder()
|
||||||
|
.status(StatusCode::NOT_MODIFIED)
|
||||||
|
.body(Body::empty())?)
|
||||||
|
}
|
||||||
|
}
|
9
src/api/k2v/mod.rs
Normal file
9
src/api/k2v/mod.rs
Normal file
|
@ -0,0 +1,9 @@
|
||||||
|
// Entry point of the K2V API server
pub mod api_server;
// K2V-specific error type
mod error;
// Request routing (Endpoint parsing)
mod router;

// Handlers for the batch endpoints (InsertBatch, ReadBatch, DeleteBatch)
mod batch;
// Handler for the ReadIndex endpoint
mod index;
// Handlers for single-item endpoints (ReadItem, InsertItem, DeleteItem, PollItem)
mod item;

// Shared range-enumeration helper (prefix/start/end/limit)
mod range;
|
100
src/api/k2v/range.rs
Normal file
100
src/api/k2v/range.rs
Normal file
|
@ -0,0 +1,100 @@
|
||||||
|
//! Utility module for retrieving ranges of items in Garage tables
|
||||||
|
//! Implements parameters (prefix, start, end, limit) as specified
|
||||||
|
//! for endpoints ReadIndex, ReadBatch and DeleteBatch
|
||||||
|
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use garage_table::replication::TableShardedReplication;
|
||||||
|
use garage_table::*;
|
||||||
|
|
||||||
|
use crate::helpers::key_after_prefix;
|
||||||
|
use crate::k2v::error::*;
|
||||||
|
|
||||||
|
/// Read range in a Garage table.
/// Returns (entries, more?, nextStart)
#[allow(clippy::too_many_arguments)]
pub(crate) async fn read_range<F>(
	table: &Arc<Table<F, TableShardedReplication>>,
	partition_key: &F::P,
	prefix: &Option<String>,
	start: &Option<String>,
	end: &Option<String>,
	limit: Option<u64>,
	filter: Option<F::Filter>,
	enumeration_order: EnumerationOrder,
) -> Result<(Vec<F::E>, bool, Option<String>), Error>
where
	F: TableSchema<S = String> + 'static,
{
	// Determine the initial enumeration position. `start_ignore` is set when
	// the position itself must be skipped rather than returned (e.g. it is
	// a synthetic key just past the requested prefix, or a resumption key
	// that was already returned).
	let (mut start, mut start_ignore) = match (prefix, start) {
		(None, None) => (None, false),
		(None, Some(s)) => (Some(s.clone()), false),
		(Some(p), Some(s)) => {
			// An explicit start must be consistent with the prefix
			if !s.starts_with(p) {
				return Err(Error::bad_request(format!(
					"Start key '{}' does not start with prefix '{}'",
					s, p
				)));
			}
			(Some(s.clone()), false)
		}
		(Some(p), None) if enumeration_order == EnumerationOrder::Reverse => {
			// Reverse enumeration of a prefix starts just after its last key
			let start = key_after_prefix(p)
				.ok_or_internal_error("Sorry, can't list this prefix in reverse order")?;
			(Some(start), true)
		}
		(Some(p), None) => (Some(p.clone()), false),
	};

	let mut entries = vec![];
	loop {
		// Fetch pages of at most 1000 entries. We ask for slightly more than
		// the remaining limit (+2) so that we can tell whether entries remain
		// past the limit; the unwrap_or value is just "effectively unlimited".
		let n_get = std::cmp::min(
			1000,
			limit.map(|x| x as usize).unwrap_or(usize::MAX - 10) - entries.len() + 2,
		);
		let get_ret = table
			.get_range(
				partition_key,
				start.clone(),
				filter.clone(),
				n_get,
				enumeration_order,
			)
			.await?;

		let get_ret_len = get_ret.len();

		for entry in get_ret {
			// Skip the resumption key itself if it was already handled
			if start_ignore && Some(entry.sort_key()) == start.as_ref() {
				continue;
			}
			// Leaving the prefix means the enumeration is complete
			if let Some(p) = prefix {
				if !entry.sort_key().starts_with(p) {
					return Ok((entries, false, None));
				}
			}
			// `end` bounds the range; the comparison direction follows
			// the enumeration order
			if let Some(e) = end {
				let is_finished = match enumeration_order {
					EnumerationOrder::Forward => entry.sort_key() >= e,
					EnumerationOrder::Reverse => entry.sort_key() <= e,
				};
				if is_finished {
					return Ok((entries, false, None));
				}
			}
			// Limit reached: report that more entries exist and where to resume
			if let Some(l) = limit {
				if entries.len() >= l as usize {
					return Ok((entries, true, Some(entry.sort_key().clone())));
				}
			}
			entries.push(entry);
		}

		// A short page means the table has no further entries
		if get_ret_len < n_get {
			return Ok((entries, false, None));
		}

		// Resume the next page just after the last entry we kept
		start = Some(entries.last().unwrap().sort_key().clone());
		start_ignore = true;
	}
}
|
252
src/api/k2v/router.rs
Normal file
252
src/api/k2v/router.rs
Normal file
|
@ -0,0 +1,252 @@
|
||||||
|
use crate::k2v::error::*;
|
||||||
|
|
||||||
|
use std::borrow::Cow;
|
||||||
|
|
||||||
|
use hyper::{Method, Request};
|
||||||
|
|
||||||
|
use crate::helpers::Authorization;
|
||||||
|
use crate::router_macros::{generateQueryParameters, router_match};
|
||||||
|
|
||||||
|
// The router_match!{@func ...} wrapper generates helper code for the enum
// it encloses (see router_macros.rs).
router_match! {@func

/// List of all K2V API endpoints.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Endpoint {
	DeleteBatch {
	},
	DeleteItem {
		partition_key: String,
		sort_key: String,
	},
	InsertBatch {
	},
	InsertItem {
		partition_key: String,
		sort_key: String,
	},
	// CORS preflight request
	Options,
	// Long-poll for a change of an item past the given causality token
	PollItem {
		partition_key: String,
		sort_key: String,
		causality_token: String,
		timeout: Option<u64>,
	},
	ReadBatch {
	},
	// Enumerate partition keys of a bucket with their counters
	ReadIndex {
		prefix: Option<String>,
		start: Option<String>,
		end: Option<String>,
		limit: Option<u64>,
		reverse: Option<bool>,
	},
	ReadItem {
		partition_key: String,
		sort_key: String,
	},
}}
|
||||||
|
|
||||||
|
impl Endpoint {
	/// Determine which K2V endpoint a request is for using the request, and a bucket which was
	/// possibly extracted from the Host header.
	/// Returns Self plus the bucket name.
	pub fn from_request<T>(req: &Request<T>) -> Result<(Self, String), Error> {
		let uri = req.uri();
		let path = uri.path().trim_start_matches('/');
		let query = uri.query();

		// The path has the form <bucket>/<partition key>
		let (bucket, partition_key) = path
			.split_once('/')
			.map(|(b, p)| (b.to_owned(), p.trim_start_matches('/')))
			.unwrap_or((path.to_owned(), ""));

		if bucket.is_empty() {
			return Err(Error::bad_request("Missing bucket name"));
		}

		// OPTIONS (CORS preflight) is answered regardless of the rest of the path
		if *req.method() == Method::OPTIONS {
			return Ok((Self::Options, bucket));
		}

		let partition_key = percent_encoding::percent_decode_str(partition_key)
			.decode_utf8()?
			.into_owned();

		let mut query = QueryParameters::from_query(query.unwrap_or_default())?;

		// SEARCH is a non-standard method, so it cannot appear as a pattern
		// in the match below; it is handled through a guard instead
		let method_search = Method::from_bytes(b"SEARCH").unwrap();
		let res = match *req.method() {
			Method::GET => Self::from_get(partition_key, &mut query)?,
			//&Method::HEAD => Self::from_head(partition_key, &mut query)?,
			Method::POST => Self::from_post(partition_key, &mut query)?,
			Method::PUT => Self::from_put(partition_key, &mut query)?,
			Method::DELETE => Self::from_delete(partition_key, &mut query)?,
			_ if req.method() == method_search => Self::from_search(partition_key, &mut query)?,
			_ => return Err(Error::bad_request("Unknown method")),
		};

		if let Some(message) = query.nonempty_message() {
			debug!("Unused query parameter: {}", message)
		}
		Ok((res, bucket))
	}

	/// Determine which endpoint a request is for, knowing it is a GET.
	fn from_get(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
		router_match! {
			@gen_parser
			(query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None),
			key: [
				EMPTY if causality_token => PollItem (query::sort_key, query::causality_token, opt_parse::timeout),
				EMPTY => ReadItem (query::sort_key),
			],
			no_key: [
				EMPTY => ReadIndex (query_opt::prefix, query_opt::start, query_opt::end, opt_parse::limit, opt_parse::reverse),
			]
		}
	}

	/// Determine which endpoint a request is for, knowing it is a SEARCH.
	fn from_search(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
		router_match! {
			@gen_parser
			(query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None),
			key: [
			],
			no_key: [
				EMPTY => ReadBatch,
			]
		}
	}

	/*
	/// Determine which endpoint a request is for, knowing it is a HEAD.
	fn from_head(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
		router_match! {
			@gen_parser
			(query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None),
			key: [
				EMPTY => HeadObject(opt_parse::part_number, query_opt::version_id),
			],
			no_key: [
				EMPTY => HeadBucket,
			]
		}
	}
	*/

	/// Determine which endpoint a request is for, knowing it is a POST.
	fn from_post(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
		router_match! {
			@gen_parser
			(query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None),
			key: [
			],
			no_key: [
				EMPTY => InsertBatch,
				DELETE => DeleteBatch,
				SEARCH => ReadBatch,
			]
		}
	}

	/// Determine which endpoint a request is for, knowing it is a PUT.
	fn from_put(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
		router_match! {
			@gen_parser
			(query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None),
			key: [
				EMPTY => InsertItem (query::sort_key),
			],
			no_key: [
			]
		}
	}

	/// Determine which endpoint a request is for, knowing it is a DELETE.
	fn from_delete(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
		router_match! {
			@gen_parser
			(query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None),
			key: [
				EMPTY => DeleteItem (query::sort_key),
			],
			no_key: [
			]
		}
	}

	/// Get the partition key the request target. Returns None for requests which don't use a partition key.
	#[allow(dead_code)]
	pub fn get_partition_key(&self) -> Option<&str> {
		router_match! {
			@extract
			self,
			partition_key,
			[
				DeleteItem,
				InsertItem,
				PollItem,
				ReadItem,
			]
		}
	}

	/// Get the sort key the request target. Returns None for requests which don't use a sort key.
	#[allow(dead_code)]
	pub fn get_sort_key(&self) -> Option<&str> {
		router_match! {
			@extract
			self,
			sort_key,
			[
				DeleteItem,
				InsertItem,
				PollItem,
				ReadItem,
			]
		}
	}

	/// Get the kind of authorization which is required to perform the operation.
	pub fn authorization_type(&self) -> Authorization {
		// Read-only endpoints need Read authorization; everything else Write
		let readonly = router_match! {
			@match
			self,
			[
				PollItem,
				ReadBatch,
				ReadIndex,
				ReadItem,
			]
		};
		if readonly {
			Authorization::Read
		} else {
			Authorization::Write
		}
	}
}
|
||||||
|
|
||||||
|
// Generates the QueryParameters struct used by the parsers above.
// parameter name => struct field
generateQueryParameters! {
	"prefix" => prefix,
	"start" => start,
	"causality_token" => causality_token,
	"end" => end,
	"limit" => limit,
	"reverse" => reverse,
	"sort_key" => sort_key,
	"timeout" => timeout
}
|
||||||
|
|
||||||
|
mod keywords {
	//! This module contains all query parameters with no associated value
	//! used to differentiate endpoints.
	pub const EMPTY: &str = "";

	pub const DELETE: &str = "delete";
	pub const SEARCH: &str = "search";
}
|
|
@ -2,26 +2,16 @@
|
||||||
#[macro_use]
|
#[macro_use]
|
||||||
extern crate tracing;
|
extern crate tracing;
|
||||||
|
|
||||||
pub mod error;
|
pub mod common_error;
|
||||||
pub use error::Error;
|
|
||||||
|
|
||||||
mod encoding;
|
mod encoding;
|
||||||
|
pub mod generic_server;
|
||||||
mod api_server;
|
pub mod helpers;
|
||||||
pub use api_server::run_api_server;
|
mod router_macros;
|
||||||
|
|
||||||
/// This mode is public only to help testing. Don't expect stability here
|
/// This mode is public only to help testing. Don't expect stability here
|
||||||
pub mod signature;
|
pub mod signature;
|
||||||
|
|
||||||
pub mod helpers;
|
pub mod admin;
|
||||||
mod s3_bucket;
|
#[cfg(feature = "k2v")]
|
||||||
mod s3_copy;
|
pub mod k2v;
|
||||||
pub mod s3_cors;
|
pub mod s3;
|
||||||
mod s3_delete;
|
|
||||||
pub mod s3_get;
|
|
||||||
mod s3_list;
|
|
||||||
mod s3_post_object;
|
|
||||||
mod s3_put;
|
|
||||||
mod s3_router;
|
|
||||||
mod s3_website;
|
|
||||||
mod s3_xml;
|
|
||||||
|
|
213
src/api/router_macros.rs
Normal file
213
src/api/router_macros.rs
Normal file
|
@ -0,0 +1,213 @@
|
||||||
|
/// This macro is used to generate very repetitive match {} blocks in this module
|
||||||
|
/// It is _not_ made to be used anywhere else
|
||||||
|
macro_rules! router_match {
|
||||||
|
(@match $enum:expr , [ $($endpoint:ident,)* ]) => {{
|
||||||
|
// usage: router_match {@match my_enum, [ VariantWithField1, VariantWithField2 ..] }
|
||||||
|
// returns true if the variant was one of the listed variants, false otherwise.
|
||||||
|
use Endpoint::*;
|
||||||
|
match $enum {
|
||||||
|
$(
|
||||||
|
$endpoint { .. } => true,
|
||||||
|
)*
|
||||||
|
_ => false
|
||||||
|
}
|
||||||
|
}};
|
||||||
|
(@extract $enum:expr , $param:ident, [ $($endpoint:ident,)* ]) => {{
|
||||||
|
// usage: router_match {@extract my_enum, field_name, [ VariantWithField1, VariantWithField2 ..] }
|
||||||
|
// returns Some(field_value), or None if the variant was not one of the listed variants.
|
||||||
|
use Endpoint::*;
|
||||||
|
match $enum {
|
||||||
|
$(
|
||||||
|
$endpoint {$param, ..} => Some($param),
|
||||||
|
)*
|
||||||
|
_ => None
|
||||||
|
}
|
||||||
|
}};
|
||||||
|
(@gen_path_parser ($method:expr, $reqpath:expr, $query:expr)
|
||||||
|
[
|
||||||
|
$($meth:ident $path:pat $(if $required:ident)? => $api:ident $(($($conv:ident :: $param:ident),*))?,)*
|
||||||
|
]) => {{
|
||||||
|
{
|
||||||
|
use Endpoint::*;
|
||||||
|
match ($method, $reqpath) {
|
||||||
|
$(
|
||||||
|
(&Method::$meth, $path) if true $(&& $query.$required.is_some())? => $api {
|
||||||
|
$($(
|
||||||
|
$param: router_match!(@@parse_param $query, $conv, $param),
|
||||||
|
)*)?
|
||||||
|
},
|
||||||
|
)*
|
||||||
|
(m, p) => {
|
||||||
|
return Err(Error::bad_request(format!(
|
||||||
|
"Unknown API endpoint: {} {}",
|
||||||
|
m, p
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}};
|
||||||
|
(@gen_parser ($keyword:expr, $key:ident, $query:expr, $header:expr),
|
||||||
|
key: [$($kw_k:ident $(if $required_k:ident)? $(header $header_k:expr)? => $api_k:ident $(($($conv_k:ident :: $param_k:ident),*))?,)*],
|
||||||
|
no_key: [$($kw_nk:ident $(if $required_nk:ident)? $(if_header $header_nk:expr)? => $api_nk:ident $(($($conv_nk:ident :: $param_nk:ident),*))?,)*]) => {{
|
||||||
|
// usage: router_match {@gen_parser (keyword, key, query, header),
|
||||||
|
// key: [
|
||||||
|
// SOME_KEYWORD => VariantWithKey,
|
||||||
|
// ...
|
||||||
|
// ],
|
||||||
|
// no_key: [
|
||||||
|
// SOME_KEYWORD => VariantWithoutKey,
|
||||||
|
// ...
|
||||||
|
// ]
|
||||||
|
// }
|
||||||
|
// See in from_{method} for more detailed usage.
|
||||||
|
use Endpoint::*;
|
||||||
|
use keywords::*;
|
||||||
|
match ($keyword, !$key.is_empty()){
|
||||||
|
$(
|
||||||
|
($kw_k, true) if true $(&& $query.$required_k.is_some())? $(&& $header.contains_key($header_k))? => Ok($api_k {
|
||||||
|
$key,
|
||||||
|
$($(
|
||||||
|
$param_k: router_match!(@@parse_param $query, $conv_k, $param_k),
|
||||||
|
)*)?
|
||||||
|
}),
|
||||||
|
)*
|
||||||
|
$(
|
||||||
|
($kw_nk, false) $(if $query.$required_nk.is_some())? $(if $header.contains($header_nk))? => Ok($api_nk {
|
||||||
|
$($(
|
||||||
|
$param_nk: router_match!(@@parse_param $query, $conv_nk, $param_nk),
|
||||||
|
)*)?
|
||||||
|
}),
|
||||||
|
)*
|
||||||
|
(kw, _) => Err(Error::bad_request(format!("Invalid endpoint: {}", kw)))
|
||||||
|
}
|
||||||
|
}};
|
||||||
|
|
||||||
|
(@@parse_param $query:expr, query_opt, $param:ident) => {{
|
||||||
|
// extract optional query parameter
|
||||||
|
$query.$param.take().map(|param| param.into_owned())
|
||||||
|
}};
|
||||||
|
(@@parse_param $query:expr, query, $param:ident) => {{
|
||||||
|
// extract mendatory query parameter
|
||||||
|
$query.$param.take().ok_or_bad_request("Missing argument for endpoint")?.into_owned()
|
||||||
|
}};
|
||||||
|
(@@parse_param $query:expr, opt_parse, $param:ident) => {{
|
||||||
|
// extract and parse optional query parameter
|
||||||
|
// missing parameter is file, however parse error is reported as an error
|
||||||
|
$query.$param
|
||||||
|
.take()
|
||||||
|
.map(|param| param.parse())
|
||||||
|
.transpose()
|
||||||
|
.map_err(|_| Error::bad_request("Failed to parse query parameter"))?
|
||||||
|
}};
|
||||||
|
(@@parse_param $query:expr, parse, $param:ident) => {{
|
||||||
|
// extract and parse mandatory query parameter
|
||||||
|
// both missing and un-parseable parameters are reported as errors
|
||||||
|
$query.$param.take().ok_or_bad_request("Missing argument for endpoint")?
|
||||||
|
.parse()
|
||||||
|
.map_err(|_| Error::bad_request("Failed to parse query parameter"))?
|
||||||
|
}};
|
||||||
|
(@func
|
||||||
|
$(#[$doc:meta])*
|
||||||
|
pub enum Endpoint {
|
||||||
|
$(
|
||||||
|
$(#[$outer:meta])*
|
||||||
|
$variant:ident $({
|
||||||
|
$($name:ident: $ty:ty,)*
|
||||||
|
})?,
|
||||||
|
)*
|
||||||
|
}) => {
|
||||||
|
$(#[$doc])*
|
||||||
|
pub enum Endpoint {
|
||||||
|
$(
|
||||||
|
$(#[$outer])*
|
||||||
|
$variant $({
|
||||||
|
$($name: $ty, )*
|
||||||
|
})?,
|
||||||
|
)*
|
||||||
|
}
|
||||||
|
impl Endpoint {
|
||||||
|
pub fn name(&self) -> &'static str {
|
||||||
|
match self {
|
||||||
|
$(Endpoint::$variant $({ $($name: _,)* .. })? => stringify!($variant),)*
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
(@if ($($cond:tt)+) then ($($then:tt)*) else ($($else:tt)*)) => {
|
||||||
|
$($then)*
|
||||||
|
};
|
||||||
|
(@if () then ($($then:tt)*) else ($($else:tt)*)) => {
|
||||||
|
$($else)*
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/// This macro is used to generate part of the code in this module. It must be called only one, and
|
||||||
|
/// is useless outside of this module.
|
||||||
|
macro_rules! generateQueryParameters {
|
||||||
|
( $($rest:expr => $name:ident),* ) => {
|
||||||
|
/// Struct containing all query parameters used in endpoints. Think of it as an HashMap,
|
||||||
|
/// but with keys statically known.
|
||||||
|
#[derive(Debug, Default)]
|
||||||
|
struct QueryParameters<'a> {
|
||||||
|
keyword: Option<Cow<'a, str>>,
|
||||||
|
$(
|
||||||
|
$name: Option<Cow<'a, str>>,
|
||||||
|
)*
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<'a> QueryParameters<'a> {
|
||||||
|
/// Build this struct from the query part of an URI.
|
||||||
|
fn from_query(query: &'a str) -> Result<Self, Error> {
|
||||||
|
let mut res: Self = Default::default();
|
||||||
|
for (k, v) in url::form_urlencoded::parse(query.as_bytes()) {
|
||||||
|
let repeated = match k.as_ref() {
|
||||||
|
$(
|
||||||
|
$rest => if !v.is_empty() {
|
||||||
|
res.$name.replace(v).is_some()
|
||||||
|
} else {
|
||||||
|
false
|
||||||
|
},
|
||||||
|
)*
|
||||||
|
_ => {
|
||||||
|
if k.starts_with("response-") || k.starts_with("X-Amz-") {
|
||||||
|
false
|
||||||
|
} else if v.as_ref().is_empty() {
|
||||||
|
if res.keyword.replace(k).is_some() {
|
||||||
|
return Err(Error::bad_request("Multiple keywords"));
|
||||||
|
}
|
||||||
|
continue;
|
||||||
|
} else {
|
||||||
|
debug!("Received an unknown query parameter: '{}'", k);
|
||||||
|
false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
if repeated {
|
||||||
|
return Err(Error::bad_request(format!(
|
||||||
|
"Query parameter repeated: '{}'",
|
||||||
|
k
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(res)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get an error message in case not all parameters where used when extracting them to
|
||||||
|
/// build an Enpoint variant
|
||||||
|
fn nonempty_message(&self) -> Option<&str> {
|
||||||
|
if self.keyword.is_some() {
|
||||||
|
Some("Keyword not used")
|
||||||
|
} $(
|
||||||
|
else if self.$name.is_some() {
|
||||||
|
Some(concat!("'", $rest, "'"))
|
||||||
|
}
|
||||||
|
)* else {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) use generateQueryParameters;
|
||||||
|
pub(crate) use router_match;
|
399
src/api/s3/api_server.rs
Normal file
399
src/api/s3/api_server.rs
Normal file
|
@ -0,0 +1,399 @@
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use async_trait::async_trait;
|
||||||
|
|
||||||
|
use futures::future::Future;
|
||||||
|
use hyper::header;
|
||||||
|
use hyper::{Body, Method, Request, Response};
|
||||||
|
|
||||||
|
use opentelemetry::{trace::SpanRef, KeyValue};
|
||||||
|
|
||||||
|
use garage_util::error::Error as GarageError;
|
||||||
|
|
||||||
|
use garage_model::garage::Garage;
|
||||||
|
use garage_model::key_table::Key;
|
||||||
|
|
||||||
|
use crate::generic_server::*;
|
||||||
|
use crate::s3::error::*;
|
||||||
|
|
||||||
|
use crate::signature::payload::check_payload_signature;
|
||||||
|
use crate::signature::streaming::*;
|
||||||
|
|
||||||
|
use crate::helpers::*;
|
||||||
|
use crate::s3::bucket::*;
|
||||||
|
use crate::s3::copy::*;
|
||||||
|
use crate::s3::cors::*;
|
||||||
|
use crate::s3::delete::*;
|
||||||
|
use crate::s3::get::*;
|
||||||
|
use crate::s3::list::*;
|
||||||
|
use crate::s3::post_object::handle_post_object;
|
||||||
|
use crate::s3::put::*;
|
||||||
|
use crate::s3::router::Endpoint;
|
||||||
|
use crate::s3::website::*;
|
||||||
|
|
||||||
|
pub struct S3ApiServer {
|
||||||
|
garage: Arc<Garage>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) struct S3ApiEndpoint {
|
||||||
|
bucket_name: Option<String>,
|
||||||
|
endpoint: Endpoint,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl S3ApiServer {
|
||||||
|
pub async fn run(
|
||||||
|
garage: Arc<Garage>,
|
||||||
|
shutdown_signal: impl Future<Output = ()>,
|
||||||
|
) -> Result<(), GarageError> {
|
||||||
|
let addr = garage.config.s3_api.api_bind_addr;
|
||||||
|
|
||||||
|
ApiServer::new(
|
||||||
|
garage.config.s3_api.s3_region.clone(),
|
||||||
|
S3ApiServer { garage },
|
||||||
|
)
|
||||||
|
.run_server(addr, shutdown_signal)
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handle_request_without_bucket(
|
||||||
|
&self,
|
||||||
|
_req: Request<Body>,
|
||||||
|
api_key: Key,
|
||||||
|
endpoint: Endpoint,
|
||||||
|
) -> Result<Response<Body>, Error> {
|
||||||
|
match endpoint {
|
||||||
|
Endpoint::ListBuckets => handle_list_buckets(&self.garage, &api_key).await,
|
||||||
|
endpoint => Err(Error::NotImplemented(endpoint.name().to_owned())),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl ApiHandler for S3ApiServer {
|
||||||
|
const API_NAME: &'static str = "s3";
|
||||||
|
const API_NAME_DISPLAY: &'static str = "S3";
|
||||||
|
|
||||||
|
type Endpoint = S3ApiEndpoint;
|
||||||
|
type Error = Error;
|
||||||
|
|
||||||
|
fn parse_endpoint(&self, req: &Request<Body>) -> Result<S3ApiEndpoint, Error> {
|
||||||
|
let authority = req
|
||||||
|
.headers()
|
||||||
|
.get(header::HOST)
|
||||||
|
.ok_or_bad_request("Host header required")?
|
||||||
|
.to_str()?;
|
||||||
|
|
||||||
|
let host = authority_to_host(authority)?;
|
||||||
|
|
||||||
|
let bucket_name = self
|
||||||
|
.garage
|
||||||
|
.config
|
||||||
|
.s3_api
|
||||||
|
.root_domain
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|root_domain| host_to_bucket(&host, root_domain));
|
||||||
|
|
||||||
|
let (endpoint, bucket_name) =
|
||||||
|
Endpoint::from_request(req, bucket_name.map(ToOwned::to_owned))?;
|
||||||
|
|
||||||
|
Ok(S3ApiEndpoint {
|
||||||
|
bucket_name,
|
||||||
|
endpoint,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handle(
|
||||||
|
&self,
|
||||||
|
req: Request<Body>,
|
||||||
|
endpoint: S3ApiEndpoint,
|
||||||
|
) -> Result<Response<Body>, Error> {
|
||||||
|
let S3ApiEndpoint {
|
||||||
|
bucket_name,
|
||||||
|
endpoint,
|
||||||
|
} = endpoint;
|
||||||
|
let garage = self.garage.clone();
|
||||||
|
|
||||||
|
// Some endpoints are processed early, before we even check for an API key
|
||||||
|
if let Endpoint::PostObject = endpoint {
|
||||||
|
return handle_post_object(garage, req, bucket_name.unwrap()).await;
|
||||||
|
}
|
||||||
|
if let Endpoint::Options = endpoint {
|
||||||
|
return handle_options_s3api(garage, &req, bucket_name).await;
|
||||||
|
}
|
||||||
|
|
||||||
|
let (api_key, mut content_sha256) = check_payload_signature(&garage, "s3", &req).await?;
|
||||||
|
let api_key = api_key
|
||||||
|
.ok_or_else(|| Error::forbidden("Garage does not support anonymous access yet"))?;
|
||||||
|
|
||||||
|
let req = parse_streaming_body(
|
||||||
|
&api_key,
|
||||||
|
req,
|
||||||
|
&mut content_sha256,
|
||||||
|
&garage.config.s3_api.s3_region,
|
||||||
|
"s3",
|
||||||
|
)?;
|
||||||
|
|
||||||
|
let bucket_name = match bucket_name {
|
||||||
|
None => {
|
||||||
|
return self
|
||||||
|
.handle_request_without_bucket(req, api_key, endpoint)
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
Some(bucket) => bucket.to_string(),
|
||||||
|
};
|
||||||
|
|
||||||
|
// Special code path for CreateBucket API endpoint
|
||||||
|
if let Endpoint::CreateBucket {} = endpoint {
|
||||||
|
return handle_create_bucket(&garage, req, content_sha256, api_key, bucket_name).await;
|
||||||
|
}
|
||||||
|
|
||||||
|
let bucket_id = garage
|
||||||
|
.bucket_helper()
|
||||||
|
.resolve_bucket(&bucket_name, &api_key)
|
||||||
|
.await?;
|
||||||
|
let bucket = garage
|
||||||
|
.bucket_helper()
|
||||||
|
.get_existing_bucket(bucket_id)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let allowed = match endpoint.authorization_type() {
|
||||||
|
Authorization::Read => api_key.allow_read(&bucket_id),
|
||||||
|
Authorization::Write => api_key.allow_write(&bucket_id),
|
||||||
|
Authorization::Owner => api_key.allow_owner(&bucket_id),
|
||||||
|
_ => unreachable!(),
|
||||||
|
};
|
||||||
|
|
||||||
|
if !allowed {
|
||||||
|
return Err(Error::forbidden("Operation is not allowed for this key."));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Look up what CORS rule might apply to response.
|
||||||
|
// Requests for methods different than GET, HEAD or POST
|
||||||
|
// are always preflighted, i.e. the browser should make
|
||||||
|
// an OPTIONS call before to check it is allowed
|
||||||
|
let matching_cors_rule = match *req.method() {
|
||||||
|
Method::GET | Method::HEAD | Method::POST => find_matching_cors_rule(&bucket, &req)?,
|
||||||
|
_ => None,
|
||||||
|
};
|
||||||
|
|
||||||
|
let resp = match endpoint {
|
||||||
|
Endpoint::HeadObject {
|
||||||
|
key, part_number, ..
|
||||||
|
} => handle_head(garage, &req, bucket_id, &key, part_number).await,
|
||||||
|
Endpoint::GetObject {
|
||||||
|
key, part_number, ..
|
||||||
|
} => handle_get(garage, &req, bucket_id, &key, part_number).await,
|
||||||
|
Endpoint::UploadPart {
|
||||||
|
key,
|
||||||
|
part_number,
|
||||||
|
upload_id,
|
||||||
|
} => {
|
||||||
|
handle_put_part(
|
||||||
|
garage,
|
||||||
|
req,
|
||||||
|
bucket_id,
|
||||||
|
&key,
|
||||||
|
part_number,
|
||||||
|
&upload_id,
|
||||||
|
content_sha256,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
Endpoint::CopyObject { key } => {
|
||||||
|
handle_copy(garage, &api_key, &req, bucket_id, &key).await
|
||||||
|
}
|
||||||
|
Endpoint::UploadPartCopy {
|
||||||
|
key,
|
||||||
|
part_number,
|
||||||
|
upload_id,
|
||||||
|
} => {
|
||||||
|
handle_upload_part_copy(
|
||||||
|
garage,
|
||||||
|
&api_key,
|
||||||
|
&req,
|
||||||
|
bucket_id,
|
||||||
|
&key,
|
||||||
|
part_number,
|
||||||
|
&upload_id,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
Endpoint::PutObject { key } => {
|
||||||
|
handle_put(garage, req, bucket_id, &key, content_sha256).await
|
||||||
|
}
|
||||||
|
Endpoint::AbortMultipartUpload { key, upload_id } => {
|
||||||
|
handle_abort_multipart_upload(garage, bucket_id, &key, &upload_id).await
|
||||||
|
}
|
||||||
|
Endpoint::DeleteObject { key, .. } => handle_delete(garage, bucket_id, &key).await,
|
||||||
|
Endpoint::CreateMultipartUpload { key } => {
|
||||||
|
handle_create_multipart_upload(garage, &req, &bucket_name, bucket_id, &key).await
|
||||||
|
}
|
||||||
|
Endpoint::CompleteMultipartUpload { key, upload_id } => {
|
||||||
|
handle_complete_multipart_upload(
|
||||||
|
garage,
|
||||||
|
req,
|
||||||
|
&bucket_name,
|
||||||
|
bucket_id,
|
||||||
|
&key,
|
||||||
|
&upload_id,
|
||||||
|
content_sha256,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
Endpoint::CreateBucket {} => unreachable!(),
|
||||||
|
Endpoint::HeadBucket {} => {
|
||||||
|
let empty_body: Body = Body::from(vec![]);
|
||||||
|
let response = Response::builder().body(empty_body).unwrap();
|
||||||
|
Ok(response)
|
||||||
|
}
|
||||||
|
Endpoint::DeleteBucket {} => {
|
||||||
|
handle_delete_bucket(&garage, bucket_id, bucket_name, api_key).await
|
||||||
|
}
|
||||||
|
Endpoint::GetBucketLocation {} => handle_get_bucket_location(garage),
|
||||||
|
Endpoint::GetBucketVersioning {} => handle_get_bucket_versioning(),
|
||||||
|
Endpoint::ListObjects {
|
||||||
|
delimiter,
|
||||||
|
encoding_type,
|
||||||
|
marker,
|
||||||
|
max_keys,
|
||||||
|
prefix,
|
||||||
|
} => {
|
||||||
|
handle_list(
|
||||||
|
garage,
|
||||||
|
&ListObjectsQuery {
|
||||||
|
common: ListQueryCommon {
|
||||||
|
bucket_name,
|
||||||
|
bucket_id,
|
||||||
|
delimiter: delimiter.map(|d| d.to_string()),
|
||||||
|
page_size: max_keys.map(|p| p.clamp(1, 1000)).unwrap_or(1000),
|
||||||
|
prefix: prefix.unwrap_or_default(),
|
||||||
|
urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false),
|
||||||
|
},
|
||||||
|
is_v2: false,
|
||||||
|
marker,
|
||||||
|
continuation_token: None,
|
||||||
|
start_after: None,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
Endpoint::ListObjectsV2 {
|
||||||
|
delimiter,
|
||||||
|
encoding_type,
|
||||||
|
max_keys,
|
||||||
|
prefix,
|
||||||
|
continuation_token,
|
||||||
|
start_after,
|
||||||
|
list_type,
|
||||||
|
..
|
||||||
|
} => {
|
||||||
|
if list_type == "2" {
|
||||||
|
handle_list(
|
||||||
|
garage,
|
||||||
|
&ListObjectsQuery {
|
||||||
|
common: ListQueryCommon {
|
||||||
|
bucket_name,
|
||||||
|
bucket_id,
|
||||||
|
delimiter: delimiter.map(|d| d.to_string()),
|
||||||
|
page_size: max_keys.map(|p| p.clamp(1, 1000)).unwrap_or(1000),
|
||||||
|
urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false),
|
||||||
|
prefix: prefix.unwrap_or_default(),
|
||||||
|
},
|
||||||
|
is_v2: true,
|
||||||
|
marker: None,
|
||||||
|
continuation_token,
|
||||||
|
start_after,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
} else {
|
||||||
|
Err(Error::bad_request(format!(
|
||||||
|
"Invalid endpoint: list-type={}",
|
||||||
|
list_type
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Endpoint::ListMultipartUploads {
|
||||||
|
delimiter,
|
||||||
|
encoding_type,
|
||||||
|
key_marker,
|
||||||
|
max_uploads,
|
||||||
|
prefix,
|
||||||
|
upload_id_marker,
|
||||||
|
} => {
|
||||||
|
handle_list_multipart_upload(
|
||||||
|
garage,
|
||||||
|
&ListMultipartUploadsQuery {
|
||||||
|
common: ListQueryCommon {
|
||||||
|
bucket_name,
|
||||||
|
bucket_id,
|
||||||
|
delimiter: delimiter.map(|d| d.to_string()),
|
||||||
|
page_size: max_uploads.map(|p| p.clamp(1, 1000)).unwrap_or(1000),
|
||||||
|
prefix: prefix.unwrap_or_default(),
|
||||||
|
urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false),
|
||||||
|
},
|
||||||
|
key_marker,
|
||||||
|
upload_id_marker,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
Endpoint::ListParts {
|
||||||
|
key,
|
||||||
|
max_parts,
|
||||||
|
part_number_marker,
|
||||||
|
upload_id,
|
||||||
|
} => {
|
||||||
|
handle_list_parts(
|
||||||
|
garage,
|
||||||
|
&ListPartsQuery {
|
||||||
|
bucket_name,
|
||||||
|
bucket_id,
|
||||||
|
key,
|
||||||
|
upload_id,
|
||||||
|
part_number_marker: part_number_marker.map(|p| p.clamp(1, 10000)),
|
||||||
|
max_parts: max_parts.map(|p| p.clamp(1, 1000)).unwrap_or(1000),
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
Endpoint::DeleteObjects {} => {
|
||||||
|
handle_delete_objects(garage, bucket_id, req, content_sha256).await
|
||||||
|
}
|
||||||
|
Endpoint::GetBucketWebsite {} => handle_get_website(&bucket).await,
|
||||||
|
Endpoint::PutBucketWebsite {} => {
|
||||||
|
handle_put_website(garage, bucket_id, req, content_sha256).await
|
||||||
|
}
|
||||||
|
Endpoint::DeleteBucketWebsite {} => handle_delete_website(garage, bucket_id).await,
|
||||||
|
Endpoint::GetBucketCors {} => handle_get_cors(&bucket).await,
|
||||||
|
Endpoint::PutBucketCors {} => {
|
||||||
|
handle_put_cors(garage, bucket_id, req, content_sha256).await
|
||||||
|
}
|
||||||
|
Endpoint::DeleteBucketCors {} => handle_delete_cors(garage, bucket_id).await,
|
||||||
|
endpoint => Err(Error::NotImplemented(endpoint.name().to_owned())),
|
||||||
|
};
|
||||||
|
|
||||||
|
// If request was a success and we have a CORS rule that applies to it,
|
||||||
|
// add the corresponding CORS headers to the response
|
||||||
|
let mut resp_ok = resp?;
|
||||||
|
if let Some(rule) = matching_cors_rule {
|
||||||
|
add_cors_headers(&mut resp_ok, rule)
|
||||||
|
.ok_or_internal_error("Invalid bucket CORS configuration")?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(resp_ok)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ApiEndpoint for S3ApiEndpoint {
|
||||||
|
fn name(&self) -> &'static str {
|
||||||
|
self.endpoint.name()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn add_span_attributes(&self, span: SpanRef<'_>) {
|
||||||
|
span.set_attribute(KeyValue::new(
|
||||||
|
"bucket",
|
||||||
|
self.bucket_name.clone().unwrap_or_default(),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
}
|
|
@ -7,15 +7,15 @@ use garage_model::bucket_alias_table::*;
|
||||||
use garage_model::bucket_table::Bucket;
|
use garage_model::bucket_table::Bucket;
|
||||||
use garage_model::garage::Garage;
|
use garage_model::garage::Garage;
|
||||||
use garage_model::key_table::Key;
|
use garage_model::key_table::Key;
|
||||||
use garage_model::object_table::ObjectFilter;
|
|
||||||
use garage_model::permission::BucketKeyPerm;
|
use garage_model::permission::BucketKeyPerm;
|
||||||
use garage_table::util::*;
|
use garage_table::util::*;
|
||||||
use garage_util::crdt::*;
|
use garage_util::crdt::*;
|
||||||
use garage_util::data::*;
|
use garage_util::data::*;
|
||||||
use garage_util::time::*;
|
use garage_util::time::*;
|
||||||
|
|
||||||
use crate::error::*;
|
use crate::common_error::CommonError;
|
||||||
use crate::s3_xml;
|
use crate::s3::error::*;
|
||||||
|
use crate::s3::xml as s3_xml;
|
||||||
use crate::signature::verify_signed_content;
|
use crate::signature::verify_signed_content;
|
||||||
|
|
||||||
pub fn handle_get_bucket_location(garage: Arc<Garage>) -> Result<Response<Body>, Error> {
|
pub fn handle_get_bucket_location(garage: Arc<Garage>) -> Result<Response<Body>, Error> {
|
||||||
|
@ -130,7 +130,7 @@ pub async fn handle_create_bucket(
|
||||||
|
|
||||||
if let Some(location_constraint) = cmd {
|
if let Some(location_constraint) = cmd {
|
||||||
if location_constraint != garage.config.s3_api.s3_region {
|
if location_constraint != garage.config.s3_api.s3_region {
|
||||||
return Err(Error::BadRequest(format!(
|
return Err(Error::bad_request(format!(
|
||||||
"Cannot satisfy location constraint `{}`: buckets can only be created in region `{}`",
|
"Cannot satisfy location constraint `{}`: buckets can only be created in region `{}`",
|
||||||
location_constraint,
|
location_constraint,
|
||||||
garage.config.s3_api.s3_region
|
garage.config.s3_api.s3_region
|
||||||
|
@ -158,12 +158,12 @@ pub async fn handle_create_bucket(
|
||||||
// otherwise return a forbidden error.
|
// otherwise return a forbidden error.
|
||||||
let kp = api_key.bucket_permissions(&bucket_id);
|
let kp = api_key.bucket_permissions(&bucket_id);
|
||||||
if !(kp.allow_write || kp.allow_owner) {
|
if !(kp.allow_write || kp.allow_owner) {
|
||||||
return Err(Error::BucketAlreadyExists);
|
return Err(CommonError::BucketAlreadyExists.into());
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// Create the bucket!
|
// Create the bucket!
|
||||||
if !is_valid_bucket_name(&bucket_name) {
|
if !is_valid_bucket_name(&bucket_name) {
|
||||||
return Err(Error::BadRequest(format!(
|
return Err(Error::bad_request(format!(
|
||||||
"{}: {}",
|
"{}: {}",
|
||||||
bucket_name, INVALID_BUCKET_NAME_MESSAGE
|
bucket_name, INVALID_BUCKET_NAME_MESSAGE
|
||||||
)));
|
)));
|
||||||
|
@ -228,12 +228,8 @@ pub async fn handle_delete_bucket(
|
||||||
// Delete bucket
|
// Delete bucket
|
||||||
|
|
||||||
// Check bucket is empty
|
// Check bucket is empty
|
||||||
let objects = garage
|
if !garage.bucket_helper().is_bucket_empty(bucket_id).await? {
|
||||||
.object_table
|
return Err(CommonError::BucketNotEmpty.into());
|
||||||
.get_range(&bucket_id, None, Some(ObjectFilter::IsData), 10)
|
|
||||||
.await?;
|
|
||||||
if !objects.is_empty() {
|
|
||||||
return Err(Error::BucketNotEmpty);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// --- done checking, now commit ---
|
// --- done checking, now commit ---
|
|
@ -1,7 +1,8 @@
|
||||||
|
use std::pin::Pin;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::time::{Duration, SystemTime, UNIX_EPOCH};
|
use std::time::{Duration, SystemTime, UNIX_EPOCH};
|
||||||
|
|
||||||
use futures::TryFutureExt;
|
use futures::{stream, stream::Stream, StreamExt, TryFutureExt};
|
||||||
use md5::{Digest as Md5Digest, Md5};
|
use md5::{Digest as Md5Digest, Md5};
|
||||||
|
|
||||||
use hyper::{Body, Request, Response};
|
use hyper::{Body, Request, Response};
|
||||||
|
@ -11,16 +12,16 @@ use garage_table::*;
|
||||||
use garage_util::data::*;
|
use garage_util::data::*;
|
||||||
use garage_util::time::*;
|
use garage_util::time::*;
|
||||||
|
|
||||||
use garage_model::block_ref_table::*;
|
|
||||||
use garage_model::garage::Garage;
|
use garage_model::garage::Garage;
|
||||||
use garage_model::key_table::Key;
|
use garage_model::key_table::Key;
|
||||||
use garage_model::object_table::*;
|
use garage_model::s3::block_ref_table::*;
|
||||||
use garage_model::version_table::*;
|
use garage_model::s3::object_table::*;
|
||||||
|
use garage_model::s3::version_table::*;
|
||||||
|
|
||||||
use crate::api_server::{parse_bucket_key, resolve_bucket};
|
use crate::helpers::parse_bucket_key;
|
||||||
use crate::error::*;
|
use crate::s3::error::*;
|
||||||
use crate::s3_put::{decode_upload_id, get_headers};
|
use crate::s3::put::{decode_upload_id, get_headers};
|
||||||
use crate::s3_xml::{self, xmlns_tag};
|
use crate::s3::xml::{self as s3_xml, xmlns_tag};
|
||||||
|
|
||||||
pub async fn handle_copy(
|
pub async fn handle_copy(
|
||||||
garage: Arc<Garage>,
|
garage: Arc<Garage>,
|
||||||
|
@ -200,8 +201,8 @@ pub async fn handle_upload_part_copy(
|
||||||
let mut ranges = http_range::HttpRange::parse(range_str, source_version_meta.size)
|
let mut ranges = http_range::HttpRange::parse(range_str, source_version_meta.size)
|
||||||
.map_err(|e| (e, source_version_meta.size))?;
|
.map_err(|e| (e, source_version_meta.size))?;
|
||||||
if ranges.len() != 1 {
|
if ranges.len() != 1 {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::bad_request(
|
||||||
"Invalid x-amz-copy-source-range header: exactly 1 range must be given".into(),
|
"Invalid x-amz-copy-source-range header: exactly 1 range must be given",
|
||||||
));
|
));
|
||||||
} else {
|
} else {
|
||||||
ranges.pop().unwrap()
|
ranges.pop().unwrap()
|
||||||
|
@ -229,8 +230,8 @@ pub async fn handle_upload_part_copy(
|
||||||
// This is only for small files, we don't bother handling this.
|
// This is only for small files, we don't bother handling this.
|
||||||
// (in AWS UploadPartCopy works for parts at least 5MB which
|
// (in AWS UploadPartCopy works for parts at least 5MB which
|
||||||
// is never the case of an inline object)
|
// is never the case of an inline object)
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::bad_request(
|
||||||
"Source object is too small (minimum part size is 5Mb)".into(),
|
"Source object is too small (minimum part size is 5Mb)",
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
ObjectVersionData::FirstBlock(_meta, _first_block_hash) => (),
|
ObjectVersionData::FirstBlock(_meta, _first_block_hash) => (),
|
||||||
|
@ -249,7 +250,7 @@ pub async fn handle_upload_part_copy(
|
||||||
// Check this part number hasn't yet been uploaded
|
// Check this part number hasn't yet been uploaded
|
||||||
if let Some(dv) = dest_version {
|
if let Some(dv) = dest_version {
|
||||||
if dv.has_part_number(part_number) {
|
if dv.has_part_number(part_number) {
|
||||||
return Err(Error::BadRequest(format!(
|
return Err(Error::bad_request(format!(
|
||||||
"Part number {} has already been uploaded",
|
"Part number {} has already been uploaded",
|
||||||
part_number
|
part_number
|
||||||
)));
|
)));
|
||||||
|
@ -268,7 +269,6 @@ pub async fn handle_upload_part_copy(
|
||||||
|
|
||||||
let mut blocks_to_copy = vec![];
|
let mut blocks_to_copy = vec![];
|
||||||
let mut current_offset = 0;
|
let mut current_offset = 0;
|
||||||
let mut size_to_copy = 0;
|
|
||||||
for (_bk, block) in source_version.blocks.items().iter() {
|
for (_bk, block) in source_version.blocks.items().iter() {
|
||||||
let (block_begin, block_end) = (current_offset, current_offset + block.size);
|
let (block_begin, block_end) = (current_offset, current_offset + block.size);
|
||||||
|
|
||||||
|
@ -289,10 +289,6 @@ pub async fn handle_upload_part_copy(
|
||||||
(Some(b), None) => Some(b as usize..block.size as usize),
|
(Some(b), None) => Some(b as usize..block.size as usize),
|
||||||
(None, None) => None,
|
(None, None) => None,
|
||||||
};
|
};
|
||||||
size_to_copy += range_to_copy
|
|
||||||
.as_ref()
|
|
||||||
.map(|x| x.len() as u64)
|
|
||||||
.unwrap_or(block.size);
|
|
||||||
|
|
||||||
blocks_to_copy.push((block.hash, range_to_copy));
|
blocks_to_copy.push((block.hash, range_to_copy));
|
||||||
}
|
}
|
||||||
|
@ -300,34 +296,49 @@ pub async fn handle_upload_part_copy(
|
||||||
current_offset = block_end;
|
current_offset = block_end;
|
||||||
}
|
}
|
||||||
|
|
||||||
if size_to_copy < 1024 * 1024 {
|
|
||||||
return Err(Error::BadRequest(format!(
|
|
||||||
"Not enough data to copy: {} bytes (minimum: 1MB)",
|
|
||||||
size_to_copy
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
// Now, actually copy the blocks
|
// Now, actually copy the blocks
|
||||||
let mut md5hasher = Md5::new();
|
let mut md5hasher = Md5::new();
|
||||||
|
|
||||||
let mut block = Some(
|
// First, create a stream that is able to read the source blocks
|
||||||
garage
|
// and extract the subrange if necessary.
|
||||||
.block_manager
|
// The second returned value is an Option<Hash>, that is Some
|
||||||
.rpc_get_block(&blocks_to_copy[0].0)
|
// if and only if the block returned is a block that already existed
|
||||||
.await?,
|
// in the Garage data store (thus we don't need to save it again).
|
||||||
);
|
let garage2 = garage.clone();
|
||||||
|
let source_blocks = stream::iter(blocks_to_copy)
|
||||||
|
.flat_map(|(block_hash, range_to_copy)| {
|
||||||
|
let garage3 = garage2.clone();
|
||||||
|
stream::once(async move {
|
||||||
|
let data = garage3.block_manager.rpc_get_block(&block_hash).await?;
|
||||||
|
match range_to_copy {
|
||||||
|
Some(r) => Ok((data[r].to_vec(), None)),
|
||||||
|
None => Ok((data, Some(block_hash))),
|
||||||
|
}
|
||||||
|
})
|
||||||
|
})
|
||||||
|
.peekable();
|
||||||
|
|
||||||
|
// The defragmenter is a custom stream (defined below) that concatenates
|
||||||
|
// consecutive block parts when they are too small.
|
||||||
|
// It returns a series of (Vec<u8>, Option<Hash>).
|
||||||
|
// When it is done, it returns an empty vec.
|
||||||
|
// Same as the previous iterator, the Option is Some(_) if and only if
|
||||||
|
// it's an existing block of the Garage data store.
|
||||||
|
let mut defragmenter = Defragmenter::new(garage.config.block_size, Box::pin(source_blocks));
|
||||||
|
|
||||||
let mut current_offset = 0;
|
let mut current_offset = 0;
|
||||||
for (i, (block_hash, range_to_copy)) in blocks_to_copy.iter().enumerate() {
|
let mut next_block = defragmenter.next().await?;
|
||||||
let (current_block, subrange_hash) = match range_to_copy.clone() {
|
|
||||||
Some(r) => {
|
loop {
|
||||||
let subrange = block.take().unwrap()[r].to_vec();
|
let (data, existing_block_hash) = next_block;
|
||||||
let hash = blake2sum(&subrange);
|
if data.is_empty() {
|
||||||
(subrange, hash)
|
break;
|
||||||
}
|
}
|
||||||
None => (block.take().unwrap(), *block_hash),
|
|
||||||
};
|
md5hasher.update(&data[..]);
|
||||||
md5hasher.update(¤t_block[..]);
|
|
||||||
|
let must_upload = existing_block_hash.is_none();
|
||||||
|
let final_hash = existing_block_hash.unwrap_or_else(|| blake2sum(&data[..]));
|
||||||
|
|
||||||
let mut version = Version::new(dest_version_uuid, dest_bucket_id, dest_key.clone(), false);
|
let mut version = Version::new(dest_version_uuid, dest_bucket_id, dest_key.clone(), false);
|
||||||
version.blocks.put(
|
version.blocks.put(
|
||||||
|
@ -336,33 +347,25 @@ pub async fn handle_upload_part_copy(
|
||||||
offset: current_offset,
|
offset: current_offset,
|
||||||
},
|
},
|
||||||
VersionBlock {
|
VersionBlock {
|
||||||
hash: subrange_hash,
|
hash: final_hash,
|
||||||
size: current_block.len() as u64,
|
size: data.len() as u64,
|
||||||
},
|
},
|
||||||
);
|
);
|
||||||
current_offset += current_block.len() as u64;
|
current_offset += data.len() as u64;
|
||||||
|
|
||||||
let block_ref = BlockRef {
|
let block_ref = BlockRef {
|
||||||
block: subrange_hash,
|
block: final_hash,
|
||||||
version: dest_version_uuid,
|
version: dest_version_uuid,
|
||||||
deleted: false.into(),
|
deleted: false.into(),
|
||||||
};
|
};
|
||||||
|
|
||||||
let next_block_hash = blocks_to_copy.get(i + 1).map(|(h, _)| *h);
|
|
||||||
|
|
||||||
let garage2 = garage.clone();
|
let garage2 = garage.clone();
|
||||||
let garage3 = garage.clone();
|
let res = futures::try_join!(
|
||||||
let is_subrange = range_to_copy.is_some();
|
// Thing 1: if the block is not exactly a block that existed before,
|
||||||
|
// we need to insert that data as a new block.
|
||||||
let (_, _, _, next_block) = futures::try_join!(
|
|
||||||
// Thing 1: if we are taking a subrange of the source block,
|
|
||||||
// we need to insert that subrange as a new block.
|
|
||||||
async move {
|
async move {
|
||||||
if is_subrange {
|
if must_upload {
|
||||||
garage2
|
garage2.block_manager.rpc_put_block(final_hash, data).await
|
||||||
.block_manager
|
|
||||||
.rpc_put_block(subrange_hash, current_block)
|
|
||||||
.await
|
|
||||||
} else {
|
} else {
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
@ -372,15 +375,9 @@ pub async fn handle_upload_part_copy(
|
||||||
// Thing 3: we need to add a block reference
|
// Thing 3: we need to add a block reference
|
||||||
garage.block_ref_table.insert(&block_ref),
|
garage.block_ref_table.insert(&block_ref),
|
||||||
// Thing 4: we need to prefetch the next block
|
// Thing 4: we need to prefetch the next block
|
||||||
async move {
|
defragmenter.next(),
|
||||||
match next_block_hash {
|
|
||||||
Some(h) => Ok(Some(garage3.block_manager.rpc_get_block(&h).await?)),
|
|
||||||
None => Ok(None),
|
|
||||||
}
|
|
||||||
},
|
|
||||||
)?;
|
)?;
|
||||||
|
next_block = res.3;
|
||||||
block = next_block;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
let data_md5sum = md5hasher.finalize();
|
let data_md5sum = md5hasher.finalize();
|
||||||
|
@ -416,10 +413,13 @@ async fn get_copy_source(
|
||||||
let copy_source = percent_encoding::percent_decode_str(copy_source).decode_utf8()?;
|
let copy_source = percent_encoding::percent_decode_str(copy_source).decode_utf8()?;
|
||||||
|
|
||||||
let (source_bucket, source_key) = parse_bucket_key(©_source, None)?;
|
let (source_bucket, source_key) = parse_bucket_key(©_source, None)?;
|
||||||
let source_bucket_id = resolve_bucket(garage, &source_bucket.to_string(), api_key).await?;
|
let source_bucket_id = garage
|
||||||
|
.bucket_helper()
|
||||||
|
.resolve_bucket(&source_bucket.to_string(), api_key)
|
||||||
|
.await?;
|
||||||
|
|
||||||
if !api_key.allow_read(&source_bucket_id) {
|
if !api_key.allow_read(&source_bucket_id) {
|
||||||
return Err(Error::Forbidden(format!(
|
return Err(Error::forbidden(format!(
|
||||||
"Reading from bucket {} not allowed for this key",
|
"Reading from bucket {} not allowed for this key",
|
||||||
source_bucket
|
source_bucket
|
||||||
)));
|
)));
|
||||||
|
@ -539,8 +539,8 @@ impl CopyPreconditionHeaders {
|
||||||
(None, None, None, Some(ims)) => v_date > *ims,
|
(None, None, None, Some(ims)) => v_date > *ims,
|
||||||
(None, None, None, None) => true,
|
(None, None, None, None) => true,
|
||||||
_ => {
|
_ => {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::bad_request(
|
||||||
"Invalid combination of x-amz-copy-source-if-xxxxx headers".into(),
|
"Invalid combination of x-amz-copy-source-if-xxxxx headers",
|
||||||
))
|
))
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
@ -553,6 +553,54 @@ impl CopyPreconditionHeaders {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type BlockStreamItemOk = (Vec<u8>, Option<Hash>);
|
||||||
|
type BlockStreamItem = Result<BlockStreamItemOk, garage_util::error::Error>;
|
||||||
|
|
||||||
|
struct Defragmenter<S: Stream<Item = BlockStreamItem>> {
|
||||||
|
block_size: usize,
|
||||||
|
block_stream: Pin<Box<stream::Peekable<S>>>,
|
||||||
|
buffer: Vec<u8>,
|
||||||
|
hash: Option<Hash>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<S: Stream<Item = BlockStreamItem>> Defragmenter<S> {
|
||||||
|
fn new(block_size: usize, block_stream: Pin<Box<stream::Peekable<S>>>) -> Self {
|
||||||
|
Self {
|
||||||
|
block_size,
|
||||||
|
block_stream,
|
||||||
|
buffer: vec![],
|
||||||
|
hash: None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn next(&mut self) -> BlockStreamItem {
|
||||||
|
// Fill buffer while we can
|
||||||
|
while let Some(res) = self.block_stream.as_mut().peek().await {
|
||||||
|
let (peeked_next_block, _) = match res {
|
||||||
|
Ok(t) => t,
|
||||||
|
Err(_) => {
|
||||||
|
self.block_stream.next().await.unwrap()?;
|
||||||
|
unreachable!()
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
if self.buffer.is_empty() {
|
||||||
|
let (next_block, next_block_hash) = self.block_stream.next().await.unwrap()?;
|
||||||
|
self.buffer = next_block;
|
||||||
|
self.hash = next_block_hash;
|
||||||
|
} else if self.buffer.len() + peeked_next_block.len() > self.block_size {
|
||||||
|
break;
|
||||||
|
} else {
|
||||||
|
let (next_block, _) = self.block_stream.next().await.unwrap()?;
|
||||||
|
self.buffer.extend(next_block);
|
||||||
|
self.hash = None;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok((std::mem::take(&mut self.buffer), self.hash.take()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#[derive(Debug, Serialize, PartialEq)]
|
#[derive(Debug, Serialize, PartialEq)]
|
||||||
pub struct CopyObjectResult {
|
pub struct CopyObjectResult {
|
||||||
#[serde(rename = "LastModified")]
|
#[serde(rename = "LastModified")]
|
||||||
|
@ -574,7 +622,7 @@ pub struct CopyPartResult {
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use super::*;
|
use super::*;
|
||||||
use crate::s3_xml::to_xml_with_header;
|
use crate::s3::xml::to_xml_with_header;
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn copy_object_result() -> Result<(), Error> {
|
fn copy_object_result() -> Result<(), Error> {
|
|
@ -9,13 +9,12 @@ use hyper::{header::HeaderName, Body, Method, Request, Response, StatusCode};
|
||||||
|
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use crate::error::*;
|
use crate::s3::error::*;
|
||||||
use crate::s3_xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
|
use crate::s3::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
|
||||||
use crate::signature::verify_signed_content;
|
use crate::signature::verify_signed_content;
|
||||||
|
|
||||||
use garage_model::bucket_table::{Bucket, CorsRule as GarageCorsRule};
|
use garage_model::bucket_table::{Bucket, CorsRule as GarageCorsRule};
|
||||||
use garage_model::garage::Garage;
|
use garage_model::garage::Garage;
|
||||||
use garage_table::*;
|
|
||||||
use garage_util::data::*;
|
use garage_util::data::*;
|
||||||
|
|
||||||
pub async fn handle_get_cors(bucket: &Bucket) -> Result<Response<Body>, Error> {
|
pub async fn handle_get_cors(bucket: &Bucket) -> Result<Response<Body>, Error> {
|
||||||
|
@ -48,14 +47,11 @@ pub async fn handle_delete_cors(
|
||||||
bucket_id: Uuid,
|
bucket_id: Uuid,
|
||||||
) -> Result<Response<Body>, Error> {
|
) -> Result<Response<Body>, Error> {
|
||||||
let mut bucket = garage
|
let mut bucket = garage
|
||||||
.bucket_table
|
.bucket_helper()
|
||||||
.get(&EmptyKey, &bucket_id)
|
.get_existing_bucket(bucket_id)
|
||||||
.await?
|
.await?;
|
||||||
.ok_or(Error::NoSuchBucket)?;
|
|
||||||
|
|
||||||
let param = bucket
|
let param = bucket.params_mut().unwrap();
|
||||||
.params_mut()
|
|
||||||
.ok_or_internal_error("Bucket should not be deleted at this point")?;
|
|
||||||
|
|
||||||
param.cors_config.update(None);
|
param.cors_config.update(None);
|
||||||
garage.bucket_table.insert(&bucket).await?;
|
garage.bucket_table.insert(&bucket).await?;
|
||||||
|
@ -78,14 +74,11 @@ pub async fn handle_put_cors(
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut bucket = garage
|
let mut bucket = garage
|
||||||
.bucket_table
|
.bucket_helper()
|
||||||
.get(&EmptyKey, &bucket_id)
|
.get_existing_bucket(bucket_id)
|
||||||
.await?
|
.await?;
|
||||||
.ok_or(Error::NoSuchBucket)?;
|
|
||||||
|
|
||||||
let param = bucket
|
let param = bucket.params_mut().unwrap();
|
||||||
.params_mut()
|
|
||||||
.ok_or_internal_error("Bucket should not be deleted at this point")?;
|
|
||||||
|
|
||||||
let conf: CorsConfiguration = from_reader(&body as &[u8])?;
|
let conf: CorsConfiguration = from_reader(&body as &[u8])?;
|
||||||
conf.validate()?;
|
conf.validate()?;
|
||||||
|
@ -119,12 +112,7 @@ pub async fn handle_options_s3api(
|
||||||
let helper = garage.bucket_helper();
|
let helper = garage.bucket_helper();
|
||||||
let bucket_id = helper.resolve_global_bucket_name(&bn).await?;
|
let bucket_id = helper.resolve_global_bucket_name(&bn).await?;
|
||||||
if let Some(id) = bucket_id {
|
if let Some(id) = bucket_id {
|
||||||
let bucket = garage
|
let bucket = garage.bucket_helper().get_existing_bucket(id).await?;
|
||||||
.bucket_table
|
|
||||||
.get(&EmptyKey, &id)
|
|
||||||
.await?
|
|
||||||
.filter(|b| !b.state.is_deleted())
|
|
||||||
.ok_or(Error::NoSuchBucket)?;
|
|
||||||
handle_options_for_bucket(req, &bucket)
|
handle_options_for_bucket(req, &bucket)
|
||||||
} else {
|
} else {
|
||||||
// If there is a bucket name in the request, but that name
|
// If there is a bucket name in the request, but that name
|
||||||
|
@ -185,7 +173,7 @@ pub fn handle_options_for_bucket(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Err(Error::Forbidden("This CORS request is not allowed.".into()))
|
Err(Error::forbidden("This CORS request is not allowed."))
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn find_matching_cors_rule<'a>(
|
pub fn find_matching_cors_rule<'a>(
|
|
@ -6,10 +6,10 @@ use garage_util::data::*;
|
||||||
use garage_util::time::*;
|
use garage_util::time::*;
|
||||||
|
|
||||||
use garage_model::garage::Garage;
|
use garage_model::garage::Garage;
|
||||||
use garage_model::object_table::*;
|
use garage_model::s3::object_table::*;
|
||||||
|
|
||||||
use crate::error::*;
|
use crate::s3::error::*;
|
||||||
use crate::s3_xml;
|
use crate::s3::xml as s3_xml;
|
||||||
use crate::signature::verify_signed_content;
|
use crate::signature::verify_signed_content;
|
||||||
|
|
||||||
async fn handle_delete_internal(
|
async fn handle_delete_internal(
|
|
@ -2,34 +2,24 @@ use std::convert::TryInto;
|
||||||
|
|
||||||
use err_derive::Error;
|
use err_derive::Error;
|
||||||
use hyper::header::HeaderValue;
|
use hyper::header::HeaderValue;
|
||||||
use hyper::{HeaderMap, StatusCode};
|
use hyper::{Body, HeaderMap, StatusCode};
|
||||||
|
|
||||||
use garage_model::helper::error::Error as HelperError;
|
use garage_model::helper::error::Error as HelperError;
|
||||||
use garage_util::error::Error as GarageError;
|
|
||||||
|
|
||||||
use crate::s3_xml;
|
use crate::common_error::CommonError;
|
||||||
|
pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInternalError};
|
||||||
|
use crate::generic_server::ApiError;
|
||||||
|
use crate::s3::xml as s3_xml;
|
||||||
|
use crate::signature::error::Error as SignatureError;
|
||||||
|
|
||||||
/// Errors of this crate
|
/// Errors of this crate
|
||||||
#[derive(Debug, Error)]
|
#[derive(Debug, Error)]
|
||||||
pub enum Error {
|
pub enum Error {
|
||||||
// Category: internal error
|
#[error(display = "{}", _0)]
|
||||||
/// Error related to deeper parts of Garage
|
/// Error from common error
|
||||||
#[error(display = "Internal error: {}", _0)]
|
Common(CommonError),
|
||||||
InternalError(#[error(source)] GarageError),
|
|
||||||
|
|
||||||
/// Error related to Hyper
|
|
||||||
#[error(display = "Internal error (Hyper error): {}", _0)]
|
|
||||||
Hyper(#[error(source)] hyper::Error),
|
|
||||||
|
|
||||||
/// Error related to HTTP
|
|
||||||
#[error(display = "Internal error (HTTP error): {}", _0)]
|
|
||||||
Http(#[error(source)] http::Error),
|
|
||||||
|
|
||||||
// Category: cannot process
|
// Category: cannot process
|
||||||
/// No proper api key was used, or the signature was invalid
|
|
||||||
#[error(display = "Forbidden: {}", _0)]
|
|
||||||
Forbidden(String),
|
|
||||||
|
|
||||||
/// Authorization Header Malformed
|
/// Authorization Header Malformed
|
||||||
#[error(display = "Authorization header malformed, expected scope: {}", _0)]
|
#[error(display = "Authorization header malformed, expected scope: {}", _0)]
|
||||||
AuthorizationHeaderMalformed(String),
|
AuthorizationHeaderMalformed(String),
|
||||||
|
@ -38,22 +28,10 @@ pub enum Error {
|
||||||
#[error(display = "Key not found")]
|
#[error(display = "Key not found")]
|
||||||
NoSuchKey,
|
NoSuchKey,
|
||||||
|
|
||||||
/// The bucket requested don't exists
|
|
||||||
#[error(display = "Bucket not found")]
|
|
||||||
NoSuchBucket,
|
|
||||||
|
|
||||||
/// The multipart upload requested don't exists
|
/// The multipart upload requested don't exists
|
||||||
#[error(display = "Upload not found")]
|
#[error(display = "Upload not found")]
|
||||||
NoSuchUpload,
|
NoSuchUpload,
|
||||||
|
|
||||||
/// Tried to create a bucket that already exist
|
|
||||||
#[error(display = "Bucket already exists")]
|
|
||||||
BucketAlreadyExists,
|
|
||||||
|
|
||||||
/// Tried to delete a non-empty bucket
|
|
||||||
#[error(display = "Tried to delete a non-empty bucket")]
|
|
||||||
BucketNotEmpty,
|
|
||||||
|
|
||||||
/// Precondition failed (e.g. x-amz-copy-source-if-match)
|
/// Precondition failed (e.g. x-amz-copy-source-if-match)
|
||||||
#[error(display = "At least one of the preconditions you specified did not hold")]
|
#[error(display = "At least one of the preconditions you specified did not hold")]
|
||||||
PreconditionFailed,
|
PreconditionFailed,
|
||||||
|
@ -80,10 +58,6 @@ pub enum Error {
|
||||||
#[error(display = "Invalid UTF-8: {}", _0)]
|
#[error(display = "Invalid UTF-8: {}", _0)]
|
||||||
InvalidUtf8String(#[error(source)] std::string::FromUtf8Error),
|
InvalidUtf8String(#[error(source)] std::string::FromUtf8Error),
|
||||||
|
|
||||||
/// Some base64 encoded data was badly encoded
|
|
||||||
#[error(display = "Invalid base64: {}", _0)]
|
|
||||||
InvalidBase64(#[error(source)] base64::DecodeError),
|
|
||||||
|
|
||||||
/// The client sent invalid XML data
|
/// The client sent invalid XML data
|
||||||
#[error(display = "Invalid XML: {}", _0)]
|
#[error(display = "Invalid XML: {}", _0)]
|
||||||
InvalidXml(String),
|
InvalidXml(String),
|
||||||
|
@ -96,15 +70,34 @@ pub enum Error {
|
||||||
#[error(display = "Invalid HTTP range: {:?}", _0)]
|
#[error(display = "Invalid HTTP range: {:?}", _0)]
|
||||||
InvalidRange(#[error(from)] (http_range::HttpRangeParseError, u64)),
|
InvalidRange(#[error(from)] (http_range::HttpRangeParseError, u64)),
|
||||||
|
|
||||||
/// The client sent an invalid request
|
|
||||||
#[error(display = "Bad request: {}", _0)]
|
|
||||||
BadRequest(String),
|
|
||||||
|
|
||||||
/// The client sent a request for an action not supported by garage
|
/// The client sent a request for an action not supported by garage
|
||||||
#[error(display = "Unimplemented action: {}", _0)]
|
#[error(display = "Unimplemented action: {}", _0)]
|
||||||
NotImplemented(String),
|
NotImplemented(String),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl<T> From<T> for Error
|
||||||
|
where
|
||||||
|
CommonError: From<T>,
|
||||||
|
{
|
||||||
|
fn from(err: T) -> Self {
|
||||||
|
Error::Common(CommonError::from(err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl CommonErrorDerivative for Error {}
|
||||||
|
|
||||||
|
impl From<HelperError> for Error {
|
||||||
|
fn from(err: HelperError) -> Self {
|
||||||
|
match err {
|
||||||
|
HelperError::Internal(i) => Self::Common(CommonError::InternalError(i)),
|
||||||
|
HelperError::BadRequest(b) => Self::Common(CommonError::BadRequest(b)),
|
||||||
|
HelperError::InvalidBucketName(n) => Self::Common(CommonError::InvalidBucketName(n)),
|
||||||
|
HelperError::NoSuchBucket(n) => Self::Common(CommonError::NoSuchBucket(n)),
|
||||||
|
e => Self::bad_request(format!("{}", e)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl From<roxmltree::Error> for Error {
|
impl From<roxmltree::Error> for Error {
|
||||||
fn from(err: roxmltree::Error) -> Self {
|
fn from(err: roxmltree::Error) -> Self {
|
||||||
Self::InvalidXml(format!("{}", err))
|
Self::InvalidXml(format!("{}", err))
|
||||||
|
@ -117,88 +110,71 @@ impl From<quick_xml::de::DeError> for Error {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<HelperError> for Error {
|
impl From<SignatureError> for Error {
|
||||||
fn from(err: HelperError) -> Self {
|
fn from(err: SignatureError) -> Self {
|
||||||
match err {
|
match err {
|
||||||
HelperError::Internal(i) => Self::InternalError(i),
|
SignatureError::Common(c) => Self::Common(c),
|
||||||
HelperError::BadRequest(b) => Self::BadRequest(b),
|
SignatureError::AuthorizationHeaderMalformed(c) => {
|
||||||
|
Self::AuthorizationHeaderMalformed(c)
|
||||||
|
}
|
||||||
|
SignatureError::InvalidUtf8Str(i) => Self::InvalidUtf8Str(i),
|
||||||
|
SignatureError::InvalidHeader(h) => Self::InvalidHeader(h),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<multer::Error> for Error {
|
impl From<multer::Error> for Error {
|
||||||
fn from(err: multer::Error) -> Self {
|
fn from(err: multer::Error) -> Self {
|
||||||
Self::BadRequest(err.to_string())
|
Self::bad_request(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Error {
|
impl Error {
|
||||||
/// Get the HTTP status code that best represents the meaning of the error for the client
|
|
||||||
pub fn http_status_code(&self) -> StatusCode {
|
|
||||||
match self {
|
|
||||||
Error::NoSuchKey | Error::NoSuchBucket | Error::NoSuchUpload => StatusCode::NOT_FOUND,
|
|
||||||
Error::BucketNotEmpty | Error::BucketAlreadyExists => StatusCode::CONFLICT,
|
|
||||||
Error::PreconditionFailed => StatusCode::PRECONDITION_FAILED,
|
|
||||||
Error::Forbidden(_) => StatusCode::FORBIDDEN,
|
|
||||||
Error::InternalError(
|
|
||||||
GarageError::Timeout
|
|
||||||
| GarageError::RemoteError(_)
|
|
||||||
| GarageError::Quorum(_, _, _, _),
|
|
||||||
) => StatusCode::SERVICE_UNAVAILABLE,
|
|
||||||
Error::InternalError(_) | Error::Hyper(_) | Error::Http(_) => {
|
|
||||||
StatusCode::INTERNAL_SERVER_ERROR
|
|
||||||
}
|
|
||||||
Error::InvalidRange(_) => StatusCode::RANGE_NOT_SATISFIABLE,
|
|
||||||
Error::NotImplemented(_) => StatusCode::NOT_IMPLEMENTED,
|
|
||||||
_ => StatusCode::BAD_REQUEST,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn aws_code(&self) -> &'static str {
|
pub fn aws_code(&self) -> &'static str {
|
||||||
match self {
|
match self {
|
||||||
|
Error::Common(c) => c.aws_code(),
|
||||||
Error::NoSuchKey => "NoSuchKey",
|
Error::NoSuchKey => "NoSuchKey",
|
||||||
Error::NoSuchBucket => "NoSuchBucket",
|
|
||||||
Error::NoSuchUpload => "NoSuchUpload",
|
Error::NoSuchUpload => "NoSuchUpload",
|
||||||
Error::BucketAlreadyExists => "BucketAlreadyExists",
|
|
||||||
Error::BucketNotEmpty => "BucketNotEmpty",
|
|
||||||
Error::PreconditionFailed => "PreconditionFailed",
|
Error::PreconditionFailed => "PreconditionFailed",
|
||||||
Error::InvalidPart => "InvalidPart",
|
Error::InvalidPart => "InvalidPart",
|
||||||
Error::InvalidPartOrder => "InvalidPartOrder",
|
Error::InvalidPartOrder => "InvalidPartOrder",
|
||||||
Error::EntityTooSmall => "EntityTooSmall",
|
Error::EntityTooSmall => "EntityTooSmall",
|
||||||
Error::Forbidden(_) => "AccessDenied",
|
|
||||||
Error::AuthorizationHeaderMalformed(_) => "AuthorizationHeaderMalformed",
|
Error::AuthorizationHeaderMalformed(_) => "AuthorizationHeaderMalformed",
|
||||||
Error::NotImplemented(_) => "NotImplemented",
|
Error::NotImplemented(_) => "NotImplemented",
|
||||||
Error::InternalError(
|
Error::InvalidXml(_) => "MalformedXML",
|
||||||
GarageError::Timeout
|
Error::InvalidRange(_) => "InvalidRange",
|
||||||
| GarageError::RemoteError(_)
|
Error::InvalidUtf8Str(_) | Error::InvalidUtf8String(_) | Error::InvalidHeader(_) => {
|
||||||
| GarageError::Quorum(_, _, _, _),
|
"InvalidRequest"
|
||||||
) => "ServiceUnavailable",
|
}
|
||||||
Error::InternalError(_) | Error::Hyper(_) | Error::Http(_) => "InternalError",
|
}
|
||||||
_ => "InvalidRequest",
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ApiError for Error {
|
||||||
|
/// Get the HTTP status code that best represents the meaning of the error for the client
|
||||||
|
fn http_status_code(&self) -> StatusCode {
|
||||||
|
match self {
|
||||||
|
Error::Common(c) => c.http_status_code(),
|
||||||
|
Error::NoSuchKey | Error::NoSuchUpload => StatusCode::NOT_FOUND,
|
||||||
|
Error::PreconditionFailed => StatusCode::PRECONDITION_FAILED,
|
||||||
|
Error::InvalidRange(_) => StatusCode::RANGE_NOT_SATISFIABLE,
|
||||||
|
Error::NotImplemented(_) => StatusCode::NOT_IMPLEMENTED,
|
||||||
|
Error::AuthorizationHeaderMalformed(_)
|
||||||
|
| Error::InvalidPart
|
||||||
|
| Error::InvalidPartOrder
|
||||||
|
| Error::EntityTooSmall
|
||||||
|
| Error::InvalidXml(_)
|
||||||
|
| Error::InvalidUtf8Str(_)
|
||||||
|
| Error::InvalidUtf8String(_)
|
||||||
|
| Error::InvalidHeader(_) => StatusCode::BAD_REQUEST,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn aws_xml(&self, garage_region: &str, path: &str) -> String {
|
fn add_http_headers(&self, header_map: &mut HeaderMap<HeaderValue>) {
|
||||||
let error = s3_xml::Error {
|
|
||||||
code: s3_xml::Value(self.aws_code().to_string()),
|
|
||||||
message: s3_xml::Value(format!("{}", self)),
|
|
||||||
resource: Some(s3_xml::Value(path.to_string())),
|
|
||||||
region: Some(s3_xml::Value(garage_region.to_string())),
|
|
||||||
};
|
|
||||||
s3_xml::to_xml_with_header(&error).unwrap_or_else(|_| {
|
|
||||||
r#"
|
|
||||||
<?xml version="1.0" encoding="UTF-8"?>
|
|
||||||
<Error>
|
|
||||||
<Code>InternalError</Code>
|
|
||||||
<Message>XML encoding of error failed</Message>
|
|
||||||
</Error>
|
|
||||||
"#
|
|
||||||
.into()
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn add_headers(&self, header_map: &mut HeaderMap<HeaderValue>) {
|
|
||||||
use hyper::header;
|
use hyper::header;
|
||||||
|
|
||||||
|
header_map.append(header::CONTENT_TYPE, "application/xml".parse().unwrap());
|
||||||
|
|
||||||
#[allow(clippy::single_match)]
|
#[allow(clippy::single_match)]
|
||||||
match self {
|
match self {
|
||||||
Error::InvalidRange((_, len)) => {
|
Error::InvalidRange((_, len)) => {
|
||||||
|
@ -212,68 +188,23 @@ impl Error {
|
||||||
_ => (),
|
_ => (),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
/// Trait to map error to the Bad Request error code
|
fn http_body(&self, garage_region: &str, path: &str) -> Body {
|
||||||
pub trait OkOrBadRequest {
|
let error = s3_xml::Error {
|
||||||
type S;
|
code: s3_xml::Value(self.aws_code().to_string()),
|
||||||
fn ok_or_bad_request<M: AsRef<str>>(self, reason: M) -> Result<Self::S, Error>;
|
message: s3_xml::Value(format!("{}", self)),
|
||||||
}
|
resource: Some(s3_xml::Value(path.to_string())),
|
||||||
|
region: Some(s3_xml::Value(garage_region.to_string())),
|
||||||
impl<T, E> OkOrBadRequest for Result<T, E>
|
};
|
||||||
where
|
Body::from(s3_xml::to_xml_with_header(&error).unwrap_or_else(|_| {
|
||||||
E: std::fmt::Display,
|
r#"
|
||||||
{
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
type S = T;
|
<Error>
|
||||||
fn ok_or_bad_request<M: AsRef<str>>(self, reason: M) -> Result<T, Error> {
|
<Code>InternalError</Code>
|
||||||
match self {
|
<Message>XML encoding of error failed</Message>
|
||||||
Ok(x) => Ok(x),
|
</Error>
|
||||||
Err(e) => Err(Error::BadRequest(format!("{}: {}", reason.as_ref(), e))),
|
"#
|
||||||
}
|
.into()
|
||||||
}
|
}))
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> OkOrBadRequest for Option<T> {
|
|
||||||
type S = T;
|
|
||||||
fn ok_or_bad_request<M: AsRef<str>>(self, reason: M) -> Result<T, Error> {
|
|
||||||
match self {
|
|
||||||
Some(x) => Ok(x),
|
|
||||||
None => Err(Error::BadRequest(reason.as_ref().to_string())),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Trait to map an error to an Internal Error code
|
|
||||||
pub trait OkOrInternalError {
|
|
||||||
type S;
|
|
||||||
fn ok_or_internal_error<M: AsRef<str>>(self, reason: M) -> Result<Self::S, Error>;
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T, E> OkOrInternalError for Result<T, E>
|
|
||||||
where
|
|
||||||
E: std::fmt::Display,
|
|
||||||
{
|
|
||||||
type S = T;
|
|
||||||
fn ok_or_internal_error<M: AsRef<str>>(self, reason: M) -> Result<T, Error> {
|
|
||||||
match self {
|
|
||||||
Ok(x) => Ok(x),
|
|
||||||
Err(e) => Err(Error::InternalError(GarageError::Message(format!(
|
|
||||||
"{}: {}",
|
|
||||||
reason.as_ref(),
|
|
||||||
e
|
|
||||||
)))),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> OkOrInternalError for Option<T> {
|
|
||||||
type S = T;
|
|
||||||
fn ok_or_internal_error<M: AsRef<str>>(self, reason: M) -> Result<T, Error> {
|
|
||||||
match self {
|
|
||||||
Some(x) => Ok(x),
|
|
||||||
None => Err(Error::InternalError(GarageError::Message(
|
|
||||||
reason.as_ref().to_string(),
|
|
||||||
))),
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
|
@ -14,10 +14,10 @@ use garage_table::EmptyKey;
|
||||||
use garage_util::data::*;
|
use garage_util::data::*;
|
||||||
|
|
||||||
use garage_model::garage::Garage;
|
use garage_model::garage::Garage;
|
||||||
use garage_model::object_table::*;
|
use garage_model::s3::object_table::*;
|
||||||
use garage_model::version_table::*;
|
use garage_model::s3::version_table::*;
|
||||||
|
|
||||||
use crate::error::*;
|
use crate::s3::error::*;
|
||||||
|
|
||||||
const X_AMZ_MP_PARTS_COUNT: &str = "x-amz-mp-parts-count";
|
const X_AMZ_MP_PARTS_COUNT: &str = "x-amz-mp-parts-count";
|
||||||
|
|
||||||
|
@ -210,8 +210,8 @@ pub async fn handle_get(
|
||||||
|
|
||||||
match (part_number, parse_range_header(req, last_v_meta.size)?) {
|
match (part_number, parse_range_header(req, last_v_meta.size)?) {
|
||||||
(Some(_), Some(_)) => {
|
(Some(_), Some(_)) => {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::bad_request(
|
||||||
"Cannot specify both partNumber and Range header".into(),
|
"Cannot specify both partNumber and Range header",
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
(Some(pn), None) => {
|
(Some(pn), None) => {
|
||||||
|
@ -302,9 +302,9 @@ async fn handle_get_range(
|
||||||
let body: Body = Body::from(bytes[begin as usize..end as usize].to_vec());
|
let body: Body = Body::from(bytes[begin as usize..end as usize].to_vec());
|
||||||
Ok(resp_builder.body(body)?)
|
Ok(resp_builder.body(body)?)
|
||||||
} else {
|
} else {
|
||||||
None.ok_or_internal_error(
|
Err(Error::internal_error(
|
||||||
"Requested range not present in inline bytes when it should have been",
|
"Requested range not present in inline bytes when it should have been",
|
||||||
)
|
))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
ObjectVersionData::FirstBlock(_meta, _first_block_hash) => {
|
ObjectVersionData::FirstBlock(_meta, _first_block_hash) => {
|
|
@ -10,15 +10,16 @@ use garage_util::error::Error as GarageError;
|
||||||
use garage_util::time::*;
|
use garage_util::time::*;
|
||||||
|
|
||||||
use garage_model::garage::Garage;
|
use garage_model::garage::Garage;
|
||||||
use garage_model::object_table::*;
|
use garage_model::s3::object_table::*;
|
||||||
use garage_model::version_table::Version;
|
use garage_model::s3::version_table::Version;
|
||||||
|
|
||||||
use garage_table::EmptyKey;
|
use garage_table::{EmptyKey, EnumerationOrder};
|
||||||
|
|
||||||
use crate::encoding::*;
|
use crate::encoding::*;
|
||||||
use crate::error::*;
|
use crate::helpers::key_after_prefix;
|
||||||
use crate::s3_put;
|
use crate::s3::error::*;
|
||||||
use crate::s3_xml;
|
use crate::s3::put as s3_put;
|
||||||
|
use crate::s3::xml as s3_xml;
|
||||||
|
|
||||||
const DUMMY_NAME: &str = "Dummy Key";
|
const DUMMY_NAME: &str = "Dummy Key";
|
||||||
const DUMMY_KEY: &str = "GKDummyKey";
|
const DUMMY_KEY: &str = "GKDummyKey";
|
||||||
|
@ -66,8 +67,14 @@ pub async fn handle_list(
|
||||||
let io = |bucket, key, count| {
|
let io = |bucket, key, count| {
|
||||||
let t = &garage.object_table;
|
let t = &garage.object_table;
|
||||||
async move {
|
async move {
|
||||||
t.get_range(&bucket, key, Some(ObjectFilter::IsData), count)
|
t.get_range(
|
||||||
.await
|
&bucket,
|
||||||
|
key,
|
||||||
|
Some(ObjectFilter::IsData),
|
||||||
|
count,
|
||||||
|
EnumerationOrder::Forward,
|
||||||
|
)
|
||||||
|
.await
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -165,8 +172,14 @@ pub async fn handle_list_multipart_upload(
|
||||||
let io = |bucket, key, count| {
|
let io = |bucket, key, count| {
|
||||||
let t = &garage.object_table;
|
let t = &garage.object_table;
|
||||||
async move {
|
async move {
|
||||||
t.get_range(&bucket, key, Some(ObjectFilter::IsUploading), count)
|
t.get_range(
|
||||||
.await
|
&bucket,
|
||||||
|
key,
|
||||||
|
Some(ObjectFilter::IsUploading),
|
||||||
|
count,
|
||||||
|
EnumerationOrder::Forward,
|
||||||
|
)
|
||||||
|
.await
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -569,13 +582,19 @@ impl ListObjectsQuery {
|
||||||
// representing the key to start with.
|
// representing the key to start with.
|
||||||
(Some(token), _) => match &token[..1] {
|
(Some(token), _) => match &token[..1] {
|
||||||
"[" => Ok(RangeBegin::IncludingKey {
|
"[" => Ok(RangeBegin::IncludingKey {
|
||||||
key: String::from_utf8(base64::decode(token[1..].as_bytes())?)?,
|
key: String::from_utf8(
|
||||||
|
base64::decode(token[1..].as_bytes())
|
||||||
|
.ok_or_bad_request("Invalid continuation token")?,
|
||||||
|
)?,
|
||||||
fallback_key: None,
|
fallback_key: None,
|
||||||
}),
|
}),
|
||||||
"]" => Ok(RangeBegin::AfterKey {
|
"]" => Ok(RangeBegin::AfterKey {
|
||||||
key: String::from_utf8(base64::decode(token[1..].as_bytes())?)?,
|
key: String::from_utf8(
|
||||||
|
base64::decode(token[1..].as_bytes())
|
||||||
|
.ok_or_bad_request("Invalid continuation token")?,
|
||||||
|
)?,
|
||||||
}),
|
}),
|
||||||
_ => Err(Error::BadRequest("Invalid continuation token".to_string())),
|
_ => Err(Error::bad_request("Invalid continuation token")),
|
||||||
},
|
},
|
||||||
|
|
||||||
// StartAfter has defined semantics in the spec:
|
// StartAfter has defined semantics in the spec:
|
||||||
|
@ -923,39 +942,13 @@ fn uriencode_maybe(s: &str, yes: bool) -> s3_xml::Value {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
const UTF8_BEFORE_LAST_CHAR: char = '\u{10FFFE}';
|
|
||||||
|
|
||||||
/// Compute the key after the prefix
|
|
||||||
fn key_after_prefix(pfx: &str) -> Option<String> {
|
|
||||||
let mut next = pfx.to_string();
|
|
||||||
while !next.is_empty() {
|
|
||||||
let tail = next.pop().unwrap();
|
|
||||||
if tail >= char::MAX {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Circumvent a limitation of RangeFrom that overflow earlier than needed
|
|
||||||
// See: https://doc.rust-lang.org/core/ops/struct.RangeFrom.html
|
|
||||||
let new_tail = if tail == UTF8_BEFORE_LAST_CHAR {
|
|
||||||
char::MAX
|
|
||||||
} else {
|
|
||||||
(tail..).nth(1).unwrap()
|
|
||||||
};
|
|
||||||
|
|
||||||
next.push(new_tail);
|
|
||||||
return Some(next);
|
|
||||||
}
|
|
||||||
|
|
||||||
None
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Unit tests of this module
|
* Unit tests of this module
|
||||||
*/
|
*/
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use super::*;
|
use super::*;
|
||||||
use garage_model::version_table::*;
|
use garage_model::s3::version_table::*;
|
||||||
use garage_util::*;
|
use garage_util::*;
|
||||||
use std::iter::FromIterator;
|
use std::iter::FromIterator;
|
||||||
|
|
||||||
|
@ -1002,39 +995,6 @@ mod tests {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_key_after_prefix() {
|
|
||||||
assert_eq!(UTF8_BEFORE_LAST_CHAR as u32, (char::MAX as u32) - 1);
|
|
||||||
assert_eq!(key_after_prefix("a/b/").unwrap().as_str(), "a/b0");
|
|
||||||
assert_eq!(key_after_prefix("€").unwrap().as_str(), "₭");
|
|
||||||
assert_eq!(
|
|
||||||
key_after_prefix("").unwrap().as_str(),
|
|
||||||
String::from(char::from_u32(0x10FFFE).unwrap())
|
|
||||||
);
|
|
||||||
|
|
||||||
// When the last character is the biggest UTF8 char
|
|
||||||
let a = String::from_iter(['a', char::MAX].iter());
|
|
||||||
assert_eq!(key_after_prefix(a.as_str()).unwrap().as_str(), "b");
|
|
||||||
|
|
||||||
// When all characters are the biggest UTF8 char
|
|
||||||
let b = String::from_iter([char::MAX; 3].iter());
|
|
||||||
assert!(key_after_prefix(b.as_str()).is_none());
|
|
||||||
|
|
||||||
// Check utf8 surrogates
|
|
||||||
let c = String::from('\u{D7FF}');
|
|
||||||
assert_eq!(
|
|
||||||
key_after_prefix(c.as_str()).unwrap().as_str(),
|
|
||||||
String::from('\u{E000}')
|
|
||||||
);
|
|
||||||
|
|
||||||
// Check the character before the biggest one
|
|
||||||
let d = String::from('\u{10FFFE}');
|
|
||||||
assert_eq!(
|
|
||||||
key_after_prefix(d.as_str()).unwrap().as_str(),
|
|
||||||
String::from(char::MAX)
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_common_prefixes() {
|
fn test_common_prefixes() {
|
||||||
let mut query = query();
|
let mut query = query();
|
15
src/api/s3/mod.rs
Normal file
15
src/api/s3/mod.rs
Normal file
|
@ -0,0 +1,15 @@
|
||||||
|
pub mod api_server;
|
||||||
|
pub mod error;
|
||||||
|
|
||||||
|
mod bucket;
|
||||||
|
mod copy;
|
||||||
|
pub mod cors;
|
||||||
|
mod delete;
|
||||||
|
pub mod get;
|
||||||
|
mod list;
|
||||||
|
mod post_object;
|
||||||
|
mod put;
|
||||||
|
mod website;
|
||||||
|
|
||||||
|
mod router;
|
||||||
|
pub mod xml;
|
|
@ -14,10 +14,9 @@ use serde::Deserialize;
|
||||||
|
|
||||||
use garage_model::garage::Garage;
|
use garage_model::garage::Garage;
|
||||||
|
|
||||||
use crate::api_server::resolve_bucket;
|
use crate::s3::error::*;
|
||||||
use crate::error::*;
|
use crate::s3::put::{get_headers, save_stream};
|
||||||
use crate::s3_put::{get_headers, save_stream};
|
use crate::s3::xml as s3_xml;
|
||||||
use crate::s3_xml;
|
|
||||||
use crate::signature::payload::{parse_date, verify_v4};
|
use crate::signature::payload::{parse_date, verify_v4};
|
||||||
|
|
||||||
pub async fn handle_post_object(
|
pub async fn handle_post_object(
|
||||||
|
@ -48,9 +47,7 @@ pub async fn handle_post_object(
|
||||||
let field = if let Some(field) = multipart.next_field().await? {
|
let field = if let Some(field) = multipart.next_field().await? {
|
||||||
field
|
field
|
||||||
} else {
|
} else {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::bad_request("Request did not contain a file"));
|
||||||
"Request did not contain a file".to_owned(),
|
|
||||||
));
|
|
||||||
};
|
};
|
||||||
let name: HeaderName = if let Some(Ok(name)) = field.name().map(TryInto::try_into) {
|
let name: HeaderName = if let Some(Ok(name)) = field.name().map(TryInto::try_into) {
|
||||||
name
|
name
|
||||||
|
@ -66,14 +63,14 @@ pub async fn handle_post_object(
|
||||||
"tag" => (/* tag need to be reencoded, but we don't support them yet anyway */),
|
"tag" => (/* tag need to be reencoded, but we don't support them yet anyway */),
|
||||||
"acl" => {
|
"acl" => {
|
||||||
if params.insert("x-amz-acl", content).is_some() {
|
if params.insert("x-amz-acl", content).is_some() {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::bad_request(
|
||||||
"Field 'acl' provided more than one time".to_string(),
|
"Field 'acl' provided more than one time",
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
_ => {
|
_ => {
|
||||||
if params.insert(&name, content).is_some() {
|
if params.insert(&name, content).is_some() {
|
||||||
return Err(Error::BadRequest(format!(
|
return Err(Error::bad_request(format!(
|
||||||
"Field '{}' provided more than one time",
|
"Field '{}' provided more than one time",
|
||||||
name
|
name
|
||||||
)));
|
)));
|
||||||
|
@ -90,9 +87,7 @@ pub async fn handle_post_object(
|
||||||
.to_str()?;
|
.to_str()?;
|
||||||
let credential = params
|
let credential = params
|
||||||
.get("x-amz-credential")
|
.get("x-amz-credential")
|
||||||
.ok_or_else(|| {
|
.ok_or_else(|| Error::forbidden("Garage does not support anonymous access yet"))?
|
||||||
Error::Forbidden("Garage does not support anonymous access yet".to_string())
|
|
||||||
})?
|
|
||||||
.to_str()?;
|
.to_str()?;
|
||||||
let policy = params
|
let policy = params
|
||||||
.get("policy")
|
.get("policy")
|
||||||
|
@ -119,17 +114,26 @@ pub async fn handle_post_object(
|
||||||
};
|
};
|
||||||
|
|
||||||
let date = parse_date(date)?;
|
let date = parse_date(date)?;
|
||||||
let api_key = verify_v4(&garage, credential, &date, signature, policy.as_bytes()).await?;
|
let api_key = verify_v4(
|
||||||
|
&garage,
|
||||||
|
"s3",
|
||||||
|
credential,
|
||||||
|
&date,
|
||||||
|
signature,
|
||||||
|
policy.as_bytes(),
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
let bucket_id = resolve_bucket(&garage, &bucket, &api_key).await?;
|
let bucket_id = garage
|
||||||
|
.bucket_helper()
|
||||||
|
.resolve_bucket(&bucket, &api_key)
|
||||||
|
.await?;
|
||||||
|
|
||||||
if !api_key.allow_write(&bucket_id) {
|
if !api_key.allow_write(&bucket_id) {
|
||||||
return Err(Error::Forbidden(
|
return Err(Error::forbidden("Operation is not allowed for this key."));
|
||||||
"Operation is not allowed for this key.".to_string(),
|
|
||||||
));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
let decoded_policy = base64::decode(&policy)?;
|
let decoded_policy = base64::decode(&policy).ok_or_bad_request("Invalid policy")?;
|
||||||
let decoded_policy: Policy =
|
let decoded_policy: Policy =
|
||||||
serde_json::from_slice(&decoded_policy).ok_or_bad_request("Invalid policy")?;
|
serde_json::from_slice(&decoded_policy).ok_or_bad_request("Invalid policy")?;
|
||||||
|
|
||||||
|
@ -137,9 +141,7 @@ pub async fn handle_post_object(
|
||||||
.ok_or_bad_request("Invalid expiration date")?
|
.ok_or_bad_request("Invalid expiration date")?
|
||||||
.into();
|
.into();
|
||||||
if Utc::now() - expiration > Duration::zero() {
|
if Utc::now() - expiration > Duration::zero() {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::bad_request("Expiration date is in the paste"));
|
||||||
"Expiration date is in the paste".to_string(),
|
|
||||||
));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut conditions = decoded_policy.into_conditions()?;
|
let mut conditions = decoded_policy.into_conditions()?;
|
||||||
|
@ -151,7 +153,7 @@ pub async fn handle_post_object(
|
||||||
"policy" | "x-amz-signature" => (), // this is always accepted, as it's required to validate other fields
|
"policy" | "x-amz-signature" => (), // this is always accepted, as it's required to validate other fields
|
||||||
"content-type" => {
|
"content-type" => {
|
||||||
let conds = conditions.params.remove("content-type").ok_or_else(|| {
|
let conds = conditions.params.remove("content-type").ok_or_else(|| {
|
||||||
Error::BadRequest(format!("Key '{}' is not allowed in policy", param_key))
|
Error::bad_request(format!("Key '{}' is not allowed in policy", param_key))
|
||||||
})?;
|
})?;
|
||||||
for cond in conds {
|
for cond in conds {
|
||||||
let ok = match cond {
|
let ok = match cond {
|
||||||
|
@ -161,7 +163,7 @@ pub async fn handle_post_object(
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
if !ok {
|
if !ok {
|
||||||
return Err(Error::BadRequest(format!(
|
return Err(Error::bad_request(format!(
|
||||||
"Key '{}' has value not allowed in policy",
|
"Key '{}' has value not allowed in policy",
|
||||||
param_key
|
param_key
|
||||||
)));
|
)));
|
||||||
|
@ -170,7 +172,7 @@ pub async fn handle_post_object(
|
||||||
}
|
}
|
||||||
"key" => {
|
"key" => {
|
||||||
let conds = conditions.params.remove("key").ok_or_else(|| {
|
let conds = conditions.params.remove("key").ok_or_else(|| {
|
||||||
Error::BadRequest(format!("Key '{}' is not allowed in policy", param_key))
|
Error::bad_request(format!("Key '{}' is not allowed in policy", param_key))
|
||||||
})?;
|
})?;
|
||||||
for cond in conds {
|
for cond in conds {
|
||||||
let ok = match cond {
|
let ok = match cond {
|
||||||
|
@ -178,7 +180,7 @@ pub async fn handle_post_object(
|
||||||
Operation::StartsWith(s) => key.starts_with(&s),
|
Operation::StartsWith(s) => key.starts_with(&s),
|
||||||
};
|
};
|
||||||
if !ok {
|
if !ok {
|
||||||
return Err(Error::BadRequest(format!(
|
return Err(Error::bad_request(format!(
|
||||||
"Key '{}' has value not allowed in policy",
|
"Key '{}' has value not allowed in policy",
|
||||||
param_key
|
param_key
|
||||||
)));
|
)));
|
||||||
|
@ -193,7 +195,7 @@ pub async fn handle_post_object(
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
let conds = conditions.params.remove(¶m_key).ok_or_else(|| {
|
let conds = conditions.params.remove(¶m_key).ok_or_else(|| {
|
||||||
Error::BadRequest(format!("Key '{}' is not allowed in policy", param_key))
|
Error::bad_request(format!("Key '{}' is not allowed in policy", param_key))
|
||||||
})?;
|
})?;
|
||||||
for cond in conds {
|
for cond in conds {
|
||||||
let ok = match cond {
|
let ok = match cond {
|
||||||
|
@ -201,7 +203,7 @@ pub async fn handle_post_object(
|
||||||
Operation::StartsWith(s) => value.to_str()?.starts_with(s.as_str()),
|
Operation::StartsWith(s) => value.to_str()?.starts_with(s.as_str()),
|
||||||
};
|
};
|
||||||
if !ok {
|
if !ok {
|
||||||
return Err(Error::BadRequest(format!(
|
return Err(Error::bad_request(format!(
|
||||||
"Key '{}' has value not allowed in policy",
|
"Key '{}' has value not allowed in policy",
|
||||||
param_key
|
param_key
|
||||||
)));
|
)));
|
||||||
|
@ -212,7 +214,7 @@ pub async fn handle_post_object(
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some((param_key, _)) = conditions.params.iter().next() {
|
if let Some((param_key, _)) = conditions.params.iter().next() {
|
||||||
return Err(Error::BadRequest(format!(
|
return Err(Error::bad_request(format!(
|
||||||
"Key '{}' is required in policy, but no value was provided",
|
"Key '{}' is required in policy, but no value was provided",
|
||||||
param_key
|
param_key
|
||||||
)));
|
)));
|
||||||
|
@ -318,7 +320,7 @@ impl Policy {
|
||||||
match condition {
|
match condition {
|
||||||
PolicyCondition::Equal(map) => {
|
PolicyCondition::Equal(map) => {
|
||||||
if map.len() != 1 {
|
if map.len() != 1 {
|
||||||
return Err(Error::BadRequest("Invalid policy item".to_owned()));
|
return Err(Error::bad_request("Invalid policy item"));
|
||||||
}
|
}
|
||||||
let (mut k, v) = map.into_iter().next().expect("size was verified");
|
let (mut k, v) = map.into_iter().next().expect("size was verified");
|
||||||
k.make_ascii_lowercase();
|
k.make_ascii_lowercase();
|
||||||
|
@ -326,7 +328,7 @@ impl Policy {
|
||||||
}
|
}
|
||||||
PolicyCondition::OtherOp([cond, mut key, value]) => {
|
PolicyCondition::OtherOp([cond, mut key, value]) => {
|
||||||
if key.remove(0) != '$' {
|
if key.remove(0) != '$' {
|
||||||
return Err(Error::BadRequest("Invalid policy item".to_owned()));
|
return Err(Error::bad_request("Invalid policy item"));
|
||||||
}
|
}
|
||||||
key.make_ascii_lowercase();
|
key.make_ascii_lowercase();
|
||||||
match cond.as_str() {
|
match cond.as_str() {
|
||||||
|
@ -339,7 +341,7 @@ impl Policy {
|
||||||
.or_default()
|
.or_default()
|
||||||
.push(Operation::StartsWith(value));
|
.push(Operation::StartsWith(value));
|
||||||
}
|
}
|
||||||
_ => return Err(Error::BadRequest("Invalid policy item".to_owned())),
|
_ => return Err(Error::bad_request("Invalid policy item")),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
PolicyCondition::SizeRange(key, min, max) => {
|
PolicyCondition::SizeRange(key, min, max) => {
|
||||||
|
@ -347,7 +349,7 @@ impl Policy {
|
||||||
length.0 = length.0.max(min);
|
length.0 = length.0.max(min);
|
||||||
length.1 = length.1.min(max);
|
length.1 = length.1.min(max);
|
||||||
} else {
|
} else {
|
||||||
return Err(Error::BadRequest("Invalid policy item".to_owned()));
|
return Err(Error::bad_request("Invalid policy item"));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -412,15 +414,15 @@ where
|
||||||
self.read += bytes.len() as u64;
|
self.read += bytes.len() as u64;
|
||||||
// optimization to fail early when we know before the end it's too long
|
// optimization to fail early when we know before the end it's too long
|
||||||
if self.length.end() < &self.read {
|
if self.length.end() < &self.read {
|
||||||
return Poll::Ready(Some(Err(Error::BadRequest(
|
return Poll::Ready(Some(Err(Error::bad_request(
|
||||||
"File size does not match policy".to_owned(),
|
"File size does not match policy",
|
||||||
))));
|
))));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Poll::Ready(None) => {
|
Poll::Ready(None) => {
|
||||||
if !self.length.contains(&self.read) {
|
if !self.length.contains(&self.read) {
|
||||||
return Poll::Ready(Some(Err(Error::BadRequest(
|
return Poll::Ready(Some(Err(Error::bad_request(
|
||||||
"File size does not match policy".to_owned(),
|
"File size does not match policy",
|
||||||
))));
|
))));
|
||||||
}
|
}
|
||||||
}
|
}
|
|
@ -14,13 +14,13 @@ use garage_util::error::Error as GarageError;
|
||||||
use garage_util::time::*;
|
use garage_util::time::*;
|
||||||
|
|
||||||
use garage_block::manager::INLINE_THRESHOLD;
|
use garage_block::manager::INLINE_THRESHOLD;
|
||||||
use garage_model::block_ref_table::*;
|
|
||||||
use garage_model::garage::Garage;
|
use garage_model::garage::Garage;
|
||||||
use garage_model::object_table::*;
|
use garage_model::s3::block_ref_table::*;
|
||||||
use garage_model::version_table::*;
|
use garage_model::s3::object_table::*;
|
||||||
|
use garage_model::s3::version_table::*;
|
||||||
|
|
||||||
use crate::error::*;
|
use crate::s3::error::*;
|
||||||
use crate::s3_xml;
|
use crate::s3::xml as s3_xml;
|
||||||
use crate::signature::verify_signed_content;
|
use crate::signature::verify_signed_content;
|
||||||
|
|
||||||
pub async fn handle_put(
|
pub async fn handle_put(
|
||||||
|
@ -183,8 +183,8 @@ fn ensure_checksum_matches(
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
if let Some(expected_sha256) = content_sha256 {
|
if let Some(expected_sha256) = content_sha256 {
|
||||||
if expected_sha256 != data_sha256sum {
|
if expected_sha256 != data_sha256sum {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::bad_request(
|
||||||
"Unable to validate x-amz-content-sha256".to_string(),
|
"Unable to validate x-amz-content-sha256",
|
||||||
));
|
));
|
||||||
} else {
|
} else {
|
||||||
trace!("Successfully validated x-amz-content-sha256");
|
trace!("Successfully validated x-amz-content-sha256");
|
||||||
|
@ -192,9 +192,7 @@ fn ensure_checksum_matches(
|
||||||
}
|
}
|
||||||
if let Some(expected_md5) = content_md5 {
|
if let Some(expected_md5) = content_md5 {
|
||||||
if expected_md5.trim_matches('"') != base64::encode(data_md5sum) {
|
if expected_md5.trim_matches('"') != base64::encode(data_md5sum) {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::bad_request("Unable to validate content-md5"));
|
||||||
"Unable to validate content-md5".to_string(),
|
|
||||||
));
|
|
||||||
} else {
|
} else {
|
||||||
trace!("Successfully validated content-md5");
|
trace!("Successfully validated content-md5");
|
||||||
}
|
}
|
||||||
|
@ -428,7 +426,7 @@ pub async fn handle_put_part(
|
||||||
// Check part hasn't already been uploaded
|
// Check part hasn't already been uploaded
|
||||||
if let Some(v) = version {
|
if let Some(v) = version {
|
||||||
if v.has_part_number(part_number) {
|
if v.has_part_number(part_number) {
|
||||||
return Err(Error::BadRequest(format!(
|
return Err(Error::bad_request(format!(
|
||||||
"Part number {} has already been uploaded",
|
"Part number {} has already been uploaded",
|
||||||
part_number
|
part_number
|
||||||
)));
|
)));
|
||||||
|
@ -513,7 +511,7 @@ pub async fn handle_complete_multipart_upload(
|
||||||
|
|
||||||
let version = version.ok_or(Error::NoSuchKey)?;
|
let version = version.ok_or(Error::NoSuchKey)?;
|
||||||
if version.blocks.is_empty() {
|
if version.blocks.is_empty() {
|
||||||
return Err(Error::BadRequest("No data was uploaded".to_string()));
|
return Err(Error::bad_request("No data was uploaded"));
|
||||||
}
|
}
|
||||||
|
|
||||||
let headers = match object_version.state {
|
let headers = match object_version.state {
|
||||||
|
@ -574,8 +572,8 @@ pub async fn handle_complete_multipart_upload(
|
||||||
.map(|x| x.part_number)
|
.map(|x| x.part_number)
|
||||||
.eq(block_parts.into_iter());
|
.eq(block_parts.into_iter());
|
||||||
if !same_parts {
|
if !same_parts {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::bad_request(
|
||||||
"Part numbers in block list and part list do not match. This can happen if a part was partially uploaded. Please abort the multipart upload and try again.".into(),
|
"Part numbers in block list and part list do not match. This can happen if a part was partially uploaded. Please abort the multipart upload and try again."
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,131 +1,13 @@
|
||||||
use crate::error::{Error, OkOrBadRequest};
|
|
||||||
|
|
||||||
use std::borrow::Cow;
|
use std::borrow::Cow;
|
||||||
|
|
||||||
use hyper::header::HeaderValue;
|
use hyper::header::HeaderValue;
|
||||||
use hyper::{HeaderMap, Method, Request};
|
use hyper::{HeaderMap, Method, Request};
|
||||||
|
|
||||||
/// This macro is used to generate very repetitive match {} blocks in this module
|
use crate::helpers::Authorization;
|
||||||
/// It is _not_ made to be used anywhere else
|
use crate::router_macros::{generateQueryParameters, router_match};
|
||||||
macro_rules! s3_match {
|
use crate::s3::error::*;
|
||||||
(@match $enum:expr , [ $($endpoint:ident,)* ]) => {{
|
|
||||||
// usage: s3_match {@match my_enum, [ VariantWithField1, VariantWithField2 ..] }
|
|
||||||
// returns true if the variant was one of the listed variants, false otherwise.
|
|
||||||
use Endpoint::*;
|
|
||||||
match $enum {
|
|
||||||
$(
|
|
||||||
$endpoint { .. } => true,
|
|
||||||
)*
|
|
||||||
_ => false
|
|
||||||
}
|
|
||||||
}};
|
|
||||||
(@extract $enum:expr , $param:ident, [ $($endpoint:ident,)* ]) => {{
|
|
||||||
// usage: s3_match {@extract my_enum, field_name, [ VariantWithField1, VariantWithField2 ..] }
|
|
||||||
// returns Some(field_value), or None if the variant was not one of the listed variants.
|
|
||||||
use Endpoint::*;
|
|
||||||
match $enum {
|
|
||||||
$(
|
|
||||||
$endpoint {$param, ..} => Some($param),
|
|
||||||
)*
|
|
||||||
_ => None
|
|
||||||
}
|
|
||||||
}};
|
|
||||||
(@gen_parser ($keyword:expr, $key:expr, $query:expr, $header:expr),
|
|
||||||
key: [$($kw_k:ident $(if $required_k:ident)? $(header $header_k:expr)? => $api_k:ident $(($($conv_k:ident :: $param_k:ident),*))?,)*],
|
|
||||||
no_key: [$($kw_nk:ident $(if $required_nk:ident)? $(if_header $header_nk:expr)? => $api_nk:ident $(($($conv_nk:ident :: $param_nk:ident),*))?,)*]) => {{
|
|
||||||
// usage: s3_match {@gen_parser (keyword, key, query, header),
|
|
||||||
// key: [
|
|
||||||
// SOME_KEYWORD => VariantWithKey,
|
|
||||||
// ...
|
|
||||||
// ],
|
|
||||||
// no_key: [
|
|
||||||
// SOME_KEYWORD => VariantWithoutKey,
|
|
||||||
// ...
|
|
||||||
// ]
|
|
||||||
// }
|
|
||||||
// See in from_{method} for more detailed usage.
|
|
||||||
use Endpoint::*;
|
|
||||||
use keywords::*;
|
|
||||||
match ($keyword, !$key.is_empty()){
|
|
||||||
$(
|
|
||||||
($kw_k, true) if true $(&& $query.$required_k.is_some())? $(&& $header.contains_key($header_k))? => Ok($api_k {
|
|
||||||
key: $key,
|
|
||||||
$($(
|
|
||||||
$param_k: s3_match!(@@parse_param $query, $conv_k, $param_k),
|
|
||||||
)*)?
|
|
||||||
}),
|
|
||||||
)*
|
|
||||||
$(
|
|
||||||
($kw_nk, false) $(if $query.$required_nk.is_some())? $(if $header.contains($header_nk))? => Ok($api_nk {
|
|
||||||
$($(
|
|
||||||
$param_nk: s3_match!(@@parse_param $query, $conv_nk, $param_nk),
|
|
||||||
)*)?
|
|
||||||
}),
|
|
||||||
)*
|
|
||||||
(kw, _) => Err(Error::BadRequest(format!("Invalid endpoint: {}", kw)))
|
|
||||||
}
|
|
||||||
}};
|
|
||||||
|
|
||||||
(@@parse_param $query:expr, query_opt, $param:ident) => {{
|
router_match! {@func
|
||||||
// extract optional query parameter
|
|
||||||
$query.$param.take().map(|param| param.into_owned())
|
|
||||||
}};
|
|
||||||
(@@parse_param $query:expr, query, $param:ident) => {{
|
|
||||||
// extract mendatory query parameter
|
|
||||||
$query.$param.take().ok_or_bad_request("Missing argument for endpoint")?.into_owned()
|
|
||||||
}};
|
|
||||||
(@@parse_param $query:expr, opt_parse, $param:ident) => {{
|
|
||||||
// extract and parse optional query parameter
|
|
||||||
// missing parameter is file, however parse error is reported as an error
|
|
||||||
$query.$param
|
|
||||||
.take()
|
|
||||||
.map(|param| param.parse())
|
|
||||||
.transpose()
|
|
||||||
.map_err(|_| Error::BadRequest("Failed to parse query parameter".to_owned()))?
|
|
||||||
}};
|
|
||||||
(@@parse_param $query:expr, parse, $param:ident) => {{
|
|
||||||
// extract and parse mandatory query parameter
|
|
||||||
// both missing and un-parseable parameters are reported as errors
|
|
||||||
$query.$param.take().ok_or_bad_request("Missing argument for endpoint")?
|
|
||||||
.parse()
|
|
||||||
.map_err(|_| Error::BadRequest("Failed to parse query parameter".to_owned()))?
|
|
||||||
}};
|
|
||||||
(@func
|
|
||||||
$(#[$doc:meta])*
|
|
||||||
pub enum Endpoint {
|
|
||||||
$(
|
|
||||||
$(#[$outer:meta])*
|
|
||||||
$variant:ident $({
|
|
||||||
$($name:ident: $ty:ty,)*
|
|
||||||
})?,
|
|
||||||
)*
|
|
||||||
}) => {
|
|
||||||
$(#[$doc])*
|
|
||||||
pub enum Endpoint {
|
|
||||||
$(
|
|
||||||
$(#[$outer])*
|
|
||||||
$variant $({
|
|
||||||
$($name: $ty, )*
|
|
||||||
})?,
|
|
||||||
)*
|
|
||||||
}
|
|
||||||
impl Endpoint {
|
|
||||||
pub fn name(&self) -> &'static str {
|
|
||||||
match self {
|
|
||||||
$(Endpoint::$variant $({ $($name: _,)* .. })? => stringify!($variant),)*
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
(@if ($($cond:tt)+) then ($($then:tt)*) else ($($else:tt)*)) => {
|
|
||||||
$($then)*
|
|
||||||
};
|
|
||||||
(@if () then ($($then:tt)*) else ($($else:tt)*)) => {
|
|
||||||
$($else)*
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
s3_match! {@func
|
|
||||||
|
|
||||||
/// List of all S3 API endpoints.
|
/// List of all S3 API endpoints.
|
||||||
///
|
///
|
||||||
|
@ -460,7 +342,7 @@ impl Endpoint {
|
||||||
Method::POST => Self::from_post(key, &mut query)?,
|
Method::POST => Self::from_post(key, &mut query)?,
|
||||||
Method::PUT => Self::from_put(key, &mut query, req.headers())?,
|
Method::PUT => Self::from_put(key, &mut query, req.headers())?,
|
||||||
Method::DELETE => Self::from_delete(key, &mut query)?,
|
Method::DELETE => Self::from_delete(key, &mut query)?,
|
||||||
_ => return Err(Error::BadRequest("Unknown method".to_owned())),
|
_ => return Err(Error::bad_request("Unknown method")),
|
||||||
};
|
};
|
||||||
|
|
||||||
if let Some(message) = query.nonempty_message() {
|
if let Some(message) = query.nonempty_message() {
|
||||||
|
@ -471,7 +353,7 @@ impl Endpoint {
|
||||||
|
|
||||||
/// Determine which endpoint a request is for, knowing it is a GET.
|
/// Determine which endpoint a request is for, knowing it is a GET.
|
||||||
fn from_get(key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
fn from_get(key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
||||||
s3_match! {
|
router_match! {
|
||||||
@gen_parser
|
@gen_parser
|
||||||
(query.keyword.take().unwrap_or_default().as_ref(), key, query, None),
|
(query.keyword.take().unwrap_or_default().as_ref(), key, query, None),
|
||||||
key: [
|
key: [
|
||||||
|
@ -528,7 +410,7 @@ impl Endpoint {
|
||||||
|
|
||||||
/// Determine which endpoint a request is for, knowing it is a HEAD.
|
/// Determine which endpoint a request is for, knowing it is a HEAD.
|
||||||
fn from_head(key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
fn from_head(key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
||||||
s3_match! {
|
router_match! {
|
||||||
@gen_parser
|
@gen_parser
|
||||||
(query.keyword.take().unwrap_or_default().as_ref(), key, query, None),
|
(query.keyword.take().unwrap_or_default().as_ref(), key, query, None),
|
||||||
key: [
|
key: [
|
||||||
|
@ -542,7 +424,7 @@ impl Endpoint {
|
||||||
|
|
||||||
/// Determine which endpoint a request is for, knowing it is a POST.
|
/// Determine which endpoint a request is for, knowing it is a POST.
|
||||||
fn from_post(key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
fn from_post(key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
||||||
s3_match! {
|
router_match! {
|
||||||
@gen_parser
|
@gen_parser
|
||||||
(query.keyword.take().unwrap_or_default().as_ref(), key, query, None),
|
(query.keyword.take().unwrap_or_default().as_ref(), key, query, None),
|
||||||
key: [
|
key: [
|
||||||
|
@ -564,7 +446,7 @@ impl Endpoint {
|
||||||
query: &mut QueryParameters<'_>,
|
query: &mut QueryParameters<'_>,
|
||||||
headers: &HeaderMap<HeaderValue>,
|
headers: &HeaderMap<HeaderValue>,
|
||||||
) -> Result<Self, Error> {
|
) -> Result<Self, Error> {
|
||||||
s3_match! {
|
router_match! {
|
||||||
@gen_parser
|
@gen_parser
|
||||||
(query.keyword.take().unwrap_or_default().as_ref(), key, query, headers),
|
(query.keyword.take().unwrap_or_default().as_ref(), key, query, headers),
|
||||||
key: [
|
key: [
|
||||||
|
@ -606,7 +488,7 @@ impl Endpoint {
|
||||||
|
|
||||||
/// Determine which endpoint a request is for, knowing it is a DELETE.
|
/// Determine which endpoint a request is for, knowing it is a DELETE.
|
||||||
fn from_delete(key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
fn from_delete(key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
||||||
s3_match! {
|
router_match! {
|
||||||
@gen_parser
|
@gen_parser
|
||||||
(query.keyword.take().unwrap_or_default().as_ref(), key, query, None),
|
(query.keyword.take().unwrap_or_default().as_ref(), key, query, None),
|
||||||
key: [
|
key: [
|
||||||
|
@ -636,7 +518,7 @@ impl Endpoint {
|
||||||
/// Get the key the request target. Returns None for requests which don't use a key.
|
/// Get the key the request target. Returns None for requests which don't use a key.
|
||||||
#[allow(dead_code)]
|
#[allow(dead_code)]
|
||||||
pub fn get_key(&self) -> Option<&str> {
|
pub fn get_key(&self) -> Option<&str> {
|
||||||
s3_match! {
|
router_match! {
|
||||||
@extract
|
@extract
|
||||||
self,
|
self,
|
||||||
key,
|
key,
|
||||||
|
@ -673,7 +555,7 @@ impl Endpoint {
|
||||||
if let Endpoint::ListBuckets = self {
|
if let Endpoint::ListBuckets = self {
|
||||||
return Authorization::None;
|
return Authorization::None;
|
||||||
};
|
};
|
||||||
let readonly = s3_match! {
|
let readonly = router_match! {
|
||||||
@match
|
@match
|
||||||
self,
|
self,
|
||||||
[
|
[
|
||||||
|
@ -717,7 +599,7 @@ impl Endpoint {
|
||||||
SelectObjectContent,
|
SelectObjectContent,
|
||||||
]
|
]
|
||||||
};
|
};
|
||||||
let owner = s3_match! {
|
let owner = router_match! {
|
||||||
@match
|
@match
|
||||||
self,
|
self,
|
||||||
[
|
[
|
||||||
|
@ -740,87 +622,6 @@ impl Endpoint {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// What kind of authorization is required to perform a given action
|
|
||||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
|
||||||
pub enum Authorization {
|
|
||||||
/// No authorization is required
|
|
||||||
None,
|
|
||||||
/// Having Read permission on bucket
|
|
||||||
Read,
|
|
||||||
/// Having Write permission on bucket
|
|
||||||
Write,
|
|
||||||
/// Having Owner permission on bucket
|
|
||||||
Owner,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// This macro is used to generate part of the code in this module. It must be called only one, and
|
|
||||||
/// is useless outside of this module.
|
|
||||||
macro_rules! generateQueryParameters {
|
|
||||||
( $($rest:expr => $name:ident),* ) => {
|
|
||||||
/// Struct containing all query parameters used in endpoints. Think of it as an HashMap,
|
|
||||||
/// but with keys statically known.
|
|
||||||
#[derive(Debug, Default)]
|
|
||||||
struct QueryParameters<'a> {
|
|
||||||
keyword: Option<Cow<'a, str>>,
|
|
||||||
$(
|
|
||||||
$name: Option<Cow<'a, str>>,
|
|
||||||
)*
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<'a> QueryParameters<'a> {
|
|
||||||
/// Build this struct from the query part of an URI.
|
|
||||||
fn from_query(query: &'a str) -> Result<Self, Error> {
|
|
||||||
let mut res: Self = Default::default();
|
|
||||||
for (k, v) in url::form_urlencoded::parse(query.as_bytes()) {
|
|
||||||
let repeated = match k.as_ref() {
|
|
||||||
$(
|
|
||||||
$rest => if !v.is_empty() {
|
|
||||||
res.$name.replace(v).is_some()
|
|
||||||
} else {
|
|
||||||
false
|
|
||||||
},
|
|
||||||
)*
|
|
||||||
_ => {
|
|
||||||
if k.starts_with("response-") || k.starts_with("X-Amz-") {
|
|
||||||
false
|
|
||||||
} else if v.as_ref().is_empty() {
|
|
||||||
if res.keyword.replace(k).is_some() {
|
|
||||||
return Err(Error::BadRequest("Multiple keywords".to_owned()));
|
|
||||||
}
|
|
||||||
continue;
|
|
||||||
} else {
|
|
||||||
debug!("Received an unknown query parameter: '{}'", k);
|
|
||||||
false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
if repeated {
|
|
||||||
return Err(Error::BadRequest(format!(
|
|
||||||
"Query parameter repeated: '{}'",
|
|
||||||
k
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(res)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Get an error message in case not all parameters where used when extracting them to
|
|
||||||
/// build an Enpoint variant
|
|
||||||
fn nonempty_message(&self) -> Option<&str> {
|
|
||||||
if self.keyword.is_some() {
|
|
||||||
Some("Keyword not used")
|
|
||||||
} $(
|
|
||||||
else if self.$name.is_some() {
|
|
||||||
Some(concat!("'", $rest, "'"))
|
|
||||||
}
|
|
||||||
)* else {
|
|
||||||
None
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// parameter name => struct field
|
// parameter name => struct field
|
||||||
generateQueryParameters! {
|
generateQueryParameters! {
|
||||||
"continuation-token" => continuation_token,
|
"continuation-token" => continuation_token,
|
|
@ -4,13 +4,12 @@ use std::sync::Arc;
|
||||||
use hyper::{Body, Request, Response, StatusCode};
|
use hyper::{Body, Request, Response, StatusCode};
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use crate::error::*;
|
use crate::s3::error::*;
|
||||||
use crate::s3_xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
|
use crate::s3::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
|
||||||
use crate::signature::verify_signed_content;
|
use crate::signature::verify_signed_content;
|
||||||
|
|
||||||
use garage_model::bucket_table::*;
|
use garage_model::bucket_table::*;
|
||||||
use garage_model::garage::Garage;
|
use garage_model::garage::Garage;
|
||||||
use garage_table::*;
|
|
||||||
use garage_util::data::*;
|
use garage_util::data::*;
|
||||||
|
|
||||||
pub async fn handle_get_website(bucket: &Bucket) -> Result<Response<Body>, Error> {
|
pub async fn handle_get_website(bucket: &Bucket) -> Result<Response<Body>, Error> {
|
||||||
|
@ -47,14 +46,11 @@ pub async fn handle_delete_website(
|
||||||
bucket_id: Uuid,
|
bucket_id: Uuid,
|
||||||
) -> Result<Response<Body>, Error> {
|
) -> Result<Response<Body>, Error> {
|
||||||
let mut bucket = garage
|
let mut bucket = garage
|
||||||
.bucket_table
|
.bucket_helper()
|
||||||
.get(&EmptyKey, &bucket_id)
|
.get_existing_bucket(bucket_id)
|
||||||
.await?
|
.await?;
|
||||||
.ok_or(Error::NoSuchBucket)?;
|
|
||||||
|
|
||||||
let param = bucket
|
let param = bucket.params_mut().unwrap();
|
||||||
.params_mut()
|
|
||||||
.ok_or_internal_error("Bucket should not be deleted at this point")?;
|
|
||||||
|
|
||||||
param.website_config.update(None);
|
param.website_config.update(None);
|
||||||
garage.bucket_table.insert(&bucket).await?;
|
garage.bucket_table.insert(&bucket).await?;
|
||||||
|
@ -77,14 +73,11 @@ pub async fn handle_put_website(
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut bucket = garage
|
let mut bucket = garage
|
||||||
.bucket_table
|
.bucket_helper()
|
||||||
.get(&EmptyKey, &bucket_id)
|
.get_existing_bucket(bucket_id)
|
||||||
.await?
|
.await?;
|
||||||
.ok_or(Error::NoSuchBucket)?;
|
|
||||||
|
|
||||||
let param = bucket
|
let param = bucket.params_mut().unwrap();
|
||||||
.params_mut()
|
|
||||||
.ok_or_internal_error("Bucket should not be deleted at this point")?;
|
|
||||||
|
|
||||||
let conf: WebsiteConfiguration = from_reader(&body as &[u8])?;
|
let conf: WebsiteConfiguration = from_reader(&body as &[u8])?;
|
||||||
conf.validate()?;
|
conf.validate()?;
|
||||||
|
@ -176,8 +169,8 @@ impl WebsiteConfiguration {
|
||||||
|| self.index_document.is_some()
|
|| self.index_document.is_some()
|
||||||
|| self.routing_rules.is_some())
|
|| self.routing_rules.is_some())
|
||||||
{
|
{
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::bad_request(
|
||||||
"Bad XML: can't have RedirectAllRequestsTo and other fields".to_owned(),
|
"Bad XML: can't have RedirectAllRequestsTo and other fields",
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
if let Some(ref ed) = self.error_document {
|
if let Some(ref ed) = self.error_document {
|
||||||
|
@ -222,8 +215,8 @@ impl WebsiteConfiguration {
|
||||||
impl Key {
|
impl Key {
|
||||||
pub fn validate(&self) -> Result<(), Error> {
|
pub fn validate(&self) -> Result<(), Error> {
|
||||||
if self.key.0.is_empty() {
|
if self.key.0.is_empty() {
|
||||||
Err(Error::BadRequest(
|
Err(Error::bad_request(
|
||||||
"Bad XML: error document specified but empty".to_owned(),
|
"Bad XML: error document specified but empty",
|
||||||
))
|
))
|
||||||
} else {
|
} else {
|
||||||
Ok(())
|
Ok(())
|
||||||
|
@ -234,8 +227,8 @@ impl Key {
|
||||||
impl Suffix {
|
impl Suffix {
|
||||||
pub fn validate(&self) -> Result<(), Error> {
|
pub fn validate(&self) -> Result<(), Error> {
|
||||||
if self.suffix.0.is_empty() | self.suffix.0.contains('/') {
|
if self.suffix.0.is_empty() | self.suffix.0.contains('/') {
|
||||||
Err(Error::BadRequest(
|
Err(Error::bad_request(
|
||||||
"Bad XML: index document is empty or contains /".to_owned(),
|
"Bad XML: index document is empty or contains /",
|
||||||
))
|
))
|
||||||
} else {
|
} else {
|
||||||
Ok(())
|
Ok(())
|
||||||
|
@ -247,7 +240,7 @@ impl Target {
|
||||||
pub fn validate(&self) -> Result<(), Error> {
|
pub fn validate(&self) -> Result<(), Error> {
|
||||||
if let Some(ref protocol) = self.protocol {
|
if let Some(ref protocol) = self.protocol {
|
||||||
if protocol.0 != "http" && protocol.0 != "https" {
|
if protocol.0 != "http" && protocol.0 != "https" {
|
||||||
return Err(Error::BadRequest("Bad XML: invalid protocol".to_owned()));
|
return Err(Error::bad_request("Bad XML: invalid protocol"));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
|
@ -269,19 +262,19 @@ impl Redirect {
|
||||||
pub fn validate(&self, has_prefix: bool) -> Result<(), Error> {
|
pub fn validate(&self, has_prefix: bool) -> Result<(), Error> {
|
||||||
if self.replace_prefix.is_some() {
|
if self.replace_prefix.is_some() {
|
||||||
if self.replace_full.is_some() {
|
if self.replace_full.is_some() {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::bad_request(
|
||||||
"Bad XML: both ReplaceKeyPrefixWith and ReplaceKeyWith are set".to_owned(),
|
"Bad XML: both ReplaceKeyPrefixWith and ReplaceKeyWith are set",
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
if !has_prefix {
|
if !has_prefix {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::bad_request(
|
||||||
"Bad XML: ReplaceKeyPrefixWith is set, but KeyPrefixEquals isn't".to_owned(),
|
"Bad XML: ReplaceKeyPrefixWith is set, but KeyPrefixEquals isn't",
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if let Some(ref protocol) = self.protocol {
|
if let Some(ref protocol) = self.protocol {
|
||||||
if protocol.0 != "http" && protocol.0 != "https" {
|
if protocol.0 != "http" && protocol.0 != "https" {
|
||||||
return Err(Error::BadRequest("Bad XML: invalid protocol".to_owned()));
|
return Err(Error::bad_request("Bad XML: invalid protocol"));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// TODO there are probably more invalide cases, but which ones?
|
// TODO there are probably more invalide cases, but which ones?
|
|
@ -1,7 +1,7 @@
|
||||||
use quick_xml::se::to_string;
|
use quick_xml::se::to_string;
|
||||||
use serde::{Deserialize, Serialize, Serializer};
|
use serde::{Deserialize, Serialize, Serializer};
|
||||||
|
|
||||||
use crate::Error as ApiError;
|
use crate::s3::error::Error as ApiError;
|
||||||
|
|
||||||
pub fn to_xml_with_header<T: Serialize>(x: &T) -> Result<String, ApiError> {
|
pub fn to_xml_with_header<T: Serialize>(x: &T) -> Result<String, ApiError> {
|
||||||
let mut xml = r#"<?xml version="1.0" encoding="UTF-8"?>"#.to_string();
|
let mut xml = r#"<?xml version="1.0" encoding="UTF-8"?>"#.to_string();
|
36
src/api/signature/error.rs
Normal file
36
src/api/signature/error.rs
Normal file
|
@ -0,0 +1,36 @@
|
||||||
|
use err_derive::Error;
|
||||||
|
|
||||||
|
use crate::common_error::CommonError;
|
||||||
|
pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInternalError};
|
||||||
|
|
||||||
|
/// Errors of this crate
|
||||||
|
#[derive(Debug, Error)]
|
||||||
|
pub enum Error {
|
||||||
|
#[error(display = "{}", _0)]
|
||||||
|
/// Error from common error
|
||||||
|
Common(CommonError),
|
||||||
|
|
||||||
|
/// Authorization Header Malformed
|
||||||
|
#[error(display = "Authorization header malformed, expected scope: {}", _0)]
|
||||||
|
AuthorizationHeaderMalformed(String),
|
||||||
|
|
||||||
|
// Category: bad request
|
||||||
|
/// The request contained an invalid UTF-8 sequence in its path or in other parameters
|
||||||
|
#[error(display = "Invalid UTF-8: {}", _0)]
|
||||||
|
InvalidUtf8Str(#[error(source)] std::str::Utf8Error),
|
||||||
|
|
||||||
|
/// The client sent a header with invalid value
|
||||||
|
#[error(display = "Invalid header value: {}", _0)]
|
||||||
|
InvalidHeader(#[error(source)] hyper::header::ToStrError),
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> From<T> for Error
|
||||||
|
where
|
||||||
|
CommonError: From<T>,
|
||||||
|
{
|
||||||
|
fn from(err: T) -> Self {
|
||||||
|
Error::Common(CommonError::from(err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl CommonErrorDerivative for Error {}
|
|
@ -4,11 +4,12 @@ use sha2::Sha256;
|
||||||
|
|
||||||
use garage_util::data::{sha256sum, Hash};
|
use garage_util::data::{sha256sum, Hash};
|
||||||
|
|
||||||
use crate::error::*;
|
pub mod error;
|
||||||
|
|
||||||
pub mod payload;
|
pub mod payload;
|
||||||
pub mod streaming;
|
pub mod streaming;
|
||||||
|
|
||||||
|
use error::*;
|
||||||
|
|
||||||
pub const SHORT_DATE: &str = "%Y%m%d";
|
pub const SHORT_DATE: &str = "%Y%m%d";
|
||||||
pub const LONG_DATETIME: &str = "%Y%m%dT%H%M%SZ";
|
pub const LONG_DATETIME: &str = "%Y%m%dT%H%M%SZ";
|
||||||
|
|
||||||
|
@ -16,7 +17,7 @@ type HmacSha256 = Hmac<Sha256>;
|
||||||
|
|
||||||
pub fn verify_signed_content(expected_sha256: Hash, body: &[u8]) -> Result<(), Error> {
|
pub fn verify_signed_content(expected_sha256: Hash, body: &[u8]) -> Result<(), Error> {
|
||||||
if expected_sha256 != sha256sum(body) {
|
if expected_sha256 != sha256sum(body) {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::bad_request(
|
||||||
"Request content hash does not match signed hash".to_string(),
|
"Request content hash does not match signed hash".to_string(),
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
@ -42,6 +43,11 @@ pub fn signing_hmac(
|
||||||
Ok(hmac)
|
Ok(hmac)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn compute_scope(datetime: &DateTime<Utc>, region: &str) -> String {
|
pub fn compute_scope(datetime: &DateTime<Utc>, region: &str, service: &str) -> String {
|
||||||
format!("{}/{}/s3/aws4_request", datetime.format(SHORT_DATE), region,)
|
format!(
|
||||||
|
"{}/{}/{}/aws4_request",
|
||||||
|
datetime.format(SHORT_DATE),
|
||||||
|
region,
|
||||||
|
service
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
|
@ -11,14 +11,15 @@ use garage_util::data::Hash;
|
||||||
use garage_model::garage::Garage;
|
use garage_model::garage::Garage;
|
||||||
use garage_model::key_table::*;
|
use garage_model::key_table::*;
|
||||||
|
|
||||||
use super::signing_hmac;
|
use super::LONG_DATETIME;
|
||||||
use super::{LONG_DATETIME, SHORT_DATE};
|
use super::{compute_scope, signing_hmac};
|
||||||
|
|
||||||
use crate::encoding::uri_encode;
|
use crate::encoding::uri_encode;
|
||||||
use crate::error::*;
|
use crate::signature::error::*;
|
||||||
|
|
||||||
pub async fn check_payload_signature(
|
pub async fn check_payload_signature(
|
||||||
garage: &Garage,
|
garage: &Garage,
|
||||||
|
service: &str,
|
||||||
request: &Request<Body>,
|
request: &Request<Body>,
|
||||||
) -> Result<(Option<Key>, Option<Hash>), Error> {
|
) -> Result<(Option<Key>, Option<Hash>), Error> {
|
||||||
let mut headers = HashMap::new();
|
let mut headers = HashMap::new();
|
||||||
|
@ -64,6 +65,7 @@ pub async fn check_payload_signature(
|
||||||
|
|
||||||
let key = verify_v4(
|
let key = verify_v4(
|
||||||
garage,
|
garage,
|
||||||
|
service,
|
||||||
&authorization.credential,
|
&authorization.credential,
|
||||||
&authorization.date,
|
&authorization.date,
|
||||||
&authorization.signature,
|
&authorization.signature,
|
||||||
|
@ -103,7 +105,7 @@ fn parse_authorization(
|
||||||
let (auth_kind, rest) = authorization.split_at(first_space);
|
let (auth_kind, rest) = authorization.split_at(first_space);
|
||||||
|
|
||||||
if auth_kind != "AWS4-HMAC-SHA256" {
|
if auth_kind != "AWS4-HMAC-SHA256" {
|
||||||
return Err(Error::BadRequest("Unsupported authorization method".into()));
|
return Err(Error::bad_request("Unsupported authorization method"));
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut auth_params = HashMap::new();
|
let mut auth_params = HashMap::new();
|
||||||
|
@ -127,10 +129,11 @@ fn parse_authorization(
|
||||||
let date = headers
|
let date = headers
|
||||||
.get("x-amz-date")
|
.get("x-amz-date")
|
||||||
.ok_or_bad_request("Missing X-Amz-Date field")
|
.ok_or_bad_request("Missing X-Amz-Date field")
|
||||||
|
.map_err(Error::from)
|
||||||
.and_then(|d| parse_date(d))?;
|
.and_then(|d| parse_date(d))?;
|
||||||
|
|
||||||
if Utc::now() - date > Duration::hours(24) {
|
if Utc::now() - date > Duration::hours(24) {
|
||||||
return Err(Error::BadRequest("Date is too old".to_string()));
|
return Err(Error::bad_request("Date is too old".to_string()));
|
||||||
}
|
}
|
||||||
|
|
||||||
let auth = Authorization {
|
let auth = Authorization {
|
||||||
|
@ -154,7 +157,7 @@ fn parse_query_authorization(
|
||||||
headers: &HashMap<String, String>,
|
headers: &HashMap<String, String>,
|
||||||
) -> Result<Authorization, Error> {
|
) -> Result<Authorization, Error> {
|
||||||
if algorithm != "AWS4-HMAC-SHA256" {
|
if algorithm != "AWS4-HMAC-SHA256" {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::bad_request(
|
||||||
"Unsupported authorization method".to_string(),
|
"Unsupported authorization method".to_string(),
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
@ -177,10 +180,10 @@ fn parse_query_authorization(
|
||||||
.get("x-amz-expires")
|
.get("x-amz-expires")
|
||||||
.ok_or_bad_request("X-Amz-Expires not found in query parameters")?
|
.ok_or_bad_request("X-Amz-Expires not found in query parameters")?
|
||||||
.parse()
|
.parse()
|
||||||
.map_err(|_| Error::BadRequest("X-Amz-Expires is not a number".to_string()))?;
|
.map_err(|_| Error::bad_request("X-Amz-Expires is not a number".to_string()))?;
|
||||||
|
|
||||||
if duration > 7 * 24 * 3600 {
|
if duration > 7 * 24 * 3600 {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::bad_request(
|
||||||
"X-Amz-Exprires may not exceed a week".to_string(),
|
"X-Amz-Exprires may not exceed a week".to_string(),
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
@ -188,10 +191,11 @@ fn parse_query_authorization(
|
||||||
let date = headers
|
let date = headers
|
||||||
.get("x-amz-date")
|
.get("x-amz-date")
|
||||||
.ok_or_bad_request("Missing X-Amz-Date field")
|
.ok_or_bad_request("Missing X-Amz-Date field")
|
||||||
|
.map_err(Error::from)
|
||||||
.and_then(|d| parse_date(d))?;
|
.and_then(|d| parse_date(d))?;
|
||||||
|
|
||||||
if Utc::now() - date > Duration::seconds(duration) {
|
if Utc::now() - date > Duration::seconds(duration) {
|
||||||
return Err(Error::BadRequest("Date is too old".to_string()));
|
return Err(Error::bad_request("Date is too old".to_string()));
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(Authorization {
|
Ok(Authorization {
|
||||||
|
@ -281,6 +285,7 @@ pub fn parse_date(date: &str) -> Result<DateTime<Utc>, Error> {
|
||||||
|
|
||||||
pub async fn verify_v4(
|
pub async fn verify_v4(
|
||||||
garage: &Garage,
|
garage: &Garage,
|
||||||
|
service: &str,
|
||||||
credential: &str,
|
credential: &str,
|
||||||
date: &DateTime<Utc>,
|
date: &DateTime<Utc>,
|
||||||
signature: &str,
|
signature: &str,
|
||||||
|
@ -288,11 +293,7 @@ pub async fn verify_v4(
|
||||||
) -> Result<Key, Error> {
|
) -> Result<Key, Error> {
|
||||||
let (key_id, scope) = parse_credential(credential)?;
|
let (key_id, scope) = parse_credential(credential)?;
|
||||||
|
|
||||||
let scope_expected = format!(
|
let scope_expected = compute_scope(date, &garage.config.s3_api.s3_region, service);
|
||||||
"{}/{}/s3/aws4_request",
|
|
||||||
date.format(SHORT_DATE),
|
|
||||||
garage.config.s3_api.s3_region
|
|
||||||
);
|
|
||||||
if scope != scope_expected {
|
if scope != scope_expected {
|
||||||
return Err(Error::AuthorizationHeaderMalformed(scope.to_string()));
|
return Err(Error::AuthorizationHeaderMalformed(scope.to_string()));
|
||||||
}
|
}
|
||||||
|
@ -302,20 +303,20 @@ pub async fn verify_v4(
|
||||||
.get(&EmptyKey, &key_id)
|
.get(&EmptyKey, &key_id)
|
||||||
.await?
|
.await?
|
||||||
.filter(|k| !k.state.is_deleted())
|
.filter(|k| !k.state.is_deleted())
|
||||||
.ok_or_else(|| Error::Forbidden(format!("No such key: {}", &key_id)))?;
|
.ok_or_else(|| Error::forbidden(format!("No such key: {}", &key_id)))?;
|
||||||
let key_p = key.params().unwrap();
|
let key_p = key.params().unwrap();
|
||||||
|
|
||||||
let mut hmac = signing_hmac(
|
let mut hmac = signing_hmac(
|
||||||
date,
|
date,
|
||||||
&key_p.secret_key,
|
&key_p.secret_key,
|
||||||
&garage.config.s3_api.s3_region,
|
&garage.config.s3_api.s3_region,
|
||||||
"s3",
|
service,
|
||||||
)
|
)
|
||||||
.ok_or_internal_error("Unable to build signing HMAC")?;
|
.ok_or_internal_error("Unable to build signing HMAC")?;
|
||||||
hmac.update(payload);
|
hmac.update(payload);
|
||||||
let our_signature = hex::encode(hmac.finalize().into_bytes());
|
let our_signature = hex::encode(hmac.finalize().into_bytes());
|
||||||
if signature != our_signature {
|
if signature != our_signature {
|
||||||
return Err(Error::Forbidden("Invalid signature".to_string()));
|
return Err(Error::forbidden("Invalid signature".to_string()));
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(key)
|
Ok(key)
|
||||||
|
|
|
@ -1,18 +1,67 @@
|
||||||
use std::pin::Pin;
|
use std::pin::Pin;
|
||||||
|
|
||||||
use chrono::{DateTime, Utc};
|
use chrono::{DateTime, NaiveDateTime, Utc};
|
||||||
use futures::prelude::*;
|
use futures::prelude::*;
|
||||||
use futures::task;
|
use futures::task;
|
||||||
|
use garage_model::key_table::Key;
|
||||||
|
use hmac::Mac;
|
||||||
use hyper::body::Bytes;
|
use hyper::body::Bytes;
|
||||||
|
use hyper::{Body, Request};
|
||||||
|
|
||||||
use garage_util::data::Hash;
|
use garage_util::data::Hash;
|
||||||
use hmac::Mac;
|
|
||||||
|
|
||||||
use super::sha256sum;
|
use super::{compute_scope, sha256sum, HmacSha256, LONG_DATETIME};
|
||||||
use super::HmacSha256;
|
|
||||||
use super::LONG_DATETIME;
|
|
||||||
|
|
||||||
use crate::error::*;
|
use crate::signature::error::*;
|
||||||
|
|
||||||
|
pub fn parse_streaming_body(
|
||||||
|
api_key: &Key,
|
||||||
|
req: Request<Body>,
|
||||||
|
content_sha256: &mut Option<Hash>,
|
||||||
|
region: &str,
|
||||||
|
service: &str,
|
||||||
|
) -> Result<Request<Body>, Error> {
|
||||||
|
match req.headers().get("x-amz-content-sha256") {
|
||||||
|
Some(header) if header == "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" => {
|
||||||
|
let signature = content_sha256
|
||||||
|
.take()
|
||||||
|
.ok_or_bad_request("No signature provided")?;
|
||||||
|
|
||||||
|
let secret_key = &api_key
|
||||||
|
.state
|
||||||
|
.as_option()
|
||||||
|
.ok_or_internal_error("Deleted key state")?
|
||||||
|
.secret_key;
|
||||||
|
|
||||||
|
let date = req
|
||||||
|
.headers()
|
||||||
|
.get("x-amz-date")
|
||||||
|
.ok_or_bad_request("Missing X-Amz-Date field")?
|
||||||
|
.to_str()?;
|
||||||
|
let date: NaiveDateTime = NaiveDateTime::parse_from_str(date, LONG_DATETIME)
|
||||||
|
.ok_or_bad_request("Invalid date")?;
|
||||||
|
let date: DateTime<Utc> = DateTime::from_utc(date, Utc);
|
||||||
|
|
||||||
|
let scope = compute_scope(&date, region, service);
|
||||||
|
let signing_hmac = crate::signature::signing_hmac(&date, secret_key, region, service)
|
||||||
|
.ok_or_internal_error("Unable to build signing HMAC")?;
|
||||||
|
|
||||||
|
Ok(req.map(move |body| {
|
||||||
|
Body::wrap_stream(
|
||||||
|
SignedPayloadStream::new(
|
||||||
|
body.map_err(Error::from),
|
||||||
|
signing_hmac,
|
||||||
|
date,
|
||||||
|
&scope,
|
||||||
|
signature,
|
||||||
|
)
|
||||||
|
.map_err(Error::from),
|
||||||
|
)
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
_ => Ok(req),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// Result of `sha256("")`
|
/// Result of `sha256("")`
|
||||||
const EMPTY_STRING_HEX_DIGEST: &str =
|
const EMPTY_STRING_HEX_DIGEST: &str =
|
||||||
|
@ -38,7 +87,7 @@ fn compute_streaming_payload_signature(
|
||||||
let mut hmac = signing_hmac.clone();
|
let mut hmac = signing_hmac.clone();
|
||||||
hmac.update(string_to_sign.as_bytes());
|
hmac.update(string_to_sign.as_bytes());
|
||||||
|
|
||||||
Hash::try_from(&hmac.finalize().into_bytes()).ok_or_internal_error("Invalid signature")
|
Ok(Hash::try_from(&hmac.finalize().into_bytes()).ok_or_internal_error("Invalid signature")?)
|
||||||
}
|
}
|
||||||
|
|
||||||
mod payload {
|
mod payload {
|
||||||
|
@ -114,10 +163,10 @@ impl From<SignedPayloadStreamError> for Error {
|
||||||
match err {
|
match err {
|
||||||
SignedPayloadStreamError::Stream(e) => e,
|
SignedPayloadStreamError::Stream(e) => e,
|
||||||
SignedPayloadStreamError::InvalidSignature => {
|
SignedPayloadStreamError::InvalidSignature => {
|
||||||
Error::BadRequest("Invalid payload signature".into())
|
Error::bad_request("Invalid payload signature")
|
||||||
}
|
}
|
||||||
SignedPayloadStreamError::Message(e) => {
|
SignedPayloadStreamError::Message(e) => {
|
||||||
Error::BadRequest(format!("Chunk format error: {}", e))
|
Error::bad_request(format!("Chunk format error: {}", e))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -295,7 +344,7 @@ mod tests {
|
||||||
.with_timezone(&Utc);
|
.with_timezone(&Utc);
|
||||||
let secret_key = "test";
|
let secret_key = "test";
|
||||||
let region = "test";
|
let region = "test";
|
||||||
let scope = crate::signature::compute_scope(&datetime, region);
|
let scope = crate::signature::compute_scope(&datetime, region, "s3");
|
||||||
let signing_hmac =
|
let signing_hmac =
|
||||||
crate::signature::signing_hmac(&datetime, secret_key, region, "s3").unwrap();
|
crate::signature::signing_hmac(&datetime, secret_key, region, "s3").unwrap();
|
||||||
|
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
[package]
|
[package]
|
||||||
name = "garage_block"
|
name = "garage_block"
|
||||||
version = "0.7.0"
|
version = "0.7.3"
|
||||||
authors = ["Alex Auvolat <alex@adnab.me>"]
|
authors = ["Alex Auvolat <alex@adnab.me>"]
|
||||||
edition = "2018"
|
edition = "2018"
|
||||||
license = "AGPL-3.0"
|
license = "AGPL-3.0"
|
||||||
|
@ -14,9 +14,9 @@ path = "lib.rs"
|
||||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
garage_rpc = { version = "0.7.0", path = "../rpc" }
|
garage_rpc = { version = "0.7.3", path = "../rpc" }
|
||||||
garage_util = { version = "0.7.0", path = "../util" }
|
garage_util = { version = "0.7.3", path = "../util" }
|
||||||
garage_table = { version = "0.7.0", path = "../table" }
|
garage_table = { version = "0.7.3", path = "../table" }
|
||||||
|
|
||||||
opentelemetry = "0.17"
|
opentelemetry = "0.17"
|
||||||
|
|
||||||
|
|
|
@ -132,7 +132,7 @@ impl BlockManager {
|
||||||
|
|
||||||
let endpoint = system
|
let endpoint = system
|
||||||
.netapp
|
.netapp
|
||||||
.endpoint("garage_model/block.rs/Rpc".to_string());
|
.endpoint("garage_block/manager.rs/Rpc".to_string());
|
||||||
|
|
||||||
let manager_locked = BlockManagerLocked();
|
let manager_locked = BlockManagerLocked();
|
||||||
|
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
[package]
|
[package]
|
||||||
name = "garage"
|
name = "garage"
|
||||||
version = "0.7.0"
|
version = "0.7.3"
|
||||||
authors = ["Alex Auvolat <alex@adnab.me>"]
|
authors = ["Alex Auvolat <alex@adnab.me>"]
|
||||||
edition = "2018"
|
edition = "2018"
|
||||||
license = "AGPL-3.0"
|
license = "AGPL-3.0"
|
||||||
|
@ -21,16 +21,14 @@ path = "tests/lib.rs"
|
||||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
garage_api = { version = "0.7.0", path = "../api" }
|
garage_api = { version = "0.7.3", path = "../api" }
|
||||||
garage_model = { version = "0.7.0", path = "../model" }
|
garage_model = { version = "0.7.3", path = "../model" }
|
||||||
garage_rpc = { version = "0.7.0", path = "../rpc" }
|
garage_rpc = { version = "0.7.3", path = "../rpc" }
|
||||||
garage_table = { version = "0.7.0", path = "../table" }
|
garage_table = { version = "0.7.3", path = "../table" }
|
||||||
garage_util = { version = "0.7.0", path = "../util" }
|
garage_util = { version = "0.7.3", path = "../util" }
|
||||||
garage_web = { version = "0.7.0", path = "../web" }
|
garage_web = { version = "0.7.3", path = "../web" }
|
||||||
garage_admin = { version = "0.7.0", path = "../admin" }
|
|
||||||
|
|
||||||
bytes = "1.0"
|
bytes = "1.0"
|
||||||
git-version = "0.3.4"
|
|
||||||
hex = "0.4"
|
hex = "0.4"
|
||||||
tracing = { version = "0.1.30", features = ["log-always"] }
|
tracing = { version = "0.1.30", features = ["log-always"] }
|
||||||
pretty_env_logger = "0.4"
|
pretty_env_logger = "0.4"
|
||||||
|
@ -54,6 +52,11 @@ tokio = { version = "1.0", default-features = false, features = ["rt", "rt-multi
|
||||||
#netapp = { version = "0.4", path = "../../../netapp" }
|
#netapp = { version = "0.4", path = "../../../netapp" }
|
||||||
netapp = "0.4"
|
netapp = "0.4"
|
||||||
|
|
||||||
|
opentelemetry = { version = "0.17", features = [ "rt-tokio" ] }
|
||||||
|
opentelemetry-prometheus = "0.10"
|
||||||
|
opentelemetry-otlp = "0.10"
|
||||||
|
prometheus = "0.13"
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
aws-sdk-s3 = "0.8"
|
aws-sdk-s3 = "0.8"
|
||||||
chrono = "0.4"
|
chrono = "0.4"
|
||||||
|
@ -63,3 +66,11 @@ hyper = { version = "0.14", features = ["client", "http1", "runtime"] }
|
||||||
sha2 = "0.9"
|
sha2 = "0.9"
|
||||||
|
|
||||||
static_init = "1.0"
|
static_init = "1.0"
|
||||||
|
assert-json-diff = "2.0"
|
||||||
|
serde_json = "1.0"
|
||||||
|
base64 = "0.13"
|
||||||
|
|
||||||
|
|
||||||
|
[features]
|
||||||
|
kubernetes-discovery = [ "garage_rpc/kubernetes-discovery" ]
|
||||||
|
k2v = [ "garage_util/k2v", "garage_api/k2v" ]
|
||||||
|
|
|
@ -21,7 +21,6 @@ use garage_model::garage::Garage;
|
||||||
use garage_model::helper::error::{Error, OkOrBadRequest};
|
use garage_model::helper::error::{Error, OkOrBadRequest};
|
||||||
use garage_model::key_table::*;
|
use garage_model::key_table::*;
|
||||||
use garage_model::migrate::Migrate;
|
use garage_model::migrate::Migrate;
|
||||||
use garage_model::object_table::ObjectFilter;
|
|
||||||
use garage_model::permission::*;
|
use garage_model::permission::*;
|
||||||
|
|
||||||
use crate::cli::*;
|
use crate::cli::*;
|
||||||
|
@ -80,7 +79,13 @@ impl AdminRpcHandler {
|
||||||
let buckets = self
|
let buckets = self
|
||||||
.garage
|
.garage
|
||||||
.bucket_table
|
.bucket_table
|
||||||
.get_range(&EmptyKey, None, Some(DeletedFilter::NotDeleted), 10000)
|
.get_range(
|
||||||
|
&EmptyKey,
|
||||||
|
None,
|
||||||
|
Some(DeletedFilter::NotDeleted),
|
||||||
|
10000,
|
||||||
|
EnumerationOrder::Forward,
|
||||||
|
)
|
||||||
.await?;
|
.await?;
|
||||||
Ok(AdminRpc::BucketList(buckets))
|
Ok(AdminRpc::BucketList(buckets))
|
||||||
}
|
}
|
||||||
|
@ -207,12 +212,7 @@ impl AdminRpcHandler {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check bucket is empty
|
// Check bucket is empty
|
||||||
let objects = self
|
if !helper.is_bucket_empty(bucket_id).await? {
|
||||||
.garage
|
|
||||||
.object_table
|
|
||||||
.get_range(&bucket_id, None, Some(ObjectFilter::IsData), 10)
|
|
||||||
.await?;
|
|
||||||
if !objects.is_empty() {
|
|
||||||
return Err(Error::BadRequest(format!(
|
return Err(Error::BadRequest(format!(
|
||||||
"Bucket {} is not empty",
|
"Bucket {} is not empty",
|
||||||
query.name
|
query.name
|
||||||
|
@ -249,6 +249,7 @@ impl AdminRpcHandler {
|
||||||
|
|
||||||
async fn handle_alias_bucket(&self, query: &AliasBucketOpt) -> Result<AdminRpc, Error> {
|
async fn handle_alias_bucket(&self, query: &AliasBucketOpt) -> Result<AdminRpc, Error> {
|
||||||
let helper = self.garage.bucket_helper();
|
let helper = self.garage.bucket_helper();
|
||||||
|
let key_helper = self.garage.key_helper();
|
||||||
|
|
||||||
let bucket_id = helper
|
let bucket_id = helper
|
||||||
.resolve_global_bucket_name(&query.existing_bucket)
|
.resolve_global_bucket_name(&query.existing_bucket)
|
||||||
|
@ -256,7 +257,7 @@ impl AdminRpcHandler {
|
||||||
.ok_or_bad_request("Bucket not found")?;
|
.ok_or_bad_request("Bucket not found")?;
|
||||||
|
|
||||||
if let Some(key_pattern) = &query.local {
|
if let Some(key_pattern) = &query.local {
|
||||||
let key = helper.get_existing_matching_key(key_pattern).await?;
|
let key = key_helper.get_existing_matching_key(key_pattern).await?;
|
||||||
|
|
||||||
helper
|
helper
|
||||||
.set_local_bucket_alias(bucket_id, &key.key_id, &query.new_name)
|
.set_local_bucket_alias(bucket_id, &key.key_id, &query.new_name)
|
||||||
|
@ -278,9 +279,10 @@ impl AdminRpcHandler {
|
||||||
|
|
||||||
async fn handle_unalias_bucket(&self, query: &UnaliasBucketOpt) -> Result<AdminRpc, Error> {
|
async fn handle_unalias_bucket(&self, query: &UnaliasBucketOpt) -> Result<AdminRpc, Error> {
|
||||||
let helper = self.garage.bucket_helper();
|
let helper = self.garage.bucket_helper();
|
||||||
|
let key_helper = self.garage.key_helper();
|
||||||
|
|
||||||
if let Some(key_pattern) = &query.local {
|
if let Some(key_pattern) = &query.local {
|
||||||
let key = helper.get_existing_matching_key(key_pattern).await?;
|
let key = key_helper.get_existing_matching_key(key_pattern).await?;
|
||||||
|
|
||||||
let bucket_id = key
|
let bucket_id = key
|
||||||
.state
|
.state
|
||||||
|
@ -319,12 +321,15 @@ impl AdminRpcHandler {
|
||||||
|
|
||||||
async fn handle_bucket_allow(&self, query: &PermBucketOpt) -> Result<AdminRpc, Error> {
|
async fn handle_bucket_allow(&self, query: &PermBucketOpt) -> Result<AdminRpc, Error> {
|
||||||
let helper = self.garage.bucket_helper();
|
let helper = self.garage.bucket_helper();
|
||||||
|
let key_helper = self.garage.key_helper();
|
||||||
|
|
||||||
let bucket_id = helper
|
let bucket_id = helper
|
||||||
.resolve_global_bucket_name(&query.bucket)
|
.resolve_global_bucket_name(&query.bucket)
|
||||||
.await?
|
.await?
|
||||||
.ok_or_bad_request("Bucket not found")?;
|
.ok_or_bad_request("Bucket not found")?;
|
||||||
let key = helper.get_existing_matching_key(&query.key_pattern).await?;
|
let key = key_helper
|
||||||
|
.get_existing_matching_key(&query.key_pattern)
|
||||||
|
.await?;
|
||||||
|
|
||||||
let allow_read = query.read || key.allow_read(&bucket_id);
|
let allow_read = query.read || key.allow_read(&bucket_id);
|
||||||
let allow_write = query.write || key.allow_write(&bucket_id);
|
let allow_write = query.write || key.allow_write(&bucket_id);
|
||||||
|
@ -351,12 +356,15 @@ impl AdminRpcHandler {
|
||||||
|
|
||||||
async fn handle_bucket_deny(&self, query: &PermBucketOpt) -> Result<AdminRpc, Error> {
|
async fn handle_bucket_deny(&self, query: &PermBucketOpt) -> Result<AdminRpc, Error> {
|
||||||
let helper = self.garage.bucket_helper();
|
let helper = self.garage.bucket_helper();
|
||||||
|
let key_helper = self.garage.key_helper();
|
||||||
|
|
||||||
let bucket_id = helper
|
let bucket_id = helper
|
||||||
.resolve_global_bucket_name(&query.bucket)
|
.resolve_global_bucket_name(&query.bucket)
|
||||||
.await?
|
.await?
|
||||||
.ok_or_bad_request("Bucket not found")?;
|
.ok_or_bad_request("Bucket not found")?;
|
||||||
let key = helper.get_existing_matching_key(&query.key_pattern).await?;
|
let key = key_helper
|
||||||
|
.get_existing_matching_key(&query.key_pattern)
|
||||||
|
.await?;
|
||||||
|
|
||||||
let allow_read = !query.read && key.allow_read(&bucket_id);
|
let allow_read = !query.read && key.allow_read(&bucket_id);
|
||||||
let allow_write = !query.write && key.allow_write(&bucket_id);
|
let allow_write = !query.write && key.allow_write(&bucket_id);
|
||||||
|
@ -445,6 +453,7 @@ impl AdminRpcHandler {
|
||||||
None,
|
None,
|
||||||
Some(KeyFilter::Deleted(DeletedFilter::NotDeleted)),
|
Some(KeyFilter::Deleted(DeletedFilter::NotDeleted)),
|
||||||
10000,
|
10000,
|
||||||
|
EnumerationOrder::Forward,
|
||||||
)
|
)
|
||||||
.await?
|
.await?
|
||||||
.iter()
|
.iter()
|
||||||
|
@ -456,7 +465,7 @@ impl AdminRpcHandler {
|
||||||
async fn handle_key_info(&self, query: &KeyOpt) -> Result<AdminRpc, Error> {
|
async fn handle_key_info(&self, query: &KeyOpt) -> Result<AdminRpc, Error> {
|
||||||
let key = self
|
let key = self
|
||||||
.garage
|
.garage
|
||||||
.bucket_helper()
|
.key_helper()
|
||||||
.get_existing_matching_key(&query.key_pattern)
|
.get_existing_matching_key(&query.key_pattern)
|
||||||
.await?;
|
.await?;
|
||||||
self.key_info_result(key).await
|
self.key_info_result(key).await
|
||||||
|
@ -471,7 +480,7 @@ impl AdminRpcHandler {
|
||||||
async fn handle_rename_key(&self, query: &KeyRenameOpt) -> Result<AdminRpc, Error> {
|
async fn handle_rename_key(&self, query: &KeyRenameOpt) -> Result<AdminRpc, Error> {
|
||||||
let mut key = self
|
let mut key = self
|
||||||
.garage
|
.garage
|
||||||
.bucket_helper()
|
.key_helper()
|
||||||
.get_existing_matching_key(&query.key_pattern)
|
.get_existing_matching_key(&query.key_pattern)
|
||||||
.await?;
|
.await?;
|
||||||
key.params_mut()
|
key.params_mut()
|
||||||
|
@ -483,9 +492,11 @@ impl AdminRpcHandler {
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn handle_delete_key(&self, query: &KeyDeleteOpt) -> Result<AdminRpc, Error> {
|
async fn handle_delete_key(&self, query: &KeyDeleteOpt) -> Result<AdminRpc, Error> {
|
||||||
let helper = self.garage.bucket_helper();
|
let key_helper = self.garage.key_helper();
|
||||||
|
|
||||||
let mut key = helper.get_existing_matching_key(&query.key_pattern).await?;
|
let mut key = key_helper
|
||||||
|
.get_existing_matching_key(&query.key_pattern)
|
||||||
|
.await?;
|
||||||
|
|
||||||
if !query.yes {
|
if !query.yes {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
|
@ -493,32 +504,7 @@ impl AdminRpcHandler {
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let state = key.state.as_option_mut().unwrap();
|
key_helper.delete_key(&mut key).await?;
|
||||||
|
|
||||||
// --- done checking, now commit ---
|
|
||||||
// (the step at unset_local_bucket_alias will fail if a bucket
|
|
||||||
// does not have another alias, the deletion will be
|
|
||||||
// interrupted in the middle if that happens)
|
|
||||||
|
|
||||||
// 1. Delete local aliases
|
|
||||||
for (alias, _, to) in state.local_aliases.items().iter() {
|
|
||||||
if let Some(bucket_id) = to {
|
|
||||||
helper
|
|
||||||
.unset_local_bucket_alias(*bucket_id, &key.key_id, alias)
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// 2. Remove permissions on all authorized buckets
|
|
||||||
for (ab_id, _auth) in state.authorized_buckets.items().iter() {
|
|
||||||
helper
|
|
||||||
.set_bucket_key_permissions(*ab_id, &key.key_id, BucketKeyPerm::NO_PERMISSIONS)
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
// 3. Actually delete key
|
|
||||||
key.state = Deletable::delete();
|
|
||||||
self.garage.key_table.insert(&key).await?;
|
|
||||||
|
|
||||||
Ok(AdminRpc::Ok(format!(
|
Ok(AdminRpc::Ok(format!(
|
||||||
"Key {} was deleted successfully.",
|
"Key {} was deleted successfully.",
|
||||||
|
@ -529,7 +515,7 @@ impl AdminRpcHandler {
|
||||||
async fn handle_allow_key(&self, query: &KeyPermOpt) -> Result<AdminRpc, Error> {
|
async fn handle_allow_key(&self, query: &KeyPermOpt) -> Result<AdminRpc, Error> {
|
||||||
let mut key = self
|
let mut key = self
|
||||||
.garage
|
.garage
|
||||||
.bucket_helper()
|
.key_helper()
|
||||||
.get_existing_matching_key(&query.key_pattern)
|
.get_existing_matching_key(&query.key_pattern)
|
||||||
.await?;
|
.await?;
|
||||||
if query.create_bucket {
|
if query.create_bucket {
|
||||||
|
@ -542,7 +528,7 @@ impl AdminRpcHandler {
|
||||||
async fn handle_deny_key(&self, query: &KeyPermOpt) -> Result<AdminRpc, Error> {
|
async fn handle_deny_key(&self, query: &KeyPermOpt) -> Result<AdminRpc, Error> {
|
||||||
let mut key = self
|
let mut key = self
|
||||||
.garage
|
.garage
|
||||||
.bucket_helper()
|
.key_helper()
|
||||||
.get_existing_matching_key(&query.key_pattern)
|
.get_existing_matching_key(&query.key_pattern)
|
||||||
.await?;
|
.await?;
|
||||||
if query.create_bucket {
|
if query.create_bucket {
|
||||||
|
@ -683,11 +669,7 @@ impl AdminRpcHandler {
|
||||||
writeln!(
|
writeln!(
|
||||||
&mut ret,
|
&mut ret,
|
||||||
"\nGarage version: {}",
|
"\nGarage version: {}",
|
||||||
option_env!("GIT_VERSION").unwrap_or(git_version::git_version!(
|
self.garage.system.garage_version(),
|
||||||
prefix = "git:",
|
|
||||||
cargo_prefix = "cargo:",
|
|
||||||
fallback = "unknown"
|
|
||||||
))
|
|
||||||
)
|
)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
|
|
|
@ -1,6 +1,7 @@
|
||||||
use std::collections::HashSet;
|
use std::collections::HashSet;
|
||||||
|
|
||||||
use garage_util::error::*;
|
use garage_util::error::*;
|
||||||
|
use garage_util::formater::format_table;
|
||||||
|
|
||||||
use garage_rpc::layout::*;
|
use garage_rpc::layout::*;
|
||||||
use garage_rpc::system::*;
|
use garage_rpc::system::*;
|
||||||
|
@ -85,13 +86,14 @@ pub async fn cmd_status(rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID) ->
|
||||||
format_table(healthy_nodes);
|
format_table(healthy_nodes);
|
||||||
|
|
||||||
let status_keys = status.iter().map(|adv| adv.id).collect::<HashSet<_>>();
|
let status_keys = status.iter().map(|adv| adv.id).collect::<HashSet<_>>();
|
||||||
let failure_case_1 = status.iter().any(|adv| !adv.is_up);
|
let failure_case_1 = status
|
||||||
|
.iter()
|
||||||
|
.any(|adv| !adv.is_up && matches!(layout.roles.get(&adv.id), Some(NodeRoleV(Some(_)))));
|
||||||
let failure_case_2 = layout
|
let failure_case_2 = layout
|
||||||
.roles
|
.roles
|
||||||
.items()
|
.items()
|
||||||
.iter()
|
.iter()
|
||||||
.filter(|(_, _, v)| v.0.is_some())
|
.any(|(id, _, v)| !status_keys.contains(id) && v.0.is_some());
|
||||||
.any(|(id, _, _)| !status_keys.contains(id));
|
|
||||||
if failure_case_1 || failure_case_2 {
|
if failure_case_1 || failure_case_2 {
|
||||||
println!("\n==== FAILED NODES ====");
|
println!("\n==== FAILED NODES ====");
|
||||||
let mut failed_nodes =
|
let mut failed_nodes =
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
use garage_util::crdt::Crdt;
|
use garage_util::crdt::Crdt;
|
||||||
use garage_util::data::*;
|
|
||||||
use garage_util::error::*;
|
use garage_util::error::*;
|
||||||
|
use garage_util::formater::format_table;
|
||||||
|
|
||||||
use garage_rpc::layout::*;
|
use garage_rpc::layout::*;
|
||||||
use garage_rpc::system::*;
|
use garage_rpc::system::*;
|
||||||
|
@ -43,14 +43,22 @@ pub async fn cmd_assign_role(
|
||||||
resp => return Err(Error::Message(format!("Invalid RPC response: {:?}", resp))),
|
resp => return Err(Error::Message(format!("Invalid RPC response: {:?}", resp))),
|
||||||
};
|
};
|
||||||
|
|
||||||
|
let mut layout = fetch_layout(rpc_cli, rpc_host).await?;
|
||||||
|
|
||||||
let added_nodes = args
|
let added_nodes = args
|
||||||
.node_ids
|
.node_ids
|
||||||
.iter()
|
.iter()
|
||||||
.map(|node_id| find_matching_node(status.iter().map(|adv| adv.id), node_id))
|
.map(|node_id| {
|
||||||
|
find_matching_node(
|
||||||
|
status
|
||||||
|
.iter()
|
||||||
|
.map(|adv| adv.id)
|
||||||
|
.chain(layout.node_ids().iter().cloned()),
|
||||||
|
node_id,
|
||||||
|
)
|
||||||
|
})
|
||||||
.collect::<Result<Vec<_>, _>>()?;
|
.collect::<Result<Vec<_>, _>>()?;
|
||||||
|
|
||||||
let mut layout = fetch_layout(rpc_cli, rpc_host).await?;
|
|
||||||
|
|
||||||
let mut roles = layout.roles.clone();
|
let mut roles = layout.roles.clone();
|
||||||
roles.merge(&layout.staging);
|
roles.merge(&layout.staging);
|
||||||
|
|
||||||
|
@ -203,31 +211,9 @@ pub async fn cmd_apply_layout(
|
||||||
rpc_host: NodeID,
|
rpc_host: NodeID,
|
||||||
apply_opt: ApplyLayoutOpt,
|
apply_opt: ApplyLayoutOpt,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
let mut layout = fetch_layout(rpc_cli, rpc_host).await?;
|
let layout = fetch_layout(rpc_cli, rpc_host).await?;
|
||||||
|
|
||||||
match apply_opt.version {
|
let layout = layout.apply_staged_changes(apply_opt.version)?;
|
||||||
None => {
|
|
||||||
println!("Please pass the --version flag to ensure that you are writing the correct version of the cluster layout.");
|
|
||||||
println!("To know the correct value of the --version flag, invoke `garage layout show` and review the proposed changes.");
|
|
||||||
return Err(Error::Message("--version flag is missing".into()));
|
|
||||||
}
|
|
||||||
Some(v) => {
|
|
||||||
if v != layout.version + 1 {
|
|
||||||
return Err(Error::Message("Invalid value of --version flag".into()));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
layout.roles.merge(&layout.staging);
|
|
||||||
|
|
||||||
if !layout.calculate_partition_assignation() {
|
|
||||||
return Err(Error::Message("Could not calculate new assignation of partitions to nodes. This can happen if there are less nodes than the desired number of copies of your data (see the replication_mode configuration parameter).".into()));
|
|
||||||
}
|
|
||||||
|
|
||||||
layout.staging.clear();
|
|
||||||
layout.staging_hash = blake2sum(&rmp_to_vec_all_named(&layout.staging).unwrap()[..]);
|
|
||||||
|
|
||||||
layout.version += 1;
|
|
||||||
|
|
||||||
send_layout(rpc_cli, rpc_host, layout).await?;
|
send_layout(rpc_cli, rpc_host, layout).await?;
|
||||||
|
|
||||||
|
@ -242,25 +228,9 @@ pub async fn cmd_revert_layout(
|
||||||
rpc_host: NodeID,
|
rpc_host: NodeID,
|
||||||
revert_opt: RevertLayoutOpt,
|
revert_opt: RevertLayoutOpt,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
let mut layout = fetch_layout(rpc_cli, rpc_host).await?;
|
let layout = fetch_layout(rpc_cli, rpc_host).await?;
|
||||||
|
|
||||||
match revert_opt.version {
|
let layout = layout.revert_staged_changes(revert_opt.version)?;
|
||||||
None => {
|
|
||||||
println!("Please pass the --version flag to ensure that you are writing the correct version of the cluster layout.");
|
|
||||||
println!("To know the correct value of the --version flag, invoke `garage layout show` and review the proposed changes.");
|
|
||||||
return Err(Error::Message("--version flag is missing".into()));
|
|
||||||
}
|
|
||||||
Some(v) => {
|
|
||||||
if v != layout.version + 1 {
|
|
||||||
return Err(Error::Message("Invalid value of --version flag".into()));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
layout.staging.clear();
|
|
||||||
layout.staging_hash = blake2sum(&rmp_to_vec_all_named(&layout.staging).unwrap()[..]);
|
|
||||||
|
|
||||||
layout.version += 1;
|
|
||||||
|
|
||||||
send_layout(rpc_cli, rpc_host, layout).await?;
|
send_layout(rpc_cli, rpc_host, layout).await?;
|
||||||
|
|
||||||
|
@ -323,11 +293,20 @@ pub fn print_cluster_layout(layout: &ClusterLayout) -> bool {
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn print_staging_role_changes(layout: &ClusterLayout) -> bool {
|
pub fn print_staging_role_changes(layout: &ClusterLayout) -> bool {
|
||||||
if !layout.staging.items().is_empty() {
|
let has_changes = layout
|
||||||
|
.staging
|
||||||
|
.items()
|
||||||
|
.iter()
|
||||||
|
.any(|(k, _, v)| layout.roles.get(k) != Some(v));
|
||||||
|
|
||||||
|
if has_changes {
|
||||||
println!();
|
println!();
|
||||||
println!("==== STAGED ROLE CHANGES ====");
|
println!("==== STAGED ROLE CHANGES ====");
|
||||||
let mut table = vec!["ID\tTags\tZone\tCapacity".to_string()];
|
let mut table = vec!["ID\tTags\tZone\tCapacity".to_string()];
|
||||||
for (id, _, role) in layout.staging.items().iter() {
|
for (id, _, role) in layout.staging.items().iter() {
|
||||||
|
if layout.roles.get(id) == Some(role) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
if let Some(role) = &role.0 {
|
if let Some(role) = &role.0 {
|
||||||
let tags = role.tags.join(",");
|
let tags = role.tags.join(",");
|
||||||
table.push(format!(
|
table.push(format!(
|
||||||
|
|
|
@ -1,55 +1,56 @@
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use garage_util::version;
|
||||||
use structopt::StructOpt;
|
use structopt::StructOpt;
|
||||||
|
|
||||||
#[derive(StructOpt, Debug)]
|
#[derive(StructOpt, Debug)]
|
||||||
pub enum Command {
|
pub enum Command {
|
||||||
/// Run Garage server
|
/// Run Garage server
|
||||||
#[structopt(name = "server")]
|
#[structopt(name = "server", version = version::garage())]
|
||||||
Server,
|
Server,
|
||||||
|
|
||||||
/// Get network status
|
/// Get network status
|
||||||
#[structopt(name = "status")]
|
#[structopt(name = "status", version = version::garage())]
|
||||||
Status,
|
Status,
|
||||||
|
|
||||||
/// Operations on individual Garage nodes
|
/// Operations on individual Garage nodes
|
||||||
#[structopt(name = "node")]
|
#[structopt(name = "node", version = version::garage())]
|
||||||
Node(NodeOperation),
|
Node(NodeOperation),
|
||||||
|
|
||||||
/// Operations on the assignation of node roles in the cluster layout
|
/// Operations on the assignation of node roles in the cluster layout
|
||||||
#[structopt(name = "layout")]
|
#[structopt(name = "layout", version = version::garage())]
|
||||||
Layout(LayoutOperation),
|
Layout(LayoutOperation),
|
||||||
|
|
||||||
/// Operations on buckets
|
/// Operations on buckets
|
||||||
#[structopt(name = "bucket")]
|
#[structopt(name = "bucket", version = version::garage())]
|
||||||
Bucket(BucketOperation),
|
Bucket(BucketOperation),
|
||||||
|
|
||||||
/// Operations on S3 access keys
|
/// Operations on S3 access keys
|
||||||
#[structopt(name = "key")]
|
#[structopt(name = "key", version = version::garage())]
|
||||||
Key(KeyOperation),
|
Key(KeyOperation),
|
||||||
|
|
||||||
/// Run migrations from previous Garage version
|
/// Run migrations from previous Garage version
|
||||||
/// (DO NOT USE WITHOUT READING FULL DOCUMENTATION)
|
/// (DO NOT USE WITHOUT READING FULL DOCUMENTATION)
|
||||||
#[structopt(name = "migrate")]
|
#[structopt(name = "migrate", version = version::garage())]
|
||||||
Migrate(MigrateOpt),
|
Migrate(MigrateOpt),
|
||||||
|
|
||||||
/// Start repair of node data
|
/// Start repair of node data on remote node
|
||||||
#[structopt(name = "repair")]
|
#[structopt(name = "repair", version = version::garage())]
|
||||||
Repair(RepairOpt),
|
Repair(RepairOpt),
|
||||||
|
|
||||||
/// Gather node statistics
|
/// Gather node statistics
|
||||||
#[structopt(name = "stats")]
|
#[structopt(name = "stats", version = version::garage())]
|
||||||
Stats(StatsOpt),
|
Stats(StatsOpt),
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(StructOpt, Debug)]
|
#[derive(StructOpt, Debug)]
|
||||||
pub enum NodeOperation {
|
pub enum NodeOperation {
|
||||||
/// Print identifier (public key) of this Garage node
|
/// Print identifier (public key) of this Garage node
|
||||||
#[structopt(name = "id")]
|
#[structopt(name = "id", version = version::garage())]
|
||||||
NodeId(NodeIdOpt),
|
NodeId(NodeIdOpt),
|
||||||
|
|
||||||
/// Connect to Garage node that is currently isolated from the system
|
/// Connect to Garage node that is currently isolated from the system
|
||||||
#[structopt(name = "connect")]
|
#[structopt(name = "connect", version = version::garage())]
|
||||||
Connect(ConnectNodeOpt),
|
Connect(ConnectNodeOpt),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -70,23 +71,23 @@ pub struct ConnectNodeOpt {
|
||||||
#[derive(StructOpt, Debug)]
|
#[derive(StructOpt, Debug)]
|
||||||
pub enum LayoutOperation {
|
pub enum LayoutOperation {
|
||||||
/// Assign role to Garage node
|
/// Assign role to Garage node
|
||||||
#[structopt(name = "assign")]
|
#[structopt(name = "assign", version = version::garage())]
|
||||||
Assign(AssignRoleOpt),
|
Assign(AssignRoleOpt),
|
||||||
|
|
||||||
/// Remove role from Garage cluster node
|
/// Remove role from Garage cluster node
|
||||||
#[structopt(name = "remove")]
|
#[structopt(name = "remove", version = version::garage())]
|
||||||
Remove(RemoveRoleOpt),
|
Remove(RemoveRoleOpt),
|
||||||
|
|
||||||
/// Show roles currently assigned to nodes and changes staged for commit
|
/// Show roles currently assigned to nodes and changes staged for commit
|
||||||
#[structopt(name = "show")]
|
#[structopt(name = "show", version = version::garage())]
|
||||||
Show,
|
Show,
|
||||||
|
|
||||||
/// Apply staged changes to cluster layout
|
/// Apply staged changes to cluster layout
|
||||||
#[structopt(name = "apply")]
|
#[structopt(name = "apply", version = version::garage())]
|
||||||
Apply(ApplyLayoutOpt),
|
Apply(ApplyLayoutOpt),
|
||||||
|
|
||||||
/// Revert staged changes to cluster layout
|
/// Revert staged changes to cluster layout
|
||||||
#[structopt(name = "revert")]
|
#[structopt(name = "revert", version = version::garage())]
|
||||||
Revert(RevertLayoutOpt),
|
Revert(RevertLayoutOpt),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -141,39 +142,39 @@ pub struct RevertLayoutOpt {
|
||||||
#[derive(Serialize, Deserialize, StructOpt, Debug)]
|
#[derive(Serialize, Deserialize, StructOpt, Debug)]
|
||||||
pub enum BucketOperation {
|
pub enum BucketOperation {
|
||||||
/// List buckets
|
/// List buckets
|
||||||
#[structopt(name = "list")]
|
#[structopt(name = "list", version = version::garage())]
|
||||||
List,
|
List,
|
||||||
|
|
||||||
/// Get bucket info
|
/// Get bucket info
|
||||||
#[structopt(name = "info")]
|
#[structopt(name = "info", version = version::garage())]
|
||||||
Info(BucketOpt),
|
Info(BucketOpt),
|
||||||
|
|
||||||
/// Create bucket
|
/// Create bucket
|
||||||
#[structopt(name = "create")]
|
#[structopt(name = "create", version = version::garage())]
|
||||||
Create(BucketOpt),
|
Create(BucketOpt),
|
||||||
|
|
||||||
/// Delete bucket
|
/// Delete bucket
|
||||||
#[structopt(name = "delete")]
|
#[structopt(name = "delete", version = version::garage())]
|
||||||
Delete(DeleteBucketOpt),
|
Delete(DeleteBucketOpt),
|
||||||
|
|
||||||
/// Alias bucket under new name
|
/// Alias bucket under new name
|
||||||
#[structopt(name = "alias")]
|
#[structopt(name = "alias", version = version::garage())]
|
||||||
Alias(AliasBucketOpt),
|
Alias(AliasBucketOpt),
|
||||||
|
|
||||||
/// Remove bucket alias
|
/// Remove bucket alias
|
||||||
#[structopt(name = "unalias")]
|
#[structopt(name = "unalias", version = version::garage())]
|
||||||
Unalias(UnaliasBucketOpt),
|
Unalias(UnaliasBucketOpt),
|
||||||
|
|
||||||
/// Allow key to read or write to bucket
|
/// Allow key to read or write to bucket
|
||||||
#[structopt(name = "allow")]
|
#[structopt(name = "allow", version = version::garage())]
|
||||||
Allow(PermBucketOpt),
|
Allow(PermBucketOpt),
|
||||||
|
|
||||||
/// Deny key from reading or writing to bucket
|
/// Deny key from reading or writing to bucket
|
||||||
#[structopt(name = "deny")]
|
#[structopt(name = "deny", version = version::garage())]
|
||||||
Deny(PermBucketOpt),
|
Deny(PermBucketOpt),
|
||||||
|
|
||||||
/// Expose as website or not
|
/// Expose as website or not
|
||||||
#[structopt(name = "website")]
|
#[structopt(name = "website", version = version::garage())]
|
||||||
Website(WebsiteOpt),
|
Website(WebsiteOpt),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -264,35 +265,35 @@ pub struct PermBucketOpt {
|
||||||
#[derive(Serialize, Deserialize, StructOpt, Debug)]
|
#[derive(Serialize, Deserialize, StructOpt, Debug)]
|
||||||
pub enum KeyOperation {
|
pub enum KeyOperation {
|
||||||
/// List keys
|
/// List keys
|
||||||
#[structopt(name = "list")]
|
#[structopt(name = "list", version = version::garage())]
|
||||||
List,
|
List,
|
||||||
|
|
||||||
/// Get key info
|
/// Get key info
|
||||||
#[structopt(name = "info")]
|
#[structopt(name = "info", version = version::garage())]
|
||||||
Info(KeyOpt),
|
Info(KeyOpt),
|
||||||
|
|
||||||
/// Create new key
|
/// Create new key
|
||||||
#[structopt(name = "new")]
|
#[structopt(name = "new", version = version::garage())]
|
||||||
New(KeyNewOpt),
|
New(KeyNewOpt),
|
||||||
|
|
||||||
/// Rename key
|
/// Rename key
|
||||||
#[structopt(name = "rename")]
|
#[structopt(name = "rename", version = version::garage())]
|
||||||
Rename(KeyRenameOpt),
|
Rename(KeyRenameOpt),
|
||||||
|
|
||||||
/// Delete key
|
/// Delete key
|
||||||
#[structopt(name = "delete")]
|
#[structopt(name = "delete", version = version::garage())]
|
||||||
Delete(KeyDeleteOpt),
|
Delete(KeyDeleteOpt),
|
||||||
|
|
||||||
/// Set permission flags for key
|
/// Set permission flags for key
|
||||||
#[structopt(name = "allow")]
|
#[structopt(name = "allow", version = version::garage())]
|
||||||
Allow(KeyPermOpt),
|
Allow(KeyPermOpt),
|
||||||
|
|
||||||
/// Unset permission flags for key
|
/// Unset permission flags for key
|
||||||
#[structopt(name = "deny")]
|
#[structopt(name = "deny", version = version::garage())]
|
||||||
Deny(KeyPermOpt),
|
Deny(KeyPermOpt),
|
||||||
|
|
||||||
/// Import key
|
/// Import key
|
||||||
#[structopt(name = "import")]
|
#[structopt(name = "import", version = version::garage())]
|
||||||
Import(KeyImportOpt),
|
Import(KeyImportOpt),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -364,7 +365,7 @@ pub struct MigrateOpt {
|
||||||
#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)]
|
#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)]
|
||||||
pub enum MigrateWhat {
|
pub enum MigrateWhat {
|
||||||
/// Migrate buckets and permissions from v0.5.0
|
/// Migrate buckets and permissions from v0.5.0
|
||||||
#[structopt(name = "buckets050")]
|
#[structopt(name = "buckets050", version = version::garage())]
|
||||||
Buckets050,
|
Buckets050,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -385,19 +386,19 @@ pub struct RepairOpt {
|
||||||
#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)]
|
#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)]
|
||||||
pub enum RepairWhat {
|
pub enum RepairWhat {
|
||||||
/// Only do a full sync of metadata tables
|
/// Only do a full sync of metadata tables
|
||||||
#[structopt(name = "tables")]
|
#[structopt(name = "tables", version = version::garage())]
|
||||||
Tables,
|
Tables,
|
||||||
/// Only repair (resync/rebalance) the set of stored blocks
|
/// Only repair (resync/rebalance) the set of stored blocks
|
||||||
#[structopt(name = "blocks")]
|
#[structopt(name = "blocks", version = version::garage())]
|
||||||
Blocks,
|
Blocks,
|
||||||
/// Only redo the propagation of object deletions to the version table (slow)
|
/// Only redo the propagation of object deletions to the version table (slow)
|
||||||
#[structopt(name = "versions")]
|
#[structopt(name = "versions", version = version::garage())]
|
||||||
Versions,
|
Versions,
|
||||||
/// Only redo the propagation of version deletions to the block ref table (extremely slow)
|
/// Only redo the propagation of version deletions to the block ref table (extremely slow)
|
||||||
#[structopt(name = "block_refs")]
|
#[structopt(name = "block_refs", version = version::garage())]
|
||||||
BlockRefs,
|
BlockRefs,
|
||||||
/// Verify integrity of all blocks on disc (extremely slow, i/o intensive)
|
/// Verify integrity of all blocks on disc (extremely slow, i/o intensive)
|
||||||
#[structopt(name = "scrub")]
|
#[structopt(name = "scrub", version = version::garage())]
|
||||||
Scrub {
|
Scrub {
|
||||||
/// Tranquility factor (see tranquilizer documentation)
|
/// Tranquility factor (see tranquilizer documentation)
|
||||||
#[structopt(name = "tranquility", default_value = "2")]
|
#[structopt(name = "tranquility", default_value = "2")]
|
||||||
|
|
|
@ -3,6 +3,7 @@ use std::collections::HashMap;
|
||||||
use garage_util::crdt::*;
|
use garage_util::crdt::*;
|
||||||
use garage_util::data::Uuid;
|
use garage_util::data::Uuid;
|
||||||
use garage_util::error::*;
|
use garage_util::error::*;
|
||||||
|
use garage_util::formater::format_table;
|
||||||
|
|
||||||
use garage_model::bucket_table::*;
|
use garage_model::bucket_table::*;
|
||||||
use garage_model::key_table::*;
|
use garage_model::key_table::*;
|
||||||
|
@ -173,42 +174,13 @@ pub fn print_bucket_info(bucket: &Bucket, relevant_keys: &HashMap<String, Key>)
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn format_table(data: Vec<String>) {
|
|
||||||
let data = data
|
|
||||||
.iter()
|
|
||||||
.map(|s| s.split('\t').collect::<Vec<_>>())
|
|
||||||
.collect::<Vec<_>>();
|
|
||||||
|
|
||||||
let columns = data.iter().map(|row| row.len()).fold(0, std::cmp::max);
|
|
||||||
let mut column_size = vec![0; columns];
|
|
||||||
|
|
||||||
let mut out = String::new();
|
|
||||||
|
|
||||||
for row in data.iter() {
|
|
||||||
for (i, col) in row.iter().enumerate() {
|
|
||||||
column_size[i] = std::cmp::max(column_size[i], col.chars().count());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for row in data.iter() {
|
|
||||||
for (col, col_len) in row[..row.len() - 1].iter().zip(column_size.iter()) {
|
|
||||||
out.push_str(col);
|
|
||||||
(0..col_len - col.chars().count() + 2).for_each(|_| out.push(' '));
|
|
||||||
}
|
|
||||||
out.push_str(row[row.len() - 1]);
|
|
||||||
out.push('\n');
|
|
||||||
}
|
|
||||||
|
|
||||||
print!("{}", out);
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn find_matching_node(
|
pub fn find_matching_node(
|
||||||
cand: impl std::iter::Iterator<Item = Uuid>,
|
cand: impl std::iter::Iterator<Item = Uuid>,
|
||||||
pattern: &str,
|
pattern: &str,
|
||||||
) -> Result<Uuid, Error> {
|
) -> Result<Uuid, Error> {
|
||||||
let mut candidates = vec![];
|
let mut candidates = vec![];
|
||||||
for c in cand {
|
for c in cand {
|
||||||
if hex::encode(&c).starts_with(&pattern) {
|
if hex::encode(&c).starts_with(&pattern) && !candidates.contains(&c) {
|
||||||
candidates.push(c);
|
candidates.push(c);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -8,6 +8,7 @@ mod admin;
|
||||||
mod cli;
|
mod cli;
|
||||||
mod repair;
|
mod repair;
|
||||||
mod server;
|
mod server;
|
||||||
|
mod tracing_setup;
|
||||||
|
|
||||||
use std::net::SocketAddr;
|
use std::net::SocketAddr;
|
||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
|
@ -21,6 +22,7 @@ use garage_util::error::*;
|
||||||
|
|
||||||
use garage_rpc::system::*;
|
use garage_rpc::system::*;
|
||||||
use garage_rpc::*;
|
use garage_rpc::*;
|
||||||
|
use garage_util::version;
|
||||||
|
|
||||||
use garage_model::helper::error::Error as HelperError;
|
use garage_model::helper::error::Error as HelperError;
|
||||||
|
|
||||||
|
@ -28,7 +30,7 @@ use admin::*;
|
||||||
use cli::*;
|
use cli::*;
|
||||||
|
|
||||||
#[derive(StructOpt, Debug)]
|
#[derive(StructOpt, Debug)]
|
||||||
#[structopt(name = "garage")]
|
#[structopt(name = "garage", version = version::garage(), about = "S3-compatible object store for self-hosted geo-distributed deployments")]
|
||||||
struct Opt {
|
struct Opt {
|
||||||
/// Host to connect to for admin operations, in the format:
|
/// Host to connect to for admin operations, in the format:
|
||||||
/// <public-key>@<ip>:<port>
|
/// <public-key>@<ip>:<port>
|
||||||
|
@ -141,6 +143,7 @@ async fn cli_command(opt: Opt) -> Result<(), Error> {
|
||||||
match cli_command_dispatch(opt.cmd, &system_rpc_endpoint, &admin_rpc_endpoint, id).await {
|
match cli_command_dispatch(opt.cmd, &system_rpc_endpoint, &admin_rpc_endpoint, id).await {
|
||||||
Err(HelperError::Internal(i)) => Err(Error::Message(format!("Internal error: {}", i))),
|
Err(HelperError::Internal(i)) => Err(Error::Message(format!("Internal error: {}", i))),
|
||||||
Err(HelperError::BadRequest(b)) => Err(Error::Message(b)),
|
Err(HelperError::BadRequest(b)) => Err(Error::Message(b)),
|
||||||
|
Err(e) => Err(Error::Message(format!("{}", e))),
|
||||||
Ok(x) => Ok(x),
|
Ok(x) => Ok(x),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -2,10 +2,10 @@ use std::sync::Arc;
|
||||||
|
|
||||||
use tokio::sync::watch;
|
use tokio::sync::watch;
|
||||||
|
|
||||||
use garage_model::block_ref_table::*;
|
|
||||||
use garage_model::garage::Garage;
|
use garage_model::garage::Garage;
|
||||||
use garage_model::object_table::*;
|
use garage_model::s3::block_ref_table::*;
|
||||||
use garage_model::version_table::*;
|
use garage_model::s3::object_table::*;
|
||||||
|
use garage_model::s3::version_table::*;
|
||||||
use garage_table::*;
|
use garage_table::*;
|
||||||
use garage_util::error::Error;
|
use garage_util::error::Error;
|
||||||
|
|
||||||
|
|
|
@ -6,13 +6,16 @@ use garage_util::background::*;
|
||||||
use garage_util::config::*;
|
use garage_util::config::*;
|
||||||
use garage_util::error::Error;
|
use garage_util::error::Error;
|
||||||
|
|
||||||
use garage_admin::metrics::*;
|
use garage_api::admin::api_server::AdminApiServer;
|
||||||
use garage_admin::tracing_setup::*;
|
use garage_api::s3::api_server::S3ApiServer;
|
||||||
use garage_api::run_api_server;
|
|
||||||
use garage_model::garage::Garage;
|
use garage_model::garage::Garage;
|
||||||
use garage_web::run_web_server;
|
use garage_web::run_web_server;
|
||||||
|
|
||||||
|
#[cfg(feature = "k2v")]
|
||||||
|
use garage_api::k2v::api_server::K2VApiServer;
|
||||||
|
|
||||||
use crate::admin::*;
|
use crate::admin::*;
|
||||||
|
use crate::tracing_setup::*;
|
||||||
|
|
||||||
async fn wait_from(mut chan: watch::Receiver<bool>) {
|
async fn wait_from(mut chan: watch::Receiver<bool>) {
|
||||||
while !*chan.borrow() {
|
while !*chan.borrow() {
|
||||||
|
@ -36,9 +39,6 @@ pub async fn run_server(config_file: PathBuf) -> Result<(), Error> {
|
||||||
.open()
|
.open()
|
||||||
.expect("Unable to open sled DB");
|
.expect("Unable to open sled DB");
|
||||||
|
|
||||||
info!("Initialize admin web server and metric backend...");
|
|
||||||
let admin_server_init = AdminServer::init();
|
|
||||||
|
|
||||||
info!("Initializing background runner...");
|
info!("Initializing background runner...");
|
||||||
let watch_cancel = netapp::util::watch_ctrl_c();
|
let watch_cancel = netapp::util::watch_ctrl_c();
|
||||||
let (background, await_background_done) = BackgroundRunner::new(16, watch_cancel.clone());
|
let (background, await_background_done) = BackgroundRunner::new(16, watch_cancel.clone());
|
||||||
|
@ -51,52 +51,70 @@ pub async fn run_server(config_file: PathBuf) -> Result<(), Error> {
|
||||||
init_tracing(&export_to, garage.system.id)?;
|
init_tracing(&export_to, garage.system.id)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
info!("Initialize Admin API server and metrics collector...");
|
||||||
|
let admin_server = AdminApiServer::new(garage.clone());
|
||||||
|
|
||||||
let run_system = tokio::spawn(garage.system.clone().run(watch_cancel.clone()));
|
let run_system = tokio::spawn(garage.system.clone().run(watch_cancel.clone()));
|
||||||
|
|
||||||
info!("Create admin RPC handler...");
|
info!("Create admin RPC handler...");
|
||||||
AdminRpcHandler::new(garage.clone());
|
AdminRpcHandler::new(garage.clone());
|
||||||
|
|
||||||
info!("Initializing API server...");
|
info!("Initializing S3 API server...");
|
||||||
let api_server = tokio::spawn(run_api_server(
|
let s3_api_server = tokio::spawn(S3ApiServer::run(
|
||||||
garage.clone(),
|
garage.clone(),
|
||||||
wait_from(watch_cancel.clone()),
|
wait_from(watch_cancel.clone()),
|
||||||
));
|
));
|
||||||
|
|
||||||
|
#[cfg(feature = "k2v")]
|
||||||
|
let k2v_api_server = {
|
||||||
|
info!("Initializing K2V API server...");
|
||||||
|
tokio::spawn(K2VApiServer::run(
|
||||||
|
garage.clone(),
|
||||||
|
wait_from(watch_cancel.clone()),
|
||||||
|
))
|
||||||
|
};
|
||||||
|
|
||||||
info!("Initializing web server...");
|
info!("Initializing web server...");
|
||||||
let web_server = tokio::spawn(run_web_server(
|
let web_server = tokio::spawn(run_web_server(
|
||||||
garage.clone(),
|
garage.clone(),
|
||||||
wait_from(watch_cancel.clone()),
|
wait_from(watch_cancel.clone()),
|
||||||
));
|
));
|
||||||
|
|
||||||
let admin_server = if let Some(admin_bind_addr) = config.admin.api_bind_addr {
|
info!("Launching Admin API server...");
|
||||||
info!("Configure and run admin web server...");
|
let admin_server = tokio::spawn(admin_server.run(wait_from(watch_cancel.clone())));
|
||||||
Some(tokio::spawn(
|
|
||||||
admin_server_init.run(admin_bind_addr, wait_from(watch_cancel.clone())),
|
|
||||||
))
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
};
|
|
||||||
|
|
||||||
// Stuff runs
|
// Stuff runs
|
||||||
|
|
||||||
// When a cancel signal is sent, stuff stops
|
// When a cancel signal is sent, stuff stops
|
||||||
if let Err(e) = api_server.await? {
|
if let Err(e) = s3_api_server.await? {
|
||||||
warn!("API server exited with error: {}", e);
|
warn!("S3 API server exited with error: {}", e);
|
||||||
|
} else {
|
||||||
|
info!("S3 API server exited without error.");
|
||||||
|
}
|
||||||
|
#[cfg(feature = "k2v")]
|
||||||
|
if let Err(e) = k2v_api_server.await? {
|
||||||
|
warn!("K2V API server exited with error: {}", e);
|
||||||
|
} else {
|
||||||
|
info!("K2V API server exited without error.");
|
||||||
}
|
}
|
||||||
if let Err(e) = web_server.await? {
|
if let Err(e) = web_server.await? {
|
||||||
warn!("Web server exited with error: {}", e);
|
warn!("Web server exited with error: {}", e);
|
||||||
|
} else {
|
||||||
|
info!("Web server exited without error.");
|
||||||
}
|
}
|
||||||
if let Some(a) = admin_server {
|
if let Err(e) = admin_server.await? {
|
||||||
if let Err(e) = a.await? {
|
warn!("Admin web server exited with error: {}", e);
|
||||||
warn!("Admin web server exited with error: {}", e);
|
} else {
|
||||||
}
|
info!("Admin API server exited without error.");
|
||||||
}
|
}
|
||||||
|
|
||||||
// Remove RPC handlers for system to break reference cycles
|
// Remove RPC handlers for system to break reference cycles
|
||||||
garage.system.netapp.drop_all_handlers();
|
garage.system.netapp.drop_all_handlers();
|
||||||
|
opentelemetry::global::shutdown_tracer_provider();
|
||||||
|
|
||||||
// Await for netapp RPC system to end
|
// Await for netapp RPC system to end
|
||||||
run_system.await?;
|
run_system.await?;
|
||||||
|
info!("Netapp exited");
|
||||||
|
|
||||||
// Drop all references so that stuff can terminate properly
|
// Drop all references so that stuff can terminate properly
|
||||||
drop(garage);
|
drop(garage);
|
||||||
|
|
|
@ -10,7 +10,7 @@ pub fn build_client(instance: &Instance) -> Client {
|
||||||
None,
|
None,
|
||||||
"garage-integ-test",
|
"garage-integ-test",
|
||||||
);
|
);
|
||||||
let endpoint = Endpoint::immutable(instance.uri());
|
let endpoint = Endpoint::immutable(instance.s3_uri());
|
||||||
|
|
||||||
let config = Config::builder()
|
let config = Config::builder()
|
||||||
.region(super::REGION)
|
.region(super::REGION)
|
||||||
|
|
|
@ -17,14 +17,25 @@ use garage_api::signature;
|
||||||
pub struct CustomRequester {
|
pub struct CustomRequester {
|
||||||
key: Key,
|
key: Key,
|
||||||
uri: Uri,
|
uri: Uri,
|
||||||
|
service: &'static str,
|
||||||
client: Client<HttpConnector>,
|
client: Client<HttpConnector>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl CustomRequester {
|
impl CustomRequester {
|
||||||
pub fn new(instance: &Instance) -> Self {
|
pub fn new_s3(instance: &Instance) -> Self {
|
||||||
CustomRequester {
|
CustomRequester {
|
||||||
key: instance.key.clone(),
|
key: instance.key.clone(),
|
||||||
uri: instance.uri(),
|
uri: instance.s3_uri(),
|
||||||
|
service: "s3",
|
||||||
|
client: Client::new(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn new_k2v(instance: &Instance) -> Self {
|
||||||
|
CustomRequester {
|
||||||
|
key: instance.key.clone(),
|
||||||
|
uri: instance.k2v_uri(),
|
||||||
|
service: "k2v",
|
||||||
client: Client::new(),
|
client: Client::new(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -32,6 +43,7 @@ impl CustomRequester {
|
||||||
pub fn builder(&self, bucket: String) -> RequestBuilder<'_> {
|
pub fn builder(&self, bucket: String) -> RequestBuilder<'_> {
|
||||||
RequestBuilder {
|
RequestBuilder {
|
||||||
requester: self,
|
requester: self,
|
||||||
|
service: self.service,
|
||||||
bucket,
|
bucket,
|
||||||
method: Method::GET,
|
method: Method::GET,
|
||||||
path: String::new(),
|
path: String::new(),
|
||||||
|
@ -47,6 +59,7 @@ impl CustomRequester {
|
||||||
|
|
||||||
pub struct RequestBuilder<'a> {
|
pub struct RequestBuilder<'a> {
|
||||||
requester: &'a CustomRequester,
|
requester: &'a CustomRequester,
|
||||||
|
service: &'static str,
|
||||||
bucket: String,
|
bucket: String,
|
||||||
method: Method,
|
method: Method,
|
||||||
path: String,
|
path: String,
|
||||||
|
@ -59,13 +72,17 @@ pub struct RequestBuilder<'a> {
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a> RequestBuilder<'a> {
|
impl<'a> RequestBuilder<'a> {
|
||||||
|
pub fn service(&mut self, service: &'static str) -> &mut Self {
|
||||||
|
self.service = service;
|
||||||
|
self
|
||||||
|
}
|
||||||
pub fn method(&mut self, method: Method) -> &mut Self {
|
pub fn method(&mut self, method: Method) -> &mut Self {
|
||||||
self.method = method;
|
self.method = method;
|
||||||
self
|
self
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn path(&mut self, path: String) -> &mut Self {
|
pub fn path(&mut self, path: impl ToString) -> &mut Self {
|
||||||
self.path = path;
|
self.path = path.to_string();
|
||||||
self
|
self
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -74,16 +91,38 @@ impl<'a> RequestBuilder<'a> {
|
||||||
self
|
self
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn query_param<T, U>(&mut self, param: T, value: Option<U>) -> &mut Self
|
||||||
|
where
|
||||||
|
T: ToString,
|
||||||
|
U: ToString,
|
||||||
|
{
|
||||||
|
self.query_params
|
||||||
|
.insert(param.to_string(), value.as_ref().map(ToString::to_string));
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
pub fn signed_headers(&mut self, signed_headers: HashMap<String, String>) -> &mut Self {
|
pub fn signed_headers(&mut self, signed_headers: HashMap<String, String>) -> &mut Self {
|
||||||
self.signed_headers = signed_headers;
|
self.signed_headers = signed_headers;
|
||||||
self
|
self
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn signed_header(&mut self, name: impl ToString, value: impl ToString) -> &mut Self {
|
||||||
|
self.signed_headers
|
||||||
|
.insert(name.to_string(), value.to_string());
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
pub fn unsigned_headers(&mut self, unsigned_headers: HashMap<String, String>) -> &mut Self {
|
pub fn unsigned_headers(&mut self, unsigned_headers: HashMap<String, String>) -> &mut Self {
|
||||||
self.unsigned_headers = unsigned_headers;
|
self.unsigned_headers = unsigned_headers;
|
||||||
self
|
self
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn unsigned_header(&mut self, name: impl ToString, value: impl ToString) -> &mut Self {
|
||||||
|
self.unsigned_headers
|
||||||
|
.insert(name.to_string(), value.to_string());
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
pub fn body(&mut self, body: Vec<u8>) -> &mut Self {
|
pub fn body(&mut self, body: Vec<u8>) -> &mut Self {
|
||||||
self.body = body;
|
self.body = body;
|
||||||
self
|
self
|
||||||
|
@ -106,24 +145,24 @@ impl<'a> RequestBuilder<'a> {
|
||||||
let query = query_param_to_string(&self.query_params);
|
let query = query_param_to_string(&self.query_params);
|
||||||
let (host, path) = if self.vhost_style {
|
let (host, path) = if self.vhost_style {
|
||||||
(
|
(
|
||||||
format!("{}.s3.garage", self.bucket),
|
format!("{}.{}.garage", self.bucket, self.service),
|
||||||
format!("{}{}", self.path, query),
|
format!("{}{}", self.path, query),
|
||||||
)
|
)
|
||||||
} else {
|
} else {
|
||||||
(
|
(
|
||||||
"s3.garage".to_owned(),
|
format!("{}.garage", self.service),
|
||||||
format!("{}/{}{}", self.bucket, self.path, query),
|
format!("{}/{}{}", self.bucket, self.path, query),
|
||||||
)
|
)
|
||||||
};
|
};
|
||||||
let uri = format!("{}{}", self.requester.uri, path);
|
let uri = format!("{}{}", self.requester.uri, path);
|
||||||
|
|
||||||
let now = Utc::now();
|
let now = Utc::now();
|
||||||
let scope = signature::compute_scope(&now, super::REGION.as_ref());
|
let scope = signature::compute_scope(&now, super::REGION.as_ref(), self.service);
|
||||||
let mut signer = signature::signing_hmac(
|
let mut signer = signature::signing_hmac(
|
||||||
&now,
|
&now,
|
||||||
&self.requester.key.secret,
|
&self.requester.key.secret,
|
||||||
super::REGION.as_ref(),
|
super::REGION.as_ref(),
|
||||||
"s3",
|
self.service,
|
||||||
)
|
)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
let streaming_signer = signer.clone();
|
let streaming_signer = signer.clone();
|
||||||
|
|
|
@ -22,7 +22,9 @@ pub struct Instance {
|
||||||
process: process::Child,
|
process: process::Child,
|
||||||
pub path: PathBuf,
|
pub path: PathBuf,
|
||||||
pub key: Key,
|
pub key: Key,
|
||||||
pub api_port: u16,
|
pub s3_port: u16,
|
||||||
|
pub k2v_port: u16,
|
||||||
|
pub web_port: u16,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Instance {
|
impl Instance {
|
||||||
|
@ -58,9 +60,12 @@ rpc_secret = "{secret}"
|
||||||
|
|
||||||
[s3_api]
|
[s3_api]
|
||||||
s3_region = "{region}"
|
s3_region = "{region}"
|
||||||
api_bind_addr = "127.0.0.1:{api_port}"
|
api_bind_addr = "127.0.0.1:{s3_port}"
|
||||||
root_domain = ".s3.garage"
|
root_domain = ".s3.garage"
|
||||||
|
|
||||||
|
[k2v_api]
|
||||||
|
api_bind_addr = "127.0.0.1:{k2v_port}"
|
||||||
|
|
||||||
[s3_web]
|
[s3_web]
|
||||||
bind_addr = "127.0.0.1:{web_port}"
|
bind_addr = "127.0.0.1:{web_port}"
|
||||||
root_domain = ".web.garage"
|
root_domain = ".web.garage"
|
||||||
|
@ -72,10 +77,11 @@ api_bind_addr = "127.0.0.1:{admin_port}"
|
||||||
path = path.display(),
|
path = path.display(),
|
||||||
secret = GARAGE_TEST_SECRET,
|
secret = GARAGE_TEST_SECRET,
|
||||||
region = super::REGION,
|
region = super::REGION,
|
||||||
api_port = port,
|
s3_port = port,
|
||||||
rpc_port = port + 1,
|
k2v_port = port + 1,
|
||||||
web_port = port + 2,
|
rpc_port = port + 2,
|
||||||
admin_port = port + 3,
|
web_port = port + 3,
|
||||||
|
admin_port = port + 4,
|
||||||
);
|
);
|
||||||
fs::write(path.join("config.toml"), config).expect("Could not write garage config file");
|
fs::write(path.join("config.toml"), config).expect("Could not write garage config file");
|
||||||
|
|
||||||
|
@ -88,7 +94,7 @@ api_bind_addr = "127.0.0.1:{admin_port}"
|
||||||
.arg("server")
|
.arg("server")
|
||||||
.stdout(stdout)
|
.stdout(stdout)
|
||||||
.stderr(stderr)
|
.stderr(stderr)
|
||||||
.env("RUST_LOG", "garage=info,garage_api=debug")
|
.env("RUST_LOG", "garage=info,garage_api=trace")
|
||||||
.spawn()
|
.spawn()
|
||||||
.expect("Could not start garage");
|
.expect("Could not start garage");
|
||||||
|
|
||||||
|
@ -96,7 +102,9 @@ api_bind_addr = "127.0.0.1:{admin_port}"
|
||||||
process: child,
|
process: child,
|
||||||
path,
|
path,
|
||||||
key: Key::default(),
|
key: Key::default(),
|
||||||
api_port: port,
|
s3_port: port,
|
||||||
|
k2v_port: port + 1,
|
||||||
|
web_port: port + 3,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -147,8 +155,14 @@ api_bind_addr = "127.0.0.1:{admin_port}"
|
||||||
String::from_utf8(output.stdout).unwrap()
|
String::from_utf8(output.stdout).unwrap()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn uri(&self) -> http::Uri {
|
pub fn s3_uri(&self) -> http::Uri {
|
||||||
format!("http://127.0.0.1:{api_port}", api_port = self.api_port)
|
format!("http://127.0.0.1:{s3_port}", s3_port = self.s3_port)
|
||||||
|
.parse()
|
||||||
|
.expect("Could not build garage endpoint URI")
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn k2v_uri(&self) -> http::Uri {
|
||||||
|
format!("http://127.0.0.1:{k2v_port}", k2v_port = self.k2v_port)
|
||||||
.parse()
|
.parse()
|
||||||
.expect("Could not build garage endpoint URI")
|
.expect("Could not build garage endpoint URI")
|
||||||
}
|
}
|
||||||
|
|
|
@ -17,18 +17,27 @@ pub struct Context {
|
||||||
pub garage: &'static garage::Instance,
|
pub garage: &'static garage::Instance,
|
||||||
pub client: Client,
|
pub client: Client,
|
||||||
pub custom_request: CustomRequester,
|
pub custom_request: CustomRequester,
|
||||||
|
pub k2v: K2VContext,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct K2VContext {
|
||||||
|
pub request: CustomRequester,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Context {
|
impl Context {
|
||||||
fn new() -> Self {
|
fn new() -> Self {
|
||||||
let garage = garage::instance();
|
let garage = garage::instance();
|
||||||
let client = client::build_client(garage);
|
let client = client::build_client(garage);
|
||||||
let custom_request = CustomRequester::new(garage);
|
let custom_request = CustomRequester::new_s3(garage);
|
||||||
|
let k2v_request = CustomRequester::new_k2v(garage);
|
||||||
|
|
||||||
Context {
|
Context {
|
||||||
garage,
|
garage,
|
||||||
client,
|
client,
|
||||||
custom_request,
|
custom_request,
|
||||||
|
k2v: K2VContext {
|
||||||
|
request: k2v_request,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
612
src/garage/tests/k2v/batch.rs
Normal file
612
src/garage/tests/k2v/batch.rs
Normal file
|
@ -0,0 +1,612 @@
|
||||||
|
use std::collections::HashMap;
|
||||||
|
|
||||||
|
use crate::common;
|
||||||
|
|
||||||
|
use assert_json_diff::assert_json_eq;
|
||||||
|
use serde_json::json;
|
||||||
|
|
||||||
|
use super::json_body;
|
||||||
|
use hyper::Method;
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_batch() {
|
||||||
|
let ctx = common::context();
|
||||||
|
let bucket = ctx.create_bucket("test-k2v-batch");
|
||||||
|
|
||||||
|
let mut values = HashMap::new();
|
||||||
|
values.insert("a", "initial test 1");
|
||||||
|
values.insert("b", "initial test 2");
|
||||||
|
values.insert("c", "initial test 3");
|
||||||
|
values.insert("d.1", "initial test 4");
|
||||||
|
values.insert("d.2", "initial test 5");
|
||||||
|
values.insert("e", "initial test 6");
|
||||||
|
let mut ct = HashMap::new();
|
||||||
|
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.body(
|
||||||
|
format!(
|
||||||
|
r#"[
|
||||||
|
{{"pk": "root", "sk": "a", "ct": null, "v": "{}"}},
|
||||||
|
{{"pk": "root", "sk": "b", "ct": null, "v": "{}"}},
|
||||||
|
{{"pk": "root", "sk": "c", "ct": null, "v": "{}"}},
|
||||||
|
{{"pk": "root", "sk": "d.1", "ct": null, "v": "{}"}},
|
||||||
|
{{"pk": "root", "sk": "d.2", "ct": null, "v": "{}"}},
|
||||||
|
{{"pk": "root", "sk": "e", "ct": null, "v": "{}"}}
|
||||||
|
]"#,
|
||||||
|
base64::encode(values.get(&"a").unwrap()),
|
||||||
|
base64::encode(values.get(&"b").unwrap()),
|
||||||
|
base64::encode(values.get(&"c").unwrap()),
|
||||||
|
base64::encode(values.get(&"d.1").unwrap()),
|
||||||
|
base64::encode(values.get(&"d.2").unwrap()),
|
||||||
|
base64::encode(values.get(&"e").unwrap()),
|
||||||
|
)
|
||||||
|
.into_bytes(),
|
||||||
|
)
|
||||||
|
.method(Method::POST)
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 200);
|
||||||
|
|
||||||
|
for sk in ["a", "b", "c", "d.1", "d.2", "e"] {
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some(sk))
|
||||||
|
.signed_header("accept", "*/*")
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 200);
|
||||||
|
assert_eq!(
|
||||||
|
res.headers().get("content-type").unwrap().to_str().unwrap(),
|
||||||
|
"application/octet-stream"
|
||||||
|
);
|
||||||
|
ct.insert(
|
||||||
|
sk,
|
||||||
|
res.headers()
|
||||||
|
.get("x-garage-causality-token")
|
||||||
|
.unwrap()
|
||||||
|
.to_str()
|
||||||
|
.unwrap()
|
||||||
|
.to_string(),
|
||||||
|
);
|
||||||
|
let res_body = hyper::body::to_bytes(res.into_body())
|
||||||
|
.await
|
||||||
|
.unwrap()
|
||||||
|
.to_vec();
|
||||||
|
assert_eq!(res_body, values.get(sk).unwrap().as_bytes());
|
||||||
|
}
|
||||||
|
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.query_param("search", Option::<&str>::None)
|
||||||
|
.body(
|
||||||
|
br#"[
|
||||||
|
{"partitionKey": "root"},
|
||||||
|
{"partitionKey": "root", "start": "c"},
|
||||||
|
{"partitionKey": "root", "start": "c", "end": "dynamite"},
|
||||||
|
{"partitionKey": "root", "start": "c", "reverse": true, "end": "a"},
|
||||||
|
{"partitionKey": "root", "start": "c", "reverse": true, "end": "azerty"},
|
||||||
|
{"partitionKey": "root", "limit": 1},
|
||||||
|
{"partitionKey": "root", "prefix": "d"}
|
||||||
|
]"#
|
||||||
|
.to_vec(),
|
||||||
|
)
|
||||||
|
.method(Method::POST)
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 200);
|
||||||
|
let json_res = json_body(res).await;
|
||||||
|
assert_json_eq!(
|
||||||
|
json_res,
|
||||||
|
json!([
|
||||||
|
{
|
||||||
|
"partitionKey": "root",
|
||||||
|
"prefix": null,
|
||||||
|
"start": null,
|
||||||
|
"end": null,
|
||||||
|
"limit": null,
|
||||||
|
"reverse": false,
|
||||||
|
"conflictsOnly": false,
|
||||||
|
"tombstones": false,
|
||||||
|
"singleItem": false,
|
||||||
|
"items": [
|
||||||
|
{"sk": "a", "ct": ct.get("a").unwrap(), "v": [base64::encode(values.get("a").unwrap())]},
|
||||||
|
{"sk": "b", "ct": ct.get("b").unwrap(), "v": [base64::encode(values.get("b").unwrap())]},
|
||||||
|
{"sk": "c", "ct": ct.get("c").unwrap(), "v": [base64::encode(values.get("c").unwrap())]},
|
||||||
|
{"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [base64::encode(values.get("d.1").unwrap())]},
|
||||||
|
{"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [base64::encode(values.get("d.2").unwrap())]},
|
||||||
|
{"sk": "e", "ct": ct.get("e").unwrap(), "v": [base64::encode(values.get("e").unwrap())]}
|
||||||
|
],
|
||||||
|
"more": false,
|
||||||
|
"nextStart": null,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"partitionKey": "root",
|
||||||
|
"prefix": null,
|
||||||
|
"start": "c",
|
||||||
|
"end": null,
|
||||||
|
"limit": null,
|
||||||
|
"reverse": false,
|
||||||
|
"conflictsOnly": false,
|
||||||
|
"tombstones": false,
|
||||||
|
"singleItem": false,
|
||||||
|
"items": [
|
||||||
|
{"sk": "c", "ct": ct.get("c").unwrap(), "v": [base64::encode(values.get("c").unwrap())]},
|
||||||
|
{"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [base64::encode(values.get("d.1").unwrap())]},
|
||||||
|
{"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [base64::encode(values.get("d.2").unwrap())]},
|
||||||
|
{"sk": "e", "ct": ct.get("e").unwrap(), "v": [base64::encode(values.get("e").unwrap())]}
|
||||||
|
],
|
||||||
|
"more": false,
|
||||||
|
"nextStart": null,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"partitionKey": "root",
|
||||||
|
"prefix": null,
|
||||||
|
"start": "c",
|
||||||
|
"end": "dynamite",
|
||||||
|
"limit": null,
|
||||||
|
"reverse": false,
|
||||||
|
"conflictsOnly": false,
|
||||||
|
"tombstones": false,
|
||||||
|
"singleItem": false,
|
||||||
|
"items": [
|
||||||
|
{"sk": "c", "ct": ct.get("c").unwrap(), "v": [base64::encode(values.get("c").unwrap())]},
|
||||||
|
{"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [base64::encode(values.get("d.1").unwrap())]},
|
||||||
|
{"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [base64::encode(values.get("d.2").unwrap())]},
|
||||||
|
],
|
||||||
|
"more": false,
|
||||||
|
"nextStart": null,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"partitionKey": "root",
|
||||||
|
"prefix": null,
|
||||||
|
"start": "c",
|
||||||
|
"end": "a",
|
||||||
|
"limit": null,
|
||||||
|
"reverse": true,
|
||||||
|
"conflictsOnly": false,
|
||||||
|
"tombstones": false,
|
||||||
|
"singleItem": false,
|
||||||
|
"items": [
|
||||||
|
{"sk": "c", "ct": ct.get("c").unwrap(), "v": [base64::encode(values.get("c").unwrap())]},
|
||||||
|
{"sk": "b", "ct": ct.get("b").unwrap(), "v": [base64::encode(values.get("b").unwrap())]},
|
||||||
|
],
|
||||||
|
"more": false,
|
||||||
|
"nextStart": null,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"partitionKey": "root",
|
||||||
|
"prefix": null,
|
||||||
|
"start": "c",
|
||||||
|
"end": "azerty",
|
||||||
|
"limit": null,
|
||||||
|
"reverse": true,
|
||||||
|
"conflictsOnly": false,
|
||||||
|
"tombstones": false,
|
||||||
|
"singleItem": false,
|
||||||
|
"items": [
|
||||||
|
{"sk": "c", "ct": ct.get("c").unwrap(), "v": [base64::encode(values.get("c").unwrap())]},
|
||||||
|
{"sk": "b", "ct": ct.get("b").unwrap(), "v": [base64::encode(values.get("b").unwrap())]},
|
||||||
|
],
|
||||||
|
"more": false,
|
||||||
|
"nextStart": null,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"partitionKey": "root",
|
||||||
|
"prefix": null,
|
||||||
|
"start": null,
|
||||||
|
"end": null,
|
||||||
|
"limit": 1,
|
||||||
|
"reverse": false,
|
||||||
|
"conflictsOnly": false,
|
||||||
|
"tombstones": false,
|
||||||
|
"singleItem": false,
|
||||||
|
"items": [
|
||||||
|
{"sk": "a", "ct": ct.get("a").unwrap(), "v": [base64::encode(values.get("a").unwrap())]}
|
||||||
|
],
|
||||||
|
"more": true,
|
||||||
|
"nextStart": "b",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"partitionKey": "root",
|
||||||
|
"prefix": "d",
|
||||||
|
"start": null,
|
||||||
|
"end": null,
|
||||||
|
"limit": null,
|
||||||
|
"reverse": false,
|
||||||
|
"conflictsOnly": false,
|
||||||
|
"tombstones": false,
|
||||||
|
"singleItem": false,
|
||||||
|
"items": [
|
||||||
|
{"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [base64::encode(values.get("d.1").unwrap())]},
|
||||||
|
{"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [base64::encode(values.get("d.2").unwrap())]}
|
||||||
|
],
|
||||||
|
"more": false,
|
||||||
|
"nextStart": null,
|
||||||
|
},
|
||||||
|
])
|
||||||
|
);
|
||||||
|
|
||||||
|
// Insert some new values
|
||||||
|
values.insert("c'", "new test 3");
|
||||||
|
values.insert("d.1'", "new test 4");
|
||||||
|
values.insert("d.2'", "new test 5");
|
||||||
|
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.body(
|
||||||
|
format!(
|
||||||
|
r#"[
|
||||||
|
{{"pk": "root", "sk": "b", "ct": "{}", "v": null}},
|
||||||
|
{{"pk": "root", "sk": "c", "ct": null, "v": "{}"}},
|
||||||
|
{{"pk": "root", "sk": "d.1", "ct": "{}", "v": "{}"}},
|
||||||
|
{{"pk": "root", "sk": "d.2", "ct": null, "v": "{}"}}
|
||||||
|
]"#,
|
||||||
|
ct.get(&"b").unwrap(),
|
||||||
|
base64::encode(values.get(&"c'").unwrap()),
|
||||||
|
ct.get(&"d.1").unwrap(),
|
||||||
|
base64::encode(values.get(&"d.1'").unwrap()),
|
||||||
|
base64::encode(values.get(&"d.2'").unwrap()),
|
||||||
|
)
|
||||||
|
.into_bytes(),
|
||||||
|
)
|
||||||
|
.method(Method::POST)
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 200);
|
||||||
|
|
||||||
|
for sk in ["b", "c", "d.1", "d.2"] {
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some(sk))
|
||||||
|
.signed_header("accept", "*/*")
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
if sk == "b" {
|
||||||
|
assert_eq!(res.status(), 204);
|
||||||
|
} else {
|
||||||
|
assert_eq!(res.status(), 200);
|
||||||
|
}
|
||||||
|
ct.insert(
|
||||||
|
sk,
|
||||||
|
res.headers()
|
||||||
|
.get("x-garage-causality-token")
|
||||||
|
.unwrap()
|
||||||
|
.to_str()
|
||||||
|
.unwrap()
|
||||||
|
.to_string(),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.query_param("search", Option::<&str>::None)
|
||||||
|
.body(
|
||||||
|
br#"[
|
||||||
|
{"partitionKey": "root"},
|
||||||
|
{"partitionKey": "root", "prefix": "d"},
|
||||||
|
{"partitionKey": "root", "prefix": "d.", "end": "d.2"},
|
||||||
|
{"partitionKey": "root", "prefix": "d.", "limit": 1},
|
||||||
|
{"partitionKey": "root", "prefix": "d.", "start": "d.2", "limit": 1},
|
||||||
|
{"partitionKey": "root", "prefix": "d.", "reverse": true},
|
||||||
|
{"partitionKey": "root", "prefix": "d.", "start": "d.2", "reverse": true},
|
||||||
|
{"partitionKey": "root", "prefix": "d.", "limit": 2}
|
||||||
|
]"#
|
||||||
|
.to_vec(),
|
||||||
|
)
|
||||||
|
.method(Method::POST)
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 200);
|
||||||
|
let json_res = json_body(res).await;
|
||||||
|
assert_json_eq!(
|
||||||
|
json_res,
|
||||||
|
json!([
|
||||||
|
{
|
||||||
|
"partitionKey": "root",
|
||||||
|
"prefix": null,
|
||||||
|
"start": null,
|
||||||
|
"end": null,
|
||||||
|
"limit": null,
|
||||||
|
"reverse": false,
|
||||||
|
"conflictsOnly": false,
|
||||||
|
"tombstones": false,
|
||||||
|
"singleItem": false,
|
||||||
|
"items": [
|
||||||
|
{"sk": "a", "ct": ct.get("a").unwrap(), "v": [base64::encode(values.get("a").unwrap())]},
|
||||||
|
{"sk": "c", "ct": ct.get("c").unwrap(), "v": [base64::encode(values.get("c").unwrap()), base64::encode(values.get("c'").unwrap())]},
|
||||||
|
{"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [base64::encode(values.get("d.1'").unwrap())]},
|
||||||
|
{"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [base64::encode(values.get("d.2").unwrap()), base64::encode(values.get("d.2'").unwrap())]},
|
||||||
|
{"sk": "e", "ct": ct.get("e").unwrap(), "v": [base64::encode(values.get("e").unwrap())]}
|
||||||
|
],
|
||||||
|
"more": false,
|
||||||
|
"nextStart": null,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"partitionKey": "root",
|
||||||
|
"prefix": "d",
|
||||||
|
"start": null,
|
||||||
|
"end": null,
|
||||||
|
"limit": null,
|
||||||
|
"reverse": false,
|
||||||
|
"conflictsOnly": false,
|
||||||
|
"tombstones": false,
|
||||||
|
"singleItem": false,
|
||||||
|
"items": [
|
||||||
|
{"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [base64::encode(values.get("d.1'").unwrap())]},
|
||||||
|
{"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [base64::encode(values.get("d.2").unwrap()), base64::encode(values.get("d.2'").unwrap())]},
|
||||||
|
],
|
||||||
|
"more": false,
|
||||||
|
"nextStart": null,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"partitionKey": "root",
|
||||||
|
"prefix": "d.",
|
||||||
|
"start": null,
|
||||||
|
"end": "d.2",
|
||||||
|
"limit": null,
|
||||||
|
"reverse": false,
|
||||||
|
"conflictsOnly": false,
|
||||||
|
"tombstones": false,
|
||||||
|
"singleItem": false,
|
||||||
|
"items": [
|
||||||
|
{"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [base64::encode(values.get("d.1'").unwrap())]},
|
||||||
|
],
|
||||||
|
"more": false,
|
||||||
|
"nextStart": null,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"partitionKey": "root",
|
||||||
|
"prefix": "d.",
|
||||||
|
"start": null,
|
||||||
|
"end": null,
|
||||||
|
"limit": 1,
|
||||||
|
"reverse": false,
|
||||||
|
"conflictsOnly": false,
|
||||||
|
"tombstones": false,
|
||||||
|
"singleItem": false,
|
||||||
|
"items": [
|
||||||
|
{"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [base64::encode(values.get("d.1'").unwrap())]},
|
||||||
|
],
|
||||||
|
"more": true,
|
||||||
|
"nextStart": "d.2",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"partitionKey": "root",
|
||||||
|
"prefix": "d.",
|
||||||
|
"start": "d.2",
|
||||||
|
"end": null,
|
||||||
|
"limit": 1,
|
||||||
|
"reverse": false,
|
||||||
|
"conflictsOnly": false,
|
||||||
|
"tombstones": false,
|
||||||
|
"singleItem": false,
|
||||||
|
"items": [
|
||||||
|
{"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [base64::encode(values.get("d.2").unwrap()), base64::encode(values.get("d.2'").unwrap())]},
|
||||||
|
],
|
||||||
|
"more": false,
|
||||||
|
"nextStart": null,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"partitionKey": "root",
|
||||||
|
"prefix": "d.",
|
||||||
|
"start": null,
|
||||||
|
"end": null,
|
||||||
|
"limit": null,
|
||||||
|
"reverse": true,
|
||||||
|
"conflictsOnly": false,
|
||||||
|
"tombstones": false,
|
||||||
|
"singleItem": false,
|
||||||
|
"items": [
|
||||||
|
{"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [base64::encode(values.get("d.2").unwrap()), base64::encode(values.get("d.2'").unwrap())]},
|
||||||
|
{"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [base64::encode(values.get("d.1'").unwrap())]},
|
||||||
|
],
|
||||||
|
"more": false,
|
||||||
|
"nextStart": null,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"partitionKey": "root",
|
||||||
|
"prefix": "d.",
|
||||||
|
"start": "d.2",
|
||||||
|
"end": null,
|
||||||
|
"limit": null,
|
||||||
|
"reverse": true,
|
||||||
|
"conflictsOnly": false,
|
||||||
|
"tombstones": false,
|
||||||
|
"singleItem": false,
|
||||||
|
"items": [
|
||||||
|
{"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [base64::encode(values.get("d.2").unwrap()), base64::encode(values.get("d.2'").unwrap())]},
|
||||||
|
{"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [base64::encode(values.get("d.1'").unwrap())]},
|
||||||
|
],
|
||||||
|
"more": false,
|
||||||
|
"nextStart": null,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"partitionKey": "root",
|
||||||
|
"prefix": "d.",
|
||||||
|
"start": null,
|
||||||
|
"end": null,
|
||||||
|
"limit": 2,
|
||||||
|
"reverse": false,
|
||||||
|
"conflictsOnly": false,
|
||||||
|
"tombstones": false,
|
||||||
|
"singleItem": false,
|
||||||
|
"items": [
|
||||||
|
{"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [base64::encode(values.get("d.1'").unwrap())]},
|
||||||
|
{"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [base64::encode(values.get("d.2").unwrap()), base64::encode(values.get("d.2'").unwrap())]},
|
||||||
|
],
|
||||||
|
"more": false,
|
||||||
|
"nextStart": null,
|
||||||
|
},
|
||||||
|
])
|
||||||
|
);
|
||||||
|
|
||||||
|
// Test DeleteBatch
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.query_param("delete", Option::<&str>::None)
|
||||||
|
.body(
|
||||||
|
br#"[
|
||||||
|
{"partitionKey": "root", "start": "a", "end": "c"},
|
||||||
|
{"partitionKey": "root", "prefix": "d"}
|
||||||
|
]"#
|
||||||
|
.to_vec(),
|
||||||
|
)
|
||||||
|
.method(Method::POST)
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 200);
|
||||||
|
let json_res = json_body(res).await;
|
||||||
|
assert_json_eq!(
|
||||||
|
json_res,
|
||||||
|
json!([
|
||||||
|
{
|
||||||
|
"partitionKey": "root",
|
||||||
|
"prefix": null,
|
||||||
|
"start": "a",
|
||||||
|
"end": "c",
|
||||||
|
"singleItem": false,
|
||||||
|
"deletedItems": 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"partitionKey": "root",
|
||||||
|
"prefix": "d",
|
||||||
|
"start": null,
|
||||||
|
"end": null,
|
||||||
|
"singleItem": false,
|
||||||
|
"deletedItems": 2,
|
||||||
|
},
|
||||||
|
])
|
||||||
|
);
|
||||||
|
|
||||||
|
// update our known tombstones
|
||||||
|
for sk in ["a", "b", "d.1", "d.2"] {
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some(sk))
|
||||||
|
.signed_header("accept", "application/octet-stream")
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 204);
|
||||||
|
assert_eq!(
|
||||||
|
res.headers().get("content-type").unwrap().to_str().unwrap(),
|
||||||
|
"application/octet-stream"
|
||||||
|
);
|
||||||
|
ct.insert(
|
||||||
|
sk,
|
||||||
|
res.headers()
|
||||||
|
.get("x-garage-causality-token")
|
||||||
|
.unwrap()
|
||||||
|
.to_str()
|
||||||
|
.unwrap()
|
||||||
|
.to_string(),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.query_param("search", Option::<&str>::None)
|
||||||
|
.body(
|
||||||
|
br#"[
|
||||||
|
{"partitionKey": "root"},
|
||||||
|
{"partitionKey": "root", "reverse": true},
|
||||||
|
{"partitionKey": "root", "tombstones": true}
|
||||||
|
]"#
|
||||||
|
.to_vec(),
|
||||||
|
)
|
||||||
|
.method(Method::POST)
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 200);
|
||||||
|
let json_res = json_body(res).await;
|
||||||
|
assert_json_eq!(
|
||||||
|
json_res,
|
||||||
|
json!([
|
||||||
|
{
|
||||||
|
"partitionKey": "root",
|
||||||
|
"prefix": null,
|
||||||
|
"start": null,
|
||||||
|
"end": null,
|
||||||
|
"limit": null,
|
||||||
|
"reverse": false,
|
||||||
|
"conflictsOnly": false,
|
||||||
|
"tombstones": false,
|
||||||
|
"singleItem": false,
|
||||||
|
"items": [
|
||||||
|
{"sk": "c", "ct": ct.get("c").unwrap(), "v": [base64::encode(values.get("c").unwrap()), base64::encode(values.get("c'").unwrap())]},
|
||||||
|
{"sk": "e", "ct": ct.get("e").unwrap(), "v": [base64::encode(values.get("e").unwrap())]}
|
||||||
|
],
|
||||||
|
"more": false,
|
||||||
|
"nextStart": null,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"partitionKey": "root",
|
||||||
|
"prefix": null,
|
||||||
|
"start": null,
|
||||||
|
"end": null,
|
||||||
|
"limit": null,
|
||||||
|
"reverse": true,
|
||||||
|
"conflictsOnly": false,
|
||||||
|
"tombstones": false,
|
||||||
|
"singleItem": false,
|
||||||
|
"items": [
|
||||||
|
{"sk": "e", "ct": ct.get("e").unwrap(), "v": [base64::encode(values.get("e").unwrap())]},
|
||||||
|
{"sk": "c", "ct": ct.get("c").unwrap(), "v": [base64::encode(values.get("c").unwrap()), base64::encode(values.get("c'").unwrap())]},
|
||||||
|
],
|
||||||
|
"more": false,
|
||||||
|
"nextStart": null,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"partitionKey": "root",
|
||||||
|
"prefix": null,
|
||||||
|
"start": null,
|
||||||
|
"end": null,
|
||||||
|
"limit": null,
|
||||||
|
"reverse": false,
|
||||||
|
"conflictsOnly": false,
|
||||||
|
"tombstones": true,
|
||||||
|
"singleItem": false,
|
||||||
|
"items": [
|
||||||
|
{"sk": "a", "ct": ct.get("a").unwrap(), "v": [null]},
|
||||||
|
{"sk": "b", "ct": ct.get("b").unwrap(), "v": [null]},
|
||||||
|
{"sk": "c", "ct": ct.get("c").unwrap(), "v": [base64::encode(values.get("c").unwrap()), base64::encode(values.get("c'").unwrap())]},
|
||||||
|
{"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [null]},
|
||||||
|
{"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [null]},
|
||||||
|
{"sk": "e", "ct": ct.get("e").unwrap(), "v": [base64::encode(values.get("e").unwrap())]},
|
||||||
|
],
|
||||||
|
"more": false,
|
||||||
|
"nextStart": null,
|
||||||
|
},
|
||||||
|
])
|
||||||
|
);
|
||||||
|
}
|
141
src/garage/tests/k2v/errorcodes.rs
Normal file
141
src/garage/tests/k2v/errorcodes.rs
Normal file
|
@ -0,0 +1,141 @@
|
||||||
|
use crate::common;
|
||||||
|
|
||||||
|
use hyper::Method;
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_error_codes() {
|
||||||
|
let ctx = common::context();
|
||||||
|
let bucket = ctx.create_bucket("test-k2v-error-codes");
|
||||||
|
|
||||||
|
// Regular insert should work (code 200)
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.method(Method::PUT)
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some("test1"))
|
||||||
|
.body(b"Hello, world!".to_vec())
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 200);
|
||||||
|
|
||||||
|
// Insert with trash causality token: invalid request
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.method(Method::PUT)
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some("test1"))
|
||||||
|
.signed_header("x-garage-causality-token", "tra$sh")
|
||||||
|
.body(b"Hello, world!".to_vec())
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 400);
|
||||||
|
|
||||||
|
// Search without partition key: invalid request
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.query_param("search", Option::<&str>::None)
|
||||||
|
.body(
|
||||||
|
br#"[
|
||||||
|
{},
|
||||||
|
]"#
|
||||||
|
.to_vec(),
|
||||||
|
)
|
||||||
|
.method(Method::POST)
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 400);
|
||||||
|
|
||||||
|
// Search with start that is not in prefix: invalid request
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.query_param("search", Option::<&str>::None)
|
||||||
|
.body(
|
||||||
|
br#"[
|
||||||
|
{"partition_key": "root", "prefix": "a", "start": "bx"},
|
||||||
|
]"#
|
||||||
|
.to_vec(),
|
||||||
|
)
|
||||||
|
.method(Method::POST)
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 400);
|
||||||
|
|
||||||
|
// Search with invalid json: 400
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.query_param("search", Option::<&str>::None)
|
||||||
|
.body(
|
||||||
|
br#"[
|
||||||
|
{"partition_key": "root"
|
||||||
|
]"#
|
||||||
|
.to_vec(),
|
||||||
|
)
|
||||||
|
.method(Method::POST)
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 400);
|
||||||
|
|
||||||
|
// Batch insert with invalid causality token: 400
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.body(
|
||||||
|
br#"[
|
||||||
|
{"pk": "root", "sk": "a", "ct": "tra$h", "v": "aGVsbG8sIHdvcmxkCg=="}
|
||||||
|
]"#
|
||||||
|
.to_vec(),
|
||||||
|
)
|
||||||
|
.method(Method::POST)
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 400);
|
||||||
|
|
||||||
|
// Batch insert with invalid data: 400
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.body(
|
||||||
|
br#"[
|
||||||
|
{"pk": "root", "sk": "a", "ct": null, "v": "aGVsbG8sIHdvcmx$Cg=="}
|
||||||
|
]"#
|
||||||
|
.to_vec(),
|
||||||
|
)
|
||||||
|
.method(Method::POST)
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 400);
|
||||||
|
|
||||||
|
// Poll with invalid causality token: 400
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some("test1"))
|
||||||
|
.query_param("causality_token", Some("tra$h"))
|
||||||
|
.query_param("timeout", Some("10"))
|
||||||
|
.signed_header("accept", "application/octet-stream")
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 400);
|
||||||
|
}
|
719
src/garage/tests/k2v/item.rs
Normal file
719
src/garage/tests/k2v/item.rs
Normal file
|
@ -0,0 +1,719 @@
|
||||||
|
use crate::common;
|
||||||
|
|
||||||
|
use assert_json_diff::assert_json_eq;
|
||||||
|
use serde_json::json;
|
||||||
|
|
||||||
|
use super::json_body;
|
||||||
|
use hyper::Method;
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_items_and_indices() {
|
||||||
|
let ctx = common::context();
|
||||||
|
let bucket = ctx.create_bucket("test-k2v-item-and-index");
|
||||||
|
|
||||||
|
// ReadIndex -- there should be nothing
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
let res_body = json_body(res).await;
|
||||||
|
assert_json_eq!(
|
||||||
|
res_body,
|
||||||
|
json!({
|
||||||
|
"prefix": null,
|
||||||
|
"start": null,
|
||||||
|
"end": null,
|
||||||
|
"limit": null,
|
||||||
|
"reverse": false,
|
||||||
|
"partitionKeys": [],
|
||||||
|
"more": false,
|
||||||
|
"nextStart": null
|
||||||
|
})
|
||||||
|
);
|
||||||
|
|
||||||
|
let content2_len = "_: hello universe".len();
|
||||||
|
let content3_len = "_: concurrent value".len();
|
||||||
|
|
||||||
|
for (i, sk) in ["a", "b", "c", "d"].iter().enumerate() {
|
||||||
|
let content = format!("{}: hello world", sk).into_bytes();
|
||||||
|
let content2 = format!("{}: hello universe", sk).into_bytes();
|
||||||
|
let content3 = format!("{}: concurrent value", sk).into_bytes();
|
||||||
|
|
||||||
|
// Put initially, no causality token
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some(sk))
|
||||||
|
.body(content.clone())
|
||||||
|
.method(Method::PUT)
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 200);
|
||||||
|
|
||||||
|
// Get value back
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some(sk))
|
||||||
|
.signed_header("accept", "*/*")
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 200);
|
||||||
|
assert_eq!(
|
||||||
|
res.headers().get("content-type").unwrap().to_str().unwrap(),
|
||||||
|
"application/octet-stream"
|
||||||
|
);
|
||||||
|
let ct = res
|
||||||
|
.headers()
|
||||||
|
.get("x-garage-causality-token")
|
||||||
|
.unwrap()
|
||||||
|
.to_str()
|
||||||
|
.unwrap()
|
||||||
|
.to_string();
|
||||||
|
let res_body = hyper::body::to_bytes(res.into_body())
|
||||||
|
.await
|
||||||
|
.unwrap()
|
||||||
|
.to_vec();
|
||||||
|
assert_eq!(res_body, content);
|
||||||
|
|
||||||
|
// ReadIndex -- now there should be some stuff
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
let res_body = json_body(res).await;
|
||||||
|
assert_json_eq!(
|
||||||
|
res_body,
|
||||||
|
json!({
|
||||||
|
"prefix": null,
|
||||||
|
"start": null,
|
||||||
|
"end": null,
|
||||||
|
"limit": null,
|
||||||
|
"reverse": false,
|
||||||
|
"partitionKeys": [
|
||||||
|
{
|
||||||
|
"pk": "root",
|
||||||
|
"entries": i+1,
|
||||||
|
"conflicts": i,
|
||||||
|
"values": i+i+1,
|
||||||
|
"bytes": i*(content2.len() + content3.len()) + content.len(),
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"more": false,
|
||||||
|
"nextStart": null
|
||||||
|
})
|
||||||
|
);
|
||||||
|
|
||||||
|
// Put again, this time with causality token
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some(sk))
|
||||||
|
.signed_header("x-garage-causality-token", ct.clone())
|
||||||
|
.body(content2.clone())
|
||||||
|
.method(Method::PUT)
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 200);
|
||||||
|
|
||||||
|
// Get value back
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some(sk))
|
||||||
|
.signed_header("accept", "*/*")
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 200);
|
||||||
|
assert_eq!(
|
||||||
|
res.headers().get("content-type").unwrap().to_str().unwrap(),
|
||||||
|
"application/octet-stream"
|
||||||
|
);
|
||||||
|
let res_body = hyper::body::to_bytes(res.into_body())
|
||||||
|
.await
|
||||||
|
.unwrap()
|
||||||
|
.to_vec();
|
||||||
|
assert_eq!(res_body, content2);
|
||||||
|
|
||||||
|
// ReadIndex -- now there should be some stuff
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
let res_body = json_body(res).await;
|
||||||
|
assert_json_eq!(
|
||||||
|
res_body,
|
||||||
|
json!({
|
||||||
|
"prefix": null,
|
||||||
|
"start": null,
|
||||||
|
"end": null,
|
||||||
|
"limit": null,
|
||||||
|
"reverse": false,
|
||||||
|
"partitionKeys": [
|
||||||
|
{
|
||||||
|
"pk": "root",
|
||||||
|
"entries": i+1,
|
||||||
|
"conflicts": i,
|
||||||
|
"values": i+i+1,
|
||||||
|
"bytes": i*content3.len() + (i+1)*content2.len(),
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"more": false,
|
||||||
|
"nextStart": null
|
||||||
|
})
|
||||||
|
);
|
||||||
|
|
||||||
|
// Put again with same CT, now we have concurrent values
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some(sk))
|
||||||
|
.signed_header("x-garage-causality-token", ct.clone())
|
||||||
|
.body(content3.clone())
|
||||||
|
.method(Method::PUT)
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 200);
|
||||||
|
|
||||||
|
// Get value back
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some(sk))
|
||||||
|
.signed_header("accept", "*/*")
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 200);
|
||||||
|
assert_eq!(
|
||||||
|
res.headers().get("content-type").unwrap().to_str().unwrap(),
|
||||||
|
"application/json"
|
||||||
|
);
|
||||||
|
let res_json = json_body(res).await;
|
||||||
|
assert_json_eq!(
|
||||||
|
res_json,
|
||||||
|
[base64::encode(&content2), base64::encode(&content3)]
|
||||||
|
);
|
||||||
|
|
||||||
|
// ReadIndex -- now there should be some stuff
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
let res_body = json_body(res).await;
|
||||||
|
assert_json_eq!(
|
||||||
|
res_body,
|
||||||
|
json!({
|
||||||
|
"prefix": null,
|
||||||
|
"start": null,
|
||||||
|
"end": null,
|
||||||
|
"limit": null,
|
||||||
|
"reverse": false,
|
||||||
|
"partitionKeys": [
|
||||||
|
{
|
||||||
|
"pk": "root",
|
||||||
|
"entries": i+1,
|
||||||
|
"conflicts": i+1,
|
||||||
|
"values": 2*(i+1),
|
||||||
|
"bytes": (i+1)*(content2.len() + content3.len()),
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"more": false,
|
||||||
|
"nextStart": null
|
||||||
|
})
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Now delete things
|
||||||
|
for (i, sk) in ["a", "b", "c", "d"].iter().enumerate() {
|
||||||
|
// Get value back (we just need the CT)
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some(sk))
|
||||||
|
.signed_header("accept", "*/*")
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 200);
|
||||||
|
let ct = res
|
||||||
|
.headers()
|
||||||
|
.get("x-garage-causality-token")
|
||||||
|
.unwrap()
|
||||||
|
.to_str()
|
||||||
|
.unwrap()
|
||||||
|
.to_string();
|
||||||
|
|
||||||
|
// Delete it
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.method(Method::DELETE)
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some(sk))
|
||||||
|
.signed_header("x-garage-causality-token", ct)
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 204);
|
||||||
|
|
||||||
|
// ReadIndex -- now there should be some stuff
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
let res_body = json_body(res).await;
|
||||||
|
if i < 3 {
|
||||||
|
assert_json_eq!(
|
||||||
|
res_body,
|
||||||
|
json!({
|
||||||
|
"prefix": null,
|
||||||
|
"start": null,
|
||||||
|
"end": null,
|
||||||
|
"limit": null,
|
||||||
|
"reverse": false,
|
||||||
|
"partitionKeys": [
|
||||||
|
{
|
||||||
|
"pk": "root",
|
||||||
|
"entries": 3-i,
|
||||||
|
"conflicts": 3-i,
|
||||||
|
"values": 2*(3-i),
|
||||||
|
"bytes": (3-i)*(content2_len + content3_len),
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"more": false,
|
||||||
|
"nextStart": null
|
||||||
|
})
|
||||||
|
);
|
||||||
|
} else {
|
||||||
|
assert_json_eq!(
|
||||||
|
res_body,
|
||||||
|
json!({
|
||||||
|
"prefix": null,
|
||||||
|
"start": null,
|
||||||
|
"end": null,
|
||||||
|
"limit": null,
|
||||||
|
"reverse": false,
|
||||||
|
"partitionKeys": [],
|
||||||
|
"more": false,
|
||||||
|
"nextStart": null
|
||||||
|
})
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_item_return_format() {
|
||||||
|
let ctx = common::context();
|
||||||
|
let bucket = ctx.create_bucket("test-k2v-item-return-format");
|
||||||
|
|
||||||
|
let single_value = b"A single value".to_vec();
|
||||||
|
let concurrent_value = b"A concurrent value".to_vec();
|
||||||
|
|
||||||
|
// -- Test with a single value --
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some("v1"))
|
||||||
|
.body(single_value.clone())
|
||||||
|
.method(Method::PUT)
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 200);
|
||||||
|
|
||||||
|
// f0: either
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some("v1"))
|
||||||
|
.signed_header("accept", "*/*")
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 200);
|
||||||
|
assert_eq!(
|
||||||
|
res.headers().get("content-type").unwrap().to_str().unwrap(),
|
||||||
|
"application/octet-stream"
|
||||||
|
);
|
||||||
|
let ct = res
|
||||||
|
.headers()
|
||||||
|
.get("x-garage-causality-token")
|
||||||
|
.unwrap()
|
||||||
|
.to_str()
|
||||||
|
.unwrap()
|
||||||
|
.to_string();
|
||||||
|
let res_body = hyper::body::to_bytes(res.into_body())
|
||||||
|
.await
|
||||||
|
.unwrap()
|
||||||
|
.to_vec();
|
||||||
|
assert_eq!(res_body, single_value);
|
||||||
|
|
||||||
|
// f1: not specified
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some("v1"))
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 200);
|
||||||
|
assert_eq!(
|
||||||
|
res.headers().get("content-type").unwrap().to_str().unwrap(),
|
||||||
|
"application/json"
|
||||||
|
);
|
||||||
|
let res_body = json_body(res).await;
|
||||||
|
assert_json_eq!(res_body, json!([base64::encode(&single_value)]));
|
||||||
|
|
||||||
|
// f2: binary
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some("v1"))
|
||||||
|
.signed_header("accept", "application/octet-stream")
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 200);
|
||||||
|
assert_eq!(
|
||||||
|
res.headers().get("content-type").unwrap().to_str().unwrap(),
|
||||||
|
"application/octet-stream"
|
||||||
|
);
|
||||||
|
let res_body = hyper::body::to_bytes(res.into_body())
|
||||||
|
.await
|
||||||
|
.unwrap()
|
||||||
|
.to_vec();
|
||||||
|
assert_eq!(res_body, single_value);
|
||||||
|
|
||||||
|
// f3: json
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some("v1"))
|
||||||
|
.signed_header("accept", "application/json")
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 200);
|
||||||
|
assert_eq!(
|
||||||
|
res.headers().get("content-type").unwrap().to_str().unwrap(),
|
||||||
|
"application/json"
|
||||||
|
);
|
||||||
|
let res_body = json_body(res).await;
|
||||||
|
assert_json_eq!(res_body, json!([base64::encode(&single_value)]));
|
||||||
|
|
||||||
|
// -- Test with a second, concurrent value --
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some("v1"))
|
||||||
|
.body(concurrent_value.clone())
|
||||||
|
.method(Method::PUT)
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 200);
|
||||||
|
|
||||||
|
// f0: either
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some("v1"))
|
||||||
|
.signed_header("accept", "*/*")
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 200);
|
||||||
|
assert_eq!(
|
||||||
|
res.headers().get("content-type").unwrap().to_str().unwrap(),
|
||||||
|
"application/json"
|
||||||
|
);
|
||||||
|
let res_body = json_body(res).await;
|
||||||
|
assert_json_eq!(
|
||||||
|
res_body,
|
||||||
|
json!([
|
||||||
|
base64::encode(&single_value),
|
||||||
|
base64::encode(&concurrent_value)
|
||||||
|
])
|
||||||
|
);
|
||||||
|
|
||||||
|
// f1: not specified
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some("v1"))
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 200);
|
||||||
|
assert_eq!(
|
||||||
|
res.headers().get("content-type").unwrap().to_str().unwrap(),
|
||||||
|
"application/json"
|
||||||
|
);
|
||||||
|
let res_body = json_body(res).await;
|
||||||
|
assert_json_eq!(
|
||||||
|
res_body,
|
||||||
|
json!([
|
||||||
|
base64::encode(&single_value),
|
||||||
|
base64::encode(&concurrent_value)
|
||||||
|
])
|
||||||
|
);
|
||||||
|
|
||||||
|
// f2: binary
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some("v1"))
|
||||||
|
.signed_header("accept", "application/octet-stream")
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 409); // CONFLICT
|
||||||
|
|
||||||
|
// f3: json
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some("v1"))
|
||||||
|
.signed_header("accept", "application/json")
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 200);
|
||||||
|
assert_eq!(
|
||||||
|
res.headers().get("content-type").unwrap().to_str().unwrap(),
|
||||||
|
"application/json"
|
||||||
|
);
|
||||||
|
let res_body = json_body(res).await;
|
||||||
|
assert_json_eq!(
|
||||||
|
res_body,
|
||||||
|
json!([
|
||||||
|
base64::encode(&single_value),
|
||||||
|
base64::encode(&concurrent_value)
|
||||||
|
])
|
||||||
|
);
|
||||||
|
|
||||||
|
// -- Delete first value, concurrently with second insert --
|
||||||
|
// -- (we now have a concurrent value and a deletion) --
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some("v1"))
|
||||||
|
.method(Method::DELETE)
|
||||||
|
.signed_header("x-garage-causality-token", ct)
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 204);
|
||||||
|
|
||||||
|
// f0: either
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some("v1"))
|
||||||
|
.signed_header("accept", "*/*")
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 200);
|
||||||
|
assert_eq!(
|
||||||
|
res.headers().get("content-type").unwrap().to_str().unwrap(),
|
||||||
|
"application/json"
|
||||||
|
);
|
||||||
|
let res_body = json_body(res).await;
|
||||||
|
assert_json_eq!(res_body, json!([base64::encode(&concurrent_value), null]));
|
||||||
|
|
||||||
|
// f1: not specified
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some("v1"))
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 200);
|
||||||
|
assert_eq!(
|
||||||
|
res.headers().get("content-type").unwrap().to_str().unwrap(),
|
||||||
|
"application/json"
|
||||||
|
);
|
||||||
|
let ct = res
|
||||||
|
.headers()
|
||||||
|
.get("x-garage-causality-token")
|
||||||
|
.unwrap()
|
||||||
|
.to_str()
|
||||||
|
.unwrap()
|
||||||
|
.to_string();
|
||||||
|
let res_body = json_body(res).await;
|
||||||
|
assert_json_eq!(res_body, json!([base64::encode(&concurrent_value), null]));
|
||||||
|
|
||||||
|
// f2: binary
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some("v1"))
|
||||||
|
.signed_header("accept", "application/octet-stream")
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 409); // CONFLICT
|
||||||
|
|
||||||
|
// f3: json
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some("v1"))
|
||||||
|
.signed_header("accept", "application/json")
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 200);
|
||||||
|
assert_eq!(
|
||||||
|
res.headers().get("content-type").unwrap().to_str().unwrap(),
|
||||||
|
"application/json"
|
||||||
|
);
|
||||||
|
let res_body = json_body(res).await;
|
||||||
|
assert_json_eq!(res_body, json!([base64::encode(&concurrent_value), null]));
|
||||||
|
|
||||||
|
// -- Delete everything --
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some("v1"))
|
||||||
|
.method(Method::DELETE)
|
||||||
|
.signed_header("x-garage-causality-token", ct)
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 204);
|
||||||
|
|
||||||
|
// f0: either
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some("v1"))
|
||||||
|
.signed_header("accept", "*/*")
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 204); // NO CONTENT
|
||||||
|
|
||||||
|
// f1: not specified
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some("v1"))
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 200);
|
||||||
|
assert_eq!(
|
||||||
|
res.headers().get("content-type").unwrap().to_str().unwrap(),
|
||||||
|
"application/json"
|
||||||
|
);
|
||||||
|
let res_body = json_body(res).await;
|
||||||
|
assert_json_eq!(res_body, json!([null]));
|
||||||
|
|
||||||
|
// f2: binary
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some("v1"))
|
||||||
|
.signed_header("accept", "application/octet-stream")
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 204); // NO CONTENT
|
||||||
|
|
||||||
|
// f3: json
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some("v1"))
|
||||||
|
.signed_header("accept", "application/json")
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 200);
|
||||||
|
assert_eq!(
|
||||||
|
res.headers().get("content-type").unwrap().to_str().unwrap(),
|
||||||
|
"application/json"
|
||||||
|
);
|
||||||
|
let res_body = json_body(res).await;
|
||||||
|
assert_json_eq!(res_body, json!([null]));
|
||||||
|
}
|
18
src/garage/tests/k2v/mod.rs
Normal file
18
src/garage/tests/k2v/mod.rs
Normal file
|
@ -0,0 +1,18 @@
|
||||||
|
pub mod batch;
|
||||||
|
pub mod errorcodes;
|
||||||
|
pub mod item;
|
||||||
|
pub mod poll;
|
||||||
|
pub mod simple;
|
||||||
|
|
||||||
|
use hyper::{Body, Response};
|
||||||
|
|
||||||
|
pub async fn json_body(res: Response<Body>) -> serde_json::Value {
|
||||||
|
let res_body: serde_json::Value = serde_json::from_slice(
|
||||||
|
&hyper::body::to_bytes(res.into_body())
|
||||||
|
.await
|
||||||
|
.unwrap()
|
||||||
|
.to_vec()[..],
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
res_body
|
||||||
|
}
|
98
src/garage/tests/k2v/poll.rs
Normal file
98
src/garage/tests/k2v/poll.rs
Normal file
|
@ -0,0 +1,98 @@
|
||||||
|
use hyper::Method;
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use crate::common;
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_poll() {
|
||||||
|
let ctx = common::context();
|
||||||
|
let bucket = ctx.create_bucket("test-k2v-poll");
|
||||||
|
|
||||||
|
// Write initial value
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.method(Method::PUT)
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some("test1"))
|
||||||
|
.body(b"Initial value".to_vec())
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 200);
|
||||||
|
|
||||||
|
// Retrieve initial value to get its causality token
|
||||||
|
let res2 = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some("test1"))
|
||||||
|
.signed_header("accept", "application/octet-stream")
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res2.status(), 200);
|
||||||
|
let ct = res2
|
||||||
|
.headers()
|
||||||
|
.get("x-garage-causality-token")
|
||||||
|
.unwrap()
|
||||||
|
.to_str()
|
||||||
|
.unwrap()
|
||||||
|
.to_string();
|
||||||
|
|
||||||
|
let res2_body = hyper::body::to_bytes(res2.into_body())
|
||||||
|
.await
|
||||||
|
.unwrap()
|
||||||
|
.to_vec();
|
||||||
|
assert_eq!(res2_body, b"Initial value");
|
||||||
|
|
||||||
|
// Start poll operation
|
||||||
|
let poll = {
|
||||||
|
let bucket = bucket.clone();
|
||||||
|
let ct = ct.clone();
|
||||||
|
tokio::spawn(async move {
|
||||||
|
let ctx = common::context();
|
||||||
|
ctx.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some("test1"))
|
||||||
|
.query_param("causality_token", Some(ct))
|
||||||
|
.query_param("timeout", Some("10"))
|
||||||
|
.signed_header("accept", "application/octet-stream")
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
})
|
||||||
|
};
|
||||||
|
|
||||||
|
// Write new value that supersedes initial one
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.method(Method::PUT)
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some("test1"))
|
||||||
|
.signed_header("x-garage-causality-token", ct)
|
||||||
|
.body(b"New value".to_vec())
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 200);
|
||||||
|
|
||||||
|
// Check poll finishes with correct value
|
||||||
|
let poll_res = tokio::select! {
|
||||||
|
_ = tokio::time::sleep(Duration::from_secs(10)) => panic!("poll did not terminate in time"),
|
||||||
|
res = poll => res.unwrap().unwrap(),
|
||||||
|
};
|
||||||
|
|
||||||
|
assert_eq!(poll_res.status(), 200);
|
||||||
|
|
||||||
|
let poll_res_body = hyper::body::to_bytes(poll_res.into_body())
|
||||||
|
.await
|
||||||
|
.unwrap()
|
||||||
|
.to_vec();
|
||||||
|
assert_eq!(poll_res_body, b"New value");
|
||||||
|
}
|
40
src/garage/tests/k2v/simple.rs
Normal file
40
src/garage/tests/k2v/simple.rs
Normal file
|
@ -0,0 +1,40 @@
|
||||||
|
use crate::common;
|
||||||
|
|
||||||
|
use hyper::Method;
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_simple() {
|
||||||
|
let ctx = common::context();
|
||||||
|
let bucket = ctx.create_bucket("test-k2v-simple");
|
||||||
|
|
||||||
|
let res = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.method(Method::PUT)
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some("test1"))
|
||||||
|
.body(b"Hello, world!".to_vec())
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res.status(), 200);
|
||||||
|
|
||||||
|
let res2 = ctx
|
||||||
|
.k2v
|
||||||
|
.request
|
||||||
|
.builder(bucket.clone())
|
||||||
|
.path("root")
|
||||||
|
.query_param("sort_key", Some("test1"))
|
||||||
|
.signed_header("accept", "application/octet-stream")
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(res2.status(), 200);
|
||||||
|
|
||||||
|
let res2_body = hyper::body::to_bytes(res2.into_body())
|
||||||
|
.await
|
||||||
|
.unwrap()
|
||||||
|
.to_vec();
|
||||||
|
assert_eq!(res2_body, b"Hello, world!");
|
||||||
|
}
|
|
@ -3,9 +3,5 @@ mod common;
|
||||||
|
|
||||||
mod admin;
|
mod admin;
|
||||||
mod bucket;
|
mod bucket;
|
||||||
mod list;
|
mod k2v;
|
||||||
mod multipart;
|
mod s3;
|
||||||
mod objects;
|
|
||||||
mod simple;
|
|
||||||
mod streaming_signature;
|
|
||||||
mod website;
|
|
||||||
|
|
6
src/garage/tests/s3/mod.rs
Normal file
6
src/garage/tests/s3/mod.rs
Normal file
|
@ -0,0 +1,6 @@
|
||||||
|
mod list;
|
||||||
|
mod multipart;
|
||||||
|
mod objects;
|
||||||
|
mod simple;
|
||||||
|
mod streaming_signature;
|
||||||
|
mod website;
|
|
@ -35,10 +35,7 @@ async fn test_website() {
|
||||||
let req = || {
|
let req = || {
|
||||||
Request::builder()
|
Request::builder()
|
||||||
.method("GET")
|
.method("GET")
|
||||||
.uri(format!(
|
.uri(format!("http://127.0.0.1:{}/", ctx.garage.web_port))
|
||||||
"http://127.0.0.1:{}/",
|
|
||||||
common::garage::DEFAULT_PORT + 2
|
|
||||||
))
|
|
||||||
.header("Host", format!("{}.web.garage", BCKT_NAME))
|
.header("Host", format!("{}.web.garage", BCKT_NAME))
|
||||||
.body(Body::empty())
|
.body(Body::empty())
|
||||||
.unwrap()
|
.unwrap()
|
||||||
|
@ -170,10 +167,7 @@ async fn test_website_s3_api() {
|
||||||
{
|
{
|
||||||
let req = Request::builder()
|
let req = Request::builder()
|
||||||
.method("GET")
|
.method("GET")
|
||||||
.uri(format!(
|
.uri(format!("http://127.0.0.1:{}/site/", ctx.garage.web_port))
|
||||||
"http://127.0.0.1:{}/site/",
|
|
||||||
common::garage::DEFAULT_PORT + 2
|
|
||||||
))
|
|
||||||
.header("Host", format!("{}.web.garage", BCKT_NAME))
|
.header("Host", format!("{}.web.garage", BCKT_NAME))
|
||||||
.header("Origin", "https://example.com")
|
.header("Origin", "https://example.com")
|
||||||
.body(Body::empty())
|
.body(Body::empty())
|
||||||
|
@ -198,7 +192,7 @@ async fn test_website_s3_api() {
|
||||||
.method("GET")
|
.method("GET")
|
||||||
.uri(format!(
|
.uri(format!(
|
||||||
"http://127.0.0.1:{}/wrong.html",
|
"http://127.0.0.1:{}/wrong.html",
|
||||||
common::garage::DEFAULT_PORT + 2
|
ctx.garage.web_port
|
||||||
))
|
))
|
||||||
.header("Host", format!("{}.web.garage", BCKT_NAME))
|
.header("Host", format!("{}.web.garage", BCKT_NAME))
|
||||||
.body(Body::empty())
|
.body(Body::empty())
|
||||||
|
@ -217,10 +211,7 @@ async fn test_website_s3_api() {
|
||||||
{
|
{
|
||||||
let req = Request::builder()
|
let req = Request::builder()
|
||||||
.method("OPTIONS")
|
.method("OPTIONS")
|
||||||
.uri(format!(
|
.uri(format!("http://127.0.0.1:{}/site/", ctx.garage.web_port))
|
||||||
"http://127.0.0.1:{}/site/",
|
|
||||||
common::garage::DEFAULT_PORT + 2
|
|
||||||
))
|
|
||||||
.header("Host", format!("{}.web.garage", BCKT_NAME))
|
.header("Host", format!("{}.web.garage", BCKT_NAME))
|
||||||
.header("Origin", "https://example.com")
|
.header("Origin", "https://example.com")
|
||||||
.header("Access-Control-Request-Method", "PUT")
|
.header("Access-Control-Request-Method", "PUT")
|
||||||
|
@ -244,10 +235,7 @@ async fn test_website_s3_api() {
|
||||||
{
|
{
|
||||||
let req = Request::builder()
|
let req = Request::builder()
|
||||||
.method("OPTIONS")
|
.method("OPTIONS")
|
||||||
.uri(format!(
|
.uri(format!("http://127.0.0.1:{}/site/", ctx.garage.web_port))
|
||||||
"http://127.0.0.1:{}/site/",
|
|
||||||
common::garage::DEFAULT_PORT + 2
|
|
||||||
))
|
|
||||||
.header("Host", format!("{}.web.garage", BCKT_NAME))
|
.header("Host", format!("{}.web.garage", BCKT_NAME))
|
||||||
.header("Origin", "https://example.com")
|
.header("Origin", "https://example.com")
|
||||||
.header("Access-Control-Request-Method", "DELETE")
|
.header("Access-Control-Request-Method", "DELETE")
|
||||||
|
@ -288,10 +276,7 @@ async fn test_website_s3_api() {
|
||||||
{
|
{
|
||||||
let req = Request::builder()
|
let req = Request::builder()
|
||||||
.method("OPTIONS")
|
.method("OPTIONS")
|
||||||
.uri(format!(
|
.uri(format!("http://127.0.0.1:{}/site/", ctx.garage.web_port))
|
||||||
"http://127.0.0.1:{}/site/",
|
|
||||||
common::garage::DEFAULT_PORT + 2
|
|
||||||
))
|
|
||||||
.header("Host", format!("{}.web.garage", BCKT_NAME))
|
.header("Host", format!("{}.web.garage", BCKT_NAME))
|
||||||
.header("Origin", "https://example.com")
|
.header("Origin", "https://example.com")
|
||||||
.header("Access-Control-Request-Method", "PUT")
|
.header("Access-Control-Request-Method", "PUT")
|
||||||
|
@ -319,10 +304,7 @@ async fn test_website_s3_api() {
|
||||||
{
|
{
|
||||||
let req = Request::builder()
|
let req = Request::builder()
|
||||||
.method("GET")
|
.method("GET")
|
||||||
.uri(format!(
|
.uri(format!("http://127.0.0.1:{}/site/", ctx.garage.web_port))
|
||||||
"http://127.0.0.1:{}/site/",
|
|
||||||
common::garage::DEFAULT_PORT + 2
|
|
||||||
))
|
|
||||||
.header("Host", format!("{}.web.garage", BCKT_NAME))
|
.header("Host", format!("{}.web.garage", BCKT_NAME))
|
||||||
.body(Body::empty())
|
.body(Body::empty())
|
||||||
.unwrap();
|
.unwrap();
|
32
src/k2v-client/Cargo.toml
Normal file
32
src/k2v-client/Cargo.toml
Normal file
|
@ -0,0 +1,32 @@
|
||||||
|
[package]
|
||||||
|
name = "k2v-client"
|
||||||
|
version = "0.1.0"
|
||||||
|
edition = "2018"
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
base64 = "0.13.0"
|
||||||
|
http = "0.2.6"
|
||||||
|
log = "0.4"
|
||||||
|
rusoto_core = "0.48.0"
|
||||||
|
rusoto_credential = "0.48.0"
|
||||||
|
rusoto_signature = "0.48.0"
|
||||||
|
serde = "1.0.137"
|
||||||
|
serde_json = "1.0.81"
|
||||||
|
thiserror = "1.0.31"
|
||||||
|
tokio = "1.17.0"
|
||||||
|
|
||||||
|
# cli deps
|
||||||
|
clap = { version = "3.1.18", optional = true, features = ["derive", "env"] }
|
||||||
|
garage_util = { path = "../util", optional = true }
|
||||||
|
|
||||||
|
|
||||||
|
[features]
|
||||||
|
cli = ["clap", "tokio/fs", "tokio/io-std", "garage_util"]
|
||||||
|
|
||||||
|
[lib]
|
||||||
|
path = "lib.rs"
|
||||||
|
|
||||||
|
[[bin]]
|
||||||
|
name = "k2v-cli"
|
||||||
|
path = "bin/k2v-cli.rs"
|
||||||
|
required-features = ["cli"]
|
25
src/k2v-client/README.md
Normal file
25
src/k2v-client/README.md
Normal file
|
@ -0,0 +1,25 @@
|
||||||
|
Example usage:
|
||||||
|
```sh
|
||||||
|
# all these values can be provided on the cli instead
|
||||||
|
export AWS_ACCESS_KEY_ID=GK123456
|
||||||
|
export AWS_SECRET_ACCESS_KEY=0123..789
|
||||||
|
export AWS_REGION=garage
|
||||||
|
export K2V_ENDPOINT=http://172.30.2.1:3903
|
||||||
|
export K2V_BUCKET=my-bucket
|
||||||
|
|
||||||
|
cargo run --features=cli -- read-range my-partition-key --all
|
||||||
|
|
||||||
|
cargo run --features=cli -- insert my-partition-key my-sort-key --text "my string1"
|
||||||
|
cargo run --features=cli -- insert my-partition-key my-sort-key --text "my string2"
|
||||||
|
cargo run --features=cli -- insert my-partition-key my-sort-key2 --text "my string"
|
||||||
|
|
||||||
|
cargo run --features=cli -- read-range my-partition-key --all
|
||||||
|
|
||||||
|
causality=$(cargo run --features=cli -- read my-partition-key my-sort-key2 -b | head -n1)
|
||||||
|
cargo run --features=cli -- delete my-partition-key my-sort-key2 -c $causality
|
||||||
|
|
||||||
|
causality=$(cargo run --features=cli -- read my-partition-key my-sort-key -b | head -n1)
|
||||||
|
cargo run --features=cli -- insert my-partition-key my-sort-key --text "my string3" -c $causality
|
||||||
|
|
||||||
|
cargo run --features=cli -- read-range my-partition-key --all
|
||||||
|
```
|
466
src/k2v-client/bin/k2v-cli.rs
Normal file
466
src/k2v-client/bin/k2v-cli.rs
Normal file
|
@ -0,0 +1,466 @@
|
||||||
|
use k2v_client::*;
|
||||||
|
|
||||||
|
use garage_util::formater::format_table;
|
||||||
|
|
||||||
|
use rusoto_core::credential::AwsCredentials;
|
||||||
|
use rusoto_core::Region;
|
||||||
|
|
||||||
|
use clap::{Parser, Subcommand};
|
||||||
|
|
||||||
|
/// K2V command line interface
|
||||||
|
#[derive(Parser, Debug)]
|
||||||
|
#[clap(author, version, about, long_about = None)]
|
||||||
|
struct Args {
|
||||||
|
/// Name of the region to use
|
||||||
|
#[clap(short, long, env = "AWS_REGION", default_value = "garage")]
|
||||||
|
region: String,
|
||||||
|
/// Url of the endpoint to connect to
|
||||||
|
#[clap(short, long, env = "K2V_ENDPOINT")]
|
||||||
|
endpoint: String,
|
||||||
|
/// Access key ID
|
||||||
|
#[clap(short, long, env = "AWS_ACCESS_KEY_ID")]
|
||||||
|
key_id: String,
|
||||||
|
/// Access key ID
|
||||||
|
#[clap(short, long, env = "AWS_SECRET_ACCESS_KEY")]
|
||||||
|
secret: String,
|
||||||
|
/// Bucket name
|
||||||
|
#[clap(short, long, env = "K2V_BUCKET")]
|
||||||
|
bucket: String,
|
||||||
|
#[clap(subcommand)]
|
||||||
|
command: Command,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Subcommand, Debug)]
|
||||||
|
enum Command {
|
||||||
|
/// Insert a single value
|
||||||
|
Insert {
|
||||||
|
/// Partition key to insert to
|
||||||
|
partition_key: String,
|
||||||
|
/// Sort key to insert to
|
||||||
|
sort_key: String,
|
||||||
|
/// Causality of the insertion
|
||||||
|
#[clap(short, long)]
|
||||||
|
causality: Option<String>,
|
||||||
|
/// Value to insert
|
||||||
|
#[clap(flatten)]
|
||||||
|
value: Value,
|
||||||
|
},
|
||||||
|
/// Read a single value
|
||||||
|
Read {
|
||||||
|
/// Partition key to read from
|
||||||
|
partition_key: String,
|
||||||
|
/// Sort key to read from
|
||||||
|
sort_key: String,
|
||||||
|
/// Output formating
|
||||||
|
#[clap(flatten)]
|
||||||
|
output_kind: ReadOutputKind,
|
||||||
|
},
|
||||||
|
/// Delete a single value
|
||||||
|
Delete {
|
||||||
|
/// Partition key to delete from
|
||||||
|
partition_key: String,
|
||||||
|
/// Sort key to delete from
|
||||||
|
sort_key: String,
|
||||||
|
/// Causality information
|
||||||
|
#[clap(short, long)]
|
||||||
|
causality: String,
|
||||||
|
},
|
||||||
|
/// List partition keys
|
||||||
|
ReadIndex {
|
||||||
|
/// Output formating
|
||||||
|
#[clap(flatten)]
|
||||||
|
output_kind: BatchOutputKind,
|
||||||
|
/// Output only partition keys matching this filter
|
||||||
|
#[clap(flatten)]
|
||||||
|
filter: Filter,
|
||||||
|
},
|
||||||
|
/// Read a range of sort keys
|
||||||
|
ReadRange {
|
||||||
|
/// Partition key to read from
|
||||||
|
partition_key: String,
|
||||||
|
/// Output formating
|
||||||
|
#[clap(flatten)]
|
||||||
|
output_kind: BatchOutputKind,
|
||||||
|
/// Output only sort keys matching this filter
|
||||||
|
#[clap(flatten)]
|
||||||
|
filter: Filter,
|
||||||
|
},
|
||||||
|
/// Delete a range of sort keys
|
||||||
|
DeleteRange {
|
||||||
|
/// Partition key to delete from
|
||||||
|
partition_key: String,
|
||||||
|
/// Output formating
|
||||||
|
#[clap(flatten)]
|
||||||
|
output_kind: BatchOutputKind,
|
||||||
|
/// Delete only sort keys matching this filter
|
||||||
|
#[clap(flatten)]
|
||||||
|
filter: Filter,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Where to read a value from
|
||||||
|
#[derive(Parser, Debug)]
|
||||||
|
#[clap(group = clap::ArgGroup::new("value").multiple(false).required(true))]
|
||||||
|
struct Value {
|
||||||
|
/// Read value from a file. use - to read from stdin
|
||||||
|
#[clap(short, long, group = "value")]
|
||||||
|
file: Option<String>,
|
||||||
|
/// Read a base64 value from commandline
|
||||||
|
#[clap(short, long, group = "value")]
|
||||||
|
b64: Option<String>,
|
||||||
|
/// Read a raw (UTF-8) value from the commandline
|
||||||
|
#[clap(short, long, group = "value")]
|
||||||
|
text: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Value {
|
||||||
|
async fn to_data(&self) -> Result<Vec<u8>, Error> {
|
||||||
|
if let Some(ref text) = self.text {
|
||||||
|
Ok(text.as_bytes().to_vec())
|
||||||
|
} else if let Some(ref b64) = self.b64 {
|
||||||
|
base64::decode(b64).map_err(|_| Error::Message("invalid base64 input".into()))
|
||||||
|
} else if let Some(ref path) = self.file {
|
||||||
|
use tokio::io::AsyncReadExt;
|
||||||
|
if path == "-" {
|
||||||
|
let mut file = tokio::io::stdin();
|
||||||
|
let mut vec = Vec::new();
|
||||||
|
file.read_to_end(&mut vec).await?;
|
||||||
|
Ok(vec)
|
||||||
|
} else {
|
||||||
|
let mut file = tokio::fs::File::open(path).await?;
|
||||||
|
let mut vec = Vec::new();
|
||||||
|
file.read_to_end(&mut vec).await?;
|
||||||
|
Ok(vec)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
unreachable!("Value must have one option set")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Parser, Debug)]
|
||||||
|
#[clap(group = clap::ArgGroup::new("output-kind").multiple(false).required(false))]
|
||||||
|
struct ReadOutputKind {
|
||||||
|
/// Base64 output. Conflicts are line separated, first line is causality token
|
||||||
|
#[clap(short, long, group = "output-kind")]
|
||||||
|
b64: bool,
|
||||||
|
/// Raw output. Conflicts generate error, causality token is not returned
|
||||||
|
#[clap(short, long, group = "output-kind")]
|
||||||
|
raw: bool,
|
||||||
|
/// Human formated output
|
||||||
|
#[clap(short = 'H', long, group = "output-kind")]
|
||||||
|
human: bool,
|
||||||
|
/// JSON formated output
|
||||||
|
#[clap(short, long, group = "output-kind")]
|
||||||
|
json: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ReadOutputKind {
|
||||||
|
fn display_output(&self, val: CausalValue) -> ! {
|
||||||
|
use std::io::Write;
|
||||||
|
use std::process::exit;
|
||||||
|
|
||||||
|
if self.json {
|
||||||
|
let stdout = std::io::stdout();
|
||||||
|
serde_json::to_writer_pretty(stdout, &val).unwrap();
|
||||||
|
exit(0);
|
||||||
|
}
|
||||||
|
|
||||||
|
if self.raw {
|
||||||
|
let mut val = val.value;
|
||||||
|
if val.len() != 1 {
|
||||||
|
eprintln!(
|
||||||
|
"Raw mode can only read non-concurent values, found {} values, expected 1",
|
||||||
|
val.len()
|
||||||
|
);
|
||||||
|
exit(1);
|
||||||
|
}
|
||||||
|
let val = val.pop().unwrap();
|
||||||
|
match val {
|
||||||
|
K2vValue::Value(v) => {
|
||||||
|
std::io::stdout().write_all(&v).unwrap();
|
||||||
|
exit(0);
|
||||||
|
}
|
||||||
|
K2vValue::Tombstone => {
|
||||||
|
eprintln!("Expected value, found tombstone");
|
||||||
|
exit(2);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let causality: String = val.causality.into();
|
||||||
|
if self.b64 {
|
||||||
|
println!("{}", causality);
|
||||||
|
for val in val.value {
|
||||||
|
match val {
|
||||||
|
K2vValue::Value(v) => {
|
||||||
|
println!("{}", base64::encode(&v))
|
||||||
|
}
|
||||||
|
K2vValue::Tombstone => {
|
||||||
|
println!();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
exit(0);
|
||||||
|
}
|
||||||
|
|
||||||
|
// human
|
||||||
|
println!("causality: {}", causality);
|
||||||
|
println!("values:");
|
||||||
|
for val in val.value {
|
||||||
|
match val {
|
||||||
|
K2vValue::Value(v) => {
|
||||||
|
if let Ok(string) = std::str::from_utf8(&v) {
|
||||||
|
println!(" utf-8: {}", string);
|
||||||
|
} else {
|
||||||
|
println!(" base64: {}", base64::encode(&v));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
K2vValue::Tombstone => {
|
||||||
|
println!(" tombstone");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
exit(0);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Parser, Debug)]
|
||||||
|
#[clap(group = clap::ArgGroup::new("output-kind").multiple(false).required(false))]
|
||||||
|
struct BatchOutputKind {
|
||||||
|
/// Human formated output
|
||||||
|
#[clap(short = 'H', long, group = "output-kind")]
|
||||||
|
human: bool,
|
||||||
|
/// JSON formated output
|
||||||
|
#[clap(short, long, group = "output-kind")]
|
||||||
|
json: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Filter for batch operations
|
||||||
|
#[derive(Parser, Debug)]
|
||||||
|
#[clap(group = clap::ArgGroup::new("filter").multiple(true).required(true))]
|
||||||
|
struct Filter {
|
||||||
|
/// Match only keys starting with this prefix
|
||||||
|
#[clap(short, long, group = "filter")]
|
||||||
|
prefix: Option<String>,
|
||||||
|
/// Match only keys lexicographically after this key (including this key itself)
|
||||||
|
#[clap(short, long, group = "filter")]
|
||||||
|
start: Option<String>,
|
||||||
|
/// Match only keys lexicographically before this key (excluding this key)
|
||||||
|
#[clap(short, long, group = "filter")]
|
||||||
|
end: Option<String>,
|
||||||
|
/// Only match the first X keys
|
||||||
|
#[clap(short, long)]
|
||||||
|
limit: Option<u64>,
|
||||||
|
/// Return keys in reverse order
|
||||||
|
#[clap(short, long)]
|
||||||
|
reverse: bool,
|
||||||
|
/// Return only keys where conflict happened
|
||||||
|
#[clap(short, long)]
|
||||||
|
conflicts_only: bool,
|
||||||
|
/// Also include keys storing only tombstones
|
||||||
|
#[clap(short, long)]
|
||||||
|
tombstones: bool,
|
||||||
|
/// Return any key
|
||||||
|
#[clap(short, long, group = "filter")]
|
||||||
|
all: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Filter {
|
||||||
|
fn k2v_filter(&self) -> k2v_client::Filter<'_> {
|
||||||
|
k2v_client::Filter {
|
||||||
|
start: self.start.as_deref(),
|
||||||
|
end: self.end.as_deref(),
|
||||||
|
prefix: self.prefix.as_deref(),
|
||||||
|
limit: self.limit,
|
||||||
|
reverse: self.reverse,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> Result<(), Error> {
|
||||||
|
let args = Args::parse();
|
||||||
|
|
||||||
|
let region = Region::Custom {
|
||||||
|
name: args.region,
|
||||||
|
endpoint: args.endpoint,
|
||||||
|
};
|
||||||
|
|
||||||
|
let creds = AwsCredentials::new(args.key_id, args.secret, None, None);
|
||||||
|
|
||||||
|
let client = K2vClient::new(region, args.bucket, creds, None)?;
|
||||||
|
|
||||||
|
match args.command {
|
||||||
|
Command::Insert {
|
||||||
|
partition_key,
|
||||||
|
sort_key,
|
||||||
|
causality,
|
||||||
|
value,
|
||||||
|
} => {
|
||||||
|
client
|
||||||
|
.insert_item(
|
||||||
|
&partition_key,
|
||||||
|
&sort_key,
|
||||||
|
value.to_data().await?,
|
||||||
|
causality.map(Into::into),
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
}
|
||||||
|
Command::Delete {
|
||||||
|
partition_key,
|
||||||
|
sort_key,
|
||||||
|
causality,
|
||||||
|
} => {
|
||||||
|
client
|
||||||
|
.delete_item(&partition_key, &sort_key, causality.into())
|
||||||
|
.await?;
|
||||||
|
}
|
||||||
|
Command::Read {
|
||||||
|
partition_key,
|
||||||
|
sort_key,
|
||||||
|
output_kind,
|
||||||
|
} => {
|
||||||
|
let res = client.read_item(&partition_key, &sort_key).await?;
|
||||||
|
output_kind.display_output(res);
|
||||||
|
}
|
||||||
|
Command::ReadIndex {
|
||||||
|
output_kind,
|
||||||
|
filter,
|
||||||
|
} => {
|
||||||
|
if filter.conflicts_only || filter.tombstones {
|
||||||
|
return Err(Error::Message(
|
||||||
|
"conlicts-only and tombstones are invalid for read-index".into(),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
let res = client.read_index(filter.k2v_filter()).await?;
|
||||||
|
if output_kind.json {
|
||||||
|
let values = res
|
||||||
|
.items
|
||||||
|
.into_iter()
|
||||||
|
.map(|(k, v)| {
|
||||||
|
let mut value = serde_json::to_value(v).unwrap();
|
||||||
|
value
|
||||||
|
.as_object_mut()
|
||||||
|
.unwrap()
|
||||||
|
.insert("sort_key".to_owned(), k.into());
|
||||||
|
value
|
||||||
|
})
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
let json = serde_json::json!({
|
||||||
|
"next_key": res.next_start,
|
||||||
|
"values": values,
|
||||||
|
});
|
||||||
|
|
||||||
|
let stdout = std::io::stdout();
|
||||||
|
serde_json::to_writer_pretty(stdout, &json).unwrap();
|
||||||
|
} else {
|
||||||
|
if let Some(next) = res.next_start {
|
||||||
|
println!("next key: {}", next);
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut to_print = Vec::new();
|
||||||
|
to_print.push(format!("key:\tentries\tconflicts\tvalues\tbytes"));
|
||||||
|
for (k, v) in res.items {
|
||||||
|
to_print.push(format!(
|
||||||
|
"{}\t{}\t{}\t{}\t{}",
|
||||||
|
k, v.entries, v.conflicts, v.values, v.bytes
|
||||||
|
));
|
||||||
|
}
|
||||||
|
format_table(to_print);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Command::ReadRange {
|
||||||
|
partition_key,
|
||||||
|
output_kind,
|
||||||
|
filter,
|
||||||
|
} => {
|
||||||
|
let op = BatchReadOp {
|
||||||
|
partition_key: &partition_key,
|
||||||
|
filter: filter.k2v_filter(),
|
||||||
|
conflicts_only: filter.conflicts_only,
|
||||||
|
tombstones: filter.tombstones,
|
||||||
|
single_item: false,
|
||||||
|
};
|
||||||
|
let mut res = client.read_batch(&[op]).await?;
|
||||||
|
let res = res.pop().unwrap();
|
||||||
|
if output_kind.json {
|
||||||
|
let values = res
|
||||||
|
.items
|
||||||
|
.into_iter()
|
||||||
|
.map(|(k, v)| {
|
||||||
|
let mut value = serde_json::to_value(v).unwrap();
|
||||||
|
value
|
||||||
|
.as_object_mut()
|
||||||
|
.unwrap()
|
||||||
|
.insert("sort_key".to_owned(), k.into());
|
||||||
|
value
|
||||||
|
})
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
let json = serde_json::json!({
|
||||||
|
"next_key": res.next_start,
|
||||||
|
"values": values,
|
||||||
|
});
|
||||||
|
|
||||||
|
let stdout = std::io::stdout();
|
||||||
|
serde_json::to_writer_pretty(stdout, &json).unwrap();
|
||||||
|
} else {
|
||||||
|
if let Some(next) = res.next_start {
|
||||||
|
println!("next key: {}", next);
|
||||||
|
}
|
||||||
|
for (key, values) in res.items {
|
||||||
|
println!("key: {}", key);
|
||||||
|
let causality: String = values.causality.into();
|
||||||
|
println!("causality: {}", causality);
|
||||||
|
for value in values.value {
|
||||||
|
match value {
|
||||||
|
K2vValue::Value(v) => {
|
||||||
|
if let Ok(string) = std::str::from_utf8(&v) {
|
||||||
|
println!(" value(utf-8): {}", string);
|
||||||
|
} else {
|
||||||
|
println!(" value(base64): {}", base64::encode(&v));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
K2vValue::Tombstone => {
|
||||||
|
println!(" tombstone");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Command::DeleteRange {
|
||||||
|
partition_key,
|
||||||
|
output_kind,
|
||||||
|
filter,
|
||||||
|
} => {
|
||||||
|
let op = BatchDeleteOp {
|
||||||
|
partition_key: &partition_key,
|
||||||
|
prefix: filter.prefix.as_deref(),
|
||||||
|
start: filter.start.as_deref(),
|
||||||
|
end: filter.end.as_deref(),
|
||||||
|
single_item: false,
|
||||||
|
};
|
||||||
|
if filter.reverse
|
||||||
|
|| filter.conflicts_only
|
||||||
|
|| filter.tombstones
|
||||||
|
|| filter.limit.is_some()
|
||||||
|
{
|
||||||
|
return Err(Error::Message(
|
||||||
|
"limit, conlicts-only, reverse and tombstones are invalid for delete-range"
|
||||||
|
.into(),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let res = client.delete_batch(&[op]).await?;
|
||||||
|
|
||||||
|
if output_kind.json {
|
||||||
|
println!("{}", res[0]);
|
||||||
|
} else {
|
||||||
|
println!("deleted {} keys", res[0]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
29
src/k2v-client/error.rs
Normal file
29
src/k2v-client/error.rs
Normal file
|
@ -0,0 +1,29 @@
|
||||||
|
use std::borrow::Cow;
|
||||||
|
|
||||||
|
use thiserror::Error;
|
||||||
|
|
||||||
|
/// Errors returned by this crate
|
||||||
|
#[derive(Error, Debug)]
|
||||||
|
pub enum Error {
|
||||||
|
#[error("{0}, {1}: {2} (path = {3})")]
|
||||||
|
Remote(
|
||||||
|
http::StatusCode,
|
||||||
|
Cow<'static, str>,
|
||||||
|
Cow<'static, str>,
|
||||||
|
Cow<'static, str>,
|
||||||
|
),
|
||||||
|
#[error("received invalid response: {0}")]
|
||||||
|
InvalidResponse(Cow<'static, str>),
|
||||||
|
#[error("not found")]
|
||||||
|
NotFound,
|
||||||
|
#[error("io error: {0}")]
|
||||||
|
IoError(#[from] std::io::Error),
|
||||||
|
#[error("rusoto tls error: {0}")]
|
||||||
|
RusotoTls(#[from] rusoto_core::request::TlsError),
|
||||||
|
#[error("rusoto http error: {0}")]
|
||||||
|
RusotoHttp(#[from] rusoto_core::HttpDispatchError),
|
||||||
|
#[error("deserialization error: {0}")]
|
||||||
|
Deserialization(#[from] serde_json::Error),
|
||||||
|
#[error("{0}")]
|
||||||
|
Message(Cow<'static, str>),
|
||||||
|
}
|
611
src/k2v-client/lib.rs
Normal file
611
src/k2v-client/lib.rs
Normal file
|
@ -0,0 +1,611 @@
|
||||||
|
use std::collections::BTreeMap;
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use http::header::{ACCEPT, CONTENT_LENGTH, CONTENT_TYPE};
|
||||||
|
use http::status::StatusCode;
|
||||||
|
use http::HeaderMap;
|
||||||
|
use log::{debug, error};
|
||||||
|
|
||||||
|
use rusoto_core::{ByteStream, DispatchSignedRequest, HttpClient};
|
||||||
|
use rusoto_credential::AwsCredentials;
|
||||||
|
use rusoto_signature::region::Region;
|
||||||
|
use rusoto_signature::signature::SignedRequest;
|
||||||
|
use serde::de::Error as DeError;
|
||||||
|
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
||||||
|
|
||||||
|
use tokio::io::AsyncReadExt;
|
||||||
|
|
||||||
|
mod error;
|
||||||
|
|
||||||
|
pub use error::Error;
|
||||||
|
|
||||||
|
const DEFAULT_TIMEOUT: Duration = Duration::from_secs(5);
|
||||||
|
const DEFAULT_POLL_TIMEOUT: Duration = Duration::from_secs(300);
|
||||||
|
const SERVICE: &str = "k2v";
|
||||||
|
const GARAGE_CAUSALITY_TOKEN: &str = "X-Garage-Causality-Token";
|
||||||
|
|
||||||
|
/// Client used to query a K2V server.
|
||||||
|
pub struct K2vClient {
|
||||||
|
region: Region,
|
||||||
|
bucket: String,
|
||||||
|
creds: AwsCredentials,
|
||||||
|
client: HttpClient,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl K2vClient {
|
||||||
|
/// Create a new K2V client.
|
||||||
|
pub fn new(
|
||||||
|
region: Region,
|
||||||
|
bucket: String,
|
||||||
|
creds: AwsCredentials,
|
||||||
|
user_agent: Option<String>,
|
||||||
|
) -> Result<Self, Error> {
|
||||||
|
let mut client = HttpClient::new()?;
|
||||||
|
if let Some(ua) = user_agent {
|
||||||
|
client.local_agent_prepend(ua);
|
||||||
|
} else {
|
||||||
|
client.local_agent_prepend(format!("k2v/{}", env!("CARGO_PKG_VERSION")));
|
||||||
|
}
|
||||||
|
Ok(K2vClient {
|
||||||
|
region,
|
||||||
|
bucket,
|
||||||
|
creds,
|
||||||
|
client,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Perform a ReadItem request, reading the value(s) stored for a single pk+sk.
|
||||||
|
pub async fn read_item(
|
||||||
|
&self,
|
||||||
|
partition_key: &str,
|
||||||
|
sort_key: &str,
|
||||||
|
) -> Result<CausalValue, Error> {
|
||||||
|
let mut req = SignedRequest::new(
|
||||||
|
"GET",
|
||||||
|
SERVICE,
|
||||||
|
&self.region,
|
||||||
|
&format!("/{}/{}", self.bucket, partition_key),
|
||||||
|
);
|
||||||
|
req.add_param("sort_key", sort_key);
|
||||||
|
req.add_header(ACCEPT, "application/octet-stream, application/json");
|
||||||
|
|
||||||
|
let res = self.dispatch(req, None).await?;
|
||||||
|
|
||||||
|
let causality = res
|
||||||
|
.causality_token
|
||||||
|
.ok_or_else(|| Error::InvalidResponse("missing causality token".into()))?;
|
||||||
|
|
||||||
|
if res.status == StatusCode::NO_CONTENT {
|
||||||
|
return Ok(CausalValue {
|
||||||
|
causality,
|
||||||
|
value: vec![K2vValue::Tombstone],
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
match res.content_type.as_deref() {
|
||||||
|
Some("application/octet-stream") => Ok(CausalValue {
|
||||||
|
causality,
|
||||||
|
value: vec![K2vValue::Value(res.body)],
|
||||||
|
}),
|
||||||
|
Some("application/json") => {
|
||||||
|
let value = serde_json::from_slice(&res.body)?;
|
||||||
|
Ok(CausalValue { causality, value })
|
||||||
|
}
|
||||||
|
Some(ct) => Err(Error::InvalidResponse(
|
||||||
|
format!("invalid content type: {}", ct).into(),
|
||||||
|
)),
|
||||||
|
None => Err(Error::InvalidResponse("missing content type".into())),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Perform a PollItem request, waiting for the value(s) stored for a single pk+sk to be
|
||||||
|
/// updated.
|
||||||
|
pub async fn poll_item(
|
||||||
|
&self,
|
||||||
|
partition_key: &str,
|
||||||
|
sort_key: &str,
|
||||||
|
causality: CausalityToken,
|
||||||
|
timeout: Option<Duration>,
|
||||||
|
) -> Result<Option<CausalValue>, Error> {
|
||||||
|
let timeout = timeout.unwrap_or(DEFAULT_POLL_TIMEOUT);
|
||||||
|
|
||||||
|
let mut req = SignedRequest::new(
|
||||||
|
"GET",
|
||||||
|
SERVICE,
|
||||||
|
&self.region,
|
||||||
|
&format!("/{}/{}", self.bucket, partition_key),
|
||||||
|
);
|
||||||
|
req.add_param("sort_key", sort_key);
|
||||||
|
req.add_param("causality_token", &causality.0);
|
||||||
|
req.add_param("timeout", &timeout.as_secs().to_string());
|
||||||
|
req.add_header(ACCEPT, "application/octet-stream, application/json");
|
||||||
|
|
||||||
|
let res = self.dispatch(req, Some(timeout + DEFAULT_TIMEOUT)).await?;
|
||||||
|
|
||||||
|
let causality = res
|
||||||
|
.causality_token
|
||||||
|
.ok_or_else(|| Error::InvalidResponse("missing causality token".into()))?;
|
||||||
|
|
||||||
|
if res.status == StatusCode::NOT_MODIFIED {
|
||||||
|
return Ok(None);
|
||||||
|
}
|
||||||
|
|
||||||
|
if res.status == StatusCode::NO_CONTENT {
|
||||||
|
return Ok(Some(CausalValue {
|
||||||
|
causality,
|
||||||
|
value: vec![K2vValue::Tombstone],
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
|
match res.content_type.as_deref() {
|
||||||
|
Some("application/octet-stream") => Ok(Some(CausalValue {
|
||||||
|
causality,
|
||||||
|
value: vec![K2vValue::Value(res.body)],
|
||||||
|
})),
|
||||||
|
Some("application/json") => {
|
||||||
|
let value = serde_json::from_slice(&res.body)?;
|
||||||
|
Ok(Some(CausalValue { causality, value }))
|
||||||
|
}
|
||||||
|
Some(ct) => Err(Error::InvalidResponse(
|
||||||
|
format!("invalid content type: {}", ct).into(),
|
||||||
|
)),
|
||||||
|
None => Err(Error::InvalidResponse("missing content type".into())),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Perform an InsertItem request, inserting a value for a single pk+sk.
|
||||||
|
pub async fn insert_item(
|
||||||
|
&self,
|
||||||
|
partition_key: &str,
|
||||||
|
sort_key: &str,
|
||||||
|
value: Vec<u8>,
|
||||||
|
causality: Option<CausalityToken>,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
let mut req = SignedRequest::new(
|
||||||
|
"PUT",
|
||||||
|
SERVICE,
|
||||||
|
&self.region,
|
||||||
|
&format!("/{}/{}", self.bucket, partition_key),
|
||||||
|
);
|
||||||
|
req.add_param("sort_key", sort_key);
|
||||||
|
req.set_payload(Some(value));
|
||||||
|
|
||||||
|
if let Some(causality) = causality {
|
||||||
|
req.add_header(GARAGE_CAUSALITY_TOKEN, &causality.0);
|
||||||
|
}
|
||||||
|
|
||||||
|
self.dispatch(req, None).await?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Perform a DeleteItem request, deleting the value(s) stored for a single pk+sk.
|
||||||
|
pub async fn delete_item(
|
||||||
|
&self,
|
||||||
|
partition_key: &str,
|
||||||
|
sort_key: &str,
|
||||||
|
causality: CausalityToken,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
let mut req = SignedRequest::new(
|
||||||
|
"DELETE",
|
||||||
|
SERVICE,
|
||||||
|
&self.region,
|
||||||
|
&format!("/{}/{}", self.bucket, partition_key),
|
||||||
|
);
|
||||||
|
req.add_param("sort_key", sort_key);
|
||||||
|
req.add_header(GARAGE_CAUSALITY_TOKEN, &causality.0);
|
||||||
|
|
||||||
|
self.dispatch(req, None).await?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Perform a ReadIndex request, listing partition key which have at least one associated
|
||||||
|
/// sort key, and which matches the filter.
|
||||||
|
pub async fn read_index(
|
||||||
|
&self,
|
||||||
|
filter: Filter<'_>,
|
||||||
|
) -> Result<PaginatedRange<PartitionInfo>, Error> {
|
||||||
|
let mut req =
|
||||||
|
SignedRequest::new("GET", SERVICE, &self.region, &format!("/{}", self.bucket));
|
||||||
|
filter.insert_params(&mut req);
|
||||||
|
|
||||||
|
let res = self.dispatch(req, None).await?;
|
||||||
|
|
||||||
|
let resp: ReadIndexResponse = serde_json::from_slice(&res.body)?;
|
||||||
|
|
||||||
|
let items = resp
|
||||||
|
.partition_keys
|
||||||
|
.into_iter()
|
||||||
|
.map(|ReadIndexItem { pk, info }| (pk, info))
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
Ok(PaginatedRange {
|
||||||
|
items,
|
||||||
|
next_start: resp.next_start,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Perform an InsertBatch request, inserting multiple values at once. Note: this operation is
|
||||||
|
/// *not* atomic: it is possible for some sub-operations to fails and others to success. In
|
||||||
|
/// that case, failure is reported.
|
||||||
|
pub async fn insert_batch(&self, operations: &[BatchInsertOp<'_>]) -> Result<(), Error> {
|
||||||
|
let mut req =
|
||||||
|
SignedRequest::new("POST", SERVICE, &self.region, &format!("/{}", self.bucket));
|
||||||
|
|
||||||
|
let payload = serde_json::to_vec(operations)?;
|
||||||
|
req.set_payload(Some(payload));
|
||||||
|
self.dispatch(req, None).await?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Perform a ReadBatch request, reading multiple values or range of values at once.
|
||||||
|
pub async fn read_batch(
|
||||||
|
&self,
|
||||||
|
operations: &[BatchReadOp<'_>],
|
||||||
|
) -> Result<Vec<PaginatedRange<CausalValue>>, Error> {
|
||||||
|
let mut req =
|
||||||
|
SignedRequest::new("POST", SERVICE, &self.region, &format!("/{}", self.bucket));
|
||||||
|
req.add_param("search", "");
|
||||||
|
|
||||||
|
let payload = serde_json::to_vec(operations)?;
|
||||||
|
req.set_payload(Some(payload));
|
||||||
|
let res = self.dispatch(req, None).await?;
|
||||||
|
|
||||||
|
let resp: Vec<BatchReadResponse> = serde_json::from_slice(&res.body)?;
|
||||||
|
|
||||||
|
Ok(resp
|
||||||
|
.into_iter()
|
||||||
|
.map(|e| PaginatedRange {
|
||||||
|
items: e
|
||||||
|
.items
|
||||||
|
.into_iter()
|
||||||
|
.map(|BatchReadItem { sk, ct, v }| {
|
||||||
|
(
|
||||||
|
sk,
|
||||||
|
CausalValue {
|
||||||
|
causality: ct,
|
||||||
|
value: v,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
})
|
||||||
|
.collect(),
|
||||||
|
next_start: e.next_start,
|
||||||
|
})
|
||||||
|
.collect())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Perform a DeleteBatch request, deleting mutiple values or range of values at once, without
|
||||||
|
/// providing causality information.
|
||||||
|
pub async fn delete_batch(&self, operations: &[BatchDeleteOp<'_>]) -> Result<Vec<u64>, Error> {
|
||||||
|
let mut req =
|
||||||
|
SignedRequest::new("POST", SERVICE, &self.region, &format!("/{}", self.bucket));
|
||||||
|
req.add_param("delete", "");
|
||||||
|
|
||||||
|
let payload = serde_json::to_vec(operations)?;
|
||||||
|
req.set_payload(Some(payload));
|
||||||
|
let res = self.dispatch(req, None).await?;
|
||||||
|
|
||||||
|
let resp: Vec<BatchDeleteResponse> = serde_json::from_slice(&res.body)?;
|
||||||
|
|
||||||
|
Ok(resp.into_iter().map(|r| r.deleted_items).collect())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn dispatch(
|
||||||
|
&self,
|
||||||
|
mut req: SignedRequest,
|
||||||
|
timeout: Option<Duration>,
|
||||||
|
) -> Result<Response, Error> {
|
||||||
|
req.sign(&self.creds);
|
||||||
|
let mut res = self
|
||||||
|
.client
|
||||||
|
.dispatch(req, Some(timeout.unwrap_or(DEFAULT_TIMEOUT)))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let causality_token = res
|
||||||
|
.headers
|
||||||
|
.remove(GARAGE_CAUSALITY_TOKEN)
|
||||||
|
.map(CausalityToken);
|
||||||
|
let content_type = res.headers.remove(CONTENT_TYPE);
|
||||||
|
|
||||||
|
let body = match res.status {
|
||||||
|
StatusCode::OK => read_body(&mut res.headers, res.body).await?,
|
||||||
|
StatusCode::NO_CONTENT => Vec::new(),
|
||||||
|
StatusCode::NOT_FOUND => return Err(Error::NotFound),
|
||||||
|
StatusCode::NOT_MODIFIED => Vec::new(),
|
||||||
|
s => {
|
||||||
|
let err_body = read_body(&mut res.headers, res.body)
|
||||||
|
.await
|
||||||
|
.unwrap_or_default();
|
||||||
|
let err_body_str = std::str::from_utf8(&err_body)
|
||||||
|
.map(String::from)
|
||||||
|
.unwrap_or_else(|_| base64::encode(&err_body));
|
||||||
|
|
||||||
|
if s.is_client_error() || s.is_server_error() {
|
||||||
|
error!("Error response {}: {}", res.status, err_body_str);
|
||||||
|
let err = match serde_json::from_slice::<ErrorResponse>(&err_body) {
|
||||||
|
Ok(err) => Error::Remote(
|
||||||
|
res.status,
|
||||||
|
err.code.into(),
|
||||||
|
err.message.into(),
|
||||||
|
err.path.into(),
|
||||||
|
),
|
||||||
|
Err(_) => Error::Remote(
|
||||||
|
res.status,
|
||||||
|
"unknown".into(),
|
||||||
|
err_body_str.into(),
|
||||||
|
"?".into(),
|
||||||
|
),
|
||||||
|
};
|
||||||
|
return Err(err);
|
||||||
|
} else {
|
||||||
|
let msg = format!(
|
||||||
|
"Unexpected response code {}. Response body: {}",
|
||||||
|
res.status, err_body_str
|
||||||
|
);
|
||||||
|
error!("{}", msg);
|
||||||
|
return Err(Error::InvalidResponse(msg.into()));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
debug!(
|
||||||
|
"Response body: {}",
|
||||||
|
std::str::from_utf8(&body)
|
||||||
|
.map(String::from)
|
||||||
|
.unwrap_or_else(|_| base64::encode(&body))
|
||||||
|
);
|
||||||
|
|
||||||
|
Ok(Response {
|
||||||
|
body,
|
||||||
|
status: res.status,
|
||||||
|
causality_token,
|
||||||
|
content_type,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn read_body(headers: &mut HeaderMap<String>, body: ByteStream) -> Result<Vec<u8>, Error> {
|
||||||
|
let body_len = headers
|
||||||
|
.get(CONTENT_LENGTH)
|
||||||
|
.and_then(|h| h.parse().ok())
|
||||||
|
.unwrap_or(0);
|
||||||
|
let mut res = Vec::with_capacity(body_len);
|
||||||
|
body.into_async_read().read_to_end(&mut res).await?;
|
||||||
|
Ok(res)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// An opaque token used to convey causality between operations.
|
||||||
|
#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)]
|
||||||
|
#[serde(transparent)]
|
||||||
|
pub struct CausalityToken(String);
|
||||||
|
|
||||||
|
impl From<String> for CausalityToken {
|
||||||
|
fn from(v: String) -> Self {
|
||||||
|
CausalityToken(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<CausalityToken> for String {
|
||||||
|
fn from(v: CausalityToken) -> Self {
|
||||||
|
v.0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A value in K2V. can be either a binary value, or a tombstone.
|
||||||
|
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||||
|
pub enum K2vValue {
|
||||||
|
Tombstone,
|
||||||
|
Value(Vec<u8>),
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<Vec<u8>> for K2vValue {
|
||||||
|
fn from(v: Vec<u8>) -> Self {
|
||||||
|
K2vValue::Value(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<Option<Vec<u8>>> for K2vValue {
|
||||||
|
fn from(v: Option<Vec<u8>>) -> Self {
|
||||||
|
match v {
|
||||||
|
Some(v) => K2vValue::Value(v),
|
||||||
|
None => K2vValue::Tombstone,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<'de> Deserialize<'de> for K2vValue {
|
||||||
|
fn deserialize<D>(d: D) -> Result<Self, D::Error>
|
||||||
|
where
|
||||||
|
D: Deserializer<'de>,
|
||||||
|
{
|
||||||
|
let val: Option<&str> = Option::deserialize(d)?;
|
||||||
|
Ok(match val {
|
||||||
|
Some(s) => {
|
||||||
|
K2vValue::Value(base64::decode(s).map_err(|_| DeError::custom("invalid base64"))?)
|
||||||
|
}
|
||||||
|
None => K2vValue::Tombstone,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Serialize for K2vValue {
|
||||||
|
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||||
|
where
|
||||||
|
S: Serializer,
|
||||||
|
{
|
||||||
|
match self {
|
||||||
|
K2vValue::Tombstone => serializer.serialize_none(),
|
||||||
|
K2vValue::Value(v) => {
|
||||||
|
let b64 = base64::encode(v);
|
||||||
|
serializer.serialize_str(&b64)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A set of K2vValue and associated causality information.
|
||||||
|
#[derive(Debug, Clone, Serialize)]
|
||||||
|
pub struct CausalValue {
|
||||||
|
pub causality: CausalityToken,
|
||||||
|
pub value: Vec<K2vValue>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Result of paginated requests.
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct PaginatedRange<V> {
|
||||||
|
pub items: BTreeMap<String, V>,
|
||||||
|
pub next_start: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Filter for batch operations.
|
||||||
|
#[derive(Debug, Default, Clone, Deserialize, Serialize)]
|
||||||
|
pub struct Filter<'a> {
|
||||||
|
pub start: Option<&'a str>,
|
||||||
|
pub end: Option<&'a str>,
|
||||||
|
pub prefix: Option<&'a str>,
|
||||||
|
pub limit: Option<u64>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub reverse: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<'a> Filter<'a> {
|
||||||
|
fn insert_params(&self, req: &mut SignedRequest) {
|
||||||
|
if let Some(start) = &self.start {
|
||||||
|
req.add_param("start", start);
|
||||||
|
}
|
||||||
|
if let Some(end) = &self.end {
|
||||||
|
req.add_param("end", end);
|
||||||
|
}
|
||||||
|
if let Some(prefix) = &self.prefix {
|
||||||
|
req.add_param("prefix", prefix);
|
||||||
|
}
|
||||||
|
if let Some(limit) = &self.limit {
|
||||||
|
req.add_param("limit", &limit.to_string());
|
||||||
|
}
|
||||||
|
if self.reverse {
|
||||||
|
req.add_param("reverse", "true");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Deserialize)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
struct ReadIndexResponse<'a> {
|
||||||
|
#[serde(flatten, borrow)]
|
||||||
|
#[allow(dead_code)]
|
||||||
|
filter: Filter<'a>,
|
||||||
|
partition_keys: Vec<ReadIndexItem>,
|
||||||
|
#[allow(dead_code)]
|
||||||
|
more: bool,
|
||||||
|
next_start: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Deserialize)]
|
||||||
|
struct ReadIndexItem {
|
||||||
|
pk: String,
|
||||||
|
#[serde(flatten)]
|
||||||
|
info: PartitionInfo,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Information about data stored with a given partition key.
|
||||||
|
#[derive(Debug, Clone, Deserialize, Serialize)]
|
||||||
|
pub struct PartitionInfo {
|
||||||
|
pub entries: u64,
|
||||||
|
pub conflicts: u64,
|
||||||
|
pub values: u64,
|
||||||
|
pub bytes: u64,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Single sub-operation of an InsertBatch.
|
||||||
|
#[derive(Debug, Clone, Serialize)]
|
||||||
|
pub struct BatchInsertOp<'a> {
|
||||||
|
#[serde(rename = "pk")]
|
||||||
|
pub partition_key: &'a str,
|
||||||
|
#[serde(rename = "sk")]
|
||||||
|
pub sort_key: &'a str,
|
||||||
|
#[serde(rename = "ct")]
|
||||||
|
pub causality: Option<CausalityToken>,
|
||||||
|
#[serde(rename = "v")]
|
||||||
|
pub value: K2vValue,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Single sub-operation of a ReadBatch.
|
||||||
|
#[derive(Debug, Default, Clone, Deserialize, Serialize)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct BatchReadOp<'a> {
|
||||||
|
pub partition_key: &'a str,
|
||||||
|
#[serde(flatten, borrow)]
|
||||||
|
pub filter: Filter<'a>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub single_item: bool,
|
||||||
|
#[serde(default)]
|
||||||
|
pub conflicts_only: bool,
|
||||||
|
#[serde(default)]
|
||||||
|
pub tombstones: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Deserialize)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
struct BatchReadResponse<'a> {
|
||||||
|
#[serde(flatten, borrow)]
|
||||||
|
#[allow(dead_code)]
|
||||||
|
op: BatchReadOp<'a>,
|
||||||
|
items: Vec<BatchReadItem>,
|
||||||
|
#[allow(dead_code)]
|
||||||
|
more: bool,
|
||||||
|
next_start: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Deserialize)]
|
||||||
|
struct BatchReadItem {
|
||||||
|
sk: String,
|
||||||
|
ct: CausalityToken,
|
||||||
|
v: Vec<K2vValue>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Single sub-operation of a DeleteBatch
|
||||||
|
#[derive(Debug, Clone, Deserialize, Serialize)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct BatchDeleteOp<'a> {
|
||||||
|
pub partition_key: &'a str,
|
||||||
|
pub prefix: Option<&'a str>,
|
||||||
|
pub start: Option<&'a str>,
|
||||||
|
pub end: Option<&'a str>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub single_item: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<'a> BatchDeleteOp<'a> {
|
||||||
|
pub fn new(partition_key: &'a str) -> Self {
|
||||||
|
BatchDeleteOp {
|
||||||
|
partition_key,
|
||||||
|
prefix: None,
|
||||||
|
start: None,
|
||||||
|
end: None,
|
||||||
|
single_item: false,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Deserialize)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
struct BatchDeleteResponse<'a> {
|
||||||
|
#[serde(flatten, borrow)]
|
||||||
|
#[allow(dead_code)]
|
||||||
|
filter: BatchDeleteOp<'a>,
|
||||||
|
deleted_items: u64,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
struct ErrorResponse {
|
||||||
|
code: String,
|
||||||
|
message: String,
|
||||||
|
#[allow(dead_code)]
|
||||||
|
region: String,
|
||||||
|
path: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
struct Response {
|
||||||
|
body: Vec<u8>,
|
||||||
|
status: StatusCode,
|
||||||
|
causality_token: Option<CausalityToken>,
|
||||||
|
content_type: Option<String>,
|
||||||
|
}
|
|
@ -1,6 +1,6 @@
|
||||||
[package]
|
[package]
|
||||||
name = "garage_model"
|
name = "garage_model"
|
||||||
version = "0.7.0"
|
version = "0.7.3"
|
||||||
authors = ["Alex Auvolat <alex@adnab.me>"]
|
authors = ["Alex Auvolat <alex@adnab.me>"]
|
||||||
edition = "2018"
|
edition = "2018"
|
||||||
license = "AGPL-3.0"
|
license = "AGPL-3.0"
|
||||||
|
@ -14,16 +14,18 @@ path = "lib.rs"
|
||||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
garage_rpc = { version = "0.7.0", path = "../rpc" }
|
garage_rpc = { version = "0.7.3", path = "../rpc" }
|
||||||
garage_table = { version = "0.7.0", path = "../table" }
|
garage_table = { version = "0.7.3", path = "../table" }
|
||||||
garage_block = { version = "0.7.0", path = "../block" }
|
garage_block = { version = "0.7.3", path = "../block" }
|
||||||
garage_util = { version = "0.7.0", path = "../util" }
|
garage_util = { version = "0.7.3", path = "../util" }
|
||||||
garage_model_050 = { package = "garage_model", version = "0.5.1" }
|
garage_model_050 = { package = "garage_model", version = "0.5.1" }
|
||||||
|
|
||||||
async-trait = "0.1.7"
|
async-trait = "0.1.7"
|
||||||
arc-swap = "1.0"
|
arc-swap = "1.0"
|
||||||
|
blake2 = "0.9"
|
||||||
err-derive = "0.3"
|
err-derive = "0.3"
|
||||||
hex = "0.4"
|
hex = "0.4"
|
||||||
|
base64 = "0.13"
|
||||||
tracing = "0.1.30"
|
tracing = "0.1.30"
|
||||||
rand = "0.8"
|
rand = "0.8"
|
||||||
zstd = { version = "0.9", default-features = false }
|
zstd = { version = "0.9", default-features = false }
|
||||||
|
@ -42,3 +44,6 @@ opentelemetry = "0.17"
|
||||||
#netapp = { version = "0.3.0", git = "https://git.deuxfleurs.fr/lx/netapp" }
|
#netapp = { version = "0.3.0", git = "https://git.deuxfleurs.fr/lx/netapp" }
|
||||||
#netapp = { version = "0.4", path = "../../../netapp" }
|
#netapp = { version = "0.4", path = "../../../netapp" }
|
||||||
netapp = "0.4"
|
netapp = "0.4"
|
||||||
|
|
||||||
|
[features]
|
||||||
|
k2v = [ "garage_util/k2v" ]
|
||||||
|
|
|
@ -13,13 +13,19 @@ use garage_table::replication::TableFullReplication;
|
||||||
use garage_table::replication::TableShardedReplication;
|
use garage_table::replication::TableShardedReplication;
|
||||||
use garage_table::*;
|
use garage_table::*;
|
||||||
|
|
||||||
use crate::block_ref_table::*;
|
use crate::s3::block_ref_table::*;
|
||||||
|
use crate::s3::object_table::*;
|
||||||
|
use crate::s3::version_table::*;
|
||||||
|
|
||||||
use crate::bucket_alias_table::*;
|
use crate::bucket_alias_table::*;
|
||||||
use crate::bucket_table::*;
|
use crate::bucket_table::*;
|
||||||
use crate::helper;
|
use crate::helper;
|
||||||
use crate::key_table::*;
|
use crate::key_table::*;
|
||||||
use crate::object_table::*;
|
|
||||||
use crate::version_table::*;
|
#[cfg(feature = "k2v")]
|
||||||
|
use crate::index_counter::*;
|
||||||
|
#[cfg(feature = "k2v")]
|
||||||
|
use crate::k2v::{counter_table::*, item_table::*, poll::*, rpc::*};
|
||||||
|
|
||||||
/// An entire Garage full of data
|
/// An entire Garage full of data
|
||||||
pub struct Garage {
|
pub struct Garage {
|
||||||
|
@ -35,16 +41,32 @@ pub struct Garage {
|
||||||
/// The block manager
|
/// The block manager
|
||||||
pub block_manager: Arc<BlockManager>,
|
pub block_manager: Arc<BlockManager>,
|
||||||
|
|
||||||
/// Table containing informations about buckets
|
/// Table containing buckets
|
||||||
pub bucket_table: Arc<Table<BucketTable, TableFullReplication>>,
|
pub bucket_table: Arc<Table<BucketTable, TableFullReplication>>,
|
||||||
/// Table containing informations about bucket aliases
|
/// Table containing bucket aliases
|
||||||
pub bucket_alias_table: Arc<Table<BucketAliasTable, TableFullReplication>>,
|
pub bucket_alias_table: Arc<Table<BucketAliasTable, TableFullReplication>>,
|
||||||
/// Table containing informations about api keys
|
/// Table containing api keys
|
||||||
pub key_table: Arc<Table<KeyTable, TableFullReplication>>,
|
pub key_table: Arc<Table<KeyTable, TableFullReplication>>,
|
||||||
|
|
||||||
|
/// Table containing S3 objects
|
||||||
pub object_table: Arc<Table<ObjectTable, TableShardedReplication>>,
|
pub object_table: Arc<Table<ObjectTable, TableShardedReplication>>,
|
||||||
|
/// Table containing S3 object versions
|
||||||
pub version_table: Arc<Table<VersionTable, TableShardedReplication>>,
|
pub version_table: Arc<Table<VersionTable, TableShardedReplication>>,
|
||||||
|
/// Table containing S3 block references (not blocks themselves)
|
||||||
pub block_ref_table: Arc<Table<BlockRefTable, TableShardedReplication>>,
|
pub block_ref_table: Arc<Table<BlockRefTable, TableShardedReplication>>,
|
||||||
|
|
||||||
|
#[cfg(feature = "k2v")]
|
||||||
|
pub k2v: GarageK2V,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "k2v")]
|
||||||
|
pub struct GarageK2V {
|
||||||
|
/// Table containing K2V items
|
||||||
|
pub item_table: Arc<Table<K2VItemTable, TableShardedReplication>>,
|
||||||
|
/// Indexing table containing K2V item counters
|
||||||
|
pub counter_table: Arc<IndexCounter<K2VCounterTable>>,
|
||||||
|
/// K2V RPC handler
|
||||||
|
pub rpc: Arc<K2VRpcHandler>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Garage {
|
impl Garage {
|
||||||
|
@ -95,6 +117,21 @@ impl Garage {
|
||||||
system.clone(),
|
system.clone(),
|
||||||
);
|
);
|
||||||
|
|
||||||
|
// ---- admin tables ----
|
||||||
|
info!("Initialize bucket_table...");
|
||||||
|
let bucket_table = Table::new(BucketTable, control_rep_param.clone(), system.clone(), &db);
|
||||||
|
|
||||||
|
info!("Initialize bucket_alias_table...");
|
||||||
|
let bucket_alias_table = Table::new(
|
||||||
|
BucketAliasTable,
|
||||||
|
control_rep_param.clone(),
|
||||||
|
system.clone(),
|
||||||
|
&db,
|
||||||
|
);
|
||||||
|
info!("Initialize key_table_table...");
|
||||||
|
let key_table = Table::new(KeyTable, control_rep_param, system.clone(), &db);
|
||||||
|
|
||||||
|
// ---- S3 tables ----
|
||||||
info!("Initialize block_ref_table...");
|
info!("Initialize block_ref_table...");
|
||||||
let block_ref_table = Table::new(
|
let block_ref_table = Table::new(
|
||||||
BlockRefTable {
|
BlockRefTable {
|
||||||
|
@ -117,29 +154,20 @@ impl Garage {
|
||||||
);
|
);
|
||||||
|
|
||||||
info!("Initialize object_table...");
|
info!("Initialize object_table...");
|
||||||
|
#[allow(clippy::redundant_clone)]
|
||||||
let object_table = Table::new(
|
let object_table = Table::new(
|
||||||
ObjectTable {
|
ObjectTable {
|
||||||
background: background.clone(),
|
background: background.clone(),
|
||||||
version_table: version_table.clone(),
|
version_table: version_table.clone(),
|
||||||
},
|
},
|
||||||
meta_rep_param,
|
meta_rep_param.clone(),
|
||||||
system.clone(),
|
system.clone(),
|
||||||
&db,
|
&db,
|
||||||
);
|
);
|
||||||
|
|
||||||
info!("Initialize bucket_table...");
|
// ---- K2V ----
|
||||||
let bucket_table = Table::new(BucketTable, control_rep_param.clone(), system.clone(), &db);
|
#[cfg(feature = "k2v")]
|
||||||
|
let k2v = GarageK2V::new(system.clone(), &db, meta_rep_param);
|
||||||
info!("Initialize bucket_alias_table...");
|
|
||||||
let bucket_alias_table = Table::new(
|
|
||||||
BucketAliasTable,
|
|
||||||
control_rep_param.clone(),
|
|
||||||
system.clone(),
|
|
||||||
&db,
|
|
||||||
);
|
|
||||||
|
|
||||||
info!("Initialize key_table_table...");
|
|
||||||
let key_table = Table::new(KeyTable, control_rep_param, system.clone(), &db);
|
|
||||||
|
|
||||||
info!("Initialize Garage...");
|
info!("Initialize Garage...");
|
||||||
|
|
||||||
|
@ -155,10 +183,43 @@ impl Garage {
|
||||||
object_table,
|
object_table,
|
||||||
version_table,
|
version_table,
|
||||||
block_ref_table,
|
block_ref_table,
|
||||||
|
#[cfg(feature = "k2v")]
|
||||||
|
k2v,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn bucket_helper(&self) -> helper::bucket::BucketHelper {
|
pub fn bucket_helper(&self) -> helper::bucket::BucketHelper {
|
||||||
helper::bucket::BucketHelper(self)
|
helper::bucket::BucketHelper(self)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn key_helper(&self) -> helper::key::KeyHelper {
|
||||||
|
helper::key::KeyHelper(self)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "k2v")]
|
||||||
|
impl GarageK2V {
|
||||||
|
fn new(system: Arc<System>, db: &sled::Db, meta_rep_param: TableShardedReplication) -> Self {
|
||||||
|
info!("Initialize K2V counter table...");
|
||||||
|
let counter_table = IndexCounter::new(system.clone(), meta_rep_param.clone(), db);
|
||||||
|
info!("Initialize K2V subscription manager...");
|
||||||
|
let subscriptions = Arc::new(SubscriptionManager::new());
|
||||||
|
info!("Initialize K2V item table...");
|
||||||
|
let item_table = Table::new(
|
||||||
|
K2VItemTable {
|
||||||
|
counter_table: counter_table.clone(),
|
||||||
|
subscriptions: subscriptions.clone(),
|
||||||
|
},
|
||||||
|
meta_rep_param,
|
||||||
|
system.clone(),
|
||||||
|
db,
|
||||||
|
);
|
||||||
|
let rpc = K2VRpcHandler::new(system, item_table.clone(), subscriptions);
|
||||||
|
|
||||||
|
Self {
|
||||||
|
item_table,
|
||||||
|
counter_table,
|
||||||
|
rpc,
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,15 +1,18 @@
|
||||||
use garage_table::util::EmptyKey;
|
|
||||||
use garage_util::crdt::*;
|
use garage_util::crdt::*;
|
||||||
use garage_util::data::*;
|
use garage_util::data::*;
|
||||||
use garage_util::error::{Error as GarageError, OkOrMessage};
|
use garage_util::error::{Error as GarageError, OkOrMessage};
|
||||||
use garage_util::time::*;
|
use garage_util::time::*;
|
||||||
|
|
||||||
|
use garage_table::util::*;
|
||||||
|
|
||||||
use crate::bucket_alias_table::*;
|
use crate::bucket_alias_table::*;
|
||||||
use crate::bucket_table::*;
|
use crate::bucket_table::*;
|
||||||
use crate::garage::Garage;
|
use crate::garage::Garage;
|
||||||
use crate::helper::error::*;
|
use crate::helper::error::*;
|
||||||
use crate::key_table::{Key, KeyFilter};
|
use crate::helper::key::KeyHelper;
|
||||||
|
use crate::key_table::*;
|
||||||
use crate::permission::BucketKeyPerm;
|
use crate::permission::BucketKeyPerm;
|
||||||
|
use crate::s3::object_table::ObjectFilter;
|
||||||
|
|
||||||
pub struct BucketHelper<'a>(pub(crate) &'a Garage);
|
pub struct BucketHelper<'a>(pub(crate) &'a Garage);
|
||||||
|
|
||||||
|
@ -49,6 +52,23 @@ impl<'a> BucketHelper<'a> {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[allow(clippy::ptr_arg)]
|
||||||
|
pub async fn resolve_bucket(&self, bucket_name: &String, api_key: &Key) -> Result<Uuid, Error> {
|
||||||
|
let api_key_params = api_key
|
||||||
|
.state
|
||||||
|
.as_option()
|
||||||
|
.ok_or_message("Key should not be deleted at this point")?;
|
||||||
|
|
||||||
|
if let Some(Some(bucket_id)) = api_key_params.local_aliases.get(bucket_name) {
|
||||||
|
Ok(*bucket_id)
|
||||||
|
} else {
|
||||||
|
Ok(self
|
||||||
|
.resolve_global_bucket_name(bucket_name)
|
||||||
|
.await?
|
||||||
|
.ok_or_else(|| Error::NoSuchBucket(bucket_name.to_string()))?)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// Returns a Bucket if it is present in bucket table,
|
/// Returns a Bucket if it is present in bucket table,
|
||||||
/// even if it is in deleted state. Querying a non-existing
|
/// even if it is in deleted state. Querying a non-existing
|
||||||
/// bucket ID returns an internal error.
|
/// bucket ID returns an internal error.
|
||||||
|
@ -71,63 +91,7 @@ impl<'a> BucketHelper<'a> {
|
||||||
.get(&EmptyKey, &bucket_id)
|
.get(&EmptyKey, &bucket_id)
|
||||||
.await?
|
.await?
|
||||||
.filter(|b| !b.is_deleted())
|
.filter(|b| !b.is_deleted())
|
||||||
.ok_or_bad_request(format!(
|
.ok_or_else(|| Error::NoSuchBucket(hex::encode(bucket_id)))
|
||||||
"Bucket {:?} does not exist or has been deleted",
|
|
||||||
bucket_id
|
|
||||||
))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns a Key if it is present in key table,
|
|
||||||
/// even if it is in deleted state. Querying a non-existing
|
|
||||||
/// key ID returns an internal error.
|
|
||||||
pub async fn get_internal_key(&self, key_id: &String) -> Result<Key, Error> {
|
|
||||||
Ok(self
|
|
||||||
.0
|
|
||||||
.key_table
|
|
||||||
.get(&EmptyKey, key_id)
|
|
||||||
.await?
|
|
||||||
.ok_or_message(format!("Key {} does not exist", key_id))?)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns a Key if it is present in key table,
|
|
||||||
/// only if it is in non-deleted state.
|
|
||||||
/// Querying a non-existing key ID or a deleted key
|
|
||||||
/// returns a bad request error.
|
|
||||||
pub async fn get_existing_key(&self, key_id: &String) -> Result<Key, Error> {
|
|
||||||
self.0
|
|
||||||
.key_table
|
|
||||||
.get(&EmptyKey, key_id)
|
|
||||||
.await?
|
|
||||||
.filter(|b| !b.state.is_deleted())
|
|
||||||
.ok_or_bad_request(format!("Key {} does not exist or has been deleted", key_id))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns a Key if it is present in key table,
|
|
||||||
/// looking it up by key ID or by a match on its name,
|
|
||||||
/// only if it is in non-deleted state.
|
|
||||||
/// Querying a non-existing key ID or a deleted key
|
|
||||||
/// returns a bad request error.
|
|
||||||
pub async fn get_existing_matching_key(&self, pattern: &str) -> Result<Key, Error> {
|
|
||||||
let candidates = self
|
|
||||||
.0
|
|
||||||
.key_table
|
|
||||||
.get_range(
|
|
||||||
&EmptyKey,
|
|
||||||
None,
|
|
||||||
Some(KeyFilter::MatchesAndNotDeleted(pattern.to_string())),
|
|
||||||
10,
|
|
||||||
)
|
|
||||||
.await?
|
|
||||||
.into_iter()
|
|
||||||
.collect::<Vec<_>>();
|
|
||||||
if candidates.len() != 1 {
|
|
||||||
Err(Error::BadRequest(format!(
|
|
||||||
"{} matching keys",
|
|
||||||
candidates.len()
|
|
||||||
)))
|
|
||||||
} else {
|
|
||||||
Ok(candidates.into_iter().next().unwrap())
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Sets a new alias for a bucket in global namespace.
|
/// Sets a new alias for a bucket in global namespace.
|
||||||
|
@ -141,10 +105,7 @@ impl<'a> BucketHelper<'a> {
|
||||||
alias_name: &String,
|
alias_name: &String,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
if !is_valid_bucket_name(alias_name) {
|
if !is_valid_bucket_name(alias_name) {
|
||||||
return Err(Error::BadRequest(format!(
|
return Err(Error::InvalidBucketName(alias_name.to_string()));
|
||||||
"{}: {}",
|
|
||||||
alias_name, INVALID_BUCKET_NAME_MESSAGE
|
|
||||||
)));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut bucket = self.get_existing_bucket(bucket_id).await?;
|
let mut bucket = self.get_existing_bucket(bucket_id).await?;
|
||||||
|
@ -175,7 +136,7 @@ impl<'a> BucketHelper<'a> {
|
||||||
|
|
||||||
let alias = match alias {
|
let alias = match alias {
|
||||||
None => BucketAlias::new(alias_name.clone(), alias_ts, Some(bucket_id))
|
None => BucketAlias::new(alias_name.clone(), alias_ts, Some(bucket_id))
|
||||||
.ok_or_bad_request(format!("{}: {}", alias_name, INVALID_BUCKET_NAME_MESSAGE))?,
|
.ok_or_else(|| Error::InvalidBucketName(alias_name.clone()))?,
|
||||||
Some(mut a) => {
|
Some(mut a) => {
|
||||||
a.state = Lww::raw(alias_ts, Some(bucket_id));
|
a.state = Lww::raw(alias_ts, Some(bucket_id));
|
||||||
a
|
a
|
||||||
|
@ -263,7 +224,7 @@ impl<'a> BucketHelper<'a> {
|
||||||
.bucket_alias_table
|
.bucket_alias_table
|
||||||
.get(&EmptyKey, alias_name)
|
.get(&EmptyKey, alias_name)
|
||||||
.await?
|
.await?
|
||||||
.ok_or_message(format!("Alias {} not found", alias_name))?;
|
.ok_or_else(|| Error::NoSuchBucket(alias_name.to_string()))?;
|
||||||
|
|
||||||
// Checks ok, remove alias
|
// Checks ok, remove alias
|
||||||
let alias_ts = match bucket.state.as_option() {
|
let alias_ts = match bucket.state.as_option() {
|
||||||
|
@ -302,15 +263,14 @@ impl<'a> BucketHelper<'a> {
|
||||||
key_id: &String,
|
key_id: &String,
|
||||||
alias_name: &String,
|
alias_name: &String,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
|
let key_helper = KeyHelper(self.0);
|
||||||
|
|
||||||
if !is_valid_bucket_name(alias_name) {
|
if !is_valid_bucket_name(alias_name) {
|
||||||
return Err(Error::BadRequest(format!(
|
return Err(Error::InvalidBucketName(alias_name.to_string()));
|
||||||
"{}: {}",
|
|
||||||
alias_name, INVALID_BUCKET_NAME_MESSAGE
|
|
||||||
)));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut bucket = self.get_existing_bucket(bucket_id).await?;
|
let mut bucket = self.get_existing_bucket(bucket_id).await?;
|
||||||
let mut key = self.get_existing_key(key_id).await?;
|
let mut key = key_helper.get_existing_key(key_id).await?;
|
||||||
|
|
||||||
let mut key_param = key.state.as_option_mut().unwrap();
|
let mut key_param = key.state.as_option_mut().unwrap();
|
||||||
|
|
||||||
|
@ -359,8 +319,10 @@ impl<'a> BucketHelper<'a> {
|
||||||
key_id: &String,
|
key_id: &String,
|
||||||
alias_name: &String,
|
alias_name: &String,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
|
let key_helper = KeyHelper(self.0);
|
||||||
|
|
||||||
let mut bucket = self.get_existing_bucket(bucket_id).await?;
|
let mut bucket = self.get_existing_bucket(bucket_id).await?;
|
||||||
let mut key = self.get_existing_key(key_id).await?;
|
let mut key = key_helper.get_existing_key(key_id).await?;
|
||||||
|
|
||||||
let mut bucket_p = bucket.state.as_option_mut().unwrap();
|
let mut bucket_p = bucket.state.as_option_mut().unwrap();
|
||||||
|
|
||||||
|
@ -428,8 +390,10 @@ impl<'a> BucketHelper<'a> {
|
||||||
key_id: &String,
|
key_id: &String,
|
||||||
mut perm: BucketKeyPerm,
|
mut perm: BucketKeyPerm,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
|
let key_helper = KeyHelper(self.0);
|
||||||
|
|
||||||
let mut bucket = self.get_internal_bucket(bucket_id).await?;
|
let mut bucket = self.get_internal_bucket(bucket_id).await?;
|
||||||
let mut key = self.get_internal_key(key_id).await?;
|
let mut key = key_helper.get_internal_key(key_id).await?;
|
||||||
|
|
||||||
if let Some(bstate) = bucket.state.as_option() {
|
if let Some(bstate) = bucket.state.as_option() {
|
||||||
if let Some(kp) = bstate.authorized_keys.get(key_id) {
|
if let Some(kp) = bstate.authorized_keys.get(key_id) {
|
||||||
|
@ -465,4 +429,47 @@ impl<'a> BucketHelper<'a> {
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub async fn is_bucket_empty(&self, bucket_id: Uuid) -> Result<bool, Error> {
|
||||||
|
let objects = self
|
||||||
|
.0
|
||||||
|
.object_table
|
||||||
|
.get_range(
|
||||||
|
&bucket_id,
|
||||||
|
None,
|
||||||
|
Some(ObjectFilter::IsData),
|
||||||
|
10,
|
||||||
|
EnumerationOrder::Forward,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
if !objects.is_empty() {
|
||||||
|
return Ok(false);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "k2v")]
|
||||||
|
{
|
||||||
|
use garage_rpc::ring::Ring;
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
let ring: Arc<Ring> = self.0.system.ring.borrow().clone();
|
||||||
|
let k2vindexes = self
|
||||||
|
.0
|
||||||
|
.k2v
|
||||||
|
.counter_table
|
||||||
|
.table
|
||||||
|
.get_range(
|
||||||
|
&bucket_id,
|
||||||
|
None,
|
||||||
|
Some((DeletedFilter::NotDeleted, ring.layout.node_id_vec.clone())),
|
||||||
|
10,
|
||||||
|
EnumerationOrder::Forward,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
if !k2vindexes.is_empty() {
|
||||||
|
return Ok(false);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(true)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -10,6 +10,16 @@ pub enum Error {
|
||||||
|
|
||||||
#[error(display = "Bad request: {}", _0)]
|
#[error(display = "Bad request: {}", _0)]
|
||||||
BadRequest(String),
|
BadRequest(String),
|
||||||
|
|
||||||
|
/// Bucket name is not valid according to AWS S3 specs
|
||||||
|
#[error(display = "Invalid bucket name: {}", _0)]
|
||||||
|
InvalidBucketName(String),
|
||||||
|
|
||||||
|
#[error(display = "Access key not found: {}", _0)]
|
||||||
|
NoSuchAccessKey(String),
|
||||||
|
|
||||||
|
#[error(display = "Bucket not found: {}", _0)]
|
||||||
|
NoSuchBucket(String),
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<netapp::error::Error> for Error {
|
impl From<netapp::error::Error> for Error {
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue