Compare commits: main ... update-quickstart-docs-layout-apply

No commits in common. "main" and "update-quickstart-docs-layout-apply" have entirely different histories.

141 changed files with 8831 additions and 2307 deletions
.cargo/config.toml — new file (3 lines)
@@ -0,0 +1,3 @@
+[target.x86_64-unknown-linux-gnu]
+linker = "clang"
+rustflags = ["-C", "link-arg=-fuse-ld=mold"]
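For context, the committed `.cargo/config.toml` added above is equivalent to passing the same settings per invocation — the Makefile hunk later in this diff does exactly that on the `main` side. A minimal sketch:

```bash
# Equivalent one-off invocation, mirroring the --config flags used in the
# Makefile on the main side of this diff:
cargo build \
    --config 'target.x86_64-unknown-linux-gnu.linker="clang"' \
    --config 'target.x86_64-unknown-linux-gnu.rustflags=["-C", "link-arg=-fuse-ld=mold"]'
```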
@@ -16,21 +16,32 @@ steps:
   - name: build
     image: nixpkgs/nix:nixos-22.05
     commands:
-      - nix-build -j4 --attr flakePackages.dev
+      - nix-build --no-build-output --attr clippy.amd64 --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}

-  - name: unit + func tests (lmdb)
+  - name: unit + func tests
     image: nixpkgs/nix:nixos-22.05
+    environment:
+      GARAGE_TEST_INTEGRATION_EXE: result-bin/bin/garage
+      GARAGE_TEST_INTEGRATION_PATH: tmp-garage-integration
     commands:
-      - nix-build -j4 --attr flakePackages.tests-lmdb
-
-  - name: unit + func tests (sqlite)
-    image: nixpkgs/nix:nixos-22.05
-    commands:
-      - nix-build -j4 --attr flakePackages.tests-sqlite
+      - nix-build --no-build-output --attr clippy.amd64 --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
+      - nix-build --no-build-output --attr test.amd64
+      - ./result/bin/garage_db-*
+      - ./result/bin/garage_api-*
+      - ./result/bin/garage_model-*
+      - ./result/bin/garage_rpc-*
+      - ./result/bin/garage_table-*
+      - ./result/bin/garage_util-*
+      - ./result/bin/garage_web-*
+      - ./result/bin/garage-*
+      - GARAGE_TEST_INTEGRATION_DB_ENGINE=lmdb ./result/bin/integration-* || (cat tmp-garage-integration/stderr.log; false)
+      - nix-shell --attr ci --run "killall -9 garage" || true
+      - GARAGE_TEST_INTEGRATION_DB_ENGINE=sqlite ./result/bin/integration-* || (cat tmp-garage-integration/stderr.log; false)
+      - rm result
+      - rm -rv tmp-garage-integration

   - name: integration tests
     image: nixpkgs/nix:nixos-22.05
     commands:
-      - nix-build -j4 --attr flakePackages.dev
+      - nix-build --no-build-output --attr clippy.amd64 --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
       - nix-shell --attr ci --run ./script/test-smoke.sh || (cat /tmp/garage.log; false)
-    depends_on: [ build ]
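The branch-side test step can be reproduced outside CI; a sketch using the attribute, path, and variable names taken from the hunk above:

```bash
# Sketch: run the tests locally the same way the branch-side CI step does
nix-build --no-build-output --attr test.amd64
GARAGE_TEST_INTEGRATION_DB_ENGINE=lmdb ./result/bin/integration-* \
    || (cat tmp-garage-integration/stderr.log; false)
```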
@@ -20,9 +20,8 @@ steps:

   - name: multiarch-docker
     image: nixpkgs/nix:nixos-22.05
-    environment:
-      DOCKER_AUTH:
-        from_secret: docker_auth
+    secrets:
+      - docker_auth
     commands:
       - mkdir -p /root/.docker
       - echo $DOCKER_AUTH > /root/.docker/config.json
@@ -18,12 +18,13 @@ steps:
   - name: build
     image: nixpkgs/nix:nixos-22.05
     commands:
-      - nix-build --attr releasePackages.${ARCH} --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
+      - nix-build --no-build-output --attr pkgs.${ARCH}.release --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}

   - name: check is static binary
     image: nixpkgs/nix:nixos-22.05
     commands:
-      - nix-shell --attr ci --run "./script/not-dynamic.sh result/bin/garage"
+      - nix-build --no-build-output --attr pkgs.${ARCH}.release --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
+      - nix-shell --attr ci --run "./script/not-dynamic.sh result-bin/bin/garage"

   - name: integration tests
     image: nixpkgs/nix:nixos-22.05
@@ -59,8 +60,8 @@ steps:
     environment:
       DOCKER_PLATFORM: "linux/${ARCH}"
       CONTAINER_NAME: "dxflrs/${ARCH}_garage"
-      DOCKER_AUTH:
-        from_secret: docker_auth
+    secrets:
+      - docker_auth
     commands:
       - mkdir -p /root/.docker
       - echo $DOCKER_AUTH > /root/.docker/config.json
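`script/not-dynamic.sh` itself is not part of this diff; a plausible, purely hypothetical sketch of what such a check does:

```bash
# Hypothetical sketch of a "not dynamic" check — the real
# script/not-dynamic.sh is not shown in this diff:
if file "$1" | grep -q 'dynamically linked'; then
    echo "$1 is a dynamically linked binary" >&2
    exit 1
fi
echo "$1 looks static"
```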
Cargo.lock — generated (1669 lines changed; file diff suppressed because it is too large)
Cargo.toml (15 lines changed)
@@ -8,10 +8,7 @@ members = [
 	"src/table",
 	"src/block",
 	"src/model",
-	"src/api/common",
-	"src/api/s3",
-	"src/api/k2v",
-	"src/api/admin",
+	"src/api",
 	"src/web",
 	"src/garage",
 	"src/k2v-client",
@@ -24,10 +21,7 @@ default-members = ["src/garage"]

 # Internal Garage crates
 format_table = { version = "0.1.1", path = "src/format-table" }
-garage_api_common = { version = "1.0.1", path = "src/api/common" }
-garage_api_admin = { version = "1.0.1", path = "src/api/admin" }
-garage_api_s3 = { version = "1.0.1", path = "src/api/s3" }
-garage_api_k2v = { version = "1.0.1", path = "src/api/k2v" }
+garage_api = { version = "1.0.1", path = "src/api" }
 garage_block = { version = "1.0.1", path = "src/block" }
 garage_db = { version = "1.0.1", path = "src/db", default-features = false }
 garage_model = { version = "1.0.1", path = "src/model", default-features = false }
@@ -52,6 +46,7 @@ chrono = "0.4"
 crc32fast = "1.4"
 crc32c = "0.6"
 crypto-common = "0.1"
+digest = "0.10"
 err-derive = "0.3"
 gethostname = "0.4"
 git-version = "0.3.4"
@@ -64,7 +59,7 @@ ipnet = "2.9.0"
 lazy_static = "1.4"
 md-5 = "0.10"
 mktemp = "0.5"
-nix = { version = "0.29", default-features = false, features = ["fs"] }
+nix = { version = "0.27", default-features = false, features = ["fs"] }
 nom = "7.1"
 parse_duration = "2.1"
 pin-project = "1.0.12"
@@ -141,6 +136,8 @@ thiserror = "1.0"
 assert-json-diff = "2.0"
 rustc_version = "0.4.0"
 static_init = "1.0"

+aws-config = "1.1.4"
 aws-sdk-config = "1.13"
 aws-sdk-s3 = "1.14"
@@ -3,5 +3,5 @@ FROM scratch
 ENV RUST_BACKTRACE=1
 ENV RUST_LOG=garage=info

-COPY result/bin/garage /
+COPY result-bin/bin/garage /
 CMD [ "/garage", "server"]
Makefile (13 lines changed)
@@ -1,10 +1,13 @@
-.PHONY: doc all run1 run2 run3
+.PHONY: doc all release shell run1 run2 run3

 all:
-	clear
-	cargo build \
-		--config 'target.x86_64-unknown-linux-gnu.linker="clang"' \
-		--config 'target.x86_64-unknown-linux-gnu.rustflags=["-C", "link-arg=-fuse-ld=mold"]' \
+	clear; cargo build
+
+release:
+	nix-build --attr pkgs.amd64.release --no-build-output
+
+shell:
+	nix-shell

 # ----
default.nix (55 lines changed)
@@ -3,22 +3,53 @@
 with import ./nix/common.nix;

 let
-  pkgs = import nixpkgs { };
+  pkgs = import pkgsSrc { };
   compile = import ./nix/compile.nix;

-  build_release = target: (compile {
-    inherit target system git_version nixpkgs;
-    crane = flake.inputs.crane;
-    rust-overlay = flake.inputs.rust-overlay;
-    release = true;
-  }).garage;
+  build_debug_and_release = (target: {
+    debug = (compile {
+      inherit system target git_version pkgsSrc cargo2nixOverlay;
+      release = false;
+    }).workspace.garage { compileMode = "build"; };
+
+    release = (compile {
+      inherit system target git_version pkgsSrc cargo2nixOverlay;
+      release = true;
+    }).workspace.garage { compileMode = "build"; };
+  });
+
+  test = (rustPkgs:
+    pkgs.symlinkJoin {
+      name = "garage-tests";
+      paths =
+        builtins.map (key: rustPkgs.workspace.${key} { compileMode = "test"; })
+        (builtins.attrNames rustPkgs.workspace);
+    });

 in {
-  releasePackages = {
-    amd64 = build_release "x86_64-unknown-linux-musl";
-    i386 = build_release "i686-unknown-linux-musl";
-    arm64 = build_release "aarch64-unknown-linux-musl";
-    arm = build_release "armv6l-unknown-linux-musleabihf";
+  pkgs = {
+    amd64 = build_debug_and_release "x86_64-unknown-linux-musl";
+    i386 = build_debug_and_release "i686-unknown-linux-musl";
+    arm64 = build_debug_and_release "aarch64-unknown-linux-musl";
+    arm = build_debug_and_release "armv6l-unknown-linux-musleabihf";
+  };
+  test = {
+    amd64 = test (compile {
+      inherit system git_version pkgsSrc cargo2nixOverlay;
+      target = "x86_64-unknown-linux-musl";
+      features = [
+        "garage/bundled-libs"
+        "garage/k2v"
+        "garage/lmdb"
+        "garage/sqlite"
+      ];
+    });
+  };
+  clippy = {
+    amd64 = (compile {
+      inherit system git_version pkgsSrc cargo2nixOverlay;
+      target = "x86_64-unknown-linux-musl";
+      compiler = "clippy";
+    }).workspace.garage { compileMode = "build"; };
   };
-  flakePackages = flake.packages.${system};
 }
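These attribute sets are exactly what the Makefile and CI files elsewhere in this diff invoke; for example:

```bash
# Attribute paths as consumed elsewhere in this diff (locally,
# git rev-parse stands in for the CI commit variables):
nix-build --attr pkgs.amd64.release --no-build-output   # branch-side release build
nix-build --no-build-output --attr clippy.amd64 \
    --argstr git_version "$(git rev-parse HEAD)"        # branch-side lint build
nix-build -j4 --attr flakePackages.dev                  # main-side dev build
```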
@@ -17,7 +17,7 @@ Garage can also help you serve this content.

 ## Gitea

-You can use Garage with Gitea to store your [git LFS](https://git-lfs.github.com/) data, your users' avatar, and their attachments.
+You can use Garage with Gitea to store your [git LFS](https://git-lfs.github.com/) data, your users' avatar, and their attachements.
 You can configure a different target for each data type (check `[lfs]` and `[attachment]` sections of the Gitea documentation) and you can provide a default one through the `[storage]` section.

 Let's start by creating a key and a bucket (your key id and secret will be needed later, keep them somewhere):
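The key/bucket creation referred to here typically looks like the following with the `garage` CLI — a sketch; names are placeholders and flag syntax may differ across Garage versions:

```bash
# Sketch: create a key and a bucket for Gitea
garage key create gitea-key        # prints the key ID and secret — keep them
garage bucket create gitea
garage bucket allow gitea --read --write --key gitea-key
```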
@@ -36,7 +36,7 @@ sudo killall nix-daemon
 Now you can enter our nix-shell, all the required packages will be downloaded but they will not pollute your environment outside of the shell:

 ```bash
-nix-shell -A devShell
+nix-shell
 ```

 You can use the traditional Rust development workflow:
@@ -65,8 +65,8 @@ nix-build -j $(nproc) --max-jobs auto
 ```

 Our build has multiple parameters you might want to set:
-- `release` to build with release optimisations instead of debug
-- `target` allows for cross compilation
+- `release` build with release optimisations instead of debug
+- `target allows` for cross compilation
 - `compileMode` can be set to test or bench to build a unit test runner
 - `git_version` to inject the hash to display when running `garage stats`
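Passing those parameters on the command line might look like the sketch below; `git_version` is the form used by the CI files in this diff, while the other flags assume `default.nix` exposes them as top-level arguments:

```bash
# Sketch: setting the build parameters listed above
nix-build \
    --arg release true \
    --argstr target x86_64-unknown-linux-musl \
    --argstr git_version "$(git rev-parse --short HEAD)"
```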
@@ -21,14 +21,14 @@ data_dir = [
 ```

 Garage will automatically balance all blocks stored by the node
-among the different specified directories, proportionally to the
+among the different specified directories, proportionnally to the
 specified capacities.

 ## Updating the list of storage locations

 If you add new storage locations to your `data_dir`,
 Garage will not rebalance existing data between storage locations.
-Newly written blocks will be balanced proportionally to the specified capacities,
+Newly written blocks will be balanced proportionnally to the specified capacities,
 and existing data may be moved between drives to improve balancing,
 but only opportunistically when a data block is re-written (e.g. an object
 is re-uploaded, or an object with a duplicate block is uploaded).
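The `data_dir` list this section refers to (truncated in the hunk context above) follows Garage's documented multi-directory form; a sketch, with placeholder paths and capacities:

```bash
# Sketch: two storage locations with explicit capacities in garage.toml
cat >> /etc/garage.toml <<'EOF'
data_dir = [
    { path = "/mnt/hdd1", capacity = "2T" },
    { path = "/mnt/hdd2", capacity = "4T" },
]
EOF
```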
@@ -349,7 +349,7 @@ Check [our s3 compatibility list](@/documentation/reference-manual/s3-compatibil

 ### Other tools for interacting with Garage

-The following tools can also be used to send and receive files from/to Garage:
+The following tools can also be used to send and recieve files from/to Garage:

 - [minio-client](@/documentation/connect/cli.md#minio-client)
 - [s3cmd](@/documentation/connect/cli.md#s3cmd)
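Any such S3 client just needs to be pointed at a Garage endpoint; for instance (endpoint and region below are placeholders):

```bash
# Sketch: listing buckets through Garage's S3 API with the AWS CLI
aws --endpoint-url http://localhost:3900 --region garage s3 ls
```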
@ -13,7 +13,6 @@ consistency_mode = "consistent"
|
||||||
|
|
||||||
metadata_dir = "/var/lib/garage/meta"
|
metadata_dir = "/var/lib/garage/meta"
|
||||||
data_dir = "/var/lib/garage/data"
|
data_dir = "/var/lib/garage/data"
|
||||||
metadata_snapshots_dir = "/var/lib/garage/snapshots"
|
|
||||||
metadata_fsync = true
|
metadata_fsync = true
|
||||||
data_fsync = false
|
data_fsync = false
|
||||||
disable_scrub = false
|
disable_scrub = false
|
||||||
|
@ -106,7 +105,6 @@ Top-level configuration options:
|
||||||
[`metadata_auto_snapshot_interval`](#metadata_auto_snapshot_interval),
|
[`metadata_auto_snapshot_interval`](#metadata_auto_snapshot_interval),
|
||||||
[`metadata_dir`](#metadata_dir),
|
[`metadata_dir`](#metadata_dir),
|
||||||
[`metadata_fsync`](#metadata_fsync),
|
[`metadata_fsync`](#metadata_fsync),
|
||||||
[`metadata_snapshots_dir`](#metadata_snapshots_dir),
|
|
||||||
[`replication_factor`](#replication_factor),
|
[`replication_factor`](#replication_factor),
|
||||||
[`consistency_mode`](#consistency_mode),
|
[`consistency_mode`](#consistency_mode),
|
||||||
[`rpc_bind_addr`](#rpc_bind_addr),
|
[`rpc_bind_addr`](#rpc_bind_addr),
|
||||||
|
@ -277,7 +275,6 @@ as the index of all objects, object version and object blocks.
|
||||||
|
|
||||||
Store this folder on a fast SSD drive if possible to maximize Garage's performance.
|
Store this folder on a fast SSD drive if possible to maximize Garage's performance.
|
||||||
|
|
||||||
|
|
||||||
#### `data_dir` {#data_dir}
|
#### `data_dir` {#data_dir}
|
||||||
|
|
||||||
The directory in which Garage will store the data blocks of objects.
|
The directory in which Garage will store the data blocks of objects.
|
||||||
|
@ -298,25 +295,6 @@ data_dir = [
|
||||||
See [the dedicated documentation page](@/documentation/operations/multi-hdd.md)
|
See [the dedicated documentation page](@/documentation/operations/multi-hdd.md)
|
||||||
on how to operate Garage in such a setup.
|
on how to operate Garage in such a setup.
|
||||||
|
|
||||||
#### `metadata_snapshots_dir` (since Garage `v1.0.2`) {#metadata_snapshots_dir}
|
|
||||||
|
|
||||||
The directory in which Garage will store metadata snapshots when it
|
|
||||||
performs a snapshot of the metadata database, either when instructed to do
|
|
||||||
so from a RPC call or regularly through
|
|
||||||
[`metadata_auto_snapshot_interval`](#metadata_auto_snapshot_interval).
|
|
||||||
|
|
||||||
By default, Garage will store snapshots into a `snapshots/` subdirectory
|
|
||||||
of [`metadata_dir`](#metadata_dir). This might quickly fill up your
|
|
||||||
metadata storage space if you use snapshots, because Garage will need up
|
|
||||||
to 4x the space of the existing metadata database: each snapshot requires
|
|
||||||
roughly as much space as the original database, and Garage temporarily
|
|
||||||
needs to store up to three different snapshots before it cleans up the oldest
|
|
||||||
snapshot to go back to two stored snapshots.
|
|
||||||
|
|
||||||
To prevent filling your disk, you might to change this setting to a
|
|
||||||
directory with ample available space, e.g. on the same storage space as
|
|
||||||
[`data_dir`](#data_dir).
|
|
||||||
|
|
||||||
#### `db_engine` (since `v0.8.0`) {#db_engine}
|
#### `db_engine` (since `v0.8.0`) {#db_engine}
|
||||||
|
|
||||||
Since `v0.8.0`, Garage can use alternative storage backends as follows:
|
Since `v0.8.0`, Garage can use alternative storage backends as follows:
|
||||||
|
|
|
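Relocating snapshots as the removed paragraph suggests would look like the sketch below; paths and the snapshot interval are placeholders:

```bash
# Sketch: point metadata snapshots at a roomier disk, per the removed
# documentation above
cat >> /etc/garage.toml <<'EOF'
metadata_dir = "/var/lib/garage/meta"
metadata_snapshots_dir = "/mnt/big-disk/garage-snapshots"
metadata_auto_snapshot_interval = "24h"
EOF
```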
@@ -61,7 +61,7 @@ directed to a Garage cluster can be handled independently of one another instead
 of going through a central bottleneck (the leader node).
 As a consequence, requests can be handled much faster, even in cases where latency
 between cluster nodes is important (see our [benchmarks](@/documentation/design/benchmarks/index.md) for data on this).
-This is particularly useful when nodes are far from one another and talk to one other through standard Internet connections.
+This is particularly usefull when nodes are far from one another and talk to one other through standard Internet connections.

 ### Web server for static websites
@@ -392,7 +392,7 @@ table_merkle_updater_todo_queue_length{table_name="block_ref"} 0

 #### `table_sync_items_received`, `table_sync_items_sent` (counters)

-Number of data items sent to/received from other nodes during resync procedures
+Number of data items sent to/recieved from other nodes during resync procedures

 ```
 table_sync_items_received{from="<remote node>",table_name="bucket_v2"} 3
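A counter like this is usually consumed as a rate; for instance through the Prometheus HTTP API (server address is a placeholder):

```bash
# Sketch: per-table resync throughput from the counter documented above
curl -s 'http://prometheus.local:9090/api/v1/query' \
    --data-urlencode 'query=sum by (table_name) (rate(table_sync_items_received[5m]))'
```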
@@ -42,7 +42,7 @@ The general principle are similar, but details have not been updated.**
 A version is defined by the existence of at least one entry in the blocks table for a certain version UUID.
 We must keep the following invariant: if a version exists in the blocks table, it has to be referenced in the objects table.
 We explicitly manage concurrent versions of an object: the version timestamp and version UUID columns are index columns, thus we may have several concurrent versions of an object.
-Important: before deleting an older version from the objects table, we must make sure that we did a successful delete of the blocks of that version from the blocks table.
+Important: before deleting an older version from the objects table, we must make sure that we did a successfull delete of the blocks of that version from the blocks table.

 Thus, the workflow for reading an object is as follows:

@@ -95,7 +95,7 @@ Known issue: if someone is reading from a version that we want to delete and the
 Usefull metadata:

 - list of versions that reference this block in the Casandra table, so that we can do GC by checking in Cassandra that the lines still exist
-- list of other nodes that we know have acknowledged a write of this block, useful in the rebalancing algorithm
+- list of other nodes that we know have acknowledged a write of this block, usefull in the rebalancing algorithm

 Write strategy: have a single thread that does all write IO so that it is serialized (or have several threads that manage independent parts of the hash space). When writing a blob, write it to a temporary file, close, then rename so that a concurrent read gets a consistent result (either not found or found with whole content).
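The write strategy in that last paragraph, sketched as shell steps (variable names are placeholders):

```bash
# Sketch of the strategy described above: write to a temporary file,
# flush, then rename, so a concurrent reader sees either nothing or the
# complete blob (rename is atomic within a filesystem):
tmp="$(mktemp "$blockdir/.tmp.XXXXXX")"
cat > "$tmp"                   # write the whole blob
sync "$tmp"                    # flush to disk before publishing
mv "$tmp" "$blockdir/$hash"    # atomically make it visible
```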
@@ -68,7 +68,7 @@ The migration steps are as follows:
 5. Turn off Garage 0.3

 6. Backup metadata folders if you can (i.e. if you have space to do it
-   somewhere). Backuping data folders could also be useful but that's much
+   somewhere). Backuping data folders could also be usefull but that's much
    harder to do. If your filesystem supports snapshots, this could be a good
    time to use them.
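Step 6 in practice, as a sketch — paths and snapshot commands depend entirely on the deployment:

```bash
# Sketch for step 6: back up the metadata folder before migrating
# (placeholder paths; stop the daemon first):
systemctl stop garage
cp -a /var/lib/garage/meta /var/lib/garage/meta.backup-v0.3
# or, if the filesystem supports snapshots, e.g. on ZFS:
zfs snapshot tank/garage@pre-migration
```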
@@ -37,7 +37,7 @@ There are two reasons for this:

 Reminder: rules of simplicity, concerning changes to Garage's source code.
 Always question what we are doing.
-Never do anything just because it looks nice or because we "think" it might be useful at some later point but without knowing precisely why/when.
+Never do anything just because it looks nice or because we "think" it might be usefull at some later point but without knowing precisely why/when.
 Only do things that make perfect sense in the context of what we currently know.

 ## References
@@ -562,7 +562,7 @@ token>", v: ["<value1>", ...] }`, with the following fields:
 - in case of concurrent update and deletion, a `null` is added to the list of concurrent values

 - if the `tombstones` query parameter is set to `true`, tombstones are returned
-  for items that have been deleted (this can be useful for inserting after an
+  for items that have been deleted (this can be usefull for inserting after an
   item that has been deleted, so that the insert is not considered
   concurrent with the delete). Tombstones are returned as tuples in the
   same format with only `null` values
flake.lock — generated (78 lines changed)
@@ -1,17 +1,28 @@
 {
   "nodes": {
-    "crane": {
+    "cargo2nix": {
+      "inputs": {
+        "flake-compat": [
+          "flake-compat"
+        ],
+        "flake-utils": "flake-utils",
+        "nixpkgs": [
+          "nixpkgs"
+        ],
+        "rust-overlay": "rust-overlay"
+      },
       "locked": {
-        "lastModified": 1737689766,
-        "narHash": "sha256-ivVXYaYlShxYoKfSo5+y5930qMKKJ8CLcAoIBPQfJ6s=",
-        "owner": "ipetkov",
-        "repo": "crane",
-        "rev": "6fe74265bbb6d016d663b1091f015e2976c4a527",
+        "lastModified": 1666087781,
+        "narHash": "sha256-trKVdjMZ8mNkGfLcY5LsJJGtdV3xJDZnMVrkFjErlcs=",
+        "owner": "Alexis211",
+        "repo": "cargo2nix",
+        "rev": "a7a61179b66054904ef6a195d8da736eaaa06c36",
         "type": "github"
       },
       "original": {
-        "owner": "ipetkov",
-        "repo": "crane",
+        "owner": "Alexis211",
+        "repo": "cargo2nix",
+        "rev": "a7a61179b66054904ef6a195d8da736eaaa06c36",
         "type": "github"
       }
     },
@@ -31,15 +42,12 @@
       }
     },
     "flake-utils": {
-      "inputs": {
-        "systems": "systems"
-      },
       "locked": {
-        "lastModified": 1731533236,
-        "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
+        "lastModified": 1659877975,
+        "narHash": "sha256-zllb8aq3YO3h8B/U0/J1WBgAL8EX5yWf5pMj3G0NAmc=",
         "owner": "numtide",
         "repo": "flake-utils",
-        "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
+        "rev": "c0e246b9b83f637f4681389ecabcb2681b4f3af0",
         "type": "github"
       },
       "original": {
@@ -50,62 +58,50 @@
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1736692550,
-        "narHash": "sha256-7tk8xH+g0sJkKLTJFOxphJxxOjMDFMWv24nXslaU2ro=",
+        "lastModified": 1724681257,
+        "narHash": "sha256-EJRuc5Qp7yfXko5ZNeEMYAs4DzAvkCyALuJ/tGllhN4=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "7c4869c47090dd7f9f1bdfb49a22aea026996815",
+        "rev": "0239aeb2f82ea27ccd6b61582b8f7fb8750eeada",
         "type": "github"
       },
       "original": {
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "7c4869c47090dd7f9f1bdfb49a22aea026996815",
+        "rev": "0239aeb2f82ea27ccd6b61582b8f7fb8750eeada",
         "type": "github"
       }
     },
     "root": {
       "inputs": {
-        "crane": "crane",
+        "cargo2nix": "cargo2nix",
         "flake-compat": "flake-compat",
-        "flake-utils": "flake-utils",
-        "nixpkgs": "nixpkgs",
-        "rust-overlay": "rust-overlay"
+        "flake-utils": [
+          "cargo2nix",
+          "flake-utils"
+        ],
+        "nixpkgs": "nixpkgs"
       }
     },
     "rust-overlay": {
       "inputs": {
         "nixpkgs": [
+          "cargo2nix",
           "nixpkgs"
         ]
       },
       "locked": {
-        "lastModified": 1738549608,
-        "narHash": "sha256-GdyT9QEUSx5k/n8kILuNy83vxxdyUfJ8jL5mMpQZWfw=",
+        "lastModified": 1724638882,
+        "narHash": "sha256-ap2jIQi/FuUHR6HCht6ASWhoz8EiB99XmI8Esot38VE=",
         "owner": "oxalica",
         "repo": "rust-overlay",
-        "rev": "35c6f8c4352f995ecd53896200769f80a3e8f22d",
+        "rev": "19b70f147b9c67a759e35824b241f1ed92e46694",
         "type": "github"
       },
       "original": {
         "owner": "oxalica",
         "repo": "rust-overlay",
-        "rev": "35c6f8c4352f995ecd53896200769f80a3e8f22d",
-        "type": "github"
-      }
-    },
-    "systems": {
-      "locked": {
-        "lastModified": 1681028828,
-        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
-        "owner": "nix-systems",
-        "repo": "default",
-        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
-        "type": "github"
-      },
-      "original": {
-        "owner": "nix-systems",
-        "repo": "default",
+        "rev": "19b70f147b9c67a759e35824b241f1ed92e46694",
         "type": "github"
       }
     }
   }
 }
flake.nix (103 lines changed)
@@ -2,84 +2,89 @@
   description =
     "Garage, an S3-compatible distributed object store for self-hosted deployments";

-  # Nixpkgs 24.11 as of 2025-01-12
+  # Nixpkgs 24.05 as of 2024-08-26 has rustc v1.77
   inputs.nixpkgs.url =
-    "github:NixOS/nixpkgs/7c4869c47090dd7f9f1bdfb49a22aea026996815";
+    "github:NixOS/nixpkgs/0239aeb2f82ea27ccd6b61582b8f7fb8750eeada";

-  # Rust overlay as of 2025-02-03
-  inputs.rust-overlay.url =
-    "github:oxalica/rust-overlay/35c6f8c4352f995ecd53896200769f80a3e8f22d";
-  inputs.rust-overlay.inputs.nixpkgs.follows = "nixpkgs";
-
-  inputs.crane.url = "github:ipetkov/crane";
-
   inputs.flake-compat.url = "github:nix-community/flake-compat";
-  inputs.flake-utils.url = "github:numtide/flake-utils";

-  outputs = { self, nixpkgs, flake-utils, crane, rust-overlay, ... }:
+  inputs.cargo2nix = {
+    # As of 2022-10-18: two small patches over unstable branch, one for clippy and one to fix feature detection
+    url = "github:Alexis211/cargo2nix/a7a61179b66054904ef6a195d8da736eaaa06c36";
+
+    # As of 2023-04-25:
+    # - my two patches were merged into unstable (one for clippy and one to "fix" feature detection)
+    # - rustc v1.66
+    # url = "github:cargo2nix/cargo2nix/8fb57a670f7993bfc24099c33eb9c5abb51f29a2";
+
+    # Rust overlay as of 2024-08-26
+    inputs.rust-overlay.url =
+      "github:oxalica/rust-overlay/19b70f147b9c67a759e35824b241f1ed92e46694";
+
+    inputs.nixpkgs.follows = "nixpkgs";
+    inputs.flake-compat.follows = "flake-compat";
+  };
+
+  inputs.flake-utils.follows = "cargo2nix/flake-utils";
+
+  outputs = { self, nixpkgs, cargo2nix, flake-utils, ... }:
     let
+      git_version = self.lastModifiedDate;
       compile = import ./nix/compile.nix;
     in
     flake-utils.lib.eachDefaultSystem (system:
       let
         pkgs = nixpkgs.legacyPackages.${system};
-        packageFor = target: release: (compile {
-          inherit system target nixpkgs crane rust-overlay release;
-        }).garage;
-        testWith = extraTestEnv: (compile {
-          inherit system nixpkgs crane rust-overlay extraTestEnv;
-          release = false;
-        }).garage-test;
       in
       {
-        packages = {
+        packages =
+          let
+            packageFor = target: (compile {
+              inherit system git_version target;
+              pkgsSrc = nixpkgs;
+              cargo2nixOverlay = cargo2nix.overlays.default;
+              release = true;
+            }).workspace.garage { compileMode = "build"; };
+          in
+          {
           # default = native release build
-          default = packageFor null true;
-          # <arch> = cross-compiled, statically-linked release builds
-          amd64 = packageFor "x86_64-unknown-linux-musl" true;
-          i386 = packageFor "i686-unknown-linux-musl" true;
-          arm64 = packageFor "aarch64-unknown-linux-musl" true;
-          arm = packageFor "armv6l-unknown-linux-musl" true;
-
-          # dev = native dev build
-          dev = packageFor null false;
-
-          # test = cargo test
-          tests = testWith {};
-          tests-lmdb = testWith {
-            GARAGE_TEST_INTEGRATION_DB_ENGINE = "lmdb";
-          };
-          tests-sqlite = testWith {
-            GARAGE_TEST_INTEGRATION_DB_ENGINE = "sqlite";
-          };
+            default = packageFor null;
+            # other = cross-compiled, statically-linked builds
+            amd64 = packageFor "x86_64-unknown-linux-musl";
+            i386 = packageFor "i686-unknown-linux-musl";
+            arm64 = packageFor "aarch64-unknown-linux-musl";
+            arm = packageFor "armv6l-unknown-linux-musl";
          };

        # ---- developpment shell, for making native builds only ----
        devShells =
          let
-            targets = compile {
-              inherit system nixpkgs crane rust-overlay;
-            };
+            shellWithPackages = (packages: (compile {
+              inherit system git_version;
+              pkgsSrc = nixpkgs;
+              cargo2nixOverlay = cargo2nix.overlays.default;
+            }).workspaceShell { inherit packages; });
          in
          {
-            default = targets.devShell;
+            default = shellWithPackages
+              (with pkgs; [
+                rustfmt
+                clang
+                mold
+              ]);

            # import the full shell using `nix develop .#full`
-            full = pkgs.mkShell {
-              buildInputs = with pkgs; [
-                targets.toolchain
-                protobuf
+            full = shellWithPackages (with pkgs; [
+              rustfmt
+              rust-analyzer
               clang
               mold
               # ---- extra packages for dev tasks ----
-                rust-analyzer
               cargo-audit
               cargo-outdated
               cargo-machete
               nixpkgs-fmt
-              ];
-            };
+            ]);
          };
      });
 }
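Either side of this hunk is driven through the standard flake commands; a short sketch using the outputs defined above:

```bash
# Sketch: consuming the flake outputs defined above
nix build .#amd64        # cross-compiled, statically-linked build
nix develop              # default dev shell
nix develop .#full       # full shell, per the comment in the hunk
```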
@@ -2,7 +2,7 @@

 with import ./common.nix;
 let
-  pkgs = import nixpkgs { };
+  pkgs = import pkgsSrc { };
   lib = pkgs.lib;

 /* Converts a key list and a value list to a set
@@ -10,9 +10,9 @@ let

   flake = (import flake-compat { system = builtins.currentSystem; src = ../.; });
 in
-{
-  flake = flake.defaultNix;
-  nixpkgs = flake.defaultNix.inputs.nixpkgs;
-  devShells = flake.defaultNix.devShells.${builtins.currentSystem};
+rec {
+  pkgsSrc = flake.defaultNix.inputs.nixpkgs;
+  cargo2nix = flake.defaultNix.inputs.cargo2nix;
+  cargo2nixOverlay = cargo2nix.overlays.default;
+  devShells = builtins.getAttr builtins.currentSystem flake.defaultNix.devShells;
 }
nix/compile.nix (318 lines changed)
@@ -1,64 +1,164 @@
-{
-  /* build inputs */
-  nixpkgs,
-  crane,
-  rust-overlay,
-
-  /* parameters */
-  system,
-  git_version ? null,
-  target ? null,
-  release ? false,
-  features ? null,
-  extraTestEnv ? {}
-}:
+{ system, target ? null, pkgsSrc, cargo2nixOverlay, compiler ? "rustc"
+, release ? false, git_version ? null, features ? null, }:

 let
   log = v: builtins.trace v v;

-  # NixOS and Rust/Cargo triples do not match for ARM, fix it here.
-  rustTarget = if target == "armv6l-unknown-linux-musleabihf" then
-    "arm-unknown-linux-musleabihf"
-  else
-    target;
-
-  rustTargetEnvMap = {
-    "x86_64-unknown-linux-musl" = "X86_64_UNKNOWN_LINUX_MUSL";
-    "aarch64-unknown-linux-musl" = "AARCH64_UNKNOWN_LINUX_MUSL";
-    "i686-unknown-linux-musl" = "I686_UNKNOWN_LINUX_MUSL";
-    "arm-unknown-linux-musleabihf" = "ARM_UNKNOWN_LINUX_MUSLEABIHF";
-  };
-
-  pkgsNative = import nixpkgs {
-    inherit system;
-    overlays = [ (import rust-overlay) ];
-  };
-
   pkgs = if target != null then
-    import nixpkgs {
+    import pkgsSrc {
       inherit system;
       crossSystem = {
         config = target;
         isStatic = true;
       };
-      overlays = [ (import rust-overlay) ];
+      overlays = [ cargo2nixOverlay ];
     }
   else
-    pkgsNative;
+    import pkgsSrc {
+      inherit system;
+      overlays = [ cargo2nixOverlay ];
+    };

-  inherit (pkgs) lib stdenv;
+  toolchainOptions = {
+    rustVersion = "1.77.0";
+    extraRustComponents = [ "clippy" ];
+  };

-  toolchainFn = (p: p.rust-bin.stable."1.82.0".default.override {
-    targets = lib.optionals (target != null) [ rustTarget ];
-    extensions = [
-      "rust-src"
-      "rustfmt"
+  buildEnv = (drv:
+    {
+      rustc = drv.setBuildEnv;
+      clippy = ''
+        ${drv.setBuildEnv or ""}
+        echo
+        echo --- BUILDING WITH CLIPPY ---
+        echo
+
+        export NIX_RUST_BUILD_FLAGS="''${NIX_RUST_BUILD_FLAGS} --deny warnings"
+        export RUSTC="''${CLIPPY_DRIVER}"
+      '';
+    }.${compiler});
+
+  /* Cargo2nix provides many overrides by default, you can take inspiration from them:
+     https://github.com/cargo2nix/cargo2nix/blob/master/overlay/overrides.nix
+
+     You can have a complete list of the available options by looking at the overriden object, mkcrate:
+     https://github.com/cargo2nix/cargo2nix/blob/master/overlay/mkcrate.nix
+  */
+  packageOverrides = pkgs:
+    pkgs.rustBuilder.overrides.all ++ [
+      /* [1] We add some logic to compile our crates with clippy, it provides us many additional lints
+
+         [2] We need to alter Nix hardening to make static binaries: PIE,
+         Position Independent Executables seems to be supported only on amd64. Having
+         this flag set either 1. make our executables crash or 2. compile as dynamic on some platforms.
+         Here, we deactivate it. Later (find `codegenOpts`), we reactivate it for supported targets
+         (only amd64 curently) through the `-static-pie` flag.
+         PIE is a feature used by ASLR, which helps mitigate security issues.
+         Learn more about Nix Hardening at: https://github.com/NixOS/nixpkgs/blob/master/pkgs/build-support/cc-wrapper/add-hardening.sh
+
+         [3] We want to inject the git version while keeping the build deterministic.
+         As we do not want to consider the .git folder as part of the input source,
+         we ask the user (the CI often) to pass the value to Nix.
+
+         [4] We don't want libsodium-sys and zstd-sys to try to use pkgconfig to build against a system library.
+         However the features to do so get activated for some reason (due to a bug in cargo2nix?),
+         so disable them manually here.
+      */
+      (pkgs.rustBuilder.rustLib.makeOverride {
+        name = "garage";
+        overrideAttrs = drv:
+          (if git_version != null then {
+            # [3]
+            preConfigure = ''
+              ${drv.preConfigure or ""}
+              export GIT_VERSION="${git_version}"
+            '';
+          } else
+            { }) // {
+              # [1]
+              setBuildEnv = (buildEnv drv);
+              # [2]
+              hardeningDisable = [ "pie" ];
+            };
+      })
+
+      (pkgs.rustBuilder.rustLib.makeOverride {
+        name = "garage_rpc";
+        overrideAttrs = drv: { # [1]
+          setBuildEnv = (buildEnv drv);
+        };
+      })
+
+      (pkgs.rustBuilder.rustLib.makeOverride {
+        name = "garage_db";
+        overrideAttrs = drv: { # [1]
+          setBuildEnv = (buildEnv drv);
+        };
+      })
+
+      (pkgs.rustBuilder.rustLib.makeOverride {
+        name = "garage_util";
+        overrideAttrs = drv: { # [1]
+          setBuildEnv = (buildEnv drv);
+        };
+      })
+
+      (pkgs.rustBuilder.rustLib.makeOverride {
+        name = "garage_table";
+        overrideAttrs = drv: { # [1]
+          setBuildEnv = (buildEnv drv);
+        };
+      })
+
+      (pkgs.rustBuilder.rustLib.makeOverride {
+        name = "garage_block";
+        overrideAttrs = drv: { # [1]
+          setBuildEnv = (buildEnv drv);
+        };
+      })
+
+      (pkgs.rustBuilder.rustLib.makeOverride {
+        name = "garage_model";
+        overrideAttrs = drv: { # [1]
+          setBuildEnv = (buildEnv drv);
+        };
+      })
+
+      (pkgs.rustBuilder.rustLib.makeOverride {
+        name = "garage_api";
+        overrideAttrs = drv: { # [1]
+          setBuildEnv = (buildEnv drv);
+        };
+      })
+
+      (pkgs.rustBuilder.rustLib.makeOverride {
+        name = "garage_web";
+        overrideAttrs = drv: { # [1]
+          setBuildEnv = (buildEnv drv);
+        };
+      })
+
+      (pkgs.rustBuilder.rustLib.makeOverride {
+        name = "k2v-client";
+        overrideAttrs = drv: { # [1]
+          setBuildEnv = (buildEnv drv);
+        };
+      })
+
+      (pkgs.rustBuilder.rustLib.makeOverride {
+        name = "libsodium-sys";
+        overrideArgs = old: {
+          features = [ ]; # [4]
+        };
+      })
+
+      (pkgs.rustBuilder.rustLib.makeOverride {
+        name = "zstd-sys";
+        overrideArgs = old: {
+          features = [ ]; # [4]
+        };
+      })
     ];
-  });
-
-  craneLib = (crane.mkLib pkgs).overrideToolchain toolchainFn;
-
-  src = craneLib.cleanCargoSource ../.;

   /* We ship some parts of the code disabled by default by putting them behind a flag.
      It speeds up the compilation (when the feature is not required) and released crates have less dependency by default (less attack surface, disk space, etc.).
@@ -68,15 +168,16 @@ let
   rootFeatures = if features != null then
     features
   else
-    ([ "bundled-libs" "lmdb" "sqlite" "k2v" ] ++ (lib.optionals release [
-      "consul-discovery"
-      "kubernetes-discovery"
-      "metrics"
-      "telemetry-otlp"
-      "syslog"
-    ]));
+    ([ "garage/bundled-libs" "garage/lmdb" "garage/sqlite" "garage/k2v" ] ++ (if release then [
+      "garage/consul-discovery"
+      "garage/kubernetes-discovery"
+      "garage/metrics"
+      "garage/telemetry-otlp"
+      "garage/syslog"
+    ] else
+      [ ]));

-  featuresStr = lib.concatStringsSep "," rootFeatures;
+  packageFun = import ../Cargo.nix;

   /* We compile fully static binaries with musl to simplify deployment on most systems.
      When possible, we reactivate PIE hardening (see above).
@@ -87,9 +188,12 @@ let
      For more information on static builds, please refer to Rust's RFC 1721.
      https://rust-lang.github.io/rfcs/1721-crt-static.html#specifying-dynamicstatic-c-runtime-linkage
   */
-  codegenOptsMap = {
-    "x86_64-unknown-linux-musl" =
-      [ "target-feature=+crt-static" "link-arg=-static-pie" ];
+  codegenOpts = {
+    "armv6l-unknown-linux-musleabihf" = [
+      "target-feature=+crt-static"
+      "link-arg=-static"
+    ]; # compile as dynamic with static-pie
     "aarch64-unknown-linux-musl" = [
       "target-feature=+crt-static"
       "link-arg=-static"
@@ -98,95 +202,17 @@ let
       "target-feature=+crt-static"
       "link-arg=-static"
     ]; # segfault with static-pie
-    "armv6l-unknown-linux-musleabihf" = [
-      "target-feature=+crt-static"
-      "link-arg=-static"
-    ]; # compile as dynamic with static-pie
+    "x86_64-unknown-linux-musl" =
+      [ "target-feature=+crt-static" "link-arg=-static-pie" ];
   };

-  codegenOpts = if target != null then codegenOptsMap.${target} else [
-    "link-arg=-fuse-ld=mold"
-  ];
+  # NixOS and Rust/Cargo triples do not match for ARM, fix it here.
+  rustTarget = if target == "armv6l-unknown-linux-musleabihf" then
+    "arm-unknown-linux-musleabihf"
+  else
+    target;

-  commonArgs =
-    {
-      inherit src;
-      pname = "garage";
-      version = "dev";
-
-      strictDeps = true;
-      cargoExtraArgs = "--locked --features ${featuresStr}";
-      cargoTestExtraArgs = "--workspace";
-
-      nativeBuildInputs = [
-        pkgsNative.protobuf
-        pkgs.stdenv.cc
-      ] ++ lib.optionals (target == null) [
-        pkgs.clang
-        pkgs.mold
-      ];
-
-      CARGO_PROFILE = if release then "release" else "dev";
-      CARGO_BUILD_RUSTFLAGS =
-        lib.concatStringsSep
-          " "
-          (builtins.map (flag: "-C ${flag}") codegenOpts);
-    }
-    //
-    (if rustTarget != null then {
-      CARGO_BUILD_TARGET = rustTarget;
-
-      "CARGO_TARGET_${rustTargetEnvMap.${rustTarget}}_LINKER" = "${stdenv.cc.targetPrefix}cc";
-
-      HOST_CC = "${stdenv.cc.nativePrefix}cc";
-      TARGET_CC = "${stdenv.cc.targetPrefix}cc";
-    } else {
-      CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER = "clang";
-    });
-
-in rec {
-  toolchain = toolchainFn pkgs;
-
-  devShell = pkgs.mkShell {
-    buildInputs = [
-      toolchain
-    ] ++ (with pkgs; [
-      protobuf
-      clang
-      mold
-    ]);
-  };
-
-  # ---- building garage ----
-
-  garage-deps = craneLib.buildDepsOnly commonArgs;
-
-  garage = craneLib.buildPackage (commonArgs // {
-    cargoArtifacts = garage-deps;
-
-    doCheck = false;
-  } //
-  (if git_version != null then {
-    version = git_version;
-    GIT_VERSION = git_version;
-  } else {}));
-
-  # ---- testing garage ----
-
-  garage-test-bin = craneLib.cargoBuild (commonArgs // {
-    cargoArtifacts = garage-deps;
-
-    pname = "garage-tests";
-
-    CARGO_PROFILE = "test";
-    cargoExtraArgs = "${commonArgs.cargoExtraArgs} --tests --workspace";
-    doCheck = false;
-  });
-
-  garage-test = craneLib.cargoTest (commonArgs // {
-    cargoArtifacts = garage-test-bin;
-    nativeBuildInputs = commonArgs.nativeBuildInputs ++ [
-      pkgs.cacert
-    ];
-  } // extraTestEnv);
-}
+in pkgs.rustBuilder.makePackageSet ({
+  inherit release packageFun packageOverrides codegenOpts rootFeatures;
+  target = rustTarget;
+} // toolchainOptions)
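On both sides, the `codegenOpts` entries end up as rustc `-C` flags (the main-side version assembles `CARGO_BUILD_RUSTFLAGS` from them explicitly). Outside Nix, the rough equivalent for the x86_64 musl target is the following sketch:

```bash
# Sketch: the approximate cargo equivalent of the x86_64 codegen options above
RUSTFLAGS="-C target-feature=+crt-static -C link-arg=-static-pie" \
    cargo build --release --target x86_64-unknown-linux-musl
```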
@@ -15,7 +15,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.6.0
+version: 0.5.1

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
@ -1,86 +0,0 @@
|
||||||
# garage
|
|
||||||
|
|
||||||
![Version: 0.6.0](https://img.shields.io/badge/Version-0.6.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.0.1](https://img.shields.io/badge/AppVersion-v1.0.1-informational?style=flat-square)
|
|
||||||
|
|
||||||
S3-compatible object store for small self-hosted geo-distributed deployments
|
|
||||||
|
|
||||||
## Values
|
|
||||||
|
|
||||||
| Key | Type | Default | Description |
|
|
||||||
|-----|------|---------|-------------|
|
|
||||||
| affinity | object | `{}` | |
|
|
||||||
| deployment.kind | string | `"StatefulSet"` | Switchable to DaemonSet |
|
|
||||||
| deployment.podManagementPolicy | string | `"OrderedReady"` | If using statefulset, allow Parallel or OrderedReady (default) |
|
|
||||||
| deployment.replicaCount | int | `3` | Number of StatefulSet replicas/garage nodes to start |
|
|
||||||
| environment | object | `{}` | |
|
|
||||||
| extraVolumeMounts | object | `{}` | |
|
|
||||||
| extraVolumes | object | `{}` | |
|
|
||||||
| fullnameOverride | string | `""` | |
|
|
||||||
| garage.blockSize | string | `"1048576"` | Defaults is 1MB An increase can result in better performance in certain scenarios https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#block-size |
|
|
||||||
| garage.bootstrapPeers | list | `[]` | This is not required if you use the integrated kubernetes discovery |
|
|
||||||
| garage.compressionLevel | string | `"1"` | zstd compression level of stored blocks https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#compression-level |
| garage.dbEngine | string | `"lmdb"` | Can be changed for better performance on certain systems https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db-engine-since-v0-8-0 |
| garage.existingConfigMap | string | `""` | If not an empty string, use an existing ConfigMap for the garage.toml; if set, garage.toml is ignored |
| garage.garageTomlString | string | `""` | String template for the garage configuration; if set, the values above are ignored. Values can be templated, see https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/ |
| garage.kubernetesSkipCrd | bool | `false` | Set to true if you want to use k8s discovery but install the CRDs manually outside of the helm chart, for example if you operate at namespace level without cluster resources |
| garage.replicationMode | string | `"3"` | Defaults to 3 replicas, see the replication_mode section at https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication-mode |
| garage.rpcBindAddr | string | `"[::]:3901"` |  |
| garage.rpcSecret | string | `""` | If not given, a random secret will be generated and stored in a Secret object |
| garage.s3.api.region | string | `"garage"` |  |
| garage.s3.api.rootDomain | string | `".s3.garage.tld"` |  |
| garage.s3.web.index | string | `"index.html"` |  |
| garage.s3.web.rootDomain | string | `".web.garage.tld"` |  |
| image.pullPolicy | string | `"IfNotPresent"` |  |
| image.repository | string | `"dxflrs/amd64_garage"` | defaults to the amd64 Docker image |
| image.tag | string | `""` | set the image tag; prefer using the chart version instead of this to avoid compatibility issues |
| imagePullSecrets | list | `[]` | set if you need credentials to pull your custom image |
| ingress.s3.api.annotations | object | `{}` | Rely _either_ on the className or the annotation below, but not both! If you want to use the className, set className: "nginx" and replace "nginx" with an Ingress controller name; examples [here](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers). |
| ingress.s3.api.enabled | bool | `false` |  |
| ingress.s3.api.hosts[0] | object | `{"host":"s3.garage.tld","paths":[{"path":"/","pathType":"Prefix"}]}` | garage S3 API endpoint, to be used with awscli for example |
| ingress.s3.api.hosts[1] | object | `{"host":"*.s3.garage.tld","paths":[{"path":"/","pathType":"Prefix"}]}` | garage S3 API endpoint, DNS-style bucket access |
| ingress.s3.api.labels | object | `{}` |  |
| ingress.s3.api.tls | list | `[]` |  |
| ingress.s3.web.annotations | object | `{}` | Rely _either_ on the className or the annotation below, but not both! If you want to use the className, set className: "nginx" and replace "nginx" with an Ingress controller name; examples [here](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers). |
| ingress.s3.web.enabled | bool | `false` |  |
| ingress.s3.web.hosts[0] | object | `{"host":"*.web.garage.tld","paths":[{"path":"/","pathType":"Prefix"}]}` | wildcard website access with bucket name prefix |
| ingress.s3.web.hosts[1] | object | `{"host":"mywebpage.example.com","paths":[{"path":"/","pathType":"Prefix"}]}` | specific bucket access with FQDN bucket |
| ingress.s3.web.labels | object | `{}` |  |
| ingress.s3.web.tls | list | `[]` |  |
| initImage.pullPolicy | string | `"IfNotPresent"` |  |
| initImage.repository | string | `"busybox"` |  |
| initImage.tag | string | `"stable"` |  |
| monitoring.metrics.enabled | bool | `false` | If true, a service for monitoring is created with a prometheus.io/scrape annotation |
| monitoring.metrics.serviceMonitor.enabled | bool | `false` | If true, a ServiceMonitor CRD is created for a prometheus operator https://github.com/coreos/prometheus-operator |
| monitoring.metrics.serviceMonitor.interval | string | `"15s"` |  |
| monitoring.metrics.serviceMonitor.labels | object | `{}` |  |
| monitoring.metrics.serviceMonitor.path | string | `"/metrics"` |  |
| monitoring.metrics.serviceMonitor.relabelings | list | `[]` |  |
| monitoring.metrics.serviceMonitor.scheme | string | `"http"` |  |
| monitoring.metrics.serviceMonitor.scrapeTimeout | string | `"10s"` |  |
| monitoring.metrics.serviceMonitor.tlsConfig | object | `{}` |  |
| monitoring.tracing.sink | string | `""` | specify a sink endpoint for OpenTelemetry traces, e.g. `http://localhost:4317` |
| nameOverride | string | `""` |  |
| nodeSelector | object | `{}` |  |
| persistence.data.hostPath | string | `"/var/lib/garage/data"` |  |
| persistence.data.size | string | `"100Mi"` |  |
| persistence.enabled | bool | `true` |  |
| persistence.meta.hostPath | string | `"/var/lib/garage/meta"` |  |
| persistence.meta.size | string | `"100Mi"` |  |
| podAnnotations | object | `{}` | additional pod annotations |
| podSecurityContext.fsGroup | int | `1000` |  |
| podSecurityContext.runAsGroup | int | `1000` |  |
| podSecurityContext.runAsNonRoot | bool | `true` |  |
| podSecurityContext.runAsUser | int | `1000` |  |
| resources | object | `{}` |  |
| securityContext.capabilities | object | `{"drop":["ALL"]}` | The default security context is heavily restricted; feel free to tune it to your requirements |
| securityContext.readOnlyRootFilesystem | bool | `true` |  |
| service.s3.api.port | int | `3900` |  |
| service.s3.web.port | int | `3902` |  |
| service.type | string | `"ClusterIP"` | You can rely on any service type to expose your cluster: ClusterIP (+ Ingress), NodePort (+ Ingress), or LoadBalancer |
| serviceAccount.annotations | object | `{}` | Annotations to add to the service account |
| serviceAccount.create | bool | `true` | Specifies whether a service account should be created |
| serviceAccount.name | string | `""` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template |
| tolerations | list | `[]` |  |

----------------------------------------------

Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2)
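To make the table above concrete, here is a minimal sketch of an override file that sets a few of the documented values. The key paths come straight from the table; the concrete sizes are arbitrary examples, not chart defaults:

```yaml
# my-values.yaml -- hypothetical override for this chart
garage:
  dbEngine: "lmdb"        # documented default; see the db-engine docs linked above
  replicationMode: "3"    # number of replicas, see the replication_mode docs
  compressionLevel: "1"   # zstd compression level of stored blocks

persistence:
  data:
    size: "10Gi"          # example only; the chart default is 100Mi
  meta:
    size: "1Gi"           # example only; the chart default is 100Mi
```

Such a file would be applied with the usual `helm install -f my-values.yaml` flow.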
@@ -1,49 +1,7 @@
-{{- if not .Values.garage.existingConfigMap }}
 apiVersion: v1
 kind: ConfigMap
 metadata:
   name: {{ include "garage.fullname" . }}-config
 data:
   garage.toml: |-
-    {{- if .Values.garage.garageTomlString }}
-    {{- tpl (index (index .Values.garage) "garageTomlString") $ | nindent 4 }}
-    {{- else }}
-    metadata_dir = "/mnt/meta"
-    data_dir = "/mnt/data"
-
-    db_engine = "{{ .Values.garage.dbEngine }}"
-
-    block_size = {{ .Values.garage.blockSize }}
-
-    replication_mode = "{{ .Values.garage.replicationMode }}"
-
-    compression_level = {{ .Values.garage.compressionLevel }}
-
-    rpc_bind_addr = "{{ .Values.garage.rpcBindAddr }}"
-    # rpc_secret will be populated by the init container from a k8s secret object
-    rpc_secret = "__RPC_SECRET_REPLACE__"
-
-    bootstrap_peers = {{ .Values.garage.bootstrapPeers }}
-
-    [kubernetes_discovery]
-    namespace = "{{ .Release.Namespace }}"
-    service_name = "{{ include "garage.fullname" . }}"
-    skip_crd = {{ .Values.garage.kubernetesSkipCrd }}
-
-    [s3_api]
-    s3_region = "{{ .Values.garage.s3.api.region }}"
-    api_bind_addr = "[::]:3900"
-    root_domain = "{{ .Values.garage.s3.api.rootDomain }}"
-
-    [s3_web]
-    bind_addr = "[::]:3902"
-    root_domain = "{{ .Values.garage.s3.web.rootDomain }}"
-    index = "{{ .Values.garage.s3.web.index }}"
-
-    [admin]
-    api_bind_addr = "[::]:3903"
-    {{- if .Values.monitoring.tracing.sink }}
-    trace_sink = "{{ .Values.monitoring.tracing.sink }}"
-    {{- end }}
-    {{- end }}
-{{- end }}
+    {{- tpl (index (index .Values.garage) "garage.toml") $ | nindent 4 }}
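A note on the one-line template that replaces the inlined configuration: the values key `garage.toml` contains a dot, so it cannot be reached with plain field syntax like `.Values.garage.garage.toml`. The template therefore looks the key up with Helm's `index` function (the doubled `index` call in the chart should reduce to the same lookup, since `index` with no further arguments returns its first argument) and passes the result through `tpl`, so that `{{ .Values... }}` references embedded in the string are rendered. A minimal sketch of the same pattern in isolation, not taken from the chart itself:

```yaml
# Sketch: fetch a map key that is not a valid identifier, then re-render it
# with tpl so template expressions inside the fetched string are expanded.
data:
  garage.toml: |-
    {{- tpl (index .Values.garage "garage.toml") $ | nindent 4 }}
```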
@@ -4,30 +4,28 @@
 
 # Garage configuration. These values go to garage.toml
 garage:
-  # -- Can be changed for better performance on certain systems
+  # Can be changed for better performance on certain systems
   # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db-engine-since-v0-8-0
   dbEngine: "lmdb"
 
-  # -- Defaults is 1MB
+  # Defaults is 1MB
   # An increase can result in better performance in certain scenarios
   # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#block-size
   blockSize: "1048576"
 
-  # -- Default to 3 replicas, see the replication_mode section at
+  # Default to 3 replicas, see the replication_mode section at
   # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication-mode
   replicationMode: "3"
 
-  # -- zstd compression level of stored blocks
+  # zstd compression level of stored blocks
   # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#compression-level
   compressionLevel: "1"
 
   rpcBindAddr: "[::]:3901"
-  # -- If not given, a random secret will be generated and stored in a Secret object
+  # If not given, a random secret will be generated and stored in a Secret object
   rpcSecret: ""
-  # -- This is not required if you use the integrated kubernetes discovery
+  # This is not required if you use the integrated kubernetes discovery
   bootstrapPeers: []
-  # -- Set to true if you want to use k8s discovery but install the CRDs manually outside
-  # of the helm chart, for example if you operate at namespace level without cluster ressources
   kubernetesSkipCrd: false
   s3:
     api:
@@ -36,16 +34,47 @@ garage:
     web:
       rootDomain: ".web.garage.tld"
       index: "index.html"
+  # Template for the garage configuration
+  # Values can be templated
+  # ref: https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/
+  garage.toml: |-
+    metadata_dir = "/mnt/meta"
+    data_dir = "/mnt/data"
+
+    db_engine = "{{ .Values.garage.dbEngine }}"
+
+    block_size = {{ .Values.garage.blockSize }}
+
+    replication_mode = "{{ .Values.garage.replicationMode }}"
+
+    compression_level = {{ .Values.garage.compressionLevel }}
+
+    rpc_bind_addr = "{{ .Values.garage.rpcBindAddr }}"
+    # rpc_secret will be populated by the init container from a k8s secret object
+    rpc_secret = "__RPC_SECRET_REPLACE__"
+
+    bootstrap_peers = {{ .Values.garage.bootstrapPeers }}
+
+    [kubernetes_discovery]
+    namespace = "{{ .Release.Namespace }}"
+    service_name = "{{ include "garage.fullname" . }}"
+    skip_crd = {{ .Values.garage.kubernetesSkipCrd }}
+
+    [s3_api]
+    s3_region = "{{ .Values.garage.s3.api.region }}"
+    api_bind_addr = "[::]:3900"
+    root_domain = "{{ .Values.garage.s3.api.rootDomain }}"
+
+    [s3_web]
+    bind_addr = "[::]:3902"
+    root_domain = "{{ .Values.garage.s3.web.rootDomain }}"
+    index = "{{ .Values.garage.s3.web.index }}"
+
+    [admin]
+    api_bind_addr = "[::]:3903"
+    {{- if .Values.monitoring.tracing.sink }}
+    trace_sink = "{{ .Values.monitoring.tracing.sink }}"
+    {{- end }}
 
-  # -- if not empty string, allow using an existing ConfigMap for the garage.toml,
-  # if set, ignores garage.toml
-  existingConfigMap: ""
-
-  # -- String Template for the garage configuration
-  # if set, ignores above values.
-  # Values can be templated,
-  # see https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/
-  garageTomlString: ""
-
 # Data persistence
 persistence:
@@ -63,18 +92,16 @@ persistence:
 
 # Deployment configuration
 deployment:
-  # -- Switchable to DaemonSet
+  # Switchable to DaemonSet
   kind: StatefulSet
-  # -- Number of StatefulSet replicas/garage nodes to start
+  # Number of StatefulSet replicas/garage nodes to start
   replicaCount: 3
-  # -- If using statefulset, allow Parallel or OrderedReady (default)
+  # If using statefulset, allow Parallel or OrderedReady (default)
   podManagementPolicy: OrderedReady
 
 image:
-  # -- default to amd64 docker image
   repository: dxflrs/amd64_garage
-  # -- set the image tag, please prefer using the chart version and not this
-  # to avoid compatibility issues
+  # please prefer using the chart version and not this tag
   tag: ""
   pullPolicy: IfNotPresent
 
@@ -83,21 +110,19 @@ initImage:
   tag: stable
   pullPolicy: IfNotPresent
 
-# -- set if you need credentials to pull your custom image
 imagePullSecrets: []
 nameOverride: ""
 fullnameOverride: ""
 
 serviceAccount:
-  # -- Specifies whether a service account should be created
+  # Specifies whether a service account should be created
   create: true
-  # -- Annotations to add to the service account
+  # Annotations to add to the service account
   annotations: {}
-  # -- The name of the service account to use.
+  # The name of the service account to use.
   # If not set and create is true, a name is generated using the fullname template
   name: ""
 
-# -- additonal pod annotations
 podAnnotations: {}
 
 podSecurityContext:
@@ -107,7 +132,7 @@ podSecurityContext:
   runAsNonRoot: true
 
 securityContext:
-  # -- The default security context is heavily restricted,
+  # The default security context is heavily restricted
   # feel free to tune it to your requirements
   capabilities:
     drop:
@@ -115,7 +140,7 @@ securityContext:
   readOnlyRootFilesystem: true
 
 service:
-  # -- You can rely on any service to expose your cluster
+  # You can rely on any service to expose your cluster
   # - ClusterIP (+ Ingress)
   # - NodePort (+ Ingress)
   # - LoadBalancer
@@ -131,23 +156,20 @@ ingress:
   s3:
     api:
       enabled: false
-      # -- Rely _either_ on the className or the annotation below but not both!
-      # If you want to use the className, set
+      # Rely either on the className or the annotation below but not both
+      # replace "nginx" by an Ingress controller
+      # you can find examples here https://kubernetes.io/docs/concepts/services-networking/ingress-controllers
       # className: "nginx"
-      # and replace "nginx" by an Ingress controller name,
-      # examples [here](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers).
       annotations: {}
        # kubernetes.io/ingress.class: "nginx"
        # kubernetes.io/tls-acme: "true"
      labels: {}
      hosts:
-        # -- garage S3 API endpoint, to be used with awscli for example
-        - host: "s3.garage.tld"
+        - host: "s3.garage.tld" # garage S3 API endpoint
          paths:
            - path: /
              pathType: Prefix
-        # -- garage S3 API endpoint, DNS style bucket access
-        - host: "*.s3.garage.tld"
+        - host: "*.s3.garage.tld" # garage S3 API endpoint, DNS style bucket access
          paths:
            - path: /
              pathType: Prefix
@@ -157,23 +179,20 @@ ingress:
      #      - kubernetes.docker.internal
    web:
      enabled: false
-      # -- Rely _either_ on the className or the annotation below but not both!
-      # If you want to use the className, set
+      # Rely either on the className or the annotation below but not both
+      # replace "nginx" by an Ingress controller
+      # you can find examples here https://kubernetes.io/docs/concepts/services-networking/ingress-controllers
      # className: "nginx"
-      # and replace "nginx" by an Ingress controller name,
-      # examples [here](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers).
      annotations: {}
        # kubernetes.io/ingress.class: nginx
        # kubernetes.io/tls-acme: "true"
      labels: {}
      hosts:
-        # -- wildcard website access with bucket name prefix
-        - host: "*.web.garage.tld"
+        - host: "*.web.garage.tld" # wildcard website access with bucket name prefix
          paths:
            - path: /
              pathType: Prefix
-        # -- specific bucket access with FQDN bucket
-        - host: "mywebpage.example.com"
+        - host: "mywebpage.example.com" # specific bucket access with FQDN bucket
          paths:
            - path: /
              pathType: Prefix
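The ingress comments rewritten above describe two mutually exclusive wiring options. In practice the choice looks like the sketch below, which enables only the S3 API ingress; the class name and hostname are placeholders, not chart defaults:

```yaml
# Hypothetical override: expose the S3 API through an Ingress controller.
ingress:
  s3:
    api:
      enabled: true
      # Pick ONE of the two mechanisms described in the comments above:
      # className: "nginx"                     # controller selected by class, or...
      annotations:
        kubernetes.io/ingress.class: "nginx"   # ...the legacy annotation form
      hosts:
        - host: "s3.example.com"               # placeholder domain
          paths:
            - path: /
              pathType: Prefix
```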
@@ -205,10 +224,10 @@ extraVolumeMounts: {}
 
 monitoring:
   metrics:
-    # -- If true, a service for monitoring is created with a prometheus.io/scrape annotation
+    # If true, a service for monitoring is created with a prometheus.io/scrape annotation
     enabled: false
     serviceMonitor:
-      # -- If true, a ServiceMonitor CRD is created for a prometheus operator
+      # If true, a ServiceMonitor CRD is created for a prometheus operator
       # https://github.com/coreos/prometheus-operator
       enabled: false
       path: /metrics
@@ -220,5 +239,4 @@ monitoring:
       scrapeTimeout: 10s
       relabelings: []
   tracing:
-    # -- specify a sink endpoint for OpenTelemetry Traces, eg. `http://localhost:4317`
     sink: ""
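Because the `garage.toml` template shown earlier only emits a `trace_sink` line when `monitoring.tracing.sink` is non-empty, wiring up tracing is a single-key override. The endpoint below is a placeholder collector address in the style of the `http://localhost:4317` example from the chart docs:

```yaml
monitoring:
  tracing:
    sink: "http://otel-collector.monitoring.svc:4317"  # placeholder endpoint
```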
@@ -7,12 +7,7 @@ if [ "$#" -ne 1 ]; then
   exit 2
 fi
 
-if [ ! -x "$1" ]; then
-  echo "[fail] $1 does not exist or is not an executable"
-  exit 1
-fi
-
-if file "$1" | grep 'dynamically linked' 2>&1; then
+if file $1 | grep 'dynamically linked' 2>&1; then
   echo "[fail] $1 is dynamic"
   exit 1
 fi
@@ -3,7 +3,7 @@
 with import ./nix/common.nix;
 
 let
-  pkgs = import nixpkgs {
+  pkgs = import pkgsSrc {
     inherit system;
   };
   winscp = (import ./nix/winscp.nix) pkgs;
@@ -39,7 +39,7 @@ in
         --endpoint-url https://garage.deuxfleurs.fr \
         --region garage \
         s3 cp \
-        ./result/bin/garage \
+        ./result-bin/bin/garage \
         s3://garagehq.deuxfleurs.fr/_releases/''${CI_COMMIT_TAG:-$CI_COMMIT_SHA}/''${TARGET}/garage
   }
 
@@ -115,7 +115,7 @@ in
   shellHook = ''
     function refresh_cache {
       pass show deuxfleurs/nix_priv_key > /tmp/nix-signing-key.sec
-      for attr in pkgs.amd64.debug test.amd64 pkgs.{amd64,i386,arm,arm64}.release; do
+      for attr in clippy.amd64 test.amd64 pkgs.{amd64,i386,arm,arm64}.release; do
        echo "Updating cache for ''${attr}"
        nix copy -j8 \
          --to 's3://nix?endpoint=garage.deuxfleurs.fr&region=garage&secret-key=/tmp/nix-signing-key.sec' \
@@ -1,5 +1,5 @@
 [package]
-name = "garage_api_s3"
+name = "garage_api"
 version = "1.0.1"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
@@ -20,9 +20,9 @@ garage_block.workspace = true
 garage_net.workspace = true
 garage_util.workspace = true
 garage_rpc.workspace = true
-garage_api_common.workspace = true
 
 aes-gcm.workspace = true
+argon2.workspace = true
 async-compression.workspace = true
 async-trait.workspace = true
 base64.workspace = true
@@ -30,15 +30,20 @@ bytes.workspace = true
 chrono.workspace = true
 crc32fast.workspace = true
 crc32c.workspace = true
+crypto-common.workspace = true
 err-derive.workspace = true
 hex.workspace = true
+hmac.workspace = true
+idna.workspace = true
 tracing.workspace = true
 md-5.workspace = true
+nom.workspace = true
 pin-project.workspace = true
 sha1.workspace = true
 sha2.workspace = true
 
 futures.workspace = true
+futures-util.workspace = true
 tokio.workspace = true
 tokio-stream.workspace = true
 tokio-util.workspace = true
@@ -49,13 +54,21 @@ httpdate.workspace = true
 http-range.workspace = true
 http-body-util.workspace = true
 hyper = { workspace = true, default-features = false, features = ["server", "http1"] }
+hyper-util.workspace = true
 multer.workspace = true
 percent-encoding.workspace = true
 roxmltree.workspace = true
 url.workspace = true
 
 serde.workspace = true
+serde_bytes.workspace = true
 serde_json.workspace = true
 quick-xml.workspace = true
 
 opentelemetry.workspace = true
+opentelemetry-prometheus = { workspace = true, optional = true }
+prometheus = { workspace = true, optional = true }
+
+[features]
+k2v = [ "garage_util/k2v", "garage_model/k2v" ]
+metrics = [ "opentelemetry-prometheus", "prometheus" ]
@@ -1,43 +0,0 @@
-[package]
-name = "garage_api_admin"
-version = "1.0.1"
-authors = ["Alex Auvolat <alex@adnab.me>"]
-edition = "2018"
-license = "AGPL-3.0"
-description = "Admin API server crate for the Garage object store"
-repository = "https://git.deuxfleurs.fr/Deuxfleurs/garage"
-readme = "../../README.md"
-
-[lib]
-path = "lib.rs"
-
-# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
-
-[dependencies]
-garage_model.workspace = true
-garage_table.workspace = true
-garage_util.workspace = true
-garage_rpc.workspace = true
-garage_api_common.workspace = true
-
-argon2.workspace = true
-async-trait.workspace = true
-err-derive.workspace = true
-hex.workspace = true
-tracing.workspace = true
-
-futures.workspace = true
-tokio.workspace = true
-http.workspace = true
-hyper = { workspace = true, default-features = false, features = ["server", "http1"] }
-url.workspace = true
-
-serde.workspace = true
-serde_json.workspace = true
-
-opentelemetry.workspace = true
-opentelemetry-prometheus = { workspace = true, optional = true }
-prometheus = { workspace = true, optional = true }
-
-[features]
-metrics = [ "opentelemetry-prometheus", "prometheus" ]
@@ -20,15 +20,15 @@ use garage_rpc::system::ClusterHealthStatus;
 use garage_util::error::Error as GarageError;
 use garage_util::socket_address::UnixOrTCPSocketAddress;
 
-use garage_api_common::generic_server::*;
-use garage_api_common::helpers::*;
+use crate::generic_server::*;
 
-use crate::bucket::*;
-use crate::cluster::*;
-use crate::error::*;
-use crate::key::*;
-use crate::router_v0;
-use crate::router_v1::{Authorization, Endpoint};
+use crate::admin::bucket::*;
+use crate::admin::cluster::*;
+use crate::admin::error::*;
+use crate::admin::key::*;
+use crate::admin::router_v0;
+use crate::admin::router_v1::{Authorization, Endpoint};
+use crate::helpers::*;
 
 pub type ResBody = BoxBody<Error>;
 
@@ -17,12 +17,11 @@ use garage_model::permission::*;
 use garage_model::s3::mpu_table;
 use garage_model::s3::object_table::*;
 
-use garage_api_common::common_error::CommonError;
-use garage_api_common::helpers::*;
-
-use crate::api_server::ResBody;
-use crate::error::*;
-use crate::key::ApiBucketKeyPerm;
+use crate::admin::api_server::ResBody;
+use crate::admin::error::*;
+use crate::admin::key::ApiBucketKeyPerm;
+use crate::common_error::CommonError;
+use crate::helpers::*;
 
 pub async fn handle_list_buckets(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
	let buckets = garage
@@ -12,10 +12,9 @@ use garage_rpc::layout;
 
 use garage_model::garage::Garage;
 
-use garage_api_common::helpers::{json_ok_response, parse_json_body};
-
-use crate::api_server::ResBody;
-use crate::error::*;
+use crate::admin::api_server::ResBody;
+use crate::admin::error::*;
+use crate::helpers::{json_ok_response, parse_json_body};
 
 pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
	let layout = garage.system.cluster_layout();
@@ -1,24 +1,20 @@
-use std::convert::TryFrom;
-
 use err_derive::Error;
 use hyper::header::HeaderValue;
 use hyper::{HeaderMap, StatusCode};
 
 pub use garage_model::helper::error::Error as HelperError;
 
-use garage_api_common::common_error::{commonErrorDerivative, CommonError};
-pub use garage_api_common::common_error::{
-	CommonErrorDerivative, OkOrBadRequest, OkOrInternalError,
-};
-use garage_api_common::generic_server::ApiError;
-use garage_api_common::helpers::*;
+use crate::common_error::CommonError;
+pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInternalError};
+use crate::generic_server::ApiError;
+use crate::helpers::*;
 
 /// Errors of this crate
 #[derive(Debug, Error)]
 pub enum Error {
	#[error(display = "{}", _0)]
	/// Error from common error
-	Common(#[error(source)] CommonError),
+	Common(CommonError),
 
	// Category: cannot process
	/// The API access key does not exist
@@ -33,21 +29,17 @@ pub enum Error {
	KeyAlreadyExists(String),
 }
 
-commonErrorDerivative!(Error);
-
-/// FIXME: helper errors are transformed into their corresponding variants
-/// in the Error struct, but in many case a helper error should be considered
-/// an internal error.
-impl From<HelperError> for Error {
-	fn from(err: HelperError) -> Error {
-		match CommonError::try_from(err) {
-			Ok(ce) => Self::Common(ce),
-			Err(HelperError::NoSuchAccessKey(k)) => Self::NoSuchAccessKey(k),
-			Err(_) => unreachable!(),
-		}
+impl<T> From<T> for Error
+where
+	CommonError: From<T>,
+{
+	fn from(err: T) -> Self {
+		Error::Common(CommonError::from(err))
	}
 }
 
+impl CommonErrorDerivative for Error {}
+
 impl Error {
	fn code(&self) -> &'static str {
		match self {
@@ -9,10 +9,9 @@ use garage_table::*;
 use garage_model::garage::Garage;
 use garage_model::key_table::*;
 
-use garage_api_common::helpers::*;
-
-use crate::api_server::ResBody;
-use crate::error::*;
+use crate::admin::api_server::ResBody;
+use crate::admin::error::*;
+use crate::helpers::*;
 
 pub async fn handle_list_keys(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
	let res = garage
@@ -1,6 +1,3 @@
-#[macro_use]
-extern crate tracing;
-
 pub mod api_server;
 mod error;
 mod router_v0;
@@ -2,9 +2,8 @@ use std::borrow::Cow;
 
 use hyper::{Method, Request};
 
-use garage_api_common::router_macros::*;
-
-use crate::error::*;
+use crate::admin::error::*;
+use crate::router_macros::*;
 
 router_match! {@func
 
@@ -2,10 +2,9 @@ use std::borrow::Cow;
 
 use hyper::{Method, Request};
 
-use garage_api_common::router_macros::*;
-
-use crate::error::*;
-use crate::router_v0;
+use crate::admin::error::*;
+use crate::admin::router_v0;
+use crate::router_macros::*;
 
 pub enum Authorization {
	None,
@@ -1,45 +0,0 @@
-[package]
-name = "garage_api_common"
-version = "1.0.1"
-authors = ["Alex Auvolat <alex@adnab.me>"]
-edition = "2018"
-license = "AGPL-3.0"
-description = "Common functions for the API server crates for the Garage object store"
-repository = "https://git.deuxfleurs.fr/Deuxfleurs/garage"
-readme = "../../README.md"
-
-[lib]
-path = "lib.rs"
-
-# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
-
-[dependencies]
-garage_model.workspace = true
-garage_table.workspace = true
-garage_util.workspace = true
-
-async-trait.workspace = true
-bytes.workspace = true
-chrono.workspace = true
-crypto-common.workspace = true
-err-derive.workspace = true
-hex.workspace = true
-hmac.workspace = true
-idna.workspace = true
-tracing.workspace = true
-nom.workspace = true
-pin-project.workspace = true
-sha2.workspace = true
-
-futures.workspace = true
-tokio.workspace = true
-http.workspace = true
-http-body-util.workspace = true
-hyper = { workspace = true, default-features = false, features = ["server", "http1"] }
-hyper-util.workspace = true
-url.workspace = true
-
-serde.workspace = true
-serde_json.workspace = true
-
-opentelemetry.workspace = true
@@ -1,170 +0,0 @@
-use std::sync::Arc;
-
-use http::header::{
-	ACCESS_CONTROL_ALLOW_HEADERS, ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN,
-	ACCESS_CONTROL_EXPOSE_HEADERS, ACCESS_CONTROL_REQUEST_HEADERS, ACCESS_CONTROL_REQUEST_METHOD,
-};
-use hyper::{body::Body, body::Incoming as IncomingBody, Request, Response, StatusCode};
-
-use garage_model::bucket_table::{BucketParams, CorsRule as GarageCorsRule};
-use garage_model::garage::Garage;
-
-use crate::common_error::{
-	helper_error_as_internal, CommonError, OkOrBadRequest, OkOrInternalError,
-};
-use crate::helpers::*;
-
-pub fn find_matching_cors_rule<'a>(
-	bucket_params: &'a BucketParams,
-	req: &Request<impl Body>,
-) -> Result<Option<&'a GarageCorsRule>, CommonError> {
-	if let Some(cors_config) = bucket_params.cors_config.get() {
-		if let Some(origin) = req.headers().get("Origin") {
-			let origin = origin.to_str()?;
-			let request_headers = match req.headers().get(ACCESS_CONTROL_REQUEST_HEADERS) {
-				Some(h) => h.to_str()?.split(',').map(|h| h.trim()).collect::<Vec<_>>(),
-				None => vec![],
-			};
-			return Ok(cors_config.iter().find(|rule| {
-				cors_rule_matches(rule, origin, req.method().as_ref(), request_headers.iter())
-			}));
-		}
-	}
-	Ok(None)
-}
-
-pub fn cors_rule_matches<'a, HI, S>(
-	rule: &GarageCorsRule,
-	origin: &'a str,
-	method: &'a str,
-	mut request_headers: HI,
-) -> bool
-where
-	HI: Iterator<Item = S>,
-	S: AsRef<str>,
-{
-	rule.allow_origins.iter().any(|x| x == "*" || x == origin)
-		&& rule.allow_methods.iter().any(|x| x == "*" || x == method)
-		&& request_headers.all(|h| {
-			rule.allow_headers
-				.iter()
-				.any(|x| x == "*" || x == h.as_ref())
-		})
-}
-
-pub fn add_cors_headers(
-	resp: &mut Response<impl Body>,
-	rule: &GarageCorsRule,
-) -> Result<(), http::header::InvalidHeaderValue> {
-	let h = resp.headers_mut();
-	h.insert(
-		ACCESS_CONTROL_ALLOW_ORIGIN,
-		rule.allow_origins.join(", ").parse()?,
-	);
-	h.insert(
-		ACCESS_CONTROL_ALLOW_METHODS,
-		rule.allow_methods.join(", ").parse()?,
-	);
-	h.insert(
-		ACCESS_CONTROL_ALLOW_HEADERS,
-		rule.allow_headers.join(", ").parse()?,
-	);
-	h.insert(
-		ACCESS_CONTROL_EXPOSE_HEADERS,
-		rule.expose_headers.join(", ").parse()?,
-	);
-	Ok(())
-}
-
-pub async fn handle_options_api(
-	garage: Arc<Garage>,
-	req: &Request<IncomingBody>,
-	bucket_name: Option<String>,
-) -> Result<Response<EmptyBody>, CommonError> {
-	// FIXME: CORS rules of buckets with local aliases are
-	// not taken into account.
-
-	// If the bucket name is a global bucket name,
-	// we try to apply the CORS rules of that bucket.
-	// If a user has a local bucket name that has
-	// the same name, its CORS rules won't be applied
-	// and will be shadowed by the rules of the globally
-	// existing bucket (but this is inevitable because
-	// OPTIONS calls are not auhtenticated).
-	if let Some(bn) = bucket_name {
-		let helper = garage.bucket_helper();
-		let bucket_id = helper
-			.resolve_global_bucket_name(&bn)
-			.await
-			.map_err(helper_error_as_internal)?;
-		if let Some(id) = bucket_id {
-			let bucket = garage
-				.bucket_helper()
-				.get_existing_bucket(id)
-				.await
-				.map_err(helper_error_as_internal)?;
-			let bucket_params = bucket.state.into_option().unwrap();
-			handle_options_for_bucket(req, &bucket_params)
-		} else {
-			// If there is a bucket name in the request, but that name
-			// does not correspond to a global alias for a bucket,
-			// then it's either a non-existing bucket or a local bucket.
-			// We have no way of knowing, because the request is not
-			// authenticated and thus we can't resolve local aliases.
-			// We take the permissive approach of allowing everything,
-			// because we don't want to prevent web apps that use
-			// local bucket names from making API calls.
-			Ok(Response::builder()
-				.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
-				.header(ACCESS_CONTROL_ALLOW_METHODS, "*")
-				.status(StatusCode::OK)
-				.body(EmptyBody::new())?)
-		}
-	} else {
-		// If there is no bucket name in the request,
-		// we are doing a ListBuckets call, which we want to allow
-		// for all origins.
-		Ok(Response::builder()
-			.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
-			.header(ACCESS_CONTROL_ALLOW_METHODS, "GET")
-			.status(StatusCode::OK)
-			.body(EmptyBody::new())?)
-	}
-}
-
-pub fn handle_options_for_bucket(
-	req: &Request<IncomingBody>,
-	bucket_params: &BucketParams,
-) -> Result<Response<EmptyBody>, CommonError> {
-	let origin = req
-		.headers()
-		.get("Origin")
-		.ok_or_bad_request("Missing Origin header")?
-		.to_str()?;
-	let request_method = req
-		.headers()
-		.get(ACCESS_CONTROL_REQUEST_METHOD)
-		.ok_or_bad_request("Missing Access-Control-Request-Method header")?
-		.to_str()?;
-	let request_headers = match req.headers().get(ACCESS_CONTROL_REQUEST_HEADERS) {
-		Some(h) => h.to_str()?.split(',').map(|h| h.trim()).collect::<Vec<_>>(),
-		None => vec![],
-	};
-
-	if let Some(cors_config) = bucket_params.cors_config.get() {
-		let matching_rule = cors_config
-			.iter()
-			.find(|rule| cors_rule_matches(rule, origin, request_method, request_headers.iter()));
-		if let Some(rule) = matching_rule {
-			let mut resp = Response::builder()
-				.status(StatusCode::OK)
-				.body(EmptyBody::new())?;
-			add_cors_headers(&mut resp, rule).ok_or_internal_error("Invalid CORS configuration")?;
-			return Ok(resp);
-		}
-	}
-
-	Err(CommonError::Forbidden(
-		"This CORS request is not allowed.".into(),
-	))
-}
@@ -1,12 +0,0 @@
-//! Crate for serving a S3 compatible API
-#[macro_use]
-extern crate tracing;
-
-pub mod common_error;
-
-pub mod cors;
-pub mod encoding;
-pub mod generic_server;
-pub mod helpers;
-pub mod router_macros;
-pub mod signature;
@@ -1,5 +1,3 @@
-use std::convert::TryFrom;
-
 use err_derive::Error;
 use hyper::StatusCode;
 
@@ -57,35 +55,6 @@ pub enum CommonError {
	InvalidBucketName(String),
 }
 
-#[macro_export]
-macro_rules! commonErrorDerivative {
-	( $error_struct: ident ) => {
-		impl From<garage_util::error::Error> for $error_struct {
-			fn from(err: garage_util::error::Error) -> Self {
-				Self::Common(CommonError::InternalError(err))
-			}
-		}
-		impl From<http::Error> for $error_struct {
-			fn from(err: http::Error) -> Self {
-				Self::Common(CommonError::Http(err))
-			}
-		}
-		impl From<hyper::Error> for $error_struct {
-			fn from(err: hyper::Error) -> Self {
-				Self::Common(CommonError::Hyper(err))
-			}
-		}
-		impl From<hyper::header::ToStrError> for $error_struct {
-			fn from(err: hyper::header::ToStrError) -> Self {
-				Self::Common(CommonError::InvalidHeader(err))
-			}
-		}
-		impl CommonErrorDerivative for $error_struct {}
-	};
-}
-
-pub use commonErrorDerivative;
-
 impl CommonError {
	pub fn http_status_code(&self) -> StatusCode {
		match self {
@@ -128,39 +97,18 @@ impl CommonError {
	}
 }
 
-impl TryFrom<HelperError> for CommonError {
-	type Error = HelperError;
-
-	fn try_from(err: HelperError) -> Result<Self, HelperError> {
+impl From<HelperError> for CommonError {
+	fn from(err: HelperError) -> Self {
		match err {
-			HelperError::Internal(i) => Ok(Self::InternalError(i)),
-			HelperError::BadRequest(b) => Ok(Self::BadRequest(b)),
-			HelperError::InvalidBucketName(n) => Ok(Self::InvalidBucketName(n)),
-			HelperError::NoSuchBucket(n) => Ok(Self::NoSuchBucket(n)),
-			e => Err(e),
+			HelperError::Internal(i) => Self::InternalError(i),
+			HelperError::BadRequest(b) => Self::BadRequest(b),
+			HelperError::InvalidBucketName(n) => Self::InvalidBucketName(n),
+			HelperError::NoSuchBucket(n) => Self::NoSuchBucket(n),
+			e => Self::bad_request(format!("{}", e)),
		}
	}
 }
 
-/// This function converts HelperErrors into CommonErrors,
-/// for variants that exist in CommonError.
-/// This is used for helper functions that might return InvalidBucketName
-/// or NoSuchBucket for instance, and we want to pass that error
-/// up to our caller.
-pub fn pass_helper_error(err: HelperError) -> CommonError {
-	match CommonError::try_from(err) {
-		Ok(e) => e,
-		Err(e) => panic!("Helper error `{}` should hot have happenned here", e),
-	}
-}
-
-pub fn helper_error_as_internal(err: HelperError) -> CommonError {
-	match err {
-		HelperError::Internal(e) => CommonError::InternalError(e),
-		e => CommonError::InternalError(GarageError::Message(e.to_string())),
-	}
-}
-
 pub trait CommonErrorDerivative: From<CommonError> {
	fn internal_error<M: ToString>(msg: M) -> Self {
		Self::from(CommonError::InternalError(GarageError::Message(
@@ -36,7 +36,7 @@ use garage_util::socket_address::UnixOrTCPSocketAddress;
 
 use crate::helpers::{BoxBody, ErrorBody};
 
-pub trait ApiEndpoint: Send + Sync + 'static {
+pub(crate) trait ApiEndpoint: Send + Sync + 'static {
	fn name(&self) -> &'static str;
	fn add_span_attributes(&self, span: SpanRef<'_>);
 }
@@ -48,7 +48,7 @@ pub trait ApiError: std::error::Error + Send + Sync + 'static {
 }
 
 #[async_trait]
-pub trait ApiHandler: Send + Sync + 'static {
+pub(crate) trait ApiHandler: Send + Sync + 'static {
	const API_NAME: &'static str;
	const API_NAME_DISPLAY: &'static str;
 
@@ -63,7 +63,7 @@ pub trait ApiHandler: Send + Sync + 'static {
	) -> Result<Response<BoxBody<Self::Error>>, Self::Error>;
 }
 
-pub struct ApiServer<A: ApiHandler> {
+pub(crate) struct ApiServer<A: ApiHandler> {
	region: String,
	api_handler: A,
 
@@ -363,9 +363,9 @@ mod tests {
 }
 
 #[derive(Serialize)]
-pub struct CustomApiErrorBody {
-	pub code: String,
-	pub message: String,
-	pub region: String,
-	pub path: String,
+pub(crate) struct CustomApiErrorBody {
+	pub(crate) code: String,
+	pub(crate) message: String,
+	pub(crate) region: String,
+	pub(crate) path: String,
 }
@@ -1,38 +0,0 @@
-[package]
-name = "garage_api_k2v"
-version = "1.0.1"
-authors = ["Alex Auvolat <alex@adnab.me>"]
-edition = "2018"
-license = "AGPL-3.0"
-description = "K2V API server crate for the Garage object store"
-repository = "https://git.deuxfleurs.fr/Deuxfleurs/garage"
-readme = "../../README.md"
-
-[lib]
-path = "lib.rs"
-
-# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
-
-[dependencies]
-garage_model = { workspace = true, features = [ "k2v" ] }
-garage_table.workspace = true
-garage_util = { workspace = true, features = [ "k2v" ] }
-garage_api_common.workspace = true
-
-async-trait.workspace = true
-base64.workspace = true
-err-derive.workspace = true
-tracing.workspace = true
-
-futures.workspace = true
-tokio.workspace = true
-http.workspace = true
-http-body-util.workspace = true
-hyper = { workspace = true, default-features = false, features = ["server", "http1"] }
-percent-encoding.workspace = true
-url.workspace = true
-
-serde.workspace = true
-serde_json.workspace = true
-
-opentelemetry.workspace = true
@@ -12,25 +12,26 @@ use garage_util::socket_address::UnixOrTCPSocketAddress;
 
 use garage_model::garage::Garage;
 
-use garage_api_common::cors::*;
-use garage_api_common::generic_server::*;
-use garage_api_common::helpers::*;
-use garage_api_common::signature::verify_request;
+use crate::generic_server::*;
+use crate::k2v::error::*;
 
-use crate::batch::*;
-use crate::error::*;
-use crate::index::*;
-use crate::item::*;
-use crate::router::Endpoint;
+use crate::signature::verify_request;
 
-pub use garage_api_common::signature::streaming::ReqBody;
+use crate::helpers::*;
+use crate::k2v::batch::*;
+use crate::k2v::index::*;
+use crate::k2v::item::*;
+use crate::k2v::router::Endpoint;
+use crate::s3::cors::*;
+
+pub use crate::signature::streaming::ReqBody;
 pub type ResBody = BoxBody<Error>;
 
 pub struct K2VApiServer {
	garage: Arc<Garage>,
 }
 
-pub struct K2VApiEndpoint {
+pub(crate) struct K2VApiEndpoint {
	bucket_name: String,
	endpoint: Endpoint,
 }
@@ -76,7 +77,7 @@ impl ApiHandler for K2VApiServer {
		} = endpoint;
		let garage = self.garage.clone();
 
-		// The OPTIONS method is processed early, before we even check for an API key
+		// The OPTIONS method is procesed early, before we even check for an API key
		if let Endpoint::Options = endpoint {
			let options_res = handle_options_api(garage, &req, Some(bucket_name))
				.await
@@ -89,13 +90,11 @@ impl ApiHandler for K2VApiServer {
		let bucket_id = garage
			.bucket_helper()
			.resolve_bucket(&bucket_name, &api_key)
-			.await
-			.map_err(pass_helper_error)?;
+			.await?;
		let bucket = garage
			.bucket_helper()
			.get_existing_bucket(bucket_id)
-			.await
-			.map_err(helper_error_as_internal)?;
+			.await?;
		let bucket_params = bucket.state.into_option().unwrap();
 
		let allowed = match endpoint.authorization_type() {
@@ -4,14 +4,13 @@ use serde::{Deserialize, Serialize};
 
 use garage_table::{EnumerationOrder, TableSchema};
 
+use garage_model::k2v::causality::*;
 use garage_model::k2v::item_table::*;
 
-use garage_api_common::helpers::*;
-
-use crate::api_server::{ReqBody, ResBody};
-use crate::error::*;
-use crate::item::parse_causality_token;
-use crate::range::read_range;
+use crate::helpers::*;
+use crate::k2v::api_server::{ReqBody, ResBody};
+use crate::k2v::error::*;
+use crate::k2v::range::read_range;
 
 pub async fn handle_insert_batch(
	ctx: ReqCtx,
@@ -24,7 +23,7 @@ pub async fn handle_insert_batch(
 
	let mut items2 = vec![];
	for it in items {
-		let ct = it.ct.map(|s| parse_causality_token(&s)).transpose()?;
+		let ct = it.ct.map(|s| CausalContext::parse_helper(&s)).transpose()?;
		let v = match it.v {
			Some(vs) => DvvsValue::Value(
				BASE64_STANDARD
@@ -282,8 +281,7 @@ pub(crate) async fn handle_poll_range(
			query.seen_marker,
			timeout_msec,
		)
-		.await
-		.map_err(pass_helper_error)?;
+		.await?;
 
	if let Some((items, seen_marker)) = resp {
		let resp = PollRangeResponse {
@@ -2,21 +2,18 @@ use err_derive::Error;
 use hyper::header::HeaderValue;
 use hyper::{HeaderMap, StatusCode};
 
-use garage_api_common::common_error::{commonErrorDerivative, CommonError};
-pub(crate) use garage_api_common::common_error::{helper_error_as_internal, pass_helper_error};
-pub use garage_api_common::common_error::{
-	CommonErrorDerivative, OkOrBadRequest, OkOrInternalError,
-};
-use garage_api_common::generic_server::ApiError;
-use garage_api_common::helpers::*;
-use garage_api_common::signature::error::Error as SignatureError;
+use crate::common_error::CommonError;
+pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInternalError};
+use crate::generic_server::ApiError;
+use crate::helpers::*;
+use crate::signature::error::Error as SignatureError;
 
 /// Errors of this crate
 #[derive(Debug, Error)]
 pub enum Error {
	#[error(display = "{}", _0)]
	/// Error from common error
-	Common(#[error(source)] CommonError),
+	Common(CommonError),
 
	// Category: cannot process
	/// Authorization Header Malformed
@@ -31,10 +28,6 @@ pub enum Error {
	#[error(display = "Invalid base64: {}", _0)]
	InvalidBase64(#[error(source)] base64::DecodeError),
 
-	/// Invalid causality token
-	#[error(display = "Invalid causality token")]
-	InvalidCausalityToken,
-
	/// The client asked for an invalid return format (invalid Accept header)
	#[error(display = "Not acceptable: {}", _0)]
	NotAcceptable(String),
@@ -44,7 +37,16 @@ pub enum Error {
	InvalidUtf8Str(#[error(source)] std::str::Utf8Error),
 }
 
-commonErrorDerivative!(Error);
+impl<T> From<T> for Error
+where
+	CommonError: From<T>,
+{
+	fn from(err: T) -> Self {
+		Error::Common(CommonError::from(err))
+	}
+}
+
+impl CommonErrorDerivative for Error {}
 
 impl From<SignatureError> for Error {
	fn from(err: SignatureError) -> Self {
@@ -70,7 +72,6 @@ impl Error {
			Error::AuthorizationHeaderMalformed(_) => "AuthorizationHeaderMalformed",
			Error::InvalidBase64(_) => "InvalidBase64",
			Error::InvalidUtf8Str(_) => "InvalidUtf8String",
-			Error::InvalidCausalityToken => "CausalityToken",
		}
	}
 }
@@ -84,8 +85,7 @@ impl ApiError for Error {
			Error::NotAcceptable(_) => StatusCode::NOT_ACCEPTABLE,
			Error::AuthorizationHeaderMalformed(_)
			| Error::InvalidBase64(_)
-			| Error::InvalidUtf8Str(_)
-			| Error::InvalidCausalityToken => StatusCode::BAD_REQUEST,
+			| Error::InvalidUtf8Str(_) => StatusCode::BAD_REQUEST,
		}
	}
 
@@ -5,11 +5,10 @@ use garage_table::util::*;
 
 use garage_model::k2v::item_table::{BYTES, CONFLICTS, ENTRIES, VALUES};
 
-use garage_api_common::helpers::*;
-
-use crate::api_server::ResBody;
-use crate::error::*;
-use crate::range::read_range;
+use crate::helpers::*;
+use crate::k2v::api_server::ResBody;
+use crate::k2v::error::*;
+use crate::k2v::range::read_range;
 
 pub async fn handle_read_index(
	ctx: ReqCtx,
@ -6,10 +6,9 @@ use hyper::{Request, Response, StatusCode};
|
||||||
use garage_model::k2v::causality::*;
|
use garage_model::k2v::causality::*;
|
||||||
use garage_model::k2v::item_table::*;
|
use garage_model::k2v::item_table::*;
|
||||||
|
|
||||||
use garage_api_common::helpers::*;
|
use crate::helpers::*;
|
||||||
|
use crate::k2v::api_server::{ReqBody, ResBody};
|
||||||
use crate::api_server::{ReqBody, ResBody};
|
use crate::k2v::error::*;
|
||||||
use crate::error::*;
|
|
||||||
|
|
||||||
pub const X_GARAGE_CAUSALITY_TOKEN: &str = "X-Garage-Causality-Token";
|
pub const X_GARAGE_CAUSALITY_TOKEN: &str = "X-Garage-Causality-Token";
|
||||||
|
|
||||||
|
@ -19,10 +18,6 @@ pub enum ReturnFormat {
|
||||||
Either,
|
Either,
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn parse_causality_token(s: &str) -> Result<CausalContext, Error> {
|
|
||||||
CausalContext::parse(s).ok_or(Error::InvalidCausalityToken)
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ReturnFormat {
|
impl ReturnFormat {
|
||||||
pub fn from(req: &Request<ReqBody>) -> Result<Self, Error> {
|
pub fn from(req: &Request<ReqBody>) -> Result<Self, Error> {
|
||||||
let accept = match req.headers().get(header::ACCEPT) {
|
let accept = match req.headers().get(header::ACCEPT) {
|
||||||
|
@ -141,7 +136,7 @@ pub async fn handle_insert_item(
|
||||||
.get(X_GARAGE_CAUSALITY_TOKEN)
|
.get(X_GARAGE_CAUSALITY_TOKEN)
|
||||||
.map(|s| s.to_str())
|
.map(|s| s.to_str())
|
||||||
.transpose()?
|
.transpose()?
|
||||||
.map(parse_causality_token)
|
.map(CausalContext::parse_helper)
|
||||||
.transpose()?;
|
.transpose()?;
|
||||||
|
|
||||||
let body = http_body_util::BodyExt::collect(req.into_body())
|
let body = http_body_util::BodyExt::collect(req.into_body())
|
||||||
|
@ -181,7 +176,7 @@ pub async fn handle_delete_item(
|
||||||
.get(X_GARAGE_CAUSALITY_TOKEN)
|
.get(X_GARAGE_CAUSALITY_TOKEN)
|
||||||
.map(|s| s.to_str())
|
.map(|s| s.to_str())
|
||||||
.transpose()?
|
.transpose()?
|
||||||
.map(parse_causality_token)
|
.map(CausalContext::parse_helper)
|
||||||
.transpose()?;
|
.transpose()?;
|
||||||
|
|
||||||
let value = DvvsValue::Deleted;
|
let value = DvvsValue::Deleted;
|
||||||
|
|
|
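The `.map(...).transpose()?` chains in the two hunks above turn an optional header into an optional parsed value while still propagating parse errors. A small sketch of that plumbing, with a toy parser standing in for `CausalContext::parse`:

// Option<&str> -> Option<Result<T, E>> -> Result<Option<T>, E>
fn parse_token(s: &str) -> Result<u64, String> {
    s.parse::<u64>()
        .map_err(|_| format!("invalid causality token: {}", s))
}

fn extract(header: Option<&str>) -> Result<Option<u64>, String> {
    header
        .map(parse_token) // Option<Result<u64, String>>
        .transpose()      // Result<Option<u64>, String>: None stays Ok(None)
}

A missing header yields `Ok(None)`; a present-but-malformed header short-circuits with the error, which is exactly the behavior both sides of the diff rely on.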
@@ -1,6 +1,3 @@
-#[macro_use]
-extern crate tracing;
-
 pub mod api_server;
 mod error;
 mod router;

@@ -7,9 +7,8 @@ use std::sync::Arc;
 use garage_table::replication::TableShardedReplication;
 use garage_table::*;
 
-use garage_api_common::helpers::key_after_prefix;
-
-use crate::error::*;
+use crate::helpers::key_after_prefix;
+use crate::k2v::error::*;
 
 /// Read range in a Garage table.
 /// Returns (entries, more?, nextStart)

@@ -1,11 +1,11 @@
-use crate::error::*;
+use crate::k2v::error::*;
 
 use std::borrow::Cow;
 
 use hyper::{Method, Request};
 
-use garage_api_common::helpers::Authorization;
-use garage_api_common::router_macros::{generateQueryParameters, router_match};
+use crate::helpers::Authorization;
+use crate::router_macros::{generateQueryParameters, router_match};
 
 router_match! {@func
 
17 src/api/lib.rs Normal file

@@ -0,0 +1,17 @@
+//! Crate for serving a S3 compatible API
+#[macro_use]
+extern crate tracing;
+
+pub mod common_error;
+
+mod encoding;
+pub mod generic_server;
+pub mod helpers;
+mod router_macros;
+/// This mode is public only to help testing. Don't expect stability here
+pub mod signature;
+
+pub mod admin;
+#[cfg(feature = "k2v")]
+pub mod k2v;
+pub mod s3;
@@ -1,6 +1,5 @@
 /// This macro is used to generate very repetitive match {} blocks in this module
 /// It is _not_ made to be used anywhere else
-#[macro_export]
 macro_rules! router_match {
     (@match $enum:expr , [ $($endpoint:ident,)* ]) => {{
         // usage: router_match {@match my_enum, [ VariantWithField1, VariantWithField2 ..] }
@@ -134,7 +133,6 @@ macro_rules! router_match {
 
 /// This macro is used to generate part of the code in this module. It must be called only one, and
 /// is useless outside of this module.
-#[macro_export]
 macro_rules! generateQueryParameters {
     (
         keywords: [ $($kw_param:expr => $kw_name: ident),* ],
@@ -206,7 +204,7 @@ macro_rules! generateQueryParameters {
         }
 
         /// Get an error message in case not all parameters where used when extracting them to
-        /// build an Endpoint variant
+        /// build an Enpoint variant
         fn nonempty_message(&self) -> Option<&str> {
             if self.keyword.is_some() {
                 Some("Keyword not used")
@@ -222,5 +220,5 @@ macro_rules! generateQueryParameters {
     }
 }
 
-pub use generateQueryParameters;
-pub use router_match;
+pub(crate) use generateQueryParameters;
+pub(crate) use router_match;
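The visibility change above follows from the crate layout: `#[macro_export]` hoists a `macro_rules!` macro to the crate root so other crates can use it (needed in main's multi-crate split), while a plain `pub(crate) use` re-export suffices when every caller lives in the same crate (the branch). A toy sketch of the in-crate style, with a made-up macro name:

// Same-crate macro re-export, as the branch side does.
macro_rules! square {
    ($x:expr) => {
        $x * $x
    };
}
pub(crate) use square;

// Other modules in this crate can now `use crate::this_module::square;`.
fn demo() -> i32 {
    square!(7)
}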
@@ -14,33 +14,33 @@ use garage_util::socket_address::UnixOrTCPSocketAddress;
 use garage_model::garage::Garage;
 use garage_model::key_table::Key;
 
-use garage_api_common::cors::*;
-use garage_api_common::generic_server::*;
-use garage_api_common::helpers::*;
-use garage_api_common::signature::verify_request;
-
-use crate::bucket::*;
-use crate::copy::*;
-use crate::cors::*;
-use crate::delete::*;
-use crate::error::*;
-use crate::get::*;
-use crate::lifecycle::*;
-use crate::list::*;
-use crate::multipart::*;
-use crate::post_object::handle_post_object;
-use crate::put::*;
-use crate::router::Endpoint;
-use crate::website::*;
-
-pub use garage_api_common::signature::streaming::ReqBody;
+use crate::generic_server::*;
+use crate::s3::error::*;
+
+use crate::signature::verify_request;
+
+use crate::helpers::*;
+use crate::s3::bucket::*;
+use crate::s3::copy::*;
+use crate::s3::cors::*;
+use crate::s3::delete::*;
+use crate::s3::get::*;
+use crate::s3::lifecycle::*;
+use crate::s3::list::*;
+use crate::s3::multipart::*;
+use crate::s3::post_object::handle_post_object;
+use crate::s3::put::*;
+use crate::s3::router::Endpoint;
+use crate::s3::website::*;
+
+pub use crate::signature::streaming::ReqBody;
 pub type ResBody = BoxBody<Error>;
 
 pub struct S3ApiServer {
     garage: Arc<Garage>,
 }
 
-pub struct S3ApiEndpoint {
+pub(crate) struct S3ApiEndpoint {
     bucket_name: Option<String>,
     endpoint: Endpoint,
 }
@@ -150,8 +150,7 @@ impl ApiHandler for S3ApiServer {
         let bucket_id = garage
             .bucket_helper()
             .resolve_bucket(&bucket_name, &api_key)
-            .await
-            .map_err(pass_helper_error)?;
+            .await?;
 
         let bucket = garage
             .bucket_helper()
             .get_existing_bucket(bucket_id)
@@ -13,13 +13,12 @@ use garage_util::crdt::*;
 use garage_util::data::*;
 use garage_util::time::*;
 
-use garage_api_common::common_error::CommonError;
-use garage_api_common::helpers::*;
-use garage_api_common::signature::verify_signed_content;
-
-use crate::api_server::{ReqBody, ResBody};
-use crate::error::*;
-use crate::xml as s3_xml;
+use crate::common_error::CommonError;
+use crate::helpers::*;
+use crate::s3::api_server::{ReqBody, ResBody};
+use crate::s3::error::*;
+use crate::s3::xml as s3_xml;
+use crate::signature::verify_signed_content;
 
 pub fn handle_get_bucket_location(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
     let ReqCtx { garage, .. } = ctx;
@@ -15,7 +15,7 @@ use garage_util::error::OkOrMessage;
 
 use garage_model::s3::object_table::*;
 
-use crate::error::*;
+use crate::s3::error::*;
 
 pub const X_AMZ_CHECKSUM_ALGORITHM: HeaderName =
     HeaderName::from_static("x-amz-checksum-algorithm");
@@ -340,8 +340,8 @@ pub(crate) fn request_checksum_value(
     Ok(ret.pop())
 }
 
-/// Checks for the presence of x-amz-checksum-algorithm
-/// if so extract the corresponding x-amz-checksum-* value
+/// Checks for the presense of x-amz-checksum-algorithm
+/// if so extract the corrseponding x-amz-checksum-* value
 pub(crate) fn request_checksum_algorithm_value(
     headers: &HeaderMap<HeaderValue>,
 ) -> Result<Option<ChecksumValue>, Error> {
@@ -20,16 +20,15 @@ use garage_model::s3::mpu_table::*;
 use garage_model::s3::object_table::*;
 use garage_model::s3::version_table::*;
 
-use garage_api_common::helpers::*;
-
-use crate::api_server::{ReqBody, ResBody};
-use crate::checksum::*;
-use crate::encryption::EncryptionParams;
-use crate::error::*;
-use crate::get::full_object_byte_stream;
-use crate::multipart;
-use crate::put::{get_headers, save_stream, ChecksumMode, SaveStreamResult};
-use crate::xml::{self as s3_xml, xmlns_tag};
+use crate::helpers::*;
+use crate::s3::api_server::{ReqBody, ResBody};
+use crate::s3::checksum::*;
+use crate::s3::encryption::EncryptionParams;
+use crate::s3::error::*;
+use crate::s3::get::full_object_byte_stream;
+use crate::s3::multipart;
+use crate::s3::put::{get_headers, save_stream, ChecksumMode, SaveStreamResult};
+use crate::s3::xml::{self as s3_xml, xmlns_tag};
 
 // -------- CopyObject ---------
 
@@ -64,7 +63,7 @@ pub async fn handle_copy(
     let source_checksum_algorithm = source_checksum.map(|x| x.algorithm());
 
     // If source object has a checksum, the destination object must as well.
-    // The x-amz-checksum-algorithm header allows to change that algorithm,
+    // The x-amz-checksum-algorihtm header allows to change that algorithm,
     // but if it is absent, we must use the same as before
     let checksum_algorithm = checksum_algorithm.or(source_checksum_algorithm);
 
@@ -656,8 +655,7 @@ async fn get_copy_source(ctx: &ReqCtx, req: &Request<ReqBody>) -> Result<Object,
     let source_bucket_id = garage
         .bucket_helper()
         .resolve_bucket(&source_bucket.to_string(), api_key)
-        .await
-        .map_err(pass_helper_error)?;
+        .await?;
 
     if !api_key.allow_read(&source_bucket_id) {
         return Err(Error::forbidden(format!(
@@ -863,7 +861,7 @@ pub struct CopyPartResult {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::xml::to_xml_with_header;
+    use crate::s3::xml::to_xml_with_header;
 
     #[test]
     fn copy_object_result() -> Result<(), Error> {
@@ -1,21 +1,30 @@
 use quick_xml::de::from_reader;
+use std::sync::Arc;
 
-use hyper::{header::HeaderName, Method, Request, Response, StatusCode};
+use http::header::{
+    ACCESS_CONTROL_ALLOW_HEADERS, ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN,
+    ACCESS_CONTROL_EXPOSE_HEADERS, ACCESS_CONTROL_REQUEST_HEADERS, ACCESS_CONTROL_REQUEST_METHOD,
+};
+use hyper::{
+    body::Body, body::Incoming as IncomingBody, header::HeaderName, Method, Request, Response,
+    StatusCode,
+};
 
 use http_body_util::BodyExt;
 
 use serde::{Deserialize, Serialize};
 
-use garage_model::bucket_table::{Bucket, CorsRule as GarageCorsRule};
+use crate::common_error::CommonError;
+use crate::helpers::*;
+use crate::s3::api_server::{ReqBody, ResBody};
+use crate::s3::error::*;
+use crate::s3::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
+use crate::signature::verify_signed_content;
+
+use garage_model::bucket_table::{Bucket, BucketParams, CorsRule as GarageCorsRule};
+use garage_model::garage::Garage;
 use garage_util::data::*;
 
-use garage_api_common::helpers::*;
-use garage_api_common::signature::verify_signed_content;
-
-use crate::api_server::{ReqBody, ResBody};
-use crate::error::*;
-use crate::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
-
 pub async fn handle_get_cors(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
     let ReqCtx { bucket_params, .. } = ctx;
     if let Some(cors) = bucket_params.cors_config.get() {
@@ -90,6 +99,154 @@ pub async fn handle_put_cors(
         .body(empty_body())?)
 }
 
+pub async fn handle_options_api(
+    garage: Arc<Garage>,
+    req: &Request<IncomingBody>,
+    bucket_name: Option<String>,
+) -> Result<Response<EmptyBody>, CommonError> {
+    // FIXME: CORS rules of buckets with local aliases are
+    // not taken into account.
+
+    // If the bucket name is a global bucket name,
+    // we try to apply the CORS rules of that bucket.
+    // If a user has a local bucket name that has
+    // the same name, its CORS rules won't be applied
+    // and will be shadowed by the rules of the globally
+    // existing bucket (but this is inevitable because
+    // OPTIONS calls are not auhtenticated).
+    if let Some(bn) = bucket_name {
+        let helper = garage.bucket_helper();
+        let bucket_id = helper.resolve_global_bucket_name(&bn).await?;
+        if let Some(id) = bucket_id {
+            let bucket = garage.bucket_helper().get_existing_bucket(id).await?;
+            let bucket_params = bucket.state.into_option().unwrap();
+            handle_options_for_bucket(req, &bucket_params)
+        } else {
+            // If there is a bucket name in the request, but that name
+            // does not correspond to a global alias for a bucket,
+            // then it's either a non-existing bucket or a local bucket.
+            // We have no way of knowing, because the request is not
+            // authenticated and thus we can't resolve local aliases.
+            // We take the permissive approach of allowing everything,
+            // because we don't want to prevent web apps that use
+            // local bucket names from making API calls.
+            Ok(Response::builder()
+                .header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
+                .header(ACCESS_CONTROL_ALLOW_METHODS, "*")
+                .status(StatusCode::OK)
+                .body(EmptyBody::new())?)
+        }
+    } else {
+        // If there is no bucket name in the request,
+        // we are doing a ListBuckets call, which we want to allow
+        // for all origins.
+        Ok(Response::builder()
+            .header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
+            .header(ACCESS_CONTROL_ALLOW_METHODS, "GET")
+            .status(StatusCode::OK)
+            .body(EmptyBody::new())?)
+    }
+}
+
+pub fn handle_options_for_bucket(
+    req: &Request<IncomingBody>,
+    bucket_params: &BucketParams,
+) -> Result<Response<EmptyBody>, CommonError> {
+    let origin = req
+        .headers()
+        .get("Origin")
+        .ok_or_bad_request("Missing Origin header")?
+        .to_str()?;
+    let request_method = req
+        .headers()
+        .get(ACCESS_CONTROL_REQUEST_METHOD)
+        .ok_or_bad_request("Missing Access-Control-Request-Method header")?
+        .to_str()?;
+    let request_headers = match req.headers().get(ACCESS_CONTROL_REQUEST_HEADERS) {
+        Some(h) => h.to_str()?.split(',').map(|h| h.trim()).collect::<Vec<_>>(),
+        None => vec![],
+    };
+
+    if let Some(cors_config) = bucket_params.cors_config.get() {
+        let matching_rule = cors_config
+            .iter()
+            .find(|rule| cors_rule_matches(rule, origin, request_method, request_headers.iter()));
+        if let Some(rule) = matching_rule {
+            let mut resp = Response::builder()
+                .status(StatusCode::OK)
+                .body(EmptyBody::new())?;
+            add_cors_headers(&mut resp, rule).ok_or_internal_error("Invalid CORS configuration")?;
+            return Ok(resp);
+        }
+    }
+
+    Err(CommonError::Forbidden(
+        "This CORS request is not allowed.".into(),
+    ))
+}
+
+pub fn find_matching_cors_rule<'a>(
+    bucket_params: &'a BucketParams,
+    req: &Request<impl Body>,
+) -> Result<Option<&'a GarageCorsRule>, Error> {
+    if let Some(cors_config) = bucket_params.cors_config.get() {
+        if let Some(origin) = req.headers().get("Origin") {
+            let origin = origin.to_str()?;
+            let request_headers = match req.headers().get(ACCESS_CONTROL_REQUEST_HEADERS) {
+                Some(h) => h.to_str()?.split(',').map(|h| h.trim()).collect::<Vec<_>>(),
+                None => vec![],
+            };
+            return Ok(cors_config.iter().find(|rule| {
+                cors_rule_matches(rule, origin, req.method().as_ref(), request_headers.iter())
+            }));
+        }
+    }
+    Ok(None)
+}
+
+fn cors_rule_matches<'a, HI, S>(
+    rule: &GarageCorsRule,
+    origin: &'a str,
+    method: &'a str,
+    mut request_headers: HI,
+) -> bool
+where
+    HI: Iterator<Item = S>,
+    S: AsRef<str>,
+{
+    rule.allow_origins.iter().any(|x| x == "*" || x == origin)
+        && rule.allow_methods.iter().any(|x| x == "*" || x == method)
+        && request_headers.all(|h| {
+            rule.allow_headers
+                .iter()
+                .any(|x| x == "*" || x == h.as_ref())
+        })
+}
+
+pub fn add_cors_headers(
+    resp: &mut Response<impl Body>,
+    rule: &GarageCorsRule,
+) -> Result<(), http::header::InvalidHeaderValue> {
+    let h = resp.headers_mut();
+    h.insert(
+        ACCESS_CONTROL_ALLOW_ORIGIN,
+        rule.allow_origins.join(", ").parse()?,
+    );
+    h.insert(
+        ACCESS_CONTROL_ALLOW_METHODS,
+        rule.allow_methods.join(", ").parse()?,
+    );
+    h.insert(
+        ACCESS_CONTROL_ALLOW_HEADERS,
+        rule.allow_headers.join(", ").parse()?,
+    );
+    h.insert(
+        ACCESS_CONTROL_EXPOSE_HEADERS,
+        rule.expose_headers.join(", ").parse()?,
+    );
+    Ok(())
+}
+
 // ---- SERIALIZATION AND DESERIALIZATION TO/FROM S3 XML ----
 
 #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
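The rule-matching logic added in the hunk above can be exercised in isolation. A minimal sketch under simplified assumptions — the struct below mirrors only the fields of `GarageCorsRule` used by `cors_rule_matches`, and all values are invented for illustration:

struct CorsRule {
    allow_origins: Vec<String>,
    allow_methods: Vec<String>,
    allow_headers: Vec<String>,
}

// Same matching semantics as the diff: "*" is a wildcard, every requested
// header must be covered, and origin/method need one matching entry each.
fn rule_matches(rule: &CorsRule, origin: &str, method: &str, req_headers: &[&str]) -> bool {
    rule.allow_origins.iter().any(|x| x == "*" || x == origin)
        && rule.allow_methods.iter().any(|x| x == "*" || x == method)
        && req_headers
            .iter()
            .all(|h| rule.allow_headers.iter().any(|x| x == "*" || x == h))
}

fn demo() -> bool {
    let rule = CorsRule {
        allow_origins: vec!["https://app.example.com".into()],
        allow_methods: vec!["GET".into(), "PUT".into()],
        allow_headers: vec!["*".into()],
    };
    // A browser preflight: PUT from the allowed origin with one custom header.
    rule_matches(&rule, "https://app.example.com", "PUT", &["x-custom-header"])
}

Note the deliberate asymmetry in the diff: an OPTIONS call for an unknown bucket name answers permissively, because unauthenticated requests cannot resolve local bucket aliases.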
@@ -5,13 +5,12 @@ use garage_util::data::*;
 
 use garage_model::s3::object_table::*;
 
-use garage_api_common::helpers::*;
-use garage_api_common::signature::verify_signed_content;
-
-use crate::api_server::{ReqBody, ResBody};
-use crate::error::*;
-use crate::put::next_timestamp;
-use crate::xml as s3_xml;
+use crate::helpers::*;
+use crate::s3::api_server::{ReqBody, ResBody};
+use crate::s3::error::*;
+use crate::s3::put::next_timestamp;
+use crate::s3::xml as s3_xml;
+use crate::signature::verify_signed_content;
 
 async fn handle_delete_internal(ctx: &ReqCtx, key: &str) -> Result<(Uuid, Uuid), Error> {
     let ReqCtx {

@@ -28,10 +28,9 @@ use garage_util::migrate::Migrate;
 use garage_model::garage::Garage;
 use garage_model::s3::object_table::{ObjectVersionEncryption, ObjectVersionMetaInner};
 
-use garage_api_common::common_error::*;
-
-use crate::checksum::Md5Checksum;
-use crate::error::Error;
+use crate::common_error::*;
+use crate::s3::checksum::Md5Checksum;
+use crate::s3::error::Error;
 
 const X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM: HeaderName =
     HeaderName::from_static("x-amz-server-side-encryption-customer-algorithm");
@@ -4,30 +4,19 @@ use err_derive::Error;
 use hyper::header::HeaderValue;
 use hyper::{HeaderMap, StatusCode};
 
-use garage_model::helper::error::Error as HelperError;
-
-pub(crate) use garage_api_common::common_error::pass_helper_error;
-
-use garage_api_common::common_error::{
-    commonErrorDerivative, helper_error_as_internal, CommonError,
-};
-
-pub use garage_api_common::common_error::{
-    CommonErrorDerivative, OkOrBadRequest, OkOrInternalError,
-};
-
-use garage_api_common::generic_server::ApiError;
-use garage_api_common::helpers::*;
-use garage_api_common::signature::error::Error as SignatureError;
-
-use crate::xml as s3_xml;
+use crate::common_error::CommonError;
+pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInternalError};
+use crate::generic_server::ApiError;
+use crate::helpers::*;
+use crate::s3::xml as s3_xml;
+use crate::signature::error::Error as SignatureError;
 
 /// Errors of this crate
 #[derive(Debug, Error)]
 pub enum Error {
     #[error(display = "{}", _0)]
     /// Error from common error
-    Common(#[error(source)] CommonError),
+    Common(CommonError),
 
     // Category: cannot process
     /// Authorization Header Malformed
@@ -89,16 +78,17 @@ pub enum Error {
     NotImplemented(String),
 }
 
-commonErrorDerivative!(Error);
-
-// Helper errors are always passed as internal errors by default.
-// To pass the specific error code back to the client, use `pass_helper_error`.
-impl From<HelperError> for Error {
-    fn from(err: HelperError) -> Error {
-        Error::Common(helper_error_as_internal(err))
+impl<T> From<T> for Error
+where
+    CommonError: From<T>,
+{
+    fn from(err: T) -> Self {
+        Error::Common(CommonError::from(err))
     }
 }
 
+impl CommonErrorDerivative for Error {}
+
 impl From<roxmltree::Error> for Error {
     fn from(err: roxmltree::Error) -> Self {
         Self::InvalidXml(format!("{}", err))
@@ -25,12 +25,11 @@ use garage_model::garage::Garage;
 use garage_model::s3::object_table::*;
 use garage_model::s3::version_table::*;
 
-use garage_api_common::helpers::*;
-
-use crate::api_server::ResBody;
-use crate::checksum::{add_checksum_response_headers, X_AMZ_CHECKSUM_MODE};
-use crate::encryption::EncryptionParams;
-use crate::error::*;
+use crate::helpers::*;
+use crate::s3::api_server::ResBody;
+use crate::s3::checksum::{add_checksum_response_headers, X_AMZ_CHECKSUM_MODE};
+use crate::s3::encryption::EncryptionParams;
+use crate::s3::error::*;
 
 const X_AMZ_MP_PARTS_COUNT: &str = "x-amz-mp-parts-count";
 
@@ -69,11 +68,14 @@ fn object_headers(
     // See: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingMetadata.html
     let mut headers_by_name = BTreeMap::new();
     for (name, value) in meta_inner.headers.iter() {
-        let name_lower = name.to_ascii_lowercase();
-        headers_by_name
-            .entry(name_lower)
-            .or_insert(vec![])
-            .push(value.as_str());
+        match headers_by_name.get_mut(name) {
+            None => {
+                headers_by_name.insert(name, vec![value.as_str()]);
+            }
+            Some(headers) => {
+                headers.push(value.as_str());
+            }
+        }
     }
 
     for (name, values) in headers_by_name {
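The second hunk above contrasts two ways of grouping values by key: main's `entry()` one-liner (which also normalizes the key to lowercase) versus the branch's explicit match on `get_mut()`. A small standalone sketch of the `entry()` style, with invented input data:

use std::collections::BTreeMap;

// entry() inserts the Vec on first sight and returns &mut Vec either way,
// so one chained call replaces the None/Some match from the branch side.
fn group_headers(pairs: &[(String, String)]) -> BTreeMap<String, Vec<String>> {
    let mut by_name: BTreeMap<String, Vec<String>> = BTreeMap::new();
    for (name, value) in pairs {
        by_name
            .entry(name.to_ascii_lowercase()) // normalize, as main does
            .or_insert_with(Vec::new)
            .push(value.clone());
    }
    by_name
}

Grouping matters because the same user-metadata header may appear several times and must be emitted once with all its values.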
@@ -5,12 +5,11 @@ use hyper::{Request, Response, StatusCode};
 
 use serde::{Deserialize, Serialize};
 
-use garage_api_common::helpers::*;
-use garage_api_common::signature::verify_signed_content;
-
-use crate::api_server::{ReqBody, ResBody};
-use crate::error::*;
-use crate::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
+use crate::helpers::*;
+use crate::s3::api_server::{ReqBody, ResBody};
+use crate::s3::error::*;
+use crate::s3::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
+use crate::signature::verify_signed_content;
 
 use garage_model::bucket_table::{
     parse_lifecycle_date, Bucket, LifecycleExpiration as GarageLifecycleExpiration,

@@ -13,14 +13,13 @@ use garage_model::s3::object_table::*;
 
 use garage_table::EnumerationOrder;
 
-use garage_api_common::encoding::*;
-use garage_api_common::helpers::*;
-
-use crate::api_server::{ReqBody, ResBody};
-use crate::encryption::EncryptionParams;
-use crate::error::*;
-use crate::multipart as s3_multipart;
-use crate::xml as s3_xml;
+use crate::encoding::*;
+use crate::helpers::*;
+use crate::s3::api_server::{ReqBody, ResBody};
+use crate::s3::encryption::EncryptionParams;
+use crate::s3::error::*;
+use crate::s3::multipart as s3_multipart;
+use crate::s3::xml as s3_xml;
 
 const DUMMY_NAME: &str = "Dummy Key";
 const DUMMY_KEY: &str = "GKDummyKey";
@@ -399,7 +398,7 @@ enum ExtractionResult {
         key: String,
     },
     // Fallback key is used for legacy APIs that only support
-    // exclusive pagination (and not inclusive one).
+    // exlusive pagination (and not inclusive one).
     SkipTo {
         key: String,
         fallback_key: Option<String>,
@@ -409,7 +408,7 @@ enum ExtractionResult {
 #[derive(PartialEq, Clone, Debug)]
 enum RangeBegin {
     // Fallback key is used for legacy APIs that only support
-    // exclusive pagination (and not inclusive one).
+    // exlusive pagination (and not inclusive one).
     IncludingKey {
         key: String,
         fallback_key: Option<String>,
@@ -1,6 +1,3 @@
-#[macro_use]
-extern crate tracing;
-
 pub mod api_server;
 pub mod error;
 

@@ -15,15 +15,14 @@ use garage_model::s3::mpu_table::*;
 use garage_model::s3::object_table::*;
 use garage_model::s3::version_table::*;
 
-use garage_api_common::helpers::*;
-use garage_api_common::signature::verify_signed_content;
-
-use crate::api_server::{ReqBody, ResBody};
-use crate::checksum::*;
-use crate::encryption::EncryptionParams;
-use crate::error::*;
-use crate::put::*;
-use crate::xml as s3_xml;
+use crate::helpers::*;
+use crate::s3::api_server::{ReqBody, ResBody};
+use crate::s3::checksum::*;
+use crate::s3::encryption::EncryptionParams;
+use crate::s3::error::*;
+use crate::s3::put::*;
+use crate::s3::xml as s3_xml;
+use crate::signature::verify_signed_content;
 
 // ----
 
@@ -16,16 +16,15 @@ use serde::Deserialize;
 use garage_model::garage::Garage;
 use garage_model::s3::object_table::*;
 
-use garage_api_common::cors::*;
-use garage_api_common::helpers::*;
-use garage_api_common::signature::payload::{verify_v4, Authorization};
-
-use crate::api_server::ResBody;
-use crate::checksum::*;
-use crate::encryption::EncryptionParams;
-use crate::error::*;
-use crate::put::{get_headers, save_stream, ChecksumMode};
-use crate::xml as s3_xml;
+use crate::helpers::*;
+use crate::s3::api_server::ResBody;
+use crate::s3::checksum::*;
+use crate::s3::cors::*;
+use crate::s3::encryption::EncryptionParams;
+use crate::s3::error::*;
+use crate::s3::put::{get_headers, save_stream, ChecksumMode};
+use crate::s3::xml as s3_xml;
+use crate::signature::payload::{verify_v4, Authorization};
 
 pub async fn handle_post_object(
     garage: Arc<Garage>,
@@ -108,8 +107,7 @@ pub async fn handle_post_object(
     let bucket_id = garage
         .bucket_helper()
         .resolve_bucket(&bucket_name, &api_key)
-        .await
-        .map_err(pass_helper_error)?;
+        .await?;
 
     if !api_key.allow_write(&bucket_id) {
         return Err(Error::forbidden("Operation is not allowed for this key."));
@@ -215,7 +213,7 @@ pub async fn handle_post_object(
     }
 
     // if we ever start supporting ACLs, we likely want to map "acl" to x-amz-acl" somewhere
-    // around here to make sure the rest of the machinery takes our acl into account.
+    // arround here to make sure the rest of the machinery takes our acl into account.
     let headers = get_headers(&params)?;
 
     let expected_checksums = ExpectedChecksums {
@@ -30,12 +30,11 @@ use garage_model::s3::block_ref_table::*;
 use garage_model::s3::object_table::*;
 use garage_model::s3::version_table::*;
 
-use garage_api_common::helpers::*;
-
-use crate::api_server::{ReqBody, ResBody};
-use crate::checksum::*;
-use crate::encryption::EncryptionParams;
-use crate::error::*;
+use crate::helpers::*;
+use crate::s3::api_server::{ReqBody, ResBody};
+use crate::s3::checksum::*;
+use crate::s3::encryption::EncryptionParams;
+use crate::s3::error::*;
 
 const PUT_BLOCKS_MAX_PARALLEL: usize = 3;
 
@@ -623,7 +622,7 @@ pub(crate) fn get_headers(headers: &HeaderMap<HeaderValue>) -> Result<HeaderList
     for (name, value) in headers.iter() {
         if name.as_str().starts_with("x-amz-meta-") {
             ret.push((
-                name.as_str().to_ascii_lowercase(),
+                name.to_string(),
                 std::str::from_utf8(value.as_bytes())?.to_string(),
             ));
         }
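In the `get_headers` hunk above, main lowercases the metadata key while the branch keeps `name.to_string()` as-is. A toy sketch of the extraction with the lowercase normalization, using invented header data (header names are case-insensitive on the wire, and S3 conventionally returns user metadata keys in lowercase):

use std::collections::HashMap;

// Collect x-amz-meta-* pairs from (name, value) header tuples,
// normalizing names to ASCII lowercase as the main side does.
fn user_metadata(headers: &[(String, String)]) -> HashMap<String, String> {
    headers
        .iter()
        .filter(|(name, _)| name.to_ascii_lowercase().starts_with("x-amz-meta-"))
        .map(|(name, value)| (name.to_ascii_lowercase(), value.clone()))
        .collect()
}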
@@ -3,10 +3,9 @@ use std::borrow::Cow;
 use hyper::header::HeaderValue;
 use hyper::{HeaderMap, Method, Request};
 
-use garage_api_common::helpers::Authorization;
-use garage_api_common::router_macros::{generateQueryParameters, router_match};
-
-use crate::error::*;
+use crate::helpers::Authorization;
+use crate::router_macros::{generateQueryParameters, router_match};
+use crate::s3::error::*;
 
 router_match! {@func
 

@@ -4,16 +4,15 @@ use http_body_util::BodyExt;
 use hyper::{Request, Response, StatusCode};
 use serde::{Deserialize, Serialize};
 
+use crate::helpers::*;
+use crate::s3::api_server::{ReqBody, ResBody};
+use crate::s3::error::*;
+use crate::s3::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
+use crate::signature::verify_signed_content;
+
 use garage_model::bucket_table::*;
 use garage_util::data::*;
 
-use garage_api_common::helpers::*;
-use garage_api_common::signature::verify_signed_content;
-
-use crate::api_server::{ReqBody, ResBody};
-use crate::error::*;
-use crate::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
-
 pub async fn handle_get_website(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
     let ReqCtx { bucket_params, .. } = ctx;
     if let Some(website) = bucket_params.website_config.get() {
@@ -277,7 +276,7 @@ impl Redirect {
                 return Err(Error::bad_request("Bad XML: invalid protocol"));
             }
         }
-        // TODO there are probably more invalid cases, but which ones?
+        // TODO there are probably more invalide cases, but which ones?
         Ok(())
     }
 }
@@ -1,7 +1,7 @@
 use quick_xml::se::to_string;
 use serde::{Deserialize, Serialize, Serializer};
 
-use crate::error::Error as ApiError;
+use crate::s3::error::Error as ApiError;
 
 pub fn to_xml_with_header<T: Serialize>(x: &T) -> Result<String, ApiError> {
     let mut xml = r#"<?xml version="1.0" encoding="UTF-8"?>"#.to_string();
@@ -47,8 +47,8 @@ pub async fn check_payload_signature(
     let query = parse_query_map(request.uri())?;
 
     if query.contains_key(&X_AMZ_ALGORITHM) {
-        // We check for presigned-URL-style authentication first, because
-        // the browser or something else could inject an Authorization header
+        // We check for presigned-URL-style authentification first, because
+        // the browser or someting else could inject an Authorization header
         // that is totally unrelated to AWS signatures.
         check_presigned_signature(garage, service, request, query).await
     } else if request.headers().contains_key(AUTHORIZATION) {
@@ -132,7 +132,7 @@ async fn check_presigned_signature(
     let authorization = Authorization::parse_presigned(&algorithm.value, &query)?;
 
     // Verify that all necessary request headers are included in signed_headers
-    // For AWSv4 pre-signed URLs, the following must be included:
+    // For AWSv4 pre-signed URLs, the following must be incldued:
     // - the Host header (mandatory)
     // - all x-amz-* headers used in the request
     let signed_headers = split_signed_headers(&authorization)?;
@@ -306,7 +306,7 @@ pub fn canonical_request(
     // Note that there is also the issue of path normalization, which I hope is unrelated to the
     // one of URI-encoding. At least in aws-sigv4 both parameters can be set independently,
     // and rusoto_signature does not seem to do any effective path normalization, even though
-    // it mentions it in the comments (same link to the source code as above).
+    // it mentions it in the comments (same link to the souce code as above).
     // We make the explicit choice of NOT normalizing paths in the K2V API because doing so
     // would make non-normalized paths invalid K2V partition keys, and we don't want that.
     let canonical_uri: std::borrow::Cow<str> = if service != "s3" {
@@ -518,7 +518,7 @@ impl Authorization {
         })
     }
 
-    pub fn parse_form(params: &HeaderMap) -> Result<Self, Error> {
+    pub(crate) fn parse_form(params: &HeaderMap) -> Result<Self, Error> {
         let algorithm = params
             .get(X_AMZ_ALGORITHM)
             .ok_or_bad_request("Missing X-Amz-Algorithm header")?
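The first hunk above encodes a dispatch order worth spelling out: query-string (presigned) credentials are checked before the `Authorization` header, since an unrelated `Authorization` header may be injected alongside a presigned URL. A rough sketch of that decision, with simplified stand-in types rather than Garage's real request handling:

// Simplified classification mirroring the order in check_payload_signature.
enum AuthKind {
    Presigned,
    HeaderV4,
    Anonymous,
}

fn classify(query_has_algorithm: bool, has_authorization_header: bool) -> AuthKind {
    if query_has_algorithm {
        // X-Amz-Algorithm in the query string wins, whatever the headers say.
        AuthKind::Presigned
    } else if has_authorization_header {
        AuthKind::HeaderV4
    } else {
        AuthKind::Anonymous
    }
}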
@@ -34,8 +34,10 @@ async-compression.workspace = true
 zstd.workspace = true
 
 serde.workspace = true
+serde_bytes.workspace = true
 
 futures.workspace = true
+futures-util.workspace = true
 tokio.workspace = true
 tokio-util.workspace = true
 

@@ -279,8 +279,7 @@ impl DataLayout {
         u16::from_be_bytes([
             hash.as_slice()[HASH_DRIVE_BYTES.0],
             hash.as_slice()[HASH_DRIVE_BYTES.1],
-        ]) as usize
-            % DRIVE_NPART
+        ]) as usize % DRIVE_NPART
     }
 
     fn block_dir_from(&self, hash: &Hash, dir: &PathBuf) -> PathBuf {
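The second hunk above is formatting-only, but the computation it touches is compact enough to show whole. A sketch of the idea: take two fixed bytes of the block hash, read them as a big-endian u16, and reduce modulo the number of partitions per drive. The constant values below are illustrative only, not Garage's actual configuration:

// Hypothetical constants; the diff only shows that HASH_DRIVE_BYTES selects
// two byte positions and DRIVE_NPART is the partition count.
const HASH_DRIVE_BYTES: (usize, usize) = (2, 3);
const DRIVE_NPART: usize = 1024;

fn partition_from(hash: &[u8; 32]) -> usize {
    u16::from_be_bytes([hash[HASH_DRIVE_BYTES.0], hash[HASH_DRIVE_BYTES.1]]) as usize
        % DRIVE_NPART
}

Using fixed hash bytes keeps the block-to-partition mapping deterministic across nodes without any coordination.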
@@ -105,7 +105,7 @@ impl BlockResyncManager {
         }
     }
 
-    /// Get length of resync queue
+    /// Get lenght of resync queue
     pub fn queue_len(&self) -> Result<usize, Error> {
         Ok(self.queue.len()?)
     }
@@ -185,10 +185,10 @@ impl BlockResyncManager {
     //
     // - resync.errors: a tree that indicates for each block
     //   if the last resync resulted in an error, and if so,
-    //   the following two information (see the ErrorCounter struct):
+    //   the following two informations (see the ErrorCounter struct):
     //   - how many consecutive resync errors for this block?
     //   - when was the last try?
-    //   These two information are used to implement an
+    //   These two informations are used to implement an
     //   exponential backoff retry strategy.
     //   The key in this tree is the 32-byte hash of the block,
    //   and the value is the encoded ErrorCounter value.
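The comment in the hunk above describes the exponential-backoff strategy without showing it. A minimal sketch of the idea it names — the base delay, cap, and function name here are made up, not Garage's actual `ErrorCounter` implementation:

use std::time::Duration;

// Delay before the next resync attempt doubles with each consecutive error.
fn next_try_delay(consecutive_errors: u32) -> Duration {
    let base = Duration::from_secs(60);
    let capped = consecutive_errors.min(10); // cap the exponent to avoid overflow
    base * 2u32.saturating_pow(capped)
}

Combined with the "when was the last try?" timestamp, this is enough to decide whether a failing block is due for another attempt.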
@@ -13,6 +13,7 @@ path = "lib.rs"
 
 [dependencies]
 err-derive.workspace = true
+hexdump.workspace = true
 tracing.workspace = true
 
 heed = { workspace = true, optional = true }

@@ -122,7 +122,7 @@ impl Db {
             _ => unreachable!(),
         },
         Err(TxError::Db(e2)) => match ret {
-            // Ok was stored -> the error occurred when finalizing
+            // Ok was stored -> the error occured when finalizing
             // transaction
             Ok(_) => Err(TxError::Db(e2)),
             // An error was already stored: that's the one we want to

@@ -233,7 +233,7 @@ impl<'a> LmdbTx<'a> {
     fn get_tree(&self, i: usize) -> TxOpResult<&Database> {
         self.trees.get(i).ok_or_else(|| {
             TxOpError(Error(
-                "invalid tree id (it might have been opened after the transaction started)".into(),
+                "invalid tree id (it might have been openned after the transaction started)".into(),
             ))
         })
     }

@@ -142,14 +142,11 @@ impl IDb for SqliteDb {
     fn snapshot(&self, to: &PathBuf) -> Result<()> {
         fn progress(p: rusqlite::backup::Progress) {
             let percent = (p.pagecount - p.remaining) * 100 / p.pagecount;
-            info!("Sqlite snapshot progress: {}%", percent);
+            info!("Sqlite snapshot progres: {}%", percent);
         }
-        std::fs::create_dir_all(to)?;
-        let mut path = to.clone();
-        path.push("db.sqlite");
         self.db
             .get()?
-            .backup(rusqlite::DatabaseName::Main, path, Some(progress))?;
+            .backup(rusqlite::DatabaseName::Main, to, Some(progress))?;
         Ok(())
     }
 
@@ -307,7 +304,7 @@ impl<'a> SqliteTx<'a> {
     fn get_tree(&self, i: usize) -> TxOpResult<&'_ str> {
         self.trees.get(i).map(Arc::as_ref).ok_or_else(|| {
             TxOpError(Error(
-                "invalid tree id (it might have been opened after the transaction started)".into(),
+                "invalid tree id (it might have been openned after the transaction started)".into(),
             ))
         })
    }
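The sqlite snapshot hunk above shows a behavioral difference, not just wording: main treats the snapshot target as a directory and writes the backup file inside it, while the branch passes the target path straight to the backup call. A sketch of main's path handling in isolation (the rusqlite call itself is elided; only the path logic is shown):

use std::path::PathBuf;

// Ensure the snapshot directory exists and compute the backup file path
// inside it, as main's snapshot() does before calling backup().
fn snapshot_path(to: &PathBuf) -> std::io::Result<PathBuf> {
    std::fs::create_dir_all(to)?;
    let mut path = to.clone();
    path.push("db.sqlite");
    Ok(path)
}

Snapshotting into a directory leaves room for sidecar files (metadata, other engines' snapshots) under one snapshot root.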
@@ -23,9 +23,7 @@ path = "tests/lib.rs"
 [dependencies]
 format_table.workspace = true
 garage_db.workspace = true
-garage_api_admin.workspace = true
-garage_api_s3.workspace = true
-garage_api_k2v = { workspace = true, optional = true }
+garage_api.workspace = true
 garage_block.workspace = true
 garage_model.workspace = true
 garage_net.workspace = true
@@ -42,6 +40,7 @@ parse_duration.workspace = true
 hex.workspace = true
 tracing.workspace = true
 tracing-subscriber.workspace = true
+rand.workspace = true
 async-trait.workspace = true
 sha1.workspace = true
 sodiumoxide.workspace = true
@@ -49,18 +48,21 @@ structopt.workspace = true
 git-version.workspace = true
 
 serde.workspace = true
+serde_bytes.workspace = true
+toml.workspace = true
 
 futures.workspace = true
+futures-util.workspace = true
 tokio.workspace = true
 
 opentelemetry.workspace = true
 opentelemetry-prometheus = { workspace = true, optional = true }
 opentelemetry-otlp = { workspace = true, optional = true }
+prometheus = { workspace = true, optional = true }
 syslog-tracing = { workspace = true, optional = true }
 
 [dev-dependencies]
-garage_api_common.workspace = true
+aws-config.workspace = true
 
 aws-sdk-s3.workspace = true
 chrono.workspace = true
 http.workspace = true
@@ -82,7 +84,7 @@ k2v-client.workspace = true
 [features]
 default = [ "bundled-libs", "metrics", "lmdb", "sqlite", "k2v" ]
 
-k2v = [ "garage_util/k2v", "garage_api_k2v" ]
+k2v = [ "garage_util/k2v", "garage_api/k2v" ]
 
 # Database engines
 lmdb = [ "garage_model/lmdb" ]
@@ -93,7 +95,7 @@ consul-discovery = [ "garage_rpc/consul-discovery" ]
 # Automatic registration and discovery via Kubernetes API
 kubernetes-discovery = [ "garage_rpc/kubernetes-discovery" ]
 # Prometheus exporter (/metrics endpoint).
-metrics = [ "garage_api_admin/metrics", "opentelemetry-prometheus" ]
+metrics = [ "garage_api/metrics", "opentelemetry-prometheus", "prometheus" ]
 # Exporter for the OpenTelemetry Collector.
 telemetry-otlp = [ "opentelemetry-otlp" ]
 # Logging to syslog
@@ -129,7 +129,7 @@ pub async fn cmd_assign_role(
     zone: args
         .zone
         .clone()
-        .ok_or("Please specify a zone with the -z flag")?,
+        .ok_or("Please specifiy a zone with the -z flag")?,
     capacity,
     tags: args.tags.clone(),
 }
@@ -145,7 +145,7 @@ pub async fn cmd_assign_role(
 
     send_layout(rpc_cli, rpc_host, layout).await?;
 
-    println!("Role changes are staged but not yet committed.");
+    println!("Role changes are staged but not yet commited.");
     println!("Use `garage layout show` to view staged role changes,");
     println!("and `garage layout apply` to enact staged changes.");
     Ok(())
@@ -172,7 +172,7 @@ pub async fn cmd_remove_role(
 
     send_layout(rpc_cli, rpc_host, layout).await?;
 
-    println!("Role removal is staged but not yet committed.");
+    println!("Role removal is staged but not yet commited.");
     println!("Use `garage layout show` to view staged role changes,");
     println!("and `garage layout apply` to enact staged changes.");
     Ok(())
@@ -184,7 +184,7 @@ pub struct SkipDeadNodesOpt {
     /// This will generally be the current layout version.
     #[structopt(long = "version")]
     pub(crate) version: u64,
-    /// Allow the skip even if a quorum of nodes could not be found for
+    /// Allow the skip even if a quorum of ndoes could not be found for
     /// the data among the remaining nodes
     #[structopt(long = "allow-missing-data")]
     pub(crate) allow_missing_data: bool,
@@ -107,7 +107,7 @@ async fn main() {
     );
 
     // Initialize panic handler that aborts on panic and shows a nice message.
-    // By default, Tokio continues running normally when a task panics. We want
+    // By default, Tokio continues runing normally when a task panics. We want
     // to avoid this behavior in Garage as this would risk putting the process in an
     // unknown/uncontrollable state. We prefer to exit the process and restart it
     // from scratch, so that it boots back into a fresh, known state.

@@ -104,7 +104,7 @@ pub(crate) fn fill_secret(
 
     if let Some(val) = cli_value {
         if config_secret.is_some() || config_secret_file.is_some() {
-            debug!("Overriding secret `{}` using value specified using CLI argument or environment variable.", name);
+            debug!("Overriding secret `{}` using value specified using CLI argument or environnement variable.", name);
         }
 
         *config_secret = Some(val);
@@ -6,13 +6,13 @@ use garage_util::background::*;
 use garage_util::config::*;
 use garage_util::error::Error;
 
-use garage_api_admin::api_server::AdminApiServer;
-use garage_api_s3::api_server::S3ApiServer;
+use garage_api::admin::api_server::AdminApiServer;
+use garage_api::s3::api_server::S3ApiServer;
 use garage_model::garage::Garage;
 use garage_web::WebServer;
 
 #[cfg(feature = "k2v")]
-use garage_api_k2v::api_server::K2VApiServer;
+use garage_api::k2v::api_server::K2VApiServer;
 
 use crate::admin::*;
 use crate::secrets::{fill_secrets, Secrets};
@@ -15,7 +15,7 @@ use hyper_util::client::legacy::{connect::HttpConnector, Client};
 use hyper_util::rt::TokioExecutor;

 use super::garage::{Instance, Key};
-use garage_api_common::signature;
+use garage_api::signature;

 pub type Body = FullBody<hyper::body::Bytes>;

@@ -153,7 +153,7 @@ impl<'a> RequestBuilder<'a> {

 	pub async fn send(&mut self) -> Result<Response<Body>, String> {
 		// TODO this is a bit incorrect in that path and query params should be url-encoded and
-		// aren't, but this is good enough for now.
+		// aren't, but this is good enought for now.

 		let query = query_param_to_string(&self.query_params);
 		let (host, path) = if self.vhost_style {
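The TODO in this hunk admits that path and query parameters are not url-encoded. For reference, a hedged sketch of what encoding them could look like with the `percent-encoding` crate; the parameter type is an assumption, since the real signature of `query_param_to_string` does not appear in the diff:

```rust
use percent_encoding::{utf8_percent_encode, NON_ALPHANUMERIC};

/// Hypothetical replacement for the unencoded query-string builder:
/// percent-encodes both keys and values before joining them with '&'.
fn query_param_to_string(params: &[(String, Option<String>)]) -> String {
    params
        .iter()
        .map(|(k, v)| {
            let k = utf8_percent_encode(k, NON_ALPHANUMERIC).to_string();
            match v {
                Some(v) => format!("{}={}", k, utf8_percent_encode(v, NON_ALPHANUMERIC)),
                None => k,
            }
        })
        .collect::<Vec<_>>()
        .join("&")
}
```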
@@ -210,9 +210,9 @@ impl<'a> RequestBuilder<'a> {
 			HeaderName::from_static("x-amz-decoded-content-length"),
 			HeaderValue::from_str(&self.body.len().to_string()).unwrap(),
 		);
-		// Get length of body by doing the conversion to a streaming body with an
+		// Get lenght of body by doing the conversion to a streaming body with an
 		// invalid signature (we don't know the seed) just to get its length. This
-		// is a pretty lazy and inefficient way to do it, but it's enough for test
+		// is a pretty lazy and inefficient way to do it, but it's enought for test
 		// code.
 		all_headers.insert(
 			CONTENT_LENGTH,
@@ -29,11 +29,12 @@ tokio.workspace = true
 # cli deps
 clap = { workspace = true, optional = true }
 format_table = { workspace = true, optional = true }
+tracing = { workspace = true, optional = true }
 tracing-subscriber = { workspace = true, optional = true }


 [features]
-cli = ["clap", "tokio/fs", "tokio/io-std", "tracing-subscriber", "format_table"]
+cli = ["clap", "tokio/fs", "tokio/io-std", "tracing", "tracing-subscriber", "format_table"]

 [lib]
 path = "lib.rs"
@@ -54,7 +54,7 @@ enum Command {
 		partition_key: String,
 		/// Sort key to read from
 		sort_key: String,
-		/// Output formatting
+		/// Output formating
 		#[clap(flatten)]
 		output_kind: ReadOutputKind,
 	},
@@ -70,7 +70,7 @@ enum Command {
 		/// Timeout, in seconds
 		#[clap(short = 'T', long)]
 		timeout: Option<u64>,
-		/// Output formatting
+		/// Output formating
 		#[clap(flatten)]
 		output_kind: ReadOutputKind,
 	},
@@ -87,7 +87,7 @@ enum Command {
 		/// Timeout, in seconds
 		#[clap(short = 'T', long)]
 		timeout: Option<u64>,
-		/// Output formatting
+		/// Output formating
 		#[clap(flatten)]
 		output_kind: BatchOutputKind,
 	},
@@ -103,7 +103,7 @@ enum Command {
 	},
 	/// List partition keys
 	ReadIndex {
-		/// Output formatting
+		/// Output formating
 		#[clap(flatten)]
 		output_kind: BatchOutputKind,
 		/// Output only partition keys matching this filter
@@ -114,7 +114,7 @@ enum Command {
 	ReadRange {
 		/// Partition key to read from
 		partition_key: String,
-		/// Output formatting
+		/// Output formating
 		#[clap(flatten)]
 		output_kind: BatchOutputKind,
 		/// Output only sort keys matching this filter
@@ -125,7 +125,7 @@ enum Command {
 	DeleteRange {
 		/// Partition key to delete from
 		partition_key: String,
-		/// Output formatting
+		/// Output formating
 		#[clap(flatten)]
 		output_kind: BatchOutputKind,
 		/// Delete only sort keys matching this filter
@@ -185,10 +185,10 @@ struct ReadOutputKind {
 	/// Raw output. Conflicts generate error, causality token is not returned
 	#[clap(short, long, group = "output-kind")]
 	raw: bool,
-	/// Human formatted output
+	/// Human formated output
 	#[clap(short = 'H', long, group = "output-kind")]
 	human: bool,
-	/// JSON formatted output
+	/// JSON formated output
 	#[clap(short, long, group = "output-kind")]
 	json: bool,
 }
@@ -207,7 +207,7 @@ impl ReadOutputKind {
 		let mut val = val.value;
 		if val.len() != 1 {
 			eprintln!(
-				"Raw mode can only read non-concurrent values, found {} values, expected 1",
+				"Raw mode can only read non-concurent values, found {} values, expected 1",
 				val.len()
 			);
 			exit(1);
@@ -265,10 +265,10 @@ impl ReadOutputKind {
 #[derive(Parser, Debug)]
 #[clap(group = clap::ArgGroup::new("output-kind").multiple(false).required(false))]
 struct BatchOutputKind {
-	/// Human formatted output
+	/// Human formated output
 	#[clap(short = 'H', long, group = "output-kind")]
 	human: bool,
-	/// JSON formatted output
+	/// JSON formated output
 	#[clap(short, long, group = "output-kind")]
 	json: bool,
 }
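The structs in the two hunks above rely on the same clap pattern: an `ArgGroup` with `multiple(false)` makes the output flags mutually exclusive, while `required(false)` keeps them all optional. A minimal, self-contained version of that pattern (clap 3-style derive, matching the attribute syntax in the diff):

```rust
use clap::Parser;

#[derive(Parser, Debug)]
#[clap(group = clap::ArgGroup::new("output-kind").multiple(false).required(false))]
struct OutputKind {
    /// Human formatted output
    #[clap(short = 'H', long, group = "output-kind")]
    human: bool,
    /// JSON formatted output
    #[clap(short, long, group = "output-kind")]
    json: bool,
}

fn main() {
    // Passing both `-H` and `-j` now fails to parse instead of being
    // silently accepted; passing neither falls back to the default output.
    let args = OutputKind::parse();
    println!("human={} json={}", args.human, args.json);
}
```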
@@ -336,7 +336,7 @@ impl K2vClient {
 			.collect())
 	}

-	/// Perform a DeleteBatch request, deleting multiple values or range of values at once, without
+	/// Perform a DeleteBatch request, deleting mutiple values or range of values at once, without
 	/// providing causality information.
 	pub async fn delete_batch(&self, operations: &[BatchDeleteOp<'_>]) -> Result<Vec<u64>, Error> {
 		let url = self.build_url(None, &[("delete", "")]);
@@ -22,6 +22,7 @@ garage_util.workspace = true
 garage_net.workspace = true

 async-trait.workspace = true
+arc-swap.workspace = true
 blake2.workspace = true
 chrono.workspace = true
 err-derive.workspace = true
@@ -37,7 +38,9 @@ serde.workspace = true
 serde_bytes.workspace = true

 futures.workspace = true
+futures-util.workspace = true
 tokio.workspace = true
+opentelemetry.workspace = true

 [features]
 default = [ "lmdb", "sqlite" ]
Some files were not shown because too many files have changed in this diff.