Compare commits
38 commits
4787685912...e852c91d18
| Author | SHA1 | Date |
|---|---|---|
| | e852c91d18 | |
| | e9b0068079 | |
| | 49a138b670 | |
| | e94d6f78d7 | |
| | 1af4a5ed56 | |
| | f7c65e830e | |
| | 0e61e3b6fb | |
| | a0abf41762 | |
| | 2ac75018a1 | |
| | 980572a887 | |
| | 7a0014b6f7 | |
| | edb0b9c1ee | |
| | f58a813a36 | |
| | defd7d9e63 | |
| | 533afcf4e1 | |
| | 5ea5fd2130 | |
| | 35f8e8e2fb | |
| | d5a2502b09 | |
| | d7868c48a4 | |
| | 280d1be7b1 | |
| | 2065f011ca | |
| | 243b7c9a1c | |
| | a3afc761b6 | |
| | 19bdd1c799 | |
| | 448dcc5cf4 | |
| | 26121bb619 | |
| | 280330ac72 | |
| | 4d7b4d9d20 | |
| | fc450ec13a | |
| | 379b2049f5 | |
| | 293139a94a | |
| | 54e800ef8d | |
| | 1e40c93fd0 | |
| | 0cfb56d33e | |
| | c1fb65194c | |
| | 67941000ee | |
| | 60c26fbc62 | |
| | e76dba9561 | |
1
.gitattributes
vendored
|
@ -1 +0,0 @@
|
|||
*.pdf filter=lfs diff=lfs merge=lfs -text
|
63
Cargo.nix
|
@ -946,20 +946,20 @@ in
|
|||
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
||||
src = fetchCratesIo { inherit name version; sha256 = "1145cf131a2c6ba0615079ab6a638f7e1973ac9c2634fcbeaaad6114246efe8c"; };
|
||||
features = builtins.concatLists [
|
||||
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled") "alloc")
|
||||
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled") "default")
|
||||
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled") "lazy_static")
|
||||
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled") "std")
|
||||
[ "alloc" ]
|
||||
[ "default" ]
|
||||
[ "lazy_static" ]
|
||||
[ "std" ]
|
||||
];
|
||||
dependencies = {
|
||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled" then "cfg_if" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".cfg-if."1.0.0" { inherit profileName; }).out;
|
||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled" then "crossbeam_utils" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crossbeam-utils."0.8.8" { inherit profileName; }).out;
|
||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled" then "lazy_static" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".lazy_static."1.4.0" { inherit profileName; }).out;
|
||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled" then "memoffset" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".memoffset."0.6.5" { inherit profileName; }).out;
|
||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled" then "scopeguard" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".scopeguard."1.1.0" { inherit profileName; }).out;
|
||||
cfg_if = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".cfg-if."1.0.0" { inherit profileName; }).out;
|
||||
crossbeam_utils = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crossbeam-utils."0.8.8" { inherit profileName; }).out;
|
||||
lazy_static = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".lazy_static."1.4.0" { inherit profileName; }).out;
|
||||
memoffset = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".memoffset."0.6.5" { inherit profileName; }).out;
|
||||
scopeguard = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".scopeguard."1.1.0" { inherit profileName; }).out;
|
||||
};
|
||||
buildDependencies = {
|
||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled" then "autocfg" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".autocfg."1.1.0" { profileName = "__noProfile"; }).out;
|
||||
autocfg = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".autocfg."1.1.0" { profileName = "__noProfile"; }).out;
|
||||
};
|
||||
});
@ -995,7 +995,7 @@ in
|
|||
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
||||
src = fetchCratesIo { inherit name version; sha256 = "0bf124c720b7686e3c2663cf54062ab0f68a88af2fb6a030e87e30bf721fcb38"; };
|
||||
features = builtins.concatLists [
|
||||
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled") "default")
|
||||
[ "default" ]
|
||||
[ "lazy_static" ]
|
||||
[ "std" ]
|
||||
];
@ -1321,8 +1321,8 @@ in
|
|||
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
||||
src = fetchCratesIo { inherit name version; sha256 = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213"; };
|
||||
dependencies = {
|
||||
${ if (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled") && hostPlatform.isUnix then "libc" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.121" { inherit profileName; }).out;
|
||||
${ if (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled") && hostPlatform.isWindows then "winapi" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".winapi."0.3.9" { inherit profileName; }).out;
|
||||
${ if hostPlatform.isUnix then "libc" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.121" { inherit profileName; }).out;
|
||||
${ if hostPlatform.isWindows then "winapi" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".winapi."0.3.9" { inherit profileName; }).out;
|
||||
};
|
||||
});
@ -1490,7 +1490,7 @@ in
|
|||
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
||||
src = fetchCratesIo { inherit name version; sha256 = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c"; };
|
||||
dependencies = {
|
||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled" then "byteorder" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".byteorder."1.4.3" { inherit profileName; }).out;
|
||||
byteorder = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".byteorder."1.4.3" { inherit profileName; }).out;
|
||||
};
|
||||
});
@ -1658,11 +1658,12 @@ in
|
|||
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage_db/bundled-libs") "bundled-libs")
|
||||
(lib.optional (rootFeatures' ? "garage_db/clap" || rootFeatures' ? "garage_db/cli") "clap")
|
||||
(lib.optional (rootFeatures' ? "garage_db/cli") "cli")
|
||||
[ "default" ]
|
||||
(lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/heed" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "heed")
|
||||
(lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_db/lmdb" || rootFeatures' ? "garage_model/lmdb") "lmdb")
|
||||
(lib.optional (rootFeatures' ? "garage_db/cli" || rootFeatures' ? "garage_db/pretty_env_logger") "pretty_env_logger")
|
||||
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite") "rusqlite")
|
||||
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled") "sled")
|
||||
[ "sled" ]
|
||||
(lib.optional (rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite") "sqlite")
|
||||
];
|
||||
dependencies = {
|
||||
|
@ -1672,7 +1673,7 @@ in
|
|||
hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out;
|
||||
${ if rootFeatures' ? "garage_db/cli" || rootFeatures' ? "garage_db/pretty_env_logger" then "pretty_env_logger" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".pretty_env_logger."0.4.0" { inherit profileName; }).out;
|
||||
${ if rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_db/bundled-libs" || rootFeatures' ? "garage_db/rusqlite" || rootFeatures' ? "garage_db/sqlite" || rootFeatures' ? "garage_model/sqlite" then "rusqlite" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rusqlite."0.27.0" { inherit profileName; }).out;
|
||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled" then "sled" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".sled."0.34.7" { inherit profileName; }).out;
|
||||
sled = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".sled."0.34.7" { inherit profileName; }).out;
|
||||
tracing = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tracing."0.1.32" { inherit profileName; }).out;
|
||||
};
|
||||
devDependencies = {
|
||||
|
@ -1686,9 +1687,10 @@ in
|
|||
registry = "unknown";
|
||||
src = fetchCrateLocal (workspaceSrc + "/src/model");
|
||||
features = builtins.concatLists [
|
||||
[ "default" ]
|
||||
(lib.optional (rootFeatures' ? "garage/k2v" || rootFeatures' ? "garage_api/k2v" || rootFeatures' ? "garage_model/k2v") "k2v")
|
||||
(lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_model/lmdb") "lmdb")
|
||||
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_model/sled") "sled")
|
||||
[ "sled" ]
|
||||
(lib.optional (rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_model/sqlite") "sqlite")
|
||||
];
|
||||
dependencies = {
|
||||
|
@ -2846,10 +2848,10 @@ in
|
|||
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
||||
src = fetchCratesIo { inherit name version; sha256 = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce"; };
|
||||
features = builtins.concatLists [
|
||||
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled") "default")
|
||||
[ "default" ]
|
||||
];
|
||||
buildDependencies = {
|
||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled" then "autocfg" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".autocfg."1.1.0" { profileName = "__noProfile"; }).out;
|
||||
autocfg = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".autocfg."1.1.0" { profileName = "__noProfile"; }).out;
|
||||
};
|
||||
});
|
||||
|
||||
|
@ -4732,18 +4734,18 @@ in
|
|||
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
||||
src = fetchCratesIo { inherit name version; sha256 = "7f96b4737c2ce5987354855aed3797279def4ebf734436c6aa4552cf8e169935"; };
|
||||
features = builtins.concatLists [
|
||||
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled") "default")
|
||||
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled") "no_metrics")
|
||||
[ "default" ]
|
||||
[ "no_metrics" ]
|
||||
];
|
||||
dependencies = {
|
||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled" then "crc32fast" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crc32fast."1.3.2" { inherit profileName; }).out;
|
||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled" then "crossbeam_epoch" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crossbeam-epoch."0.9.8" { inherit profileName; }).out;
|
||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled" then "crossbeam_utils" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crossbeam-utils."0.8.8" { inherit profileName; }).out;
|
||||
${ if (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled") && (hostPlatform.parsed.kernel.name == "linux" || hostPlatform.parsed.kernel.name == "darwin" || hostPlatform.parsed.kernel.name == "windows") then "fs2" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".fs2."0.4.3" { inherit profileName; }).out;
|
||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled" then "fxhash" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".fxhash."0.2.1" { inherit profileName; }).out;
|
||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled" then "libc" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.121" { inherit profileName; }).out;
|
||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled" then "log" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".log."0.4.16" { inherit profileName; }).out;
|
||||
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled" then "parking_lot" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".parking_lot."0.11.2" { inherit profileName; }).out;
|
||||
crc32fast = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crc32fast."1.3.2" { inherit profileName; }).out;
|
||||
crossbeam_epoch = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crossbeam-epoch."0.9.8" { inherit profileName; }).out;
|
||||
crossbeam_utils = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".crossbeam-utils."0.8.8" { inherit profileName; }).out;
|
||||
${ if hostPlatform.parsed.kernel.name == "linux" || hostPlatform.parsed.kernel.name == "darwin" || hostPlatform.parsed.kernel.name == "windows" then "fs2" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".fs2."0.4.3" { inherit profileName; }).out;
|
||||
fxhash = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".fxhash."0.2.1" { inherit profileName; }).out;
|
||||
libc = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.121" { inherit profileName; }).out;
|
||||
log = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".log."0.4.16" { inherit profileName; }).out;
|
||||
parking_lot = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".parking_lot."0.11.2" { inherit profileName; }).out;
|
||||
};
|
||||
});
|
||||
|
||||
|
@ -5396,7 +5398,6 @@ in
|
|||
[ "attributes" ]
|
||||
[ "default" ]
|
||||
[ "log" ]
|
||||
[ "log-always" ]
|
||||
[ "std" ]
|
||||
[ "tracing-attributes" ]
|
||||
];
|
||||
|
@ -5890,7 +5891,7 @@ in
|
|||
[ "ntstatus" ]
|
||||
[ "objbase" ]
|
||||
[ "processenv" ]
|
||||
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/sled" || rootFeatures' ? "garage_db/sled" || rootFeatures' ? "garage_model/sled") "processthreadsapi")
|
||||
[ "processthreadsapi" ]
|
||||
[ "profileapi" ]
|
||||
[ "schannel" ]
|
||||
[ "securitybaseapi" ]
|
||||
|
|
|
@ -11,14 +11,14 @@ let
|
|||
|
||||
build_debug_and_release = (target: {
|
||||
debug = (compile {
|
||||
inherit target git_version;
|
||||
inherit system target git_version pkgsSrc cargo2nixOverlay;
|
||||
release = false;
|
||||
}).workspace.garage {
|
||||
compileMode = "build";
|
||||
};
|
||||
|
||||
release = (compile {
|
||||
inherit target git_version;
|
||||
inherit system target git_version pkgsSrc cargo2nixOverlay;
|
||||
release = true;
|
||||
}).workspace.garage {
|
||||
compileMode = "build";
|
||||
|
@ -39,7 +39,7 @@ in {
|
|||
};
|
||||
test = {
|
||||
amd64 = test (compile {
|
||||
inherit git_version;
|
||||
inherit system git_version pkgsSrc cargo2nixOverlay;
|
||||
target = "x86_64-unknown-linux-musl";
|
||||
features = [
|
||||
"garage/bundled-libs"
|
||||
|
@ -52,7 +52,7 @@ in {
|
|||
};
|
||||
clippy = {
|
||||
amd64 = (compile {
|
||||
inherit git_version;
|
||||
inherit system git_version pkgsSrc cargo2nixOverlay;
|
||||
target = "x86_64-unknown-linux-musl";
|
||||
compiler = "clippy";
|
||||
}).workspace.garage {
|
||||
|
|
|
@ -48,7 +48,8 @@ garage:
|
|||
replicationMode: "2"
|
||||
|
||||
# Start 4 instances (StatefulSets) of garage
|
||||
replicaCount: 4
|
||||
deployment:
|
||||
replicaCount: 4
|
||||
|
||||
# Override default storage class and size
|
||||
persistence:
|
||||
|
|
|
@ -109,7 +109,7 @@ especially you must consider the following folders/files:
|
|||
this folder will be your main data storage and must be on a large storage (e.g. large HDD)
|
||||
|
||||
|
||||
A valid `/etc/garage/garage.toml` for our cluster would look as follows:
|
||||
A valid `/etc/garage.toml` for our cluster would look as follows:
|
||||
|
||||
```toml
|
||||
metadata_dir = "/var/lib/garage/meta"
|
||||
|
|
|
@ -12,7 +12,7 @@ as pictures, video, images, documents, etc., in a redundant multi-node
|
|||
setting. S3 is versatile enough to also be used to publish a static
|
||||
website.
|
||||
|
||||
Garage is an opinionated object storage solutoin, we focus on the following **desirable properties**:
|
||||
Garage is an opinionated object storage solution, we focus on the following **desirable properties**:
|
||||
|
||||
- **Internet enabled**: made for multi-sites (eg. datacenters, offices, households, etc.) interconnected through regular Internet connections.
|
||||
- **Self-contained & lightweight**: works everywhere and integrates well in existing environments to target [hyperconverged infrastructures](https://en.wikipedia.org/wiki/Hyper-converged_infrastructure).
|
||||
|
|
|
@ -83,7 +83,7 @@ This feature is totally invisible to S3 clients and does not break compatibility
|
|||
### Cluster administration API
|
||||
|
||||
Garage provides a fully-fledged REST API to administer your cluster programatically.
|
||||
Functionnality included in the admin API include: setting up and monitoring
|
||||
Functionality included in the admin API include: setting up and monitoring
|
||||
cluster nodes, managing access credentials, and managing storage buckets and bucket aliases.
|
||||
A full reference of the administration API is available [here](@/documentation/reference-manual/admin-api.md).
|
||||
|
||||
|
|
686
doc/drafts/admin-api.md
Normal file
|
@ -0,0 +1,686 @@
|
|||
+++
|
||||
title = "Administration API"
|
||||
weight = 60
|
||||
+++
|
||||
|
||||
The Garage administration API is accessible through a dedicated server whose
|
||||
listen address is specified in the `[admin]` section of the configuration
|
||||
file (see [configuration file
|
||||
reference](@/documentation/reference-manual/configuration.md)).
|
||||
|
||||
**WARNING.** At this point, there is no commitment to the stability of the APIs described in this document.
We will bump the version number prefixed to each API endpoint each time the syntax
or semantics change, meaning that code that relies on these endpoints will break
when changes are introduced.
|
||||
|
||||
The Garage administration API was introduced in version 0.7.2; this document
does not apply to older versions of Garage.
|
||||
|
||||
|
||||
## Access control
|
||||
|
||||
The admin API uses two different tokens for access control; they are specified in the config file's `[admin]` section:
|
||||
|
||||
- `metrics_token`: the token for accessing the Metrics endpoint (if this token
|
||||
is not set in the config file, the Metrics endpoint can be accessed without
|
||||
access control);
|
||||
|
||||
- `admin_token`: the token for accessing all of the other administration
|
||||
endpoints (if this token is not set in the config file, access to these
|
||||
endpoints is disabled entirely).
|
||||
|
||||
These tokens are used as simple HTTP bearer tokens. In other words, to
|
||||
authenticate access to an admin API endpoint, add the following HTTP header
|
||||
to your request:
|
||||
|
||||
```
|
||||
Authorization: Bearer <token>
|
||||
```
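
For instance, assuming the admin API listens on `localhost:3903` (an illustrative value only; use the listen address configured in your `[admin]` section), an authenticated call to the GetClusterStatus endpoint described below would look like this:

```
GET /v0/status HTTP/1.1
Host: localhost:3903
Authorization: Bearer <admin_token>
```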
|
||||
|
||||
## Administration API endpoints
|
||||
|
||||
### Metrics-related endpoints
|
||||
|
||||
#### Metrics `GET /metrics`
|
||||
|
||||
Returns internal Garage metrics in Prometheus format.
|
||||
|
||||
#### Health `GET /health`
|
||||
|
||||
Used for simple health checks in a cluster setting with an orchestrator.
|
||||
Returns an HTTP status 200 if the node is ready to answer users' requests,
|
||||
and an HTTP status 503 (Service Unavailable) if there are some partitions
|
||||
for which a quorum of nodes is not available.
|
||||
A simple textual message is also returned in a body with content-type `text/plain`.
|
||||
See `/v0/health` for an API that also returns JSON output.
|
||||
|
||||
### Cluster operations
|
||||
|
||||
#### GetClusterStatus `GET /v0/status`
|
||||
|
||||
Returns the cluster's current status in JSON, including:
|
||||
|
||||
- ID of the node being queried and its version of the Garage daemon
|
||||
- Live nodes
|
||||
- Currently configured cluster layout
|
||||
- Staged changes to the cluster layout
|
||||
|
||||
Example response body:
|
||||
|
||||
```json
|
||||
{
|
||||
"node": "ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f",
|
||||
"garage_version": "git:v0.8.0",
|
||||
"knownNodes": {
|
||||
"ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f": {
|
||||
"addr": "10.0.0.11:3901",
|
||||
"is_up": true,
|
||||
"last_seen_secs_ago": 9,
|
||||
"hostname": "node1"
|
||||
},
|
||||
"4a6ae5a1d0d33bf895f5bb4f0a418b7dc94c47c0dd2eb108d1158f3c8f60b0ff": {
|
||||
"addr": "10.0.0.12:3901",
|
||||
"is_up": true,
|
||||
"last_seen_secs_ago": 1,
|
||||
"hostname": "node2"
|
||||
},
|
||||
"23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27": {
|
||||
"addr": "10.0.0.21:3901",
|
||||
"is_up": true,
|
||||
"last_seen_secs_ago": 7,
|
||||
"hostname": "node3"
|
||||
},
|
||||
"e2ee7984ee65b260682086ec70026165903c86e601a4a5a501c1900afe28d84b": {
|
||||
"addr": "10.0.0.22:3901",
|
||||
"is_up": true,
|
||||
"last_seen_secs_ago": 1,
|
||||
"hostname": "node4"
|
||||
}
|
||||
},
|
||||
"layout": {
|
||||
"version": 12,
|
||||
"roles": {
|
||||
"ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f": {
|
||||
"zone": "dc1",
|
||||
"capacity": 4,
|
||||
"tags": [
|
||||
"node1"
|
||||
]
|
||||
},
|
||||
"4a6ae5a1d0d33bf895f5bb4f0a418b7dc94c47c0dd2eb108d1158f3c8f60b0ff": {
|
||||
"zone": "dc1",
|
||||
"capacity": 6,
|
||||
"tags": [
|
||||
"node2"
|
||||
]
|
||||
},
|
||||
"23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27": {
|
||||
"zone": "dc2",
|
||||
"capacity": 10,
|
||||
"tags": [
|
||||
"node3"
|
||||
]
|
||||
}
|
||||
},
|
||||
"stagedRoleChanges": {
|
||||
"e2ee7984ee65b260682086ec70026165903c86e601a4a5a501c1900afe28d84b": {
|
||||
"zone": "dc2",
|
||||
"capacity": 5,
|
||||
"tags": [
|
||||
"node4"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### GetClusterHealth `GET /v0/health`
|
||||
|
||||
Returns the cluster's current health in JSON format, with the following variables:
|
||||
|
||||
- `status`: one of `Healthy`, `Degraded` or `Unavailable`:
|
||||
- Healthy: Garage node is connected to all storage nodes
|
||||
- Degraded: Garage node is not connected to all storage nodes, but a quorum of write nodes is available for all partitions
|
||||
- Unavailable: a quorum of write nodes is not available for some partitions
|
||||
- `known_nodes`: the number of nodes this Garage node has had a TCP connection to since the daemon started
|
||||
- `connected_nodes`: the number of nodes this Garage node currently has an open connection to
|
||||
- `storage_nodes`: the number of storage nodes currently registered in the cluster layout
|
||||
- `storage_nodes_ok`: the number of storage nodes to which a connection is currently open
|
||||
- `partitions`: the total number of partitions of the data (currently always 256)
|
||||
- `partitions_quorum`: the number of partitions for which a quorum of write nodes is available
|
||||
- `partitions_all_ok`: the number of partitions for which we are connected to all of the storage nodes responsible for storing them
|
||||
|
||||
Contrary to `GET /health`, this endpoint always returns a 200 OK HTTP response code.
|
||||
|
||||
Example response body:
|
||||
|
||||
```json
|
||||
{
|
||||
"status": "Degraded",
|
||||
"known_nodes": 3,
|
||||
"connected_nodes": 2,
|
||||
"storage_nodes": 3,
|
||||
"storage_nodes_ok": 2,
|
||||
"partitions": 256,
|
||||
"partitions_quorum": 256,
|
||||
"partitions_all_ok": 0
|
||||
}
|
||||
```
|
||||
|
||||
#### ConnectClusterNodes `POST /v0/connect`
|
||||
|
||||
Instructs this Garage node to connect to other Garage nodes at specified addresses.
|
||||
|
||||
Example request body:
|
||||
|
||||
```json
|
||||
[
|
||||
"ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f@10.0.0.11:3901",
|
||||
"4a6ae5a1d0d33bf895f5bb4f0a418b7dc94c47c0dd2eb108d1158f3c8f60b0ff@10.0.0.12:3901"
|
||||
]
|
||||
```
|
||||
|
||||
The format of the string for a node to connect to is: `<node ID>@<ip address>:<port>`, same as in the `garage node connect` CLI call.
|
||||
|
||||
Example response:
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"success": true,
|
||||
"error": null
|
||||
},
|
||||
{
|
||||
"success": false,
|
||||
"error": "Handshake error"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
#### GetClusterLayout `GET /v0/layout`
|
||||
|
||||
Returns the cluster's current layout in JSON, including:
|
||||
|
||||
- Currently configured cluster layout
|
||||
- Staged changes to the cluster layout
|
||||
|
||||
(the info returned by this endpoint is a subset of the info returned by GetClusterStatus)
|
||||
|
||||
Example response body:
|
||||
|
||||
```json
|
||||
{
|
||||
"version": 12,
|
||||
"roles": {
|
||||
"ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f": {
|
||||
"zone": "dc1",
|
||||
"capacity": 4,
|
||||
"tags": [
|
||||
"node1"
|
||||
]
|
||||
},
|
||||
"4a6ae5a1d0d33bf895f5bb4f0a418b7dc94c47c0dd2eb108d1158f3c8f60b0ff": {
|
||||
"zone": "dc1",
|
||||
"capacity": 6,
|
||||
"tags": [
|
||||
"node2"
|
||||
]
|
||||
},
|
||||
"23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27": {
|
||||
"zone": "dc2",
|
||||
"capacity": 10,
|
||||
"tags": [
|
||||
"node3"
|
||||
]
|
||||
}
|
||||
},
|
||||
"stagedRoleChanges": {
|
||||
"e2ee7984ee65b260682086ec70026165903c86e601a4a5a501c1900afe28d84b": {
|
||||
"zone": "dc2",
|
||||
"capacity": 5,
|
||||
"tags": [
|
||||
"node4"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### UpdateClusterLayout `POST /v0/layout`
|
||||
|
||||
Send modifications to the cluster layout. These modifications will
|
||||
be included in the staged role changes, visible in subsequent calls
|
||||
to `GetClusterLayout`. Once the set of staged changes is satisfactory,
the user may call `ApplyClusterLayout` to apply the staged changes,
or `RevertClusterLayout` to clear all of the staged changes in
the layout.
|
||||
|
||||
Request body format:
|
||||
|
||||
```json
|
||||
{
|
||||
<node_id>: {
|
||||
"capacity": <new_capacity>,
|
||||
"zone": <new_zone>,
|
||||
"tags": [
|
||||
<new_tag>,
|
||||
...
|
||||
]
|
||||
},
|
||||
<node_id_to_remove>: null,
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
Contrary to the CLI, which may update only a subset of the fields
`capacity`, `zone` and `tags`, all of these
values must be specified when calling this API.
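
For example, the following body (reusing, purely for illustration, node identifiers from the GetClusterStatus example above) stages a role for one node and removes the role of another:

```json
{
  "e2ee7984ee65b260682086ec70026165903c86e601a4a5a501c1900afe28d84b": {
    "capacity": 5,
    "zone": "dc2",
    "tags": [
      "node4"
    ]
  },
  "23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27": null
}
```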
|
||||
|
||||
|
||||
#### ApplyClusterLayout `POST /v0/layout/apply`
|
||||
|
||||
Applies to the cluster the layout changes currently registered as
|
||||
staged layout changes.
|
||||
|
||||
Request body format:
|
||||
|
||||
```json
|
||||
{
|
||||
"version": 13
|
||||
}
|
||||
```
|
||||
|
||||
As with the CLI, the body must include the version of the new layout
that will be created, which MUST be 1 + the version of the layout
currently in place in the cluster.
|
||||
|
||||
#### RevertClusterLayout `POST /v0/layout/revert`
|
||||
|
||||
Clears all of the staged layout changes.
|
||||
|
||||
Request body format:
|
||||
|
||||
```json
|
||||
{
|
||||
"version": 13
|
||||
}
|
||||
```
|
||||
|
||||
Reverting the staged changes is done by incrementing the version number
|
||||
and clearing the contents of the staged change list.
|
||||
As with the CLI, the body must include the incremented
version number, which MUST be 1 + the version of the layout
currently in place in the cluster.
|
||||
|
||||
|
||||
### Access key operations
|
||||
|
||||
#### ListKeys `GET /v0/key`
|
||||
|
||||
Returns all API access keys in the cluster.
|
||||
|
||||
Example response:
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"id": "GK31c2f218a2e44f485b94239e",
|
||||
"name": "test"
|
||||
},
|
||||
{
|
||||
"id": "GKe10061ac9c2921f09e4c5540",
|
||||
"name": "test2"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
#### CreateKey `POST /v0/key`
|
||||
|
||||
Creates a new API access key.
|
||||
|
||||
Request body format:
|
||||
|
||||
```json
|
||||
{
|
||||
"name": "NameOfMyKey"
|
||||
}
|
||||
```
|
||||
|
||||
#### ImportKey `POST /v0/key/import`
|
||||
|
||||
Imports an existing API key.
|
||||
|
||||
Request body format:
|
||||
|
||||
```json
|
||||
{
|
||||
"accessKeyId": "GK31c2f218a2e44f485b94239e",
|
||||
"secretAccessKey": "b892c0665f0ada8a4755dae98baa3b133590e11dae3bcc1f9d769d67f16c3835",
|
||||
"name": "NameOfMyKey"
|
||||
}
|
||||
```
|
||||
|
||||
#### GetKeyInfo `GET /v0/key?id=<access key id>`
|
||||
#### GetKeyInfo `GET /v0/key?search=<pattern>`
|
||||
|
||||
Returns information about the requested API access key.
|
||||
|
||||
If `id` is set, the key is looked up using its exact identifier (faster).
|
||||
If `search` is set, the key is looked up using its name or a prefix
of its identifier (slower, as all keys are enumerated to do this).
|
||||
|
||||
Example response:
|
||||
|
||||
```json
|
||||
{
|
||||
"name": "test",
|
||||
"accessKeyId": "GK31c2f218a2e44f485b94239e",
|
||||
"secretAccessKey": "b892c0665f0ada8a4755dae98baa3b133590e11dae3bcc1f9d769d67f16c3835",
|
||||
"permissions": {
|
||||
"createBucket": false
|
||||
},
|
||||
"buckets": [
|
||||
{
|
||||
"id": "70dc3bed7fe83a75e46b66e7ddef7d56e65f3c02f9f80b6749fb97eccb5e1033",
|
||||
"globalAliases": [
|
||||
"test2"
|
||||
],
|
||||
"localAliases": [],
|
||||
"permissions": {
|
||||
"read": true,
|
||||
"write": true,
|
||||
"owner": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "d7452a935e663fc1914f3a5515163a6d3724010ce8dfd9e4743ca8be5974f995",
|
||||
"globalAliases": [
|
||||
"test3"
|
||||
],
|
||||
"localAliases": [],
|
||||
"permissions": {
|
||||
"read": true,
|
||||
"write": true,
|
||||
"owner": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b",
|
||||
"globalAliases": [],
|
||||
"localAliases": [
|
||||
"test"
|
||||
],
|
||||
"permissions": {
|
||||
"read": true,
|
||||
"write": true,
|
||||
"owner": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "96470e0df00ec28807138daf01915cfda2bee8eccc91dea9558c0b4855b5bf95",
|
||||
"globalAliases": [
|
||||
"alex"
|
||||
],
|
||||
"localAliases": [],
|
||||
"permissions": {
|
||||
"read": true,
|
||||
"write": true,
|
||||
"owner": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
#### DeleteKey `DELETE /v0/key?id=<access key id>`
|
||||
|
||||
Deletes an API access key.
|
||||
|
||||
#### UpdateKey `POST /v0/key?id=<access key id>`
|
||||
|
||||
Updates information about the specified API access key.
|
||||
|
||||
Request body format:
|
||||
|
||||
```json
|
||||
{
  "name": "NameOfMyKey",
  "allow": {
    "createBucket": true
  },
  "deny": {}
}
|
||||
```
|
||||
|
||||
All fields (`name`, `allow` and `deny`) are optional.
|
||||
If they are present, the corresponding modifications are applied to the key, otherwise nothing is changed.
|
||||
The possible flags in `allow` and `deny` are: `createBucket`.
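
For instance, a minimal sketch of a body that only revokes the `createBucket` permission, assuming (by symmetry with the bucket allow/deny endpoints below) that a `true` flag in `deny` clears the corresponding permission:

```json
{
  "deny": {
    "createBucket": true
  }
}
```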
|
||||
|
||||
|
||||
### Bucket operations
|
||||
|
||||
#### ListBuckets `GET /v0/bucket`
|
||||
|
||||
Returns all storage buckets in the cluster.
|
||||
|
||||
Example response:
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"id": "70dc3bed7fe83a75e46b66e7ddef7d56e65f3c02f9f80b6749fb97eccb5e1033",
|
||||
"globalAliases": [
|
||||
"test2"
|
||||
],
|
||||
"localAliases": []
|
||||
},
|
||||
{
|
||||
"id": "96470e0df00ec28807138daf01915cfda2bee8eccc91dea9558c0b4855b5bf95",
|
||||
"globalAliases": [
|
||||
"alex"
|
||||
],
|
||||
"localAliases": []
|
||||
},
|
||||
{
|
||||
"id": "d7452a935e663fc1914f3a5515163a6d3724010ce8dfd9e4743ca8be5974f995",
|
||||
"globalAliases": [
|
||||
"test3"
|
||||
],
|
||||
"localAliases": []
|
||||
},
|
||||
{
|
||||
"id": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b",
|
||||
"globalAliases": [],
|
||||
"localAliases": [
|
||||
{
|
||||
"accessKeyId": "GK31c2f218a2e44f485b94239e",
|
||||
"alias": "test"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
#### GetBucketInfo `GET /v0/bucket?id=<bucket id>`
|
||||
#### GetBucketInfo `GET /v0/bucket?globalAlias=<alias>`
|
||||
|
||||
Returns information about the requested storage bucket.
|
||||
|
||||
If `id` is set, the bucket is looked up using its exact identifier.
|
||||
If `globalAlias` is set, the bucket is looked up using its global alias.
|
||||
(both are fast)
|
||||
|
||||
Example response:
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "afa8f0a22b40b1247ccd0affb869b0af5cff980924a20e4b5e0720a44deb8d39",
|
||||
"globalAliases": [],
|
||||
"websiteAccess": false,
|
||||
"websiteConfig": null,
|
||||
"keys": [
|
||||
{
|
||||
"accessKeyId": "GK31c2f218a2e44f485b94239e",
|
||||
"name": "Imported key",
|
||||
"permissions": {
|
||||
"read": true,
|
||||
"write": true,
|
||||
"owner": true
|
||||
},
|
||||
"bucketLocalAliases": [
|
||||
"debug"
|
||||
]
|
||||
}
|
||||
],
|
||||
"objects": 14827,
|
||||
"bytes": 13189855625,
|
||||
"unfinshedUploads": 0,
|
||||
"quotas": {
|
||||
"maxSize": null,
|
||||
"maxObjects": null
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### CreateBucket `POST /v0/bucket`
|
||||
|
||||
Creates a new storage bucket.
|
||||
|
||||
Request body format:
|
||||
|
||||
```json
|
||||
{
|
||||
"globalAlias": "NameOfMyBucket"
|
||||
}
|
||||
```
|
||||
|
||||
OR
|
||||
|
||||
```json
|
||||
{
|
||||
"localAlias": {
|
||||
"accessKeyId": "GK31c2f218a2e44f485b94239e",
|
||||
"alias": "NameOfMyBucket",
|
||||
"allow": {
|
||||
"read": true,
|
||||
"write": true,
|
||||
"owner": false
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
OR
|
||||
|
||||
```json
|
||||
{}
|
||||
```
|
||||
|
||||
Creates a new bucket, either with a global alias, a local one,
|
||||
or no alias at all.
|
||||
|
||||
Technically, you can also specify both `globalAlias` and `localAlias` and that would create
|
||||
two aliases, but I don't see why you would want to do that.
|
||||
|
||||
#### DeleteBucket `DELETE /v0/bucket?id=<bucket id>`
|
||||
|
||||
Deletes a storage bucket. A bucket cannot be deleted if it is not empty.
|
||||
|
||||
Warning: this will delete all aliases associated with the bucket!
|
||||
|
||||
#### UpdateBucket `PUT /v0/bucket?id=<bucket id>`
|
||||
|
||||
Updates the configuration of the given bucket.
|
||||
|
||||
Request body format:
|
||||
|
||||
```json
|
||||
{
  "websiteAccess": {
    "enabled": true,
    "indexDocument": "index.html",
    "errorDocument": "404.html"
  },
  "quotas": {
    "maxSize": 19029801,
    "maxObjects": null
  }
}
|
||||
```
|
||||
|
||||
All fields (`websiteAccess` and `quotas`) are optional.
|
||||
If they are present, the corresponding modifications are applied to the bucket, otherwise nothing is changed.
|
||||
|
||||
In `websiteAccess`: if `enabled` is `true`, `indexDocument` must be specified.
|
||||
The field `errorDocument` is optional; if no error document is set, a generic
error message is displayed when errors happen. Conversely, if `enabled` is
`false`, neither `indexDocument` nor `errorDocument` may be specified.
|
||||
|
||||
In `quotas`: new values of `maxSize` and `maxObjects` must both be specified, or set to `null`
|
||||
to remove the quotas. An absent value will be considered the same as a `null`. It is not possible
|
||||
to change only one of the two quotas.
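
For instance, a minimal body that removes both quotas and leaves `websiteAccess` untouched:

```json
{
  "quotas": {
    "maxSize": null,
    "maxObjects": null
  }
}
```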
|
||||
|
||||
### Operations on permissions for keys on buckets
|
||||
|
||||
#### BucketAllowKey `POST /v0/bucket/allow`
|
||||
|
||||
Allows a key to do read/write/owner operations on a bucket.
|
||||
|
||||
Request body format:
|
||||
|
||||
```json
|
||||
{
  "bucketId": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b",
  "accessKeyId": "GK31c2f218a2e44f485b94239e",
  "permissions": {
    "read": true,
    "write": true,
    "owner": true
  }
}
|
||||
```
|
||||
|
||||
Flags in `permissions` which have the value `true` will be activated.
|
||||
Other flags will remain unchanged.
|
||||
|
||||
#### BucketDenyKey `POST /v0/bucket/deny`
|
||||
|
||||
Denies a key the ability to do read/write/owner operations on a bucket.
|
||||
|
||||
Request body format:
|
||||
|
||||
```json
|
||||
{
  "bucketId": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b",
  "accessKeyId": "GK31c2f218a2e44f485b94239e",
  "permissions": {
    "read": false,
    "write": false,
    "owner": true
  }
}
|
||||
```
|
||||
|
||||
Flags in `permissions` which have the value `true` will be deactivated.
|
||||
Other flags will remain unchanged.
|
||||
|
||||
|
||||
### Operations on bucket aliases
|
||||
|
||||
#### GlobalAliasBucket `PUT /v0/bucket/alias/global?id=<bucket id>&alias=<global alias>`
|
||||
|
||||
Empty body. Creates a global alias for a bucket.
|
||||
|
||||
#### GlobalUnaliasBucket `DELETE /v0/bucket/alias/global?id=<bucket id>&alias=<global alias>`
|
||||
|
||||
Removes a global alias for a bucket.
|
||||
|
||||
#### LocalAliasBucket `PUT /v0/bucket/alias/local?id=<bucket id>&accessKeyId=<access key ID>&alias=<local alias>`
|
||||
|
||||
Empty body. Creates a local alias for a bucket in the namespace of a specific access key.
|
||||
|
||||
#### LocalUnaliasBucket `DELETE /v0/bucket/alias/local?id=<bucket id>&accessKeyId=<access key ID>&alias=<local alias>`
|
||||
|
||||
Removes a local alias for a bucket in the namespace of a specific access key.
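
As an example, re-creating the global alias `test2` for the first bucket of the ListBuckets example above could look like this (the `Host` value is an illustrative assumption):

```
PUT /v0/bucket/alias/global?id=70dc3bed7fe83a75e46b66e7ddef7d56e65f3c02f9f80b6749fb97eccb5e1033&alias=test2 HTTP/1.1
Host: localhost:3903
Authorization: Bearer <admin_token>
```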
|
||||
|
10
doc/talks/2022-11-19-Capitole-du-Libre/.gitignore
vendored
Normal file
|
@ -0,0 +1,10 @@
|
|||
*.aux
|
||||
*.bbl
|
||||
*.blg
|
||||
*.log
|
||||
*.nav
|
||||
*.out
|
||||
*.snm
|
||||
*.synctex.gz
|
||||
*.toc
|
||||
*.dvi
|
8
doc/talks/2022-11-19-Capitole-du-Libre/Makefile
Normal file
|
@ -0,0 +1,8 @@
|
|||
all:
|
||||
pdflatex présentation.tex
|
||||
|
||||
clean:
|
||||
rm -f *.aux *.bbl *.blg *.log *.nav *.out *.snm *.synctex.gz *.toc *.dvi présentation.pdf
|
||||
|
||||
clean_sauf_pdf:
|
||||
rm -f *.aux *.bbl *.blg *.log *.nav *.out *.snm *.synctex.gz *.toc *.dvi
|
BIN
doc/talks/2022-11-19-Capitole-du-Libre/NGI.png
Normal file
After Width: | Height: | Size: 61 KiB |
BIN
doc/talks/2022-11-19-Capitole-du-Libre/agpl-v3-logo.png
Normal file
After Width: | Height: | Size: 196 KiB |
BIN
doc/talks/2022-11-19-Capitole-du-Libre/carte-Europe.pdf
Normal file
BIN
doc/talks/2022-11-19-Capitole-du-Libre/deuxfleurs-logo.png
Normal file
After Width: | Height: | Size: 105 KiB |
BIN
doc/talks/2022-11-19-Capitole-du-Libre/drapeau_européen.png
Normal file
After Width: | Height: | Size: 2.5 KiB |
BIN
doc/talks/2022-11-19-Capitole-du-Libre/garage-logo.png
Normal file
After Width: | Height: | Size: 73 KiB |
BIN
doc/talks/2022-11-19-Capitole-du-Libre/logo_chatons.png
Normal file
After Width: | Height: | Size: 199 KiB |
BIN
doc/talks/2022-11-19-Capitole-du-Libre/mastodon-logo.png
Normal file
After Width: | Height: | Size: 41 KiB |
BIN
doc/talks/2022-11-19-Capitole-du-Libre/matrix-logo.png
Normal file
After Width: | Height: | Size: 52 KiB |
BIN
doc/talks/2022-11-19-Capitole-du-Libre/neptune.jpg
Normal file
After Width: | Height: | Size: 174 KiB |
BIN
doc/talks/2022-11-19-Capitole-du-Libre/nextcloud-logo.png
Normal file
After Width: | Height: | Size: 126 KiB |
BIN
doc/talks/2022-11-19-Capitole-du-Libre/peertube-logo.png
Normal file
After Width: | Height: | Size: 20 KiB |
BIN
doc/talks/2022-11-19-Capitole-du-Libre/présentation.pdf
Normal file
340
doc/talks/2022-11-19-Capitole-du-Libre/présentation.tex
Normal file
|
@ -0,0 +1,340 @@
|
|||
\documentclass[11pt, aspectratio=1610]{beamer}
|
||||
\usetheme{Warsaw}
|
||||
\usepackage[utf8]{inputenc}
|
||||
\usepackage[french]{babel}
|
||||
\usepackage{amsmath}
|
||||
\usepackage{amsfonts}
|
||||
\usepackage{amssymb}
|
||||
\usepackage{tikz}
|
||||
\usepackage{graphicx}
|
||||
\usepackage{xcolor}
|
||||
\usepackage{setspace}
|
||||
\usepackage{todonotes}
|
||||
\presetkeys{todonotes}{inline}{}
|
||||
\renewcommand{\baselinestretch}{1.25}
|
||||
|
||||
\definecolor{orange_garage}{RGB}{255,147,41}
|
||||
\definecolor{gris_garage}{RGB}{78,78,78}
|
||||
|
||||
\author[Association Deuxfleurs]{~\linebreak Vincent Giraud}
|
||||
\title[De l'auto-hébergement à l'entre-hébergement avec Garage]{De l'auto-hébergement à l'entre-hébergement :\\Garage, pour conserver ses données ensemble}
|
||||
%\setbeamercovered{transparent}
|
||||
%\setbeamertemplate{navigation symbols}{}
|
||||
\date{Capitole du Libre 2022\linebreak
|
||||
|
||||
\scriptsize Samedi 19 novembre 2022\linebreak
|
||||
}
|
||||
|
||||
\setbeamercolor{palette primary}{fg=gris_garage,bg=orange_garage}
|
||||
\setbeamercolor{palette secondary}{fg=gris_garage,bg=gris_garage}
|
||||
\setbeamercolor{palette tiertary}{fg=white,bg=gris_garage}
|
||||
\setbeamercolor{palette quaternary}{fg=white,bg=gris_garage}
|
||||
\setbeamercolor{navigation symbols}{fg=black, bg=white}
|
||||
\setbeamercolor{navigation symbols dimmed}{fg=darkgray, bg=white}
|
||||
\setbeamercolor{itemize item}{fg=gris_garage}
|
||||
\setbeamertemplate{itemize item}[circle]
|
||||
|
||||
\addtobeamertemplate{navigation symbols}{}{%
|
||||
\usebeamerfont{footline}%
|
||||
\usebeamercolor[fg]{footline}%
|
||||
\hspace{1em}%
|
||||
\insertframenumber/\inserttotalframenumber
|
||||
}
|
||||
|
||||
\setbeamertemplate{headline}
|
||||
{%
|
||||
\leavevmode%
|
||||
\begin{beamercolorbox}[wd=.5\paperwidth,ht=2.5ex,dp=1.125ex]{section in head/foot}%
|
||||
\hbox to .5\paperwidth{\hfil\insertsectionhead\hfil}
|
||||
\end{beamercolorbox}%
|
||||
\begin{beamercolorbox}[wd=.5\paperwidth,ht=2.5ex,dp=1.125ex]{subsection in head/foot}%
|
||||
\hbox to .5\paperwidth{\hfil\insertsubsectionhead\hfil}
|
||||
\end{beamercolorbox}%
|
||||
}
|
||||
\addtobeamertemplate{footnote}{}{\vspace{2ex}}
|
||||
|
||||
\begin{document}
|
||||
\begin{frame}
|
||||
\titlepage
|
||||
\end{frame}
|
||||
|
||||
\section{Introduction}
|
||||
\subsection{Présentation}
|
||||
\begin{frame}
|
||||
\begin{columns}
|
||||
\column{0.5 \linewidth}
|
||||
\begin{center}
|
||||
\includegraphics[width=3.5cm]{deuxfleurs-logo.png}\linebreak
|
||||
|
||||
\texttt{https://deuxfleurs.fr}
|
||||
\end{center}
|
||||
\column{0.4 \linewidth}
|
||||
\begin{center}
|
||||
Deuxfleurs est une association militant en faveur d'un internet plus convivial, avec une organisation et des rapports de force repensés.\linebreak
|
||||
|
||||
Nous faisons partie du CHATONS\footnote[frame]{Collectif des Hébergeurs Alternatifs, Transparents, Ouverts, Neutres et Solidaires} depuis avril 2022.
|
||||
|
||||
\includegraphics[width=2cm]{logo_chatons.png}
|
||||
\end{center}
|
||||
\end{columns}
|
||||
\end{frame}
|
||||
|
||||
\subsection{Héberger à la maison}
|
||||
\begin{frame}
|
||||
\begin{columns}
|
||||
\begin{column}{0.5 \linewidth}
|
||||
\begin{center}
|
||||
Pour échapper au contrôle et au giron des opérateurs de clouds, héberger ses données à la maison présente de nombreux avantages...
|
||||
\end{center}
|
||||
|
||||
\vspace{0.5cm}
|
||||
|
||||
\begin{itemize}[<+(1)->]
|
||||
\item On récupère la souveraineté sur ses données
|
||||
\item On gagne en vie privée
|
||||
\item On gagne en libertés
|
||||
\item On est responsabilisé face à ses besoins
|
||||
\end{itemize}
|
||||
\end{column}
|
||||
\vrule{}
|
||||
\begin{column}{0.5 \linewidth}
|
||||
\begin{center}
|
||||
\onslide<6->{... mais aussi bien des contraintes...}
|
||||
\end{center}
|
||||
|
||||
\vspace{0.5cm}
|
||||
|
||||
\begin{itemize}[<+(2)->]
|
||||
\item On repose sur une connexion internet pour particulier
|
||||
\item Un certain savoir-faire et moultes compétences sont requis
|
||||
\item Assurer la résilience de ses services est difficile
|
||||
\item Bien sauvegarder ses données, et ceci au-delà de son site géographique, n'est pas évident
|
||||
\end{itemize}
|
||||
\end{column}
|
||||
\end{columns}
|
||||
\end{frame}
|
||||
|
||||
\subsection{Sauvegarder pour se parer à tout imprévu}
|
||||
\begin{frame}
|
||||
\begin{center}
|
||||
Sauvegarder pour se parer contre les pannes matérielles est une chose...
|
||||
|
||||
Sauvegarder pour se parer contre les cambriolages et les incendies en est une autre !\linebreak
|
||||
|
||||
\vspace{1cm}
|
||||
\onslide<2->{Répartir géographiquement ses données devient alors nécessaire.}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\section{Les solutions à explorer}
|
||||
\subsection{L'entre-hébergement}
|
||||
\begin{frame}
|
||||
\begin{center}
|
||||
On a vu récemment se développer au sein du CHATONS la notion d'entre-hébergement : en plus de renforcer l'intégrité des sauvegardes, on va améliorer la disponibilité pendant les coupures de liaison internet, de courant, ou pendant les déménagements d'administrateurs par exemple.\linebreak
|
||||
|
||||
\vspace{1cm}
|
||||
\onslide<2->
|
||||
{
|
||||
Dans le cadre du collectif, il s'agit de partager ses volumes de données entre hébergeurs.\linebreak
|
||||
|
||||
Pour assurer la confidentialité, on peut chiffrer les données au niveau applicatif.
|
||||
}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\subsection{S3 contre les systèmes de fichiers}
|
||||
\begin{frame}
|
||||
\begin{center}
|
||||
Dans le cadre de l'administration de services en ligne, les systèmes de fichiers recèlent certaines difficultés.\linebreak
|
||||
|
||||
\vspace{1cm}
|
||||
Le standard S3 apporte des facilités; on réduit le stockage à un paradigme de clé-valeur basé essentiellement sur deux opérations seulement: lire ou écrire une clé.
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\section{Garage}
|
||||
\subsection{Présentation}
|
||||
\begin{frame}
|
||||
\begin{columns}
|
||||
\column{0.5 \linewidth}
|
||||
\begin{center}
|
||||
Garage essaye de répondre à l'ensemble de ces besoins.\linebreak
|
||||
|
||||
\vspace{0.5cm}
|
||||
Il s'agit d'un logiciel libre permettant de distribuer un service S3 sur diverses machines éloignées.
|
||||
\end{center}
|
||||
\column{0.5 \linewidth}
|
||||
\begin{center}
|
||||
\includegraphics[width=4cm]{garage-logo.png}\linebreak
|
||||
|
||||
\texttt{https://garagehq.deuxfleurs.fr/}
|
||||
\end{center}
|
||||
\end{columns}
|
||||
\end{frame}
|
||||
|
||||
\subsection{Gestion des zones}
|
||||
\begin{frame}
|
||||
\begin{center}
|
||||
Garage va prendre en compte les zones géographiques au moment de répliquer les données.\linebreak
|
||||
|
||||
\vspace{1cm}
|
||||
\includegraphics[width=13.25cm]{zones.png}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\subsection{Comment ça marche ?}
|
||||
\begin{frame}
|
||||
\begin{columns}
|
||||
\column{0.5 \linewidth}
|
||||
\input{schéma europe}
|
||||
\column{0.5 \linewidth}
|
||||
\begin{center}
|
||||
Chaque objet est dupliqué sur plusieurs zones différentes.\linebreak
|
||||
|
||||
\onslide<5->{Lorsqu'un nouvel hébergeur rejoint le réseau, la charge se voit équilibrée.}\linebreak
|
||||
|
||||
\onslide<12->{Si une zone devient indisponible, les autres continuent d'assurer le service.}\linebreak
|
||||
\end{center}
|
||||
\end{columns}
|
||||
\end{frame}
|
||||
|
||||
\subsection{Financement}
|
||||
\begin{frame}
|
||||
\begin{center}
|
||||
Dans le cadre du programme \textit{Horizon 2021} de l'Union Européenne, nous avons reçu une subvention de la part de l'initiative NGI Pointer\footnote[frame]{Next Generation Internet Program for Open Internet Renovation}.\linebreak
|
||||
|
||||
\includegraphics[width=3cm]{drapeau_européen.png}\hspace{1cm}
|
||||
\includegraphics[width=3cm]{NGI.png}\linebreak
|
||||
|
||||
Nous avons ainsi pu financer le développement de Garage pendant 1 an.
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\subsection{Licence}
|
||||
\begin{frame}
|
||||
\begin{center}
|
||||
De par nos valeurs, nous avons attribué la licence AGPL version 3 à Garage, notamment afin qu'il reste parmi les biens communs.\linebreak
|
||||
|
||||
\vspace{0.5cm}
|
||||
\includegraphics[width=5cm]{agpl-v3-logo.png}\linebreak
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\subsection{Langage utilisé}
|
||||
\begin{frame}
|
||||
\begin{center}
|
||||
Nous avons décidé d'écrire Garage à l'aide du langage Rust, afin d'obtenir une compilation vers des binaires natifs et efficaces.\linebreak
|
||||
|
||||
\includegraphics[width=3.5cm]{rust-logo.png}\linebreak
|
||||
|
||||
Ce choix permet également de bénéficier des avantages reconnus de Rust en termes de sécurité.
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\subsection{Matériel requis}
|
||||
\begin{frame}
|
||||
\begin{center}
|
||||
Garage peut ainsi être performant sur des machines limitées. Les prérequis sont minimes : n'importe quelle machine avec un processeur qui a moins d'une décennie, 1~gigaoctet de mémoire vive, et 16~gigaoctets de stockage suffit.\linebreak
|
||||
|
||||
\vspace{1cm}
|
||||
|
||||
Cet aspect est déterminant : il permet en effet d'héberger sur du matériel acheté d'occasion, pour réduire l'impact écologique de nos infrastructures.
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\subsection{Performances}
|
||||
\begin{frame}
|
||||
\begin{center}
|
||||
\includegraphics[width=13.25cm]{rpc-amplification.png}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\begin{center}
|
||||
\includegraphics[width=11cm]{rpc-complexity.png}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\subsection{Services}
|
||||
\begin{frame}
|
||||
\begin{center}
|
||||
Puisqu'il suit le standard S3, beaucoup de services populaires sont par conséquence compatibles avec Garage :\linebreak
|
||||
|
||||
\begin{columns}
|
||||
\column{0.2 \linewidth}
|
||||
\begin{center}
|
||||
\includegraphics[width=2.5cm]{nextcloud-logo.png}
|
||||
\end{center}
|
||||
\column{0.2 \linewidth}
|
||||
\begin{center}
|
||||
\includegraphics[width=2.5cm]{peertube-logo.png}
|
||||
\end{center}
|
||||
\column{0.2 \linewidth}
|
||||
\begin{center}
|
||||
\includegraphics[width=2.5cm]{matrix-logo.png}
|
||||
\end{center}
|
||||
\column{0.2 \linewidth}
|
||||
\begin{center}
|
||||
\includegraphics[width=2.5cm]{mastodon-logo.png}
|
||||
\end{center}
|
||||
\end{columns}
|
||||
~\linebreak
|
||||
|
||||
Et comme souvent avec S3, on peut assimiler un bucket à un site, et utiliser le serveur pour héberger des sites web statiques.
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\section{Intégration chez Deuxfleurs}
|
||||
\subsection{Matériel}
|
||||
\begin{frame}
|
||||
\begin{center}
|
||||
\includegraphics[width=13cm]{neptune.jpg}\linebreak
|
||||
|
||||
En pratique, nos serveurs ne sont effectivement que des machines achetées d'occasion (très souvent des anciens ordinateurs destinés à la bureautique en entreprise).
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\subsection{Environnement logiciel}
|
||||
\begin{frame}
|
||||
\begin{center}
|
||||
Pour faciliter la reproduction d'un environnement connu, NixOS est installé sur nos machines.\linebreak
|
||||
|
||||
\vspace{1cm}
|
||||
Pour s’accommoder des réseaux qu'on trouve derrière des routeurs pour particuliers, on s'aide de notre logiciel Diplonat\footnote[frame]{\texttt{https://git.deuxfleurs.fr/Deuxfleurs/diplonat}}.
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\section{Au-delà...}
|
||||
\subsection{... de Deuxfleurs}
|
||||
\begin{frame}
|
||||
\begin{center}
|
||||
\includegraphics[width=10cm]{tedomum.png}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\subsection{... de Garage}
|
||||
\begin{frame}
|
||||
\begin{center}
|
||||
Nous avons récemment lancé le développement d'Aérogramme\footnote[frame]{\texttt{https://git.deuxfleurs.fr/Deuxfleurs/aerogramme}}.\linebreak
|
||||
|
||||
\vspace{1cm}
|
||||
Il s'agit d'un serveur de stockage de courriels chiffrés.\linebreak
|
||||
|
||||
\vspace{1cm}
|
||||
Il est conçu pour pouvoir travailler avec Garage.
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\section{Fin}
|
||||
\subsection{Contacts}
|
||||
\begin{frame}
|
||||
\begin{center}
|
||||
\begin{tikzpicture}
|
||||
\node (ronce) {\includegraphics[width=0.95\textwidth]{ronce.jpg}};
|
||||
\node[white] at (-5.1,3.6) {Intéressé(e) ?};
|
||||
\node[white, align=center] at (4.2,-2.6) {Contactez-nous !\\\texttt{coucou@deuxfleurs.fr}\\\texttt{\#forum:deuxfleurs.fr}};
|
||||
\end{tikzpicture}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
\end{document}
|
||||
|
BIN
doc/talks/2022-11-19-Capitole-du-Libre/ronce.jpg
Normal file
After Width: | Height: | Size: 1.4 MiB |
BIN
doc/talks/2022-11-19-Capitole-du-Libre/rpc-amplification.png
Normal file
After Width: | Height: | Size: 124 KiB |
BIN
doc/talks/2022-11-19-Capitole-du-Libre/rpc-complexity.png
Normal file
After Width: | Height: | Size: 194 KiB |
BIN
doc/talks/2022-11-19-Capitole-du-Libre/rust-logo.png
Normal file
After Width: | Height: | Size: 6.5 KiB |
52
doc/talks/2022-11-19-Capitole-du-Libre/schéma europe.tex
Normal file
|
@ -0,0 +1,52 @@
|
|||
\begin{tikzpicture}
|
||||
\node (carte) {\includegraphics[width=\textwidth]{carte-Europe.pdf}};
|
||||
|
||||
% \personnage{position X}{position Y}{facteur d'échelle}
|
||||
\newcommand{\personnage}[4]
|
||||
{
|
||||
\fill[#4] ({#1-(0.4 * #3)},{#2-(0.9 * #3)}) .. controls ({#1-(0.4 * #3)},#2) and ({#1+(0.4 * #3)},#2) .. ({#1+(0.4 * #3)},{#2-(0.9 * #3)}) -- ({#1-(0.4 * #3)},{#2-(0.9 * #3)});
|
||||
\fill[#4] (#1,#2) circle ({0.25 * #3});
|
||||
}
|
||||
|
||||
\onslide<1-11>{\personnage{-2.25}{-0.75}{0.75}{green}}
|
||||
\onslide<1-11>{\draw (-1.9,-1.6) rectangle ++(1,1.2);}
|
||||
\onslide<2-11>{\draw[fill=green] (-1.8,-1.525) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 1};}
|
||||
\onslide<4-5>{\draw[fill=red] (-1.8,-1.15) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 3};}
|
||||
\onslide<7-11>{\draw[fill=yellow] (-1.8,-1.15) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 4};}
|
||||
\onslide<9-11>{\draw[fill=red] (-1.8,-0.775) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 6};}
|
||||
\onslide<3-11>{\draw[fill=blue] (-1.35,-1.525) rectangle ++(0.35,0.3) node[pos=0.5, white] {\tiny 2};}
|
||||
\onslide<8-11>{\draw[fill=blue] (-1.35,-1.15) rectangle ++(0.35,0.3) node[pos=0.5, white] {\tiny 5};}
|
||||
\onslide<11-11>{\draw[fill=yellow] (-1.35,-0.775) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 8};}
|
||||
|
||||
\personnage{1.65}{1.5}{0.75}{blue}
|
||||
\draw (0.3,0.7) rectangle ++(1,1.2);
|
||||
\onslide<2->{\draw[fill=green] (0.4,0.775) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 1};}
|
||||
\onslide<4->{\draw[fill=red] (0.4,1.15) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 3};}
|
||||
\onslide<10->{\draw[fill=green] (0.4,1.525) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 7};}
|
||||
\onslide<3->{\draw[fill=blue] (0.85,0.775) rectangle ++(0.35,0.3) node[pos=0.5, white] {\tiny 2};}
|
||||
\onslide<9->{\draw[fill=red] (0.85,1.15) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 6};}
|
||||
\onslide<11->{\draw[fill=yellow] (0.85,1.525) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 8};}
|
||||
|
||||
\personnage{1.85}{-2.3}{0.75}{red}
|
||||
\draw (0.5,-3.15) rectangle ++(1,1.2);
|
||||
\onslide<2->{\draw[fill=green] (0.6,-3.075) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 1};}
|
||||
\onslide<4-5>{\draw[fill=red] (0.6,-2.7) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 3};}
|
||||
\onslide<7->{\draw[fill=yellow] (0.6,-2.7) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 4};}
|
||||
\onslide<9->{\draw[fill=red] (0.6,-2.325) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 6};}
|
||||
\onslide<3-5>{\draw[fill=blue] (1.05,-3.075) rectangle ++(0.35,0.3) node[pos=0.5, white] {\tiny 2};}
|
||||
\onslide<6->{\draw[fill=red] (1.05,-3.075) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 3};}
|
||||
\onslide<8->{\draw[fill=blue] (1.05,-2.7) rectangle ++(0.35,0.3) node[pos=0.5, white] {\tiny 5};}
|
||||
\onslide<10->{\draw[fill=green] (1.05,-2.325) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 7};}
|
||||
|
||||
\onslide<5->{\personnage{1.05}{-0.15}{0.75}{yellow}}
|
||||
\onslide<5->{\draw (-0.35,-1) rectangle ++(1,1.2);}
|
||||
\onslide<6->{\draw[fill=blue] (-0.25,-0.925) rectangle ++(0.35,0.3) node[pos=0.5, white] {\tiny 2};}
|
||||
\onslide<7->{\draw[fill=yellow] (-0.25,-0.55) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 4};}
|
||||
\onslide<10->{\draw[fill=green] (-0.25,-0.175) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 7};}
|
||||
\onslide<6->{\draw[fill=red] (0.2,-0.925) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 3};}
|
||||
\onslide<8->{\draw[fill=blue] (0.2,-0.55) rectangle ++(0.35,0.3) node[pos=0.5,white] {\tiny 5};}
|
||||
\onslide<11->{\draw[fill=yellow] (0.2,-0.175) rectangle ++(0.35,0.3) node[pos=0.5] {\tiny 8};}
|
||||
|
||||
\onslide<12->{\draw[line width=0.25cm] (-2.15,-0.5) -- ++(1,-1);}
|
||||
\onslide<12->{\draw[line width=0.25cm] (-2.15,-1.5) -- ++(1,1);}
|
||||
\end{tikzpicture}
|
BIN
doc/talks/2022-11-19-Capitole-du-Libre/tedomum.png
Normal file
After Width: | Height: | Size: 236 KiB |
BIN
doc/talks/2022-11-19-Capitole-du-Libre/zones.png
Normal file
After Width: | Height: | Size: 97 KiB |
108
flake.lock
Normal file
|
@ -0,0 +1,108 @@
|
|||
{
|
||||
"nodes": {
|
||||
"cargo2nix": {
|
||||
"inputs": {
|
||||
"flake-compat": "flake-compat",
|
||||
"flake-utils": "flake-utils",
|
||||
"nixpkgs": [
|
||||
"nixpkgs"
|
||||
],
|
||||
"rust-overlay": "rust-overlay"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1666087781,
|
||||
"narHash": "sha256-trKVdjMZ8mNkGfLcY5LsJJGtdV3xJDZnMVrkFjErlcs=",
|
||||
"owner": "Alexis211",
|
||||
"repo": "cargo2nix",
|
||||
"rev": "a7a61179b66054904ef6a195d8da736eaaa06c36",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "Alexis211",
|
||||
"repo": "cargo2nix",
|
||||
"rev": "a7a61179b66054904ef6a195d8da736eaaa06c36",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-compat": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1650374568,
|
||||
"narHash": "sha256-Z+s0J8/r907g149rllvwhb4pKi8Wam5ij0st8PwAh+E=",
|
||||
"owner": "edolstra",
|
||||
"repo": "flake-compat",
|
||||
"rev": "b4a34015c698c7793d592d66adbab377907a2be8",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "edolstra",
|
||||
"repo": "flake-compat",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-utils": {
|
||||
"locked": {
|
||||
"lastModified": 1659877975,
|
||||
"narHash": "sha256-zllb8aq3YO3h8B/U0/J1WBgAL8EX5yWf5pMj3G0NAmc=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "c0e246b9b83f637f4681389ecabcb2681b4f3af0",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1665657542,
|
||||
"narHash": "sha256-mojxNyzbvmp8NtVtxqiHGhRfjCALLfk9i/Uup68Y5q8=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "a3073c49bc0163fea6a121c276f526837672b555",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "a3073c49bc0163fea6a121c276f526837672b555",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
"inputs": {
|
||||
"cargo2nix": "cargo2nix",
|
||||
"nixpkgs": "nixpkgs"
|
||||
}
|
||||
},
|
||||
"rust-overlay": {
|
||||
"inputs": {
|
||||
"flake-utils": [
|
||||
"cargo2nix",
|
||||
"flake-utils"
|
||||
],
|
||||
"nixpkgs": [
|
||||
"cargo2nix",
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1664247556,
|
||||
"narHash": "sha256-J4vazHU3609ekn7dr+3wfqPo5WGlZVAgV7jfux352L0=",
|
||||
"owner": "oxalica",
|
||||
"repo": "rust-overlay",
|
||||
"rev": "524db9c9ea7bc7743bb74cdd45b6d46ea3fcc2ab",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "oxalica",
|
||||
"repo": "rust-overlay",
|
||||
"type": "github"
|
||||
}
|
||||
}
|
||||
},
|
||||
"root": "root",
|
||||
"version": 7
|
||||
}
|
28
flake.nix
Normal file
|
@ -0,0 +1,28 @@
|
|||
{
|
||||
description = "Garage, an S3-compatible distributed object store for self-hosted deployments";
|
||||
|
||||
inputs.nixpkgs.url = "github:NixOS/nixpkgs/a3073c49bc0163fea6a121c276f526837672b555";
|
||||
inputs.cargo2nix = {
|
||||
# As of 2022-10-18: two small patches on top of the unstable branch, one for clippy and one to fix feature detection
|
||||
url = "github:Alexis211/cargo2nix/a7a61179b66054904ef6a195d8da736eaaa06c36";
|
||||
inputs.nixpkgs.follows = "nixpkgs";
|
||||
};
|
||||
|
||||
outputs = { self, nixpkgs, cargo2nix }: let
|
||||
git_version = self.lastModifiedDate;
|
||||
compile = import ./nix/compile.nix;
|
||||
forAllSystems = nixpkgs.lib.genAttrs nixpkgs.lib.systems.flakeExposed;
|
||||
in
|
||||
{
|
||||
packages = forAllSystems (system: {
|
||||
default = (compile {
|
||||
inherit system git_version;
|
||||
pkgsSrc = nixpkgs;
|
||||
cargo2nixOverlay = cargo2nix.overlays.default;
|
||||
release = true;
|
||||
}).workspace.garage {
|
||||
compileMode = "build";
|
||||
};
|
||||
});
|
||||
};
|
||||
}
|
|
@ -1,25 +1,32 @@
|
|||
{
|
||||
system ? builtins.currentSystem,
|
||||
target,
|
||||
system,
|
||||
target ? null,
|
||||
pkgsSrc,
|
||||
cargo2nixOverlay,
|
||||
compiler ? "rustc",
|
||||
release ? false,
|
||||
git_version ? null,
|
||||
features ? null,
|
||||
}:
|
||||
|
||||
with import ./common.nix;
|
||||
|
||||
let
|
||||
log = v: builtins.trace v v;
|
||||
|
||||
pkgs = import pkgsSrc {
|
||||
inherit system;
|
||||
crossSystem = {
|
||||
config = target;
|
||||
isStatic = true;
|
||||
};
|
||||
overlays = [ cargo2nixOverlay ];
|
||||
};
|
||||
pkgs =
|
||||
if target != null then
|
||||
import pkgsSrc {
|
||||
inherit system;
|
||||
crossSystem = {
|
||||
config = target;
|
||||
isStatic = true;
|
||||
};
|
||||
overlays = [ cargo2nixOverlay ];
|
||||
}
|
||||
else
|
||||
import pkgsSrc {
|
||||
inherit system;
|
||||
overlays = [ cargo2nixOverlay ];
|
||||
};
|
||||
|
||||
/*
|
||||
Cargo2nix is built for rustOverlay, which installs Rust from Mozilla releases.
|
||||
|
@ -34,7 +41,7 @@ let
|
|||
NixOS ships them in separate ones. We reunite them with symlinkJoin.
|
||||
*/
|
||||
toolchainOptions =
|
||||
if target == "x86_64-unknown-linux-musl" || target == "aarch64-unknown-linux-musl" then {
|
||||
if target == null || target == "x86_64-unknown-linux-musl" || target == "aarch64-unknown-linux-musl" then {
|
||||
rustVersion = "1.63.0";
|
||||
extraRustComponents = [ "clippy" ];
|
||||
} else {
|
||||
|
|
|
@ -15,7 +15,7 @@ type: application
|
|||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.1.0
|
||||
version: 0.2.0
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
|
|
|
@ -18,6 +18,9 @@ metadata:
|
|||
name: {{ $fullName }}-s3-api
|
||||
labels:
|
||||
{{- include "garage.labels" . | nindent 4 }}
|
||||
{{- with .Values.ingress.s3.api.labels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- with .Values.ingress.s3.api.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
|
@ -80,6 +83,9 @@ metadata:
|
|||
name: {{ $fullName }}-s3-web
|
||||
labels:
|
||||
{{- include "garage.labels" . | nindent 4 }}
|
||||
{{- with .Values.ingress.s3.web.labels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- with .Values.ingress.s3.web.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
|
|
|
@ -1,15 +1,17 @@
|
|||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
kind: {{ .Values.deployment.kind }}
|
||||
metadata:
|
||||
name: {{ include "garage.fullname" . }}
|
||||
labels:
|
||||
{{- include "garage.labels" . | nindent 4 }}
|
||||
spec:
|
||||
replicas: {{ .Values.replicaCount }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "garage.selectorLabels" . | nindent 6 }}
|
||||
{{- if eq .Values.deployment.kind "StatefulSet" }}
|
||||
replicas: {{ .Values.deployment.replicaCount }}
|
||||
serviceName: {{ include "garage.fullname" . }}
|
||||
{{- end }}
|
||||
template:
|
||||
metadata:
|
||||
{{- with .Values.podAnnotations }}
|
||||
|
@ -79,6 +81,23 @@ spec:
|
|||
name: {{ include "garage.fullname" . }}-config
|
||||
- name: etc
|
||||
emptyDir: {}
|
||||
{{- if .Values.persistence.enabled }}
|
||||
{{- if eq .Values.deployment.kind "DaemonSet" }}
|
||||
- name: meta
|
||||
hostPath:
|
||||
path: {{ .Values.persistence.meta.hostPath }}
|
||||
type: DirectoryOrCreate
|
||||
- name: data
|
||||
hostPath:
|
||||
path: {{ .Values.persistence.data.hostPath }}
|
||||
type: DirectoryOrCreate
|
||||
{{- end }}
|
||||
{{- else }}
|
||||
- name: meta
|
||||
emptyDir: {}
|
||||
- name: data
|
||||
emptyDir: {}
|
||||
{{- end }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
|
@ -91,7 +110,7 @@ spec:
|
|||
tolerations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.persistence.enabled }}
|
||||
{{- if and .Values.persistence.enabled (eq .Values.deployment.kind "StatefulSet") }}
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: meta
|
|
@ -29,12 +29,20 @@ persistence:
|
|||
meta:
|
||||
# storageClass: "fast-storage-class"
|
||||
size: 100Mi
|
||||
# used only for daemon sets
|
||||
hostPath: /var/lib/garage/meta
|
||||
data:
|
||||
# storageClass: "slow-storage-class"
|
||||
size: 100Mi
|
||||
# used only for daemon sets
|
||||
hostPath: /var/lib/garage/data
|
||||
|
||||
# Number of StatefulSet replicas/garage nodes to start
|
||||
replicaCount: 3
|
||||
# Deployment configuration
|
||||
deployment:
|
||||
# Switchable to DaemonSet
|
||||
kind: StatefulSet
|
||||
# Number of StatefulSet replicas/garage nodes to start
|
||||
replicaCount: 3
|
||||
|
||||
image:
|
||||
repository: dxflrs/amd64_garage
|
||||
|
@ -85,14 +93,15 @@ service:
|
|||
ingress:
|
||||
s3:
|
||||
api:
|
||||
enabled: true
|
||||
enabled: false
|
||||
# Rely either on the className or the annotation below but not both
|
||||
# replace "nginx" by an Ingress controller
|
||||
# you can find examples here https://kubernetes.io/docs/concepts/services-networking/ingress-controllers
|
||||
className: "nginx"
|
||||
annotations:
|
||||
# className: "nginx"
|
||||
annotations: {}
|
||||
# kubernetes.io/ingress.class: "nginx"
|
||||
# kubernetes.io/tls-acme: "true"
|
||||
labels: {}
|
||||
hosts:
|
||||
- host: "s3.garage.tld" # garage S3 API endpoint
|
||||
paths:
|
||||
|
@ -107,11 +116,15 @@ ingress:
|
|||
# hosts:
|
||||
# - kubernetes.docker.internal
|
||||
web:
|
||||
enabled: true
|
||||
className: "nginx"
|
||||
enabled: false
|
||||
# Rely either on the className or the annotation below but not both
|
||||
# replace "nginx" by an Ingress controller
|
||||
# you can find examples here https://kubernetes.io/docs/concepts/services-networking/ingress-controllers
|
||||
# className: "nginx"
|
||||
annotations: {}
|
||||
# kubernetes.io/ingress.class: nginx
|
||||
# kubernetes.io/tls-acme: "true"
|
||||
labels: {}
|
||||
hosts:
|
||||
- host: "*.web.garage.tld" # wildcard website access with bucket name prefix
|
||||
paths:
|
||||
|
|
16
shell.nix
|
@ -1,5 +1,5 @@
|
|||
{
|
||||
system ? builtins.currentSystem,
|
||||
system ? builtins.currentSystem,
|
||||
}:
|
||||
|
||||
with import ./nix/common.nix;
|
||||
|
@ -71,13 +71,25 @@ function refresh_cache {
|
|||
for attr in clippy.amd64 test.amd64 pkgs.{amd64,i386,arm,arm64}.{debug,release}; do
|
||||
echo "Updating cache for ''${attr}"
|
||||
derivation=$(nix-instantiate --attr ''${attr})
|
||||
nix copy \
|
||||
nix copy -j8 \
|
||||
--to 's3://nix?endpoint=garage.deuxfleurs.fr&region=garage&secret-key=/tmp/nix-signing-key.sec' \
|
||||
$(nix-store -qR ''${derivation%\!bin})
|
||||
done
|
||||
rm /tmp/nix-signing-key.sec
|
||||
}
|
||||
|
||||
function refresh_flake_cache {
|
||||
pass show deuxfleurs/nix_priv_key > /tmp/nix-signing-key.sec
|
||||
for attr in packages.x86_64-linux.default; do
|
||||
echo "Updating cache for ''${attr}"
|
||||
derivation=$(nix path-info --derivation ".#''${attr}")
|
||||
nix copy -j8 \
|
||||
--to 's3://nix?endpoint=garage.deuxfleurs.fr&region=garage&secret-key=/tmp/nix-signing-key.sec' \
|
||||
$(nix-store -qR ''${derivation})
|
||||
done
|
||||
rm /tmp/nix-signing-key.sec
|
||||
}
|
||||
|
||||
function to_s3 {
|
||||
aws \
|
||||
--endpoint-url https://garage.deuxfleurs.fr \
|
||||
|
|
|
@ -15,6 +15,7 @@ use opentelemetry_prometheus::PrometheusExporter;
|
|||
use prometheus::{Encoder, TextEncoder};
|
||||
|
||||
use garage_model::garage::Garage;
|
||||
use garage_rpc::system::ClusterHealthStatus;
|
||||
use garage_util::error::Error as GarageError;
|
||||
|
||||
use crate::generic_server::*;
|
||||
|
@ -76,6 +77,31 @@ impl AdminApiServer {
|
|||
.body(Body::empty())?)
|
||||
}
|
||||
|
||||
fn handle_health(&self) -> Result<Response<Body>, Error> {
|
||||
let health = self.garage.system.health();
|
||||
|
||||
let (status, status_str) = match health.status {
|
||||
ClusterHealthStatus::Healthy => (StatusCode::OK, "Garage is fully operational"),
|
||||
ClusterHealthStatus::Degraded => (
|
||||
StatusCode::OK,
|
||||
"Garage is operational but some storage nodes are unavailable",
|
||||
),
|
||||
ClusterHealthStatus::Unavailable => (
|
||||
StatusCode::SERVICE_UNAVAILABLE,
|
||||
"Quorum is not available for some/all partitions, reads and writes will fail",
|
||||
),
|
||||
};
|
||||
let status_str = format!(
|
||||
"{}\nConsult the full health check API endpoint at /v0/health for more details\n",
|
||||
status_str
|
||||
);
|
||||
|
||||
Ok(Response::builder()
|
||||
.status(status)
|
||||
.header(http::header::CONTENT_TYPE, "text/plain")
|
||||
.body(Body::from(status_str))?)
|
||||
}
|
||||
|
||||
fn handle_metrics(&self) -> Result<Response<Body>, Error> {
|
||||
#[cfg(feature = "metrics")]
|
||||
{
|
||||
|
@ -124,6 +150,7 @@ impl ApiHandler for AdminApiServer {
|
|||
) -> Result<Response<Body>, Error> {
|
||||
let expected_auth_header =
|
||||
match endpoint.authorization_type() {
|
||||
Authorization::None => None,
|
||||
Authorization::MetricsToken => self.metrics_token.as_ref(),
|
||||
Authorization::AdminToken => match &self.admin_token {
|
||||
None => return Err(Error::forbidden(
|
||||
|
@ -147,8 +174,10 @@ impl ApiHandler for AdminApiServer {
|
|||
|
||||
match endpoint {
|
||||
Endpoint::Options => self.handle_options(&req),
|
||||
Endpoint::Health => self.handle_health(),
|
||||
Endpoint::Metrics => self.handle_metrics(),
|
||||
Endpoint::GetClusterStatus => handle_get_cluster_status(&self.garage).await,
|
||||
Endpoint::GetClusterHealth => handle_get_cluster_health(&self.garage).await,
|
||||
Endpoint::ConnectClusterNodes => handle_connect_cluster_nodes(&self.garage, req).await,
|
||||
// Layout
|
||||
Endpoint::GetClusterLayout => handle_get_cluster_layout(&self.garage).await,
|
||||
|
|
|
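The hunk above adds an unauthenticated `GET /health` route to the admin API server: it answers with a short plain-text summary and encodes the overall cluster state in the HTTP status code (200 for a healthy or degraded cluster, 503 when quorum is lost somewhere). As a rough illustration of how an external liveness probe could consume this, here is a minimal sketch; the admin bind address `localhost:3903` and the use of the `reqwest` crate (with its `blocking` feature) are assumptions of the sketch, not part of this patch.

```rust
// Minimal monitoring probe for the new `GET /health` admin route.
// Assumed: the admin API listens on localhost:3903 (configuration-dependent)
// and `reqwest = { version = "0.11", features = ["blocking"] }` is available.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Per the patch, /health requires no Authorization header (Authorization::None).
    let resp = reqwest::blocking::get("http://localhost:3903/health")?;
    let status = resp.status();
    let body = resp.text()?;

    // 200 covers both Healthy and Degraded; 503 means some partitions lost quorum.
    if status.is_success() {
        println!("cluster reachable: {}", body.trim());
    } else {
        eprintln!("cluster unavailable ({}): {}", status, body.trim());
        std::process::exit(1);
    }
    Ok(())
}
```

A degraded cluster intentionally still returns 200 here, which matches the handler above: the cluster keeps serving requests while some storage nodes are down.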
@ -43,6 +43,11 @@ pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<
|
|||
Ok(json_ok_response(&res)?)
|
||||
}
|
||||
|
||||
pub async fn handle_get_cluster_health(garage: &Arc<Garage>) -> Result<Response<Body>, Error> {
|
||||
let health = garage.system.health();
|
||||
Ok(json_ok_response(&health)?)
|
||||
}
|
||||
|
||||
pub async fn handle_connect_cluster_nodes(
|
||||
garage: &Arc<Garage>,
|
||||
req: Request<Body>,
|
||||
|
|
|
@ -6,6 +6,7 @@ use crate::admin::error::*;
|
|||
use crate::router_macros::*;
|
||||
|
||||
pub enum Authorization {
|
||||
None,
|
||||
MetricsToken,
|
||||
AdminToken,
|
||||
}
|
||||
|
@ -16,8 +17,10 @@ router_match! {@func
|
|||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub enum Endpoint {
|
||||
Options,
|
||||
Health,
|
||||
Metrics,
|
||||
GetClusterStatus,
|
||||
GetClusterHealth,
|
||||
ConnectClusterNodes,
|
||||
// Layout
|
||||
GetClusterLayout,
|
||||
|
@ -88,8 +91,10 @@ impl Endpoint {
|
|||
|
||||
let res = router_match!(@gen_path_parser (req.method(), path, query) [
|
||||
OPTIONS _ => Options,
|
||||
GET "/health" => Health,
|
||||
GET "/metrics" => Metrics,
|
||||
GET "/v0/status" => GetClusterStatus,
|
||||
GET "/v0/health" => GetClusterHealth,
|
||||
POST "/v0/connect" => ConnectClusterNodes,
|
||||
// Layout endpoints
|
||||
GET "/v0/layout" => GetClusterLayout,
|
||||
|
@ -130,6 +135,7 @@ impl Endpoint {
|
|||
/// Get the kind of authorization which is required to perform the operation.
|
||||
pub fn authorization_type(&self) -> Authorization {
|
||||
match self {
|
||||
Self::Health => Authorization::None,
|
||||
Self::Metrics => Authorization::MetricsToken,
|
||||
_ => Authorization::AdminToken,
|
||||
}
|
||||
|
@ -137,9 +143,13 @@ impl Endpoint {
|
|||
}
|
||||
|
||||
generateQueryParameters! {
|
||||
"id" => id,
|
||||
"search" => search,
|
||||
"globalAlias" => global_alias,
|
||||
"alias" => alias,
|
||||
"accessKeyId" => access_key_id
|
||||
keywords: [],
|
||||
fields: [
|
||||
"format" => format,
|
||||
"id" => id,
|
||||
"search" => search,
|
||||
"globalAlias" => global_alias,
|
||||
"alias" => alias,
|
||||
"accessKeyId" => access_key_id
|
||||
]
|
||||
}
|
||||
|
|
|
@ -96,7 +96,7 @@ impl Endpoint {
|
|||
fn from_get(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
||||
router_match! {
|
||||
@gen_parser
|
||||
(query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None),
|
||||
(query.keyword.take().unwrap_or_default(), partition_key, query, None),
|
||||
key: [
|
||||
EMPTY if causality_token => PollItem (query::sort_key, query::causality_token, opt_parse::timeout),
|
||||
EMPTY => ReadItem (query::sort_key),
|
||||
|
@ -111,7 +111,7 @@ impl Endpoint {
|
|||
fn from_search(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
||||
router_match! {
|
||||
@gen_parser
|
||||
(query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None),
|
||||
(query.keyword.take().unwrap_or_default(), partition_key, query, None),
|
||||
key: [
|
||||
],
|
||||
no_key: [
|
||||
|
@ -125,7 +125,7 @@ impl Endpoint {
|
|||
fn from_head(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
||||
router_match! {
|
||||
@gen_parser
|
||||
(query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None),
|
||||
(query.keyword.take().unwrap_or_default(), partition_key, query, None),
|
||||
key: [
|
||||
EMPTY => HeadObject(opt_parse::part_number, query_opt::version_id),
|
||||
],
|
||||
|
@ -140,7 +140,7 @@ impl Endpoint {
|
|||
fn from_post(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
||||
router_match! {
|
||||
@gen_parser
|
||||
(query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None),
|
||||
(query.keyword.take().unwrap_or_default(), partition_key, query, None),
|
||||
key: [
|
||||
],
|
||||
no_key: [
|
||||
|
@ -155,7 +155,7 @@ impl Endpoint {
|
|||
fn from_put(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
||||
router_match! {
|
||||
@gen_parser
|
||||
(query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None),
|
||||
(query.keyword.take().unwrap_or_default(), partition_key, query, None),
|
||||
key: [
|
||||
EMPTY => InsertItem (query::sort_key),
|
||||
|
||||
|
@ -169,7 +169,7 @@ impl Endpoint {
|
|||
fn from_delete(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
||||
router_match! {
|
||||
@gen_parser
|
||||
(query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None),
|
||||
(query.keyword.take().unwrap_or_default(), partition_key, query, None),
|
||||
key: [
|
||||
EMPTY => DeleteItem (query::sort_key),
|
||||
],
|
||||
|
@ -232,21 +232,18 @@ impl Endpoint {
|
|||
|
||||
// parameter name => struct field
|
||||
generateQueryParameters! {
|
||||
"prefix" => prefix,
|
||||
"start" => start,
|
||||
"causality_token" => causality_token,
|
||||
"end" => end,
|
||||
"limit" => limit,
|
||||
"reverse" => reverse,
|
||||
"sort_key" => sort_key,
|
||||
"timeout" => timeout
|
||||
}
|
||||
|
||||
mod keywords {
|
||||
//! This module contains all query parameters with no associated value
|
||||
//! used to differentiate endpoints.
|
||||
pub const EMPTY: &str = "";
|
||||
|
||||
pub const DELETE: &str = "delete";
|
||||
pub const SEARCH: &str = "search";
|
||||
keywords: [
|
||||
"delete" => DELETE,
|
||||
"search" => SEARCH
|
||||
],
|
||||
fields: [
|
||||
"prefix" => prefix,
|
||||
"start" => start,
|
||||
"causality_token" => causality_token,
|
||||
"end" => end,
|
||||
"limit" => limit,
|
||||
"reverse" => reverse,
|
||||
"sort_key" => sort_key,
|
||||
"timeout" => timeout
|
||||
]
|
||||
}
|
||||
|
|
|
@ -4,10 +4,9 @@ macro_rules! router_match {
|
|||
(@match $enum:expr , [ $($endpoint:ident,)* ]) => {{
|
||||
// usage: router_match {@match my_enum, [ VariantWithField1, VariantWithField2 ..] }
|
||||
// returns true if the variant was one of the listed variants, false otherwise.
|
||||
use Endpoint::*;
|
||||
match $enum {
|
||||
$(
|
||||
$endpoint { .. } => true,
|
||||
Endpoint::$endpoint { .. } => true,
|
||||
)*
|
||||
_ => false
|
||||
}
|
||||
|
@ -15,37 +14,35 @@ macro_rules! router_match {
|
|||
(@extract $enum:expr , $param:ident, [ $($endpoint:ident,)* ]) => {{
|
||||
// usage: router_match {@extract my_enum, field_name, [ VariantWithField1, VariantWithField2 ..] }
|
||||
// returns Some(field_value), or None if the variant was not one of the listed variants.
|
||||
use Endpoint::*;
|
||||
match $enum {
|
||||
$(
|
||||
$endpoint {$param, ..} => Some($param),
|
||||
Endpoint::$endpoint {$param, ..} => Some($param),
|
||||
)*
|
||||
_ => None
|
||||
}
|
||||
}};
|
||||
(@gen_path_parser ($method:expr, $reqpath:expr, $query:expr)
|
||||
[
|
||||
$($meth:ident $path:pat $(if $required:ident)? => $api:ident $(($($conv:ident :: $param:ident),*))?,)*
|
||||
]) => {{
|
||||
{
|
||||
use Endpoint::*;
|
||||
match ($method, $reqpath) {
|
||||
$(
|
||||
(&Method::$meth, $path) if true $(&& $query.$required.is_some())? => $api {
|
||||
$($(
|
||||
$param: router_match!(@@parse_param $query, $conv, $param),
|
||||
)*)?
|
||||
},
|
||||
)*
|
||||
(m, p) => {
|
||||
return Err(Error::bad_request(format!(
|
||||
"Unknown API endpoint: {} {}",
|
||||
m, p
|
||||
)))
|
||||
}
|
||||
}
|
||||
}
|
||||
}};
|
||||
(@gen_path_parser ($method:expr, $reqpath:expr, $query:expr)
|
||||
[
|
||||
$($meth:ident $path:pat $(if $required:ident)? => $api:ident $(($($conv:ident :: $param:ident),*))?,)*
|
||||
]) => {{
|
||||
{
|
||||
match ($method, $reqpath) {
|
||||
$(
|
||||
(&Method::$meth, $path) if true $(&& $query.$required.is_some())? => Endpoint::$api {
|
||||
$($(
|
||||
$param: router_match!(@@parse_param $query, $conv, $param),
|
||||
)*)?
|
||||
},
|
||||
)*
|
||||
(m, p) => {
|
||||
return Err(Error::bad_request(format!(
|
||||
"Unknown API endpoint: {} {}",
|
||||
m, p
|
||||
)))
|
||||
}
|
||||
}
|
||||
}
|
||||
}};
|
||||
(@gen_parser ($keyword:expr, $key:ident, $query:expr, $header:expr),
|
||||
key: [$($kw_k:ident $(if $required_k:ident)? $(header $header_k:expr)? => $api_k:ident $(($($conv_k:ident :: $param_k:ident),*))?,)*],
|
||||
no_key: [$($kw_nk:ident $(if $required_nk:ident)? $(if_header $header_nk:expr)? => $api_nk:ident $(($($conv_nk:ident :: $param_nk:ident),*))?,)*]) => {{
|
||||
|
@ -60,11 +57,9 @@ macro_rules! router_match {
|
|||
// ]
|
||||
// }
|
||||
// See in from_{method} for more detailed usage.
|
||||
use Endpoint::*;
|
||||
use keywords::*;
|
||||
match ($keyword, !$key.is_empty()){
|
||||
$(
|
||||
($kw_k, true) if true $(&& $query.$required_k.is_some())? $(&& $header.contains_key($header_k))? => Ok($api_k {
|
||||
(Keyword::$kw_k, true) if true $(&& $query.$required_k.is_some())? $(&& $header.contains_key($header_k))? => Ok(Endpoint::$api_k {
|
||||
$key,
|
||||
$($(
|
||||
$param_k: router_match!(@@parse_param $query, $conv_k, $param_k),
|
||||
|
@ -72,7 +67,7 @@ macro_rules! router_match {
|
|||
}),
|
||||
)*
|
||||
$(
|
||||
($kw_nk, false) $(if $query.$required_nk.is_some())? $(if $header.contains($header_nk))? => Ok($api_nk {
|
||||
(Keyword::$kw_nk, false) $(if $query.$required_nk.is_some())? $(if $header.contains($header_nk))? => Ok(Endpoint::$api_nk {
|
||||
$($(
|
||||
$param_nk: router_match!(@@parse_param $query, $conv_nk, $param_nk),
|
||||
)*)?
|
||||
|
@ -84,7 +79,7 @@ macro_rules! router_match {
|
|||
|
||||
(@@parse_param $query:expr, query_opt, $param:ident) => {{
|
||||
// extract optional query parameter
|
||||
$query.$param.take().map(|param| param.into_owned())
|
||||
$query.$param.take().map(|param| param.into_owned())
|
||||
}};
|
||||
(@@parse_param $query:expr, query, $param:ident) => {{
|
||||
// extract mendatory query parameter
|
||||
|
@ -93,7 +88,7 @@ macro_rules! router_match {
|
|||
(@@parse_param $query:expr, opt_parse, $param:ident) => {{
|
||||
// extract and parse optional query parameter
|
||||
// a missing parameter is fine, however a parse error is reported as an error
|
||||
$query.$param
|
||||
$query.$param
|
||||
.take()
|
||||
.map(|param| param.parse())
|
||||
.transpose()
|
||||
|
@ -144,14 +139,39 @@ macro_rules! router_match {
|
|||
/// This macro is used to generate part of the code in this module. It must be called only once, and
|
||||
/// is useless outside of this module.
|
||||
macro_rules! generateQueryParameters {
|
||||
( $($rest:expr => $name:ident),* ) => {
|
||||
(
|
||||
keywords: [ $($kw_param:expr => $kw_name: ident),* ],
|
||||
fields: [ $($f_param:expr => $f_name:ident),* ]
|
||||
) => {
|
||||
#[derive(Debug)]
|
||||
#[allow(non_camel_case_types)]
|
||||
enum Keyword {
|
||||
EMPTY,
|
||||
$( $kw_name, )*
|
||||
}
|
||||
|
||||
impl std::fmt::Display for Keyword {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
|
||||
match self {
|
||||
Keyword::EMPTY => write!(f, "``"),
|
||||
$( Keyword::$kw_name => write!(f, "`{}`", $kw_param), )*
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for Keyword {
|
||||
fn default() -> Self {
|
||||
Keyword::EMPTY
|
||||
}
|
||||
}
|
||||
|
||||
/// Struct containing all query parameters used in endpoints. Think of it as a HashMap,
|
||||
/// but with keys statically known.
|
||||
#[derive(Debug, Default)]
|
||||
struct QueryParameters<'a> {
|
||||
keyword: Option<Cow<'a, str>>,
|
||||
keyword: Option<Keyword>,
|
||||
$(
|
||||
$name: Option<Cow<'a, str>>,
|
||||
$f_name: Option<Cow<'a, str>>,
|
||||
)*
|
||||
}
|
||||
|
||||
|
@ -160,34 +180,29 @@ macro_rules! generateQueryParameters {
|
|||
fn from_query(query: &'a str) -> Result<Self, Error> {
|
||||
let mut res: Self = Default::default();
|
||||
for (k, v) in url::form_urlencoded::parse(query.as_bytes()) {
|
||||
let repeated = match k.as_ref() {
|
||||
match k.as_ref() {
|
||||
$(
|
||||
$rest => if !v.is_empty() {
|
||||
res.$name.replace(v).is_some()
|
||||
} else {
|
||||
false
|
||||
$kw_param => if let Some(prev_kw) = res.keyword.replace(Keyword::$kw_name) {
|
||||
return Err(Error::bad_request(format!(
|
||||
"Multiple keywords: '{}' and '{}'", prev_kw, $kw_param
|
||||
)));
|
||||
},
|
||||
)*
|
||||
$(
|
||||
$f_param => if !v.is_empty() {
|
||||
if res.$f_name.replace(v).is_some() {
|
||||
return Err(Error::bad_request(format!(
|
||||
"Query parameter repeated: '{}'", k
|
||||
)));
|
||||
}
|
||||
},
|
||||
)*
|
||||
_ => {
|
||||
if k.starts_with("response-") || k.starts_with("X-Amz-") {
|
||||
false
|
||||
} else if v.as_ref().is_empty() {
|
||||
if res.keyword.replace(k).is_some() {
|
||||
return Err(Error::bad_request("Multiple keywords"));
|
||||
}
|
||||
continue;
|
||||
} else {
|
||||
if !(k.starts_with("response-") || k.starts_with("X-Amz-")) {
|
||||
debug!("Received an unknown query parameter: '{}'", k);
|
||||
false
|
||||
}
|
||||
}
|
||||
};
|
||||
if repeated {
|
||||
return Err(Error::bad_request(format!(
|
||||
"Query parameter repeated: '{}'",
|
||||
k
|
||||
)));
|
||||
}
|
||||
}
|
||||
Ok(res)
|
||||
}
|
||||
|
@ -198,8 +213,8 @@ macro_rules! generateQueryParameters {
|
|||
if self.keyword.is_some() {
|
||||
Some("Keyword not used")
|
||||
} $(
|
||||
else if self.$name.is_some() {
|
||||
Some(concat!("'", $rest, "'"))
|
||||
else if self.$f_name.is_some() {
|
||||
Some(concat!("'", $f_param, "'"))
|
||||
}
|
||||
)* else {
|
||||
None
|
||||
|
|
|
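The reworked `generateQueryParameters!` macro above now takes two lists: value-less `keywords`, which are parsed into a generated `Keyword` enum (with duplicate keywords rejected), and `fields`, which keep their string value (also rejected when repeated). The hand-written sketch below shows the same parsing idea outside the macro for a toy parameter set; the names `Keyword::Delete` and `parse_query` only loosely mirror the generated code, and the `url` crate (version 2) is assumed as a dependency, as in the original parser.

```rust
// Loose, hand-expanded sketch of what the reworked macro generates, for a tiny
// parameter set: one value-less keyword ("delete") and one field ("prefix").
use std::borrow::Cow;

#[derive(Debug, PartialEq)]
#[allow(dead_code)]
enum Keyword {
    Empty,
    Delete,
}

#[derive(Debug, Default)]
struct QueryParameters<'a> {
    keyword: Option<Keyword>,
    prefix: Option<Cow<'a, str>>,
}

fn parse_query(query: &str) -> Result<QueryParameters<'_>, String> {
    let mut res = QueryParameters::default();
    for (k, v) in url::form_urlencoded::parse(query.as_bytes()) {
        match k.as_ref() {
            // Keywords carry no value; seeing a second keyword is an error.
            "delete" => {
                if res.keyword.replace(Keyword::Delete).is_some() {
                    return Err("Multiple keywords".into());
                }
            }
            // Fields carry a value; repeating one is an error.
            "prefix" if !v.is_empty() => {
                if res.prefix.replace(v).is_some() {
                    return Err(format!("Query parameter repeated: '{}'", k));
                }
            }
            // Unknown parameters are tolerated, as in the original parser.
            _ => {}
        }
    }
    Ok(res)
}

fn main() {
    let q = parse_query("delete&prefix=photos%2F").unwrap();
    assert_eq!(q.keyword, Some(Keyword::Delete));
    assert_eq!(q.prefix.as_deref(), Some("photos/"));
}
```

Compared with the previous free-form `Cow<str>` keyword, the enum makes unknown keywords unrepresentable and lets the `@gen_parser` arms match on `Keyword::…` variants directly.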
@ -161,6 +161,15 @@ pub async fn handle_create_bucket(
|
|||
return Err(CommonError::BucketAlreadyExists.into());
|
||||
}
|
||||
} else {
|
||||
// Check user is allowed to create bucket
|
||||
if !key_params.allow_create_bucket.get() {
|
||||
return Err(CommonError::Forbidden(format!(
|
||||
"Access key {} is not allowed to create buckets",
|
||||
api_key.key_id
|
||||
))
|
||||
.into());
|
||||
}
|
||||
|
||||
// Create the bucket!
|
||||
if !is_valid_bucket_name(&bucket_name) {
|
||||
return Err(Error::bad_request(format!(
|
||||
|
|
|
@ -355,7 +355,7 @@ impl Endpoint {
|
|||
fn from_get(key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
||||
router_match! {
|
||||
@gen_parser
|
||||
(query.keyword.take().unwrap_or_default().as_ref(), key, query, None),
|
||||
(query.keyword.take().unwrap_or_default(), key, query, None),
|
||||
key: [
|
||||
EMPTY if upload_id => ListParts (query::upload_id, opt_parse::max_parts, opt_parse::part_number_marker),
|
||||
EMPTY => GetObject (query_opt::version_id, opt_parse::part_number),
|
||||
|
@ -412,7 +412,7 @@ impl Endpoint {
|
|||
fn from_head(key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
||||
router_match! {
|
||||
@gen_parser
|
||||
(query.keyword.take().unwrap_or_default().as_ref(), key, query, None),
|
||||
(query.keyword.take().unwrap_or_default(), key, query, None),
|
||||
key: [
|
||||
EMPTY => HeadObject(opt_parse::part_number, query_opt::version_id),
|
||||
],
|
||||
|
@ -426,7 +426,7 @@ impl Endpoint {
|
|||
fn from_post(key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
||||
router_match! {
|
||||
@gen_parser
|
||||
(query.keyword.take().unwrap_or_default().as_ref(), key, query, None),
|
||||
(query.keyword.take().unwrap_or_default(), key, query, None),
|
||||
key: [
|
||||
EMPTY if upload_id => CompleteMultipartUpload (query::upload_id),
|
||||
RESTORE => RestoreObject (query_opt::version_id),
|
||||
|
@ -448,7 +448,7 @@ impl Endpoint {
|
|||
) -> Result<Self, Error> {
|
||||
router_match! {
|
||||
@gen_parser
|
||||
(query.keyword.take().unwrap_or_default().as_ref(), key, query, headers),
|
||||
(query.keyword.take().unwrap_or_default(), key, query, headers),
|
||||
key: [
|
||||
EMPTY if part_number header "x-amz-copy-source" => UploadPartCopy (parse::part_number, query::upload_id),
|
||||
EMPTY header "x-amz-copy-source" => CopyObject,
|
||||
|
@ -490,7 +490,7 @@ impl Endpoint {
|
|||
fn from_delete(key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
|
||||
router_match! {
|
||||
@gen_parser
|
||||
(query.keyword.take().unwrap_or_default().as_ref(), key, query, None),
|
||||
(query.keyword.take().unwrap_or_default(), key, query, None),
|
||||
key: [
|
||||
EMPTY if upload_id => AbortMultipartUpload (query::upload_id),
|
||||
EMPTY => DeleteObject (query_opt::version_id),
|
||||
|
@ -624,63 +624,60 @@ impl Endpoint {
|
|||
|
||||
// parameter name => struct field
|
||||
generateQueryParameters! {
|
||||
"continuation-token" => continuation_token,
|
||||
"delimiter" => delimiter,
|
||||
"encoding-type" => encoding_type,
|
||||
"fetch-owner" => fetch_owner,
|
||||
"id" => id,
|
||||
"key-marker" => key_marker,
|
||||
"list-type" => list_type,
|
||||
"marker" => marker,
|
||||
"max-keys" => max_keys,
|
||||
"max-parts" => max_parts,
|
||||
"max-uploads" => max_uploads,
|
||||
"partNumber" => part_number,
|
||||
"part-number-marker" => part_number_marker,
|
||||
"prefix" => prefix,
|
||||
"select-type" => select_type,
|
||||
"start-after" => start_after,
|
||||
"uploadId" => upload_id,
|
||||
"upload-id-marker" => upload_id_marker,
|
||||
"versionId" => version_id,
|
||||
"version-id-marker" => version_id_marker
|
||||
}
|
||||
|
||||
mod keywords {
|
||||
//! This module contains all query parameters with no associated value that S3 uses to differentiate
|
||||
//! endpoints.
|
||||
pub const EMPTY: &str = "";
|
||||
|
||||
pub const ACCELERATE: &str = "accelerate";
|
||||
pub const ACL: &str = "acl";
|
||||
pub const ANALYTICS: &str = "analytics";
|
||||
pub const CORS: &str = "cors";
|
||||
pub const DELETE: &str = "delete";
|
||||
pub const ENCRYPTION: &str = "encryption";
|
||||
pub const INTELLIGENT_TIERING: &str = "intelligent-tiering";
|
||||
pub const INVENTORY: &str = "inventory";
|
||||
pub const LEGAL_HOLD: &str = "legal-hold";
|
||||
pub const LIFECYCLE: &str = "lifecycle";
|
||||
pub const LOCATION: &str = "location";
|
||||
pub const LOGGING: &str = "logging";
|
||||
pub const METRICS: &str = "metrics";
|
||||
pub const NOTIFICATION: &str = "notification";
|
||||
pub const OBJECT_LOCK: &str = "object-lock";
|
||||
pub const OWNERSHIP_CONTROLS: &str = "ownershipControls";
|
||||
pub const POLICY: &str = "policy";
|
||||
pub const POLICY_STATUS: &str = "policyStatus";
|
||||
pub const PUBLIC_ACCESS_BLOCK: &str = "publicAccessBlock";
|
||||
pub const REPLICATION: &str = "replication";
|
||||
pub const REQUEST_PAYMENT: &str = "requestPayment";
|
||||
pub const RESTORE: &str = "restore";
|
||||
pub const RETENTION: &str = "retention";
|
||||
pub const SELECT: &str = "select";
|
||||
pub const TAGGING: &str = "tagging";
|
||||
pub const TORRENT: &str = "torrent";
|
||||
pub const UPLOADS: &str = "uploads";
|
||||
pub const VERSIONING: &str = "versioning";
|
||||
pub const VERSIONS: &str = "versions";
|
||||
pub const WEBSITE: &str = "website";
|
||||
keywords: [
|
||||
"accelerate" => ACCELERATE,
|
||||
"acl" => ACL,
|
||||
"analytics" => ANALYTICS,
|
||||
"cors" => CORS,
|
||||
"delete" => DELETE,
|
||||
"encryption" => ENCRYPTION,
|
||||
"intelligent-tiering" => INTELLIGENT_TIERING,
|
||||
"inventory" => INVENTORY,
|
||||
"legal-hold" => LEGAL_HOLD,
|
||||
"lifecycle" => LIFECYCLE,
|
||||
"location" => LOCATION,
|
||||
"logging" => LOGGING,
|
||||
"metrics" => METRICS,
|
||||
"notification" => NOTIFICATION,
|
||||
"object-lock" => OBJECT_LOCK,
|
||||
"ownershipControls" => OWNERSHIP_CONTROLS,
|
||||
"policy" => POLICY,
|
||||
"policyStatus" => POLICY_STATUS,
|
||||
"publicAccessBlock" => PUBLIC_ACCESS_BLOCK,
|
||||
"replication" => REPLICATION,
|
||||
"requestPayment" => REQUEST_PAYMENT,
|
||||
"restore" => RESTORE,
|
||||
"retention" => RETENTION,
|
||||
"select" => SELECT,
|
||||
"tagging" => TAGGING,
|
||||
"torrent" => TORRENT,
|
||||
"uploads" => UPLOADS,
|
||||
"versioning" => VERSIONING,
|
||||
"versions" => VERSIONS,
|
||||
"website" => WEBSITE
|
||||
],
|
||||
fields: [
|
||||
"continuation-token" => continuation_token,
|
||||
"delimiter" => delimiter,
|
||||
"encoding-type" => encoding_type,
|
||||
"fetch-owner" => fetch_owner,
|
||||
"id" => id,
|
||||
"key-marker" => key_marker,
|
||||
"list-type" => list_type,
|
||||
"marker" => marker,
|
||||
"max-keys" => max_keys,
|
||||
"max-parts" => max_parts,
|
||||
"max-uploads" => max_uploads,
|
||||
"partNumber" => part_number,
|
||||
"part-number-marker" => part_number_marker,
|
||||
"prefix" => prefix,
|
||||
"select-type" => select_type,
|
||||
"start-after" => start_after,
|
||||
"uploadId" => upload_id,
|
||||
"upload-id-marker" => upload_id_marker,
|
||||
"versionId" => version_id,
|
||||
"version-id-marker" => version_id_marker
|
||||
]
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
|
|
@ -33,6 +33,7 @@ pretty_env_logger = { version = "0.4", optional = true }
|
|||
mktemp = "0.4"
|
||||
|
||||
[features]
|
||||
default = [ "sled" ]
|
||||
bundled-libs = [ "rusqlite/bundled" ]
|
||||
cli = ["clap", "pretty_env_logger"]
|
||||
lmdb = [ "heed" ]
|
||||
|
|
|
@ -36,7 +36,7 @@ bytesize = "1.1"
|
|||
timeago = "0.3"
|
||||
parse_duration = "2.1"
|
||||
hex = "0.4"
|
||||
tracing = { version = "0.1.30", features = ["log-always"] }
|
||||
tracing = { version = "0.1.30" }
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||
rand = "0.8"
|
||||
async-trait = "0.1.7"
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
use crate::common;
|
||||
use crate::common::ext::CommandExt;
|
||||
use aws_sdk_s3::model::BucketLocationConstraint;
|
||||
use aws_sdk_s3::output::DeleteBucketOutput;
|
||||
|
||||
|
@ -8,6 +9,27 @@ async fn test_bucket_all() {
|
|||
let bucket_name = "hello";
|
||||
|
||||
{
|
||||
// Check bucket cannot be created if not authorized
|
||||
ctx.garage
|
||||
.command()
|
||||
.args(["key", "deny"])
|
||||
.args(["--create-bucket", &ctx.garage.key.id])
|
||||
.quiet()
|
||||
.expect_success_output("Could not deny key to create buckets");
|
||||
|
||||
// Try to create the bucket; this should fail
|
||||
let r = ctx.client.create_bucket().bucket(bucket_name).send().await;
|
||||
assert!(r.is_err());
|
||||
}
|
||||
{
|
||||
// Now allow key to create bucket
|
||||
ctx.garage
|
||||
.command()
|
||||
.args(["key", "allow"])
|
||||
.args(["--create-bucket", &ctx.garage.key.id])
|
||||
.quiet()
|
||||
.expect_success_output("Could not deny key to create buckets");
|
||||
|
||||
// Create bucket
|
||||
//@TODO check with an invalid bucket name + with an already existing bucket
|
||||
let r = ctx
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
use std::collections::HashMap;
|
||||
|
||||
use crate::common;
|
||||
use crate::common::ext::CommandExt;
|
||||
use common::custom_requester::BodySignature;
|
||||
use hyper::Method;
|
||||
|
||||
|
@ -105,6 +106,13 @@ async fn test_create_bucket_streaming() {
|
|||
let ctx = common::context();
|
||||
let bucket = "createbucket-streaming";
|
||||
|
||||
ctx.garage
|
||||
.command()
|
||||
.args(["key", "allow"])
|
||||
.args(["--create-bucket", &ctx.garage.key.id])
|
||||
.quiet()
|
||||
.expect_success_output("Could not allow key to create buckets");
|
||||
|
||||
{
|
||||
// create bucket
|
||||
let _ = ctx
|
||||
|
|
|
@ -14,7 +14,7 @@ path = "lib.rs"
|
|||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
garage_db = { version = "0.8.0", path = "../db" }
|
||||
garage_db = { version = "0.8.0", default-features = false, path = "../db" }
|
||||
garage_rpc = { version = "0.8.0", path = "../rpc" }
|
||||
garage_table = { version = "0.8.0", path = "../table" }
|
||||
garage_block = { version = "0.8.0", path = "../block" }
|
||||
|
@ -42,6 +42,7 @@ opentelemetry = "0.17"
|
|||
netapp = "0.5"
|
||||
|
||||
[features]
|
||||
default = [ "sled" ]
|
||||
k2v = [ "garage_util/k2v" ]
|
||||
lmdb = [ "garage_db/lmdb" ]
|
||||
sled = [ "garage_db/sled" ]
|
||||
|
|
|
@ -8,10 +8,10 @@ use garage_util::background::*;
|
|||
use garage_util::config::*;
|
||||
use garage_util::error::*;
|
||||
|
||||
use garage_rpc::replication_mode::ReplicationMode;
|
||||
use garage_rpc::system::System;
|
||||
|
||||
use garage_block::manager::*;
|
||||
use garage_table::replication::ReplicationMode;
|
||||
use garage_table::replication::TableFullReplication;
|
||||
use garage_table::replication::TableShardedReplication;
|
||||
use garage_table::*;
|
||||
|
@ -34,6 +34,9 @@ pub struct Garage {
|
|||
/// The parsed configuration Garage is running
|
||||
pub config: Config,
|
||||
|
||||
/// The replication mode of this cluster
|
||||
pub replication_mode: ReplicationMode,
|
||||
|
||||
/// The local database
|
||||
pub db: db::Db,
|
||||
/// A background job runner
|
||||
|
@ -164,12 +167,7 @@ impl Garage {
|
|||
.expect("Invalid replication_mode in config file.");
|
||||
|
||||
info!("Initialize membership management system...");
|
||||
let system = System::new(
|
||||
network_key,
|
||||
background.clone(),
|
||||
replication_mode.replication_factor(),
|
||||
&config,
|
||||
)?;
|
||||
let system = System::new(network_key, background.clone(), replication_mode, &config)?;
|
||||
|
||||
let data_rep_param = TableShardedReplication {
|
||||
system: system.clone(),
|
||||
|
@ -258,6 +256,7 @@ impl Garage {
|
|||
// -- done --
|
||||
Ok(Arc::new(Self {
|
||||
config,
|
||||
replication_mode,
|
||||
db,
|
||||
background,
|
||||
system,
|
||||
|
|
|
@ -254,9 +254,11 @@ To know the correct value of the new layout version, invoke `garage layout show`
|
|||
match self.initial_partition_assignation() {
|
||||
Some(initial_partitions) => {
|
||||
for (part, ipart) in partitions.iter_mut().zip(initial_partitions.iter()) {
|
||||
for (id, info) in ipart.nodes.iter() {
|
||||
if part.nodes.len() < self.replication_factor {
|
||||
part.add(None, n_zones, id, info.unwrap());
|
||||
for _ in 0..2 {
|
||||
for (id, info) in ipart.nodes.iter() {
|
||||
if part.nodes.len() < self.replication_factor {
|
||||
part.add(None, n_zones, id, info.unwrap());
|
||||
}
|
||||
}
|
||||
}
|
||||
assert!(part.nodes.len() == self.replication_factor);
|
||||
|
|
|
@ -9,6 +9,7 @@ mod consul;
|
|||
mod kubernetes;
|
||||
|
||||
pub mod layout;
|
||||
pub mod replication_mode;
|
||||
pub mod ring;
|
||||
pub mod system;
|
||||
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
#[derive(Clone, Copy)]
|
||||
pub enum ReplicationMode {
|
||||
None,
|
||||
TwoWay,
|
|
@ -35,6 +35,7 @@ use crate::consul::ConsulDiscovery;
|
|||
#[cfg(feature = "kubernetes-discovery")]
|
||||
use crate::kubernetes::*;
|
||||
use crate::layout::*;
|
||||
use crate::replication_mode::*;
|
||||
use crate::ring::*;
|
||||
use crate::rpc_helper::*;
|
||||
|
||||
|
@ -102,6 +103,7 @@ pub struct System {
|
|||
#[cfg(feature = "kubernetes-discovery")]
|
||||
kubernetes_discovery: Option<KubernetesDiscoveryConfig>,
|
||||
|
||||
replication_mode: ReplicationMode,
|
||||
replication_factor: usize,
|
||||
|
||||
/// The ring
|
||||
|
@ -136,6 +138,37 @@ pub struct KnownNodeInfo {
|
|||
pub status: NodeStatus,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
|
||||
pub struct ClusterHealth {
|
||||
/// The current health status of the cluster (see below)
|
||||
pub status: ClusterHealthStatus,
|
||||
/// Number of nodes already seen once in the cluster
|
||||
pub known_nodes: usize,
|
||||
/// Number of nodes currently connected
|
||||
pub connected_nodes: usize,
|
||||
/// Number of storage nodes declared in the current layout
|
||||
pub storage_nodes: usize,
|
||||
/// Number of storage nodes currently connected
|
||||
pub storage_nodes_ok: usize,
|
||||
/// Number of partitions in the layout
|
||||
pub partitions: usize,
|
||||
/// Number of partitions for which we have a quorum of connected nodes
|
||||
pub partitions_quorum: usize,
|
||||
/// Number of partitions for which all storage nodes are connected
|
||||
pub partitions_all_ok: usize,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
|
||||
pub enum ClusterHealthStatus {
|
||||
/// All nodes are available
|
||||
Healthy,
|
||||
/// Some storage nodes are unavailable, but quorum is still
|
||||
/// achieved for all partitions
|
||||
Degraded,
|
||||
/// Quorum is not available for some partitions
|
||||
Unavailable,
|
||||
}
|
||||
|
||||
pub fn read_node_id(metadata_dir: &Path) -> Result<NodeID, Error> {
|
||||
let mut pubkey_file = metadata_dir.to_path_buf();
|
||||
pubkey_file.push("node_key.pub");
|
||||
|
@ -200,9 +233,11 @@ impl System {
|
|||
pub fn new(
|
||||
network_key: NetworkKey,
|
||||
background: Arc<BackgroundRunner>,
|
||||
replication_factor: usize,
|
||||
replication_mode: ReplicationMode,
|
||||
config: &Config,
|
||||
) -> Result<Arc<Self>, Error> {
|
||||
let replication_factor = replication_mode.replication_factor();
|
||||
|
||||
let node_key =
|
||||
gen_node_key(&config.metadata_dir).expect("Unable to read or generate node ID");
|
||||
info!(
|
||||
|
@ -324,6 +359,7 @@ impl System {
|
|||
config.rpc_timeout_msec.map(Duration::from_millis),
|
||||
),
|
||||
system_endpoint,
|
||||
replication_mode,
|
||||
replication_factor,
|
||||
rpc_listen_addr: config.rpc_bind_addr,
|
||||
#[cfg(any(feature = "consul-discovery", feature = "kubernetes-discovery"))]
|
||||
|
@ -429,6 +465,67 @@ impl System {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn health(&self) -> ClusterHealth {
|
||||
let ring: Arc<_> = self.ring.borrow().clone();
|
||||
let quorum = self.replication_mode.write_quorum();
|
||||
let replication_factor = self.replication_factor;
|
||||
|
||||
let nodes = self
|
||||
.get_known_nodes()
|
||||
.into_iter()
|
||||
.map(|n| (n.id, n))
|
||||
.collect::<HashMap<Uuid, _>>();
|
||||
let connected_nodes = nodes.iter().filter(|(_, n)| n.is_up).count();
|
||||
|
||||
let storage_nodes = ring
|
||||
.layout
|
||||
.roles
|
||||
.items()
|
||||
.iter()
|
||||
.filter(|(_, _, v)| matches!(v, NodeRoleV(Some(r)) if r.capacity.is_some()))
|
||||
.collect::<Vec<_>>();
|
||||
let storage_nodes_ok = storage_nodes
|
||||
.iter()
|
||||
.filter(|(x, _, _)| nodes.get(x).map(|n| n.is_up).unwrap_or(false))
|
||||
.count();
|
||||
|
||||
let partitions = ring.partitions();
|
||||
let partitions_n_up = partitions
|
||||
.iter()
|
||||
.map(|(_, h)| {
|
||||
let pn = ring.get_nodes(h, ring.replication_factor);
|
||||
pn.iter()
|
||||
.filter(|x| nodes.get(x).map(|n| n.is_up).unwrap_or(false))
|
||||
.count()
|
||||
})
|
||||
.collect::<Vec<usize>>();
|
||||
let partitions_all_ok = partitions_n_up
|
||||
.iter()
|
||||
.filter(|c| **c == replication_factor)
|
||||
.count();
|
||||
let partitions_quorum = partitions_n_up.iter().filter(|c| **c >= quorum).count();
|
||||
|
||||
let status =
|
||||
if partitions_quorum == partitions.len() && storage_nodes_ok == storage_nodes.len() {
|
||||
ClusterHealthStatus::Healthy
|
||||
} else if partitions_quorum == partitions.len() {
|
||||
ClusterHealthStatus::Degraded
|
||||
} else {
|
||||
ClusterHealthStatus::Unavailable
|
||||
};
|
||||
|
||||
ClusterHealth {
|
||||
status,
|
||||
known_nodes: nodes.len(),
|
||||
connected_nodes,
|
||||
storage_nodes: storage_nodes.len(),
|
||||
storage_nodes_ok,
|
||||
partitions: partitions.len(),
|
||||
partitions_quorum,
|
||||
partitions_all_ok,
|
||||
}
|
||||
}
|
||||
|
||||
// ---- INTERNALS ----
|
||||
|
||||
#[cfg(feature = "consul-discovery")]
|
||||
|
|
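The `System::health()` method added above gathers node and partition connectivity counts into the new `ClusterHealth` struct (which, deriving `Serialize`, is what `/v0/health` returns as JSON) and then reduces them to one of three states. The toy function below isolates just that final classification step so the thresholds are explicit; all identifiers here are illustrative stand-ins, and only the decision rule mirrors the patch.

```rust
// Stand-alone restatement of the final classification step in `System::health()`.
// Names are illustrative; only the decision rule is taken from the patch.
#[derive(Debug, PartialEq)]
enum Status {
    Healthy,
    Degraded,
    Unavailable,
}

fn classify(
    partitions: usize,        // total partitions in the layout
    partitions_quorum: usize, // partitions with at least a write quorum of nodes up
    storage_nodes: usize,     // storage nodes declared in the layout
    storage_nodes_ok: usize,  // storage nodes currently connected
) -> Status {
    if partitions_quorum == partitions && storage_nodes_ok == storage_nodes {
        Status::Healthy
    } else if partitions_quorum == partitions {
        Status::Degraded
    } else {
        Status::Unavailable
    }
}

fn main() {
    // Everything connected: healthy.
    assert_eq!(classify(256, 256, 4, 4), Status::Healthy);
    // One storage node down, but every partition still reaches quorum: degraded.
    assert_eq!(classify(256, 256, 4, 3), Status::Degraded);
    // Two nodes down and some partitions below quorum: unavailable.
    assert_eq!(classify(256, 250, 4, 2), Status::Unavailable);
}
```

The admin handler shown earlier then maps Healthy and Degraded to HTTP 200 and Unavailable to 503, which is what the `/health` probe sketched above relies on.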
|
@ -1,10 +1,8 @@
|
|||
mod parameters;
|
||||
|
||||
mod fullcopy;
|
||||
mod mode;
|
||||
mod sharded;
|
||||
|
||||
pub use fullcopy::TableFullReplication;
|
||||
pub use mode::ReplicationMode;
|
||||
pub use parameters::*;
|
||||
pub use sharded::TableShardedReplication;
|
||||
|
|