Merge pull request 'Report available disk space in garage stats' (#487) from report-disk-usage into main
All checks were successful
continuous-integration/drone/push Build is passing
Reviewed-on: #487
commit 611792ddcf
20 changed files with 843 additions and 564 deletions
15  Cargo.lock (generated)
|
@ -1257,6 +1257,7 @@ dependencies = [
|
|||
"serde",
|
||||
"serde_bytes",
|
||||
"serde_json",
|
||||
"systemstat",
|
||||
"tokio",
|
||||
"tokio-stream",
|
||||
"tracing",
|
||||
|
@ -3549,6 +3550,20 @@ dependencies = [
|
|||
"unicode-xid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "systemstat"
|
||||
version = "0.2.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a24aec24a9312c83999a28e3ef9db7e2afd5c64bf47725b758cdc1cafd5b0bd2"
|
||||
dependencies = [
|
||||
"bytesize",
|
||||
"lazy_static",
|
||||
"libc",
|
||||
"nom",
|
||||
"time 0.3.17",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tempfile"
|
||||
version = "3.3.0"
|
||||
|
|
20  Cargo.nix
|
@ -32,7 +32,7 @@ args@{
|
|||
ignoreLockHash,
|
||||
}:
|
||||
let
|
||||
nixifiedLockHash = "456bca1fe75cfe5c26a9b56401a40b5b205c0096a0dc7287c7853c35498bc5c0";
|
||||
nixifiedLockHash = "cf836c01a9c668bab5f9a09d468f47aa24c50abec92855503624d706721335ef";
|
||||
workspaceSrc = if args.workspaceSrc == null then ./. else args.workspaceSrc;
|
||||
currentLockHash = builtins.hashFile "sha256" (workspaceSrc + /Cargo.lock);
|
||||
lockHashIgnored = if ignoreLockHash
|
||||
|
@ -1790,6 +1790,7 @@ in
|
|||
serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.152" { inherit profileName; }).out;
|
||||
serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.8" { inherit profileName; }).out;
|
||||
serde_json = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_json."1.0.91" { inherit profileName; }).out;
|
||||
systemstat = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".systemstat."0.2.3" { inherit profileName; }).out;
|
||||
tokio = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.24.2" { inherit profileName; }).out;
|
||||
tokio_stream = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio-stream."0.1.11" { inherit profileName; }).out;
|
||||
tracing = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tracing."0.1.37" { inherit profileName; }).out;
|
||||
|
@ -4938,6 +4939,21 @@ in
|
|||
};
|
||||
});
|
||||
|
||||
"registry+https://github.com/rust-lang/crates.io-index".systemstat."0.2.3" = overridableMkRustCrate (profileName: rec {
|
||||
name = "systemstat";
|
||||
version = "0.2.3";
|
||||
registry = "registry+https://github.com/rust-lang/crates.io-index";
|
||||
src = fetchCratesIo { inherit name version; sha256 = "a24aec24a9312c83999a28e3ef9db7e2afd5c64bf47725b758cdc1cafd5b0bd2"; };
|
||||
dependencies = {
|
||||
bytesize = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytesize."1.1.0" { inherit profileName; }).out;
|
||||
lazy_static = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".lazy_static."1.4.0" { inherit profileName; }).out;
|
||||
libc = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.139" { inherit profileName; }).out;
|
||||
${ if hostPlatform.parsed.kernel.name == "linux" || hostPlatform.parsed.kernel.name == "android" then "nom" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".nom."7.1.3" { inherit profileName; }).out;
|
||||
time = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".time."0.3.17" { inherit profileName; }).out;
|
||||
${ if hostPlatform.isWindows then "winapi" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".winapi."0.3.9" { inherit profileName; }).out;
|
||||
};
|
||||
});
|
||||
|
||||
"registry+https://github.com/rust-lang/crates.io-index".tempfile."3.3.0" = overridableMkRustCrate (profileName: rec {
|
||||
name = "tempfile";
|
||||
version = "3.3.0";
|
||||
|
@ -5961,6 +5977,7 @@ in
|
|||
[ "ntsecapi" ]
|
||||
[ "ntstatus" ]
|
||||
[ "objbase" ]
|
||||
[ "pdh" ]
|
||||
[ "processenv" ]
|
||||
[ "processthreadsapi" ]
|
||||
[ "profileapi" ]
|
||||
|
@ -5977,6 +5994,7 @@ in
|
|||
(lib.optional (rootFeatures' ? "garage/consul-discovery" || rootFeatures' ? "garage_rpc/consul-discovery" || rootFeatures' ? "garage_rpc/reqwest") "winreg")
|
||||
[ "winsock2" ]
|
||||
[ "winstring" ]
|
||||
[ "ws2def" ]
|
||||
[ "ws2ipdef" ]
|
||||
[ "ws2tcpip" ]
|
||||
[ "wtypesbase" ]
|
||||
|
|
2  Makefile
@@ -4,7 +4,7 @@ all:
 	clear; cargo build
 
 release:
-	nix-build --arg release true
+	nix-build --attr pkgs.amd64.release --no-build-output
 
 shell:
 	nix-shell
26  default.nix
|
@ -1,7 +1,4 @@
|
|||
{
|
||||
system ? builtins.currentSystem,
|
||||
git_version ? null,
|
||||
}:
|
||||
{ system ? builtins.currentSystem, git_version ? null, }:
|
||||
|
||||
with import ./nix/common.nix;
|
||||
|
||||
|
@ -13,21 +10,20 @@ let
|
|||
debug = (compile {
|
||||
inherit system target git_version pkgsSrc cargo2nixOverlay;
|
||||
release = false;
|
||||
}).workspace.garage {
|
||||
compileMode = "build";
|
||||
};
|
||||
}).workspace.garage { compileMode = "build"; };
|
||||
|
||||
release = (compile {
|
||||
inherit system target git_version pkgsSrc cargo2nixOverlay;
|
||||
release = true;
|
||||
}).workspace.garage {
|
||||
compileMode = "build";
|
||||
};
|
||||
}).workspace.garage { compileMode = "build"; };
|
||||
});
|
||||
|
||||
test = (rustPkgs: pkgs.symlinkJoin {
|
||||
name ="garage-tests";
|
||||
paths = builtins.map (key: rustPkgs.workspace.${key} { compileMode = "test"; }) (builtins.attrNames rustPkgs.workspace);
|
||||
test = (rustPkgs:
|
||||
pkgs.symlinkJoin {
|
||||
name = "garage-tests";
|
||||
paths =
|
||||
builtins.map (key: rustPkgs.workspace.${key} { compileMode = "test"; })
|
||||
(builtins.attrNames rustPkgs.workspace);
|
||||
});
|
||||
|
||||
in {
|
||||
|
@ -55,8 +51,6 @@ in {
|
|||
inherit system git_version pkgsSrc cargo2nixOverlay;
|
||||
target = "x86_64-unknown-linux-musl";
|
||||
compiler = "clippy";
|
||||
}).workspace.garage {
|
||||
compileMode = "build";
|
||||
};
|
||||
}).workspace.garage { compileMode = "build"; };
|
||||
};
|
||||
}
|
||||
|
|
12  flake.nix
|
@ -1,7 +1,9 @@
|
|||
{
|
||||
description = "Garage, an S3-compatible distributed object store for self-hosted deployments";
|
||||
description =
|
||||
"Garage, an S3-compatible distributed object store for self-hosted deployments";
|
||||
|
||||
inputs.nixpkgs.url = "github:NixOS/nixpkgs/a3073c49bc0163fea6a121c276f526837672b555";
|
||||
inputs.nixpkgs.url =
|
||||
"github:NixOS/nixpkgs/a3073c49bc0163fea6a121c276f526837672b555";
|
||||
inputs.cargo2nix = {
|
||||
# As of 2022-10-18: two small patches over unstable branch, one for clippy and one to fix feature detection
|
||||
url = "github:Alexis211/cargo2nix/a7a61179b66054904ef6a195d8da736eaaa06c36";
|
||||
|
@ -24,13 +26,11 @@
|
|||
release = true;
|
||||
}).workspace.garage { compileMode = "build"; };
|
||||
};
|
||||
devShell = ((compile {
|
||||
devShell = (compile {
|
||||
inherit system git_version;
|
||||
pkgsSrc = nixpkgs;
|
||||
cargo2nixOverlay = cargo2nix.overlays.default;
|
||||
release = false;
|
||||
}).workspaceShell {
|
||||
packages = [ pkgs.rustfmt cargo2nix.packages.${system}.default ];
|
||||
});
|
||||
}).workspaceShell { packages = [ pkgs.rustfmt ]; };
|
||||
});
|
||||
}
|
||||
|
|
|
@ -1,10 +1,8 @@
|
|||
{
|
||||
path ? "/../aws-list.txt",
|
||||
}:
|
||||
{ path ? "/../aws-list.txt", }:
|
||||
|
||||
with import ./common.nix;
|
||||
let
|
||||
pkgs = import pkgsSrc {};
|
||||
pkgs = import pkgsSrc { };
|
||||
lib = pkgs.lib;
|
||||
|
||||
/* Converts a key list and a value list to a set
|
||||
|
@ -14,11 +12,10 @@ let
|
|||
=> { name = "latex"; version = "3.14"; }
|
||||
*/
|
||||
listToSet = keys: values:
|
||||
builtins.listToAttrs
|
||||
(lib.zipListsWith
|
||||
(a: b: { name = a; value = b; })
|
||||
keys
|
||||
values);
|
||||
builtins.listToAttrs (lib.zipListsWith (a: b: {
|
||||
name = a;
|
||||
value = b;
|
||||
}) keys values);
|
||||
|
||||
/* Says if datetime a is more recent than datetime b
|
||||
|
||||
|
@ -27,72 +24,101 @@ let
|
|||
=> true
|
||||
*/
|
||||
cmpDate = a: b:
|
||||
let da = (builtins.head a.builds).date;
|
||||
let
|
||||
da = (builtins.head a.builds).date;
|
||||
db = (builtins.head b.builds).date;
|
||||
in
|
||||
if da == db then (builtins.head a.builds).time > (builtins.head b.builds).time
|
||||
else da > db;
|
||||
in if da == db then
|
||||
(builtins.head a.builds).time > (builtins.head b.builds).time
|
||||
else
|
||||
da > db;
|
||||
|
||||
/* Pretty platforms */
|
||||
# Pretty platforms
|
||||
prettyPlatform = name:
|
||||
if name == "aarch64-unknown-linux-musl" then "linux/arm64"
|
||||
else if name == "armv6l-unknown-linux-musleabihf" then "linux/arm"
|
||||
else if name == "x86_64-unknown-linux-musl" then "linux/amd64"
|
||||
else if name == "i686-unknown-linux-musl" then "linux/386"
|
||||
else name;
|
||||
if name == "aarch64-unknown-linux-musl" then
|
||||
"linux/arm64"
|
||||
else if name == "armv6l-unknown-linux-musleabihf" then
|
||||
"linux/arm"
|
||||
else if name == "x86_64-unknown-linux-musl" then
|
||||
"linux/amd64"
|
||||
else if name == "i686-unknown-linux-musl" then
|
||||
"linux/386"
|
||||
else
|
||||
name;
|
||||
|
||||
/* Parsing */
|
||||
# Parsing
|
||||
list = builtins.readFile (./. + path);
|
||||
entries = lib.splitString "\n" list;
|
||||
|
||||
elems = builtins.filter
|
||||
(e: (builtins.length e) == 4)
|
||||
(map
|
||||
(x: builtins.filter (e: e != "") (lib.splitString " " x))
|
||||
entries);
|
||||
elems = builtins.filter (e: (builtins.length e) == 4)
|
||||
(map (x: builtins.filter (e: e != "") (lib.splitString " " x)) entries);
|
||||
|
||||
keys = ["date" "time" "size" "path"];
|
||||
keys = [ "date" "time" "size" "path" ];
|
||||
parsed = map (entry: listToSet keys entry) elems;
|
||||
|
||||
subkeys = ["root" "version" "platform" "binary" ];
|
||||
builds = map (entry: entry // listToSet subkeys (lib.splitString "/" entry.path) // { url = "https://garagehq.deuxfleurs.fr/" + entry.path; }) parsed;
|
||||
subkeys = [ "root" "version" "platform" "binary" ];
|
||||
builds = map (entry:
|
||||
entry // listToSet subkeys (lib.splitString "/" entry.path) // {
|
||||
url = "https://garagehq.deuxfleurs.fr/" + entry.path;
|
||||
}) parsed;
|
||||
|
||||
/* Aggregation */
|
||||
builds_per_version = lib.foldl (acc: v: acc // { ${v.version} = if builtins.hasAttr v.version acc then acc.${v.version} ++ [ v ] else [ v ]; }) {} builds;
|
||||
# Aggregation
|
||||
builds_per_version = lib.foldl (acc: v:
|
||||
acc // {
|
||||
${v.version} = if builtins.hasAttr v.version acc then
|
||||
acc.${v.version} ++ [ v ]
|
||||
else
|
||||
[ v ];
|
||||
}) { } builds;
|
||||
|
||||
versions = builtins.attrNames builds_per_version;
|
||||
versions_release = builtins.filter (x: builtins.match "v[0-9]+\.[0-9]+\.[0-9]+(\.[0-9]+)?" x != null) versions;
|
||||
versions_commit = builtins.filter (x: builtins.match "[0-9a-f]{40}" x != null) versions;
|
||||
versions_extra = lib.subtractLists (versions_release ++ versions_commit) versions;
|
||||
versions_release = builtins.filter
|
||||
(x: builtins.match "v[0-9]+.[0-9]+.[0-9]+(.[0-9]+)?" x != null) versions;
|
||||
versions_commit =
|
||||
builtins.filter (x: builtins.match "[0-9a-f]{40}" x != null) versions;
|
||||
versions_extra =
|
||||
lib.subtractLists (versions_release ++ versions_commit) versions;
|
||||
|
||||
sorted_builds = [
|
||||
{
|
||||
name = "Release";
|
||||
hide = false;
|
||||
type = "tag";
|
||||
description = "Release builds are the official builds, they are tailored for productions and are the most tested.";
|
||||
builds = builtins.sort (a: b: a.version > b.version) (map (x: { version = x; builds = builtins.getAttr x builds_per_version; }) versions_release);
|
||||
description =
|
||||
"Release builds are the official builds, they are tailored for productions and are the most tested.";
|
||||
builds = builtins.sort (a: b: a.version > b.version) (map (x: {
|
||||
version = x;
|
||||
builds = builtins.getAttr x builds_per_version;
|
||||
}) versions_release);
|
||||
}
|
||||
{
|
||||
name = "Extra";
|
||||
hide = true;
|
||||
type = "tag";
|
||||
description = "Extra builds are built on demand to test a specific feature or a specific need.";
|
||||
builds = builtins.sort cmpDate (map (x: { version = x; builds = builtins.getAttr x builds_per_version; }) versions_extra);
|
||||
description =
|
||||
"Extra builds are built on demand to test a specific feature or a specific need.";
|
||||
builds = builtins.sort cmpDate (map (x: {
|
||||
version = x;
|
||||
builds = builtins.getAttr x builds_per_version;
|
||||
}) versions_extra);
|
||||
}
|
||||
{
|
||||
name = "Development";
|
||||
hide = true;
|
||||
type = "commit";
|
||||
description = "Development builds are built periodically. Use them if you want to test a specific feature that is not yet released.";
|
||||
builds = builtins.sort cmpDate (map (x: { version = x; builds = builtins.getAttr x builds_per_version; }) versions_commit);
|
||||
description =
|
||||
"Development builds are built periodically. Use them if you want to test a specific feature that is not yet released.";
|
||||
builds = builtins.sort cmpDate (map (x: {
|
||||
version = x;
|
||||
builds = builtins.getAttr x builds_per_version;
|
||||
}) versions_commit);
|
||||
}
|
||||
];
|
||||
|
||||
json = pkgs.writeTextDir "share/_releases.json" (builtins.toJSON sorted_builds);
|
||||
json =
|
||||
pkgs.writeTextDir "share/_releases.json" (builtins.toJSON sorted_builds);
|
||||
html = pkgs.writeTextDir "share/_releases.html" ''
|
||||
<!doctype html>
|
||||
<html>
|
||||
<!doctype html>
|
||||
<html>
|
||||
<head>
|
||||
<meta charset="utf-8" />
|
||||
<title>Garage releases</title>
|
||||
|
@ -114,20 +140,29 @@ let
|
|||
</style>
|
||||
</head>
|
||||
<body>
|
||||
${ builtins.toString (lib.forEach sorted_builds (r: ''
|
||||
${
|
||||
builtins.toString (lib.forEach sorted_builds (r: ''
|
||||
<section>
|
||||
<h2>${r.name} builds</h2>
|
||||
|
||||
<p>${r.description}</p>
|
||||
|
||||
${if r.hide then "<details><summary>Show ${r.name} builds</summary>" else ""}
|
||||
${ builtins.toString (lib.forEach r.builds (x: ''
|
||||
${
|
||||
if r.hide then
|
||||
"<details><summary>Show ${r.name} builds</summary>"
|
||||
else
|
||||
""
|
||||
}
|
||||
${
|
||||
builtins.toString (lib.forEach r.builds (x: ''
|
||||
<h3> ${x.version} (${(builtins.head x.builds).date}) </h3>
|
||||
<p>See this build on</p>
|
||||
<p> Binaries:
|
||||
<ul>
|
||||
${ builtins.toString (lib.forEach x.builds (b: ''
|
||||
<li><a href="/${b.path}">${prettyPlatform b.platform}</a></li>
|
||||
${builtins.toString (lib.forEach x.builds (b: ''
|
||||
<li><a href="/${b.path}">${
|
||||
prettyPlatform b.platform
|
||||
}</a></li>
|
||||
''))}
|
||||
</ul></p>
|
||||
<p> Sources:
|
||||
|
@ -136,16 +171,22 @@ let
|
|||
<li><a href="https://git.deuxfleurs.fr/Deuxfleurs/garage/archive/${x.version}.zip">.zip</a></li>
|
||||
<li><a href="https://git.deuxfleurs.fr/Deuxfleurs/garage/archive/${x.version}.tar.gz">.tar.gz</a></li>
|
||||
</ul></p>
|
||||
'')) }
|
||||
${ if builtins.length r.builds == 0 then "<em>There is no build for this category</em>" else "" }
|
||||
''))
|
||||
}
|
||||
${
|
||||
if builtins.length r.builds == 0 then
|
||||
"<em>There is no build for this category</em>"
|
||||
else
|
||||
""
|
||||
}
|
||||
${if r.hide then "</details>" else ""}
|
||||
</section>
|
||||
''))}
|
||||
''))
|
||||
}
|
||||
</body>
|
||||
</html>
|
||||
'';
|
||||
in
|
||||
pkgs.symlinkJoin {
|
||||
</html>
|
||||
'';
|
||||
in pkgs.symlinkJoin {
|
||||
name = "releases";
|
||||
paths = [ json html ];
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,10 +1,9 @@
|
|||
rec {
|
||||
/*
|
||||
* Fixed dependencies
|
||||
*/
|
||||
# * Fixed dependencies
|
||||
pkgsSrc = fetchTarball {
|
||||
# As of 2022-10-13
|
||||
url = "https://github.com/NixOS/nixpkgs/archive/a3073c49bc0163fea6a121c276f526837672b555.zip";
|
||||
url =
|
||||
"https://github.com/NixOS/nixpkgs/archive/a3073c49bc0163fea6a121c276f526837672b555.zip";
|
||||
sha256 = "1bz632psfbpmicyzjb8b4265y50shylccvfm6ry6mgnv5hvz324s";
|
||||
};
|
||||
cargo2nixSrc = fetchGit {
|
||||
|
@ -14,9 +13,7 @@ rec {
|
|||
rev = "a7a61179b66054904ef6a195d8da736eaaa06c36";
|
||||
};
|
||||
|
||||
/*
|
||||
* Shared objects
|
||||
*/
|
||||
# * Shared objects
|
||||
cargo2nix = import cargo2nixSrc;
|
||||
cargo2nixOverlay = cargo2nix.overlays.default;
|
||||
}
|
||||
|
|
145  nix/compile.nix
|
@ -1,19 +1,10 @@
|
|||
{
|
||||
system,
|
||||
target ? null,
|
||||
pkgsSrc,
|
||||
cargo2nixOverlay,
|
||||
compiler ? "rustc",
|
||||
release ? false,
|
||||
git_version ? null,
|
||||
features ? null,
|
||||
}:
|
||||
{ system, target ? null, pkgsSrc, cargo2nixOverlay, compiler ? "rustc"
|
||||
, release ? false, git_version ? null, features ? null, }:
|
||||
|
||||
let
|
||||
log = v: builtins.trace v v;
|
||||
|
||||
pkgs =
|
||||
if target != null then
|
||||
pkgs = if target != null then
|
||||
import pkgsSrc {
|
||||
inherit system;
|
||||
crossSystem = {
|
||||
|
@ -28,8 +19,7 @@ let
|
|||
overlays = [ cargo2nixOverlay ];
|
||||
};
|
||||
|
||||
/*
|
||||
Cargo2nix is built for rustOverlay which installs Rust from Mozilla releases.
|
||||
/* Cargo2nix is built for rustOverlay which installs Rust from Mozilla releases.
|
||||
This is fine for 64-bit platforms, but for 32-bit platforms, we need our own Rust
|
||||
to avoid incompatibilities with time_t between different versions of musl
|
||||
(>= 1.2.0 shipped by NixOS, < 1.2.0 with which rustc was built), which lead to compilation breakage.
|
||||
|
@ -40,8 +30,8 @@ let
|
|||
In practise, rustOverlay ships rustc+cargo in a single derivation while
|
||||
NixOS ships them in separate ones. We reunite them with symlinkJoin.
|
||||
*/
|
||||
toolchainOptions =
|
||||
if target == null || target == "x86_64-unknown-linux-musl" || target == "aarch64-unknown-linux-musl" then {
|
||||
toolchainOptions = if target == null || target == "x86_64-unknown-linux-musl"
|
||||
|| target == "aarch64-unknown-linux-musl" then {
|
||||
rustVersion = "1.63.0";
|
||||
extraRustComponents = [ "clippy" ];
|
||||
} else {
|
||||
|
@ -55,11 +45,11 @@ let
|
|||
};
|
||||
};
|
||||
|
||||
|
||||
buildEnv = (drv: {
|
||||
buildEnv = (drv:
|
||||
{
|
||||
rustc = drv.setBuildEnv;
|
||||
clippy = ''
|
||||
${drv.setBuildEnv or "" }
|
||||
${drv.setBuildEnv or ""}
|
||||
echo
|
||||
echo --- BUILDING WITH CLIPPY ---
|
||||
echo
|
||||
|
@ -69,16 +59,15 @@ let
|
|||
'';
|
||||
}.${compiler});
|
||||
|
||||
/*
|
||||
Cargo2nix provides many overrides by default, you can take inspiration from them:
|
||||
/* Cargo2nix provides many overrides by default, you can take inspiration from them:
|
||||
https://github.com/cargo2nix/cargo2nix/blob/master/overlay/overrides.nix
|
||||
|
||||
You can have a complete list of the available options by looking at the overriden object, mkcrate:
|
||||
https://github.com/cargo2nix/cargo2nix/blob/master/overlay/mkcrate.nix
|
||||
*/
|
||||
packageOverrides = pkgs: pkgs.rustBuilder.overrides.all ++ [
|
||||
/*
|
||||
[1] We add some logic to compile our crates with clippy, it provides us many additional lints
|
||||
packageOverrides = pkgs:
|
||||
pkgs.rustBuilder.overrides.all ++ [
|
||||
/* [1] We add some logic to compile our crates with clippy, it provides us many additional lints
|
||||
|
||||
[2] We need to alter Nix hardening to make static binaries: PIE,
|
||||
Position Independent Executables seems to be supported only on amd64. Having
|
||||
|
@ -100,103 +89,119 @@ let
|
|||
name = "garage";
|
||||
overrideAttrs = drv:
|
||||
(if git_version != null then {
|
||||
/* [3] */ preConfigure = ''
|
||||
# [3]
|
||||
preConfigure = ''
|
||||
${drv.preConfigure or ""}
|
||||
export GIT_VERSION="${git_version}"
|
||||
'';
|
||||
} else {})
|
||||
//
|
||||
{
|
||||
/* [1] */ setBuildEnv = (buildEnv drv);
|
||||
/* [2] */ hardeningDisable = [ "pie" ];
|
||||
} else
|
||||
{ }) // {
|
||||
# [1]
|
||||
setBuildEnv = (buildEnv drv);
|
||||
# [2]
|
||||
hardeningDisable = [ "pie" ];
|
||||
};
|
||||
})
|
||||
|
||||
(pkgs.rustBuilder.rustLib.makeOverride {
|
||||
name = "garage_rpc";
|
||||
overrideAttrs = drv: { /* [1] */ setBuildEnv = (buildEnv drv); };
|
||||
overrideAttrs = drv: { # [1]
|
||||
setBuildEnv = (buildEnv drv);
|
||||
};
|
||||
})
|
||||
|
||||
(pkgs.rustBuilder.rustLib.makeOverride {
|
||||
name = "garage_db";
|
||||
overrideAttrs = drv: { /* [1] */ setBuildEnv = (buildEnv drv); };
|
||||
overrideAttrs = drv: { # [1]
|
||||
setBuildEnv = (buildEnv drv);
|
||||
};
|
||||
})
|
||||
|
||||
(pkgs.rustBuilder.rustLib.makeOverride {
|
||||
name = "garage_util";
|
||||
overrideAttrs = drv: { /* [1] */ setBuildEnv = (buildEnv drv); };
|
||||
overrideAttrs = drv: { # [1]
|
||||
setBuildEnv = (buildEnv drv);
|
||||
};
|
||||
})
|
||||
|
||||
(pkgs.rustBuilder.rustLib.makeOverride {
|
||||
name = "garage_table";
|
||||
overrideAttrs = drv: { /* [1] */ setBuildEnv = (buildEnv drv); };
|
||||
overrideAttrs = drv: { # [1]
|
||||
setBuildEnv = (buildEnv drv);
|
||||
};
|
||||
})
|
||||
|
||||
(pkgs.rustBuilder.rustLib.makeOverride {
|
||||
name = "garage_block";
|
||||
overrideAttrs = drv: { /* [1] */ setBuildEnv = (buildEnv drv); };
|
||||
overrideAttrs = drv: { # [1]
|
||||
setBuildEnv = (buildEnv drv);
|
||||
};
|
||||
})
|
||||
|
||||
(pkgs.rustBuilder.rustLib.makeOverride {
|
||||
name = "garage_model";
|
||||
overrideAttrs = drv: { /* [1] */ setBuildEnv = (buildEnv drv); };
|
||||
overrideAttrs = drv: { # [1]
|
||||
setBuildEnv = (buildEnv drv);
|
||||
};
|
||||
})
|
||||
|
||||
(pkgs.rustBuilder.rustLib.makeOverride {
|
||||
name = "garage_api";
|
||||
overrideAttrs = drv: { /* [1] */ setBuildEnv = (buildEnv drv); };
|
||||
overrideAttrs = drv: { # [1]
|
||||
setBuildEnv = (buildEnv drv);
|
||||
};
|
||||
})
|
||||
|
||||
(pkgs.rustBuilder.rustLib.makeOverride {
|
||||
name = "garage_web";
|
||||
overrideAttrs = drv: { /* [1] */ setBuildEnv = (buildEnv drv); };
|
||||
overrideAttrs = drv: { # [1]
|
||||
setBuildEnv = (buildEnv drv);
|
||||
};
|
||||
})
|
||||
|
||||
(pkgs.rustBuilder.rustLib.makeOverride {
|
||||
name = "k2v-client";
|
||||
overrideAttrs = drv: { /* [1] */ setBuildEnv = (buildEnv drv); };
|
||||
overrideAttrs = drv: { # [1]
|
||||
setBuildEnv = (buildEnv drv);
|
||||
};
|
||||
})
|
||||
|
||||
(pkgs.rustBuilder.rustLib.makeOverride {
|
||||
name = "libsodium-sys";
|
||||
overrideArgs = old: {
|
||||
features = [ ]; /* [4] */
|
||||
features = [ ]; # [4]
|
||||
};
|
||||
})
|
||||
|
||||
(pkgs.rustBuilder.rustLib.makeOverride {
|
||||
name = "zstd-sys";
|
||||
overrideArgs = old: {
|
||||
features = [ ]; /* [4] */
|
||||
features = [ ]; # [4]
|
||||
};
|
||||
})
|
||||
];
|
||||
|
||||
/*
|
||||
We ship some parts of the code disabled by default by putting them behind a flag.
|
||||
/* We ship some parts of the code disabled by default by putting them behind a flag.
|
||||
It speeds up the compilation (when the feature is not required) and released crates have less dependency by default (less attack surface, disk space, etc.).
|
||||
But we want to ship these additional features when we release Garage.
|
||||
In the end, we chose to exclude all features from debug builds while putting (all of) them in the release builds.
|
||||
*/
|
||||
rootFeatures = if features != null then features else
|
||||
([
|
||||
"garage/bundled-libs"
|
||||
"garage/sled"
|
||||
"garage/k2v"
|
||||
] ++ (if release then [
|
||||
rootFeatures = if features != null then
|
||||
features
|
||||
else
|
||||
([ "garage/bundled-libs" "garage/sled" "garage/k2v" ] ++ (if release then [
|
||||
"garage/consul-discovery"
|
||||
"garage/kubernetes-discovery"
|
||||
"garage/metrics"
|
||||
"garage/telemetry-otlp"
|
||||
"garage/lmdb"
|
||||
"garage/sqlite"
|
||||
] else []));
|
||||
|
||||
] else
|
||||
[ ]));
|
||||
|
||||
packageFun = import ../Cargo.nix;
|
||||
|
||||
/*
|
||||
We compile fully static binaries with musl to simplify deployment on most systems.
|
||||
/* We compile fully static binaries with musl to simplify deployment on most systems.
|
||||
When possible, we reactivate PIE hardening (see above).
|
||||
|
||||
Also, if you set the RUSTFLAGS environment variable, the following parameters will
|
||||
|
@ -207,21 +212,29 @@ let
|
|||
*/
|
||||
|
||||
codegenOpts = {
|
||||
"armv6l-unknown-linux-musleabihf" = [ "target-feature=+crt-static" "link-arg=-static" ]; /* compile as dynamic with static-pie */
|
||||
"aarch64-unknown-linux-musl" = [ "target-feature=+crt-static" "link-arg=-static" ]; /* segfault with static-pie */
|
||||
"i686-unknown-linux-musl" = [ "target-feature=+crt-static" "link-arg=-static" ]; /* segfault with static-pie */
|
||||
"x86_64-unknown-linux-musl" = [ "target-feature=+crt-static" "link-arg=-static-pie" ];
|
||||
"armv6l-unknown-linux-musleabihf" = [
|
||||
"target-feature=+crt-static"
|
||||
"link-arg=-static"
|
||||
]; # compile as dynamic with static-pie
|
||||
"aarch64-unknown-linux-musl" = [
|
||||
"target-feature=+crt-static"
|
||||
"link-arg=-static"
|
||||
]; # segfault with static-pie
|
||||
"i686-unknown-linux-musl" = [
|
||||
"target-feature=+crt-static"
|
||||
"link-arg=-static"
|
||||
]; # segfault with static-pie
|
||||
"x86_64-unknown-linux-musl" =
|
||||
[ "target-feature=+crt-static" "link-arg=-static-pie" ];
|
||||
};
|
||||
|
||||
/*
|
||||
NixOS and Rust/Cargo triples do not match for ARM, fix it here.
|
||||
*/
|
||||
rustTarget = if target == "armv6l-unknown-linux-musleabihf"
|
||||
then "arm-unknown-linux-musleabihf"
|
||||
else target;
|
||||
# NixOS and Rust/Cargo triples do not match for ARM, fix it here.
|
||||
rustTarget = if target == "armv6l-unknown-linux-musleabihf" then
|
||||
"arm-unknown-linux-musleabihf"
|
||||
else
|
||||
target;
|
||||
|
||||
in
|
||||
pkgs.rustBuilder.makePackageSet ({
|
||||
in pkgs.rustBuilder.makePackageSet ({
|
||||
inherit release packageFun packageOverrides codegenOpts rootFeatures;
|
||||
target = rustTarget;
|
||||
} // toolchainOptions)
|
||||
} // toolchainOptions)
|
||||
|
|
|
@ -15,7 +15,8 @@ pkgs.buildGoModule rec {
|
|||
checkPhase = "true";
|
||||
|
||||
meta = with pkgs.lib; {
|
||||
description = "kaniko is a tool to build container images from a Dockerfile, inside a container or Kubernetes cluster.";
|
||||
description =
|
||||
"kaniko is a tool to build container images from a Dockerfile, inside a container or Kubernetes cluster.";
|
||||
homepage = "https://github.com/GoogleContainerTools/kaniko";
|
||||
license = licenses.asl20;
|
||||
platforms = platforms.linux;
|
||||
|
|
|
@ -15,7 +15,8 @@ pkgs.buildGoModule rec {
|
|||
checkPhase = "true";
|
||||
|
||||
meta = with pkgs.lib; {
|
||||
description = "Command line tool to create and query container image manifest list/indexes";
|
||||
description =
|
||||
"Command line tool to create and query container image manifest list/indexes";
|
||||
homepage = "https://github.com/estesp/manifest-tool";
|
||||
license = licenses.asl20;
|
||||
platforms = platforms.linux;
|
||||
|
|
|
@ -1,6 +1,4 @@
|
|||
{
|
||||
system ? builtins.currentSystem,
|
||||
}:
|
||||
{ system ? builtins.currentSystem, }:
|
||||
|
||||
with import ./common.nix;
|
||||
|
||||
|
@ -11,7 +9,8 @@ let
|
|||
#"aarch64-unknown-linux-musl"
|
||||
"armv6l-unknown-linux-musleabihf"
|
||||
];
|
||||
pkgsList = builtins.map (target: import pkgsSrc {
|
||||
pkgsList = builtins.map (target:
|
||||
import pkgsSrc {
|
||||
inherit system;
|
||||
crossSystem = {
|
||||
config = target;
|
||||
|
@ -19,19 +18,14 @@ let
|
|||
};
|
||||
overlays = [ cargo2nixOverlay ];
|
||||
}) platforms;
|
||||
pkgsHost = import pkgsSrc {};
|
||||
pkgsHost = import pkgsSrc { };
|
||||
lib = pkgsHost.lib;
|
||||
kaniko = (import ./kaniko.nix) pkgsHost;
|
||||
winscp = (import ./winscp.nix) pkgsHost;
|
||||
manifestTool = (import ./manifest-tool.nix) pkgsHost;
|
||||
in
|
||||
lib.flatten (builtins.map (pkgs: [
|
||||
in lib.flatten (builtins.map (pkgs: [
|
||||
pkgs.rustPlatform.rust.rustc
|
||||
pkgs.rustPlatform.rust.cargo
|
||||
pkgs.buildPackages.stdenv.cc
|
||||
]) pkgsList) ++ [
|
||||
kaniko
|
||||
winscp
|
||||
manifestTool
|
||||
]
|
||||
]) pkgsList) ++ [ kaniko winscp manifestTool ]
|
||||
|
||||
|
|
|
@ -12,11 +12,11 @@ pkgs.stdenv.mkDerivation rec {
|
|||
|
||||
buildPhase = ''
|
||||
cat > winscp <<EOF
|
||||
#!${pkgs.bash}/bin/bash
|
||||
#!${pkgs.bash}/bin/bash
|
||||
|
||||
WINEDEBUG=-all
|
||||
${pkgs.winePackages.minimal}/bin/wine $out/opt/WinSCP.com
|
||||
EOF
|
||||
WINEDEBUG=-all
|
||||
${pkgs.winePackages.minimal}/bin/wine $out/opt/WinSCP.com
|
||||
EOF
|
||||
'';
|
||||
|
||||
installPhase = ''
|
||||
|
|
69  shell.nix
|
@ -1,6 +1,4 @@
|
|||
{
|
||||
system ? builtins.currentSystem,
|
||||
}:
|
||||
{ system ? builtins.currentSystem, }:
|
||||
|
||||
with import ./nix/common.nix;
|
||||
|
||||
|
@ -13,11 +11,9 @@ let
|
|||
manifest-tool = (import ./nix/manifest-tool.nix) pkgs;
|
||||
winscp = (import ./nix/winscp.nix) pkgs;
|
||||
|
||||
in
|
||||
{
|
||||
/* --- Rust Shell ---
|
||||
* Use it to compile Garage
|
||||
*/
|
||||
in {
|
||||
# --- Rust Shell ---
|
||||
# Use it to compile Garage
|
||||
rust = pkgs.mkShell {
|
||||
nativeBuildInputs = [
|
||||
#pkgs.rustPlatform.rust.rustc
|
||||
|
@ -33,9 +29,8 @@ in
|
|||
];
|
||||
};
|
||||
|
||||
/* --- Integration shell ---
|
||||
* Use it to test Garage with common S3 clients
|
||||
*/
|
||||
# --- Integration shell ---
|
||||
# Use it to test Garage with common S3 clients
|
||||
integration = pkgs.mkShell {
|
||||
nativeBuildInputs = [
|
||||
winscp
|
||||
|
@ -52,21 +47,20 @@ in
|
|||
];
|
||||
};
|
||||
|
||||
/* --- Release shell ---
|
||||
* A shell built to make releasing easier
|
||||
*/
|
||||
# --- Release shell ---
|
||||
# A shell built to make releasing easier
|
||||
release = pkgs.mkShell {
|
||||
shellHook = ''
|
||||
function refresh_toolchain {
|
||||
function refresh_toolchain {
|
||||
pass show deuxfleurs/nix_priv_key > /tmp/nix-signing-key.sec
|
||||
nix copy \
|
||||
--to 's3://nix?endpoint=garage.deuxfleurs.fr®ion=garage&secret-key=/tmp/nix-signing-key.sec' \
|
||||
$(nix-store -qR \
|
||||
$(nix-build --no-build-output --no-out-link nix/toolchain.nix))
|
||||
rm /tmp/nix-signing-key.sec
|
||||
}
|
||||
}
|
||||
|
||||
function refresh_cache {
|
||||
function refresh_cache {
|
||||
pass show deuxfleurs/nix_priv_key > /tmp/nix-signing-key.sec
|
||||
for attr in clippy.amd64 test.amd64 pkgs.{amd64,i386,arm,arm64}.{debug,release}; do
|
||||
echo "Updating cache for ''${attr}"
|
||||
|
@ -76,11 +70,11 @@ function refresh_cache {
|
|||
$(nix-store -qR ''${derivation%\!bin})
|
||||
done
|
||||
rm /tmp/nix-signing-key.sec
|
||||
}
|
||||
}
|
||||
|
||||
function refresh_flake_cache {
|
||||
function refresh_flake_cache {
|
||||
pass show deuxfleurs/nix_priv_key > /tmp/nix-signing-key.sec
|
||||
for attr in packages.x86_64-linux.default; do
|
||||
for attr in packages.x86_64-linux.default devShell.x86_64-linux; do
|
||||
echo "Updating cache for ''${attr}"
|
||||
derivation=$(nix path-info --derivation ".#''${attr}")
|
||||
nix copy -j8 \
|
||||
|
@ -88,30 +82,30 @@ function refresh_flake_cache {
|
|||
$(nix-store -qR ''${derivation})
|
||||
done
|
||||
rm /tmp/nix-signing-key.sec
|
||||
}
|
||||
}
|
||||
|
||||
function to_s3 {
|
||||
function to_s3 {
|
||||
aws \
|
||||
--endpoint-url https://garage.deuxfleurs.fr \
|
||||
--region garage \
|
||||
s3 cp \
|
||||
./result-bin/bin/garage \
|
||||
s3://garagehq.deuxfleurs.fr/_releases/''${DRONE_TAG:-$DRONE_COMMIT}/''${TARGET}/garage
|
||||
}
|
||||
}
|
||||
|
||||
function to_docker {
|
||||
function to_docker {
|
||||
executor \
|
||||
--force \
|
||||
--customPlatform="''${DOCKER_PLATFORM}" \
|
||||
--destination "''${CONTAINER_NAME}:''${CONTAINER_TAG}" \
|
||||
--context dir://`pwd` \
|
||||
--verbosity=debug
|
||||
}
|
||||
}
|
||||
|
||||
function multiarch_docker {
|
||||
function multiarch_docker {
|
||||
manifest-tool push from-spec <(cat <<EOF
|
||||
image: dxflrs/garage:''${CONTAINER_TAG}
|
||||
manifests:
|
||||
image: dxflrs/garage:''${CONTAINER_TAG}
|
||||
manifests:
|
||||
-
|
||||
image: dxflrs/arm64_garage:''${CONTAINER_TAG}
|
||||
platform:
|
||||
|
@ -132,11 +126,11 @@ manifests:
|
|||
platform:
|
||||
architecture: arm
|
||||
os: linux
|
||||
EOF
|
||||
EOF
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
function refresh_index {
|
||||
function refresh_index {
|
||||
aws \
|
||||
--endpoint-url https://garage.deuxfleurs.fr \
|
||||
--region garage \
|
||||
|
@ -160,14 +154,9 @@ function refresh_index {
|
|||
s3 cp \
|
||||
result/share/_releases.html \
|
||||
s3://garagehq.deuxfleurs.fr/
|
||||
}
|
||||
'';
|
||||
nativeBuildInputs = [
|
||||
pkgs.awscli2
|
||||
kaniko
|
||||
manifest-tool
|
||||
];
|
||||
};
|
||||
}
|
||||
|
||||
'';
|
||||
nativeBuildInputs = [ pkgs.awscli2 kaniko manifest-tool ];
|
||||
};
|
||||
}
|
||||
|
||||
|
|
|
@ -15,6 +15,7 @@ use garage_util::time::*;
|
|||
use garage_table::replication::*;
|
||||
use garage_table::*;
|
||||
|
||||
use garage_rpc::ring::PARTITION_BITS;
|
||||
use garage_rpc::*;
|
||||
|
||||
use garage_block::manager::BlockResyncErrorInfo;
|
||||
|
@ -783,6 +784,7 @@ impl AdminRpcHandler {
|
|||
for node in ring.layout.node_ids().iter() {
|
||||
let mut opt = opt.clone();
|
||||
opt.all_nodes = false;
|
||||
opt.skip_global = true;
|
||||
|
||||
writeln!(&mut ret, "\n======================").unwrap();
|
||||
writeln!(&mut ret, "Stats for node {:?}:", node).unwrap();
|
||||
|
@ -799,6 +801,15 @@ impl AdminRpcHandler {
|
|||
Err(e) => writeln!(&mut ret, "Network error: {}", e).unwrap(),
|
||||
}
|
||||
}
|
||||
|
||||
writeln!(&mut ret, "\n======================").unwrap();
|
||||
write!(
|
||||
&mut ret,
|
||||
"Cluster statistics:\n\n{}",
|
||||
self.gather_cluster_stats()
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
Ok(AdminRpc::Ok(ret))
|
||||
} else {
|
||||
Ok(AdminRpc::Ok(self.gather_stats_local(opt)?))
|
||||
|
@ -819,22 +830,6 @@ impl AdminRpcHandler {
|
|||
|
||||
writeln!(&mut ret, "\nDatabase engine: {}", self.garage.db.engine()).unwrap();
|
||||
|
||||
// Gather ring statistics
|
||||
let ring = self.garage.system.ring.borrow().clone();
|
||||
let mut ring_nodes = HashMap::new();
|
||||
for (_i, loc) in ring.partitions().iter() {
|
||||
for n in ring.get_nodes(loc, ring.replication_factor).iter() {
|
||||
if !ring_nodes.contains_key(n) {
|
||||
ring_nodes.insert(*n, 0usize);
|
||||
}
|
||||
*ring_nodes.get_mut(n).unwrap() += 1;
|
||||
}
|
||||
}
|
||||
writeln!(&mut ret, "\nRing nodes & partition count:").unwrap();
|
||||
for (n, c) in ring_nodes.iter() {
|
||||
writeln!(&mut ret, " {:?} {}", n, c).unwrap();
|
||||
}
|
||||
|
||||
// Gather table statistics
|
||||
let mut table = vec![" Table\tItems\tMklItems\tMklTodo\tGcTodo".into()];
|
||||
table.push(self.gather_table_stats(&self.garage.bucket_table, opt.detailed)?);
|
||||
|
@ -881,12 +876,108 @@ impl AdminRpcHandler {
|
|||
.unwrap();
|
||||
|
||||
if !opt.detailed {
|
||||
writeln!(&mut ret, "\nIf values are missing (marked as NC), consider adding the --detailed flag - this will be slow.").unwrap();
|
||||
writeln!(&mut ret, "\nIf values are missing above (marked as NC), consider adding the --detailed flag (this will be slow).").unwrap();
|
||||
}
|
||||
|
||||
if !opt.skip_global {
|
||||
write!(&mut ret, "\n{}", self.gather_cluster_stats()).unwrap();
|
||||
}
|
||||
|
||||
Ok(ret)
|
||||
}
|
||||
|
||||
fn gather_cluster_stats(&self) -> String {
|
||||
let mut ret = String::new();
|
||||
|
||||
// Gather storage node and free space statistics
|
||||
let layout = &self.garage.system.ring.borrow().layout;
|
||||
let mut node_partition_count = HashMap::<Uuid, u64>::new();
|
||||
for short_id in layout.ring_assignation_data.iter() {
|
||||
let id = layout.node_id_vec[*short_id as usize];
|
||||
*node_partition_count.entry(id).or_default() += 1;
|
||||
}
|
||||
let node_info = self
|
||||
.garage
|
||||
.system
|
||||
.get_known_nodes()
|
||||
.into_iter()
|
||||
.map(|n| (n.id, n))
|
||||
.collect::<HashMap<_, _>>();
|
||||
|
||||
let mut table = vec![" ID\tHostname\tZone\tCapacity\tPart.\tDataAvail\tMetaAvail".into()];
|
||||
for (id, parts) in node_partition_count.iter() {
|
||||
let info = node_info.get(id);
|
||||
let status = info.map(|x| &x.status);
|
||||
let role = layout.roles.get(id).and_then(|x| x.0.as_ref());
|
||||
let hostname = status.map(|x| x.hostname.as_str()).unwrap_or("?");
|
||||
let zone = role.map(|x| x.zone.as_str()).unwrap_or("?");
|
||||
let capacity = role.map(|x| x.capacity_string()).unwrap_or("?".into());
|
||||
let avail_str = |x| match x {
|
||||
Some((avail, total)) => {
|
||||
let pct = (avail as f64) / (total as f64) * 100.;
|
||||
let avail = bytesize::ByteSize::b(avail);
|
||||
let total = bytesize::ByteSize::b(total);
|
||||
format!("{}/{} ({:.1}%)", avail, total, pct)
|
||||
}
|
||||
None => "?".into(),
|
||||
};
|
||||
let data_avail = avail_str(status.and_then(|x| x.data_disk_avail));
|
||||
let meta_avail = avail_str(status.and_then(|x| x.meta_disk_avail));
|
||||
table.push(format!(
|
||||
" {:?}\t{}\t{}\t{}\t{}\t{}\t{}",
|
||||
id, hostname, zone, capacity, parts, data_avail, meta_avail
|
||||
));
|
||||
}
|
||||
write!(
|
||||
&mut ret,
|
||||
"Storage nodes:\n{}",
|
||||
format_table_to_string(table)
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let meta_part_avail = node_partition_count
|
||||
.iter()
|
||||
.filter_map(|(id, parts)| {
|
||||
node_info
|
||||
.get(id)
|
||||
.and_then(|x| x.status.meta_disk_avail)
|
||||
.map(|c| c.0 / *parts)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
let data_part_avail = node_partition_count
|
||||
.iter()
|
||||
.filter_map(|(id, parts)| {
|
||||
node_info
|
||||
.get(id)
|
||||
.and_then(|x| x.status.data_disk_avail)
|
||||
.map(|c| c.0 / *parts)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
if !meta_part_avail.is_empty() && !data_part_avail.is_empty() {
|
||||
let meta_avail =
|
||||
bytesize::ByteSize(meta_part_avail.iter().min().unwrap() * (1 << PARTITION_BITS));
|
||||
let data_avail =
|
||||
bytesize::ByteSize(data_part_avail.iter().min().unwrap() * (1 << PARTITION_BITS));
|
||||
writeln!(
|
||||
&mut ret,
|
||||
"\nEstimated available storage space cluster-wide (might be lower in practice):"
|
||||
)
|
||||
.unwrap();
|
||||
if meta_part_avail.len() < node_partition_count.len()
|
||||
|| data_part_avail.len() < node_partition_count.len()
|
||||
{
|
||||
writeln!(&mut ret, " data: < {}", data_avail).unwrap();
|
||||
writeln!(&mut ret, " metadata: < {}", meta_avail).unwrap();
|
||||
writeln!(&mut ret, "A precise estimate could not be given as information is missing for some storage nodes.").unwrap();
|
||||
} else {
|
||||
writeln!(&mut ret, " data: {}", data_avail).unwrap();
|
||||
writeln!(&mut ret, " metadata: {}", meta_avail).unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
ret
|
||||
}
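Note on the estimate above: `gather_cluster_stats` divides each node's available space by the number of partition copies that node holds, takes the minimum of those per-partition figures across nodes, and multiplies it by the total partition count (`1 << PARTITION_BITS`). A minimal sketch of that arithmetic, assuming `PARTITION_BITS = 8` (256 partitions, the value defined in `garage_rpc::ring`) and using made-up node figures:

```rust
// Sketch of the cluster-wide estimate: every partition copy must fit on the
// most constrained node, so usable space is bounded by the smallest
// per-partition availability times the number of partitions.
const PARTITION_BITS: usize = 8; // assumed value, see garage_rpc::ring

/// `nodes` holds (available bytes, partition copies held) per storage node.
fn estimated_cluster_avail(nodes: &[(u64, u64)]) -> Option<u64> {
    nodes
        .iter()
        .map(|(avail, parts)| avail / parts) // per-partition availability on this node
        .min() // the most constrained node bounds the whole cluster
        .map(|per_part| per_part * (1u64 << PARTITION_BITS))
}

fn main() {
    // Example only: two nodes with 100 GB and 40 GB free, holding 170 and 86 partition copies.
    let nodes = [(100_000_000_000u64, 170u64), (40_000_000_000, 86)];
    // min(~588 MB, ~465 MB) * 256 ≈ 119 GB usable cluster-wide.
    println!("{:?}", estimated_cluster_avail(&nodes));
}
```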
|
||||
|
||||
fn gather_table_stats<F, R>(
|
||||
&self,
|
||||
t: &Arc<Table<F, R>>,
|
||||
|
|
|
@ -59,18 +59,29 @@ pub async fn cmd_status(rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID) ->
|
|||
let layout = fetch_layout(rpc_cli, rpc_host).await?;
|
||||
|
||||
println!("==== HEALTHY NODES ====");
|
||||
let mut healthy_nodes = vec!["ID\tHostname\tAddress\tTags\tZone\tCapacity".to_string()];
|
||||
let mut healthy_nodes =
|
||||
vec!["ID\tHostname\tAddress\tTags\tZone\tCapacity\tDataAvail\tMetaAvail".to_string()];
|
||||
for adv in status.iter().filter(|adv| adv.is_up) {
|
||||
match layout.roles.get(&adv.id) {
|
||||
Some(NodeRoleV(Some(cfg))) => {
|
||||
let data_avail = match &adv.status.data_disk_avail {
|
||||
_ if cfg.capacity.is_none() => "N/A".into(),
|
||||
Some((avail, total)) => {
|
||||
let pct = (*avail as f64) / (*total as f64) * 100.;
|
||||
let avail = bytesize::ByteSize::b(*avail);
|
||||
format!("{} ({:.1}%)", avail, pct)
|
||||
}
|
||||
None => "?".into(),
|
||||
};
|
||||
healthy_nodes.push(format!(
|
||||
"{id:?}\t{host}\t{addr}\t[{tags}]\t{zone}\t{capacity}",
|
||||
"{id:?}\t{host}\t{addr}\t[{tags}]\t{zone}\t{capacity}\t{data_avail}",
|
||||
id = adv.id,
|
||||
host = adv.status.hostname,
|
||||
addr = adv.addr,
|
||||
tags = cfg.tags.join(","),
|
||||
zone = cfg.zone,
|
||||
capacity = cfg.capacity_string(),
|
||||
data_avail = data_avail,
|
||||
));
|
||||
}
|
||||
_ => {
|
||||
|
|
|
@ -504,6 +504,11 @@ pub struct StatsOpt {
|
|||
/// Gather detailed statistics (this can be long)
|
||||
#[structopt(short = "d", long = "detailed")]
|
||||
pub detailed: bool,
|
||||
|
||||
/// Don't show global cluster stats (internal use in RPC)
|
||||
#[structopt(skip)]
|
||||
#[serde(default)]
|
||||
pub skip_global: bool,
|
||||
}
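Side note on the new `skip_global` field: it travels with the rest of `StatsOpt` over RPC, but `#[structopt(skip)]` keeps it off the command line, where only `-d`/`--detailed` is exposed. A small hedged sketch of that split (stand-in struct, invented program name):

```rust
use structopt::StructOpt;

// Reduced stand-in for StatsOpt: `--detailed` is a real CLI flag, while the
// `#[structopt(skip)]` field never appears on the command line and keeps its
// default (false) unless set in code before the request is forwarded.
#[derive(StructOpt, Debug, Clone)]
struct Opt {
    /// Gather detailed statistics (this can be long)
    #[structopt(short = "d", long = "detailed")]
    detailed: bool,

    /// Internal flag, not exposed on the CLI
    #[structopt(skip)]
    skip_global: bool,
}

fn main() {
    // `from_iter` parses an explicit argument list; "prog -d" stands in for `garage stats -d`.
    let mut opt = Opt::from_iter(vec!["prog", "-d"]);
    assert!(opt.detailed && !opt.skip_global);

    // The per-node fan-out in AdminRpcHandler sets this in code, as shown above.
    opt.skip_global = true;
    println!("{:?}", opt);
}
```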
|
||||
|
||||
#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)]
|
||||
|
|
|
@@ -23,6 +23,7 @@ hex = "0.4"
 tracing = "0.1"
 rand = "0.8"
 sodiumoxide = { version = "0.2.5-0", package = "kuska-sodiumoxide" }
+systemstat = "0.2.3"
 
 async-trait = "0.1.7"
 serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
|
|
|
@ -3,6 +3,9 @@
|
|||
#[macro_use]
|
||||
extern crate tracing;
|
||||
|
||||
mod metrics;
|
||||
mod system_metrics;
|
||||
|
||||
#[cfg(feature = "consul-discovery")]
|
||||
mod consul;
|
||||
#[cfg(feature = "kubernetes-discovery")]
|
||||
|
@ -13,9 +16,6 @@ pub mod replication_mode;
|
|||
pub mod ring;
|
||||
pub mod system;
|
||||
|
||||
mod metrics;
|
||||
pub mod rpc_helper;
|
||||
|
||||
pub use rpc_helper::*;
|
||||
|
||||
pub mod system_metrics;
|
||||
|
|
|
@ -3,6 +3,7 @@ use std::collections::HashMap;
|
|||
use std::io::{Read, Write};
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::atomic::Ordering;
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
|
@ -38,7 +39,6 @@ use crate::replication_mode::*;
|
|||
use crate::ring::*;
|
||||
use crate::rpc_helper::*;
|
||||
|
||||
#[cfg(feature = "metrics")]
|
||||
use crate::system_metrics::*;
|
||||
|
||||
const DISCOVERY_INTERVAL: Duration = Duration::from_secs(60);
|
||||
|
@ -106,7 +106,7 @@ pub struct System {
|
|||
consul_discovery: Option<ConsulDiscovery>,
|
||||
#[cfg(feature = "kubernetes-discovery")]
|
||||
kubernetes_discovery: Option<KubernetesDiscoveryConfig>,
|
||||
#[cfg(feature = "metrics")]
|
||||
|
||||
metrics: SystemMetrics,
|
||||
|
||||
replication_mode: ReplicationMode,
|
||||
|
@ -118,18 +118,28 @@ pub struct System {
|
|||
|
||||
/// Path to metadata directory
|
||||
pub metadata_dir: PathBuf,
|
||||
/// Path to data directory
|
||||
pub data_dir: PathBuf,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct NodeStatus {
|
||||
/// Hostname of the node
|
||||
pub hostname: String,
|
||||
|
||||
/// Replication factor configured on the node
|
||||
pub replication_factor: usize,
|
||||
/// Cluster layout version
|
||||
pub cluster_layout_version: u64,
|
||||
/// Hash of cluster layout staging data
|
||||
pub cluster_layout_staging_hash: Hash,
|
||||
|
||||
/// Disk usage on partition containing metadata directory (tuple: `(avail, total)`)
|
||||
#[serde(default)]
|
||||
pub meta_disk_avail: Option<(u64, u64)>,
|
||||
/// Disk usage on partition containing data directory (tuple: `(avail, total)`)
|
||||
#[serde(default)]
|
||||
pub data_disk_avail: Option<(u64, u64)>,
|
||||
}
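The `#[serde(default)]` on the two new fields means a `NodeStatus` received without them (for instance from a peer that does not report disk usage) falls back to `None` rather than treating the field as required. A reduced, hedged illustration; the struct is a stand-in and the JSON payloads are invented:

```rust
use serde::Deserialize;

// Reduced stand-in for NodeStatus, keeping only the new optional disk fields.
#[derive(Debug, Deserialize)]
struct Status {
    hostname: String,
    /// Absent in messages from nodes that do not report disk usage: defaults to None.
    #[serde(default)]
    meta_disk_avail: Option<(u64, u64)>,
    #[serde(default)]
    data_disk_avail: Option<(u64, u64)>,
}

fn main() {
    // A payload without the new fields still deserializes.
    let old: Status = serde_json::from_str(r#"{ "hostname": "node-a" }"#).unwrap();
    assert!(old.meta_disk_avail.is_none() && old.data_disk_avail.is_none());

    // A payload carrying (avail, total) byte counts for the data directory.
    let new: Status = serde_json::from_str(
        r#"{ "hostname": "node-b", "data_disk_avail": [40000000000, 100000000000] }"#,
    )
    .unwrap();
    assert_eq!(new.data_disk_avail, Some((40_000_000_000, 100_000_000_000)));
    println!("{:?}\n{:?}", old, new);
}
```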
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
|
@ -271,18 +281,11 @@ impl System {
|
|||
}
|
||||
};
|
||||
|
||||
let local_status = NodeStatus {
|
||||
hostname: gethostname::gethostname()
|
||||
.into_string()
|
||||
.unwrap_or_else(|_| "<invalid utf-8>".to_string()),
|
||||
replication_factor,
|
||||
cluster_layout_version: cluster_layout.version,
|
||||
cluster_layout_staging_hash: cluster_layout.staging_hash,
|
||||
};
|
||||
|
||||
#[cfg(feature = "metrics")]
|
||||
let metrics = SystemMetrics::new(replication_factor);
|
||||
|
||||
let mut local_status = NodeStatus::initial(replication_factor, &cluster_layout);
|
||||
local_status.update_disk_usage(&config.metadata_dir, &config.data_dir, &metrics);
|
||||
|
||||
let ring = Ring::new(cluster_layout, replication_factor);
|
||||
let (update_ring, ring) = watch::channel(Arc::new(ring));
|
||||
|
||||
|
@ -373,12 +376,12 @@ impl System {
|
|||
consul_discovery,
|
||||
#[cfg(feature = "kubernetes-discovery")]
|
||||
kubernetes_discovery: config.kubernetes_discovery.clone(),
|
||||
#[cfg(feature = "metrics")]
|
||||
metrics,
|
||||
|
||||
ring,
|
||||
update_ring: Mutex::new(update_ring),
|
||||
metadata_dir: config.metadata_dir.clone(),
|
||||
data_dir: config.data_dir.clone(),
|
||||
});
|
||||
sys.system_endpoint.set_handler(sys.clone());
|
||||
Ok(sys)
|
||||
|
@ -416,12 +419,7 @@ impl System {
|
|||
.get(&n.id.into())
|
||||
.cloned()
|
||||
.map(|(_, st)| st)
|
||||
.unwrap_or(NodeStatus {
|
||||
hostname: "?".to_string(),
|
||||
replication_factor: 0,
|
||||
cluster_layout_version: 0,
|
||||
cluster_layout_staging_hash: Hash::from([0u8; 32]),
|
||||
}),
|
||||
.unwrap_or(NodeStatus::unknown()),
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
known_nodes
|
||||
|
@ -600,6 +598,9 @@ impl System {
|
|||
let ring = self.ring.borrow();
|
||||
new_si.cluster_layout_version = ring.layout.version;
|
||||
new_si.cluster_layout_staging_hash = ring.layout.staging_hash;
|
||||
|
||||
new_si.update_disk_usage(&self.metadata_dir, &self.data_dir, &self.metrics);
|
||||
|
||||
self.local_status.swap(Arc::new(new_si));
|
||||
}
|
||||
|
||||
|
@ -864,6 +865,69 @@ impl EndpointHandler<SystemRpc> for System {
|
|||
}
|
||||
}
|
||||
|
||||
impl NodeStatus {
|
||||
fn initial(replication_factor: usize, layout: &ClusterLayout) -> Self {
|
||||
NodeStatus {
|
||||
hostname: gethostname::gethostname()
|
||||
.into_string()
|
||||
.unwrap_or_else(|_| "<invalid utf-8>".to_string()),
|
||||
replication_factor,
|
||||
cluster_layout_version: layout.version,
|
||||
cluster_layout_staging_hash: layout.staging_hash,
|
||||
meta_disk_avail: None,
|
||||
data_disk_avail: None,
|
||||
}
|
||||
}
|
||||
|
||||
fn unknown() -> Self {
|
||||
NodeStatus {
|
||||
hostname: "?".to_string(),
|
||||
replication_factor: 0,
|
||||
cluster_layout_version: 0,
|
||||
cluster_layout_staging_hash: Hash::from([0u8; 32]),
|
||||
meta_disk_avail: None,
|
||||
data_disk_avail: None,
|
||||
}
|
||||
}
|
||||
|
||||
fn update_disk_usage(&mut self, meta_dir: &Path, data_dir: &Path, metrics: &SystemMetrics) {
|
||||
use systemstat::{Platform, System};
|
||||
let mounts = System::new().mounts().unwrap_or_default();
|
||||
|
||||
let mount_avail = |path: &Path| {
|
||||
mounts
|
||||
.iter()
|
||||
.filter(|x| path.starts_with(&x.fs_mounted_on))
|
||||
.max_by_key(|x| x.fs_mounted_on.len())
|
||||
.map(|x| (x.avail.as_u64(), x.total.as_u64()))
|
||||
};
|
||||
|
||||
self.meta_disk_avail = mount_avail(meta_dir);
|
||||
self.data_disk_avail = mount_avail(data_dir);
|
||||
|
||||
if let Some((avail, total)) = self.meta_disk_avail {
|
||||
metrics
|
||||
.values
|
||||
.meta_disk_avail
|
||||
.store(avail, Ordering::Relaxed);
|
||||
metrics
|
||||
.values
|
||||
.meta_disk_total
|
||||
.store(total, Ordering::Relaxed);
|
||||
}
|
||||
if let Some((avail, total)) = self.data_disk_avail {
|
||||
metrics
|
||||
.values
|
||||
.data_disk_avail
|
||||
.store(avail, Ordering::Relaxed);
|
||||
metrics
|
||||
.values
|
||||
.data_disk_total
|
||||
.store(total, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
}
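For reference, the mount lookup in `update_disk_usage` picks, among the mount points the directory sits under, the most specific one (longest path). A self-contained sketch of the same lookup: it reuses only the `systemstat`/`bytesize` calls already used above, and the paths in `main` are invented example values, not the configured directories:

```rust
use std::path::Path;
use systemstat::{Platform, System};

/// Return (available, total) bytes for the filesystem containing `path`,
/// by choosing the most specific (longest) mount point that is a prefix of it.
fn disk_usage_for(path: &Path) -> Option<(u64, u64)> {
    let mounts = System::new().mounts().ok()?; // same call as in update_disk_usage above
    mounts
        .iter()
        .filter(|m| path.starts_with(&m.fs_mounted_on))
        .max_by_key(|m| m.fs_mounted_on.len()) // "/var/lib/garage" beats "/" if both match
        .map(|m| (m.avail.as_u64(), m.total.as_u64()))
}

fn main() {
    // Example paths only; the real code uses metadata_dir / data_dir from the node config.
    for dir in ["/var/lib/garage/meta", "/var/lib/garage/data"] {
        match disk_usage_for(Path::new(dir)) {
            Some((avail, total)) => println!("{}: {} / {} bytes free", dir, avail, total),
            None => println!("{}: unknown", dir),
        }
    }
}
```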
|
||||
|
||||
fn get_default_ip() -> Option<IpAddr> {
|
||||
pnet_datalink::interfaces()
|
||||
.iter()
|
||||
|
|
|
@ -1,14 +1,31 @@
|
|||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use std::sync::Arc;
|
||||
|
||||
use opentelemetry::{global, metrics::*, KeyValue};
|
||||
|
||||
/// TableMetrics reference all counter used for metrics
|
||||
pub struct SystemMetrics {
|
||||
pub(crate) _garage_build_info: ValueObserver<u64>,
|
||||
pub(crate) _replication_factor: ValueObserver<u64>,
|
||||
pub(crate) _disk_avail: ValueObserver<u64>,
|
||||
pub(crate) _disk_total: ValueObserver<u64>,
|
||||
pub(crate) values: Arc<SystemMetricsValues>,
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct SystemMetricsValues {
|
||||
pub(crate) data_disk_total: AtomicU64,
|
||||
pub(crate) data_disk_avail: AtomicU64,
|
||||
pub(crate) meta_disk_total: AtomicU64,
|
||||
pub(crate) meta_disk_avail: AtomicU64,
|
||||
}
|
||||
|
||||
impl SystemMetrics {
|
||||
pub fn new(replication_factor: usize) -> Self {
|
||||
let meter = global::meter("garage_system");
|
||||
let values = Arc::new(SystemMetricsValues::default());
|
||||
let values1 = values.clone();
|
||||
let values2 = values.clone();
|
||||
Self {
|
||||
_garage_build_info: meter
|
||||
.u64_value_observer("garage_build_info", move |observer| {
|
||||
|
@ -28,6 +45,33 @@ impl SystemMetrics {
|
|||
})
|
||||
.with_description("Garage replication factor setting")
|
||||
.init(),
|
||||
_disk_avail: meter
|
||||
.u64_value_observer("garage_local_disk_avail", move |observer| {
|
||||
match values1.data_disk_avail.load(Ordering::Relaxed) {
|
||||
0 => (),
|
||||
x => observer.observe(x, &[KeyValue::new("volume", "data")]),
|
||||
};
|
||||
match values1.meta_disk_avail.load(Ordering::Relaxed) {
|
||||
0 => (),
|
||||
x => observer.observe(x, &[KeyValue::new("volume", "metadata")]),
|
||||
};
|
||||
})
|
||||
.with_description("Garage available disk space on each node")
|
||||
.init(),
|
||||
_disk_total: meter
|
||||
.u64_value_observer("garage_local_disk_total", move |observer| {
|
||||
match values2.data_disk_total.load(Ordering::Relaxed) {
|
||||
0 => (),
|
||||
x => observer.observe(x, &[KeyValue::new("volume", "data")]),
|
||||
};
|
||||
match values2.meta_disk_total.load(Ordering::Relaxed) {
|
||||
0 => (),
|
||||
x => observer.observe(x, &[KeyValue::new("volume", "metadata")]),
|
||||
};
|
||||
})
|
||||
.with_description("Garage total disk space on each node")
|
||||
.init(),
|
||||
values,
|
||||
}
|
||||
}
|
||||
}
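The new gauges do not touch the disk at scrape time: `update_disk_usage` stores the latest numbers into the shared `AtomicU64`s of `SystemMetricsValues`, and each `u64_value_observer` callback reports them, skipping the zero "not yet measured" case. A small hedged sketch of the same pattern with a single invented metric name, using only the opentelemetry calls that already appear above:

```rust
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;

use opentelemetry::{global, metrics::ValueObserver, KeyValue};

// One shared cell written by the measuring side, read lazily by the observer callback.
struct Gauge {
    value: Arc<AtomicU64>,
    _observer: ValueObserver<u64>, // kept alive so the observer stays registered
}

fn make_gauge() -> Gauge {
    let value = Arc::new(AtomicU64::new(0));
    let v = value.clone();
    let meter = global::meter("example");
    let _observer = meter
        // "example_disk_avail" is an invented metric name for this sketch.
        .u64_value_observer("example_disk_avail", move |observer| {
            match v.load(Ordering::Relaxed) {
                0 => (), // nothing measured yet: report no sample, as above
                x => observer.observe(x, &[KeyValue::new("volume", "data")]),
            }
        })
        .with_description("Example available disk space gauge")
        .init();
    Gauge { value, _observer }
}

fn main() {
    let g = make_gauge();
    // The measuring side (update_disk_usage in the real code) just stores the number.
    g.value.store(40_000_000_000, Ordering::Relaxed);
}
```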
|
||||
|
|