Merge pull request 'Report available disk space in `garage stats`' (#487) from report-disk-usage into main

Reviewed-on: #487
Committed by Alex on 2023-01-26 15:40:41 +00:00 (commit 611792ddcf)
20 changed files with 843 additions and 564 deletions

Cargo.lock (generated, 15 lines changed)

@@ -1257,6 +1257,7 @@ dependencies = [
  "serde",
  "serde_bytes",
  "serde_json",
+ "systemstat",
  "tokio",
  "tokio-stream",
  "tracing",
@@ -3549,6 +3550,20 @@ dependencies = [
  "unicode-xid",
 ]
 
+[[package]]
+name = "systemstat"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a24aec24a9312c83999a28e3ef9db7e2afd5c64bf47725b758cdc1cafd5b0bd2"
+dependencies = [
+ "bytesize",
+ "lazy_static",
+ "libc",
+ "nom",
+ "time 0.3.17",
+ "winapi",
+]
+
 [[package]]
 name = "tempfile"
 version = "3.3.0"

View File

@@ -32,7 +32,7 @@ args@{
   ignoreLockHash,
 }:
 let
-  nixifiedLockHash = "456bca1fe75cfe5c26a9b56401a40b5b205c0096a0dc7287c7853c35498bc5c0";
+  nixifiedLockHash = "cf836c01a9c668bab5f9a09d468f47aa24c50abec92855503624d706721335ef";
   workspaceSrc = if args.workspaceSrc == null then ./. else args.workspaceSrc;
   currentLockHash = builtins.hashFile "sha256" (workspaceSrc + /Cargo.lock);
   lockHashIgnored = if ignoreLockHash
@@ -1790,6 +1790,7 @@ in
       serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.152" { inherit profileName; }).out;
       serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.8" { inherit profileName; }).out;
       serde_json = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_json."1.0.91" { inherit profileName; }).out;
+      systemstat = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".systemstat."0.2.3" { inherit profileName; }).out;
       tokio = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.24.2" { inherit profileName; }).out;
       tokio_stream = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio-stream."0.1.11" { inherit profileName; }).out;
       tracing = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tracing."0.1.37" { inherit profileName; }).out;
@@ -4938,6 +4939,21 @@ in
     };
   });
 
+  "registry+https://github.com/rust-lang/crates.io-index".systemstat."0.2.3" = overridableMkRustCrate (profileName: rec {
+    name = "systemstat";
+    version = "0.2.3";
+    registry = "registry+https://github.com/rust-lang/crates.io-index";
+    src = fetchCratesIo { inherit name version; sha256 = "a24aec24a9312c83999a28e3ef9db7e2afd5c64bf47725b758cdc1cafd5b0bd2"; };
+    dependencies = {
+      bytesize = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytesize."1.1.0" { inherit profileName; }).out;
+      lazy_static = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".lazy_static."1.4.0" { inherit profileName; }).out;
+      libc = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.139" { inherit profileName; }).out;
+      ${ if hostPlatform.parsed.kernel.name == "linux" || hostPlatform.parsed.kernel.name == "android" then "nom" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".nom."7.1.3" { inherit profileName; }).out;
+      time = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".time."0.3.17" { inherit profileName; }).out;
+      ${ if hostPlatform.isWindows then "winapi" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".winapi."0.3.9" { inherit profileName; }).out;
+    };
+  });
+
   "registry+https://github.com/rust-lang/crates.io-index".tempfile."3.3.0" = overridableMkRustCrate (profileName: rec {
     name = "tempfile";
     version = "3.3.0";
@@ -5961,6 +5977,7 @@ in
       [ "ntsecapi" ]
       [ "ntstatus" ]
       [ "objbase" ]
+      [ "pdh" ]
       [ "processenv" ]
       [ "processthreadsapi" ]
       [ "profileapi" ]
@@ -5977,6 +5994,7 @@ in
       (lib.optional (rootFeatures' ? "garage/consul-discovery" || rootFeatures' ? "garage_rpc/consul-discovery" || rootFeatures' ? "garage_rpc/reqwest") "winreg")
       [ "winsock2" ]
       [ "winstring" ]
+      [ "ws2def" ]
       [ "ws2ipdef" ]
       [ "ws2tcpip" ]
       [ "wtypesbase" ]

View File

@@ -4,7 +4,7 @@ all:
 	clear; cargo build
 
 release:
-	nix-build --arg release true
+	nix-build --attr pkgs.amd64.release --no-build-output
 
 shell:
 	nix-shell
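The `release` target now selects a specific attribute of the tree defined in default.nix instead of passing `--arg release true`. Judging from the cache-refresh helper in shell.nix, that tree also exposes per-architecture and debug variants, so invocations such as `nix-build --attr pkgs.arm64.debug --no-build-output` or `nix-build --attr test.amd64` should select other outputs; these example invocations are an assumption for illustration and are not part of this diff.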

View File

@@ -1,7 +1,4 @@
-{
-  system ? builtins.currentSystem,
-  git_version ? null,
-}:
+{ system ? builtins.currentSystem, git_version ? null, }:
 
 with import ./nix/common.nix;
 
@@ -13,22 +10,21 @@ let
     debug = (compile {
       inherit system target git_version pkgsSrc cargo2nixOverlay;
       release = false;
-    }).workspace.garage {
-      compileMode = "build";
-    };
+    }).workspace.garage { compileMode = "build"; };
 
     release = (compile {
       inherit system target git_version pkgsSrc cargo2nixOverlay;
       release = true;
-    }).workspace.garage {
-      compileMode = "build";
-    };
+    }).workspace.garage { compileMode = "build"; };
   });
 
-  test = (rustPkgs: pkgs.symlinkJoin {
-    name ="garage-tests";
-    paths = builtins.map (key: rustPkgs.workspace.${key} { compileMode = "test"; }) (builtins.attrNames rustPkgs.workspace);
-  });
+  test = (rustPkgs:
+    pkgs.symlinkJoin {
+      name = "garage-tests";
+      paths =
+        builtins.map (key: rustPkgs.workspace.${key} { compileMode = "test"; })
+        (builtins.attrNames rustPkgs.workspace);
+    });
 
 in {
   pkgs = {
@@ -55,8 +51,6 @@ in {
       inherit system git_version pkgsSrc cargo2nixOverlay;
       target = "x86_64-unknown-linux-musl";
       compiler = "clippy";
-    }).workspace.garage {
-      compileMode = "build";
-    };
+    }).workspace.garage { compileMode = "build"; };
   };
 }

View File

@@ -1,7 +1,9 @@
 {
-  description = "Garage, an S3-compatible distributed object store for self-hosted deployments";
+  description =
+    "Garage, an S3-compatible distributed object store for self-hosted deployments";
 
-  inputs.nixpkgs.url = "github:NixOS/nixpkgs/a3073c49bc0163fea6a121c276f526837672b555";
+  inputs.nixpkgs.url =
+    "github:NixOS/nixpkgs/a3073c49bc0163fea6a121c276f526837672b555";
   inputs.cargo2nix = {
     # As of 2022-10-18: two small patches over unstable branch, one for clippy and one to fix feature detection
     url = "github:Alexis211/cargo2nix/a7a61179b66054904ef6a195d8da736eaaa06c36";
@@ -24,13 +26,11 @@
         release = true;
       }).workspace.garage { compileMode = "build"; };
     };
-    devShell = ((compile {
+    devShell = (compile {
       inherit system git_version;
       pkgsSrc = nixpkgs;
       cargo2nixOverlay = cargo2nix.overlays.default;
       release = false;
-    }).workspaceShell {
-      packages = [ pkgs.rustfmt cargo2nix.packages.${system}.default ];
-    });
+    }).workspaceShell { packages = [ pkgs.rustfmt ]; };
   });
 }

View File

@ -1,10 +1,8 @@
{ { path ? "/../aws-list.txt", }:
path ? "/../aws-list.txt",
}:
with import ./common.nix; with import ./common.nix;
let let
pkgs = import pkgsSrc {}; pkgs = import pkgsSrc { };
lib = pkgs.lib; lib = pkgs.lib;
/* Converts a key list and a value list to a set /* Converts a key list and a value list to a set
@ -13,139 +11,182 @@ let
listToSet [ "name" "version" ] [ "latex" "3.14" ] listToSet [ "name" "version" ] [ "latex" "3.14" ]
=> { name = "latex"; version = "3.14"; } => { name = "latex"; version = "3.14"; }
*/ */
listToSet = keys: values: listToSet = keys: values:
builtins.listToAttrs builtins.listToAttrs (lib.zipListsWith (a: b: {
(lib.zipListsWith name = a;
(a: b: { name = a; value = b; }) value = b;
keys }) keys values);
values);
/* Says if datetime a is more recent than datetime b /* Says if datetime a is more recent than datetime b
Example: Example:
cmpDate { date = "2021-09-10"; time = "22:12:15"; } { date = "2021-02-03"; time = "23:54:12"; } cmpDate { date = "2021-09-10"; time = "22:12:15"; } { date = "2021-02-03"; time = "23:54:12"; }
=> true => true
*/ */
cmpDate = a: b: cmpDate = a: b:
let da = (builtins.head a.builds).date; let
db = (builtins.head b.builds).date; da = (builtins.head a.builds).date;
in db = (builtins.head b.builds).date;
if da == db then (builtins.head a.builds).time > (builtins.head b.builds).time in if da == db then
else da > db; (builtins.head a.builds).time > (builtins.head b.builds).time
else
da > db;
/* Pretty platforms */ # Pretty platforms
prettyPlatform = name: prettyPlatform = name:
if name == "aarch64-unknown-linux-musl" then "linux/arm64" if name == "aarch64-unknown-linux-musl" then
else if name == "armv6l-unknown-linux-musleabihf" then "linux/arm" "linux/arm64"
else if name == "x86_64-unknown-linux-musl" then "linux/amd64" else if name == "armv6l-unknown-linux-musleabihf" then
else if name == "i686-unknown-linux-musl" then "linux/386" "linux/arm"
else name; else if name == "x86_64-unknown-linux-musl" then
"linux/amd64"
else if name == "i686-unknown-linux-musl" then
"linux/386"
else
name;
/* Parsing */ # Parsing
list = builtins.readFile (./. + path); list = builtins.readFile (./. + path);
entries = lib.splitString "\n" list; entries = lib.splitString "\n" list;
elems = builtins.filter elems = builtins.filter (e: (builtins.length e) == 4)
(e: (builtins.length e) == 4) (map (x: builtins.filter (e: e != "") (lib.splitString " " x)) entries);
(map
(x: builtins.filter (e: e != "") (lib.splitString " " x))
entries);
keys = ["date" "time" "size" "path"]; keys = [ "date" "time" "size" "path" ];
parsed = map (entry: listToSet keys entry) elems; parsed = map (entry: listToSet keys entry) elems;
subkeys = ["root" "version" "platform" "binary" ]; subkeys = [ "root" "version" "platform" "binary" ];
builds = map (entry: entry // listToSet subkeys (lib.splitString "/" entry.path) // { url = "https://garagehq.deuxfleurs.fr/" + entry.path; }) parsed; builds = map (entry:
entry // listToSet subkeys (lib.splitString "/" entry.path) // {
url = "https://garagehq.deuxfleurs.fr/" + entry.path;
}) parsed;
/* Aggregation */ # Aggregation
builds_per_version = lib.foldl (acc: v: acc // { ${v.version} = if builtins.hasAttr v.version acc then acc.${v.version} ++ [ v ] else [ v ]; }) {} builds; builds_per_version = lib.foldl (acc: v:
acc // {
${v.version} = if builtins.hasAttr v.version acc then
acc.${v.version} ++ [ v ]
else
[ v ];
}) { } builds;
versions = builtins.attrNames builds_per_version; versions = builtins.attrNames builds_per_version;
versions_release = builtins.filter (x: builtins.match "v[0-9]+\.[0-9]+\.[0-9]+(\.[0-9]+)?" x != null) versions; versions_release = builtins.filter
versions_commit = builtins.filter (x: builtins.match "[0-9a-f]{40}" x != null) versions; (x: builtins.match "v[0-9]+.[0-9]+.[0-9]+(.[0-9]+)?" x != null) versions;
versions_extra = lib.subtractLists (versions_release ++ versions_commit) versions; versions_commit =
builtins.filter (x: builtins.match "[0-9a-f]{40}" x != null) versions;
versions_extra =
lib.subtractLists (versions_release ++ versions_commit) versions;
sorted_builds = [ sorted_builds = [
{ {
name = "Release"; name = "Release";
hide = false; hide = false;
type = "tag"; type = "tag";
description = "Release builds are the official builds, they are tailored for productions and are the most tested."; description =
builds = builtins.sort (a: b: a.version > b.version) (map (x: { version = x; builds = builtins.getAttr x builds_per_version; }) versions_release); "Release builds are the official builds, they are tailored for productions and are the most tested.";
builds = builtins.sort (a: b: a.version > b.version) (map (x: {
version = x;
builds = builtins.getAttr x builds_per_version;
}) versions_release);
} }
{ {
name = "Extra"; name = "Extra";
hide = true; hide = true;
type = "tag"; type = "tag";
description = "Extra builds are built on demand to test a specific feature or a specific need."; description =
builds = builtins.sort cmpDate (map (x: { version = x; builds = builtins.getAttr x builds_per_version; }) versions_extra); "Extra builds are built on demand to test a specific feature or a specific need.";
builds = builtins.sort cmpDate (map (x: {
version = x;
builds = builtins.getAttr x builds_per_version;
}) versions_extra);
} }
{ {
name = "Development"; name = "Development";
hide = true; hide = true;
type = "commit"; type = "commit";
description = "Development builds are built periodically. Use them if you want to test a specific feature that is not yet released."; description =
builds = builtins.sort cmpDate (map (x: { version = x; builds = builtins.getAttr x builds_per_version; }) versions_commit); "Development builds are built periodically. Use them if you want to test a specific feature that is not yet released.";
builds = builtins.sort cmpDate (map (x: {
version = x;
builds = builtins.getAttr x builds_per_version;
}) versions_commit);
} }
]; ];
json = pkgs.writeTextDir "share/_releases.json" (builtins.toJSON sorted_builds); json =
pkgs.writeTextDir "share/_releases.json" (builtins.toJSON sorted_builds);
html = pkgs.writeTextDir "share/_releases.html" '' html = pkgs.writeTextDir "share/_releases.html" ''
<!doctype html> <!doctype html>
<html> <html>
<head> <head>
<meta charset="utf-8" /> <meta charset="utf-8" />
<title>Garage releases</title> <title>Garage releases</title>
<style> <style>
html, body { margin:0; padding: 0 } html, body { margin:0; padding: 0 }
body { font-family: 'Helvetica', Sans; } body { font-family: 'Helvetica', Sans; }
section { margin: 1rem; } section { margin: 1rem; }
ul { padding:0; margin: 0.2rem } ul { padding:0; margin: 0.2rem }
li { li {
border-radius: 0.2rem; border-radius: 0.2rem;
display: inline; display: inline;
border: 2px #0b5d83 solid; border: 2px #0b5d83 solid;
padding: 0.5rem; padding: 0.5rem;
line-height: 3rem; line-height: 3rem;
color: #0b5d83; color: #0b5d83;
} }
li:hover { background-color: #0b5d83; color: #fff; } li:hover { background-color: #0b5d83; color: #fff; }
li a, li a:hover { color: inherit; text-decoration: none } li a, li a:hover { color: inherit; text-decoration: none }
</style> </style>
</head> </head>
<body> <body>
${ builtins.toString (lib.forEach sorted_builds (r: '' ${
<section> builtins.toString (lib.forEach sorted_builds (r: ''
<h2>${r.name} builds</h2> <section>
<h2>${r.name} builds</h2>
<p>${r.description}</p> <p>${r.description}</p>
${if r.hide then "<details><summary>Show ${r.name} builds</summary>" else ""} ${
${ builtins.toString (lib.forEach r.builds (x: '' if r.hide then
<h3> ${x.version} (${(builtins.head x.builds).date}) </h3> "<details><summary>Show ${r.name} builds</summary>"
<p>See this build on</p> else
<p> Binaries: ""
<ul> }
${ builtins.toString (lib.forEach x.builds (b: '' ${
<li><a href="/${b.path}">${prettyPlatform b.platform}</a></li> builtins.toString (lib.forEach r.builds (x: ''
''))} <h3> ${x.version} (${(builtins.head x.builds).date}) </h3>
</ul></p> <p>See this build on</p>
<p> Sources: <p> Binaries:
<ul> <ul>
<li><a href="https://git.deuxfleurs.fr/Deuxfleurs/garage/src/${r.type}/${x.version}">gitea</a></li> ${builtins.toString (lib.forEach x.builds (b: ''
<li><a href="https://git.deuxfleurs.fr/Deuxfleurs/garage/archive/${x.version}.zip">.zip</a></li> <li><a href="/${b.path}">${
<li><a href="https://git.deuxfleurs.fr/Deuxfleurs/garage/archive/${x.version}.tar.gz">.tar.gz</a></li> prettyPlatform b.platform
</ul></p> }</a></li>
'')) } ''))}
${ if builtins.length r.builds == 0 then "<em>There is no build for this category</em>" else "" } </ul></p>
${if r.hide then "</details>" else ""} <p> Sources:
</section> <ul>
''))} <li><a href="https://git.deuxfleurs.fr/Deuxfleurs/garage/src/${r.type}/${x.version}">gitea</a></li>
</body> <li><a href="https://git.deuxfleurs.fr/Deuxfleurs/garage/archive/${x.version}.zip">.zip</a></li>
</html> <li><a href="https://git.deuxfleurs.fr/Deuxfleurs/garage/archive/${x.version}.tar.gz">.tar.gz</a></li>
''; </ul></p>
in ''))
pkgs.symlinkJoin { }
name = "releases"; ${
paths = [ json html ]; if builtins.length r.builds == 0 then
} "<em>There is no build for this category</em>"
else
""
}
${if r.hide then "</details>" else ""}
</section>
''))
}
</body>
</html>
'';
in pkgs.symlinkJoin {
name = "releases";
paths = [ json html ];
}
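To make the parsing above concrete: each line of aws-list.txt is an `aws s3 ls --recursive` entry with four whitespace-separated fields matching `keys`, and splitting the object path on `/` fills the four `subkeys`. A hypothetical line such as

    2023-01-26 15:40:41   12345678 _releases/v0.8.1/x86_64-unknown-linux-musl/garage

would parse to date = "2023-01-26", time = "15:40:41", size = "12345678" and path = "_releases/v0.8.1/x86_64-unknown-linux-musl/garage", which in turn yields root = "_releases", version = "v0.8.1", platform = "x86_64-unknown-linux-musl" (rendered as linux/amd64 by prettyPlatform), binary = "garage" and url = "https://garagehq.deuxfleurs.fr/" + path. The concrete values here are made up for illustration, not taken from this diff.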

View File

@@ -1,10 +1,9 @@
 rec {
-  /*
-   * Fixed dependencies
-   */
+  # * Fixed dependencies
   pkgsSrc = fetchTarball {
     # As of 2022-10-13
-    url = "https://github.com/NixOS/nixpkgs/archive/a3073c49bc0163fea6a121c276f526837672b555.zip";
+    url =
+      "https://github.com/NixOS/nixpkgs/archive/a3073c49bc0163fea6a121c276f526837672b555.zip";
     sha256 = "1bz632psfbpmicyzjb8b4265y50shylccvfm6ry6mgnv5hvz324s";
   };
   cargo2nixSrc = fetchGit {
@@ -14,9 +13,7 @@ rec {
     rev = "a7a61179b66054904ef6a195d8da736eaaa06c36";
   };
 
-  /*
-   * Shared objects
-   */
+  # * Shared objects
   cargo2nix = import cargo2nixSrc;
   cargo2nixOverlay = cargo2nix.overlays.default;
 }

View File

@ -1,227 +1,240 @@
{ { system, target ? null, pkgsSrc, cargo2nixOverlay, compiler ? "rustc"
system, , release ? false, git_version ? null, features ? null, }:
target ? null,
pkgsSrc,
cargo2nixOverlay,
compiler ? "rustc",
release ? false,
git_version ? null,
features ? null,
}:
let let
log = v: builtins.trace v v; log = v: builtins.trace v v;
pkgs = pkgs = if target != null then
if target != null then import pkgsSrc {
import pkgsSrc { inherit system;
inherit system; crossSystem = {
crossSystem = { config = target;
config = target; isStatic = true;
isStatic = true;
};
overlays = [ cargo2nixOverlay ];
}
else
import pkgsSrc {
inherit system;
overlays = [ cargo2nixOverlay ];
};
/*
Cargo2nix is built for rustOverlay which installs Rust from Mozilla releases.
This is fine for 64-bit platforms, but for 32-bit platforms, we need our own Rust
to avoid incompatibilities with time_t between different versions of musl
(>= 1.2.0 shipped by NixOS, < 1.2.0 with which rustc was built), which lead to compilation breakage.
So we want a Rust release that is bound to our Nix repository to avoid these problems.
See here for more info: https://musl.libc.org/time64.html
Because Cargo2nix does not support the Rust environment shipped by NixOS,
we emulate the structure of the Rust object created by rustOverlay.
In practise, rustOverlay ships rustc+cargo in a single derivation while
NixOS ships them in separate ones. We reunite them with symlinkJoin.
*/
toolchainOptions =
if target == null || target == "x86_64-unknown-linux-musl" || target == "aarch64-unknown-linux-musl" then {
rustVersion = "1.63.0";
extraRustComponents = [ "clippy" ];
} else {
rustToolchain = pkgs.symlinkJoin {
name = "rust-static-toolchain-${target}";
paths = [
pkgs.rustPlatform.rust.cargo
pkgs.rustPlatform.rust.rustc
# clippy not needed, it only runs on amd64
];
}; };
overlays = [ cargo2nixOverlay ];
}
else
import pkgsSrc {
inherit system;
overlays = [ cargo2nixOverlay ];
}; };
/* Cargo2nix is built for rustOverlay which installs Rust from Mozilla releases.
buildEnv = (drv: { This is fine for 64-bit platforms, but for 32-bit platforms, we need our own Rust
rustc = drv.setBuildEnv; to avoid incompatibilities with time_t between different versions of musl
clippy = '' (>= 1.2.0 shipped by NixOS, < 1.2.0 with which rustc was built), which lead to compilation breakage.
${drv.setBuildEnv or "" } So we want a Rust release that is bound to our Nix repository to avoid these problems.
echo See here for more info: https://musl.libc.org/time64.html
echo --- BUILDING WITH CLIPPY --- Because Cargo2nix does not support the Rust environment shipped by NixOS,
echo we emulate the structure of the Rust object created by rustOverlay.
In practise, rustOverlay ships rustc+cargo in a single derivation while
export NIX_RUST_BUILD_FLAGS="''${NIX_RUST_BUILD_FLAGS} --deny warnings" NixOS ships them in separate ones. We reunite them with symlinkJoin.
export RUSTC="''${CLIPPY_DRIVER}"
'';
}.${compiler});
/*
Cargo2nix provides many overrides by default, you can take inspiration from them:
https://github.com/cargo2nix/cargo2nix/blob/master/overlay/overrides.nix
You can have a complete list of the available options by looking at the overriden object, mkcrate:
https://github.com/cargo2nix/cargo2nix/blob/master/overlay/mkcrate.nix
*/ */
packageOverrides = pkgs: pkgs.rustBuilder.overrides.all ++ [ toolchainOptions = if target == null || target == "x86_64-unknown-linux-musl"
/* || target == "aarch64-unknown-linux-musl" then {
[1] We add some logic to compile our crates with clippy, it provides us many additional lints rustVersion = "1.63.0";
extraRustComponents = [ "clippy" ];
} else {
rustToolchain = pkgs.symlinkJoin {
name = "rust-static-toolchain-${target}";
paths = [
pkgs.rustPlatform.rust.cargo
pkgs.rustPlatform.rust.rustc
# clippy not needed, it only runs on amd64
];
};
};
[2] We need to alter Nix hardening to make static binaries: PIE, buildEnv = (drv:
Position Independent Executables seems to be supported only on amd64. Having {
this flag set either 1. make our executables crash or 2. compile as dynamic on some platforms. rustc = drv.setBuildEnv;
Here, we deactivate it. Later (find `codegenOpts`), we reactivate it for supported targets clippy = ''
(only amd64 curently) through the `-static-pie` flag. ${drv.setBuildEnv or ""}
PIE is a feature used by ASLR, which helps mitigate security issues. echo
Learn more about Nix Hardening at: https://github.com/NixOS/nixpkgs/blob/master/pkgs/build-support/cc-wrapper/add-hardening.sh echo --- BUILDING WITH CLIPPY ---
echo
[3] We want to inject the git version while keeping the build deterministic. export NIX_RUST_BUILD_FLAGS="''${NIX_RUST_BUILD_FLAGS} --deny warnings"
As we do not want to consider the .git folder as part of the input source, export RUSTC="''${CLIPPY_DRIVER}"
we ask the user (the CI often) to pass the value to Nix. '';
}.${compiler});
[4] We don't want libsodium-sys and zstd-sys to try to use pkgconfig to build against a system library. /* Cargo2nix provides many overrides by default, you can take inspiration from them:
However the features to do so get activated for some reason (due to a bug in cargo2nix?), https://github.com/cargo2nix/cargo2nix/blob/master/overlay/overrides.nix
so disable them manually here.
*/
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage";
overrideAttrs = drv:
(if git_version != null then {
/* [3] */ preConfigure = ''
${drv.preConfigure or ""}
export GIT_VERSION="${git_version}"
'';
} else {})
//
{
/* [1] */ setBuildEnv = (buildEnv drv);
/* [2] */ hardeningDisable = [ "pie" ];
};
})
(pkgs.rustBuilder.rustLib.makeOverride { You can have a complete list of the available options by looking at the overriden object, mkcrate:
name = "garage_rpc"; https://github.com/cargo2nix/cargo2nix/blob/master/overlay/mkcrate.nix
overrideAttrs = drv: { /* [1] */ setBuildEnv = (buildEnv drv); };
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_db";
overrideAttrs = drv: { /* [1] */ setBuildEnv = (buildEnv drv); };
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_util";
overrideAttrs = drv: { /* [1] */ setBuildEnv = (buildEnv drv); };
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_table";
overrideAttrs = drv: { /* [1] */ setBuildEnv = (buildEnv drv); };
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_block";
overrideAttrs = drv: { /* [1] */ setBuildEnv = (buildEnv drv); };
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_model";
overrideAttrs = drv: { /* [1] */ setBuildEnv = (buildEnv drv); };
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_api";
overrideAttrs = drv: { /* [1] */ setBuildEnv = (buildEnv drv); };
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_web";
overrideAttrs = drv: { /* [1] */ setBuildEnv = (buildEnv drv); };
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "k2v-client";
overrideAttrs = drv: { /* [1] */ setBuildEnv = (buildEnv drv); };
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "libsodium-sys";
overrideArgs = old: {
features = [ ]; /* [4] */
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "zstd-sys";
overrideArgs = old: {
features = [ ]; /* [4] */
};
})
];
/*
We ship some parts of the code disabled by default by putting them behind a flag.
It speeds up the compilation (when the feature is not required) and released crates have less dependency by default (less attack surface, disk space, etc.).
But we want to ship these additional features when we release Garage.
In the end, we chose to exclude all features from debug builds while putting (all of) them in the release builds.
*/ */
rootFeatures = if features != null then features else packageOverrides = pkgs:
([ pkgs.rustBuilder.overrides.all ++ [
"garage/bundled-libs" /* [1] We add some logic to compile our crates with clippy, it provides us many additional lints
"garage/sled"
"garage/k2v"
] ++ (if release then [
"garage/consul-discovery"
"garage/kubernetes-discovery"
"garage/metrics"
"garage/telemetry-otlp"
"garage/lmdb"
"garage/sqlite"
] else []));
[2] We need to alter Nix hardening to make static binaries: PIE,
Position Independent Executables seems to be supported only on amd64. Having
this flag set either 1. make our executables crash or 2. compile as dynamic on some platforms.
Here, we deactivate it. Later (find `codegenOpts`), we reactivate it for supported targets
(only amd64 curently) through the `-static-pie` flag.
PIE is a feature used by ASLR, which helps mitigate security issues.
Learn more about Nix Hardening at: https://github.com/NixOS/nixpkgs/blob/master/pkgs/build-support/cc-wrapper/add-hardening.sh
[3] We want to inject the git version while keeping the build deterministic.
As we do not want to consider the .git folder as part of the input source,
we ask the user (the CI often) to pass the value to Nix.
[4] We don't want libsodium-sys and zstd-sys to try to use pkgconfig to build against a system library.
However the features to do so get activated for some reason (due to a bug in cargo2nix?),
so disable them manually here.
*/
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage";
overrideAttrs = drv:
(if git_version != null then {
# [3]
preConfigure = ''
${drv.preConfigure or ""}
export GIT_VERSION="${git_version}"
'';
} else
{ }) // {
# [1]
setBuildEnv = (buildEnv drv);
# [2]
hardeningDisable = [ "pie" ];
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_rpc";
overrideAttrs = drv: { # [1]
setBuildEnv = (buildEnv drv);
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_db";
overrideAttrs = drv: { # [1]
setBuildEnv = (buildEnv drv);
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_util";
overrideAttrs = drv: { # [1]
setBuildEnv = (buildEnv drv);
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_table";
overrideAttrs = drv: { # [1]
setBuildEnv = (buildEnv drv);
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_block";
overrideAttrs = drv: { # [1]
setBuildEnv = (buildEnv drv);
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_model";
overrideAttrs = drv: { # [1]
setBuildEnv = (buildEnv drv);
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_api";
overrideAttrs = drv: { # [1]
setBuildEnv = (buildEnv drv);
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_web";
overrideAttrs = drv: { # [1]
setBuildEnv = (buildEnv drv);
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "k2v-client";
overrideAttrs = drv: { # [1]
setBuildEnv = (buildEnv drv);
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "libsodium-sys";
overrideArgs = old: {
features = [ ]; # [4]
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "zstd-sys";
overrideArgs = old: {
features = [ ]; # [4]
};
})
];
/* We ship some parts of the code disabled by default by putting them behind a flag.
It speeds up the compilation (when the feature is not required) and released crates have less dependency by default (less attack surface, disk space, etc.).
But we want to ship these additional features when we release Garage.
In the end, we chose to exclude all features from debug builds while putting (all of) them in the release builds.
*/
rootFeatures = if features != null then
features
else
([ "garage/bundled-libs" "garage/sled" "garage/k2v" ] ++ (if release then [
"garage/consul-discovery"
"garage/kubernetes-discovery"
"garage/metrics"
"garage/telemetry-otlp"
"garage/lmdb"
"garage/sqlite"
] else
[ ]));
packageFun = import ../Cargo.nix; packageFun = import ../Cargo.nix;
/* /* We compile fully static binaries with musl to simplify deployment on most systems.
We compile fully static binaries with musl to simplify deployment on most systems. When possible, we reactivate PIE hardening (see above).
When possible, we reactivate PIE hardening (see above).
Also, if you set the RUSTFLAGS environment variable, the following parameters will Also, if you set the RUSTFLAGS environment variable, the following parameters will
be ignored. be ignored.
For more information on static builds, please refer to Rust's RFC 1721. For more information on static builds, please refer to Rust's RFC 1721.
https://rust-lang.github.io/rfcs/1721-crt-static.html#specifying-dynamicstatic-c-runtime-linkage https://rust-lang.github.io/rfcs/1721-crt-static.html#specifying-dynamicstatic-c-runtime-linkage
*/ */
codegenOpts = { codegenOpts = {
"armv6l-unknown-linux-musleabihf" = [ "target-feature=+crt-static" "link-arg=-static" ]; /* compile as dynamic with static-pie */ "armv6l-unknown-linux-musleabihf" = [
"aarch64-unknown-linux-musl" = [ "target-feature=+crt-static" "link-arg=-static" ]; /* segfault with static-pie */ "target-feature=+crt-static"
"i686-unknown-linux-musl" = [ "target-feature=+crt-static" "link-arg=-static" ]; /* segfault with static-pie */ "link-arg=-static"
"x86_64-unknown-linux-musl" = [ "target-feature=+crt-static" "link-arg=-static-pie" ]; ]; # compile as dynamic with static-pie
"aarch64-unknown-linux-musl" = [
"target-feature=+crt-static"
"link-arg=-static"
]; # segfault with static-pie
"i686-unknown-linux-musl" = [
"target-feature=+crt-static"
"link-arg=-static"
]; # segfault with static-pie
"x86_64-unknown-linux-musl" =
[ "target-feature=+crt-static" "link-arg=-static-pie" ];
}; };
/* # NixOS and Rust/Cargo triples do not match for ARM, fix it here.
NixOS and Rust/Cargo triples do not match for ARM, fix it here. rustTarget = if target == "armv6l-unknown-linux-musleabihf" then
*/ "arm-unknown-linux-musleabihf"
rustTarget = if target == "armv6l-unknown-linux-musleabihf" else
then "arm-unknown-linux-musleabihf" target;
else target;
in in pkgs.rustBuilder.makePackageSet ({
pkgs.rustBuilder.makePackageSet ({ inherit release packageFun packageOverrides codegenOpts rootFeatures;
inherit release packageFun packageOverrides codegenOpts rootFeatures; target = rustTarget;
target = rustTarget; } // toolchainOptions)
} // toolchainOptions)

View File

@@ -15,7 +15,8 @@ pkgs.buildGoModule rec {
   checkPhase = "true";
 
   meta = with pkgs.lib; {
-    description = "kaniko is a tool to build container images from a Dockerfile, inside a container or Kubernetes cluster.";
+    description =
+      "kaniko is a tool to build container images from a Dockerfile, inside a container or Kubernetes cluster.";
     homepage = "https://github.com/GoogleContainerTools/kaniko";
     license = licenses.asl20;
     platforms = platforms.linux;

View File

@@ -15,7 +15,8 @@ pkgs.buildGoModule rec {
   checkPhase = "true";
 
   meta = with pkgs.lib; {
-    description = "Command line tool to create and query container image manifest list/indexes";
+    description =
+      "Command line tool to create and query container image manifest list/indexes";
     homepage = "https://github.com/estesp/manifest-tool";
     license = licenses.asl20;
     platforms = platforms.linux;

View File

@@ -1,6 +1,4 @@
-{
-  system ? builtins.currentSystem,
-}:
+{ system ? builtins.currentSystem, }:
 
 with import ./common.nix;
 
@@ -11,27 +9,23 @@ let
     #"aarch64-unknown-linux-musl"
     "armv6l-unknown-linux-musleabihf"
   ];
-  pkgsList = builtins.map (target: import pkgsSrc {
-    inherit system;
-    crossSystem = {
-      config = target;
-      isStatic = true;
-    };
-    overlays = [ cargo2nixOverlay ];
-  }) platforms;
-  pkgsHost = import pkgsSrc {};
+  pkgsList = builtins.map (target:
+    import pkgsSrc {
+      inherit system;
+      crossSystem = {
+        config = target;
+        isStatic = true;
+      };
+      overlays = [ cargo2nixOverlay ];
+    }) platforms;
+  pkgsHost = import pkgsSrc { };
   lib = pkgsHost.lib;
   kaniko = (import ./kaniko.nix) pkgsHost;
   winscp = (import ./winscp.nix) pkgsHost;
   manifestTool = (import ./manifest-tool.nix) pkgsHost;
-in
-  lib.flatten (builtins.map (pkgs: [
-    pkgs.rustPlatform.rust.rustc
-    pkgs.rustPlatform.rust.cargo
-    pkgs.buildPackages.stdenv.cc
-  ]) pkgsList) ++ [
-    kaniko
-    winscp
-    manifestTool
-  ]
+in lib.flatten (builtins.map (pkgs: [
+  pkgs.rustPlatform.rust.rustc
+  pkgs.rustPlatform.rust.cargo
+  pkgs.buildPackages.stdenv.cc
+]) pkgsList) ++ [ kaniko winscp manifestTool ]

View File

@@ -11,12 +11,12 @@ pkgs.stdenv.mkDerivation rec {
   };
   buildPhase = ''
     cat > winscp <<EOF
    #!${pkgs.bash}/bin/bash
    WINEDEBUG=-all
    ${pkgs.winePackages.minimal}/bin/wine $out/opt/WinSCP.com
    EOF
   '';
   installPhase = ''
shell.nix (303 lines changed)

@ -1,6 +1,4 @@
{ { system ? builtins.currentSystem, }:
system ? builtins.currentSystem,
}:
with import ./nix/common.nix; with import ./nix/common.nix;
@ -13,161 +11,152 @@ let
manifest-tool = (import ./nix/manifest-tool.nix) pkgs; manifest-tool = (import ./nix/manifest-tool.nix) pkgs;
winscp = (import ./nix/winscp.nix) pkgs; winscp = (import ./nix/winscp.nix) pkgs;
in in {
{ # --- Rust Shell ---
/* --- Rust Shell --- # Use it to compile Garage
* Use it to compile Garage rust = pkgs.mkShell {
*/ nativeBuildInputs = [
rust = pkgs.mkShell { #pkgs.rustPlatform.rust.rustc
nativeBuildInputs = [ pkgs.rustPlatform.rust.cargo
#pkgs.rustPlatform.rust.rustc #pkgs.clippy
pkgs.rustPlatform.rust.cargo pkgs.rustfmt
#pkgs.clippy #pkgs.perl
pkgs.rustfmt #pkgs.protobuf
#pkgs.perl #pkgs.pkg-config
#pkgs.protobuf #pkgs.openssl
#pkgs.pkg-config pkgs.file
#pkgs.openssl #cargo2nix.packages.x86_64-linux.cargo2nix
pkgs.file ];
#cargo2nix.packages.x86_64-linux.cargo2nix };
];
};
/* --- Integration shell --- # --- Integration shell ---
* Use it to test Garage with common S3 clients # Use it to test Garage with common S3 clients
*/ integration = pkgs.mkShell {
integration = pkgs.mkShell { nativeBuildInputs = [
nativeBuildInputs = [ winscp
winscp pkgs.s3cmd
pkgs.s3cmd pkgs.awscli2
pkgs.awscli2 pkgs.minio-client
pkgs.minio-client pkgs.rclone
pkgs.rclone pkgs.socat
pkgs.socat pkgs.psmisc
pkgs.psmisc pkgs.which
pkgs.which pkgs.openssl
pkgs.openssl pkgs.curl
pkgs.curl pkgs.jq
pkgs.jq ];
]; };
};
/* --- Release shell --- # --- Release shell ---
* A shell built to make releasing easier # A shell built to make releasing easier
*/ release = pkgs.mkShell {
release = pkgs.mkShell { shellHook = ''
shellHook = '' function refresh_toolchain {
function refresh_toolchain { pass show deuxfleurs/nix_priv_key > /tmp/nix-signing-key.sec
pass show deuxfleurs/nix_priv_key > /tmp/nix-signing-key.sec nix copy \
nix copy \ --to 's3://nix?endpoint=garage.deuxfleurs.fr&region=garage&secret-key=/tmp/nix-signing-key.sec' \
--to 's3://nix?endpoint=garage.deuxfleurs.fr&region=garage&secret-key=/tmp/nix-signing-key.sec' \ $(nix-store -qR \
$(nix-store -qR \ $(nix-build --no-build-output --no-out-link nix/toolchain.nix))
$(nix-build --no-build-output --no-out-link nix/toolchain.nix)) rm /tmp/nix-signing-key.sec
rm /tmp/nix-signing-key.sec }
function refresh_cache {
pass show deuxfleurs/nix_priv_key > /tmp/nix-signing-key.sec
for attr in clippy.amd64 test.amd64 pkgs.{amd64,i386,arm,arm64}.{debug,release}; do
echo "Updating cache for ''${attr}"
derivation=$(nix-instantiate --attr ''${attr})
nix copy -j8 \
--to 's3://nix?endpoint=garage.deuxfleurs.fr&region=garage&secret-key=/tmp/nix-signing-key.sec' \
$(nix-store -qR ''${derivation%\!bin})
done
rm /tmp/nix-signing-key.sec
}
function refresh_flake_cache {
pass show deuxfleurs/nix_priv_key > /tmp/nix-signing-key.sec
for attr in packages.x86_64-linux.default devShell.x86_64-linux; do
echo "Updating cache for ''${attr}"
derivation=$(nix path-info --derivation ".#''${attr}")
nix copy -j8 \
--to 's3://nix?endpoint=garage.deuxfleurs.fr&region=garage&secret-key=/tmp/nix-signing-key.sec' \
$(nix-store -qR ''${derivation})
done
rm /tmp/nix-signing-key.sec
}
function to_s3 {
aws \
--endpoint-url https://garage.deuxfleurs.fr \
--region garage \
s3 cp \
./result-bin/bin/garage \
s3://garagehq.deuxfleurs.fr/_releases/''${DRONE_TAG:-$DRONE_COMMIT}/''${TARGET}/garage
}
function to_docker {
executor \
--force \
--customPlatform="''${DOCKER_PLATFORM}" \
--destination "''${CONTAINER_NAME}:''${CONTAINER_TAG}" \
--context dir://`pwd` \
--verbosity=debug
}
function multiarch_docker {
manifest-tool push from-spec <(cat <<EOF
image: dxflrs/garage:''${CONTAINER_TAG}
manifests:
-
image: dxflrs/arm64_garage:''${CONTAINER_TAG}
platform:
architecture: arm64
os: linux
-
image: dxflrs/amd64_garage:''${CONTAINER_TAG}
platform:
architecture: amd64
os: linux
-
image: dxflrs/386_garage:''${CONTAINER_TAG}
platform:
architecture: 386
os: linux
-
image: dxflrs/arm_garage:''${CONTAINER_TAG}
platform:
architecture: arm
os: linux
EOF
)
}
function refresh_index {
aws \
--endpoint-url https://garage.deuxfleurs.fr \
--region garage \
s3 ls \
--recursive \
s3://garagehq.deuxfleurs.fr/_releases/ \
> aws-list.txt
nix-build nix/build_index.nix
aws \
--endpoint-url https://garage.deuxfleurs.fr \
--region garage \
s3 cp \
result/share/_releases.json \
s3://garagehq.deuxfleurs.fr/
aws \
--endpoint-url https://garage.deuxfleurs.fr \
--region garage \
s3 cp \
result/share/_releases.html \
s3://garagehq.deuxfleurs.fr/
}
'';
nativeBuildInputs = [ pkgs.awscli2 kaniko manifest-tool ];
};
} }
function refresh_cache {
pass show deuxfleurs/nix_priv_key > /tmp/nix-signing-key.sec
for attr in clippy.amd64 test.amd64 pkgs.{amd64,i386,arm,arm64}.{debug,release}; do
echo "Updating cache for ''${attr}"
derivation=$(nix-instantiate --attr ''${attr})
nix copy -j8 \
--to 's3://nix?endpoint=garage.deuxfleurs.fr&region=garage&secret-key=/tmp/nix-signing-key.sec' \
$(nix-store -qR ''${derivation%\!bin})
done
rm /tmp/nix-signing-key.sec
}
function refresh_flake_cache {
pass show deuxfleurs/nix_priv_key > /tmp/nix-signing-key.sec
for attr in packages.x86_64-linux.default; do
echo "Updating cache for ''${attr}"
derivation=$(nix path-info --derivation ".#''${attr}")
nix copy -j8 \
--to 's3://nix?endpoint=garage.deuxfleurs.fr&region=garage&secret-key=/tmp/nix-signing-key.sec' \
$(nix-store -qR ''${derivation})
done
rm /tmp/nix-signing-key.sec
}
function to_s3 {
aws \
--endpoint-url https://garage.deuxfleurs.fr \
--region garage \
s3 cp \
./result-bin/bin/garage \
s3://garagehq.deuxfleurs.fr/_releases/''${DRONE_TAG:-$DRONE_COMMIT}/''${TARGET}/garage
}
function to_docker {
executor \
--force \
--customPlatform="''${DOCKER_PLATFORM}" \
--destination "''${CONTAINER_NAME}:''${CONTAINER_TAG}" \
--context dir://`pwd` \
--verbosity=debug
}
function multiarch_docker {
manifest-tool push from-spec <(cat <<EOF
image: dxflrs/garage:''${CONTAINER_TAG}
manifests:
-
image: dxflrs/arm64_garage:''${CONTAINER_TAG}
platform:
architecture: arm64
os: linux
-
image: dxflrs/amd64_garage:''${CONTAINER_TAG}
platform:
architecture: amd64
os: linux
-
image: dxflrs/386_garage:''${CONTAINER_TAG}
platform:
architecture: 386
os: linux
-
image: dxflrs/arm_garage:''${CONTAINER_TAG}
platform:
architecture: arm
os: linux
EOF
)
}
function refresh_index {
aws \
--endpoint-url https://garage.deuxfleurs.fr \
--region garage \
s3 ls \
--recursive \
s3://garagehq.deuxfleurs.fr/_releases/ \
> aws-list.txt
nix-build nix/build_index.nix
aws \
--endpoint-url https://garage.deuxfleurs.fr \
--region garage \
s3 cp \
result/share/_releases.json \
s3://garagehq.deuxfleurs.fr/
aws \
--endpoint-url https://garage.deuxfleurs.fr \
--region garage \
s3 cp \
result/share/_releases.html \
s3://garagehq.deuxfleurs.fr/
}
'';
nativeBuildInputs = [
pkgs.awscli2
kaniko
manifest-tool
];
};
}
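The three attribute sets above are meant to be entered through nix-shell's attribute selection: something like `nix-shell -A rust` for a plain build environment, `nix-shell -A integration` to get the S3 clients used by the integration tests, and `nix-shell -A release` before calling the release helpers (refresh_cache, to_s3, to_docker, refresh_index, ...). These example invocations are an assumption about usage, not something stated in this diff.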

View File

@@ -15,6 +15,7 @@ use garage_util::time::*;
 use garage_table::replication::*;
 use garage_table::*;
 
+use garage_rpc::ring::PARTITION_BITS;
 use garage_rpc::*;
 
 use garage_block::manager::BlockResyncErrorInfo;
@@ -783,6 +784,7 @@ impl AdminRpcHandler {
 			for node in ring.layout.node_ids().iter() {
 				let mut opt = opt.clone();
 				opt.all_nodes = false;
+				opt.skip_global = true;
 
 				writeln!(&mut ret, "\n======================").unwrap();
 				writeln!(&mut ret, "Stats for node {:?}:", node).unwrap();
@@ -799,6 +801,15 @@ impl AdminRpcHandler {
 					Err(e) => writeln!(&mut ret, "Network error: {}", e).unwrap(),
 				}
 			}
+
+			writeln!(&mut ret, "\n======================").unwrap();
+			write!(
+				&mut ret,
+				"Cluster statistics:\n\n{}",
+				self.gather_cluster_stats()
+			)
+			.unwrap();
+
 			Ok(AdminRpc::Ok(ret))
 		} else {
 			Ok(AdminRpc::Ok(self.gather_stats_local(opt)?))
@@ -819,22 +830,6 @@ impl AdminRpcHandler {
 
 		writeln!(&mut ret, "\nDatabase engine: {}", self.garage.db.engine()).unwrap();
 
-		// Gather ring statistics
-		let ring = self.garage.system.ring.borrow().clone();
-		let mut ring_nodes = HashMap::new();
-		for (_i, loc) in ring.partitions().iter() {
-			for n in ring.get_nodes(loc, ring.replication_factor).iter() {
-				if !ring_nodes.contains_key(n) {
-					ring_nodes.insert(*n, 0usize);
-				}
-				*ring_nodes.get_mut(n).unwrap() += 1;
-			}
-		}
-		writeln!(&mut ret, "\nRing nodes & partition count:").unwrap();
-		for (n, c) in ring_nodes.iter() {
-			writeln!(&mut ret, "  {:?} {}", n, c).unwrap();
-		}
-
 		// Gather table statistics
 		let mut table = vec!["  Table\tItems\tMklItems\tMklTodo\tGcTodo".into()];
 		table.push(self.gather_table_stats(&self.garage.bucket_table, opt.detailed)?);
@@ -881,12 +876,108 @@ impl AdminRpcHandler {
 		.unwrap();
 
 		if !opt.detailed {
-			writeln!(&mut ret, "\nIf values are missing (marked as NC), consider adding the --detailed flag - this will be slow.").unwrap();
+			writeln!(&mut ret, "\nIf values are missing above (marked as NC), consider adding the --detailed flag (this will be slow).").unwrap();
+		}
+
+		if !opt.skip_global {
+			write!(&mut ret, "\n{}", self.gather_cluster_stats()).unwrap();
 		}
 
 		Ok(ret)
 	}
 
+	fn gather_cluster_stats(&self) -> String {
+		let mut ret = String::new();
+
+		// Gather storage node and free space statistics
+		let layout = &self.garage.system.ring.borrow().layout;
+		let mut node_partition_count = HashMap::<Uuid, u64>::new();
+		for short_id in layout.ring_assignation_data.iter() {
+			let id = layout.node_id_vec[*short_id as usize];
+			*node_partition_count.entry(id).or_default() += 1;
+		}
+		let node_info = self
+			.garage
+			.system
+			.get_known_nodes()
+			.into_iter()
+			.map(|n| (n.id, n))
+			.collect::<HashMap<_, _>>();
+
+		let mut table = vec!["  ID\tHostname\tZone\tCapacity\tPart.\tDataAvail\tMetaAvail".into()];
+		for (id, parts) in node_partition_count.iter() {
+			let info = node_info.get(id);
+			let status = info.map(|x| &x.status);
+			let role = layout.roles.get(id).and_then(|x| x.0.as_ref());
+			let hostname = status.map(|x| x.hostname.as_str()).unwrap_or("?");
+			let zone = role.map(|x| x.zone.as_str()).unwrap_or("?");
+			let capacity = role.map(|x| x.capacity_string()).unwrap_or("?".into());
+			let avail_str = |x| match x {
+				Some((avail, total)) => {
+					let pct = (avail as f64) / (total as f64) * 100.;
+					let avail = bytesize::ByteSize::b(avail);
+					let total = bytesize::ByteSize::b(total);
+					format!("{}/{} ({:.1}%)", avail, total, pct)
+				}
+				None => "?".into(),
+			};
+			let data_avail = avail_str(status.and_then(|x| x.data_disk_avail));
+			let meta_avail = avail_str(status.and_then(|x| x.meta_disk_avail));
+			table.push(format!(
+				"  {:?}\t{}\t{}\t{}\t{}\t{}\t{}",
+				id, hostname, zone, capacity, parts, data_avail, meta_avail
+			));
+		}
+		write!(
+			&mut ret,
+			"Storage nodes:\n{}",
+			format_table_to_string(table)
+		)
+		.unwrap();
+
+		let meta_part_avail = node_partition_count
+			.iter()
+			.filter_map(|(id, parts)| {
+				node_info
+					.get(id)
+					.and_then(|x| x.status.meta_disk_avail)
+					.map(|c| c.0 / *parts)
+			})
+			.collect::<Vec<_>>();
+		let data_part_avail = node_partition_count
+			.iter()
+			.filter_map(|(id, parts)| {
+				node_info
+					.get(id)
+					.and_then(|x| x.status.data_disk_avail)
+					.map(|c| c.0 / *parts)
+			})
+			.collect::<Vec<_>>();
+		if !meta_part_avail.is_empty() && !data_part_avail.is_empty() {
+			let meta_avail =
+				bytesize::ByteSize(meta_part_avail.iter().min().unwrap() * (1 << PARTITION_BITS));
+			let data_avail =
+				bytesize::ByteSize(data_part_avail.iter().min().unwrap() * (1 << PARTITION_BITS));
+			writeln!(
+				&mut ret,
+				"\nEstimated available storage space cluster-wide (might be lower in practice):"
+			)
+			.unwrap();
+			if meta_part_avail.len() < node_partition_count.len()
+				|| data_part_avail.len() < node_partition_count.len()
+			{
+				writeln!(&mut ret, "  data: < {}", data_avail).unwrap();
+				writeln!(&mut ret, "  metadata: < {}", meta_avail).unwrap();
+				writeln!(&mut ret, "A precise estimate could not be given as information is missing for some storage nodes.").unwrap();
+			} else {
+				writeln!(&mut ret, "  data: {}", data_avail).unwrap();
+				writeln!(&mut ret, "  metadata: {}", meta_avail).unwrap();
+			}
+		}
+
+		ret
+	}
+
 	fn gather_table_stats<F, R>(
 		&self,
 		t: &Arc<Table<F, R>>,
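The cluster-wide estimate printed above follows a simple rule: for every storage node, divide its free space by the number of partition copies it holds, take the minimum of these per-partition headrooms across nodes, and multiply by the total number of partitions (1 << PARTITION_BITS). A minimal sketch of that arithmetic, assuming 256 partitions (PARTITION_BITS = 8) and a hypothetical 3-node cluster with replication factor 3, so every node holds a copy of every partition; none of the numbers come from this diff:

    // Per node: (bytes available on the data volume, partition copies held).
    fn estimate(nodes: &[(u64, u64)], total_partitions: u64) -> u64 {
        nodes
            .iter()
            .map(|(avail, parts)| avail / parts) // headroom per partition on this node
            .min()
            .unwrap_or(0)
            * total_partitions // the tightest node bounds the whole cluster
    }

    fn main() {
        let nodes = [
            (300_000_000_000, 256), // node A: 300 GB free
            (120_000_000_000, 256), // node B: 120 GB free
            (200_000_000_000, 256), // node C: 200 GB free
        ];
        // min(300, 120, 200) GB / 256 partitions * 256 partitions = 120 GB:
        // with full replication everywhere, the fullest node is the limit.
        println!("estimated available space cluster-wide: {} bytes", estimate(&nodes, 256));
    }

The real code runs this separately for the data and the metadata volumes, and prints "<" bounds when some nodes did not report their disk usage.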

View File

@@ -59,18 +59,29 @@ pub async fn cmd_status(rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID) ->
 	let layout = fetch_layout(rpc_cli, rpc_host).await?;
 
 	println!("==== HEALTHY NODES ====");
-	let mut healthy_nodes = vec!["ID\tHostname\tAddress\tTags\tZone\tCapacity".to_string()];
+	let mut healthy_nodes =
+		vec!["ID\tHostname\tAddress\tTags\tZone\tCapacity\tDataAvail\tMetaAvail".to_string()];
 	for adv in status.iter().filter(|adv| adv.is_up) {
 		match layout.roles.get(&adv.id) {
 			Some(NodeRoleV(Some(cfg))) => {
+				let data_avail = match &adv.status.data_disk_avail {
+					_ if cfg.capacity.is_none() => "N/A".into(),
+					Some((avail, total)) => {
+						let pct = (*avail as f64) / (*total as f64) * 100.;
+						let avail = bytesize::ByteSize::b(*avail);
+						format!("{} ({:.1}%)", avail, pct)
+					}
+					None => "?".into(),
+				};
 				healthy_nodes.push(format!(
-					"{id:?}\t{host}\t{addr}\t[{tags}]\t{zone}\t{capacity}",
+					"{id:?}\t{host}\t{addr}\t[{tags}]\t{zone}\t{capacity}\t{data_avail}",
 					id = adv.id,
 					host = adv.status.hostname,
 					addr = adv.addr,
 					tags = cfg.tags.join(","),
 					zone = cfg.zone,
 					capacity = cfg.capacity_string(),
+					data_avail = data_avail,
 				));
 			}
 			_ => {
View File

@@ -504,6 +504,11 @@ pub struct StatsOpt {
 	/// Gather detailed statistics (this can be long)
 	#[structopt(short = "d", long = "detailed")]
 	pub detailed: bool,
+
+	/// Don't show global cluster stats (internal use in RPC)
+	#[structopt(skip)]
+	#[serde(default)]
+	pub skip_global: bool,
 }
 
 #[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)]

View File

@@ -23,6 +23,7 @@ hex = "0.4"
 tracing = "0.1"
 rand = "0.8"
 sodiumoxide = { version = "0.2.5-0", package = "kuska-sodiumoxide" }
+systemstat = "0.2.3"
 async-trait = "0.1.7"
 serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }

View File

@@ -3,6 +3,9 @@
 #[macro_use]
 extern crate tracing;
 
+mod metrics;
+mod system_metrics;
+
 #[cfg(feature = "consul-discovery")]
 mod consul;
 #[cfg(feature = "kubernetes-discovery")]
@@ -13,9 +16,6 @@ pub mod replication_mode;
 pub mod ring;
 pub mod system;
 
-mod metrics;
 pub mod rpc_helper;
 pub use rpc_helper::*;
-
-pub mod system_metrics;

View File

@@ -3,6 +3,7 @@ use std::collections::HashMap;
 use std::io::{Read, Write};
 use std::net::{IpAddr, SocketAddr};
 use std::path::{Path, PathBuf};
+use std::sync::atomic::Ordering;
 use std::sync::{Arc, RwLock};
 use std::time::{Duration, Instant};
@@ -38,7 +39,6 @@ use crate::replication_mode::*;
 use crate::ring::*;
 use crate::rpc_helper::*;
-#[cfg(feature = "metrics")]
 use crate::system_metrics::*;
 
 const DISCOVERY_INTERVAL: Duration = Duration::from_secs(60);
@@ -106,7 +106,7 @@ pub struct System {
 	consul_discovery: Option<ConsulDiscovery>,
 	#[cfg(feature = "kubernetes-discovery")]
 	kubernetes_discovery: Option<KubernetesDiscoveryConfig>,
-	#[cfg(feature = "metrics")]
+
 	metrics: SystemMetrics,
 
 	replication_mode: ReplicationMode,
@@ -118,18 +118,28 @@ pub struct System {
 	/// Path to metadata directory
 	pub metadata_dir: PathBuf,
+
+	/// Path to data directory
+	pub data_dir: PathBuf,
 }
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct NodeStatus {
 	/// Hostname of the node
 	pub hostname: String,
+
 	/// Replication factor configured on the node
 	pub replication_factor: usize,
 	/// Cluster layout version
 	pub cluster_layout_version: u64,
 	/// Hash of cluster layout staging data
 	pub cluster_layout_staging_hash: Hash,
+
+	/// Disk usage on partition containing metadata directory (tuple: `(avail, total)`)
+	#[serde(default)]
+	pub meta_disk_avail: Option<(u64, u64)>,
+	/// Disk usage on partition containing data directory (tuple: `(avail, total)`)
+	#[serde(default)]
+	pub data_disk_avail: Option<(u64, u64)>,
 }
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
@@ -271,18 +281,11 @@ impl System {
 			}
 		};
 
-		let local_status = NodeStatus {
-			hostname: gethostname::gethostname()
-				.into_string()
-				.unwrap_or_else(|_| "<invalid utf-8>".to_string()),
-			replication_factor,
-			cluster_layout_version: cluster_layout.version,
-			cluster_layout_staging_hash: cluster_layout.staging_hash,
-		};
-
-		#[cfg(feature = "metrics")]
 		let metrics = SystemMetrics::new(replication_factor);
 
+		let mut local_status = NodeStatus::initial(replication_factor, &cluster_layout);
+		local_status.update_disk_usage(&config.metadata_dir, &config.data_dir, &metrics);
+
 		let ring = Ring::new(cluster_layout, replication_factor);
 		let (update_ring, ring) = watch::channel(Arc::new(ring));
@@ -373,12 +376,12 @@ impl System {
 			consul_discovery,
 			#[cfg(feature = "kubernetes-discovery")]
 			kubernetes_discovery: config.kubernetes_discovery.clone(),
-			#[cfg(feature = "metrics")]
 			metrics,
 
 			ring,
 			update_ring: Mutex::new(update_ring),
 			metadata_dir: config.metadata_dir.clone(),
+			data_dir: config.data_dir.clone(),
 		});
 		sys.system_endpoint.set_handler(sys.clone());
 		Ok(sys)
@@ -416,12 +419,7 @@ impl System {
 					.get(&n.id.into())
 					.cloned()
 					.map(|(_, st)| st)
-					.unwrap_or(NodeStatus {
-						hostname: "?".to_string(),
-						replication_factor: 0,
-						cluster_layout_version: 0,
-						cluster_layout_staging_hash: Hash::from([0u8; 32]),
-					}),
+					.unwrap_or(NodeStatus::unknown()),
 			})
 			.collect::<Vec<_>>();
 		known_nodes
@@ -600,6 +598,9 @@ impl System {
 		let ring = self.ring.borrow();
 		new_si.cluster_layout_version = ring.layout.version;
 		new_si.cluster_layout_staging_hash = ring.layout.staging_hash;
+
+		new_si.update_disk_usage(&self.metadata_dir, &self.data_dir, &self.metrics);
+
 		self.local_status.swap(Arc::new(new_si));
 	}
@@ -864,6 +865,69 @@ impl EndpointHandler<SystemRpc> for System {
 	}
 }
 
+impl NodeStatus {
+	fn initial(replication_factor: usize, layout: &ClusterLayout) -> Self {
+		NodeStatus {
+			hostname: gethostname::gethostname()
+				.into_string()
+				.unwrap_or_else(|_| "<invalid utf-8>".to_string()),
+			replication_factor,
+			cluster_layout_version: layout.version,
+			cluster_layout_staging_hash: layout.staging_hash,
+			meta_disk_avail: None,
+			data_disk_avail: None,
+		}
+	}
+
+	fn unknown() -> Self {
+		NodeStatus {
+			hostname: "?".to_string(),
+			replication_factor: 0,
+			cluster_layout_version: 0,
+			cluster_layout_staging_hash: Hash::from([0u8; 32]),
+			meta_disk_avail: None,
+			data_disk_avail: None,
+		}
+	}
+
+	fn update_disk_usage(&mut self, meta_dir: &Path, data_dir: &Path, metrics: &SystemMetrics) {
+		use systemstat::{Platform, System};
+		let mounts = System::new().mounts().unwrap_or_default();
+
+		let mount_avail = |path: &Path| {
+			mounts
+				.iter()
+				.filter(|x| path.starts_with(&x.fs_mounted_on))
+				.max_by_key(|x| x.fs_mounted_on.len())
+				.map(|x| (x.avail.as_u64(), x.total.as_u64()))
+		};
+
+		self.meta_disk_avail = mount_avail(meta_dir);
+		self.data_disk_avail = mount_avail(data_dir);
+
+		if let Some((avail, total)) = self.meta_disk_avail {
+			metrics
+				.values
+				.meta_disk_avail
+				.store(avail, Ordering::Relaxed);
+			metrics
+				.values
+				.meta_disk_total
+				.store(total, Ordering::Relaxed);
+		}
+		if let Some((avail, total)) = self.data_disk_avail {
+			metrics
+				.values
+				.data_disk_avail
+				.store(avail, Ordering::Relaxed);
+			metrics
+				.values
+				.data_disk_total
+				.store(total, Ordering::Relaxed);
+		}
+	}
+}
+
 fn get_default_ip() -> Option<IpAddr> {
 	pnet_datalink::interfaces()
 		.iter()
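For reference, the mount-point selection in update_disk_usage boils down to: among all mounted filesystems whose mount point is a path prefix of the directory, keep the most specific one. A standalone sketch using the same systemstat calls; the directory path below is hypothetical:

    use std::path::Path;
    use systemstat::{Platform, System};

    fn main() {
        let dir = Path::new("/var/lib/garage/data"); // hypothetical data_dir

        // mounts() lists every mounted filesystem with its mount point and sizes.
        let mounts = System::new().mounts().unwrap_or_default();

        // Path::starts_with compares whole components, so "/" and "/var/lib" match
        // the directory above but "/var/li" would not; the longest match wins.
        let fs = mounts
            .iter()
            .filter(|m| dir.starts_with(&m.fs_mounted_on))
            .max_by_key(|m| m.fs_mounted_on.len());

        if let Some(m) = fs {
            println!("{} -> {} available of {}", m.fs_mounted_on, m.avail, m.total);
        }
    }

The node then reports (avail.as_u64(), total.as_u64()) for both the metadata and the data directory, which is what ends up in NodeStatus::meta_disk_avail and NodeStatus::data_disk_avail.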

View File

@@ -1,14 +1,31 @@
+use std::sync::atomic::{AtomicU64, Ordering};
+use std::sync::Arc;
+
 use opentelemetry::{global, metrics::*, KeyValue};
 
 /// TableMetrics reference all counter used for metrics
 pub struct SystemMetrics {
 	pub(crate) _garage_build_info: ValueObserver<u64>,
 	pub(crate) _replication_factor: ValueObserver<u64>,
+	pub(crate) _disk_avail: ValueObserver<u64>,
+	pub(crate) _disk_total: ValueObserver<u64>,
+	pub(crate) values: Arc<SystemMetricsValues>,
+}
+
+#[derive(Default)]
+pub struct SystemMetricsValues {
+	pub(crate) data_disk_total: AtomicU64,
+	pub(crate) data_disk_avail: AtomicU64,
+	pub(crate) meta_disk_total: AtomicU64,
+	pub(crate) meta_disk_avail: AtomicU64,
 }
 
 impl SystemMetrics {
 	pub fn new(replication_factor: usize) -> Self {
 		let meter = global::meter("garage_system");
+		let values = Arc::new(SystemMetricsValues::default());
+		let values1 = values.clone();
+		let values2 = values.clone();
 		Self {
 			_garage_build_info: meter
 				.u64_value_observer("garage_build_info", move |observer| {
@@ -28,6 +45,33 @@ impl SystemMetrics {
 				})
 				.with_description("Garage replication factor setting")
 				.init(),
+			_disk_avail: meter
+				.u64_value_observer("garage_local_disk_avail", move |observer| {
+					match values1.data_disk_avail.load(Ordering::Relaxed) {
+						0 => (),
+						x => observer.observe(x, &[KeyValue::new("volume", "data")]),
+					};
+					match values1.meta_disk_avail.load(Ordering::Relaxed) {
+						0 => (),
+						x => observer.observe(x, &[KeyValue::new("volume", "metadata")]),
+					};
+				})
+				.with_description("Garage available disk space on each node")
+				.init(),
+			_disk_total: meter
+				.u64_value_observer("garage_local_disk_total", move |observer| {
+					match values2.data_disk_total.load(Ordering::Relaxed) {
+						0 => (),
+						x => observer.observe(x, &[KeyValue::new("volume", "data")]),
+					};
+					match values2.meta_disk_total.load(Ordering::Relaxed) {
+						0 => (),
+						x => observer.observe(x, &[KeyValue::new("volume", "metadata")]),
+					};
+				})
+				.with_description("Garage total disk space on each node")
+				.init(),
+			values,
 		}
 	}
 }
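With these observers in place, each node exports two additional gauges, garage_local_disk_avail and garage_local_disk_total, each reported once with the label volume="data" and once with volume="metadata". A value of zero is treated as "not measured yet" and simply not reported (the `0 => ()` arms), so dashboards never see a misleading 0-byte disk; a fill-ratio panel can then be derived as 1 - avail / total. The dashboard formula is an illustration, not part of this change.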