# nixcfg/nix/deuxfleurs.nix

{ config, pkgs, ... }:
let
cfg = config.deuxfleurs;
in
with builtins;
with pkgs.lib;
{
options.deuxfleurs =
let wg_node = with types; submodule {
options = {
hostname = mkOption {
type = str;
description = "Host name";
};
site_name = mkOption {
type = nullOr str;
description = "Site where the node is located";
default = null;
};
IP = mkOption {
type = str;
description = "IP Address in the Wireguard network";
};
publicKey = mkOption {
type = str;
description = "Public key";
};
endpoint = mkOption {
type = nullOr str;
description = "Wireguard endpoint on the public Internet";
};
lan_endpoint = mkOption {
type = nullOr str;
description = "Wireguard endpoint for nodes in the same site";
default = null;
};
};
};
in
{
# Parameters for individual nodes
network_interface = mkOption {
description = "Network interface name to configure";
type = types.str;
};
lan_ip = mkOption {
description = "IP address of this node on the local network interface";
type = types.str;
};
lan_ip_prefix_length = mkOption {
description = "Prefix length associated with lan_ip";
type = types.int;
};
ipv6 = mkOption {
description = "Public IPv6 address of this node";
type = types.str;
};
ipv6_prefix_length = mkOption {
description = "Prefix length associated with ipv6 ip";
type = types.int;
};
cluster_ip = mkOption {
description = "IP address of this node on the Wesher mesh network";
type = types.str;
};
wireguard_port = mkOption {
description = "Port for incoming Wireguard VPN connections";
type = types.port;
default = 33799;
};
is_raft_server = mkOption {
description = "Make this node a RAFT server for the Nomad and Consul deployments";
type = types.bool;
default = false;
};
# Parameters that generally vary between sites
lan_default_gateway = mkOption {
description = "IPv4 address of the default route on the local network interface";
type = types.str;
};
ipv6_default_gateway = mkOption {
description = "IPv6 address of the default IPv6 gateway for the targeted net interface";
type = types.str;
};
site_name = mkOption {
description = "Site (availability zone) on which this node is deployed";
type = types.str;
};
public_ipv4 = mkOption {
description = "Public IPv4 through which this node is accessible (possibly after port opening using DiploNAT), for domain names that are updated by D53";
type = types.nullOr types.str;
default = null;
};
cname_target = mkOption {
description = "DNS CNAME target to use for services hosted in this site, for domain names that are updated by D53";
type = types.nullOr types.str;
default = null;
};
nameservers = mkOption {
description = "External DNS servers to use";
type = types.listOf types.str;
};
# Parameters common to all nodes
cluster_name = mkOption {
description = "Name of this Deuxfleurs deployment";
type = types.str;
};
cluster_prefix = mkOption {
description = "IP address prefix for the Wireguard overlay network";
type = types.str;
};
cluster_prefix_length = mkOption {
description = "IP address prefix length for the Wireguard overlay network";
type = types.int;
default = 16;
};
cluster_nodes = mkOption {
description = "Nodes that are part of the cluster";
type = types.listOf wg_node;
};
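# Illustrative example of a cluster_nodes entry (hypothetical values):
#   { hostname = "node1"; site_name = "site_a"; IP = "10.83.1.1";
#     publicKey = "<wireguard public key>"; endpoint = "203.0.113.10:33799";
#     lan_endpoint = "192.168.1.10:33799"; }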
admin_accounts = mkOption {
description = "List of users having an admin account on cluster nodes, maps user names to a list of authorized SSH keys";
type = types.attrsOf (types.listOf types.str);
};
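# Illustrative example (hypothetical user and key):
#   admin_accounts = { alice = [ "ssh-ed25519 AAAA... alice@laptop" ]; };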
bootstrap = mkOption {
description = "Whether to enable bootstrapping for Nomad and Consul";
type = types.bool;
default = false;
};
};
config =
let node_meta = {
"site" = cfg.site_name;
"public_ipv6" = cfg.ipv6;
} //
(if cfg.public_ipv4 != null
then { "public_ipv4" = cfg.public_ipv4; }
else {}) //
(if cfg.cname_target != null
then { "cname_target" = cfg.cname_target; }
else {});
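# This metadata is advertised for this node both in Consul (node_meta)
# and in Nomad (client.meta below).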
in
{
# Configure admin accounts on all nodes
users.users = builtins.mapAttrs (name: publicKeys: {
isNormalUser = true;
extraGroups = [ "wheel" ];
openssh.authorizedKeys.keys = publicKeys;
}) cfg.admin_accounts;
# Configure network interfaces
networking.useDHCP = false;
networking.useNetworkd = true;
systemd.network.networks = {
"10-uplink" = {
matchConfig = {
# We could prepend "en* eth*" to match all Ethernet interfaces
Name = "${cfg.network_interface}";
};
networkConfig = {
IPv6AcceptRA = false;
LinkLocalAddressing = "no";
};
address = [
"${cfg.lan_ip}/${toString cfg.lan_ip_prefix_length}"
"${cfg.ipv6}/${toString cfg.ipv6_prefix_length}"
];
routes = [
{
routeConfig = {
Gateway = cfg.lan_default_gateway;
# GatewayOnLink: assume the gateway is directly reachable on this link, so the route can be installed even if the gateway address is outside the configured prefixes. Defaults to "no".
GatewayOnLink = true;
};
}
{
routeConfig = {
Gateway = cfg.ipv6_default_gateway;
GatewayOnLink = true;
};
}
];
};
};
# Configure Unbound DNS to forward queries under .consul to the Consul agent
# and to pass all other queries to an upstream public DNS resolver
services.unbound = {
enable = true;
enableRootTrustAnchor = false; # disable DNSSEC as it causes issues
settings = {
server = {
interface = [ "127.0.0.1" "${cfg.lan_ip}" "172.17.0.1" ];
domain-insecure = [ "consul." ];
local-zone = [ "consul. nodefault" ];
log-servfail = true;
access-control = [
"127.0.0.0/8 allow"
"${cfg.lan_ip}/${toString cfg.lan_ip_prefix_length} allow"
"172.17.0.0/16 allow"
];
};
forward-zone = [
# Forward .consul queries to Consul daemon
{
name = "consul.";
forward-addr = "${cfg.lan_ip}@8600";
forward-no-cache = true;
forward-tcp-upstream = false;
forward-tls-upstream = false;
}
# Forward all other queries to the configured upstream nameservers
{
name = ".";
forward-addr = cfg.nameservers;
forward-first = true;
}
];
};
resolveLocalQueries = true;
};
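# Resolution through Unbound can be checked with, for instance:
#   dig @<this node's lan_ip> consul.service.consul
# (illustrative; any name served by Consul DNS works)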
# Reach Unbound through the IP of our LAN interface,
# instead of 127.0.0.1 (this will also work in Docker containers)
networking.nameservers = [ # TODO remove this ?
cfg.lan_ip
];
services.resolved.enable = false;
# Configure Wireguard VPN between all nodes
systemd.services."wg-quick-wg0".after = [ "unbound.service" ];
networking.wg-quick.interfaces.wg0 = {
address = [ "${cfg.cluster_ip}/16" ];
listenPort = cfg.wireguard_port;
privateKeyFile = "/var/lib/deuxfleurs/wireguard-keys/private";
mtu = 1420;
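# For each peer, prefer its LAN endpoint when it is in the same site as this
# node and a LAN endpoint is defined; otherwise use its public endpoint.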
peers = map ({ publicKey, endpoint, IP, site_name, lan_endpoint, ... }: {
publicKey = publicKey;
allowedIPs = [ "${IP}/32" ];
endpoint = if site_name != null && site_name == cfg.site_name && lan_endpoint != null
then lan_endpoint else endpoint;
persistentKeepalive = 25;
}) cfg.cluster_nodes;
};
system.activationScripts.generate_df_wg_key = ''
if [ ! -f /var/lib/deuxfleurs/wireguard-keys/private ]; then
mkdir -p /var/lib/deuxfleurs/wireguard-keys
(umask 077; ${pkgs.wireguard-tools}/bin/wg genkey > /var/lib/deuxfleurs/wireguard-keys/private)
echo "New Wireguard key was generated."
echo "This node's Wireguard public key is: $(${pkgs.wireguard-tools}/bin/wg pubkey < /var/lib/deuxfleurs/wireguard-keys/private)"
fi
'';
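# The public key printed above must be added to this node's cluster_nodes
# entry in the configuration of all other cluster members.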
# Configure /etc/hosts to link all hostnames to their Wireguard IP
networking.extraHosts = builtins.concatStringsSep "\n" (map
({ hostname, IP, ...}: "${IP} ${hostname}")
cfg.cluster_nodes);
# Enable Hashicorp Consul & Nomad
services.consul.enable = true;
systemd.services.consul.after = [ "wg-quick-wg0.service" ];
services.consul.extraConfig =
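# Raft servers run the Consul server agent; other nodes run as clients only.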
(if cfg.is_raft_server
then { server = true; }
// (if cfg.bootstrap then { bootstrap_expect = 3; } else {})
else {}) //
{
inherit node_meta;
datacenter = cfg.cluster_name;
ui_config = {
enabled = true;
};
bind_addr = "${cfg.cluster_ip}";
addresses = {
https = "0.0.0.0";
dns = "0.0.0.0";
};
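# Serve the Consul API over HTTPS only; setting the HTTP port to -1 disables
# the plaintext listener.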
ports = {
http = -1;
https = 8501;
};
performance = {
rpc_hold_timeout = "70s";
};
ca_file = "/var/lib/consul/pki/consul-ca.crt";
cert_file = "/var/lib/consul/pki/consul.crt";
key_file = "/var/lib/consul/pki/consul.key";
verify_incoming = true;
verify_outgoing = true;
verify_server_hostname = true;
};
systemd.services.consul.serviceConfig = { # TODO remove this ?
AmbientCapabilities = "CAP_NET_BIND_SERVICE";
};
services.nomad.enable = true;
systemd.services.nomad.after = [ "wg-quick-wg0.service" ];
services.nomad.package = pkgs.nomad_1_3;
services.nomad.extraPackages = [
pkgs.glibc
pkgs.zstd
];
services.nomad.settings =
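# Raft servers also run the Nomad server role; with bootstrapping enabled,
# the cluster waits for 3 servers before electing a leader.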
(if cfg.is_raft_server
then {
server = { enabled = true; }
// (if cfg.bootstrap then { bootstrap_expect = 3; } else {});
} else {}) //
{
region = cfg.cluster_name;
datacenter = cfg.site_name;
advertise = {
rpc = "${cfg.cluster_ip}";
http = "${cfg.cluster_ip}";
serf = "${cfg.cluster_ip}";
};
consul = {
address = "localhost:8501";
ca_file = "/var/lib/nomad/pki/consul.crt";
cert_file = "/var/lib/nomad/pki/consul-client.crt";
key_file = "/var/lib/nomad/pki/consul-client.key";
ssl = true;
checks_use_advertise = true;
};
client = {
enabled = true;
network_interface = "wg0";
meta = node_meta;
};
telemetry = {
publish_allocation_metrics = true;
publish_node_metrics = true;
prometheus_metrics = true;
};
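# Mutual TLS on the Nomad HTTP and RPC endpoints.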
tls = {
http = true;
rpc = true;
ca_file = "/var/lib/nomad/pki/nomad-ca.crt";
cert_file = "/var/lib/nomad/pki/nomad.crt";
key_file = "/var/lib/nomad/pki/nomad.key";
verify_server_hostname = true;
verify_https_client = true;
};
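# Docker driver: allow host volume mounts and privileged containers.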
plugin = [
{
docker = [
{
config = [
{
volumes.enabled = true;
allow_privileged = true;
}
];
}
];
}
];
};
# ---- Firewall config ----
# Open ports in the firewall.
networking.firewall = {
enable = true;
allowedTCPPorts = [
# Allow anyone to connect on SSH port
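# (take the first port configured for OpenSSH, defaulting to 22)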
(builtins.head ({ openssh.ports = [22]; } // config.services).openssh.ports)
];
allowedUDPPorts = [
# Allow peers to connect to Wireguard
cfg.wireguard_port
];
# Allow specific hosts access to specific things in the cluster
extraCommands = ''
# Allow everything from the router (useful for UPnP/IGD)
iptables -A INPUT -s ${cfg.lan_default_gateway} -j ACCEPT
# Allow docker containers to access all ports
iptables -A INPUT -s 172.17.0.0/16 -j ACCEPT
# Allow other nodes on VPN to access all ports
iptables -A INPUT -s ${cfg.cluster_prefix}/${toString cfg.cluster_prefix_length} -j ACCEPT
'';
# When stopping the firewall, delete the rules that were added manually above
extraStopCommands = ''
iptables -D INPUT -s ${cfg.lan_default_gateway} -j ACCEPT
iptables -D INPUT -s 172.17.0.0/16 -j ACCEPT
iptables -D INPUT -s ${cfg.cluster_prefix}/${toString cfg.cluster_prefix_length} -j ACCEPT
'';
};
};
}