Modularize and prepare to support multiple clusters

Alex, 2022-02-09 12:09:49 +01:00
parent cce5cd17f5
commit f03cafd49b
GPG key ID: EDABF9711E244EB1
20 changed files with 356 additions and 252 deletions

.gitignore

@@ -1,3 +1,6 @@
 notes/
 secrets/*
 !secrets/*.sample
+cluster/*/secrets/*
+!cluster/*/secrets/*.sample

cluster/staging/cluster.nix (new file)

@@ -0,0 +1,77 @@
{ config, pkgs, ... } @ args:

{
  deuxfleurs.cluster_name = "staging";

  deuxfleurs.cluster_nodes = [
    {
      hostname = "spoutnik";
      publicKey = "fO8qZOZmnug84cA8nvfjl5MUqyWljP0BAz/4tHRZyEg=";
      IP = "10.42.0.2";
      endpoint = "77.141.67.109:42136";
    }
    {
      hostname = "cariacou";
      publicKey = "qxrtfn2zRVnN52Y5NYumyU3/FcRMnh3kJ2C37JfrczA=";
      IP = "10.42.0.21";
      endpoint = "82.66.112.151:33721";
    }
    {
      hostname = "carcajou";
      publicKey = "7Nm7pMmyS7Nts1MB+loyD8u84ODxHPTkDu+uqQR6yDk=";
      IP = "10.42.0.22";
      endpoint = "82.66.112.151:33722";
    }
    {
      hostname = "caribou";
      publicKey = "g6ZED/wPn5MPfytJKwPI19808CXtEad0IJUkEAAzwyY=";
      IP = "10.42.0.23";
      endpoint = "82.66.112.151:33723";
    }
  ];

  deuxfleurs.admin_nodes = [
    {
      hostname = "hammerhead";
      publicKey = "b5hF+GSTgg3oM6wnjL7jRbfyf1jtsWdVptPPbAh3Qic=";
      IP = "10.42.0.1";
      endpoint = "5.135.179.11:51349";
    }
    {
      hostname = "robinson";
      publicKey = "ETaZFil3mFXlJ0LaJZyWqJVLV2IZUF5PB/8M7WbQSTg=";
      IP = "10.42.0.42";
      endpoint = "77.141.67.109:33742";
    }
    {
      hostname = "shiki";
      publicKey = "QUiUNMk70TEQ75Ut7Uqikr5uGVSXmx8EGNkGM6tANlg=";
      IP = "10.42.0.206";
      endpoint = "37.187.118.206:51820";
    }
    {
      hostname = "lindy";
      publicKey = "wen9GnZy2iLT6RyHfn7ydS/wvdvow1XPmhZxIkrDbks=";
      IP = "10.42.0.66";
      endpoint = "82.66.112.151:33766";
    }
  ];

  deuxfleurs.admin_accounts = {
    lx = [
      # Keys for accessing nodes from outside
      "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJpaBZdYxHqMxhv2RExAOa7nkKhPBOHupMP3mYaZ73w9 lx@lindy"
      "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDIDdVbA9fEdqSr5UJ77NnoIqDTVp8ca5kHExhZYI4ecBExFJfonJllXMBN9KdC4ukxtY8Ug47PcMOfMaTBZQc+e+KpvDWpkBt15Xpem3RCxmMBES79sLL7LgtAdBXc5mNaCX8EOEVixWKdarjvxRyf6py6the51G5muaiMpoj5fae4ZpRGjhGTPefzc7y7zRWBUUZ8pYHW774BIaK6XT9gn3hyHV+Occjl/UODXvodktk55YtnuPi8adXTYEsHrVVz8AkFhx+cr0U/U8vtQnsTrZG+JmgQLqpXVs0RDw5bE1RefEbMuYNKxutYKUe3L+ZJtDe0M0MqOFI8a4F5TxP5 katchup@konata"
    ];
    quentin = [
      "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDT1+H08FdUSvdPpPKdcafq4+JRHvFVjfvG5Id97LAoROmFRUb/ZOMTLdNuD7FqvW0Da5CPxIMr8ZxfrFLtpGyuG7qdI030iIRZPlKpBh37epZHaV+l9F4ZwJQMIBO9cuyLPXgsyvM/s7tDtrdK1k7JTf2EVvoirrjSzBaMhAnhi7//to8zvujDtgDZzy6aby75bAaDetlYPBq2brWehtrf9yDDG9WAMYJqp//scje/WmhbRR6eSdim1HaUcWk5+4ZPt8sQJcy8iWxQ4jtgjqTvMOe5v8ZPkxJNBine/ZKoJsv7FzKem00xEH7opzktaGukyEqH0VwOwKhmBiqsX2yN quentin@dufour.io"
      "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBu+KUebaWwlugMC5fGbNhHc6IaQDAC6+1vMc4Ww7nVU1rs2nwI7L5qcWxOwNdhFaorZQZy/fJuCWdFbF61RCKGayBWPLZHGPsfqDuggYNEi1Qil1kpeCECfDQNjyMTK058ZBBhOWNMHBjlLWXUlRJDkRBBECY0vo4jRv22SvSaPUCAnkdJ9rbAp/kqb497PTIb2r1l1/ew8YdhINAlpYQFQezZVfkZdTKxt22n0QCjhupqjfh3gfNnbBX0z/iO+RvAOWRIZsjPFLC+jXl+n7cnu2cq1nvST5eHiYfXXeIgIwmeENLKqp+2Twr7PIdv22PnJkh6iR5kx7eTRxkNZdN quentin@deuxfleurs.fr"
    ];
    adrien = [
      "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBfVX+iQSHl3V0el3/y2Rtl9Q/nrmLoTE3oXnR+16yX7g8HvzU871q89jbE/UWvNRvO4hirTcKF8yojuq8ZRCoUcQO+6/YlPrY/2G8kFhPTlUGDQ+mLT+ancZsom4mkg3I9oQjKZ9qxMD1GuU8Ydz4eXjhJ8OGFZhBpEgnrLmdA53Y5d2fCbaZN5EYD4sWEFYN7xBLxTGNwv0gygiPs967Z4/ZfHngTvqVoS9wnQThSCIoXPTWFAJCkN8dC5tPZwnbOT1bGcYUF0VTrcaD6cU6Q1ZRrtyqXxnnyxpQCAoe2hgdIm+LnDsBx9trfPauqi0dXi36X8pLmudW1f1RmKWT adrien@bacigalupi"
    ];
    maximilien = [
      "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDHMMR6zNzz8NQU80wFquhUCeiXJuGphjP+zNouKbn228GyESu8sfNBwnuZq86vblR11Lz8l2rtCM73GfAKg29qmUWUHRKWvRIYWv2vaUJcCdy0bAxIzcvCvjZX0SpnIKxe9y3Rp0LGO5WLYfw0ZFaavwFZP0Z8w1Kj9/zBmL2X2avbhkaYHi/C1yXhbvESYQysmqLa48EX/TS616MBrgR9zbI9AoTQ9NOHnR14Tve/AP/khcZoBJdm4hTttMbNkEc0wonzdylTDew263SPRs/uoqnQIpUtErdPHqU10Yup8HjXjEyFJsSwcZcM5sZOw5JKckKJwmcd0yjO/x/4/Mk5"
    ];
    kokakiwi = [
      "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFPTsEgcOtb2bij+Ih8eg8ZqO7d3IMiWykv6deMzlSSS kokakiwi@kira"
    ];
  };
}

cluster/staging/node/carcajou.nix

@@ -24,15 +24,10 @@
     }
   ];

-  networking.wireguard.interfaces.wg0 = {
-    ips = [ "10.42.0.22/16" ];
-    listenPort = 33722;
-  };
+  deuxfleurs.vpn_ip = "10.42.0.22";
+  deuxfleurs.vpn_listen_port = 33722;
+  deuxfleurs.is_raft_server = true;

   # Enable netdata monitoring
   services.netdata.enable = true;
-
-  # Activate as Nomad and Consul server node
-  services.nomad.settings.server.enabled = true;
-  services.consul.extraConfig.server = true;
 }

cluster/staging/node/cariacou.nix

@@ -24,10 +24,9 @@
     }
   ];

-  networking.wireguard.interfaces.wg0 = {
-    ips = [ "10.42.0.21/16" ];
-    listenPort = 33721;
-  };
+  deuxfleurs.vpn_ip = "10.42.0.21";
+  deuxfleurs.vpn_listen_port = 33721;
+  deuxfleurs.is_raft_server = true;

   # Enable netdata monitoring
   services.netdata.enable = true;

cluster/staging/node/caribou.nix

@@ -24,15 +24,10 @@
     }
   ];

-  networking.wireguard.interfaces.wg0 = {
-    ips = [ "10.42.0.23/16" ];
-    listenPort = 33723;
-  };
+  deuxfleurs.vpn_ip = "10.42.0.23";
+  deuxfleurs.vpn_listen_port = 33723;
+  deuxfleurs.is_raft_server = true;

   # Enable netdata monitoring
   services.netdata.enable = true;
-
-  # Activate as Nomad and Consul server node
-  services.nomad.settings.server.enabled = true;
-  services.consul.extraConfig.server = true;
 }

cluster/staging/node/<node>.site.nix (neptune site)

@@ -6,7 +6,7 @@
     interface = "eno1";
   };

-  services.nomad.settings.datacenter = "neptune";
+  deuxfleurs.site_name = "neptune";

   networking.firewall.allowedTCPPorts = [ 80 443 ];

cluster/staging/node/<node>.site.nix (pluton site)

@@ -7,7 +7,7 @@
   };

   networking.nameservers = [ "213.186.33.99" "172.104.136.243" ];

-  services.nomad.settings.datacenter = "pluton";
+  deuxfleurs.site_name = "pluton";

   networking.firewall.allowedTCPPorts = [ 80 443 ];
 }

configuration.nix

@@ -5,17 +5,18 @@
 { config, pkgs, ... } @ args:

 # Configuration local for this cluster node (hostname, IP, etc)
-let node_config = import ./node.nix args;
-    site_config = import ./site.nix args;
-in
 {
   imports =
     [ # Include the results of the hardware scan.
       ./hardware-configuration.nix
-      # Configuration local for this cluster node (hostname, IP, etc)
-      ./node.nix
+      # Include generic Deuxfleurs module
+      ./deuxfleurs.nix
+      # Configuration for this deployment (a cluster)
+      ./cluster.nix
       # Configuration local for this Deuxfleurs site (set of nodes)
       ./site.nix
+      # Configuration local for this cluster node (hostname, IP, etc)
+      ./node.nix
     ];

   # The global useDHCP flag is deprecated, therefore explicitly set to false here.
@@ -27,77 +28,9 @@ in
   # Networking configuration (static IPs for each node is defined in node/*.nix)
   networking.nameservers = [ "9.9.9.9" ];

-  # Wireguard VPN configuration
-  networking.wireguard.interfaces.wg0 = {
-    privateKeyFile = "/root/wireguard-keys/private";
-    peers = [
-      { # Hammerhead
-        publicKey = "b5hF+GSTgg3oM6wnjL7jRbfyf1jtsWdVptPPbAh3Qic=";
-        allowedIPs = [ "10.42.0.1/32" ];
-        endpoint = "5.135.179.11:51349";
-        persistentKeepalive = 25;
-      }
-      { # Spoutnik
-        publicKey = "fO8qZOZmnug84cA8nvfjl5MUqyWljP0BAz/4tHRZyEg=";
-        allowedIPs = [ "10.42.0.2/32" ];
-        endpoint = "77.141.67.109:42136";
-        persistentKeepalive = 25;
-      }
-      { # Robinson
-        publicKey = "ETaZFil3mFXlJ0LaJZyWqJVLV2IZUF5PB/8M7WbQSTg=";
-        allowedIPs = [ "10.42.0.42/32" ];
-        endpoint = "77.141.67.109:33742";
-        persistentKeepalive = 25;
-      }
-      { # Shiki
-        publicKey = "QUiUNMk70TEQ75Ut7Uqikr5uGVSXmx8EGNkGM6tANlg=";
-        allowedIPs = [ "10.42.0.206/32" ];
-        endpoint = "37.187.118.206:51820";
-        persistentKeepalive = 25;
-      }
-      { # Lindy
-        publicKey = "wen9GnZy2iLT6RyHfn7ydS/wvdvow1XPmhZxIkrDbks=";
-        allowedIPs = [ "10.42.0.66/32" ];
-        endpoint = "82.66.112.151:33766";
-        persistentKeepalive = 25;
-      }
-      { # Carcajou
-        publicKey = "qxrtfn2zRVnN52Y5NYumyU3/FcRMnh3kJ2C37JfrczA=";
-        allowedIPs = [ "10.42.0.21/32" ];
-        endpoint = "82.66.112.151:33721";
-        persistentKeepalive = 25;
-      }
-      { # Carcajou
-        publicKey = "7Nm7pMmyS7Nts1MB+loyD8u84ODxHPTkDu+uqQR6yDk=";
-        allowedIPs = [ "10.42.0.22/32" ];
-        endpoint = "82.66.112.151:33722";
-        persistentKeepalive = 25;
-      }
-      { # Caribou
-        publicKey = "g6ZED/wPn5MPfytJKwPI19808CXtEad0IJUkEAAzwyY=";
-        allowedIPs = [ "10.42.0.23/32" ];
-        endpoint = "82.66.112.151:33723";
-        persistentKeepalive = 25;
-      }
-    ];
-  };

   # Set your time zone.
   time.timeZone = "Europe/Paris";

-  networking.extraHosts = ''
-    192.168.1.21 cariacou.lan
-    192.168.1.22 carcajou.lan
-    192.168.1.23 caribou.lan
-    10.42.0.1 hammerhead
-    10.42.0.2 spoutnik
-    10.42.0.21 cariacou
-    10.42.0.22 carcajou
-    10.42.0.23 caribou
-    10.42.0.66 lindy
-    10.42.0.206 shiki
-  '';

   # Select internationalisation properties.
   # i18n.defaultLocale = "en_US.UTF-8";
   console = {
@@ -105,57 +38,6 @@ in
     keyMap = "fr";
   };

-  # Enable sound.
-  # sound.enable = true;
-  # hardware.pulseaudio.enable = true;
-
-  # Define user accounts
-  users.users.lx = {
-    isNormalUser = true;
-    extraGroups = [
-      "wheel" # Enable sudo for the user.
-      "video" # Having fun with links -g
-    ];
-    openssh.authorizedKeys.keys = [
-      # Keys for accessing nodes from outside
-      "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJpaBZdYxHqMxhv2RExAOa7nkKhPBOHupMP3mYaZ73w9 lx@lindy"
-      "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDIDdVbA9fEdqSr5UJ77NnoIqDTVp8ca5kHExhZYI4ecBExFJfonJllXMBN9KdC4ukxtY8Ug47PcMOfMaTBZQc+e+KpvDWpkBt15Xpem3RCxmMBES79sLL7LgtAdBXc5mNaCX8EOEVixWKdarjvxRyf6py6the51G5muaiMpoj5fae4ZpRGjhGTPefzc7y7zRWBUUZ8pYHW774BIaK6XT9gn3hyHV+Occjl/UODXvodktk55YtnuPi8adXTYEsHrVVz8AkFhx+cr0U/U8vtQnsTrZG+JmgQLqpXVs0RDw5bE1RefEbMuYNKxutYKUe3L+ZJtDe0M0MqOFI8a4F5TxP5 katchup@konata"
-    ];
-  };
-  users.users.quentin = {
-    isNormalUser = true;
-    extraGroups = [ "wheel" ];
-    openssh.authorizedKeys.keys = [
-      "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDT1+H08FdUSvdPpPKdcafq4+JRHvFVjfvG5Id97LAoROmFRUb/ZOMTLdNuD7FqvW0Da5CPxIMr8ZxfrFLtpGyuG7qdI030iIRZPlKpBh37epZHaV+l9F4ZwJQMIBO9cuyLPXgsyvM/s7tDtrdK1k7JTf2EVvoirrjSzBaMhAnhi7//to8zvujDtgDZzy6aby75bAaDetlYPBq2brWehtrf9yDDG9WAMYJqp//scje/WmhbRR6eSdim1HaUcWk5+4ZPt8sQJcy8iWxQ4jtgjqTvMOe5v8ZPkxJNBine/ZKoJsv7FzKem00xEH7opzktaGukyEqH0VwOwKhmBiqsX2yN quentin@dufour.io"
-      "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBu+KUebaWwlugMC5fGbNhHc6IaQDAC6+1vMc4Ww7nVU1rs2nwI7L5qcWxOwNdhFaorZQZy/fJuCWdFbF61RCKGayBWPLZHGPsfqDuggYNEi1Qil1kpeCECfDQNjyMTK058ZBBhOWNMHBjlLWXUlRJDkRBBECY0vo4jRv22SvSaPUCAnkdJ9rbAp/kqb497PTIb2r1l1/ew8YdhINAlpYQFQezZVfkZdTKxt22n0QCjhupqjfh3gfNnbBX0z/iO+RvAOWRIZsjPFLC+jXl+n7cnu2cq1nvST5eHiYfXXeIgIwmeENLKqp+2Twr7PIdv22PnJkh6iR5kx7eTRxkNZdN quentin@deuxfleurs.fr"
-    ];
-  };
-  users.users.adrien = {
-    isNormalUser = true;
-    extraGroups = [ "wheel" ];
-    openssh.authorizedKeys.keys = [
-      "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBfVX+iQSHl3V0el3/y2Rtl9Q/nrmLoTE3oXnR+16yX7g8HvzU871q89jbE/UWvNRvO4hirTcKF8yojuq8ZRCoUcQO+6/YlPrY/2G8kFhPTlUGDQ+mLT+ancZsom4mkg3I9oQjKZ9qxMD1GuU8Ydz4eXjhJ8OGFZhBpEgnrLmdA53Y5d2fCbaZN5EYD4sWEFYN7xBLxTGNwv0gygiPs967Z4/ZfHngTvqVoS9wnQThSCIoXPTWFAJCkN8dC5tPZwnbOT1bGcYUF0VTrcaD6cU6Q1ZRrtyqXxnnyxpQCAoe2hgdIm+LnDsBx9trfPauqi0dXi36X8pLmudW1f1RmKWT adrien@bacigalupi"
-    ];
-  };
-  users.users.maximilien = {
-    isNormalUser = true;
-    extraGroups = [ "wheel" ];
-    openssh.authorizedKeys.keys = [
-      "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDHMMR6zNzz8NQU80wFquhUCeiXJuGphjP+zNouKbn228GyESu8sfNBwnuZq86vblR11Lz8l2rtCM73GfAKg29qmUWUHRKWvRIYWv2vaUJcCdy0bAxIzcvCvjZX0SpnIKxe9y3Rp0LGO5WLYfw0ZFaavwFZP0Z8w1Kj9/zBmL2X2avbhkaYHi/C1yXhbvESYQysmqLa48EX/TS616MBrgR9zbI9AoTQ9NOHnR14Tve/AP/khcZoBJdm4hTttMbNkEc0wonzdylTDew263SPRs/uoqnQIpUtErdPHqU10Yup8HjXjEyFJsSwcZcM5sZOw5JKckKJwmcd0yjO/x/4/Mk5"
-    ];
-  };
-  users.users.kokakiwi = {
-    isNormalUser = true;
-    extraGroups = [ "wheel" ];
-    openssh.authorizedKeys.keys = [
-      "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFPTsEgcOtb2bij+Ih8eg8ZqO7d3IMiWykv6deMzlSSS kokakiwi@kira"
-    ];
-  };
-
   # List packages installed in system profile. To search, run:
   # $ nix search wget
   environment.systemPackages = with pkgs; [
@@ -188,90 +70,6 @@ in
   # ---- CONFIG FOR DEUXFLEURS CLUSTER ----

-  # Enable Hashicorp Consul & Nomad
-  services.consul.enable = true;
-  services.consul.extraConfig =
-    let public_ip = (builtins.head (builtins.split "/" (builtins.head node_config.networking.wireguard.interfaces.wg0.ips)));
-    in
-    (if node_config.services.consul.extraConfig.server or false
-     then { bootstrap_expect = 3; }
-     else {}) //
-    {
-      datacenter = "staging";
-      node_meta = {
-        "site" = site_config.services.nomad.settings.datacenter;
-      };
-      ui = true;
-      bind_addr = public_ip;
-      ports.http = -1;
-      addresses.https = "0.0.0.0";
-      ports.https = 8501;
-      retry_join = [ "10.42.0.2" "10.42.0.21" "10.42.0.22" "10.42.0.23" ];
-      ca_file = "/var/lib/consul/pki/consul-ca.crt";
-      cert_file = "/var/lib/consul/pki/consul2022.crt";
-      key_file = "/var/lib/consul/pki/consul2022.key";
-      verify_incoming = true;
-      verify_outgoing = true;
-      verify_server_hostname = true;
-    };
-
-  services.nomad.enable = true;
-  services.nomad.package = pkgs.nomad_1_1;
-  services.nomad.settings =
-    let public_ip = (builtins.head (builtins.split "/" (builtins.head node_config.networking.wireguard.interfaces.wg0.ips)));
-    in
-    (if node_config.services.nomad.settings.server.enabled or false
-     then { server = { bootstrap_expect = 3; }; }
-     else {}) //
-    {
-      region = "staging";
-      advertise = {
-        rpc = public_ip;
-        http = public_ip;
-        serf = public_ip;
-      };
-      consul = {
-        address = "localhost:8501";
-        ca_file = "/var/lib/nomad/pki/consul2022.crt";
-        cert_file = "/var/lib/nomad/pki/consul2022-client.crt";
-        key_file = "/var/lib/nomad/pki/consul2022-client.key";
-        ssl = true;
-      };
-      client = {
-        enabled = true;
-        network_interface = "wg0";
-        meta = {
-          "site" = site_config.services.nomad.settings.datacenter;
-        };
-      };
-      tls = {
-        http = true;
-        rpc = true;
-        ca_file = "/var/lib/nomad/pki/nomad-ca.crt";
-        cert_file = "/var/lib/nomad/pki/nomad2022.crt";
-        key_file = "/var/lib/nomad/pki/nomad2022.key";
-        verify_server_hostname = true;
-        verify_https_client = true;
-      };
-      plugin = [
-        {
-          docker = [
-            {
-              config = [
-                {
-                  volumes.enabled = true;
-                  allow_privileged = true;
-                }
-              ];
-            }
-          ];
-        }
-      ];
-    };

   # Mount Garage using Rclone
   systemd.services.mountgarage = {
     enable = false;
@@ -296,12 +94,7 @@ in
     # Allow anyone to connect on SSH port
     allowedTCPPorts = [
-      (builtins.head ({ openssh.ports = [22]; } // node_config.services).openssh.ports)
-    ];
-    # Allow anyone to contact Wireguard VPN server
-    allowedUDPPorts = [
-      node_config.networking.wireguard.interfaces.wg0.listenPort
+      (builtins.head ({ openssh.ports = [22]; } // config.services).openssh.ports)
     ];

     # Allow specific hosts access to specific things in the cluster

deploy.sh

@@ -1,15 +1,27 @@
 #!/usr/bin/env bash

+# Get cluster subdirectory name
 cd $(dirname $0)

+CLUSTER="$1"
+if [ -z "$CLUSTER" ] || [ ! -d "cluster/$CLUSTER" ]; then
+  echo "Usage: $0 <cluster name>"
+  echo "The cluster name must be the name of a subdirectory of cluster/"
+  exit 1
+fi
+shift 1
+
+# Do actual stuff
 if [ -z "$1" ]; then
-  NIXHOSTLIST=$(ls node | grep -v '\.site\.')
+  NIXHOSTLIST=$(ls cluster/$CLUSTER/node | grep -v '\.site\.')
 else
   NIXHOSTLIST="$@"
 fi

 TMP_PATH=/tmp/tmp-deploy-$(date +%s)
+SSH_CONFIG=cluster/$CLUSTER/ssh_config

 YEAR=$(date +%Y)
for NIXHOST in $NIXHOSTLIST; do for NIXHOST in $NIXHOSTLIST; do
@@ -25,25 +37,27 @@ for NIXHOST in $NIXHOSTLIST; do
   echo "Sending NixOS config files"
-  ssh -F ssh_config $SSH_DEST mkdir -p $TMP_PATH $TMP_PATH/pki
-  cat configuration.nix | ssh -F ssh_config $SSH_DEST tee $TMP_PATH/configuration.nix > /dev/null
-  cat node/$NIXHOST.nix | ssh -F ssh_config $SSH_DEST tee $TMP_PATH/node.nix > /dev/null
-  cat node/$NIXHOST.site.nix | ssh -F ssh_config $SSH_DEST tee $TMP_PATH/site.nix > /dev/null
+  ssh -F $SSH_CONFIG $SSH_DEST mkdir -p $TMP_PATH $TMP_PATH/pki
+  cat configuration.nix | ssh -F $SSH_CONFIG $SSH_DEST tee $TMP_PATH/configuration.nix > /dev/null
+  cat nix/deuxfleurs.nix | ssh -F $SSH_CONFIG $SSH_DEST tee $TMP_PATH/deuxfleurs.nix > /dev/null
+  cat cluster/$CLUSTER/cluster.nix | ssh -F $SSH_CONFIG $SSH_DEST tee $TMP_PATH/cluster.nix > /dev/null
+  cat cluster/$CLUSTER/node/$NIXHOST.nix | ssh -F $SSH_CONFIG $SSH_DEST tee $TMP_PATH/node.nix > /dev/null
+  cat cluster/$CLUSTER/node/$NIXHOST.site.nix | ssh -F $SSH_CONFIG $SSH_DEST tee $TMP_PATH/site.nix > /dev/null

   echo "Sending secret files"
   for SECRET in rclone.conf \
     pki/consul-ca.crt pki/consul$YEAR.crt pki/consul$YEAR.key pki/consul$YEAR-client.crt pki/consul$YEAR-client.key \
     pki/nomad-ca.crt pki/nomad$YEAR.crt pki/nomad$YEAR.key; do
-    test -f secrets/$SECRET && (cat secrets/$SECRET | ssh -F ssh_config $SSH_DEST tee $TMP_PATH/$SECRET > /dev/null)
+    test -f cluster/$CLUSTER/secrets/$SECRET && (cat cluster/$CLUSTER/secrets/$SECRET | ssh -F $SSH_CONFIG $SSH_DEST tee $TMP_PATH/$SECRET > /dev/null)
   done

   echo "Rebuilding NixOS"
-  ssh -F ssh_config $SSH_DEST tee $TMP_PATH/deploy.sh > /dev/null <<EOF
+  ssh -F $SSH_CONFIG $SSH_DEST tee $TMP_PATH/deploy.sh > /dev/null <<EOF
 set -ex
 cd $TMP_PATH
-mv configuration.nix node.nix site.nix /etc/nixos
+mv deuxfleurs.nix configuration.nix cluster.nix node.nix site.nix /etc/nixos
 test -f rclone.conf && (mv rclone.conf /root; chmod 600 /root/rclone.conf)
@@ -71,6 +85,6 @@ consul kv put secrets/consul/consul-client.crt - < /var/lib/consul/pki/consul$YEAR-client.crt
 consul kv put secrets/consul/consul-client.key - < /var/lib/consul/pki/consul$YEAR-client.key
 EOF

-  ssh -t -F ssh_config $SSH_DEST sudo sh $TMP_PATH/deploy.sh
-  ssh -F ssh_config $SSH_DEST rm -rv '/tmp/tmp-deploy-*'
+  ssh -t -F $SSH_CONFIG $SSH_DEST sudo sh $TMP_PATH/deploy.sh
+  ssh -F $SSH_CONFIG $SSH_DEST rm -rv '/tmp/tmp-deploy-*'
 done

(PKI generation script)

@@ -2,11 +2,24 @@
 set -xe

+# Enter proper cluster subdirectory
 cd $(dirname $0)

+CLUSTER="$1"
+if [ ! -d "cluster/$CLUSTER" ]; then
+  echo "Usage: $0 <cluster name>"
+  echo "The cluster name must be the name of a subdirectory of cluster/"
+  exit 1
+fi
+cd cluster/$CLUSTER
+
 mkdir -p secrets/pki
 cd secrets/pki

+# Do actual stuff
 YEAR=$(date +%Y)

 for APP in consul nomad; do
   # 1. Create certificate authority
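The PKI script follows the same convention; its filename is not preserved in this view, so genpki.sh below is an assumed name. The generated CAs and yearly certificates land inside the cluster subdirectory:

    ./genpki.sh staging   # assumed name; creates the consul and nomad CAs plus this year's certificates under cluster/staging/secrets/pki/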

nix/deuxfleurs.nix (new file)

@@ -0,0 +1,185 @@
{ config, pkgs, ... }:

let
  cfg = config.deuxfleurs;
in

with builtins;
with pkgs.lib;

{
  options.deuxfleurs =
    let wg_node = with types; submodule {
      options = {
        hostname = mkOption {
          type = str;
          description = "Host name";
        };
        IP = mkOption {
          type = str;
          description = "IP Address";
        };
        publicKey = mkOption {
          type = str;
          description = "Public key";
        };
        endpoint = mkOption {
          type = nullOr str;
          description = "Wireguard endpoint on the public Internet";
        };
      };
    };
    in
    {
      # Parameters that may vary between nodes
      site_name = mkOption {
        description = "Site (availability zone) on which this node is deployed";
        type = types.str;
      };
      vpn_ip = mkOption {
        description = "IP address of this node on the Wireguard VPN";
        type = types.str;
      };
      vpn_listen_port = mkOption {
        description = "Port for incoming Wireguard VPN connections";
        type = types.port;
      };
      is_raft_server = mkOption {
        description = "Make this node a RAFT server for the Nomad and Consul deployments";
        type = types.bool;
        default = false;
      };

      # Parameters common to all nodes
      cluster_name = mkOption {
        description = "Name of this Deuxfleurs deployment";
        type = types.str;
      };
      cluster_nodes = mkOption {
        description = "Nodes that are part of the cluster";
        type = types.listOf wg_node;
      };
      admin_nodes = mkOption {
        description = "Machines that are part of the Wireguard VPN for administration purposes";
        type = types.listOf wg_node;
      };
      admin_accounts = mkOption {
        description = "List of users having an admin account on cluster nodes, maps user names to a list of authorized SSH keys";
        type = types.attrsOf (types.listOf types.str);
      };
    };

  config = {
    # Configure admin accounts on all nodes
    users.users = builtins.mapAttrs (name: publicKeys: {
      isNormalUser = true;
      extraGroups = [ "wheel" ];
      openssh.authorizedKeys.keys = publicKeys;
    }) cfg.admin_accounts;

    # Configure Wireguard VPN between all nodes
    networking.wireguard.interfaces.wg0 = {
      ips = [ "${cfg.vpn_ip}/16" ];
      listenPort = cfg.vpn_listen_port;
      privateKeyFile = "/root/wireguard-keys/private";
      peers = map ({ publicKey, endpoint, IP, ... }: {
        publicKey = publicKey;
        allowedIPs = [ "${IP}/32" ];
        endpoint = endpoint;
        persistentKeepalive = 25;
      }) (cfg.cluster_nodes ++ cfg.admin_nodes);
    };
    networking.firewall.allowedUDPPorts = [ cfg.vpn_listen_port ];

    # Configure /etc/hosts to link all hostnames to their Wireguard IP
    networking.extraHosts = builtins.concatStringsSep "\n" (map
      ({ hostname, IP, ... }: "${IP} ${hostname}")
      (cfg.cluster_nodes ++ cfg.admin_nodes));

    # Enable Hashicorp Consul & Nomad
    services.consul.enable = true;
    services.consul.extraConfig =
      (if cfg.is_raft_server
       then {
         server = true;
         bootstrap_expect = 3;
       }
       else {}) //
      {
        datacenter = cfg.cluster_name;
        node_meta = {
          "site" = cfg.site_name;
        };
        ui = true;
        bind_addr = cfg.vpn_ip;
        ports.http = -1;
        addresses.https = "0.0.0.0";
        ports.https = 8501;
        retry_join = map (node_info: node_info.IP) cfg.cluster_nodes;
        ca_file = "/var/lib/consul/pki/consul-ca.crt";
        cert_file = "/var/lib/consul/pki/consul2022.crt";
        key_file = "/var/lib/consul/pki/consul2022.key";
        verify_incoming = true;
        verify_outgoing = true;
        verify_server_hostname = true;
      };

    services.nomad.enable = true;
    services.nomad.package = pkgs.nomad_1_1;
    services.nomad.settings =
      (if cfg.is_raft_server
       then {
         server = {
           enabled = true;
           bootstrap_expect = 3;
         };
       }
       else {}) //
      {
        region = cfg.cluster_name;
        datacenter = cfg.site_name;
        advertise = {
          rpc = cfg.vpn_ip;
          http = cfg.vpn_ip;
          serf = cfg.vpn_ip;
        };
        consul = {
          address = "localhost:8501";
          ca_file = "/var/lib/nomad/pki/consul2022.crt";
          cert_file = "/var/lib/nomad/pki/consul2022-client.crt";
          key_file = "/var/lib/nomad/pki/consul2022-client.key";
          ssl = true;
        };
        client = {
          enabled = true;
          network_interface = "wg0";
          meta = {
            "site" = cfg.site_name;
          };
        };
        tls = {
          http = true;
          rpc = true;
          ca_file = "/var/lib/nomad/pki/nomad-ca.crt";
          cert_file = "/var/lib/nomad/pki/nomad2022.crt";
          key_file = "/var/lib/nomad/pki/nomad2022.key";
          verify_server_hostname = true;
          verify_https_client = true;
        };
        plugin = [
          {
            docker = [
              {
                config = [
                  {
                    volumes.enabled = true;
                    allow_privileged = true;
                  }
                ];
              }
            ];
          }
        ];
      };
  };
}
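With every deployment-specific value behind these options, standing up a second cluster should only require a new subdirectory under cluster/. A hypothetical cluster/prod/cluster.nix sketch (hostname, key and addresses below are placeholders, not real nodes):

    { config, pkgs, ... } @ args:
    {
      deuxfleurs.cluster_name = "prod";
      deuxfleurs.cluster_nodes = [
        {
          hostname = "examplenode";               # placeholder
          publicKey = "<wireguard public key>";   # placeholder
          IP = "10.42.1.1";
          endpoint = "203.0.113.10:33701";        # placeholder
        }
      ];
      deuxfleurs.admin_nodes = [ ];
      deuxfleurs.admin_accounts = { };
    }

Each node of that cluster would then set deuxfleurs.vpn_ip, deuxfleurs.vpn_listen_port, deuxfleurs.site_name and, on the Raft servers, deuxfleurs.is_raft_server in its own node.nix and site.nix, exactly as the staging nodes above do.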

(unnamed helper script)

@@ -1,5 +1,22 @@
 #!/bin/sh

+set -xe
+
+# Enter proper cluster subdirectory
+cd $(dirname $0)
+
+CLUSTER="$1"
+if [ ! -d "cluster/$CLUSTER" ]; then
+  echo "Usage: $0 <cluster name>"
+  echo "The cluster name must be the name of a subdirectory of cluster/"
+  exit 1
+fi
+cd cluster/$CLUSTER
+
+# Do actual stuff
 YEAR=$(date +%Y)

 _int() {

upgrade.sh

@@ -1,14 +1,27 @@
 #!/usr/bin/env bash

+# Get cluster subdirectory name
 cd $(dirname $0)

+CLUSTER="$1"
+if [ -z "$CLUSTER" ] || [ ! -d "cluster/$CLUSTER" ]; then
+  echo "Usage: $0 <cluster name>"
+  echo "The cluster name must be the name of a subdirectory of cluster/"
+  exit 1
+fi
+shift 1
+
+# Do actual stuff
 if [ -z "$@" ]; then
-  NIXHOSTLIST=$(ls node | grep -v '\.site\.')
+  NIXHOSTLIST=$(ls cluster/$CLUSTER/node | grep -v '\.site\.')
 else
   NIXHOSTLIST="$@"
 fi

 TMP_SCRIPT=/tmp/tmp-upgrade-$(date +%s).sh
+SSH_CONFIG=cluster/$CLUSTER/ssh_config

 for NIXHOST in $NIXHOSTLIST; do
   NIXHOST=${NIXHOST%.*}
@@ -21,7 +34,7 @@ for NIXHOST in $NIXHOSTLIST; do
   echo "==== DOING $NIXHOST ===="

-  ssh -F ssh_config $SSH_DEST tee $TMP_SCRIPT > /dev/null <<EOF
+  ssh -F $SSH_CONFIG $SSH_DEST tee $TMP_SCRIPT > /dev/null <<EOF
 set -ex
 nix-channel --add https://nixos.org/channels/nixos-21.11 nixos
@@ -30,9 +43,9 @@ nixos-rebuild boot
 EOF

   read -p "Press Enter to continue (run upgrade on $NIXHOST)..."
-  ssh -t -F ssh_config $SSH_DEST sudo sh $TMP_SCRIPT
-  ssh -F ssh_config $SSH_DEST rm -v $TMP_SCRIPT
+  ssh -t -F $SSH_CONFIG $SSH_DEST sudo sh $TMP_SCRIPT
+  ssh -F $SSH_CONFIG $SSH_DEST rm -v $TMP_SCRIPT

   read -p "Press Enter to continue (reboot $NIXHOST)..."
-  ssh -t -F ssh_config $SSH_DEST sudo reboot
+  ssh -t -F $SSH_CONFIG $SSH_DEST sudo reboot
 done