Merge pull request 'Simplify network configuration' (#11) from simplify-network-config into main

Reviewed-on: Deuxfleurs/nixcfg#11
This commit is contained in:
Alex 2023-05-16 13:19:33 +00:00
commit aee3a09471
43 changed files with 789 additions and 835 deletions

View file

@ -0,0 +1,100 @@
# Nomad system job running Bottin (an LDAP server that stores its data in
# Consul) with at most one instance per site across the listed datacenters.
job "core:bottin" {
datacenters = ["orion", "neptune", "scorpio"]
type = "system"
priority = 90
# Roll out one allocation at a time, one minute apart.
update {
max_parallel = 1
stagger = "1m"
}
group "bottin" {
# At most one bottin allocation per site (distinct ${meta.site} values).
constraint {
distinct_property = "${meta.site}"
value = "1"
}
# Standard LDAP port, statically bound on the host.
network {
port "ldap_port" {
static = 389
to = 389
}
}
task "bottin" {
driver = "docker"
config {
image = "dxflrs/bottin:7h18i30cckckaahv87d3c86pn4a7q41z"
network_mode = "host"
readonly_rootfs = true
ports = [ "ldap_port" ]
volumes = [
"secrets/config.json:/config.json",
"secrets:/etc/bottin",
]
}
# On failure, retry up to 10 times within 5 minutes, then delay.
restart {
interval = "5m"
attempts = 10
delay = "15s"
mode = "delay"
}
resources {
memory = 100
memory_max = 200
}
# Bottin configuration rendered from the repo-local template file.
template {
data = file("../config/bottin/config.json.tpl")
destination = "secrets/config.json"
}
# Consul TLS material fetched from the Consul KV secrets tree.
template {
data = "{{ key \"secrets/consul/consul.crt\" }}"
destination = "secrets/consul.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.crt\" }}"
destination = "secrets/consul-client.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.key\" }}"
destination = "secrets/consul-client.key"
}
# Environment pointing Bottin's Consul client at the TLS endpoint above.
template {
data = <<EOH
CONSUL_HTTP_ADDR=https://consul.service.prod.consul:8501
CONSUL_HTTP_SSL=true
CONSUL_CACERT=/etc/bottin/consul.crt
CONSUL_CLIENT_CERT=/etc/bottin/consul-client.crt
CONSUL_CLIENT_KEY=/etc/bottin/consul-client.key
EOH
destination = "secrets/env"
env = true
}
# Register the LDAP endpoint in Consul with a TCP health check;
# restart the task after 3 consecutive failed checks.
service {
tags = [ "${meta.site}" ]
port = "ldap_port"
address_mode = "host"
name = "bottin"
check {
type = "tcp"
port = "ldap_port"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
}
}
}

View file

@ -1,257 +0,0 @@
# Legacy monolithic "core" job (deleted in this change): ran diplonat,
# tricot and bottin groups in one Nomad system job; since split into
# separate core:* jobs.
job "core" {
datacenters = ["orion", "neptune", "scorpio"]
type = "system"
priority = 90
update {
max_parallel = 1
stagger = "5m"
}
# diplonat: per-node daemon that manages firewall/NAT port openings
# based on service tags found in Consul.
group "diplonat" {
task "diplonat" {
driver = "docker"
config {
image = "lxpz/amd64_diplonat:5"
network_mode = "host"
readonly_rootfs = true
# Privileged: needs to manipulate the host's network configuration.
privileged = true
volumes = [
"secrets:/etc/diplonat",
]
}
restart {
interval = "5m"
attempts = 10
delay = "15s"
mode = "delay"
}
# Consul TLS material from the secrets KV tree.
template {
data = "{{ key \"secrets/consul/consul.crt\" }}"
destination = "secrets/consul.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.crt\" }}"
destination = "secrets/consul-client.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.key\" }}"
destination = "secrets/consul-client.key"
}
template {
data = <<EOH
DIPLONAT_REFRESH_TIME=60
DIPLONAT_EXPIRATION_TIME=300
DIPLONAT_CONSUL_NODE_NAME={{ env "attr.unique.hostname" }}
DIPLONAT_CONSUL_URL=https://consul.service.prod.consul:8501
DIPLONAT_CONSUL_TLS_SKIP_VERIFY=true
DIPLONAT_CONSUL_CLIENT_CERT=/etc/diplonat/consul-client.crt
DIPLONAT_CONSUL_CLIENT_KEY=/etc/diplonat/consul-client.key
RUST_LOG=debug
EOH
destination = "secrets/env"
env = true
}
resources {
memory = 100
memory_max = 200
}
}
}
# tricot: HTTP(S) reverse proxy / TLS terminator, one instance per site.
group "tricot" {
constraint {
distinct_property = "${meta.site}"
value = "1"
}
network {
port "http_port" { static = 80 }
port "https_port" { static = 443 }
port "metrics_port" { static = 9334 }
}
task "server" {
driver = "docker"
config {
image = "lxpz/amd64_tricot:47"
network_mode = "host"
readonly_rootfs = true
ports = [ "http_port", "https_port" ]
volumes = [
"secrets:/etc/tricot",
]
}
resources {
cpu = 1000
memory = 200
memory_max = 500
}
restart {
interval = "5m"
attempts = 10
delay = "15s"
mode = "delay"
}
template {
data = "{{ key \"secrets/consul/consul-ca.crt\" }}"
destination = "secrets/consul-ca.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.crt\" }}"
destination = "secrets/consul-client.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.key\" }}"
destination = "secrets/consul-client.key"
}
template {
data = <<EOH
TRICOT_NODE_NAME={{ env "attr.unique.hostname" }}
TRICOT_LETSENCRYPT_EMAIL=prod-sysadmin@deuxfleurs.fr
TRICOT_ENABLE_COMPRESSION=true
TRICOT_CONSUL_HOST=https://consul.service.prod.consul:8501
TRICOT_CONSUL_TLS_SKIP_VERIFY=true
TRICOT_CONSUL_CLIENT_CERT=/etc/tricot/consul-client.crt
TRICOT_CONSUL_CLIENT_KEY=/etc/tricot/consul-client.key
TRICOT_HTTP_BIND_ADDR=[::]:80
TRICOT_HTTPS_BIND_ADDR=[::]:443
TRICOT_METRICS_BIND_ADDR=[::]:9334
RUST_LOG=tricot=debug
EOH
destination = "secrets/env"
env = true
}
# "(diplonat ...)" tags ask diplonat to open the ports; "d53-*" tags
# ask the d53 service to publish DNS records for the node's site.
service {
name = "tricot-http"
port = "http_port"
tags = [ "(diplonat (tcp_port 80))", "${meta.site}" ]
address_mode = "host"
}
service {
name = "tricot-https"
port = "https_port"
tags = [
"(diplonat (tcp_port 443))",
"${meta.site}",
"d53-aaaa ${meta.site}.site.deuxfleurs.fr",
"d53-a global.site.deuxfleurs.fr",
"d53-aaaa global.site.deuxfleurs.fr",
]
address_mode = "host"
}
service {
name = "tricot-metrics"
port = "metrics_port"
address_mode = "host"
}
}
}
# bottin: LDAP server backed by Consul KV, one instance per site.
group "bottin" {
constraint {
distinct_property = "${meta.site}"
value = "1"
}
network {
port "ldap_port" {
static = 389
to = 389
}
}
task "bottin" {
driver = "docker"
config {
image = "dxflrs/bottin:7h18i30cckckaahv87d3c86pn4a7q41z"
network_mode = "host"
readonly_rootfs = true
ports = [ "ldap_port" ]
volumes = [
"secrets/config.json:/config.json",
"secrets:/etc/bottin",
]
}
restart {
interval = "5m"
attempts = 10
delay = "15s"
mode = "delay"
}
resources {
memory = 100
memory_max = 200
}
template {
data = file("../config/bottin/config.json.tpl")
destination = "secrets/config.json"
}
template {
data = "{{ key \"secrets/consul/consul.crt\" }}"
destination = "secrets/consul.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.crt\" }}"
destination = "secrets/consul-client.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.key\" }}"
destination = "secrets/consul-client.key"
}
template {
data = <<EOH
CONSUL_HTTP_ADDR=https://consul.service.prod.consul:8501
CONSUL_HTTP_SSL=true
CONSUL_CACERT=/etc/bottin/consul.crt
CONSUL_CLIENT_CERT=/etc/bottin/consul-client.crt
CONSUL_CLIENT_KEY=/etc/bottin/consul-client.key
EOH
destination = "secrets/env"
env = true
}
# LDAP service registration with TCP health check and auto-restart
# after 3 consecutive failures.
service {
tags = [ "${meta.site}" ]
port = "ldap_port"
address_mode = "host"
name = "bottin"
check {
type = "tcp"
port = "ldap_port"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
}
}
}

View file

@ -1,4 +1,4 @@
job "core-service" {
job "core:d53" {
datacenters = ["neptune", "orion", "bespin"]
type = "service"
priority = 90

View file

@ -0,0 +1,71 @@
# Nomad system job running diplonat (firewall/NAT automation daemon)
# on every node of the listed datacenters.
job "core:diplonat" {
datacenters = ["orion", "neptune", "scorpio", "bespin"]
type = "system"
priority = 90
# Roll out two allocations at a time, one minute apart.
update {
max_parallel = 2
stagger = "1m"
}
group "diplonat" {
task "diplonat" {
driver = "docker"
config {
image = "lxpz/amd64_diplonat:6"
network_mode = "host"
readonly_rootfs = true
# Privileged: needs to manipulate the host's network configuration.
privileged = true
volumes = [
"secrets:/etc/diplonat",
]
}
restart {
interval = "5m"
attempts = 10
delay = "15s"
mode = "delay"
}
# Consul TLS material from the secrets KV tree.
template {
data = "{{ key \"secrets/consul/consul.crt\" }}"
destination = "secrets/consul.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.crt\" }}"
destination = "secrets/consul-client.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.key\" }}"
destination = "secrets/consul-client.key"
}
# Runtime environment; the bespin site is IPv6-only, selected via the
# meta.site node attribute at template render time.
template {
data = <<EOH
DIPLONAT_REFRESH_TIME=60
DIPLONAT_EXPIRATION_TIME=300
DIPLONAT_CONSUL_NODE_NAME={{ env "attr.unique.hostname" }}
DIPLONAT_CONSUL_URL=https://consul.service.prod.consul:8501
DIPLONAT_CONSUL_TLS_SKIP_VERIFY=true
DIPLONAT_CONSUL_CLIENT_CERT=/etc/diplonat/consul-client.crt
DIPLONAT_CONSUL_CLIENT_KEY=/etc/diplonat/consul-client.key
{{ if env "meta.site" | eq "bespin" }}
DIPLONAT_IPV6_ONLY=true
{{ end }}
RUST_LOG=debug
EOH
destination = "secrets/env"
env = true
}
resources {
memory = 100
memory_max = 200
}
}
}
}

View file

@ -0,0 +1,109 @@
# Nomad system job running tricot (HTTP/HTTPS reverse proxy and TLS
# terminator) with at most one instance per site.
job "core:tricot" {
datacenters = ["orion", "neptune", "scorpio", "bespin"]
type = "system"
priority = 90
# Conservative rollout: one allocation at a time, 5 minutes apart.
update {
max_parallel = 1
stagger = "5m"
}
group "tricot" {
# At most one tricot allocation per site.
constraint {
distinct_property = "${meta.site}"
value = "1"
}
network {
port "http_port" { static = 80 }
port "https_port" { static = 443 }
port "metrics_port" { static = 9334 }
}
task "server" {
driver = "docker"
config {
image = "lxpz/amd64_tricot:47"
network_mode = "host"
readonly_rootfs = true
ports = [ "http_port", "https_port" ]
volumes = [
"secrets:/etc/tricot",
]
}
resources {
cpu = 1000
memory = 200
memory_max = 500
}
restart {
interval = "5m"
attempts = 10
delay = "15s"
mode = "delay"
}
# Consul TLS material from the secrets KV tree.
template {
data = "{{ key \"secrets/consul/consul-ca.crt\" }}"
destination = "secrets/consul-ca.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.crt\" }}"
destination = "secrets/consul-client.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.key\" }}"
destination = "secrets/consul-client.key"
}
template {
data = <<EOH
TRICOT_NODE_NAME={{ env "attr.unique.hostname" }}
TRICOT_LETSENCRYPT_EMAIL=prod-sysadmin@deuxfleurs.fr
TRICOT_ENABLE_COMPRESSION=true
TRICOT_CONSUL_HOST=https://consul.service.prod.consul:8501
TRICOT_CONSUL_TLS_SKIP_VERIFY=true
TRICOT_CONSUL_CLIENT_CERT=/etc/tricot/consul-client.crt
TRICOT_CONSUL_CLIENT_KEY=/etc/tricot/consul-client.key
TRICOT_HTTP_BIND_ADDR=[::]:80
TRICOT_HTTPS_BIND_ADDR=[::]:443
TRICOT_METRICS_BIND_ADDR=[::]:9334
RUST_LOG=tricot=debug
EOH
destination = "secrets/env"
env = true
}
# "(diplonat ...)" tags ask diplonat to open the ports; "d53-*" tags
# ask the d53 service to publish DNS records for the node's site.
service {
name = "tricot-http"
port = "http_port"
tags = [ "(diplonat (tcp_port 80))", "${meta.site}" ]
address_mode = "host"
}
service {
name = "tricot-https"
port = "https_port"
tags = [
"(diplonat (tcp_port 443))",
"${meta.site}",
"d53-aaaa ${meta.site}.site.deuxfleurs.fr",
"d53-a global.site.deuxfleurs.fr",
"d53-aaaa global.site.deuxfleurs.fr",
]
address_mode = "host"
}
service {
name = "tricot-metrics"
port = "metrics_port"
address_mode = "host"
}
}
}
}

View file

@ -6,8 +6,10 @@ db_engine = "lmdb"
replication_mode = "3"
rpc_bind_addr = "[{{ env "meta.public_ipv6" }}]:3901"
rpc_public_addr = "[{{ env "meta.public_ipv6" }}]:3901"
{{ with $a := env "attr.unique.hostname" | printf "diplonat/autodiscovery/ipv6/%s" | key | parseJSON }}
rpc_bind_addr = "[{{ $a.address }}]:3901"
rpc_public_addr = "[{{ $a.address }}]:3901"
{{ end }}
rpc_secret = "{{ key "secrets/garage/rpc_secret" | trimSpace }}"
[consul_discovery]

View file

@ -1,84 +1,73 @@
{ config, pkgs, ... } @ args:
{
deuxfleurs.cluster_name = "prod";
deuxfleurs.clusterName = "prod";
# The IP range to use for the Wireguard overlay of this cluster
deuxfleurs.cluster_prefix = "10.83.0.0";
deuxfleurs.cluster_prefix_length = 16;
deuxfleurs.clusterPrefix = "10.83.0.0/16";
deuxfleurs.cluster_nodes = [
{
hostname = "concombre";
site_name = "neptune";
deuxfleurs.clusterNodes = {
"concombre" = {
siteName = "neptune";
publicKey = "VvXT0fPDfWsHxumZqVShpS33dJQAdpJ1E79ZbCBJP34=";
IP = "10.83.1.1";
address = "10.83.1.1";
endpoint = "77.207.15.215:33731";
}
{
hostname = "courgette";
site_name = "neptune";
};
"courgette" = {
siteName = "neptune";
publicKey = "goTkBJGmzrGDOAjUcdH9G0JekipqSMoaYQdB6IHnzi0=";
IP = "10.83.1.2";
address = "10.83.1.2";
endpoint = "77.207.15.215:33732";
}
{
hostname = "celeri";
site_name = "neptune";
};
"celeri" = {
siteName = "neptune";
publicKey = "oZDAb8LoLW87ktUHyFFec0VaIar97bqq47mGbdVqJ0U=";
IP = "10.83.1.3";
address = "10.83.1.3";
endpoint = "77.207.15.215:33733";
}
{
hostname = "dahlia";
site_name = "orion";
};
"dahlia" = {
siteName = "orion";
publicKey = "EtRoWBYCdjqgXX0L+uWLg8KxNfIK8k9OTh30tL19bXU=";
IP = "10.83.2.1";
address = "10.83.2.1";
endpoint = "82.66.80.201:33731";
}
{
hostname = "diplotaxis";
site_name = "orion";
};
"diplotaxis" = {
siteName = "orion";
publicKey = "HbLC938mysadMSOxWgq8+qrv+dBKzPP/43OMJp/3phA=";
IP = "10.83.2.2";
address = "10.83.2.2";
endpoint = "82.66.80.201:33732";
}
{
hostname = "doradille";
site_name = "orion";
};
"doradille" = {
siteName = "orion";
publicKey = "e1C8jgTj9eD20ywG08G1FQZ+Js3wMK/msDUE1wO3l1Y=";
IP = "10.83.2.3";
address = "10.83.2.3";
endpoint = "82.66.80.201:33733";
}
{
hostname = "df-ykl";
site_name = "bespin";
};
"df-ykl" = {
siteName = "bespin";
publicKey = "bIjxey/VhBgVrLa0FxN/KISOt2XFmQeSh1MPivUq9gg=";
IP = "10.83.3.1";
address = "10.83.3.1";
endpoint = "109.136.55.235:33731";
}
{
hostname = "df-ymf";
site_name = "bespin";
};
"df-ymf" = {
siteName = "bespin";
publicKey = "pUIKv8UBl586O7DBrHBsb9BgNU7WlYQ2r2RSNkD+JAQ=";
IP = "10.83.3.2";
address = "10.83.3.2";
endpoint = "109.136.55.235:33732";
}
{
hostname = "df-ymk";
site_name = "bespin";
};
"df-ymk" = {
siteName = "bespin";
publicKey = "VBmpo15iIJP7250NAsF+ryhZc3j+8TZFnE1Djvn5TXI=";
IP = "10.83.3.3";
address = "10.83.3.3";
endpoint = "109.136.55.235:33733";
}
{
hostname = "abricot";
site_name = "scorpio";
};
"abricot" = {
siteName = "scorpio";
publicKey = "Sm9cmNZ/BfWVPFflMO+fuyiera4r203b/dKhHTQmBFg=";
IP = "10.83.4.1";
address = "10.83.4.1";
endpoint = "82.65.41.110:33741";
}
];
};
};
# Bootstrap IPs for Consul cluster,
# these are IPs on the Wireguard overlay
@ -88,7 +77,7 @@
"10.83.3.1" # df-ykl
];
deuxfleurs.admin_accounts = {
deuxfleurs.adminAccounts = {
lx = [
# Keys for accessing nodes from outside
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJpaBZdYxHqMxhv2RExAOa7nkKhPBOHupMP3mYaZ73w9 lx@lindy"

View file

@ -8,12 +8,7 @@
boot.loader.timeout = 20;
boot.loader.efi.canTouchEfiVariables = true;
networking.hostName = "abricot";
deuxfleurs.network_interface = "eno1";
deuxfleurs.lan_ip = "192.168.1.41";
deuxfleurs.ipv6 = "2a01:e0a:e4:2dd0::41";
deuxfleurs.cluster_ip = "10.83.4.1";
deuxfleurs.is_raft_server = false;
deuxfleurs.hostName = "abricot";
deuxfleurs.staticIPv4.address = "192.168.1.41";
deuxfleurs.staticIPv6.address = "2a01:e0a:e4:2dd0::41";
}

View file

@ -8,12 +8,7 @@
boot.loader.timeout = 20;
boot.loader.efi.canTouchEfiVariables = true;
networking.hostName = "celeri";
deuxfleurs.network_interface = "enp0s31f6";
deuxfleurs.lan_ip = "192.168.1.33";
deuxfleurs.ipv6 = "2001:910:1204:1::33";
deuxfleurs.cluster_ip = "10.83.1.3";
deuxfleurs.is_raft_server = false;
deuxfleurs.hostName = "celeri";
deuxfleurs.staticIPv4.address = "192.168.1.33";
deuxfleurs.staticIPv6.address = "2001:910:1204:1::33";
}

View file

@ -8,12 +8,8 @@
boot.loader.timeout = 20;
boot.loader.efi.canTouchEfiVariables = true;
networking.hostName = "concombre";
deuxfleurs.network_interface = "enp0s31f6";
deuxfleurs.lan_ip = "192.168.1.31";
deuxfleurs.ipv6 = "2001:910:1204:1::31";
deuxfleurs.cluster_ip = "10.83.1.1";
deuxfleurs.is_raft_server = true;
deuxfleurs.hostName = "concombre";
deuxfleurs.staticIPv4.address = "192.168.1.31";
deuxfleurs.staticIPv6.address = "2001:910:1204:1::31";
deuxfleurs.isRaftServer = true;
}

View file

@ -8,12 +8,7 @@
boot.loader.timeout = 20;
boot.loader.efi.canTouchEfiVariables = true;
networking.hostName = "courgette";
deuxfleurs.network_interface = "enp0s31f6";
deuxfleurs.lan_ip = "192.168.1.32";
deuxfleurs.ipv6 = "2001:910:1204:1::32";
deuxfleurs.cluster_ip = "10.83.1.2";
deuxfleurs.is_raft_server = false;
deuxfleurs.hostName = "courgette";
deuxfleurs.staticIPv4.address = "192.168.1.32";
deuxfleurs.staticIPv6.address = "2001:910:1204:1::32";
}

View file

@ -7,12 +7,8 @@
boot.loader.systemd-boot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
networking.hostName = "dahlia";
deuxfleurs.network_interface = "enp0s31f6";
deuxfleurs.lan_ip = "192.168.1.11";
deuxfleurs.ipv6 = "2a01:e0a:28f:5e60::11";
deuxfleurs.cluster_ip = "10.83.2.1";
deuxfleurs.is_raft_server = true;
deuxfleurs.hostName = "dahlia";
deuxfleurs.staticIPv4.address = "192.168.1.11";
deuxfleurs.staticIPv6.address = "2a01:e0a:28f:5e60::11";
deuxfleurs.isRaftServer = true;
}

View file

@ -7,14 +7,10 @@
boot.loader.systemd-boot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
networking.hostName = "df-ykl";
deuxfleurs.network_interface = "enp0s31f6";
deuxfleurs.lan_ip = "192.168.5.117";
deuxfleurs.ipv6 = "2a02:a03f:6510:5102:6e4b:90ff:fe3b:e86c";
deuxfleurs.cluster_ip = "10.83.3.1";
deuxfleurs.is_raft_server = true;
deuxfleurs.hostName = "df-ykl";
deuxfleurs.staticIPv4.address = "192.168.5.117";
deuxfleurs.staticIPv6.address = "2a02:a03f:6510:5102:6e4b:90ff:fe3b:e86c";
deuxfleurs.isRaftServer = true;
fileSystems."/mnt" = {
device = "/dev/disk/by-uuid/f7aa396f-23d0-44d3-89cf-3cb00bbb6c3b";

View file

@ -7,14 +7,9 @@
boot.loader.systemd-boot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
networking.hostName = "df-ymf";
deuxfleurs.network_interface = "enp0s31f6";
deuxfleurs.lan_ip = "192.168.5.134";
deuxfleurs.ipv6 = "2a02:a03f:6510:5102:6e4b:90ff:fe3a:6174";
deuxfleurs.cluster_ip = "10.83.3.2";
deuxfleurs.is_raft_server = false;
deuxfleurs.hostName = "df-ymf";
deuxfleurs.staticIPv4.address = "192.168.5.134";
deuxfleurs.staticIPv6.address = "2a02:a03f:6510:5102:6e4b:90ff:fe3a:6174";
fileSystems."/mnt" = {
device = "/dev/disk/by-uuid/fec20a7e-5019-4747-8f73-77f3f196c122";

View file

@ -7,14 +7,9 @@
boot.loader.systemd-boot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
networking.hostName = "df-ymk";
deuxfleurs.network_interface = "enp0s31f6";
deuxfleurs.lan_ip = "192.168.5.116";
deuxfleurs.ipv6 = "2a02:a03f:6510:5102:6e4b:90ff:fe3b:e939";
deuxfleurs.cluster_ip = "10.83.3.3";
deuxfleurs.is_raft_server = false;
deuxfleurs.hostName = "df-ymk";
deuxfleurs.staticIPv4.address = "192.168.5.116";
deuxfleurs.staticIPv6.address = "2a02:a03f:6510:5102:6e4b:90ff:fe3b:e939";
fileSystems."/mnt" = {
device = "/dev/disk/by-uuid/51d95b17-0e06-4a73-9e4e-ae5363cc4015";

View file

@ -8,12 +8,7 @@
boot.loader.grub.version = 2;
boot.loader.grub.device = "/dev/nvme0n1"; # or "nodev" for efi only
networking.hostName = "diplotaxis";
deuxfleurs.network_interface = "enp0s31f6";
deuxfleurs.lan_ip = "192.168.1.12";
deuxfleurs.ipv6 = "2a01:e0a:28f:5e60::12";
deuxfleurs.cluster_ip = "10.83.2.2";
deuxfleurs.is_raft_server = false;
deuxfleurs.hostName = "diplotaxis";
deuxfleurs.staticIPv4.address = "192.168.1.12";
deuxfleurs.staticIPv6.address = "2a01:e0a:28f:5e60::12";
}

View file

@ -8,12 +8,7 @@
boot.loader.grub.version = 2;
boot.loader.grub.device = "/dev/nvme0n1"; # or "nodev" for efi only
networking.hostName = "doradille";
deuxfleurs.network_interface = "enp0s31f6";
deuxfleurs.lan_ip = "192.168.1.13";
deuxfleurs.ipv6 = "2a01:e0a:28f:5e60::13";
deuxfleurs.cluster_ip = "10.83.2.3";
deuxfleurs.is_raft_server = false;
deuxfleurs.hostName = "doradille";
deuxfleurs.staticIPv4.address = "192.168.1.13";
deuxfleurs.staticIPv6.address = "2a01:e0a:28f:5e60::13";
}

View file

@ -0,0 +1,41 @@
#!/usr/bin/env bash
# Register external (non-Nomad) services in the Consul catalog so that
# tricot can route traffic to them. Must run against a local Consul agent
# listening on localhost:8500. Each registration is idempotent (PUT).

# Bruxelles (bespin): git forge at git.deuxfleurs.fr
curl -vv -X PUT http://localhost:8500/v1/catalog/register -H "Content-Type: application/json" --data @- <<EOF
{
"Datacenter": "prod",
"Node": "gitea",
"Address": "192.168.5.200",
"NodeMeta": {
"site": "bespin",
"cname_target": "bespin.site.deuxfleurs.fr."
},
"Service": {
"Service": "gitea",
"Tags": ["tricot git.deuxfleurs.fr"],
"Address": "192.168.5.200",
"Port": 3001
}
}
EOF
# Lille (scorpio): ADRN's personal services under luxeylab.net
curl -vv -X PUT http://localhost:8500/v1/catalog/register -H "Content-Type: application/json" --data @- <<EOF
{
"Datacenter": "prod",
"Node": "spoutnik",
"Address": "192.168.1.60",
"NodeMeta": {
"site": "scorpio",
"cname_target": "scorpio.site.deuxfleurs.fr."
},
"Service": {
"Service": "adrien-nginx",
"Tags": ["tricot-https *.luxeylab.net"],
"Address": "192.168.1.60",
"Port": 443
}
}
EOF

View file

@ -1,17 +0,0 @@
#!/usr/bin/env bash
curl -vv -X PUT http://localhost:8500/v1/catalog/register -H "Content-Type: application/json" --data @- <<EOF
{
"Datacenter": "prod",
"Node": "spoutnik",
"Address": "192.168.1.60",
"NodeMeta": { "somekey": "bidon" },
"Service": {
"Service": "adrien-nginx",
"Tags": ["tricot-https *.luxeylab.net"],
"Address": "192.168.1.60",
"Port": 443
}
}
EOF

View file

@ -1,13 +1,7 @@
{ config, pkgs, ... }:
{
deuxfleurs.site_name = "bespin";
deuxfleurs.lan_default_gateway = "192.168.5.254";
deuxfleurs.ipv6_default_gateway = "2a02:a03f:6510:5102::1";
deuxfleurs.lan_ip_prefix_length = 24;
deuxfleurs.ipv6_prefix_length = 64;
deuxfleurs.nameservers = [ "192.168.5.254" ];
deuxfleurs.cname_target = "bespin.site.deuxfleurs.fr.";
networking.firewall.allowedTCPPorts = [ 80 443 ];
deuxfleurs.siteName = "bespin";
deuxfleurs.staticIPv4.defaultGateway = "192.168.5.254";
deuxfleurs.cnameTarget = "bespin.site.deuxfleurs.fr.";
}

View file

@ -1,14 +1,8 @@
{ config, pkgs, ... }:
{
deuxfleurs.site_name = "neptune";
deuxfleurs.lan_default_gateway = "192.168.1.1";
deuxfleurs.ipv6_default_gateway = "2001:910:1204:1::1";
deuxfleurs.lan_ip_prefix_length = 24;
deuxfleurs.ipv6_prefix_length = 64;
deuxfleurs.nameservers = [ "192.168.1.1" ];
deuxfleurs.cname_target = "neptune.site.deuxfleurs.fr.";
deuxfleurs.public_ipv4 = "77.207.15.215";
networking.firewall.allowedTCPPorts = [ 80 443 ];
deuxfleurs.siteName = "neptune";
deuxfleurs.staticIPv4.defaultGateway = "192.168.1.1";
deuxfleurs.cnameTarget = "neptune.site.deuxfleurs.fr.";
deuxfleurs.publicIPv4 = "77.207.15.215";
}

View file

@ -1,14 +1,8 @@
{ config, pkgs, ... }:
{
deuxfleurs.site_name = "orion";
deuxfleurs.lan_default_gateway = "192.168.1.254";
deuxfleurs.ipv6_default_gateway = "2a01:e0a:28f:5e60::1";
deuxfleurs.lan_ip_prefix_length = 24;
deuxfleurs.ipv6_prefix_length = 64;
deuxfleurs.nameservers = [ "192.168.1.254" ];
deuxfleurs.cname_target = "orion.site.deuxfleurs.fr.";
deuxfleurs.public_ipv4 = "82.66.80.201";
networking.firewall.allowedTCPPorts = [ 80 443 ];
deuxfleurs.siteName = "orion";
deuxfleurs.staticIPv4.defaultGateway = "192.168.1.254";
deuxfleurs.cnameTarget = "orion.site.deuxfleurs.fr.";
deuxfleurs.publicIPv4 = "82.66.80.201";
}

View file

@ -1,14 +1,8 @@
{ config, pkgs, ... }:
{
deuxfleurs.site_name = "scorpio";
deuxfleurs.lan_default_gateway = "192.168.1.254";
deuxfleurs.ipv6_default_gateway = "2a01:e0a:e4:2dd0::1";
deuxfleurs.lan_ip_prefix_length = 24;
deuxfleurs.ipv6_prefix_length = 64;
deuxfleurs.nameservers = [ "192.168.1.254" ];
deuxfleurs.cname_target = "scorpio.site.deuxfleurs.fr.";
deuxfleurs.public_ipv4 = "82.65.41.110";
networking.firewall.allowedTCPPorts = [ 80 443 ];
deuxfleurs.siteName = "scorpio";
deuxfleurs.staticIPv4.defaultGateway = "192.168.1.254";
deuxfleurs.cnameTarget = "scorpio.site.deuxfleurs.fr.";
deuxfleurs.publicIPv4 = "82.65.41.110";
}

View file

@ -1,4 +1,4 @@
job "core-service" {
job "core:d53" {
datacenters = ["neptune", "jupiter", "corrin", "bespin"]
type = "service"
priority = 90
@ -11,7 +11,7 @@ job "core-service" {
config {
packages = [
"git+https://git.deuxfleurs.fr/lx/D53.git?ref=main&rev=86c255dfeabc60b0ef46ff78bc487c61c9548c79"
"git+https://git.deuxfleurs.fr/lx/D53.git?ref=diplonat-autodiscovery&rev=49d94dae1d753c1f3349be7ea9bc7e7978c0af15"
]
command = "d53"
}
@ -52,7 +52,7 @@ D53_CONSUL_CLIENT_KEY=/etc/tricot/consul-client.key
D53_PROVIDERS=deuxfleurs.org:gandi
D53_GANDI_API_KEY={{ key "secrets/d53/gandi_api_key" }}
D53_ALLOWED_DOMAINS=staging.deuxfleurs.org
RUST_LOG=d53=info
RUST_LOG=d53=debug
EOH
destination = "secrets/env"
env = true

View file

@ -0,0 +1,75 @@
job "core:diplonat" {
datacenters = ["neptune", "jupiter", "corrin", "bespin"]
type = "system"
priority = 90
constraint {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
update {
max_parallel = 3
stagger = "20s"
}
group "diplonat" {
task "diplonat" {
driver = "nix2"
config {
packages = [
"#iptables",
"#bash",
"#coreutils",
"git+https://git.deuxfleurs.fr/Deuxfleurs/diplonat.git?ref=stun&rev=f5fc635b75dfa17b83a8db4893a7be206b4f9892"
]
command = "diplonat"
}
user = "root"
restart {
interval = "30m"
attempts = 2
delay = "15s"
mode = "delay"
}
template {
data = "{{ key \"secrets/consul/consul-ca.crt\" }}"
destination = "etc/diplonat/consul-ca.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.crt\" }}"
destination = "etc/diplonat/consul-client.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.key\" }}"
destination = "etc/diplonat/consul-client.key"
}
template {
data = <<EOH
DIPLONAT_REFRESH_TIME=60
DIPLONAT_EXPIRATION_TIME=300
DIPLONAT_IPV6_ONLY=true
DIPLONAT_CONSUL_NODE_NAME={{ env "attr.unique.hostname" }}
DIPLONAT_CONSUL_URL=https://localhost:8501
DIPLONAT_CONSUL_CA_CERT=/etc/diplonat/consul-ca.crt
DIPLONAT_CONSUL_CLIENT_CERT=/etc/diplonat/consul-client.crt
DIPLONAT_CONSUL_CLIENT_KEY=/etc/diplonat/consul-client.key
RUST_LOG=debug
RUST_BACKTRACE=1
EOH
destination = "secrets/env"
env = true
}
resources {
memory = 100
}
}
}
}

View file

@ -1,4 +1,4 @@
job "core-system" {
job "core:tricot" {
datacenters = ["neptune", "jupiter", "corrin", "bespin"]
type = "system"
priority = 90
@ -13,64 +13,6 @@ job "core-system" {
stagger = "1m"
}
/*
group "diplonat" {
task "diplonat" {
driver = "nix2"
config {
packages = [
"#iptables",
"git+https://git.deuxfleurs.fr/Deuxfleurs/diplonat.git?ref=main&rev=f306e8dc8d0e93478353ce39b6064e8c06a8bca6"
]
command = "diplonat"
}
user = "root"
restart {
interval = "30m"
attempts = 2
delay = "15s"
mode = "delay"
}
template {
data = "{{ key \"secrets/consul/consul-ca.crt\" }}"
destination = "etc/diplonat/consul-ca.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.crt\" }}"
destination = "etc/diplonat/consul-client.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.key\" }}"
destination = "etc/diplonat/consul-client.key"
}
template {
data = <<EOH
DIPLONAT_REFRESH_TIME=60
DIPLONAT_EXPIRATION_TIME=300
DIPLONAT_CONSUL_NODE_NAME={{ env "attr.unique.hostname" }}
DIPLONAT_CONSUL_URL=https://localhost:8501
DIPLONAT_CONSUL_CA_CERT=/etc/diplonat/consul-ca.crt
DIPLONAT_CONSUL_CLIENT_CERT=/etc/diplonat/consul-client.crt
DIPLONAT_CONSUL_CLIENT_KEY=/etc/diplonat/consul-client.key
RUST_LOG=debug
EOH
destination = "secrets/env"
env = true
}
resources {
memory = 40
}
}
}
*/
group "tricot" {
network {
port "http_port" { static = 80 }
@ -130,6 +72,7 @@ TRICOT_HTTP_BIND_ADDR=[::]:80
TRICOT_HTTPS_BIND_ADDR=[::]:443
TRICOT_METRICS_BIND_ADDR=[::]:9334
RUST_LOG=tricot=debug
RUST_BACKTRACE=1
EOH
destination = "secrets/env"
env = true
@ -141,7 +84,7 @@ EOH
tags = [
"d53-aaaa ${meta.site}.site.staging.deuxfleurs.org",
"d53-aaaa staging.deuxfleurs.org",
# "(diplonat (tcp_port 80))"
"(diplonat (tcp_port 80))"
]
address_mode = "host"
}
@ -150,7 +93,7 @@ EOH
name = "tricot-https"
port = "https_port"
tags = [
# "(diplonat (tcp_port 443))"
"(diplonat (tcp_port 443))"
]
address_mode = "host"
}

View file

@ -6,8 +6,10 @@ db_engine = "lmdb"
replication_mode = "3"
rpc_bind_addr = "[{{ env "meta.public_ipv6" }}]:3991"
rpc_public_addr = "[{{ env "meta.public_ipv6" }}]:3991"
{{ with $a := env "attr.unique.hostname" | printf "diplonat/autodiscovery/ipv6/%s" | key | parseJSON }}
rpc_bind_addr = "[{{ $a.address }}]:3991"
rpc_public_addr = "[{{ $a.address }}]:3991"
{{ end }}
rpc_secret = "{{ key "secrets/garage-staging/rpc_secret" | trimSpace }}"
bootstrap_peers = []

View file

@ -25,6 +25,7 @@ job "garage-staging" {
config {
packages = [
"#bash", # so that we can enter a shell inside container
"#coreutils",
"git+https://git.deuxfleurs.fr/Deuxfleurs/garage.git?ref=main&rev=0d0906b066eb76111f3b427dce1c50eac083366c",
]
command = "garage"

View file

@ -1,60 +1,56 @@
{ config, pkgs, ... } @ args:
{
deuxfleurs.cluster_name = "staging";
deuxfleurs.clusterName = "staging";
# The IP range to use for the Wireguard overlay of this cluster
deuxfleurs.cluster_prefix = "10.14.0.0";
deuxfleurs.cluster_prefix_length = 16;
deuxfleurs.clusterPrefix = "10.14.0.0/16";
deuxfleurs.cluster_nodes = [
{
hostname = "carcajou";
site_name = "neptune";
deuxfleurs.clusterNodes = {
"carcajou" = {
siteName = "neptune";
publicKey = "7Nm7pMmyS7Nts1MB+loyD8u84ODxHPTkDu+uqQR6yDk=";
IP = "10.14.1.2";
address = "10.14.1.2";
endpoint = "77.207.15.215:33722";
}
{
hostname = "caribou";
site_name = "neptune";
};
"caribou" = {
siteName = "neptune";
publicKey = "lABn/axzD1jkFulX8c+K3B3CbKXORlIMDDoe8sQVxhs=";
IP = "10.14.1.3";
address = "10.14.1.3";
endpoint = "77.207.15.215:33723";
}
{
hostname = "origan";
site_name = "jupiter";
};
"origan" = {
siteName = "jupiter";
publicKey = "smBQYUS60JDkNoqkTT7TgbpqFiM43005fcrT6472llI=";
IP = "10.14.2.33";
address = "10.14.2.33";
endpoint = "82.64.238.84:33733";
}
{
hostname = "piranha";
site_name = "corrin";
};
"piranha" = {
siteName = "corrin";
publicKey = "m9rLf+233X1VColmeVrM/xfDGro5W6Gk5N0zqcf32WY=";
IP = "10.14.3.1";
address = "10.14.3.1";
#endpoint = "82.120.233.78:33721";
}
{
hostname = "df-pw5";
site_name = "bespin";
};
"df-pw5" = {
siteName = "bespin";
publicKey = "XLOYoMXF+PO4jcgfSVAk+thh4VmWx0wzWnb0xs08G1s=";
IP = "10.14.4.1";
address = "10.14.4.1";
endpoint = "bitfrost.fiber.shirokumo.net:33734";
}
];
};
};
deuxfleurs.wgautomeshPort = 1667;
services.wgautomesh.logLevel = "debug";
# Bootstrap IPs for Consul cluster,
# these are IPs on the Wireguard overlay
services.consul.extraConfig.retry_join = [
"10.14.1.1" # cariacou
"10.14.1.2" # carcajou
"10.14.1.3" # caribou
"10.14.2.33" # origan
"10.14.3.1" # piranha
];
deuxfleurs.admin_accounts = {
deuxfleurs.adminAccounts = {
lx = [
# Keys for accessing nodes from outside
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJpaBZdYxHqMxhv2RExAOa7nkKhPBOHupMP3mYaZ73w9 lx@lindy"
@ -142,16 +138,16 @@
enable = true;
port = substituter_port;
openFirewall = false;
bindAddress = config.deuxfleurs.cluster_ip;
bindAddress = "0.0.0.0";
package = pkgs.haskellPackages.nix-serve-ng;
};
nix.settings.substituters = map
({ IP, ... }: "http://${IP}:${builtins.toString substituter_port}")
(builtins.filter
({ site_name, IP, ...}:
(IP != config.deuxfleurs.cluster_ip
&& site_name == config.deuxfleurs.site_name))
config.deuxfleurs.cluster_nodes);
({ address, ... }: "http://${address}:${builtins.toString substituter_port}")
(builtins.attrValues (pkgs.lib.filterAttrs
(hostname: { siteName, ...}:
(hostname != config.deuxfleurs.hostName
&& siteName == config.deuxfleurs.siteName))
config.deuxfleurs.clusterNodes));
})
];
}

View file

@ -9,3 +9,4 @@ piranha.polyno.me,2a01:cb05:8984:3c00:223:24ff:feb0:ea82 ssh-ed25519 AAAAC3NzaC1
2a01:e0a:5e4:1d0:223:24ff:feaf:fdec ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAsZas74RT6lCZwuUOPR23nPdbSdpWORyAmRgjoiMVHK
df-pw5.machine.deuxfleurs.fr ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIK/dJIxioCkfeehxeGiZR7qquYGoqEH/YrRJ/ukEcaLH
10.14.3.1 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJnpO6zpLWsyyugOoOj+2bUow9TUrcWgURFGGaoyu+co
192.168.1.22 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMf/ioVSSb19Slu+HZLgKt4f1/XsL+K9uMxazSWb/+nQ

View file

@ -8,18 +8,19 @@
./remote-unlock.nix
];
deuxfleurs.remoteUnlock = {
networkInterface = "eno1";
staticIP = "192.168.1.22/24";
defaultGateway = "192.168.1.1";
};
# Use the systemd-boot EFI boot loader.
boot.loader.systemd-boot.enable = true;
boot.loader.timeout = 20;
boot.loader.efi.canTouchEfiVariables = true;
networking.hostName = "carcajou";
deuxfleurs.network_interface = "eno1";
deuxfleurs.lan_ip = "192.168.1.22";
deuxfleurs.ipv6 = "2001:910:1204:1::22";
deuxfleurs.cluster_ip = "10.14.1.2";
deuxfleurs.hostName = "carcajou";
deuxfleurs.staticIPv6.address = "2001:910:1204:1::22";
system.stateVersion = "21.05";
}

View file

@ -8,14 +8,9 @@
boot.loader.timeout = 20;
boot.loader.efi.canTouchEfiVariables = true;
networking.hostName = "caribou";
deuxfleurs.network_interface = "eno1";
deuxfleurs.lan_ip = "192.168.1.23";
deuxfleurs.ipv6 = "2001:910:1204:1::23";
deuxfleurs.cluster_ip = "10.14.1.3";
deuxfleurs.is_raft_server = true;
deuxfleurs.hostName = "caribou";
deuxfleurs.staticIPv6.address = "2001:910:1204:1::23";
deuxfleurs.isRaftServer = true;
system.stateVersion = "21.05";
}

View file

@ -9,12 +9,9 @@
boot.loader.efi.efiSysMountPoint = "/boot";
boot.loader.timeout = 20;
networking.hostName = "df-pw5";
deuxfleurs.hostName = "df-pw5";
deuxfleurs.staticIPv4.address = "192.168.5.130";
deuxfleurs.staticIPv6.address = "2a02:a03f:6510:5102:223:24ff:feb0:e8a7";
deuxfleurs.network_interface = "eno1";
deuxfleurs.lan_ip = "192.168.5.130";
deuxfleurs.ipv6 = "2a02:a03f:6510:5102:223:24ff:feb0:e8a7";
deuxfleurs.cluster_ip = "10.14.4.1";
deuxfleurs.is_raft_server = false;
system.stateVersion = "22.11";
}

View file

@ -8,14 +8,10 @@
boot.loader.timeout = 20;
boot.loader.efi.canTouchEfiVariables = true;
networking.hostName = "origan";
deuxfleurs.network_interface = "eno1";
deuxfleurs.lan_ip = "192.168.1.33";
deuxfleurs.ipv6 = "2a01:e0a:5e4:1d0:223:24ff:feaf:fdec";
deuxfleurs.cluster_ip = "10.14.2.33";
deuxfleurs.is_raft_server = true;
deuxfleurs.hostName = "origan";
deuxfleurs.staticIPv4.address = "192.168.1.33";
deuxfleurs.staticIPv6.address = "2a01:e0a:5e4:1d0:223:24ff:feaf:fdec";
deuxfleurs.isRaftServer = true;
system.stateVersion = "22.11";
}

View file

@ -8,14 +8,10 @@
boot.loader.timeout = 20;
boot.loader.efi.canTouchEfiVariables = true;
networking.hostName = "piranha";
deuxfleurs.network_interface = "eno1";
deuxfleurs.lan_ip = "192.168.1.25";
deuxfleurs.ipv6 = "2a01:cb05:9142:7400:223:24ff:feb0:ea82";
deuxfleurs.cluster_ip = "10.14.3.1";
deuxfleurs.is_raft_server = true;
deuxfleurs.hostName = "piranha";
deuxfleurs.staticIPv4.address = "192.168.1.25";
deuxfleurs.staticIPv6.address = "2a01:cb05:9142:7400:223:24ff:feb0:ea82";
deuxfleurs.isRaftServer = true;
system.stateVersion = "22.11";
}

View file

@ -1,13 +1,7 @@
{ config, pkgs, ... }:
{
deuxfleurs.site_name = "bespin";
deuxfleurs.lan_default_gateway = "192.168.5.254";
deuxfleurs.ipv6_default_gateway = "2a02:a03f:6510:5102::1";
deuxfleurs.lan_ip_prefix_length = 24;
deuxfleurs.ipv6_prefix_length = 64;
deuxfleurs.nameservers = [ "192.168.5.254" ];
deuxfleurs.cname_target = "bespin.site.staging.deuxfleurs.org.";
networking.firewall.allowedTCPPorts = [ 80 443 ];
deuxfleurs.siteName = "bespin";
deuxfleurs.staticIPv4.defaultGateway = "192.168.5.254";
deuxfleurs.cnameTarget = "bespin.site.staging.deuxfleurs.org.";
}

View file

@ -1,14 +1,8 @@
{ config, pkgs, ... }:
{
deuxfleurs.site_name = "corrin";
deuxfleurs.lan_default_gateway = "192.168.1.1";
deuxfleurs.ipv6_default_gateway = "fe80::7ec1:77ff:fe3e:bb90";
deuxfleurs.lan_ip_prefix_length = 24;
deuxfleurs.ipv6_prefix_length = 64;
deuxfleurs.nameservers = [ "192.168.1.1" ];
deuxfleurs.cname_target = "corrin.site.staging.deuxfleurs.org.";
deuxfleurs.public_ipv4 = "2.13.96.213";
networking.firewall.allowedTCPPorts = [ 80 443 ];
deuxfleurs.siteName = "corrin";
deuxfleurs.staticIPv4.defaultGateway = "192.168.1.1";
deuxfleurs.cnameTarget = "corrin.site.staging.deuxfleurs.org.";
deuxfleurs.publicIPv4 = "2.13.96.213";
}

View file

@ -1,16 +1,7 @@
{ config, pkgs, ... }:
{
deuxfleurs.site_name = "jupiter";
deuxfleurs.lan_default_gateway = "192.168.1.1";
deuxfleurs.ipv6_default_gateway = "fe80::9038:202a:73a0:e73b";
deuxfleurs.lan_ip_prefix_length = 24;
deuxfleurs.ipv6_prefix_length = 64;
deuxfleurs.nameservers = [ "192.168.1.1" ];
deuxfleurs.cname_target = "jupiter.site.staging.deuxfleurs.org.";
# no public ipv4 is used for the staging cluster on Jupiter
# deuxfleurs.public_ipv4 = "???";
networking.firewall.allowedTCPPorts = [ 80 443 ];
deuxfleurs.siteName = "jupiter";
deuxfleurs.staticIPv4.defaultGateway = "192.168.1.1";
deuxfleurs.cnameTarget = "jupiter.site.staging.deuxfleurs.org.";
}

View file

@ -1,17 +1,6 @@
{ config, pkgs, ... }:
{
deuxfleurs.site_name = "neptune";
deuxfleurs.lan_default_gateway = "192.168.1.1";
deuxfleurs.ipv6_default_gateway = "2001:910:1204:1::1";
deuxfleurs.lan_ip_prefix_length = 24;
deuxfleurs.ipv6_prefix_length = 64;
deuxfleurs.nameservers = [ "192.168.1.1" ];
deuxfleurs.cname_target = "neptune.site.staging.deuxfleurs.org.";
# no public ipv4 is used for the staging cluster on Neptune,
# because the Internet connection is already used for the prod cluster
# deuxfleurs.public_ipv4 = "77.207.15.215";
networking.firewall.allowedTCPPorts = [ 80 443 ];
deuxfleurs.siteName = "neptune";
deuxfleurs.cnameTarget = "neptune.site.staging.deuxfleurs.org.";
}

View file

@ -10,7 +10,7 @@ Host origan
HostName origan.df.trinity.fr.eu.org
Host piranha
ProxyJump caribou.machine.deuxfleurs.fr
ProxyJump carcajou.machine.deuxfleurs.fr
HostName 10.14.3.1
#HostName piranha.polyno.me

View file

@ -36,9 +36,6 @@
boot.kernel.sysctl = {
"vm.max_map_count" = 262144;
"net.ipv6.conf.all.accept_ra" = 0;
"net.ipv6.conf.all.autoconf" = 0;
"net.ipv6.conf.all.use_tempaddr" = 0;
};
services.journald.extraConfig = ''

View file

@ -6,19 +6,86 @@ in
with builtins;
with pkgs.lib;
{
options.deuxfleurs =
let wg_node = with types; submodule {
options = {
hostname = mkOption {
options.deuxfleurs = with types; {
# Parameters for individual nodes
hostName = mkOption {
description = "Node name";
type = str;
description = "Host name";
};
site_name = mkOption {
staticIPv4.address = mkOption {
description = "IP address (with prefix length) of this node on the local network interface";
type = nullOr str;
default = null;
};
staticIPv6.address = mkOption {
description = "Static public IPv6 address of this node";
type = nullOr str;
};
isRaftServer = mkOption {
description = "Make this node a RAFT server for the Nomad and Consul deployments";
type = bool;
default = false;
};
# Parameters that generally vary between sites
siteName = mkOption {
description = "Site (availability zone) on which this node is deployed";
type = str;
};
staticIPv4.defaultGateway = mkOption {
description = "IPv4 address of the default route on the local network interface";
type = nullOr str;
default = null;
};
staticIPv4.prefixLength = mkOption {
description = "IPv4 prefix length for LAN addresses, only used with static configuration";
type = int;
default = 24;
};
staticIPv6.defaultGateway = mkOption {
description = ''
IPv6 address of the default route on the local network interface.
IPv6 Router Advertisements (RA) will be totally disabled if this is set.
'';
type = nullOr str;
default = null;
};
staticIPv6.prefixLength = mkOption {
description = "IPv6 prefix length, used only when router advertisements are disabled.";
type = int;
default = 64;
};
publicIPv4 = mkOption {
description = "Public IPv4 through which this node is accessible (possibly after port opening using DiploNAT), for domain names that are updated by D53";
type = nullOr str;
default = null;
};
cnameTarget = mkOption {
description = "DNS CNAME target to use for services hosted in this site, for domain names that are updated by D53";
type = nullOr str;
default = null;
};
# Parameters common to all nodes
clusterName = mkOption {
description = "Name of this Deuxfleurs deployment";
type = str;
};
clusterPrefix = mkOption {
description = "IP address prefix (and length) for the Wireguard overlay network";
type = str;
};
clusterNodes = mkOption {
description = "Nodes that are part of the cluster";
type = attrsOf (submodule {
options = {
siteName = mkOption {
type = nullOr str;
description = "Site where the node is located";
default = null;
};
IP = mkOption {
address = mkOption {
type = str;
description = "IP Address in the Wireguard network";
};
@ -32,102 +99,29 @@ in
description = "Wireguard endpoint on the public Internet";
};
};
});
};
in
{
# Parameters for individual nodes
network_interface = mkOption {
description = "Network interface name to configure";
type = types.str;
};
lan_ip = mkOption {
description = "IP address of this node on the local network interface";
type = types.str;
};
lan_ip_prefix_length = mkOption {
description = "Prefix length associated with lan_ip";
type = types.int;
};
ipv6 = mkOption {
description = "Public IPv6 address of this node";
type = types.str;
};
ipv6_prefix_length = mkOption {
description = "Prefix length associated with ipv6 ip";
type = types.int;
};
cluster_ip = mkOption {
description = "IP address of this node on the Wesher mesh network";
type = types.str;
};
wireguard_port = mkOption {
description = "Port for incoming Wireguard VPN connections";
type = types.port;
default = 33799;
};
is_raft_server = mkOption {
description = "Make this node a RAFT server for the Nomad and Consul deployments";
type = types.bool;
default = false;
};
# Parameters that generally vary between sites
lan_default_gateway = mkOption {
description = "IPv4 address of the default route on the local network interface";
type = types.str;
};
ipv6_default_gateway = mkOption {
description = "IPv6 address of the default IPv6 gateway for the targeted net interface";
type = types.str;
};
site_name = mkOption {
description = "Site (availability zone) on which this node is deployed";
type = types.str;
};
public_ipv4 = mkOption {
description = "Public IPv4 through which this node is accessible (possibly after port opening using DiploNAT), for domain names that are updated by D53";
type = types.nullOr types.str;
default = null;
};
cname_target = mkOption {
description = "DNS CNAME target to use for services hosted in this site, for domain names that are updated by D53";
type = types.nullOr types.str;
default = null;
};
nameservers = mkOption {
description = "External DNS servers to use";
type = types.listOf types.str;
};
# Parameters common to all nodes
cluster_name = mkOption {
description = "Name of this Deuxfleurs deployment";
type = types.str;
};
cluster_prefix = mkOption {
description = "IP address prefix for the Wireguard overlay network";
type = types.str;
};
cluster_prefix_length = mkOption {
description = "IP address prefix length for the Wireguard overlay network";
type = types.int;
default = 16;
};
cluster_nodes = mkOption {
description = "Nodes that are part of the cluster";
type = types.listOf wg_node;
};
admin_accounts = mkOption {
adminAccounts = mkOption {
description = "List of users having an admin account on cluster nodes, maps user names to a list of authorized SSH keys";
type = types.attrsOf (types.listOf types.str);
type = attrsOf (listOf str);
};
bootstrap = mkOption {
description = "Whether to enable bootstrapping for Nomad and Consul";
type = types.bool;
type = bool;
default = false;
};
# Options that generally stay to their default value
wireguardPort = mkOption {
description = "Port for incoming Wireguard VPN connections";
type = port;
default = 33799;
};
wgautomeshPort = mkOption {
description = "Gossip port for wgautomesh";
type = port;
default = 1666;
};
};
imports = [
@ -135,69 +129,92 @@ in
];
config =
let node_meta = {
"site" = cfg.site_name;
"public_ipv6" = cfg.ipv6;
let
clusterNodeCfg = getAttr cfg.hostName cfg.clusterNodes;
clusterAddress = clusterNodeCfg.address;
node_meta = {
"site" = cfg.siteName;
} //
(if cfg.public_ipv4 != null
then { "public_ipv4" = cfg.public_ipv4; }
(if cfg.staticIPv6.address != null
then { "public_ipv6" = cfg.staticIPv6.address; }
else {}) //
(if cfg.cname_target != null
then { "cname_target" = cfg.cname_target; }
(if cfg.publicIPv4 != null
then { "public_ipv4" = cfg.publicIPv4; }
else {}) //
(if cfg.cnameTarget != null
then { "cname_target" = cfg.cnameTarget; }
else {});
in
{
networking.hostName = cfg.hostName;
# Configure admin accounts on all nodes
users.users = builtins.mapAttrs (name: publicKeys: {
users.users = mapAttrs (name: publicKeys: {
isNormalUser = true;
extraGroups = [ "wheel" ];
openssh.authorizedKeys.keys = publicKeys;
}) cfg.admin_accounts;
}) cfg.adminAccounts;
# Configure network interfaces
networking.useDHCP = false;
networking.useNetworkd = true;
systemd.network.networks = {
"10-uplink" = {
matchConfig = {
# We could preprend "en* eth*" to match all ethernet interfaces
Name = "${cfg.network_interface}";
};
networkConfig = {
IPv6AcceptRA = false;
LinkLocalAddressing = "no";
};
address = [
"${cfg.lan_ip}/${toString cfg.lan_ip_prefix_length}"
"${cfg.ipv6}/${toString cfg.ipv6_prefix_length}"
];
routes = [
systemd.network.networks."10-uplink" =
let
# IPv4 configuration is obtained by DHCP by default,
# unless a static v4 address and default gateway are given
noDHCP = cfg.staticIPv4.address != null && cfg.staticIPv4.defaultGateway != null;
# IPv6 configuration is obtained through router advertisements (RA),
# possibly using a static token to ensure a static IPv6,
# unless a static v6 address and default gateway are given,
# in which case RAs are disabled entirely
noRA = cfg.staticIPv6.address != null && cfg.staticIPv6.defaultGateway != null;
staticV6 = cfg.staticIPv6.address != null;
in
{
matchConfig.Name = "en* eth*";
address =
optional noDHCP "${cfg.staticIPv4.address}/${toString cfg.staticIPv4.prefixLength}"
++ optional noRA "${cfg.staticIPv6.address}/${toString cfg.staticIPv6.prefixLength}";
routes =
optional noDHCP {
routeConfig = {
Gateway = cfg.lan_default_gateway;
Gateway = cfg.staticIPv4.defaultGateway;
# GatewayOnLink - Takes a boolean. If set to true, the kernel does not have to check if the gateway is reachable directly by the current machine (i.e., attached to the local network), so that we can insert the route in the kernel table without it being complained about. Defaults to "no".
GatewayOnLink = true;
};
}
{
} ++ optional noRA {
routeConfig = {
Gateway = cfg.ipv6_default_gateway;
Gateway = cfg.staticIPv6.defaultGateway;
GatewayOnLink = true;
};
}
];
};
};
# Configure Unbound DNS to redirect to Consul queries under .consul
# and to pass directly to public DNS resolver all others
# Dynamic IPv4: enable DHCP but not for DNS servers
networkConfig.DHCP = mkIf (!noDHCP) "ipv4";
dhcpV4Config.UseDNS = mkIf (!noDHCP) false;
# Dynamic IPv6: only fetch default route, use static
# address and no DNS servers
ipv6AcceptRAConfig.Token = mkIf (!noRA && staticV6) "static:${cfg.staticIPv6.address}";
ipv6AcceptRAConfig.UseDNS = mkIf (!noRA) false;
# Static IPv6: disable all router advertisements and
# link-local addresses
networkConfig.IPv6AcceptRA = mkIf noRA false;
networkConfig.LinkLocalAddressing = mkIf noRA "no";
};
# Configure Unbound as a central DNS server for everything
# - is its own recursor (applies DNSSec) for everything,
# no need to declare an outside nameserver
# - redirects to Consul queries under .consul
services.unbound = {
enable = true;
enableRootTrustAnchor = false; # disable DNSSEC as it causes issues
settings = {
server = {
interface = [ "127.0.0.1" "${cfg.lan_ip}" "172.17.0.1" ];
interface = [ "127.0.0.1" "172.17.0.1" ];
domain-insecure = [ "consul." ];
local-zone = [ "consul. nodefault" ];
log-servfail = true;
@ -207,73 +224,58 @@ in
logfile = "/dev/stdout";
access-control = [
"127.0.0.0/8 allow"
"${cfg.lan_ip}/${toString cfg.lan_ip_prefix_length} allow"
"172.17.0.0/16 allow"
"10.83.0.0/16 allow"
"192.168.0.0/16 allow"
"${cfg.clusterPrefix} allow"
];
};
forward-zone = [
stub-zone = [
# Forward .consul queries to Consul daemon
{
name = "consul.";
forward-addr = "${cfg.lan_ip}@8600";
forward-no-cache = true;
forward-tcp-upstream = false;
forward-tls-upstream = false;
}
# Forward all queries to our ISP's nameserver
{
name = ".";
forward-addr = cfg.nameservers;
forward-first = true;
stub-addr = "${clusterAddress}@8600";
stub-no-cache = true;
stub-tcp-upstream = false;
stub-tls-upstream = false;
}
];
};
resolveLocalQueries = true;
};
# Reach Unbound through the IP of our LAN interface,
# instead of 127.0.0.1 (this will also work in Docker containers)
networking.nameservers = [ # TODO remove this ?
cfg.lan_ip
];
services.resolved.enable = false;
# Configure Wireguard VPN between all nodes
networking.wireguard.interfaces.wg0 = {
ips = [ "${cfg.cluster_ip}/16" ];
listenPort = cfg.wireguard_port;
ips = [ "${clusterAddress}/16" ];
listenPort = cfg.wireguardPort;
privateKeyFile = "/var/lib/deuxfleurs/wireguard-keys/private";
mtu = 1420;
};
services.wgautomesh = {
enable = true;
interface = "wg0";
gossipPort = 1666;
gossipPort = cfg.wgautomeshPort;
gossipSecretFile = "/var/lib/wgautomesh/gossip_secret";
persistFile = "/var/lib/wgautomesh/state";
upnpForwardPublicPort =
let
us = filter ({ hostname, ...}: hostname == config.networking.hostName) cfg.cluster_nodes;
in
if length us > 0 && (head us).endpoint != null then
strings.toInt (lists.last (split ":" (head us).endpoint))
if clusterNodeCfg.endpoint != null then
strings.toInt (lists.last (split ":" clusterNodeCfg.endpoint))
else null;
peers = map ({ publicKey, endpoint, IP, ... }: {
address = IP;
peers = attrValues (mapAttrs (hostname: { publicKey, endpoint, address, ... }: {
inherit address endpoint;
pubkey = publicKey;
endpoint = endpoint;
}) cfg.cluster_nodes;
}) cfg.clusterNodes);
};
# Old code for wg-quick, we can use this as a fallback if we fail to make wgautomesh work
# systemd.services."wg-quick-wg0".after = [ "unbound.service" ];
# networking.wg-quick.interfaces.wg0 = {
# address = [ "${cfg.cluster_ip}/16" ];
# listenPort = cfg.wireguard_port;
# address = [ "${clusterAddress}/16" ];
# listenPort = cfg.wireguardPort;
# privateKeyFile = "/var/lib/deuxfleurs/wireguard-keys/private";
# mtu = 1420;
# peers = map ({ publicKey, endpoint, IP, ... }: {
# peers = map ({ publicKey, endpoint, address, ... }: {
# inherit publicKey endpoint;
# allowedIPs = [ "${IP}/32" ];
# allowedIPs = [ "${address}/32" ];
# persistentKeepalive = 25;
# };
@ -287,25 +289,25 @@ in
'';
# Configure /etc/hosts to link all hostnames to their Wireguard IP
networking.extraHosts = builtins.concatStringsSep "\n" (map
({ hostname, IP, ...}: "${IP} ${hostname}")
cfg.cluster_nodes);
networking.extraHosts = concatStringsSep "\n" (attrValues (mapAttrs
(hostname: { address, ...}: "${address} ${hostname}")
cfg.clusterNodes));
# Enable Hashicorp Consul & Nomad
services.consul.enable = true;
systemd.services.consul.after = [ "wg-quick-wg0.service" ];
services.consul.extraConfig =
(if cfg.is_raft_server
(if cfg.isRaftServer
then { server = true; }
// (if cfg.bootstrap then { bootstrap_expect = 3; } else {})
else {}) //
{
inherit node_meta;
datacenter = cfg.cluster_name;
datacenter = cfg.clusterName;
ui_config = {
enabled = true;
};
bind_addr = "${cfg.cluster_ip}";
bind_addr = "${clusterAddress}";
addresses = {
https = "0.0.0.0";
@ -326,9 +328,6 @@ in
verify_outgoing = true;
verify_server_hostname = true;
};
systemd.services.consul.serviceConfig = { # TODO remove this ?
AmbientCapabilities = "CAP_NET_BIND_SERVICE";
};
services.nomad.enable = true;
systemd.services.nomad.after = [ "wg-quick-wg0.service" ];
@ -338,18 +337,18 @@ in
pkgs.zstd
];
services.nomad.settings =
(if cfg.is_raft_server
(if cfg.isRaftServer
then {
server = { enabled = true; }
// (if cfg.bootstrap then { bootstrap_expect = 3; } else {});
} else {}) //
{
region = cfg.cluster_name;
datacenter = cfg.site_name;
region = cfg.clusterName;
datacenter = cfg.siteName;
advertise = {
rpc = "${cfg.cluster_ip}";
http = "${cfg.cluster_ip}";
serf = "${cfg.cluster_ip}";
rpc = "${clusterAddress}";
http = "${clusterAddress}";
serf = "${clusterAddress}";
};
consul = {
address = "localhost:8501";
@ -402,31 +401,32 @@ in
allowedTCPPorts = [
# Allow anyone to connect on SSH port
(builtins.head ({ openssh.ports = [22]; } // config.services).openssh.ports)
(head ({ openssh.ports = [22]; } // config.services).openssh.ports)
];
allowedUDPPorts = [
# Allow peers to connect to Wireguard
cfg.wireguard_port
cfg.wireguardPort
];
# Allow specific hosts access to specific things in the cluster
extraCommands = ''
# Allow everything from router (usefull for UPnP/IGD)
iptables -A INPUT -s ${cfg.lan_default_gateway} -j ACCEPT
# Allow UDP packets comming from port 1900 from a local address,
# these are necessary for UPnP/IGD
iptables -A INPUT -s 192.168.0.0/16 -p udp --sport 1900 -j ACCEPT
# Allow docker containers to access all ports
iptables -A INPUT -s 172.17.0.0/16 -j ACCEPT
# Allow other nodes on VPN to access all ports
iptables -A INPUT -s ${cfg.cluster_prefix}/${toString cfg.cluster_prefix_length} -j ACCEPT
iptables -A INPUT -s ${cfg.clusterPrefix} -j ACCEPT
'';
# When stopping firewall, delete all rules that were configured manually above
extraStopCommands = ''
iptables -D INPUT -s ${cfg.lan_default_gateway} -j ACCEPT
iptables -D INPUT -s 192.168.0.0/16 -p udp --sport 1900 -j ACCEPT
iptables -D INPUT -s 172.17.0.0/16 -j ACCEPT
iptables -D INPUT -s ${cfg.cluster_prefix}/${toString cfg.cluster_prefix_length} -j ACCEPT
iptables -D INPUT -s ${cfg.clusterPrefix} -j ACCEPT
'';
};
};

View file

@ -1,24 +1,43 @@
{ config, pkgs, ... }:
let
cfg = config.deuxfleurs.remoteUnlock;
in
with builtins;
with pkgs.lib;
{
options.deuxfleurs.remoteUnlock = {
networkInterface = mkOption {
description = "Network interface to configure with static IP";
type = types.str;
};
staticIP = mkOption {
description = "IP address (with prefix length) of this node on the local network interface";
type = types.str;
};
defaultGateway = mkOption {
description = "IP address of default gateway";
type = types.str;
};
};
config = {
boot.initrd.availableKernelModules = [ "pps_core" "ptp" "e1000e" ];
boot.initrd.network.enable = true;
boot.initrd.network.ssh = {
enable = true;
port = 222;
authorizedKeys = concatLists (mapAttrsToList (name: user: user) config.deuxfleurs.admin_accounts);
authorizedKeys = concatLists (mapAttrsToList (name: user: user) config.deuxfleurs.adminAccounts);
hostKeys = [ "/var/lib/deuxfleurs/remote-unlock/ssh_host_ed25519_key" ];
};
boot.initrd.network.postCommands = ''
ip addr add ${config.deuxfleurs.lan_ip}/${toString config.deuxfleurs.lan_ip_prefix_length} dev ${config.deuxfleurs.network_interface}
ip link set dev ${config.deuxfleurs.network_interface} up
ip route add default via ${config.deuxfleurs.lan_default_gateway} dev ${config.deuxfleurs.network_interface}
ip addr add ${cfg.staticIP} dev ${cfg.networkInterface}
ip link set dev ${cfg.networkInterface} up
ip route add default via ${cfg.defaultGateway} dev ${cfg.networkInterface}
ip a
ip route
ping -c 4 ${config.deuxfleurs.lan_default_gateway}
ping -c 4 ${cfg.defaultGateway}
echo 'echo run cryptsetup-askpass to unlock drives' >> /root/.profile
'';
};