Merge pull request 'Simplify network configuration' (#11) from simplify-network-config into main

Reviewed-on: Deuxfleurs/nixcfg#11
Commit aee3a09471 by Alex, 2023-05-16 13:19:33 +00:00
43 changed files with 789 additions and 835 deletions


@ -0,0 +1,100 @@
job "core:bottin" {
datacenters = ["orion", "neptune", "scorpio"]
type = "system"
priority = 90
update {
max_parallel = 1
stagger = "1m"
}
group "bottin" {
constraint {
distinct_property = "${meta.site}"
value = "1"
}
network {
port "ldap_port" {
static = 389
to = 389
}
}
task "bottin" {
driver = "docker"
config {
image = "dxflrs/bottin:7h18i30cckckaahv87d3c86pn4a7q41z"
network_mode = "host"
readonly_rootfs = true
ports = [ "ldap_port" ]
volumes = [
"secrets/config.json:/config.json",
"secrets:/etc/bottin",
]
}
restart {
interval = "5m"
attempts = 10
delay = "15s"
mode = "delay"
}
resources {
memory = 100
memory_max = 200
}
template {
data = file("../config/bottin/config.json.tpl")
destination = "secrets/config.json"
}
template {
data = "{{ key \"secrets/consul/consul.crt\" }}"
destination = "secrets/consul.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.crt\" }}"
destination = "secrets/consul-client.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.key\" }}"
destination = "secrets/consul-client.key"
}
template {
data = <<EOH
CONSUL_HTTP_ADDR=https://consul.service.prod.consul:8501
CONSUL_HTTP_SSL=true
CONSUL_CACERT=/etc/bottin/consul.crt
CONSUL_CLIENT_CERT=/etc/bottin/consul-client.crt
CONSUL_CLIENT_KEY=/etc/bottin/consul-client.key
EOH
destination = "secrets/env"
env = true
}
service {
tags = [ "${meta.site}" ]
port = "ldap_port"
address_mode = "host"
name = "bottin"
check {
type = "tcp"
port = "ldap_port"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
}
}
}
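Once this job is running on every site, the service registration and the TCP health check above can be spot-checked from any cluster node. A minimal sketch, using the same Consul HTTP endpoint as the registration scripts further down:
$ curl -s http://localhost:8500/v1/catalog/service/bottin
$ nc -zv localhost 389   # same probe as the tcp check block above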


@ -1,257 +0,0 @@
job "core" {
datacenters = ["orion", "neptune", "scorpio"]
type = "system"
priority = 90
update {
max_parallel = 1
stagger = "5m"
}
group "diplonat" {
task "diplonat" {
driver = "docker"
config {
image = "lxpz/amd64_diplonat:5"
network_mode = "host"
readonly_rootfs = true
privileged = true
volumes = [
"secrets:/etc/diplonat",
]
}
restart {
interval = "5m"
attempts = 10
delay = "15s"
mode = "delay"
}
template {
data = "{{ key \"secrets/consul/consul.crt\" }}"
destination = "secrets/consul.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.crt\" }}"
destination = "secrets/consul-client.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.key\" }}"
destination = "secrets/consul-client.key"
}
template {
data = <<EOH
DIPLONAT_REFRESH_TIME=60
DIPLONAT_EXPIRATION_TIME=300
DIPLONAT_CONSUL_NODE_NAME={{ env "attr.unique.hostname" }}
DIPLONAT_CONSUL_URL=https://consul.service.prod.consul:8501
DIPLONAT_CONSUL_TLS_SKIP_VERIFY=true
DIPLONAT_CONSUL_CLIENT_CERT=/etc/diplonat/consul-client.crt
DIPLONAT_CONSUL_CLIENT_KEY=/etc/diplonat/consul-client.key
RUST_LOG=debug
EOH
destination = "secrets/env"
env = true
}
resources {
memory = 100
memory_max = 200
}
}
}
group "tricot" {
constraint {
distinct_property = "${meta.site}"
value = "1"
}
network {
port "http_port" { static = 80 }
port "https_port" { static = 443 }
port "metrics_port" { static = 9334 }
}
task "server" {
driver = "docker"
config {
image = "lxpz/amd64_tricot:47"
network_mode = "host"
readonly_rootfs = true
ports = [ "http_port", "https_port" ]
volumes = [
"secrets:/etc/tricot",
]
}
resources {
cpu = 1000
memory = 200
memory_max = 500
}
restart {
interval = "5m"
attempts = 10
delay = "15s"
mode = "delay"
}
template {
data = "{{ key \"secrets/consul/consul-ca.crt\" }}"
destination = "secrets/consul-ca.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.crt\" }}"
destination = "secrets/consul-client.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.key\" }}"
destination = "secrets/consul-client.key"
}
template {
data = <<EOH
TRICOT_NODE_NAME={{ env "attr.unique.hostname" }}
TRICOT_LETSENCRYPT_EMAIL=prod-sysadmin@deuxfleurs.fr
TRICOT_ENABLE_COMPRESSION=true
TRICOT_CONSUL_HOST=https://consul.service.prod.consul:8501
TRICOT_CONSUL_TLS_SKIP_VERIFY=true
TRICOT_CONSUL_CLIENT_CERT=/etc/tricot/consul-client.crt
TRICOT_CONSUL_CLIENT_KEY=/etc/tricot/consul-client.key
TRICOT_HTTP_BIND_ADDR=[::]:80
TRICOT_HTTPS_BIND_ADDR=[::]:443
TRICOT_METRICS_BIND_ADDR=[::]:9334
RUST_LOG=tricot=debug
EOH
destination = "secrets/env"
env = true
}
service {
name = "tricot-http"
port = "http_port"
tags = [ "(diplonat (tcp_port 80))", "${meta.site}" ]
address_mode = "host"
}
service {
name = "tricot-https"
port = "https_port"
tags = [
"(diplonat (tcp_port 443))",
"${meta.site}",
"d53-aaaa ${meta.site}.site.deuxfleurs.fr",
"d53-a global.site.deuxfleurs.fr",
"d53-aaaa global.site.deuxfleurs.fr",
]
address_mode = "host"
}
service {
name = "tricot-metrics"
port = "metrics_port"
address_mode = "host"
}
}
}
group "bottin" {
constraint {
distinct_property = "${meta.site}"
value = "1"
}
network {
port "ldap_port" {
static = 389
to = 389
}
}
task "bottin" {
driver = "docker"
config {
image = "dxflrs/bottin:7h18i30cckckaahv87d3c86pn4a7q41z"
network_mode = "host"
readonly_rootfs = true
ports = [ "ldap_port" ]
volumes = [
"secrets/config.json:/config.json",
"secrets:/etc/bottin",
]
}
restart {
interval = "5m"
attempts = 10
delay = "15s"
mode = "delay"
}
resources {
memory = 100
memory_max = 200
}
template {
data = file("../config/bottin/config.json.tpl")
destination = "secrets/config.json"
}
template {
data = "{{ key \"secrets/consul/consul.crt\" }}"
destination = "secrets/consul.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.crt\" }}"
destination = "secrets/consul-client.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.key\" }}"
destination = "secrets/consul-client.key"
}
template {
data = <<EOH
CONSUL_HTTP_ADDR=https://consul.service.prod.consul:8501
CONSUL_HTTP_SSL=true
CONSUL_CACERT=/etc/bottin/consul.crt
CONSUL_CLIENT_CERT=/etc/bottin/consul-client.crt
CONSUL_CLIENT_KEY=/etc/bottin/consul-client.key
EOH
destination = "secrets/env"
env = true
}
service {
tags = [ "${meta.site}" ]
port = "ldap_port"
address_mode = "host"
name = "bottin"
check {
type = "tcp"
port = "ldap_port"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
}
}
}


@ -1,4 +1,4 @@
job "core-service" { job "core:d53" {
datacenters = ["neptune", "orion", "bespin"] datacenters = ["neptune", "orion", "bespin"]
type = "service" type = "service"
priority = 90 priority = 90


@ -0,0 +1,71 @@
job "core:diplonat" {
datacenters = ["orion", "neptune", "scorpio", "bespin"]
type = "system"
priority = 90
update {
max_parallel = 2
stagger = "1m"
}
group "diplonat" {
task "diplonat" {
driver = "docker"
config {
image = "lxpz/amd64_diplonat:6"
network_mode = "host"
readonly_rootfs = true
privileged = true
volumes = [
"secrets:/etc/diplonat",
]
}
restart {
interval = "5m"
attempts = 10
delay = "15s"
mode = "delay"
}
template {
data = "{{ key \"secrets/consul/consul.crt\" }}"
destination = "secrets/consul.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.crt\" }}"
destination = "secrets/consul-client.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.key\" }}"
destination = "secrets/consul-client.key"
}
template {
data = <<EOH
DIPLONAT_REFRESH_TIME=60
DIPLONAT_EXPIRATION_TIME=300
DIPLONAT_CONSUL_NODE_NAME={{ env "attr.unique.hostname" }}
DIPLONAT_CONSUL_URL=https://consul.service.prod.consul:8501
DIPLONAT_CONSUL_TLS_SKIP_VERIFY=true
DIPLONAT_CONSUL_CLIENT_CERT=/etc/diplonat/consul-client.crt
DIPLONAT_CONSUL_CLIENT_KEY=/etc/diplonat/consul-client.key
{{ if env "meta.site" | eq "bespin" }}
DIPLONAT_IPV6_ONLY=true
{{ end }}
RUST_LOG=debug
EOH
destination = "secrets/env"
env = true
}
resources {
memory = 100
memory_max = 200
}
}
}
}
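The meta.site interpolation above comes from the Consul/Nomad node metadata that the NixOS module sets from deuxfleurs.siteName, so the DIPLONAT_IPV6_ONLY branch only renders on bespin clients. A hedged sketch for checking what a client actually advertises (standard Nomad CLI; the exact Meta key name "site" is assumed from the module below):
$ nomad node status -self -verbose | grep -i site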


@ -0,0 +1,109 @@
job "core:tricot" {
datacenters = ["orion", "neptune", "scorpio", "bespin"]
type = "system"
priority = 90
update {
max_parallel = 1
stagger = "5m"
}
group "tricot" {
constraint {
distinct_property = "${meta.site}"
value = "1"
}
network {
port "http_port" { static = 80 }
port "https_port" { static = 443 }
port "metrics_port" { static = 9334 }
}
task "server" {
driver = "docker"
config {
image = "lxpz/amd64_tricot:47"
network_mode = "host"
readonly_rootfs = true
ports = [ "http_port", "https_port" ]
volumes = [
"secrets:/etc/tricot",
]
}
resources {
cpu = 1000
memory = 200
memory_max = 500
}
restart {
interval = "5m"
attempts = 10
delay = "15s"
mode = "delay"
}
template {
data = "{{ key \"secrets/consul/consul-ca.crt\" }}"
destination = "secrets/consul-ca.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.crt\" }}"
destination = "secrets/consul-client.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.key\" }}"
destination = "secrets/consul-client.key"
}
template {
data = <<EOH
TRICOT_NODE_NAME={{ env "attr.unique.hostname" }}
TRICOT_LETSENCRYPT_EMAIL=prod-sysadmin@deuxfleurs.fr
TRICOT_ENABLE_COMPRESSION=true
TRICOT_CONSUL_HOST=https://consul.service.prod.consul:8501
TRICOT_CONSUL_TLS_SKIP_VERIFY=true
TRICOT_CONSUL_CLIENT_CERT=/etc/tricot/consul-client.crt
TRICOT_CONSUL_CLIENT_KEY=/etc/tricot/consul-client.key
TRICOT_HTTP_BIND_ADDR=[::]:80
TRICOT_HTTPS_BIND_ADDR=[::]:443
TRICOT_METRICS_BIND_ADDR=[::]:9334
RUST_LOG=tricot=debug
EOH
destination = "secrets/env"
env = true
}
service {
name = "tricot-http"
port = "http_port"
tags = [ "(diplonat (tcp_port 80))", "${meta.site}" ]
address_mode = "host"
}
service {
name = "tricot-https"
port = "https_port"
tags = [
"(diplonat (tcp_port 443))",
"${meta.site}",
"d53-aaaa ${meta.site}.site.deuxfleurs.fr",
"d53-a global.site.deuxfleurs.fr",
"d53-aaaa global.site.deuxfleurs.fr",
]
address_mode = "host"
}
service {
name = "tricot-metrics"
port = "metrics_port"
address_mode = "host"
}
}
}
}
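Because the job binds its metrics port on the host network, a quick liveness probe for the new core:tricot deployment is to scrape it locally. A minimal sketch:
$ curl -s http://localhost:9334/metrics | head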


@ -6,8 +6,10 @@ db_engine = "lmdb"
replication_mode = "3" replication_mode = "3"
rpc_bind_addr = "[{{ env "meta.public_ipv6" }}]:3901" {{ with $a := env "attr.unique.hostname" | printf "diplonat/autodiscovery/ipv6/%s" | key | parseJSON }}
rpc_public_addr = "[{{ env "meta.public_ipv6" }}]:3901" rpc_bind_addr = "[{{ $a.address }}]:3901"
rpc_public_addr = "[{{ $a.address }}]:3901"
{{ end }}
rpc_secret = "{{ key "secrets/garage/rpc_secret" | trimSpace }}" rpc_secret = "{{ key "secrets/garage/rpc_secret" | trimSpace }}"
[consul_discovery] [consul_discovery]
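The template now waits for a Consul KV entry published by diplonat's autodiscovery instead of the static meta.public_ipv6 node attribute. A hedged sketch for inspecting that entry on a node (a JSON object with an "address" field is an assumption implied by the $a.address lookup above):
$ consul kv get diplonat/autodiscovery/ipv6/$(hostname)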


@ -1,84 +1,73 @@
{ config, pkgs, ... } @ args:
{
-deuxfleurs.cluster_name = "prod";
+deuxfleurs.clusterName = "prod";
# The IP range to use for the Wireguard overlay of this cluster
-deuxfleurs.cluster_prefix = "10.83.0.0";
-deuxfleurs.cluster_prefix_length = 16;
-deuxfleurs.cluster_nodes = [
-{
-hostname = "concombre";
-site_name = "neptune";
+deuxfleurs.clusterPrefix = "10.83.0.0/16";
+deuxfleurs.clusterNodes = {
+"concombre" = {
+siteName = "neptune";
publicKey = "VvXT0fPDfWsHxumZqVShpS33dJQAdpJ1E79ZbCBJP34=";
-IP = "10.83.1.1";
+address = "10.83.1.1";
endpoint = "77.207.15.215:33731";
-}
-{
-hostname = "courgette";
-site_name = "neptune";
+};
+"courgette" = {
+siteName = "neptune";
publicKey = "goTkBJGmzrGDOAjUcdH9G0JekipqSMoaYQdB6IHnzi0=";
-IP = "10.83.1.2";
+address = "10.83.1.2";
endpoint = "77.207.15.215:33732";
-}
-{
-hostname = "celeri";
-site_name = "neptune";
+};
+"celeri" = {
+siteName = "neptune";
publicKey = "oZDAb8LoLW87ktUHyFFec0VaIar97bqq47mGbdVqJ0U=";
-IP = "10.83.1.3";
+address = "10.83.1.3";
endpoint = "77.207.15.215:33733";
-}
-{
-hostname = "dahlia";
-site_name = "orion";
+};
+"dahlia" = {
+siteName = "orion";
publicKey = "EtRoWBYCdjqgXX0L+uWLg8KxNfIK8k9OTh30tL19bXU=";
-IP = "10.83.2.1";
+address = "10.83.2.1";
endpoint = "82.66.80.201:33731";
-}
-{
-hostname = "diplotaxis";
-site_name = "orion";
+};
+"diplotaxis" = {
+siteName = "orion";
publicKey = "HbLC938mysadMSOxWgq8+qrv+dBKzPP/43OMJp/3phA=";
-IP = "10.83.2.2";
+address = "10.83.2.2";
endpoint = "82.66.80.201:33732";
-}
-{
-hostname = "doradille";
-site_name = "orion";
+};
+"doradille" = {
+siteName = "orion";
publicKey = "e1C8jgTj9eD20ywG08G1FQZ+Js3wMK/msDUE1wO3l1Y=";
-IP = "10.83.2.3";
+address = "10.83.2.3";
endpoint = "82.66.80.201:33733";
-}
-{
-hostname = "df-ykl";
-site_name = "bespin";
+};
+"df-ykl" = {
+siteName = "bespin";
publicKey = "bIjxey/VhBgVrLa0FxN/KISOt2XFmQeSh1MPivUq9gg=";
-IP = "10.83.3.1";
+address = "10.83.3.1";
endpoint = "109.136.55.235:33731";
-}
-{
-hostname = "df-ymf";
-site_name = "bespin";
+};
+"df-ymf" = {
+siteName = "bespin";
publicKey = "pUIKv8UBl586O7DBrHBsb9BgNU7WlYQ2r2RSNkD+JAQ=";
-IP = "10.83.3.2";
+address = "10.83.3.2";
endpoint = "109.136.55.235:33732";
-}
-{
-hostname = "df-ymk";
-site_name = "bespin";
+};
+"df-ymk" = {
+siteName = "bespin";
publicKey = "VBmpo15iIJP7250NAsF+ryhZc3j+8TZFnE1Djvn5TXI=";
-IP = "10.83.3.3";
+address = "10.83.3.3";
endpoint = "109.136.55.235:33733";
-}
-{
-hostname = "abricot";
-site_name = "scorpio";
+};
+"abricot" = {
+siteName = "scorpio";
publicKey = "Sm9cmNZ/BfWVPFflMO+fuyiera4r203b/dKhHTQmBFg=";
-IP = "10.83.4.1";
+address = "10.83.4.1";
endpoint = "82.65.41.110:33741";
-}
-];
+};
+};
# Bootstrap IPs for Consul cluster,
# these are IPs on the Wireguard overlay
@ -88,7 +77,7 @@
"10.83.3.1" # df-ykl "10.83.3.1" # df-ykl
]; ];
deuxfleurs.admin_accounts = { deuxfleurs.adminAccounts = {
lx = [ lx = [
# Keys for accessing nodes from outside # Keys for accessing nodes from outside
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJpaBZdYxHqMxhv2RExAOa7nkKhPBOHupMP3mYaZ73w9 lx@lindy" "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJpaBZdYxHqMxhv2RExAOa7nkKhPBOHupMP3mYaZ73w9 lx@lindy"


@ -8,12 +8,7 @@
boot.loader.timeout = 20;
boot.loader.efi.canTouchEfiVariables = true;
-networking.hostName = "abricot";
-deuxfleurs.network_interface = "eno1";
-deuxfleurs.lan_ip = "192.168.1.41";
-deuxfleurs.ipv6 = "2a01:e0a:e4:2dd0::41";
-deuxfleurs.cluster_ip = "10.83.4.1";
-deuxfleurs.is_raft_server = false;
+deuxfleurs.hostName = "abricot";
+deuxfleurs.staticIPv4.address = "192.168.1.41";
+deuxfleurs.staticIPv6.address = "2a01:e0a:e4:2dd0::41";
}


@ -8,12 +8,7 @@
boot.loader.timeout = 20;
boot.loader.efi.canTouchEfiVariables = true;
-networking.hostName = "celeri";
-deuxfleurs.network_interface = "enp0s31f6";
-deuxfleurs.lan_ip = "192.168.1.33";
-deuxfleurs.ipv6 = "2001:910:1204:1::33";
-deuxfleurs.cluster_ip = "10.83.1.3";
-deuxfleurs.is_raft_server = false;
+deuxfleurs.hostName = "celeri";
+deuxfleurs.staticIPv4.address = "192.168.1.33";
+deuxfleurs.staticIPv6.address = "2001:910:1204:1::33";
}


@ -8,12 +8,8 @@
boot.loader.timeout = 20;
boot.loader.efi.canTouchEfiVariables = true;
-networking.hostName = "concombre";
-deuxfleurs.network_interface = "enp0s31f6";
-deuxfleurs.lan_ip = "192.168.1.31";
-deuxfleurs.ipv6 = "2001:910:1204:1::31";
-deuxfleurs.cluster_ip = "10.83.1.1";
-deuxfleurs.is_raft_server = true;
+deuxfleurs.hostName = "concombre";
+deuxfleurs.staticIPv4.address = "192.168.1.31";
+deuxfleurs.staticIPv6.address = "2001:910:1204:1::31";
+deuxfleurs.isRaftServer = true;
}


@ -8,12 +8,7 @@
boot.loader.timeout = 20;
boot.loader.efi.canTouchEfiVariables = true;
-networking.hostName = "courgette";
-deuxfleurs.network_interface = "enp0s31f6";
-deuxfleurs.lan_ip = "192.168.1.32";
-deuxfleurs.ipv6 = "2001:910:1204:1::32";
-deuxfleurs.cluster_ip = "10.83.1.2";
-deuxfleurs.is_raft_server = false;
+deuxfleurs.hostName = "courgette";
+deuxfleurs.staticIPv4.address = "192.168.1.32";
+deuxfleurs.staticIPv6.address = "2001:910:1204:1::32";
}


@ -7,12 +7,8 @@
boot.loader.systemd-boot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
-networking.hostName = "dahlia";
-deuxfleurs.network_interface = "enp0s31f6";
-deuxfleurs.lan_ip = "192.168.1.11";
-deuxfleurs.ipv6 = "2a01:e0a:28f:5e60::11";
-deuxfleurs.cluster_ip = "10.83.2.1";
-deuxfleurs.is_raft_server = true;
+deuxfleurs.hostName = "dahlia";
+deuxfleurs.staticIPv4.address = "192.168.1.11";
+deuxfleurs.staticIPv6.address = "2a01:e0a:28f:5e60::11";
+deuxfleurs.isRaftServer = true;
}


@ -7,14 +7,10 @@
boot.loader.systemd-boot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
-networking.hostName = "df-ykl";
-deuxfleurs.network_interface = "enp0s31f6";
-deuxfleurs.lan_ip = "192.168.5.117";
-deuxfleurs.ipv6 = "2a02:a03f:6510:5102:6e4b:90ff:fe3b:e86c";
-deuxfleurs.cluster_ip = "10.83.3.1";
-deuxfleurs.is_raft_server = true;
+deuxfleurs.hostName = "df-ykl";
+deuxfleurs.staticIPv4.address = "192.168.5.117";
+deuxfleurs.staticIPv6.address = "2a02:a03f:6510:5102:6e4b:90ff:fe3b:e86c";
+deuxfleurs.isRaftServer = true;
fileSystems."/mnt" = {
device = "/dev/disk/by-uuid/f7aa396f-23d0-44d3-89cf-3cb00bbb6c3b";


@ -7,14 +7,9 @@
boot.loader.systemd-boot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
-networking.hostName = "df-ymf";
-deuxfleurs.network_interface = "enp0s31f6";
-deuxfleurs.lan_ip = "192.168.5.134";
-deuxfleurs.ipv6 = "2a02:a03f:6510:5102:6e4b:90ff:fe3a:6174";
-deuxfleurs.cluster_ip = "10.83.3.2";
-deuxfleurs.is_raft_server = false;
+deuxfleurs.hostName = "df-ymf";
+deuxfleurs.staticIPv4.address = "192.168.5.134";
+deuxfleurs.staticIPv6.address = "2a02:a03f:6510:5102:6e4b:90ff:fe3a:6174";
fileSystems."/mnt" = {
device = "/dev/disk/by-uuid/fec20a7e-5019-4747-8f73-77f3f196c122";


@ -7,14 +7,9 @@
boot.loader.systemd-boot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
-networking.hostName = "df-ymk";
-deuxfleurs.network_interface = "enp0s31f6";
-deuxfleurs.lan_ip = "192.168.5.116";
-deuxfleurs.ipv6 = "2a02:a03f:6510:5102:6e4b:90ff:fe3b:e939";
-deuxfleurs.cluster_ip = "10.83.3.3";
-deuxfleurs.is_raft_server = false;
+deuxfleurs.hostName = "df-ymk";
+deuxfleurs.staticIPv4.address = "192.168.5.116";
+deuxfleurs.staticIPv6.address = "2a02:a03f:6510:5102:6e4b:90ff:fe3b:e939";
fileSystems."/mnt" = {
device = "/dev/disk/by-uuid/51d95b17-0e06-4a73-9e4e-ae5363cc4015";


@ -8,12 +8,7 @@
boot.loader.grub.version = 2;
boot.loader.grub.device = "/dev/nvme0n1"; # or "nodev" for efi only
-networking.hostName = "diplotaxis";
-deuxfleurs.network_interface = "enp0s31f6";
-deuxfleurs.lan_ip = "192.168.1.12";
-deuxfleurs.ipv6 = "2a01:e0a:28f:5e60::12";
-deuxfleurs.cluster_ip = "10.83.2.2";
-deuxfleurs.is_raft_server = false;
+deuxfleurs.hostName = "diplotaxis";
+deuxfleurs.staticIPv4.address = "192.168.1.12";
+deuxfleurs.staticIPv6.address = "2a01:e0a:28f:5e60::12";
}


@ -8,12 +8,7 @@
boot.loader.grub.version = 2;
boot.loader.grub.device = "/dev/nvme0n1"; # or "nodev" for efi only
-networking.hostName = "doradille";
-deuxfleurs.network_interface = "enp0s31f6";
-deuxfleurs.lan_ip = "192.168.1.13";
-deuxfleurs.ipv6 = "2a01:e0a:28f:5e60::13";
-deuxfleurs.cluster_ip = "10.83.2.3";
-deuxfleurs.is_raft_server = false;
+deuxfleurs.hostName = "doradille";
+deuxfleurs.staticIPv4.address = "192.168.1.13";
+deuxfleurs.staticIPv6.address = "2a01:e0a:28f:5e60::13";
}


@ -0,0 +1,41 @@
#!/usr/bin/env bash
# Bruxelles (bespin): git forge at git.deuxfleurs.fr
curl -vv -X PUT http://localhost:8500/v1/catalog/register -H "Content-Type: application/json" --data @- <<EOF
{
"Datacenter": "prod",
"Node": "gitea",
"Address": "192.168.5.200",
"NodeMeta": {
"site": "bespin",
"cname_target": "bespin.site.deuxfleurs.fr."
},
"Service": {
"Service": "gitea",
"Tags": ["tricot git.deuxfleurs.fr"],
"Address": "192.168.5.200",
"Port": 3001
}
}
EOF
# Lille (scorpio): ADRN's personnal services under luxeylab.net
curl -vv -X PUT http://localhost:8500/v1/catalog/register -H "Content-Type: application/json" --data @- <<EOF
{
"Datacenter": "prod",
"Node": "spoutnik",
"Address": "192.168.1.60",
"NodeMeta": {
"site": "scorpio",
"cname_target": "scorpio.site.deuxfleurs.fr."
},
"Service": {
"Service": "adrien-nginx",
"Tags": ["tricot-https *.luxeylab.net"],
"Address": "192.168.1.60",
"Port": 443
}
}
EOF
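A hedged way to confirm that both external nodes ended up in the catalog, using the same Consul HTTP API as the registrations above:
$ curl -s http://localhost:8500/v1/catalog/node/gitea
$ curl -s http://localhost:8500/v1/catalog/node/spoutnik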


@ -1,17 +0,0 @@
#!/usr/bin/env bash
curl -vv -X PUT http://localhost:8500/v1/catalog/register -H "Content-Type: application/json" --data @- <<EOF
{
"Datacenter": "prod",
"Node": "spoutnik",
"Address": "192.168.1.60",
"NodeMeta": { "somekey": "bidon" },
"Service": {
"Service": "adrien-nginx",
"Tags": ["tricot-https *.luxeylab.net"],
"Address": "192.168.1.60",
"Port": 443
}
}
EOF


@ -1,13 +1,7 @@
{ config, pkgs, ... }:
{
-deuxfleurs.site_name = "bespin";
-deuxfleurs.lan_default_gateway = "192.168.5.254";
-deuxfleurs.ipv6_default_gateway = "2a02:a03f:6510:5102::1";
-deuxfleurs.lan_ip_prefix_length = 24;
-deuxfleurs.ipv6_prefix_length = 64;
-deuxfleurs.nameservers = [ "192.168.5.254" ];
-deuxfleurs.cname_target = "bespin.site.deuxfleurs.fr.";
-networking.firewall.allowedTCPPorts = [ 80 443 ];
+deuxfleurs.siteName = "bespin";
+deuxfleurs.staticIPv4.defaultGateway = "192.168.5.254";
+deuxfleurs.cnameTarget = "bespin.site.deuxfleurs.fr.";
}


@ -1,14 +1,8 @@
{ config, pkgs, ... }:
{
-deuxfleurs.site_name = "neptune";
-deuxfleurs.lan_default_gateway = "192.168.1.1";
-deuxfleurs.ipv6_default_gateway = "2001:910:1204:1::1";
-deuxfleurs.lan_ip_prefix_length = 24;
-deuxfleurs.ipv6_prefix_length = 64;
-deuxfleurs.nameservers = [ "192.168.1.1" ];
-deuxfleurs.cname_target = "neptune.site.deuxfleurs.fr.";
-deuxfleurs.public_ipv4 = "77.207.15.215";
-networking.firewall.allowedTCPPorts = [ 80 443 ];
+deuxfleurs.siteName = "neptune";
+deuxfleurs.staticIPv4.defaultGateway = "192.168.1.1";
+deuxfleurs.cnameTarget = "neptune.site.deuxfleurs.fr.";
+deuxfleurs.publicIPv4 = "77.207.15.215";
}


@ -1,14 +1,8 @@
{ config, pkgs, ... }:
{
-deuxfleurs.site_name = "orion";
-deuxfleurs.lan_default_gateway = "192.168.1.254";
-deuxfleurs.ipv6_default_gateway = "2a01:e0a:28f:5e60::1";
-deuxfleurs.lan_ip_prefix_length = 24;
-deuxfleurs.ipv6_prefix_length = 64;
-deuxfleurs.nameservers = [ "192.168.1.254" ];
-deuxfleurs.cname_target = "orion.site.deuxfleurs.fr.";
-deuxfleurs.public_ipv4 = "82.66.80.201";
-networking.firewall.allowedTCPPorts = [ 80 443 ];
+deuxfleurs.siteName = "orion";
+deuxfleurs.staticIPv4.defaultGateway = "192.168.1.254";
+deuxfleurs.cnameTarget = "orion.site.deuxfleurs.fr.";
+deuxfleurs.publicIPv4 = "82.66.80.201";
}


@ -1,14 +1,8 @@
{ config, pkgs, ... }:
{
-deuxfleurs.site_name = "scorpio";
-deuxfleurs.lan_default_gateway = "192.168.1.254";
-deuxfleurs.ipv6_default_gateway = "2a01:e0a:e4:2dd0::1";
-deuxfleurs.lan_ip_prefix_length = 24;
-deuxfleurs.ipv6_prefix_length = 64;
-deuxfleurs.nameservers = [ "192.168.1.254" ];
-deuxfleurs.cname_target = "scorpio.site.deuxfleurs.fr.";
-deuxfleurs.public_ipv4 = "82.65.41.110";
-networking.firewall.allowedTCPPorts = [ 80 443 ];
+deuxfleurs.siteName = "scorpio";
+deuxfleurs.staticIPv4.defaultGateway = "192.168.1.254";
+deuxfleurs.cnameTarget = "scorpio.site.deuxfleurs.fr.";
+deuxfleurs.publicIPv4 = "82.65.41.110";
}


@ -1,4 +1,4 @@
job "core-service" { job "core:d53" {
datacenters = ["neptune", "jupiter", "corrin", "bespin"] datacenters = ["neptune", "jupiter", "corrin", "bespin"]
type = "service" type = "service"
priority = 90 priority = 90
@ -11,7 +11,7 @@ job "core-service" {
config {
packages = [
-"git+https://git.deuxfleurs.fr/lx/D53.git?ref=main&rev=86c255dfeabc60b0ef46ff78bc487c61c9548c79"
+"git+https://git.deuxfleurs.fr/lx/D53.git?ref=diplonat-autodiscovery&rev=49d94dae1d753c1f3349be7ea9bc7e7978c0af15"
]
command = "d53"
}
@ -52,7 +52,7 @@ D53_CONSUL_CLIENT_KEY=/etc/tricot/consul-client.key
D53_PROVIDERS=deuxfleurs.org:gandi
D53_GANDI_API_KEY={{ key "secrets/d53/gandi_api_key" }}
D53_ALLOWED_DOMAINS=staging.deuxfleurs.org
-RUST_LOG=d53=info
+RUST_LOG=d53=debug
EOH
destination = "secrets/env"
env = true


@ -0,0 +1,75 @@
job "core:diplonat" {
datacenters = ["neptune", "jupiter", "corrin", "bespin"]
type = "system"
priority = 90
constraint {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
update {
max_parallel = 3
stagger = "20s"
}
group "diplonat" {
task "diplonat" {
driver = "nix2"
config {
packages = [
"#iptables",
"#bash",
"#coreutils",
"git+https://git.deuxfleurs.fr/Deuxfleurs/diplonat.git?ref=stun&rev=f5fc635b75dfa17b83a8db4893a7be206b4f9892"
]
command = "diplonat"
}
user = "root"
restart {
interval = "30m"
attempts = 2
delay = "15s"
mode = "delay"
}
template {
data = "{{ key \"secrets/consul/consul-ca.crt\" }}"
destination = "etc/diplonat/consul-ca.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.crt\" }}"
destination = "etc/diplonat/consul-client.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.key\" }}"
destination = "etc/diplonat/consul-client.key"
}
template {
data = <<EOH
DIPLONAT_REFRESH_TIME=60
DIPLONAT_EXPIRATION_TIME=300
DIPLONAT_IPV6_ONLY=true
DIPLONAT_CONSUL_NODE_NAME={{ env "attr.unique.hostname" }}
DIPLONAT_CONSUL_URL=https://localhost:8501
DIPLONAT_CONSUL_CA_CERT=/etc/diplonat/consul-ca.crt
DIPLONAT_CONSUL_CLIENT_CERT=/etc/diplonat/consul-client.crt
DIPLONAT_CONSUL_CLIENT_KEY=/etc/diplonat/consul-client.key
RUST_LOG=debug
RUST_BACKTRACE=1
EOH
destination = "secrets/env"
env = true
}
resources {
memory = 100
}
}
}
}


@ -1,4 +1,4 @@
job "core-system" { job "core:tricot" {
datacenters = ["neptune", "jupiter", "corrin", "bespin"] datacenters = ["neptune", "jupiter", "corrin", "bespin"]
type = "system" type = "system"
priority = 90 priority = 90
@ -13,64 +13,6 @@ job "core-system" {
stagger = "1m" stagger = "1m"
} }
/*
group "diplonat" {
task "diplonat" {
driver = "nix2"
config {
packages = [
"#iptables",
"git+https://git.deuxfleurs.fr/Deuxfleurs/diplonat.git?ref=main&rev=f306e8dc8d0e93478353ce39b6064e8c06a8bca6"
]
command = "diplonat"
}
user = "root"
restart {
interval = "30m"
attempts = 2
delay = "15s"
mode = "delay"
}
template {
data = "{{ key \"secrets/consul/consul-ca.crt\" }}"
destination = "etc/diplonat/consul-ca.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.crt\" }}"
destination = "etc/diplonat/consul-client.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.key\" }}"
destination = "etc/diplonat/consul-client.key"
}
template {
data = <<EOH
DIPLONAT_REFRESH_TIME=60
DIPLONAT_EXPIRATION_TIME=300
DIPLONAT_CONSUL_NODE_NAME={{ env "attr.unique.hostname" }}
DIPLONAT_CONSUL_URL=https://localhost:8501
DIPLONAT_CONSUL_CA_CERT=/etc/diplonat/consul-ca.crt
DIPLONAT_CONSUL_CLIENT_CERT=/etc/diplonat/consul-client.crt
DIPLONAT_CONSUL_CLIENT_KEY=/etc/diplonat/consul-client.key
RUST_LOG=debug
EOH
destination = "secrets/env"
env = true
}
resources {
memory = 40
}
}
}
*/
group "tricot" { group "tricot" {
network { network {
port "http_port" { static = 80 } port "http_port" { static = 80 }
@ -130,6 +72,7 @@ TRICOT_HTTP_BIND_ADDR=[::]:80
TRICOT_HTTPS_BIND_ADDR=[::]:443
TRICOT_METRICS_BIND_ADDR=[::]:9334
RUST_LOG=tricot=debug
+RUST_BACKTRACE=1
EOH
destination = "secrets/env"
env = true
@ -141,7 +84,7 @@ EOH
tags = [
"d53-aaaa ${meta.site}.site.staging.deuxfleurs.org",
"d53-aaaa staging.deuxfleurs.org",
-# "(diplonat (tcp_port 80))"
+"(diplonat (tcp_port 80))"
]
address_mode = "host"
}
@ -150,7 +93,7 @@ EOH
name = "tricot-https" name = "tricot-https"
port = "https_port" port = "https_port"
tags = [ tags = [
# "(diplonat (tcp_port 443))" "(diplonat (tcp_port 443))"
] ]
address_mode = "host" address_mode = "host"
} }


@ -6,8 +6,10 @@ db_engine = "lmdb"
replication_mode = "3" replication_mode = "3"
rpc_bind_addr = "[{{ env "meta.public_ipv6" }}]:3991" {{ with $a := env "attr.unique.hostname" | printf "diplonat/autodiscovery/ipv6/%s" | key | parseJSON }}
rpc_public_addr = "[{{ env "meta.public_ipv6" }}]:3991" rpc_bind_addr = "[{{ $a.address }}]:3991"
rpc_public_addr = "[{{ $a.address }}]:3991"
{{ end }}
rpc_secret = "{{ key "secrets/garage-staging/rpc_secret" | trimSpace }}" rpc_secret = "{{ key "secrets/garage-staging/rpc_secret" | trimSpace }}"
bootstrap_peers = [] bootstrap_peers = []


@ -25,6 +25,7 @@ job "garage-staging" {
config {
packages = [
"#bash", # so that we can enter a shell inside container
+"#coreutils",
"git+https://git.deuxfleurs.fr/Deuxfleurs/garage.git?ref=main&rev=0d0906b066eb76111f3b427dce1c50eac083366c",
]
command = "garage"


@ -1,60 +1,56 @@
{ config, pkgs, ... } @ args:
{
-deuxfleurs.cluster_name = "staging";
+deuxfleurs.clusterName = "staging";
# The IP range to use for the Wireguard overlay of this cluster
-deuxfleurs.cluster_prefix = "10.14.0.0";
-deuxfleurs.cluster_prefix_length = 16;
-deuxfleurs.cluster_nodes = [
-{
-hostname = "carcajou";
-site_name = "neptune";
+deuxfleurs.clusterPrefix = "10.14.0.0/16";
+deuxfleurs.clusterNodes = {
+"carcajou" = {
+siteName = "neptune";
publicKey = "7Nm7pMmyS7Nts1MB+loyD8u84ODxHPTkDu+uqQR6yDk=";
-IP = "10.14.1.2";
+address = "10.14.1.2";
endpoint = "77.207.15.215:33722";
-}
-{
-hostname = "caribou";
-site_name = "neptune";
+};
+"caribou" = {
+siteName = "neptune";
publicKey = "lABn/axzD1jkFulX8c+K3B3CbKXORlIMDDoe8sQVxhs=";
-IP = "10.14.1.3";
+address = "10.14.1.3";
endpoint = "77.207.15.215:33723";
-}
-{
-hostname = "origan";
-site_name = "jupiter";
+};
+"origan" = {
+siteName = "jupiter";
publicKey = "smBQYUS60JDkNoqkTT7TgbpqFiM43005fcrT6472llI=";
-IP = "10.14.2.33";
+address = "10.14.2.33";
endpoint = "82.64.238.84:33733";
-}
-{
-hostname = "piranha";
-site_name = "corrin";
+};
+"piranha" = {
+siteName = "corrin";
publicKey = "m9rLf+233X1VColmeVrM/xfDGro5W6Gk5N0zqcf32WY=";
-IP = "10.14.3.1";
+address = "10.14.3.1";
#endpoint = "82.120.233.78:33721";
-}
-{
-hostname = "df-pw5";
-site_name = "bespin";
+};
+"df-pw5" = {
+siteName = "bespin";
publicKey = "XLOYoMXF+PO4jcgfSVAk+thh4VmWx0wzWnb0xs08G1s=";
-IP = "10.14.4.1";
+address = "10.14.4.1";
endpoint = "bitfrost.fiber.shirokumo.net:33734";
-}
-];
+};
+};
+deuxfleurs.wgautomeshPort = 1667;
services.wgautomesh.logLevel = "debug";
# Bootstrap IPs for Consul cluster,
# these are IPs on the Wireguard overlay
services.consul.extraConfig.retry_join = [
-"10.14.1.1" # cariacou
-"10.14.1.2" # carcajou
"10.14.1.3" # caribou
+"10.14.2.33" # origan
+"10.14.3.1" # piranha
];
-deuxfleurs.admin_accounts = {
+deuxfleurs.adminAccounts = {
lx = [
# Keys for accessing nodes from outside
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJpaBZdYxHqMxhv2RExAOa7nkKhPBOHupMP3mYaZ73w9 lx@lindy"
@ -142,16 +138,16 @@
enable = true;
port = substituter_port;
openFirewall = false;
-bindAddress = config.deuxfleurs.cluster_ip;
+bindAddress = "0.0.0.0";
package = pkgs.haskellPackages.nix-serve-ng;
};
nix.settings.substituters = map
-({ IP, ... }: "http://${IP}:${builtins.toString substituter_port}")
-(builtins.filter
-({ site_name, IP, ...}:
-(IP != config.deuxfleurs.cluster_ip
-&& site_name == config.deuxfleurs.site_name))
-config.deuxfleurs.cluster_nodes);
+({ address, ... }: "http://${address}:${builtins.toString substituter_port}")
+(builtins.attrValues (pkgs.lib.filterAttrs
+(hostname: { siteName, ...}:
+(hostname != config.deuxfleurs.hostName
+&& siteName == config.deuxfleurs.siteName))
+config.deuxfleurs.clusterNodes));
})
];
}


@ -9,3 +9,4 @@ piranha.polyno.me,2a01:cb05:8984:3c00:223:24ff:feb0:ea82 ssh-ed25519 AAAAC3NzaC1
2a01:e0a:5e4:1d0:223:24ff:feaf:fdec ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAsZas74RT6lCZwuUOPR23nPdbSdpWORyAmRgjoiMVHK
df-pw5.machine.deuxfleurs.fr ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIK/dJIxioCkfeehxeGiZR7qquYGoqEH/YrRJ/ukEcaLH
10.14.3.1 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJnpO6zpLWsyyugOoOj+2bUow9TUrcWgURFGGaoyu+co
+192.168.1.22 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMf/ioVSSb19Slu+HZLgKt4f1/XsL+K9uMxazSWb/+nQ


@ -8,18 +8,19 @@
./remote-unlock.nix
];
+deuxfleurs.remoteUnlock = {
+networkInterface = "eno1";
+staticIP = "192.168.1.22/24";
+defaultGateway = "192.168.1.1";
+};
# Use the systemd-boot EFI boot loader.
boot.loader.systemd-boot.enable = true;
boot.loader.timeout = 20;
boot.loader.efi.canTouchEfiVariables = true;
-networking.hostName = "carcajou";
-deuxfleurs.network_interface = "eno1";
-deuxfleurs.lan_ip = "192.168.1.22";
-deuxfleurs.ipv6 = "2001:910:1204:1::22";
-deuxfleurs.cluster_ip = "10.14.1.2";
+deuxfleurs.hostName = "carcajou";
+deuxfleurs.staticIPv6.address = "2001:910:1204:1::22";
system.stateVersion = "21.05";
}


@ -8,14 +8,9 @@
boot.loader.timeout = 20;
boot.loader.efi.canTouchEfiVariables = true;
-networking.hostName = "caribou";
-deuxfleurs.network_interface = "eno1";
-deuxfleurs.lan_ip = "192.168.1.23";
-deuxfleurs.ipv6 = "2001:910:1204:1::23";
-deuxfleurs.cluster_ip = "10.14.1.3";
-deuxfleurs.is_raft_server = true;
+deuxfleurs.hostName = "caribou";
+deuxfleurs.staticIPv6.address = "2001:910:1204:1::23";
+deuxfleurs.isRaftServer = true;
system.stateVersion = "21.05";
}


@ -9,12 +9,9 @@
boot.loader.efi.efiSysMountPoint = "/boot";
boot.loader.timeout = 20;
-networking.hostName = "df-pw5";
-deuxfleurs.network_interface = "eno1";
-deuxfleurs.lan_ip = "192.168.5.130";
-deuxfleurs.ipv6 = "2a02:a03f:6510:5102:223:24ff:feb0:e8a7";
-deuxfleurs.cluster_ip = "10.14.4.1";
-deuxfleurs.is_raft_server = false;
+deuxfleurs.hostName = "df-pw5";
+deuxfleurs.staticIPv4.address = "192.168.5.130";
+deuxfleurs.staticIPv6.address = "2a02:a03f:6510:5102:223:24ff:feb0:e8a7";
+system.stateVersion = "22.11";
}


@ -8,14 +8,10 @@
boot.loader.timeout = 20;
boot.loader.efi.canTouchEfiVariables = true;
-networking.hostName = "origan";
-deuxfleurs.network_interface = "eno1";
-deuxfleurs.lan_ip = "192.168.1.33";
-deuxfleurs.ipv6 = "2a01:e0a:5e4:1d0:223:24ff:feaf:fdec";
-deuxfleurs.cluster_ip = "10.14.2.33";
-deuxfleurs.is_raft_server = true;
+deuxfleurs.hostName = "origan";
+deuxfleurs.staticIPv4.address = "192.168.1.33";
+deuxfleurs.staticIPv6.address = "2a01:e0a:5e4:1d0:223:24ff:feaf:fdec";
+deuxfleurs.isRaftServer = true;
system.stateVersion = "22.11";
}


@ -8,14 +8,10 @@
boot.loader.timeout = 20;
boot.loader.efi.canTouchEfiVariables = true;
-networking.hostName = "piranha";
-deuxfleurs.network_interface = "eno1";
-deuxfleurs.lan_ip = "192.168.1.25";
-deuxfleurs.ipv6 = "2a01:cb05:9142:7400:223:24ff:feb0:ea82";
-deuxfleurs.cluster_ip = "10.14.3.1";
-deuxfleurs.is_raft_server = true;
+deuxfleurs.hostName = "piranha";
+deuxfleurs.staticIPv4.address = "192.168.1.25";
+deuxfleurs.staticIPv6.address = "2a01:cb05:9142:7400:223:24ff:feb0:ea82";
+deuxfleurs.isRaftServer = true;
system.stateVersion = "22.11";
}


@ -1,13 +1,7 @@
{ config, pkgs, ... }:
{
-deuxfleurs.site_name = "bespin";
-deuxfleurs.lan_default_gateway = "192.168.5.254";
-deuxfleurs.ipv6_default_gateway = "2a02:a03f:6510:5102::1";
-deuxfleurs.lan_ip_prefix_length = 24;
-deuxfleurs.ipv6_prefix_length = 64;
-deuxfleurs.nameservers = [ "192.168.5.254" ];
-deuxfleurs.cname_target = "bespin.site.staging.deuxfleurs.org.";
-networking.firewall.allowedTCPPorts = [ 80 443 ];
+deuxfleurs.siteName = "bespin";
+deuxfleurs.staticIPv4.defaultGateway = "192.168.5.254";
+deuxfleurs.cnameTarget = "bespin.site.staging.deuxfleurs.org.";
}


@ -1,14 +1,8 @@
{ config, pkgs, ... }:
{
-deuxfleurs.site_name = "corrin";
-deuxfleurs.lan_default_gateway = "192.168.1.1";
-deuxfleurs.ipv6_default_gateway = "fe80::7ec1:77ff:fe3e:bb90";
-deuxfleurs.lan_ip_prefix_length = 24;
-deuxfleurs.ipv6_prefix_length = 64;
-deuxfleurs.nameservers = [ "192.168.1.1" ];
-deuxfleurs.cname_target = "corrin.site.staging.deuxfleurs.org.";
-deuxfleurs.public_ipv4 = "2.13.96.213";
-networking.firewall.allowedTCPPorts = [ 80 443 ];
+deuxfleurs.siteName = "corrin";
+deuxfleurs.staticIPv4.defaultGateway = "192.168.1.1";
+deuxfleurs.cnameTarget = "corrin.site.staging.deuxfleurs.org.";
+deuxfleurs.publicIPv4 = "2.13.96.213";
}


@ -1,16 +1,7 @@
{ config, pkgs, ... }:
{
-deuxfleurs.site_name = "jupiter";
-deuxfleurs.lan_default_gateway = "192.168.1.1";
-deuxfleurs.ipv6_default_gateway = "fe80::9038:202a:73a0:e73b";
-deuxfleurs.lan_ip_prefix_length = 24;
-deuxfleurs.ipv6_prefix_length = 64;
-deuxfleurs.nameservers = [ "192.168.1.1" ];
-deuxfleurs.cname_target = "jupiter.site.staging.deuxfleurs.org.";
-# no public ipv4 is used for the staging cluster on Jupiter
-# deuxfleurs.public_ipv4 = "???";
-networking.firewall.allowedTCPPorts = [ 80 443 ];
+deuxfleurs.siteName = "jupiter";
+deuxfleurs.staticIPv4.defaultGateway = "192.168.1.1";
+deuxfleurs.cnameTarget = "jupiter.site.staging.deuxfleurs.org.";
}


@ -1,17 +1,6 @@
{ config, pkgs, ... }:
{
-deuxfleurs.site_name = "neptune";
-deuxfleurs.lan_default_gateway = "192.168.1.1";
-deuxfleurs.ipv6_default_gateway = "2001:910:1204:1::1";
-deuxfleurs.lan_ip_prefix_length = 24;
-deuxfleurs.ipv6_prefix_length = 64;
-deuxfleurs.nameservers = [ "192.168.1.1" ];
-deuxfleurs.cname_target = "neptune.site.staging.deuxfleurs.org.";
-# no public ipv4 is used for the staging cluster on Neptune,
-# because the Internet connection is already used for the prod cluster
-# deuxfleurs.public_ipv4 = "77.207.15.215";
-networking.firewall.allowedTCPPorts = [ 80 443 ];
+deuxfleurs.siteName = "neptune";
+deuxfleurs.cnameTarget = "neptune.site.staging.deuxfleurs.org.";
}


@ -10,7 +10,7 @@ Host origan
HostName origan.df.trinity.fr.eu.org
Host piranha
-ProxyJump caribou.machine.deuxfleurs.fr
+ProxyJump carcajou.machine.deuxfleurs.fr
HostName 10.14.3.1
#HostName piranha.polyno.me


@ -36,9 +36,6 @@
boot.kernel.sysctl = {
"vm.max_map_count" = 262144;
-"net.ipv6.conf.all.accept_ra" = 0;
-"net.ipv6.conf.all.autoconf" = 0;
-"net.ipv6.conf.all.use_tempaddr" = 0;
};
services.journald.extraConfig = ''


@ -6,19 +6,86 @@ in
with builtins;
with pkgs.lib;
{
-options.deuxfleurs =
-let wg_node = with types; submodule {
-options = {
-hostname = mkOption {
+options.deuxfleurs = with types; {
+# Parameters for individual nodes
+hostName = mkOption {
+description = "Node name";
type = str;
-description = "Host name";
-};
-site_name = mkOption {
+};
+staticIPv4.address = mkOption {
+description = "IP address (with prefix length) of this node on the local network interface";
+type = nullOr str;
+default = null;
+};
+staticIPv6.address = mkOption {
+description = "Static public IPv6 address of this node";
+type = nullOr str;
+};
+isRaftServer = mkOption {
+description = "Make this node a RAFT server for the Nomad and Consul deployments";
+type = bool;
+default = false;
+};
+# Parameters that generally vary between sites
+siteName = mkOption {
+description = "Site (availability zone) on which this node is deployed";
+type = str;
+};
+staticIPv4.defaultGateway = mkOption {
+description = "IPv4 address of the default route on the local network interface";
+type = nullOr str;
+default = null;
+};
+staticIPv4.prefixLength = mkOption {
+description = "IPv4 prefix length for LAN addresses, only used with static configuration";
+type = int;
+default = 24;
+};
+staticIPv6.defaultGateway = mkOption {
+description = ''
+IPv6 address of the default route on the local network interface.
+IPv6 Router Advertisements (RA) will be totally disabled if this is set.
+'';
+type = nullOr str;
+default = null;
+};
+staticIPv6.prefixLength = mkOption {
+description = "IPv6 prefix length, used only when router advertisements are disabled.";
+type = int;
+default = 64;
+};
+publicIPv4 = mkOption {
+description = "Public IPv4 through which this node is accessible (possibly after port opening using DiploNAT), for domain names that are updated by D53";
+type = nullOr str;
+default = null;
+};
+cnameTarget = mkOption {
+description = "DNS CNAME target to use for services hosted in this site, for domain names that are updated by D53";
+type = nullOr str;
+default = null;
+};
+# Parameters common to all nodes
+clusterName = mkOption {
+description = "Name of this Deuxfleurs deployment";
+type = str;
+};
+clusterPrefix = mkOption {
+description = "IP address prefix (and length) for the Wireguard overlay network";
+type = str;
+};
+clusterNodes = mkOption {
+description = "Nodes that are part of the cluster";
+type = attrsOf (submodule {
+options = {
+siteName = mkOption {
type = nullOr str;
description = "Site where the node is located";
default = null;
};
-IP = mkOption {
+address = mkOption {
type = str;
description = "IP Address in the Wireguard network";
};
@ -32,102 +99,29 @@ in
description = "Wireguard endpoint on the public Internet"; description = "Wireguard endpoint on the public Internet";
}; };
}; };
});
}; };
in adminAccounts = mkOption {
{
# Parameters for individual nodes
network_interface = mkOption {
description = "Network interface name to configure";
type = types.str;
};
lan_ip = mkOption {
description = "IP address of this node on the local network interface";
type = types.str;
};
lan_ip_prefix_length = mkOption {
description = "Prefix length associated with lan_ip";
type = types.int;
};
ipv6 = mkOption {
description = "Public IPv6 address of this node";
type = types.str;
};
ipv6_prefix_length = mkOption {
description = "Prefix length associated with ipv6 ip";
type = types.int;
};
cluster_ip = mkOption {
description = "IP address of this node on the Wesher mesh network";
type = types.str;
};
wireguard_port = mkOption {
description = "Port for incoming Wireguard VPN connections";
type = types.port;
default = 33799;
};
is_raft_server = mkOption {
description = "Make this node a RAFT server for the Nomad and Consul deployments";
type = types.bool;
default = false;
};
# Parameters that generally vary between sites
lan_default_gateway = mkOption {
description = "IPv4 address of the default route on the local network interface";
type = types.str;
};
ipv6_default_gateway = mkOption {
description = "IPv6 address of the default IPv6 gateway for the targeted net interface";
type = types.str;
};
site_name = mkOption {
description = "Site (availability zone) on which this node is deployed";
type = types.str;
};
public_ipv4 = mkOption {
description = "Public IPv4 through which this node is accessible (possibly after port opening using DiploNAT), for domain names that are updated by D53";
type = types.nullOr types.str;
default = null;
};
cname_target = mkOption {
description = "DNS CNAME target to use for services hosted in this site, for domain names that are updated by D53";
type = types.nullOr types.str;
default = null;
};
nameservers = mkOption {
description = "External DNS servers to use";
type = types.listOf types.str;
};
# Parameters common to all nodes
cluster_name = mkOption {
description = "Name of this Deuxfleurs deployment";
type = types.str;
};
cluster_prefix = mkOption {
description = "IP address prefix for the Wireguard overlay network";
type = types.str;
};
cluster_prefix_length = mkOption {
description = "IP address prefix length for the Wireguard overlay network";
type = types.int;
default = 16;
};
cluster_nodes = mkOption {
description = "Nodes that are part of the cluster";
type = types.listOf wg_node;
};
admin_accounts = mkOption {
description = "List of users having an admin account on cluster nodes, maps user names to a list of authorized SSH keys"; description = "List of users having an admin account on cluster nodes, maps user names to a list of authorized SSH keys";
type = types.attrsOf (types.listOf types.str); type = attrsOf (listOf str);
}; };
bootstrap = mkOption { bootstrap = mkOption {
description = "Whether to enable bootstrapping for Nomad and Consul"; description = "Whether to enable bootstrapping for Nomad and Consul";
type = types.bool; type = bool;
default = false; default = false;
}; };
# Options that generally stay to their default value
wireguardPort = mkOption {
description = "Port for incoming Wireguard VPN connections";
type = port;
default = 33799;
};
wgautomeshPort = mkOption {
description = "Gossip port for wgautomesh";
type = port;
default = 1666;
};
}; };
imports = [ imports = [
@ -135,69 +129,92 @@ in
];
config =
-let node_meta = {
-"site" = cfg.site_name;
-"public_ipv6" = cfg.ipv6;
+let
+clusterNodeCfg = getAttr cfg.hostName cfg.clusterNodes;
+clusterAddress = clusterNodeCfg.address;
+node_meta = {
+"site" = cfg.siteName;
} //
-(if cfg.public_ipv4 != null
-then { "public_ipv4" = cfg.public_ipv4; }
+(if cfg.staticIPv6.address != null
+then { "public_ipv6" = cfg.staticIPv6.address; }
else {}) //
-(if cfg.cname_target != null
-then { "cname_target" = cfg.cname_target; }
+(if cfg.publicIPv4 != null
+then { "public_ipv4" = cfg.publicIPv4; }
+else {}) //
+(if cfg.cnameTarget != null
+then { "cname_target" = cfg.cnameTarget; }
else {});
in
{
+networking.hostName = cfg.hostName;
# Configure admin accounts on all nodes
-users.users = builtins.mapAttrs (name: publicKeys: {
+users.users = mapAttrs (name: publicKeys: {
isNormalUser = true;
extraGroups = [ "wheel" ];
openssh.authorizedKeys.keys = publicKeys;
-}) cfg.admin_accounts;
+}) cfg.adminAccounts;
# Configure network interfaces
networking.useDHCP = false;
networking.useNetworkd = true;
-systemd.network.networks = {
-"10-uplink" = {
-matchConfig = {
-# We could preprend "en* eth*" to match all ethernet interfaces
-Name = "${cfg.network_interface}";
-};
-networkConfig = {
-IPv6AcceptRA = false;
-LinkLocalAddressing = "no";
-};
-address = [
-"${cfg.lan_ip}/${toString cfg.lan_ip_prefix_length}"
-"${cfg.ipv6}/${toString cfg.ipv6_prefix_length}"
-];
-routes = [
+systemd.network.networks."10-uplink" =
+let
+# IPv4 configuration is obtained by DHCP by default,
+# unless a static v4 address and default gateway are given
+noDHCP = cfg.staticIPv4.address != null && cfg.staticIPv4.defaultGateway != null;
+# IPv6 configuration is obtained through router advertisements (RA),
+# possibly using a static token to ensure a static IPv6,
+# unless a static v6 address and default gateway are given,
+# in which case RAs are disabled entirely
+noRA = cfg.staticIPv6.address != null && cfg.staticIPv6.defaultGateway != null;
+staticV6 = cfg.staticIPv6.address != null;
+in
{
+matchConfig.Name = "en* eth*";
+address =
+optional noDHCP "${cfg.staticIPv4.address}/${toString cfg.staticIPv4.prefixLength}"
+++ optional noRA "${cfg.staticIPv6.address}/${toString cfg.staticIPv6.prefixLength}";
+routes =
+optional noDHCP {
routeConfig = {
-Gateway = cfg.lan_default_gateway;
+Gateway = cfg.staticIPv4.defaultGateway;
# GatewayOnLink - Takes a boolean. If set to true, the kernel does not have to check if the gateway is reachable directly by the current machine (i.e., attached to the local network), so that we can insert the route in the kernel table without it being complained about. Defaults to "no".
GatewayOnLink = true;
};
-}
-{
+} ++ optional noRA {
routeConfig = {
-Gateway = cfg.ipv6_default_gateway;
+Gateway = cfg.staticIPv6.defaultGateway;
GatewayOnLink = true;
};
-}
-];
-};
-};
-# Configure Unbound DNS to redirect to Consul queries under .consul
-# and to pass directly to public DNS resolver all others
+# Dynamic IPv4: enable DHCP but not for DNS servers
+networkConfig.DHCP = mkIf (!noDHCP) "ipv4";
+dhcpV4Config.UseDNS = mkIf (!noDHCP) false;
+# Dynamic IPv6: only fetch default route, use static
+# address and no DNS servers
+ipv6AcceptRAConfig.Token = mkIf (!noRA && staticV6) "static:${cfg.staticIPv6.address}";
+ipv6AcceptRAConfig.UseDNS = mkIf (!noRA) false;
+# Static IPv6: disable all router advertisements and
+# link-local addresses
+networkConfig.IPv6AcceptRA = mkIf noRA false;
+networkConfig.LinkLocalAddressing = mkIf noRA "no";
+};
+# Configure Unbound as a central DNS server for everything
+# - is its own recursor (applies DNSSec) for everything,
+# no need to declare an outside nameserver
+# - redirects to Consul queries under .consul
services.unbound = {
enable = true;
+enableRootTrustAnchor = false; # disable DNSSEC as it causes issues
settings = {
server = {
-interface = [ "127.0.0.1" "${cfg.lan_ip}" "172.17.0.1" ];
+interface = [ "127.0.0.1" "172.17.0.1" ];
domain-insecure = [ "consul." ];
local-zone = [ "consul. nodefault" ];
log-servfail = true;
@ -207,73 +224,58 @@ in
logfile = "/dev/stdout";
access-control = [
"127.0.0.0/8 allow"
-"${cfg.lan_ip}/${toString cfg.lan_ip_prefix_length} allow"
"172.17.0.0/16 allow"
-"10.83.0.0/16 allow"
+"192.168.0.0/16 allow"
+"${cfg.clusterPrefix} allow"
];
};
-forward-zone = [
+stub-zone = [
# Forward .consul queries to Consul daemon
{
name = "consul.";
-forward-addr = "${cfg.lan_ip}@8600";
-forward-no-cache = true;
-forward-tcp-upstream = false;
-forward-tls-upstream = false;
-}
-# Forward all queries to our ISP's nameserver
-{
-name = ".";
-forward-addr = cfg.nameservers;
-forward-first = true;
+stub-addr = "${clusterAddress}@8600";
+stub-no-cache = true;
+stub-tcp-upstream = false;
+stub-tls-upstream = false;
}
];
};
resolveLocalQueries = true;
};
-# Reach Unbound through the IP of our LAN interface,
-# instead of 127.0.0.1 (this will also work in Docker containers)
-networking.nameservers = [
-cfg.lan_ip
-];
+# TODO remove this ?
services.resolved.enable = false;
# Configure Wireguard VPN between all nodes
networking.wireguard.interfaces.wg0 = {
-ips = [ "${cfg.cluster_ip}/16" ];
-listenPort = cfg.wireguard_port;
+ips = [ "${clusterAddress}/16" ];
+listenPort = cfg.wireguardPort;
privateKeyFile = "/var/lib/deuxfleurs/wireguard-keys/private";
mtu = 1420;
};
services.wgautomesh = {
enable = true;
interface = "wg0";
-gossipPort = 1666;
+gossipPort = cfg.wgautomeshPort;
gossipSecretFile = "/var/lib/wgautomesh/gossip_secret";
persistFile = "/var/lib/wgautomesh/state";
upnpForwardPublicPort =
-let
-us = filter ({ hostname, ...}: hostname == config.networking.hostName) cfg.cluster_nodes;
-in
-if length us > 0 && (head us).endpoint != null then
-strings.toInt (lists.last (split ":" (head us).endpoint))
+if clusterNodeCfg.endpoint != null then
+strings.toInt (lists.last (split ":" clusterNodeCfg.endpoint))
else null;
-peers = map ({ publicKey, endpoint, IP, ... }: {
-address = IP;
+peers = attrValues (mapAttrs (hostname: { publicKey, endpoint, address, ... }: {
+inherit address endpoint;
pubkey = publicKey;
-endpoint = endpoint;
-}) cfg.cluster_nodes;
+}) cfg.clusterNodes);
};
# Old code for wg-quick, we can use this as a fallback if we fail to make wgautomesh work
# systemd.services."wg-quick-wg0".after = [ "unbound.service" ];
# networking.wg-quick.interfaces.wg0 = {
-# address = [ "${cfg.cluster_ip}/16" ];
-# listenPort = cfg.wireguard_port;
+# address = [ "${clusterAddress}/16" ];
+# listenPort = cfg.wireguardPort;
# privateKeyFile = "/var/lib/deuxfleurs/wireguard-keys/private";
# mtu = 1420;
-# peers = map ({ publicKey, endpoint, IP, ... }: {
+# peers = map ({ publicKey, endpoint, address, ... }: {
# inherit publicKey endpoint;
-# allowedIPs = [ "${IP}/32" ];
+# allowedIPs = [ "${address}/32" ];
# persistentKeepalive = 25;
# };
@ -287,25 +289,25 @@ in
'';
# Configure /etc/hosts to link all hostnames to their Wireguard IP
-networking.extraHosts = builtins.concatStringsSep "\n" (map
-({ hostname, IP, ...}: "${IP} ${hostname}")
-cfg.cluster_nodes);
+networking.extraHosts = concatStringsSep "\n" (attrValues (mapAttrs
+(hostname: { address, ...}: "${address} ${hostname}")
+cfg.clusterNodes));
# Enable Hashicorp Consul & Nomad
services.consul.enable = true;
systemd.services.consul.after = [ "wg-quick-wg0.service" ];
services.consul.extraConfig =
-(if cfg.is_raft_server
+(if cfg.isRaftServer
then { server = true; }
// (if cfg.bootstrap then { bootstrap_expect = 3; } else {})
else {}) //
{
inherit node_meta;
-datacenter = cfg.cluster_name;
+datacenter = cfg.clusterName;
ui_config = {
enabled = true;
};
-bind_addr = "${cfg.cluster_ip}";
+bind_addr = "${clusterAddress}";
addresses = {
https = "0.0.0.0";
@ -326,9 +328,6 @@ in
verify_outgoing = true;
verify_server_hostname = true;
};
-systemd.services.consul.serviceConfig = {
-AmbientCapabilities = "CAP_NET_BIND_SERVICE";
-};
+# TODO remove this ?
services.nomad.enable = true;
systemd.services.nomad.after = [ "wg-quick-wg0.service" ];
@ -338,18 +337,18 @@ in
pkgs.zstd
];
services.nomad.settings =
-(if cfg.is_raft_server
+(if cfg.isRaftServer
then {
server = { enabled = true; }
// (if cfg.bootstrap then { bootstrap_expect = 3; } else {});
} else {}) //
{
-region = cfg.cluster_name;
-datacenter = cfg.site_name;
+region = cfg.clusterName;
+datacenter = cfg.siteName;
advertise = {
-rpc = "${cfg.cluster_ip}";
-http = "${cfg.cluster_ip}";
-serf = "${cfg.cluster_ip}";
+rpc = "${clusterAddress}";
+http = "${clusterAddress}";
+serf = "${clusterAddress}";
};
consul = {
address = "localhost:8501";
@ -402,31 +401,32 @@ in
allowedTCPPorts = [
# Allow anyone to connect on SSH port
-(builtins.head ({ openssh.ports = [22]; } // config.services).openssh.ports)
+(head ({ openssh.ports = [22]; } // config.services).openssh.ports)
];
allowedUDPPorts = [
# Allow peers to connect to Wireguard
-cfg.wireguard_port
+cfg.wireguardPort
];
# Allow specific hosts access to specific things in the cluster
extraCommands = ''
-# Allow everything from router (usefull for UPnP/IGD)
-iptables -A INPUT -s ${cfg.lan_default_gateway} -j ACCEPT
+# Allow UDP packets comming from port 1900 from a local address,
+# these are necessary for UPnP/IGD
+iptables -A INPUT -s 192.168.0.0/16 -p udp --sport 1900 -j ACCEPT
# Allow docker containers to access all ports
iptables -A INPUT -s 172.17.0.0/16 -j ACCEPT
# Allow other nodes on VPN to access all ports
-iptables -A INPUT -s ${cfg.cluster_prefix}/${toString cfg.cluster_prefix_length} -j ACCEPT
+iptables -A INPUT -s ${cfg.clusterPrefix} -j ACCEPT
'';
# When stopping firewall, delete all rules that were configured manually above
extraStopCommands = ''
-iptables -D INPUT -s ${cfg.lan_default_gateway} -j ACCEPT
+iptables -D INPUT -s 192.168.0.0/16 -p udp --sport 1900 -j ACCEPT
iptables -D INPUT -s 172.17.0.0/16 -j ACCEPT
-iptables -D INPUT -s ${cfg.cluster_prefix}/${toString cfg.cluster_prefix_length} -j ACCEPT
+iptables -D INPUT -s ${cfg.clusterPrefix} -j ACCEPT
'';
};
};
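With Unbound now acting as its own recursor and stubbing .consul to the local Consul agent, name resolution can be spot-checked on any node. A minimal sketch (the first name is one the job files above already rely on):
$ dig @127.0.0.1 consul.service.prod.consul AAAA +short
$ dig @127.0.0.1 deuxfleurs.fr +short   # resolved recursively, no upstream forwarder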


@ -1,24 +1,43 @@
{ config, pkgs, ... }:
+let
+cfg = config.deuxfleurs.remoteUnlock;
+in
with builtins;
with pkgs.lib;
{
+options.deuxfleurs.remoteUnlock = {
+networkInterface = mkOption {
+description = "Network interface to configure with static IP";
+type = types.str;
+};
+staticIP = mkOption {
+description = "IP address (with prefix length) of this node on the local network interface";
+type = types.str;
+};
+defaultGateway = mkOption {
+description = "IP address of default gateway";
+type = types.str;
+};
+};
config = {
boot.initrd.availableKernelModules = [ "pps_core" "ptp" "e1000e" ];
boot.initrd.network.enable = true;
boot.initrd.network.ssh = {
enable = true;
port = 222;
-authorizedKeys = concatLists (mapAttrsToList (name: user: user) config.deuxfleurs.admin_accounts);
+authorizedKeys = concatLists (mapAttrsToList (name: user: user) config.deuxfleurs.adminAccounts);
hostKeys = [ "/var/lib/deuxfleurs/remote-unlock/ssh_host_ed25519_key" ];
};
boot.initrd.network.postCommands = ''
-ip addr add ${config.deuxfleurs.lan_ip}/${toString config.deuxfleurs.lan_ip_prefix_length} dev ${config.deuxfleurs.network_interface}
-ip link set dev ${config.deuxfleurs.network_interface} up
-ip route add default via ${config.deuxfleurs.lan_default_gateway} dev ${config.deuxfleurs.network_interface}
+ip addr add ${cfg.staticIP} dev ${cfg.networkInterface}
+ip link set dev ${cfg.networkInterface} up
+ip route add default via ${cfg.defaultGateway} dev ${cfg.networkInterface}
ip a
ip route
-ping -c 4 ${config.deuxfleurs.lan_default_gateway}
+ping -c 4 ${cfg.defaultGateway}
echo 'echo run cryptsetup-askpass to unlock drives' >> /root/.profile
'';
};
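A hedged sketch of how the initrd unlock added here is used in practice, with the port and static address taken from the options above and carcajou's node.nix:
$ ssh -p 222 root@192.168.1.22
# then, at the initrd shell:
$ cryptsetup-askpass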