Compare commits

This repository has been archived on 2023-03-15. You can view files and clone it, but cannot push or open issues or pull requests.

main...alexvm (10 commits)

SHA1 Message Date
edb0a3737a WIP NextCloud using Garage backend, fix app download urls 2020-07-15 16:06:28 +02:00
24118ab426 Make things work on cluster devx.adnab.me 2020-07-15 16:06:08 +02:00
65af077d5a Fix iptables not liking comment on same line 2020-07-15 16:03:51 +02:00
d3ada90d83 Fix nomad ip address
Remove the network_interface parameter in nomad config
This means that nomad will now autodetect its own ip address
by looking at the default route.
Thus nodes in a LAN behind a NAT will get their LAN address,
and internet nodes will get their public address.
They won't get their VPN addresses.
This seems not to break Consul's use of VPN addresses to address
services, and fixes attr.unique.network.ip-address for DiploNAT.
2020-07-15 16:03:51 +02:00
3bf830713f don't retrieve wireguard privkeys in ansible 2020-07-15 16:03:51 +02:00
207d1fa278 Allow external VPN nodes, make multi-DC deployment work 2020-07-15 16:03:42 +02:00
bee7e10256 Document Wireguard config 2020-07-15 16:03:42 +02:00
a4f9aa2d98 Set up wireguard in dev cluster 2020-07-15 16:03:33 +02:00
1a16fc7f9e Add gitea config example 2020-07-15 15:49:52 +02:00
3174179100 Achieve a working install on my VMs 2020-07-15 15:49:52 +02:00
22 changed files with 419 additions and 38 deletions


@ -13,3 +13,59 @@ For each machine, **one by one** do:
- Reboot
- Check that cluster is healthy
## New configuration with Wireguard
This configuration makes all of the cluster nodes appear in a single
virtual private network, enabling them to communicate on all ports even if they
are behind NATs at different locations. The VPN also provides a layer of
security by encrypting all communications that occur over the internet.
### Prerequisites
Nodes must all have two publicly accessible ports (potentially routed through a NAT):
- A port that maps to the SSH port (port 22) of the machine, allowing TCP connections
- A port that maps to the Wireguard port (port 51820) of the machine, allowing UDP connections
### Configuration
The network role sets up a Wireguard interface called `wgdeuxfleurs` and
establishes a full mesh between all cluster machines. The following
configuration variables are necessary in the node list (an example entry is
given after the list):
- `ansible_host`: hostname that Ansible connects to, usually the same as `public_ip`
- `ansible_user`: user that Ansible connects as to run commands over SSH
- `ansible_port`: if SSH is not bound publicly on port 22, set the port here
- `public_ip`: the public IP of the machine, or of the NAT router behind which the machine sits
- `public_vpn_port`: the public port number on `public_ip` that maps to port 51820 of the machine
- `vpn_ip`: the IP address to assign to the node on the VPN (each node must have a different one)
- `dns_server`: any DNS resolver, typically your ISP's DNS or a public one such as OpenDNS
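For illustration, a full node entry combining these variables could look like this (the hostname and all addresses are placeholders; the `ansible/lxvm` inventory added in this branch contains real entries):
```
# Hypothetical inventory entry: all values are placeholders to adapt to your setup
node1 ansible_host=203.0.113.10 ansible_user=root ansible_port=22 public_ip=203.0.113.10 public_vpn_port=51820 vpn_ip=10.68.70.11 dns_server=208.67.222.222
```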
The new iptables configuration prevents direct communication between
cluster machines, except on UDP port 51820, which carries the VPN traffic.
All intra-cluster communication must now go through the VPN interface (thus
machines refer to one another using their VPN IP addresses and never their
public or LAN addresses).
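Concretely, for each peer the rendered `rules.v4` only accepts UDP port 51820 from the peer's public address and accepts everything from its VPN address, roughly as follows (addresses are illustrative):
```
# Accept Wireguard traffic from the peer's public IP, UDP 51820 only
-A INPUT -s 203.0.113.10 -p udp --dport 51820 -j ACCEPT
# Accept all traffic coming from the peer's VPN IP
-A INPUT -s 10.68.70.11 -j ACCEPT
```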
### Restarting Nomad
When switching to the Wireguard configuration, machines stop using their
LAN addresses and switch to their VPN addresses. Consul seems to handle
this correctly, but Nomad does not. For Nomad to restart correctly, its Raft
module must be informed of the new IP addresses of the cluster members. This is
done by creating, on all nodes, the file `/var/lib/nomad/server/raft/peers.json`
containing the list of server addresses of the cluster. Here is an example of
such a file:
```
["10.68.70.11:4647","10.68.70.12:4647","10.68.70.13:4647"]
```
Once this file is created and identical on all nodes, restart Nomad on all
nodes. The cluster should resume operation normally.
The same procedure can also be applied to fix Consul, however my tests showed
that it didn't break when IP addresses changed (it just took a while to come
back up).
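A possible sequence for this procedure is sketched below; it assumes Nomad is managed by systemd under the unit name `nomad` and reuses the VPN addresses from the example above:
```
# Run on every node; peers.json must be identical everywhere
sudo systemctl stop nomad
echo '["10.68.70.11:4647","10.68.70.12:4647","10.68.70.13:4647"]' \
  | sudo tee /var/lib/nomad/server/raft/peers.json
sudo systemctl start nomad
# Verify that the servers have formed a cluster again
nomad server members
```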

ansible/lxvm (new file)

@ -0,0 +1,6 @@
[cluster_nodes]
#ubuntu1 ansible_host=192.168.42.10
debian1 ansible_host=192.168.42.20 ansible_user=root public_ip=192.168.42.20 dns_server=208.67.222.222 vpn_ip=10.68.70.11 public_vpn_port=51820 datacenter=belair interface=enp1s0
debian2 ansible_host=192.168.42.21 ansible_user=root public_ip=192.168.42.21 dns_server=208.67.222.222 vpn_ip=10.68.70.12 public_vpn_port=51820 datacenter=belair interface=enp1s0
debian3 ansible_host=192.168.42.22 ansible_user=root public_ip=192.168.42.22 dns_server=208.67.222.222 vpn_ip=10.68.70.13 public_vpn_port=51820 datacenter=belair interface=enp1s0
ovh1 ansible_host=51.75.4.20 ansible_user=debian ansible_become=yes public_ip=51.75.4.20 dns_server=208.67.222.222 vpn_ip=10.68.70.20 public_vpn_port=51820 datacenter=saturne interface=eth0


@ -3,6 +3,7 @@
that:
- "ansible_architecture == 'aarch64' or ansible_architecture == 'armv7l' or ansible_architecture == 'x86_64'"
- "ansible_os_family == 'Debian'"
- "ansible_distribution_version == '10'"
- name: "Upgrade system"
apt:
@ -15,6 +16,7 @@
- name: "Install base tools"
apt:
name:
- sudo
- vim
- htop
- screen


@ -1,17 +1,18 @@
{
"datacenter": "deuxfleurs",
"data_dir": "/var/lib/consul",
"bind_addr": "0.0.0.0",
- "advertise_addr": "{{ public_ip }}",
+ "advertise_addr": "{{ vpn_ip }}",
"addresses": {
"dns": "0.0.0.0",
"http": "0.0.0.0"
},
"retry_join": [
- {% for selected_host in groups['cluster_nodes']|reject("sameas", ansible_fqdn) %}{# @FIXME: Reject doesn't work #}
+ {% for selected_host in groups['cluster_nodes']|difference([inventory_hostname]) %}{# @FIXME: Reject doesn't work #}
- "{{ hostvars[selected_host]['private_ip'] }}" {{ "," if not loop.last else "" }}
+ "{{ hostvars[selected_host]['vpn_ip'] }}" {{ "," if not loop.last else "" }}
{% endfor %}
],
- "bootstrap_expect": 3,
+ "bootstrap_expect": {{ groups['cluster_nodes']|length }},
"server": true,
"ui": true,
"ports": {


@ -1,2 +1,2 @@
- nameserver {{ private_ip }}
+ nameserver {{ vpn_ip }}
nameserver {{ dns_server }}


@ -0,0 +1,5 @@
---
- name: reload wireguard
service:
name: wg-quick@wgdeuxfleurs
state: restarted


@ -1,3 +1,6 @@
- name: "Create iptables configuration direcetory"
file: path=/etc/iptables/ state=directory
- name: "Deploy iptablesv4 configuration" - name: "Deploy iptablesv4 configuration"
template: src=rules.v4.j2 dest=/etc/iptables/rules.v4 template: src=rules.v4.j2 dest=/etc/iptables/rules.v4
@ -9,3 +12,48 @@
name: net.ipv4.ip_forward name: net.ipv4.ip_forward
value: "1" value: "1"
sysctl_set: yes sysctl_set: yes
# Wireguard configuration
- name: "Enable backports repository"
apt_repository:
repo: deb http://deb.debian.org/debian buster-backports main
state: present
- name: "Install wireguard"
apt:
name:
- wireguard
- wireguard-tools
- "linux-headers-{{ ansible_kernel }}"
state: present
- name: "Create wireguard configuration direcetory"
file: path=/etc/wireguard/ state=directory
- name: "Check if wireguard private key exists"
stat: path=/etc/wireguard/privkey
register: wireguard_privkey
- name: "Create wireguard private key"
shell: wg genkey > /etc/wireguard/privkey
when: wireguard_privkey.stat.exists == false
notify:
- reload wireguard
- name: "Secure wireguard private key"
file: path=/etc/wireguard/privkey mode=0600
- name: "Retrieve wireguard public key"
shell: wg pubkey < /etc/wireguard/privkey
register: wireguard_pubkey
- name: "Deploy wireguard configuration"
template: src=wireguard.conf.j2 dest=/etc/wireguard/wgdeuxfleurs.conf mode=0600
notify:
- reload wireguard
- name: "Enable Wireguard systemd service at boot"
service: name=wg-quick@wgdeuxfleurs state=started enabled=yes daemon_reload=yes
- name: "Create /tmp/wgdeuxfleurs.template.conf example configuration file for external nodes"
local_action: template src=wireguard_external.conf.j2 dest=/tmp/wgdeuxfleurs.template.conf


@ -7,14 +7,21 @@
-A INPUT -p tcp --dport 22 -j ACCEPT
# Cluster
- -A INPUT -s 192.168.1.254 -j ACCEPT
- -A INPUT -s 82.253.205.190 -j ACCEPT
{% for selected_host in groups['cluster_nodes'] %}
- -A INPUT -s {{ hostvars[selected_host]['public_ip'] }} -j ACCEPT
+ -A INPUT -s {{ hostvars[selected_host]['public_ip'] }} -p udp --dport 51820 -j ACCEPT
- -A INPUT -s {{ hostvars[selected_host]['private_ip'] }} -j ACCEPT
+ -A INPUT -s {{ hostvars[selected_host]['vpn_ip'] }} -j ACCEPT
{% endfor %}
{% for host in other_vpn_nodes %}
-A INPUT -s {{ host.public_ip }} -p udp --dport 51820 -j ACCEPT
-A INPUT -s {{ host.vpn_ip }} -j ACCEPT
{% endfor %}
- # Local
+ # Rennes
-A INPUT -s 93.2.173.168 -j ACCEPT
-A INPUT -s 82.253.205.190 -j ACCEPT
# router
-A INPUT -s 192.168.1.254 -j ACCEPT
-A INPUT -i docker0 -j ACCEPT
-A INPUT -s 127.0.0.1/8 -j ACCEPT
-A INPUT -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT


@ -0,0 +1,20 @@
[Interface]
Address = {{ vpn_ip }}
PostUp = wg set %i private-key <(cat /etc/wireguard/privkey)
ListenPort = 51820
{% for selected_host in groups['cluster_nodes']|difference([inventory_hostname]) %}
[Peer]
PublicKey = {{ hostvars[selected_host].wireguard_pubkey.stdout }}
Endpoint = {{ hostvars[selected_host].public_ip }}:{{ hostvars[selected_host].public_vpn_port }}
AllowedIPs = {{ hostvars[selected_host].vpn_ip }}/32
PersistentKeepalive = 25
{% endfor %}
{% for host in other_vpn_nodes %}
[Peer]
PublicKey = {{ host.pubkey }}
Endpoint = {{ host.public_ip }}:{{ host.public_vpn_port }}
AllowedIPs = {{ host.vpn_ip }}/32
PersistentKeepalive = 25
{% endfor %}


@ -0,0 +1,27 @@
# Template configuration file for VPN nodes that are not in the cluster
# The private key should be stored as /etc/wireguard/privkey
# External nodes should be registered in network/vars/main.yml
[Interface]
Address = <INSERT YOUR IP HERE, IT SHOULD MATCH THE ONE IN vars/main.yml>
PostUp = wg set %i private-key <(cat /etc/wireguard/privkey)
ListenPort = 51820
# Cluster nodes
{% for selected_host in groups['cluster_nodes'] %}
[Peer]
PublicKey = {{ hostvars[selected_host].wireguard_pubkey.stdout }}
Endpoint = {{ hostvars[selected_host].public_ip }}:{{ hostvars[selected_host].public_vpn_port }}
AllowedIPs = {{ hostvars[selected_host].vpn_ip }}/32
PersistentKeepalive = 25
{% endfor %}
# External nodes
# TODO: remove yourself from here
{% for host in other_vpn_nodes %}
[Peer]
PublicKey = {{ host.pubkey }}
Endpoint = {{ host.public_ip }}:{{ host.public_vpn_port }}
AllowedIPs = {{ host.vpn_ip }}/32
PersistentKeepalive = 25
{% endfor %}


@ -0,0 +1,6 @@
---
other_vpn_nodes:
- pubkey: "QUiUNMk70TEQ75Ut7Uqikr5uGVSXmx8EGNkGM6tANlg="
public_ip: "37.187.118.206"
public_vpn_port: "51820"
vpn_ip: "10.68.70.101"


@ -1,7 +1,11 @@
- name: "Set nomad version" - name: "Set Nomad version"
set_fact: set_fact:
nomad_version: 0.12.0-beta2 nomad_version: 0.12.0-beta2
- name: "Set CNI version"
set_fact:
cni_plugins_version: 0.8.6
- name: "Download and install Nomad for x86_64" - name: "Download and install Nomad for x86_64"
unarchive: unarchive:
src: "https://releases.hashicorp.com/nomad/{{ nomad_version }}/nomad_{{ nomad_version }}_linux_amd64.zip" src: "https://releases.hashicorp.com/nomad/{{ nomad_version }}/nomad_{{ nomad_version }}_linux_amd64.zip"
@ -10,6 +14,19 @@
when:
- "ansible_architecture == 'x86_64'"
- name: "Create /opt/cni/bin"
file: path=/opt/cni/bin state=directory
- name: "Download and install CNI plugins for x86_64"
unarchive:
src: "https://github.com/containernetworking/plugins/releases/download/v{{ cni_plugins_version }}/cni-plugins-linux-amd64-v{{ cni_plugins_version }}.tgz"
dest: /opt/cni/bin
remote_src: yes
when:
- "ansible_architecture == 'x86_64'"
notify:
- restart nomad
- name: "Create Nomad configuration directory" - name: "Create Nomad configuration directory"
file: path=/etc/nomad/ state=directory file: path=/etc/nomad/ state=directory


@ -1,3 +1,5 @@
datacenter = "{{ datacenter }}"
addresses {
http = "0.0.0.0"
rpc = "0.0.0.0"
@ -5,16 +7,16 @@ addresses {
}
advertise {
- http = "{{ public_ip }}"
+ http = "{{ vpn_ip }}"
- rpc = "{{ public_ip }}"
+ rpc = "{{ vpn_ip }}"
- serf = "{{ public_ip }}"
+ serf = "{{ vpn_ip }}"
}
data_dir = "/var/lib/nomad"
server {
enabled = true
- bootstrap_expect = 3
+ bootstrap_expect = {{ groups['cluster_nodes']|length }}
}
consul {
@ -25,10 +27,20 @@ client {
enabled = true
#cpu_total_compute = 4000
servers = ["127.0.0.1:4648"]
- network_interface = "{{ interface }}"
options {
docker.privileged.enabled = "true"
docker.volumes.enabled = "true"
}
network_interface = "wgdeuxfleurs"
host_network "default" {
#cidr = "{{ vpn_ip }}/24"
interface = "wgdeuxfleurs"
}
host_network "public" {
#cidr = "{{ public_ip }}/32"
interface = "{{ interface }}"
}
}


@ -48,7 +48,7 @@
nfs.export-volumes: "off"
cluster.lookup-optimize: "on"
- cluster: "{% for selected_host in groups['cluster_nodes'] %}{{ hostvars[selected_host]['private_ip'] }}{{ ',' if not loop.last else '' }}{% endfor %}"
+ cluster: "{% for selected_host in groups['cluster_nodes'] %}{{ hostvars[selected_host]['vpn_ip'] }}{{ ',' if not loop.last else '' }}{% endfor %}"
run_once: true
- name: "Create mountpoint"
@ -61,7 +61,7 @@
tags: gluster-fstab
mount:
path: /mnt/glusterfs
- src: "{{ private_ip }}:/donnees"
+ src: "{{ vpn_ip }}:/donnees"
fstype: glusterfs
opts: "defaults,_netdev,noauto,x-systemd.automount"
state: present


@ -6,6 +6,7 @@ $CONFIG = array (
'secret' => '{{ key "secrets/nextcloud/secret" | trimSpace }}',
'trusted_domains' => array (
0 => 'nextcloud.deuxfleurs.fr',
1 => 'nextcloud.devx.adnab.me',
),
'memcache.local' => '\\OC\\Memcache\\APCu',


@ -1,5 +1,5 @@
job "directory2" { job "directory2" {
datacenters = ["dc1"] datacenters = ["dc1", "saturne", "belair"]
type = "service" type = "service"
constraint { constraint {
@ -94,7 +94,7 @@ job "directory2" {
"guichet", "guichet",
"traefik.enable=true", "traefik.enable=true",
"traefik.frontend.entryPoints=https,http", "traefik.frontend.entryPoints=https,http",
"traefik.frontend.rule=Host:guichet.deuxfleurs.fr", "traefik.frontend.rule=Host:guichet.devx.adnab.me",
] ]
port = "web_port" port = "web_port"
address_mode = "host" address_mode = "host"


@ -59,7 +59,7 @@ job "garage" {
"garage_api", "garage_api",
"traefik.enable=true", "traefik.enable=true",
"traefik.frontend.entryPoints=https,http", "traefik.frontend.entryPoints=https,http",
"traefik.frontend.rule=Host:garage.deuxfleurs.fr" "traefik.frontend.rule=Host:garage.deuxfleurs.fr,garage.devx.adnab.me"
] ]
port = "api_port" port = "api_port"
address_mode = "host" address_mode = "host"

nomad/gitea.hcl (new file)

@ -0,0 +1,72 @@
job "git" {
datacenters = ["dc1"]
type = "service"
constraint {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
constraint {
attribute = "${attr.unique.hostname}"
value = "debian1"
}
group "gitea" {
task "server" {
driver = "docker"
config {
image = "gitea/gitea:1.10.3"
port_map {
web_port = 3000
ssh_port = 222
}
volumes = [
"/etc/timezone:/etc/timezone:ro",
"/etc/localtime:/etc/localtime:ro",
"/srv/gitea:/data",
]
}
env {
"USER_UID" = "1000"
"USER_GID" = "1000"
"LOG_LEVEL" = "trace"
}
resources {
memory = 100
cpu = 100
network {
port "web_port" {}
port "ssh_port" {
static = 2222
}
}
}
service {
tags = [
"gitea",
"traefik.enable=true",
"traefik.frontend.entryPoints=https,http",
"traefik.frontend.rule=Host:git.deuxfleurs.fr,git.deuxfleurs.org"
]
port = "web_port"
address_mode = "host"
name = "gitea"
check {
type = "tcp"
port = "web_port"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
}
}
}


@ -45,7 +45,7 @@ job "nextcloud" {
"nextcloud", "nextcloud",
"traefik.enable=true", "traefik.enable=true",
"traefik.frontend.entryPoints=https,http", "traefik.frontend.entryPoints=https,http",
"traefik.frontend.rule=Host:nextcloud.deuxfleurs.fr", "traefik.frontend.rule=Host:nextcloud.deuxfleurs.fr,nextcloud.devx.adnab.me",
] ]
port = "web_port" port = "web_port"
address_mode = "host" address_mode = "host"


@ -1,5 +1,5 @@
job "postgres" { job "postgres" {
datacenters = ["dc1"] datacenters = ["dc1", "belair"]
type = "system" type = "system"
priority = 90 priority = 90
@ -113,7 +113,7 @@ job "postgres" {
} }
resources { resources {
memory = 500 memory = 200
network { network {
port "psql_port" { port "psql_port" {
static = "5433" static = "5433"

nomad/traefik-netv2.hcl (new file)

@ -0,0 +1,77 @@
job "frontend" {
datacenters = ["dc1", "saturne"]
type = "service"
group "traefik" {
network {
mode = "bridge"
port "http" {
static = 80
host_network = "public"
}
port "https" {
static = 443
host_network = "public"
}
port "admin" {
static = 8082
}
}
task "server" {
driver = "docker"
config {
image = "amd64/traefik:1.7.20"
readonly_rootfs = true
volumes = [
"secrets/traefik.toml:/etc/traefik/traefik.toml",
]
}
resources {
memory = 265
}
template {
data = "{{ key \"configuration/traefik/traefik.toml\" }}"
destination = "secrets/traefik.toml"
}
}
service {
tags = [
"http",
"frontend",
"(diplonat (tcp_port 80))"
]
port = "http"
name = "traefik"
}
service {
tags = [
"https",
"frontend",
"(diplonat (tcp_port 443))"
]
port = "https"
name = "traefik"
check {
type = "http"
protocol = "http"
port = "http"
path = "/"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
}
}


@ -1,5 +1,5 @@
job "frontend" { job "frontend" {
datacenters = ["dc1"] datacenters = ["dc1", "saturne"]
type = "service" type = "service"
group "traefik" { group "traefik" {
@ -9,6 +9,7 @@ job "frontend" {
config { config {
image = "amd64/traefik:1.7.20" image = "amd64/traefik:1.7.20"
readonly_rootfs = true readonly_rootfs = true
network_mode = "host"
port_map { port_map {
https_port = 443 https_port = 443
http_port = 80 http_port = 80
@ -24,30 +25,35 @@ job "frontend" {
network { network {
port "https_port" { port "https_port" {
static = "443" static = "443"
host_network = "public"
} }
port "http_port" { port "http_port" {
static = "80" static = "80"
host_network = "public"
} }
port "adm_port" { port "adm_port" {
static = "8082" static = "8082"
} }
} }
} }
template {
data = "{{ key \"configuration/traefik/traefik.toml\" }}"
destination = "secrets/traefik.toml"
}
service { service {
tags = [ tags = [
"https", "http",
"frontend", "frontend",
"(diplonat (tcp_port 80 443))" "(diplonat (tcp_port 80))"
] ]
port = "https_port" port = "http_port"
address_mode = "host" name = "traefik-http"
name = "traefik"
check { check {
type = "http" type = "tcp"
protocol = "http" port = "http_port"
port = "adm_port"
path = "/ping"
interval = "60s" interval = "60s"
timeout = "5s" timeout = "5s"
check_restart { check_restart {
@ -57,10 +63,28 @@ job "frontend" {
} }
} }
} }
template { service {
data = "{{ key \"configuration/traefik/traefik.toml\" }}" tags = [
destination = "secrets/traefik.toml" "https",
"frontend",
"(diplonat (tcp_port 443))"
]
port = "https_port"
address_mode = "host"
name = "traefik-https"
check {
type = "tcp"
port = "https_port"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
} }
} }
} }