We now have IPv6 activated on our network interfaces!

Quentin 2020-10-22 18:55:29 +02:00
commit 3b75213d40
14 changed files with 109 additions and 38 deletions


@@ -4,12 +4,16 @@
 For each machine, **one by one** do:
 - Check that cluster is healthy
-  - `sudo gluster peer status`
-  - `sudo gluster volume status all` (check the Online column, only `Y` must appear)
+  - Check gluster
+    - `sudo gluster peer status`
+    - `sudo gluster volume status all` (check the Online column, only `Y` must appear)
   - Check that Nomad is healthy
+    - `nomad server members`
+    - `nomad node status`
   - Check that Consul is healthy
+    - `consul members`
   - Check that Postgres is healthy
-- Run `ansible-playbook -i production --limit <machine> site.yml`
+- Run `ansible-playbook -i production.yml --limit <machine> site.yml`
 - Reboot
 - Check that cluster is healthy
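With the inventory converted from INI (`production`) to YAML (`production.yml`), the rolling update reads, for example, `ansible-playbook -i production.yml --limit veterini site.yml` for the first machine (hostname taken from the new inventory below).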


@@ -1,4 +0,0 @@
-[cluster_nodes]
-veterini ansible_host=fbx-rennes2.machine.deuxfleurs.fr ansible_port=110 ansible_user=root public_ip=192.168.1.2 private_ip=192.168.1.2 interface=eno1 dns_server=80.67.169.40
-silicareux ansible_host=fbx-rennes2.machine.deuxfleurs.fr ansible_port=111 ansible_user=root public_ip=192.168.1.3 private_ip=192.168.1.3 interface=eno1 dns_server=80.67.169.40
-wonse ansible_host=fbx-rennes2.machine.deuxfleurs.fr ansible_port=112 ansible_user=root public_ip=192.168.1.4 private_ip=192.168.1.4 interface=eno1 dns_server=80.67.169.40

os/config/production.yml (new file, 31 lines)

@@ -0,0 +1,31 @@
+cluster_nodes:
+  hosts:
+    veterini:
+      ansible_host: fbx-rennes2.machine.deuxfleurs.fr
+      ansible_port: 110
+      ansible_user: root
+      ipv4: 192.168.1.2
+      ipv6: 2a01:e35:2fdc:dbe0::2
+      interface: eno1
+      dns_server: 80.67.169.40
+      ansible_python_interpreter: python3
+    silicareux:
+      ansible_host: fbx-rennes2.machine.deuxfleurs.fr
+      ansible_port: 111
+      ansible_user: root
+      ipv4: 192.168.1.3
+      ipv6: 2a01:e35:2fdc:dbe0::3
+      interface: eno1
+      dns_server: 80.67.169.40
+      ansible_python_interpreter: python3
+    wonse:
+      ansible_host: fbx-rennes2.machine.deuxfleurs.fr
+      ansible_port: 112
+      ansible_user: root
+      ipv4: 192.168.1.4
+      ipv6: 2a01:e35:2fdc:dbe0::4
+      interface: eno1
+      dns_server: 80.67.169.40
+      ansible_python_interpreter: python3
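Each host now exposes a single `ipv4` variable (replacing the redundant `public_ip`/`private_ip` pair, which held identical values in the old inventory) plus a new `ipv6` address. The templates below read them through `hostvars`; as a quick illustration of how they resolve per host, a hypothetical debug task (not part of this commit) could be:

    - name: "Show per-host addresses (illustration only)"
      debug:
        msg: "{{ inventory_hostname }}: v4={{ ipv4 }}, v6={{ ipv6 }}"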


@@ -1,6 +1,6 @@
 - name: "Set consul version"
   set_fact:
-    consul_version: 1.8.0
+    consul_version: 1.8.4

 - name: "Download and install Consul for x86_64"
   unarchive:
@@ -21,6 +21,3 @@
 - name: "Enable consul systemd service at boot"
   service: name=consul state=started enabled=yes daemon_reload=yes
-
-- name: "Deploy resolv.conf to use Consul"
-  template: src=resolv.conf.j2 dest=/etc/resolv.conf


@@ -1,14 +1,14 @@
 {
   "data_dir": "/var/lib/consul",
   "bind_addr": "0.0.0.0",
-  "advertise_addr": "{{ public_ip }}",
+  "advertise_addr": "{{ ipv4 }}",
   "addresses": {
     "dns": "0.0.0.0",
     "http": "0.0.0.0"
   },
   "retry_join": [
 {% for selected_host in groups['cluster_nodes']|reject("sameas", ansible_fqdn) %}{# @FIXME: Reject doesn't work #}
-    "{{ hostvars[selected_host]['private_ip'] }}" {{ "," if not loop.last else "" }}
+    "{{ hostvars[selected_host]['ipv4'] }}" {{ "," if not loop.last else "" }}
 {% endfor %}
   ],
   "bootstrap_expect": 3,


@@ -1,2 +1,2 @@
-nameserver {{ private_ip }}
+nameserver {{ ipv4 }}
 nameserver {{ dns_server }}


@@ -1,12 +0,0 @@
-# WARNING!! When rules.{v4,v6} are changed, the whole iptables configuration is reloaded.
-# This creates issues with Docker, which injects its own configuration in iptables when it starts.
-# In practice, most (all?) containers will break if rules.{v4,v6} are changed,
-# and docker will have to be restarted.
-
-*filter
-:INPUT DROP [0:0]
-:FORWARD DROP [0:0]
-:OUTPUT ACCEPT [0:0]
-COMMIT


@@ -1,8 +1,11 @@
 - name: "Deploy iptablesv4 configuration"
-  template: src=rules.v4.j2 dest=/etc/iptables/rules.v4
+  template: src=rules.v4 dest=/etc/iptables/rules.v4

 - name: "Deploy iptablesv6 configuration"
-  copy: src=rules.v6 dest=/etc/iptables/rules.v6
+  template: src=rules.v6 dest=/etc/iptables/rules.v6

+- name: "Deploy systemd-networkd configuration"
+  template: src=en.network dest=/etc/systemd/network/en.network
+
 - name: "Activate IP forwarding"
   sysctl:


@@ -0,0 +1,9 @@
+[Match]
+Name={{ interface }}
+
+[Network]
+Address={{ ipv4 }}/24
+Address={{ ipv6 }}/64
+Gateway=192.168.1.254
+DNS={{ ipv4 }}
+DNS={{ dns_server }}
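systemd-networkd only applies these static `Address=` lines after it re-reads its configuration; the reboot in the deployment checklist covers that, but a hypothetical extra task (not in this commit) could apply the change immediately:

    - name: "Restart systemd-networkd to apply the new addresses"
      service: name=systemd-networkd state=restarted

Note that `Gateway=192.168.1.254` and the `/24` and `/64` prefix lengths are hard-coded for this LAN rather than templated.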


@@ -3,15 +3,18 @@
 :FORWARD DROP [0:0]
 :OUTPUT ACCEPT [0:0]

+# Internet Control Message Protocol
+-A INPUT -p icmp -j ACCEPT
+
 # Administration
 -A INPUT -p tcp --dport 22 -j ACCEPT

-# Cluster
+# Diplonat needs everything open to communicate via IGD with the router
 -A INPUT -s 192.168.1.254 -j ACCEPT
+-A INPUT -s 82.253.205.190 -j ACCEPT
+
+# Cluster
 {% for selected_host in groups['cluster_nodes'] %}
--A INPUT -s {{ hostvars[selected_host]['public_ip'] }} -j ACCEPT
--A INPUT -s {{ hostvars[selected_host]['private_ip'] }} -j ACCEPT
+-A INPUT -s {{ hostvars[selected_host]['ipv4'] }} -j ACCEPT
 {% endfor %}

 # Local


@@ -0,0 +1,40 @@
+*filter
+:INPUT DROP [0:0]
+:FORWARD DROP [0:0]
+:OUTPUT ACCEPT [0:0]
+
+# Internet Control Message Protocol
+# (required)
+-A INPUT -p icmp -j ACCEPT
+-A INPUT -p ipv6-icmp -j ACCEPT
+
+# Administration
+-A INPUT -p tcp --dport 22 -j ACCEPT
+
+# Cluster
+{% for selected_host in groups['cluster_nodes'] %}
+-A INPUT -s {{ hostvars[selected_host]['ipv6'] }} -j ACCEPT
+{% endfor %}
+
+# Local
+-A INPUT -i docker0 -j ACCEPT
+-A INPUT -s ::1/128 -j ACCEPT
+-A INPUT -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT
+COMMIT
+
+*nat
+:PREROUTING ACCEPT [0:0]
+:INPUT ACCEPT [0:0]
+:OUTPUT ACCEPT [0:0]
+:POSTROUTING ACCEPT [0:0]
+COMMIT
+
+*mangle
+:PREROUTING ACCEPT [0:0]
+:INPUT ACCEPT [0:0]
+:FORWARD ACCEPT [0:0]
+:OUTPUT ACCEPT [0:0]
+:POSTROUTING ACCEPT [0:0]
+COMMIT
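The ICMPv6 accept rule is marked `(required)` because neighbour discovery and router advertisements ride on ICMPv6; dropping it under a default-DROP INPUT chain would break IPv6 connectivity altogether. A hypothetical sanity check (not in this commit) could validate the rendered ruleset before it is loaded at boot:

    - name: "Validate the rendered IPv6 ruleset"
      command: ip6tables-restore --test /etc/iptables/rules.v6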


@@ -1,6 +1,6 @@
 - name: "Set nomad version"
   set_fact:
-    nomad_version: 0.12.0-beta2
+    nomad_version: 0.12.6

 - name: "Download and install Nomad for x86_64"
   unarchive:


@@ -5,9 +5,9 @@ addresses {
 }

 advertise {
-  http = "{{ public_ip }}"
-  rpc = "{{ public_ip }}"
-  serf = "{{ public_ip }}"
+  http = "{{ ipv4 }}"
+  rpc = "{{ ipv4 }}"
+  serf = "{{ ipv4 }}"
 }

 data_dir = "/var/lib/nomad"


@@ -48,7 +48,7 @@
       nfs.export-volumes: "off"
       cluster.lookup-optimize: "on"
-    cluster: "{% for selected_host in groups['cluster_nodes'] %}{{ hostvars[selected_host]['private_ip'] }}{{ ',' if not loop.last else '' }}{% endfor %}"
+    cluster: "{% for selected_host in groups['cluster_nodes'] %}{{ hostvars[selected_host]['ipv4'] }}{{ ',' if not loop.last else '' }}{% endfor %}"
   run_once: true

 - name: "Create mountpoint"
@@ -61,7 +61,7 @@
   tags: gluster-fstab
   mount:
     path: /mnt/glusterfs
-    src: "{{ private_ip }}:/donnees"
+    src: "{{ ipv4 }}:/donnees"
     fstype: glusterfs
     opts: "defaults,_netdev,noauto,x-systemd.automount"
     state: present