We now have IPv6 activated on our network interfaces!
This commit is contained in:
commit
3b75213d40
14 changed files with 109 additions and 38 deletions
|
@ -4,12 +4,16 @@
|
|||
|
||||
For each machine, **one by one** do:
|
||||
- Check that cluster is healthy
|
||||
- Check gluster
|
||||
- `sudo gluster peer status`
|
||||
- `sudo gluster volume status all` (check the Online column; only `Y` must appear)
|
||||
- Check that Nomad is healthy
|
||||
- `nomad server members`
|
||||
- `nomad node status`
|
||||
- Check that Consul is healthy
|
||||
- `consul members`
|
||||
- Check that Postgres is healthy
|
||||
- Run `ansible-playbook -i production --limit <machine> site.yml`
|
||||
- Run `ansible-playbook -i production.yml --limit <machine> site.yml`
|
||||
- Reboot
|
||||
- Check that cluster is healthy
|
||||
|
||||
|
|
|
@ -1,4 +0,0 @@
|
|||
[cluster_nodes]
|
||||
veterini ansible_host=fbx-rennes2.machine.deuxfleurs.fr ansible_port=110 ansible_user=root public_ip=192.168.1.2 private_ip=192.168.1.2 interface=eno1 dns_server=80.67.169.40
|
||||
silicareux ansible_host=fbx-rennes2.machine.deuxfleurs.fr ansible_port=111 ansible_user=root public_ip=192.168.1.3 private_ip=192.168.1.3 interface=eno1 dns_server=80.67.169.40
|
||||
wonse ansible_host=fbx-rennes2.machine.deuxfleurs.fr ansible_port=112 ansible_user=root public_ip=192.168.1.4 private_ip=192.168.1.4 interface=eno1 dns_server=80.67.169.40
|
31
os/config/production.yml
Normal file
31
os/config/production.yml
Normal file
|
@ -0,0 +1,31 @@
|
|||
cluster_nodes:
|
||||
hosts:
|
||||
veterini:
|
||||
ansible_host: fbx-rennes2.machine.deuxfleurs.fr
|
||||
ansible_port: 110
|
||||
ansible_user: root
|
||||
ipv4: 192.168.1.2
|
||||
ipv6: 2a01:e35:2fdc:dbe0::2
|
||||
interface: eno1
|
||||
dns_server: 80.67.169.40
|
||||
ansible_python_interpreter: python3
|
||||
|
||||
silicareux:
|
||||
ansible_host: fbx-rennes2.machine.deuxfleurs.fr
|
||||
ansible_port: 111
|
||||
ansible_user: root
|
||||
ipv4: 192.168.1.3
|
||||
ipv6: 2a01:e35:2fdc:dbe0::3
|
||||
interface: eno1
|
||||
dns_server: 80.67.169.40
|
||||
ansible_python_interpreter: python3
|
||||
|
||||
wonse:
|
||||
ansible_host: fbx-rennes2.machine.deuxfleurs.fr
|
||||
ansible_port: 112
|
||||
ansible_user: root
|
||||
ipv4: 192.168.1.4
|
||||
ipv6: 2a01:e35:2fdc:dbe0::4
|
||||
interface: eno1
|
||||
dns_server: 80.67.169.40
|
||||
ansible_python_interpreter: python3
|
|
@ -1,6 +1,6 @@
|
|||
- name: "Set consul version"
|
||||
set_fact:
|
||||
consul_version: 1.8.0
|
||||
consul_version: 1.8.4
|
||||
|
||||
- name: "Download and install Consul for x86_64"
|
||||
unarchive:
|
||||
|
@ -21,6 +21,3 @@
|
|||
|
||||
- name: "Enable consul systemd service at boot"
|
||||
service: name=consul state=started enabled=yes daemon_reload=yes
|
||||
|
||||
- name: "Deploy resolv.conf to use Consul"
|
||||
template: src=resolv.conf.j2 dest=/etc/resolv.conf
|
||||
|
|
|
@ -1,14 +1,14 @@
|
|||
{
|
||||
"data_dir": "/var/lib/consul",
|
||||
"bind_addr": "0.0.0.0",
|
||||
"advertise_addr": "{{ public_ip }}",
|
||||
"advertise_addr": "{{ ipv4 }}",
|
||||
"addresses": {
|
||||
"dns": "0.0.0.0",
|
||||
"http": "0.0.0.0"
|
||||
},
|
||||
"retry_join": [
|
||||
{% for selected_host in groups['cluster_nodes']|reject("sameas", ansible_fqdn) %}{# @FIXME: Reject doesn't work #}
|
||||
"{{ hostvars[selected_host]['private_ip'] }}" {{ "," if not loop.last else "" }}
|
||||
"{{ hostvars[selected_host]['ipv4'] }}" {{ "," if not loop.last else "" }}
|
||||
{% endfor %}
|
||||
],
|
||||
"bootstrap_expect": 3,
|
||||
|
|
|
@ -1,2 +1,2 @@
|
|||
nameserver {{ private_ip }}
|
||||
nameserver {{ ipv4 }}
|
||||
nameserver {{ dns_server }}
|
||||
|
|
|
@ -1,12 +0,0 @@
|
|||
# WARNING!! When rules.{v4,v6} are changed, the whole iptables configuration is reloaded.
|
||||
# This creates issues with Docker, which injects its own configuration in iptables when it starts.
|
||||
# In practice, most (all?) containers will break if rules.{v4,v6} are changed,
|
||||
# and docker will have to be restarted.
|
||||
|
||||
|
||||
*filter
|
||||
:INPUT DROP [0:0]
|
||||
:FORWARD DROP [0:0]
|
||||
:OUTPUT ACCEPT [0:0]
|
||||
COMMIT
|
||||
|
|
@ -1,8 +1,11 @@
|
|||
- name: "Deploy iptablesv4 configuration"
|
||||
template: src=rules.v4.j2 dest=/etc/iptables/rules.v4
|
||||
template: src=rules.v4 dest=/etc/iptables/rules.v4
|
||||
|
||||
- name: "Deploy iptablesv6 configuration"
|
||||
copy: src=rules.v6 dest=/etc/iptables/rules.v6
|
||||
template: src=rules.v6 dest=/etc/iptables/rules.v6
|
||||
|
||||
- name: "Deploy systemd-networkd configuration"
|
||||
template: src=en.network dest=/etc/systemd/network/en.network
|
||||
|
||||
- name: "Activate IP forwarding"
|
||||
sysctl:
|
||||
|
|
9
os/config/roles/network/templates/en.network
Normal file
9
os/config/roles/network/templates/en.network
Normal file
|
@ -0,0 +1,9 @@
|
|||
[Match]
|
||||
Name={{ interface }}
|
||||
|
||||
[Network]
|
||||
Address={{ ipv4 }}/24
|
||||
Address={{ ipv6 }}/64
|
||||
Gateway=192.168.1.254
|
||||
DNS={{ ipv4 }}
|
||||
DNS={{ dns_server }}
|
|
@ -3,15 +3,18 @@
|
|||
:FORWARD DROP [0:0]
|
||||
:OUTPUT ACCEPT [0:0]
|
||||
|
||||
# Internet Control Message Protocol
|
||||
-A INPUT -p icmp -j ACCEPT
|
||||
|
||||
# Administration
|
||||
-A INPUT -p tcp --dport 22 -j ACCEPT
|
||||
|
||||
# Cluster
|
||||
# Diplonat needs everything open to communicate via IGD with the router
|
||||
-A INPUT -s 192.168.1.254 -j ACCEPT
|
||||
-A INPUT -s 82.253.205.190 -j ACCEPT
|
||||
|
||||
# Cluster
|
||||
{% for selected_host in groups['cluster_nodes'] %}
|
||||
-A INPUT -s {{ hostvars[selected_host]['public_ip'] }} -j ACCEPT
|
||||
-A INPUT -s {{ hostvars[selected_host]['private_ip'] }} -j ACCEPT
|
||||
-A INPUT -s {{ hostvars[selected_host]['ipv4'] }} -j ACCEPT
|
||||
{% endfor %}
|
||||
|
||||
# Local
|
40
os/config/roles/network/templates/rules.v6
Normal file
40
os/config/roles/network/templates/rules.v6
Normal file
|
@ -0,0 +1,40 @@
|
|||
*filter
|
||||
:INPUT DROP [0:0]
|
||||
:FORWARD DROP [0:0]
|
||||
:OUTPUT ACCEPT [0:0]
|
||||
|
||||
# Internet Control Message Protocol
|
||||
# (required)
|
||||
-A INPUT -p icmp -j ACCEPT
|
||||
-A INPUT -p ipv6-icmp -j ACCEPT
|
||||
|
||||
# Administration
|
||||
-A INPUT -p tcp --dport 22 -j ACCEPT
|
||||
|
||||
# Cluster
|
||||
{% for selected_host in groups['cluster_nodes'] %}
|
||||
-A INPUT -s {{ hostvars[selected_host]['ipv6'] }} -j ACCEPT
|
||||
{% endfor %}
|
||||
|
||||
# Local
|
||||
-A INPUT -i docker0 -j ACCEPT
|
||||
-A INPUT -s ::1/128 -j ACCEPT
|
||||
-A INPUT -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT
|
||||
|
||||
COMMIT
|
||||
|
||||
*nat
|
||||
:PREROUTING ACCEPT [0:0]
|
||||
:INPUT ACCEPT [0:0]
|
||||
:OUTPUT ACCEPT [0:0]
|
||||
:POSTROUTING ACCEPT [0:0]
|
||||
COMMIT
|
||||
|
||||
*mangle
|
||||
:PREROUTING ACCEPT [0:0]
|
||||
:INPUT ACCEPT [0:0]
|
||||
:FORWARD ACCEPT [0:0]
|
||||
:OUTPUT ACCEPT [0:0]
|
||||
:POSTROUTING ACCEPT [0:0]
|
||||
COMMIT
|
||||
|
|
@ -1,6 +1,6 @@
|
|||
- name: "Set nomad version"
|
||||
set_fact:
|
||||
nomad_version: 0.12.0-beta2
|
||||
nomad_version: 0.12.6
|
||||
|
||||
- name: "Download and install Nomad for x86_64"
|
||||
unarchive:
|
||||
|
|
|
@ -5,9 +5,9 @@ addresses {
|
|||
}
|
||||
|
||||
advertise {
|
||||
http = "{{ public_ip }}"
|
||||
rpc = "{{ public_ip }}"
|
||||
serf = "{{ public_ip }}"
|
||||
http = "{{ ipv4 }}"
|
||||
rpc = "{{ ipv4 }}"
|
||||
serf = "{{ ipv4 }}"
|
||||
}
|
||||
|
||||
data_dir = "/var/lib/nomad"
|
||||
|
|
|
@ -48,7 +48,7 @@
|
|||
nfs.export-volumes: "off"
|
||||
cluster.lookup-optimize: "on"
|
||||
|
||||
cluster: "{% for selected_host in groups['cluster_nodes'] %}{{ hostvars[selected_host]['private_ip'] }}{{ ',' if not loop.last else '' }}{% endfor %}"
|
||||
cluster: "{% for selected_host in groups['cluster_nodes'] %}{{ hostvars[selected_host]['ipv4'] }}{{ ',' if not loop.last else '' }}{% endfor %}"
|
||||
run_once: true
|
||||
|
||||
- name: "Create mountpoint"
|
||||
|
@ -61,7 +61,7 @@
|
|||
tags: gluster-fstab
|
||||
mount:
|
||||
path: /mnt/glusterfs
|
||||
src: "{{ private_ip }}:/donnees"
|
||||
src: "{{ ipv4 }}:/donnees"
|
||||
fstype: glusterfs
|
||||
opts: "defaults,_netdev,noauto,x-systemd.automount"
|
||||
state: present
|
||||
|
|
Reference in a new issue