Network configuration #1

Merged
quentin merged 8 commits from network_config into master 2020-07-05 18:37:21 +00:00
18 changed files with 108 additions and 180 deletions

View file

@@ -1,52 +1,15 @@
 # ANSIBLE
-## Provisioning
-1. Need a public IP address
-2. Deploy Debian sid/buster
-3. Add a DNS entry like xxxx.machine.deuxfleurs.fr A 0.0.0.0 in Cloudflare + Havelock
-4. Setup the fqdn in /etc/hosts (127.0.1.1 xxxx.machine.deuxfleurs.fr)
-5. Switch the SSH port to the port 110
-6. Add the server to the ./production file
-7. Reboot machine
-8. Deploy Ansible
-9. Check that everything works as intended
-10. Update NS 1.cluster.deuxfleurs.fr
-## Useful commands
-Show all variables collected by Ansible for a given host:
-```
-ansible -i production villequin.machine.deuxfleurs.fr -m setup
-```
-Run playbook for only one host:
-```
-ansible-playbook -i production --limit villequin.machine.deuxfleurs.fr site.yml
-```
-Dump hostvars:
-```
-ansible -m debug villequin.machine.deuxfleurs.fr -i ./production -a "var=hostvars"
-```
-Deploy only one tag:
-```
-ansible-playbook -i production site.yml --tags "container"
-```
-Redeploy everything:
-```
-ansible-playbook -i production site.yml
-```
-Upgrade packages and force overwrite to fix bad packaging done by GlusterFS:
-```
-apt-get -o Dpkg::Options::="--force-overwrite" dist-upgrade -y
-```
+## How to proceed
+For each machine, **one by one** do:
+- Check that cluster is healthy
+  - `sudo gluster peer status`
+  - `sudo gluster volume status all` (check Online Col, only `Y` must appear)
+- Check that Nomad is healthy
+- Check that Consul is healthy
+- Check that Postgres is healthy
+- Run `ansible-playbook -i production --limit <machine> site.yml`
+- Reboot
+- Check that cluster is healthy
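The health checks in the new checklist can be run by hand before each deploy; a minimal sketch, assuming the stock Gluster, Nomad and Consul CLIs are available on the node (the Postgres check depends on how it is deployed and is left out):
```
sudo gluster peer status        # every peer should be "Connected"
sudo gluster volume status all  # only Y may appear in the Online column
nomad node status               # every node should be "ready"
consul members                  # every member should be "alive"
```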

ansible/README.more.md Normal file
View file

@@ -0,0 +1,52 @@
## Provisioning
1. Need a public IP address
2. Deploy Debian sid/buster
3. Add a DNS entry like xxxx.machine.deuxfleurs.fr A 0.0.0.0 in Cloudflare + Havelock
4. Setup the fqdn in /etc/hosts (127.0.1.1 xxxx.machine.deuxfleurs.fr)
5. Switch the SSH port to the port 110
6. Add the server to the ./production file
7. Reboot machine
8. Deploy Ansible
9. Check that everything works as intended
10. Update NS 1.cluster.deuxfleurs.fr
## Useful commands
Show all variables collected by Ansible for a given host:
```
ansible -i production villequin.machine.deuxfleurs.fr -m setup
```
Run playbook for only one host:
```
ansible-playbook -i production --limit villequin.machine.deuxfleurs.fr site.yml
```
Dump hostvars:
```
ansible -m debug villequin.machine.deuxfleurs.fr -i ./production -a "var=hostvars"
```
Deploy only one tag:
```
ansible-playbook -i production site.yml --tags "container"
```
Redeploy everything:
```
ansible-playbook -i production site.yml
```
Upgrade packages and force overwrite to fix bad packaging done by GlusterFS:
```
apt-get -o Dpkg::Options::="--force-overwrite" dist-upgrade -y
```
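The flags documented above compose freely; for instance, limiting a tagged deploy to a single host combines two of the commands shown here:
```
ansible-playbook -i production --limit villequin.machine.deuxfleurs.fr --tags "container" site.yml
```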

View file

@@ -11,17 +1,8 @@
       tags: kv
     - role: nomad
       tags: orchestrator
+    - role: network
+      tags: net

-# UNSAFE!! This section is disabled by default, to run it the flags -t net should be added
-# to the ansible playbook command line.
-# Reason: when rules.{v4,v6} are changed, the whole iptables configuration is reloaded.
-# This creates issues with Docker, which injects its own configuration in iptables when it starts.
-# In practice, most (all?) containers will break if rules.{v4,v6} are changed,
-# and docker will have to be restarted.
-- hosts: cluster_nodes
-  roles:
-    - role: network
-      tags: [ net, never ]

 # UNSAFE!! This section configures glusterfs. Once done, don't run it ever again as it may break stuff.
 # - role: storage
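Since the network role now sits in the default role list under the `net` tag (instead of behind the `never` tag), it runs on every full playbook pass; to reproduce the old opt-in behaviour for a given run, the standard tag flags still apply:
```
ansible-playbook -i production site.yml --tags net        # network tasks only
ansible-playbook -i production site.yml --skip-tags net   # everything except the network tasks
```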

View file

@@ -1,4 +1,4 @@
 [cluster_nodes]
-veterini ansible_host=fbx-rennes2.machine.deuxfleurs.fr ansible_port=110 ansible_user=root public_ip=192.168.1.2 private_ip=192.168.1.2 interface=eno1
+veterini ansible_host=fbx-rennes2.machine.deuxfleurs.fr ansible_port=110 ansible_user=root public_ip=192.168.1.2 private_ip=192.168.1.2 interface=eno1 dns_server=80.67.169.40
-silicareux ansible_host=fbx-rennes2.machine.deuxfleurs.fr ansible_port=111 ansible_user=root public_ip=192.168.1.3 private_ip=192.168.1.3 interface=eno1
+silicareux ansible_host=fbx-rennes2.machine.deuxfleurs.fr ansible_port=111 ansible_user=root public_ip=192.168.1.3 private_ip=192.168.1.3 interface=eno1 dns_server=80.67.169.40
-wonse ansible_host=fbx-rennes2.machine.deuxfleurs.fr ansible_port=112 ansible_user=root public_ip=192.168.1.4 private_ip=192.168.1.4 interface=eno1
+wonse ansible_host=fbx-rennes2.machine.deuxfleurs.fr ansible_port=112 ansible_user=root public_ip=192.168.1.4 private_ip=192.168.1.4 interface=eno1 dns_server=80.67.169.40
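To check that the new `dns_server` variable reaches a host, the debug module shown in the README can print a single variable (illustrative invocation):
```
ansible -i production veterini -m debug -a "var=dns_server"   # should print 80.67.169.40
```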

View file

@@ -12,6 +12,12 @@
     autoclean: yes
     autoremove: yes

+- name: "Remove base tools"
+  apt:
+    name:
+      - systemd-resolved
+    state: absent
+
 - name: "Install base tools"
   apt:
     name:
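A quick manual check after this task, assuming systemd-resolved ships as its own package as the task expects:
```
dpkg -s systemd-resolved            # should report the package is not installed
systemctl status systemd-resolved   # the unit should be gone or inactive
```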

View file

@@ -1,4 +0,0 @@
---
- name: restart consul
service: name=consul state=restarted

View file

@@ -1,16 +1,6 @@
 - name: "Set consul version"
   set_fact:
-    consul_version: 1.7.4
+    consul_version: 1.8.0

-- name: "Download and install Consul for armv7l"
-  unarchive:
-    src: "https://releases.hashicorp.com/consul/{{ consul_version }}/consul_{{ consul_version }}_linux_arm.zip"
-    dest: /usr/local/bin
-    remote_src: yes
-  when:
-    - "ansible_architecture == 'armv7l'"
-  notify:
-    - restart consul

 - name: "Download and install Consul for x86_64"
   unarchive:
@@ -19,31 +9,18 @@
     remote_src: yes
   when:
     - "ansible_architecture == 'x86_64'"
-  notify:
-    - restart consul

-- name: "Download and install Consul for arm64"
-  unarchive:
-    src: "https://releases.hashicorp.com/consul/{{ consul_version }}/consul_{{ consul_version }}_linux_arm64.zip"
-    dest: /usr/local/bin
-    remote_src: yes
-  when:
-    - "ansible_architecture == 'aarch64'"
-  notify:
-    - restart consul

 - name: "Create consul configuration directory"
   file: path=/etc/consul/ state=directory

 - name: "Deploy consul configuration"
   template: src=consul.json.j2 dest=/etc/consul/consul.json
-  notify:
-    - restart consul

 - name: "Deploy consul systemd service"
   copy: src=consul.service dest=/etc/systemd/system/consul.service
-  notify:
-    - restart consul

 - name: "Enable consul systemd service at boot"
   service: name=consul state=started enabled=yes daemon_reload=yes
+
+- name: "Deploy resolv.conf to use Consul"
+  template: src=resolv.conf.j2 dest=/etc/resolv.conf
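With resolv.conf handed over to the local Consul agent, a service lookup against the `2.cluster.deuxfleurs.fr` domain configured below makes an easy smoke test (the service name is illustrative):
```
dig @127.0.0.1 consul.service.2.cluster.deuxfleurs.fr
```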

View file

@@ -17,6 +17,9 @@
   "ports": {
     "dns": 53
   },
+  "recursors": [
+    "{{ dns_server }}"
+  ],
   "encrypt": "{{ consul_gossip_encrypt }}",
   "domain": "2.cluster.deuxfleurs.fr",
   "performance": {

View file

@@ -0,0 +1,2 @@
nameserver {{ private_ip }}
nameserver {{ dns_server }}
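Rendered with the inventory values above, e.g. on veterini, this template yields:
```
$ cat /etc/resolv.conf
nameserver 192.168.1.2
nameserver 80.67.169.40
```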

View file

@@ -9,8 +9,7 @@ group: files systemd
 shadow: files
 gshadow: files
-#hosts: files dns
-hosts: files mymachines resolve [!UNAVAIL=return] dns myhostname
+hosts: files dns
 networks: files
 protocols: db files
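The simplified `hosts: files dns` line sends any name missing from /etc/hosts straight to DNS, i.e. to Consul via the new resolv.conf; `getent` exercises that NSS path:
```
getent hosts villequin.machine.deuxfleurs.fr   # resolved via files, then dns
```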

View file

@@ -1,2 +0,0 @@
[Resolve]
DNSStubListener=no

View file

@@ -1,12 +0,0 @@
---
- name: reload iptables
shell: iptables-restore < /etc/iptables/rules.v4 && systemctl restart docker && ifdown nomad1 || true && ifup nomad1 || true
- name: reload ip6tables
shell: ip6tables-restore < /etc/iptables/rules.v6
- name: reload nomad interface
shell: ifdown nomad1 || true ; ifup nomad1
- name: reload systemd-resolved
service: name=systemd-resolved state=restarted

View file

@@ -1,42 +1,23 @@
-- name: "Add dummy interface to handle Nomad NAT restriction nomad#2770"
-  template: src=nomad-interface.j2 dest=/etc/network/interfaces.d/nomad.cfg
-  when: public_ip != private_ip
-  notify:
-    - reload nomad interface

 - name: "Deploy iptablesv4 configuration"
   template: src=rules.v4.j2 dest=/etc/iptables/rules.v4
-  notify:
-    - reload iptables

 - name: "Deploy iptablesv6 configuration"
   copy: src=rules.v6 dest=/etc/iptables/rules.v6
-  notify:
-    - reload ip6tables

 - name: "Activate IP forwarding"
   sysctl:
     name: net.ipv4.ip_forward
-    value: 1
+    value: "1"
     sysctl_set: yes

-- name: "Create systemd-resolved override directory"
-  file: path=/etc/systemd/resolved.conf.d/ state=directory
-- name: "Prevent systemd-resolved from listening on port 53 (DNS)"
-  copy: src=systemd-resolve-no-listen.conf dest=/etc/systemd/resolved.conf.d/systemd-resolve-no-listen.conf
-  notify: reload systemd-resolved
-- name: "Use systemd-resolved as a source for /etc/resolv.conf"
-  file:
-    src: "/run/systemd/resolve/resolv.conf"
-    dest: "/etc/resolv.conf"
-    state: link
-    force: yes
-  notify: reload systemd-resolved
-- name: "Update nsswitch.conf to use systemd-resolved"
+# These two lines are used to undo previous config, remove them once it is done
+- name: "Update nsswitch.conf to not use systemd-resolved"
   copy: src=nsswitch.conf dest=/etc/nsswitch.conf
-- name: "Flush handlers"
-  meta: flush_handlers
+- name: "Disable systemd-resolved"
+  systemd:
+    name: systemd-resolved
+    state: stopped
+    enabled: false
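Because the reload handlers were deleted with this change, freshly deployed rules are no longer applied automatically; applying them by hand follows the deleted handler above (restarting Docker is needed since it injects its own iptables rules on start):
```
iptables-restore < /etc/iptables/rules.v4
ip6tables-restore < /etc/iptables/rules.v6
systemctl restart docker
```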

View file

@@ -1,8 +0,0 @@
auto nomad1
iface nomad1 inet manual
pre-up /sbin/ip link add nomad1 type dummy
up /sbin/ip addr add {{ public_ip }} dev nomad1
up /sbin/iptables -t nat -A PREROUTING -d {{ private_ip }}/32 -j NETMAP --to {{ public_ip }}/32
down /sbin/iptables -t nat -D PREROUTING -d {{ private_ip }}/32 -j NETMAP --to {{ public_ip }}/32
post-down /sbin/ip link del nomad1
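On hosts set up before this change, the dummy interface may outlive the template's removal; a cleanup sketch reusing the interface's own down/post-down logic:
```
ifdown nomad1 || true   # removes the NETMAP rule and deletes the link
rm -f /etc/network/interfaces.d/nomad.cfg
```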

View file

@@ -1,5 +0,0 @@
---
- name: restart nomad
service: name=nomad state=restarted

View file

@@ -1,16 +1,6 @@
 - name: "Set nomad version"
   set_fact:
-    nomad_version: 0.11.3
+    nomad_version: 0.12.0-beta2

-- name: "Download and install Nomad for armv7l"
-  unarchive:
-    src: "https://releases.hashicorp.com/nomad/{{ nomad_version }}/nomad_{{ nomad_version }}_linux_arm.zip"
-    dest: /usr/local/bin
-    remote_src: yes
-  when:
-    - "ansible_architecture == 'armv7l'"
-  notify:
-    - restart nomad

 - name: "Download and install Nomad for x86_64"
   unarchive:
@@ -19,31 +9,15 @@
     remote_src: yes
   when:
     - "ansible_architecture == 'x86_64'"
-  notify:
-    - restart nomad

-- name: "Download and install Nomad for arm64"
-  unarchive:
-    src: "https://releases.hashicorp.com/nomad/{{ nomad_version }}/nomad_{{ nomad_version }}_linux_arm64.zip"
-    dest: /usr/local/bin
-    remote_src: yes
-  when:
-    - "ansible_architecture == 'aarch64'"
-  notify:
-    - restart nomad

 - name: "Create Nomad configuration directory"
   file: path=/etc/nomad/ state=directory

 - name: "Deploy Nomad configuration"
   template: src=nomad.hcl.j2 dest=/etc/nomad/nomad.hcl
-  notify:
-    - restart nomad

 - name: "Deploy Nomad systemd service"
   copy: src=nomad.service dest=/etc/systemd/system/nomad.service
-  notify:
-    - restart nomad

 - name: "Enable Nomad systemd service at boot"
   service: name=nomad state=started enabled=yes daemon_reload=yes
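Once the playbook has run, the installed binary can be compared against the pinned version:
```
nomad version   # expect: Nomad v0.12.0-beta2
```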

View file

@@ -26,5 +26,9 @@ client {
   #cpu_total_compute = 4000
   servers = ["127.0.0.1:4648"]
   network_interface = "{{ interface }}"
+  options {
+    docker.privileged.enabled = "true"
+    docker.volumes.enabled = "true"
+  }
 }
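Whether a client picked these options up can be inspected from its fingerprinted attributes; the exact attribute names are an assumption here, not taken from this diff:
```
nomad node status -self -verbose | grep driver.docker
# e.g. driver.docker.volumes.enabled should read "true"
```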

View file

@@ -24,8 +24,15 @@ job "seafile" {
         seafhttp_port = 8082
       }

+      mounts = [
+        {
+          type = "bind"
+          source = "/mnt/glusterfs/seafile"
+          target = "/mnt/seafile-data"
+        }
+      ]
+
       volumes = [
-        "/mnt/glusterfs/seafile:/mnt/seafile-data",
         "secrets/conf:/srv/webstore/conf",
         "secrets/ccnet:/srv/webstore/ccnet"
       ]
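To verify the GlusterFS bind survived the move from `volumes` to `mounts`, standard Docker tooling on the node works (the container ID is whatever Nomad allocated):
```
docker inspect --format '{{ json .Mounts }}' <container-id>
```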