From 9cf37f9fe0da315b73df5933b7dd494727c1c7b8 Mon Sep 17 00:00:00 2001
From: Quentin Dufour
Date: Sun, 5 Jul 2020 20:12:51 +0200
Subject: [PATCH] Clean nomad+consul deploy tasks as we do not deploy anymore
 on ARM so it is untested for real

---
 ansible/README.md                      |  2 ++
 ansible/cluster_nodes.yml              | 13 ++----------
 ansible/roles/consul/handlers/main.yml |  4 ----
 ansible/roles/consul/tasks/main.yml    | 28 +-------------------------
 ansible/roles/nomad/handlers/main.yml  |  5 -----
 ansible/roles/nomad/tasks/main.yml     | 28 +-------------------------
 6 files changed, 6 insertions(+), 74 deletions(-)
 delete mode 100644 ansible/roles/consul/handlers/main.yml
 delete mode 100644 ansible/roles/nomad/handlers/main.yml

diff --git a/ansible/README.md b/ansible/README.md
index 806adf5..023b941 100644
--- a/ansible/README.md
+++ b/ansible/README.md
@@ -4,6 +4,8 @@
 For each machine, **one by one** do:
   - Check that cluster is healthy
+    - `sudo gluster peer status`
+    - `sudo gluster volume status all` (check Online Col, only `Y` must appear)
   - Run `ansible-playbook -i production --limit <machine> site.yml`
   - Reboot
   - Check that cluster is healthy
 
diff --git a/ansible/cluster_nodes.yml b/ansible/cluster_nodes.yml
index df73e79..ea58630 100644
--- a/ansible/cluster_nodes.yml
+++ b/ansible/cluster_nodes.yml
@@ -11,17 +11,8 @@
       tags: kv
     - role: nomad
       tags: orchestrator
-
-# UNSAFE!! This section is disabled by default, to run it the flags -t net should be added
-# to the ansible playbook command line.
-# Reason: when rules.{v4,v6} are changed, the whole iptables configuration is reloaded.
-# This creates issues with Docker, which injects its own configuration in iptables when it starts.
-# In practice, most (all?) containers will break if rules.{v4,v6} are changed,
-# and docker will have to be restared.
-- hosts: cluster_nodes
-  roles:
-    - role: network
-      tags: [ net, never ]
+    - role: network
+      tags: net
 
 # UNSAFE!! This section configures glusterfs. Once done, don't run it ever again as it may break stuff.
 #    - role: storage
diff --git a/ansible/roles/consul/handlers/main.yml b/ansible/roles/consul/handlers/main.yml
deleted file mode 100644
index e8cd4a4..0000000
--- a/ansible/roles/consul/handlers/main.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-
-- name: restart consul
-  service: name=consul state=restarted
diff --git a/ansible/roles/consul/tasks/main.yml b/ansible/roles/consul/tasks/main.yml
index 5cb68ab..2b77080 100644
--- a/ansible/roles/consul/tasks/main.yml
+++ b/ansible/roles/consul/tasks/main.yml
@@ -1,16 +1,6 @@
 - name: "Set consul version"
   set_fact:
-    consul_version: 1.7.4
-
-- name: "Download and install Consul for armv7l"
-  unarchive:
-    src: "https://releases.hashicorp.com/consul/{{ consul_version }}/consul_{{ consul_version }}_linux_arm.zip"
-    dest: /usr/local/bin
-    remote_src: yes
-  when:
-    - "ansible_architecture == 'armv7l'"
-  notify:
-    - restart consul
+    consul_version: 1.8.0
 
 - name: "Download and install Consul for x86_64"
   unarchive:
@@ -19,31 +9,15 @@
     remote_src: yes
   when:
     - "ansible_architecture == 'x86_64'"
-  notify:
-    - restart consul
-
-- name: "Download and install Consul for arm64"
-  unarchive:
-    src: "https://releases.hashicorp.com/consul/{{ consul_version }}/consul_{{ consul_version }}_linux_arm64.zip"
-    dest: /usr/local/bin
-    remote_src: yes
-  when:
-    - "ansible_architecture == 'aarch64'"
-  notify:
-    - restart consul
 
 - name: "Create consul configuration directory"
   file: path=/etc/consul/ state=directory
 
 - name: "Deploy consul configuration"
   template: src=consul.json.j2 dest=/etc/consul/consul.json
-  notify:
-    - restart consul
 
 - name: "Deploy consul systemd service"
   copy: src=consul.service dest=/etc/systemd/system/consul.service
-  notify:
-    - restart consul
 
 - name: "Enable consul systemd service at boot"
   service: name=consul state=started enabled=yes daemon_reload=yes
diff --git a/ansible/roles/nomad/handlers/main.yml b/ansible/roles/nomad/handlers/main.yml
deleted file mode 100644
index 0274673..0000000
--- a/ansible/roles/nomad/handlers/main.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-
-- name: restart nomad
-  service: name=nomad state=restarted
-
diff --git a/ansible/roles/nomad/tasks/main.yml b/ansible/roles/nomad/tasks/main.yml
index 0b7b65c..7c73362 100644
--- a/ansible/roles/nomad/tasks/main.yml
+++ b/ansible/roles/nomad/tasks/main.yml
@@ -1,16 +1,6 @@
 - name: "Set nomad version"
   set_fact:
-    nomad_version: 0.11.3
-
-- name: "Download and install Nomad for armv7l"
-  unarchive:
-    src: "https://releases.hashicorp.com/nomad/{{ nomad_version }}/nomad_{{ nomad_version }}_linux_arm.zip"
-    dest: /usr/local/bin
-    remote_src: yes
-  when:
-    - "ansible_architecture == 'armv7l'"
-  notify:
-    - restart nomad
+    nomad_version: 0.12.0-beta2
 
 - name: "Download and install Nomad for x86_64"
   unarchive:
@@ -19,31 +9,15 @@
     remote_src: yes
   when:
     - "ansible_architecture == 'x86_64'"
-  notify:
-    - restart nomad
-
-- name: "Download and install Nomad for arm64"
-  unarchive:
-    src: "https://releases.hashicorp.com/nomad/{{ nomad_version }}/nomad_{{ nomad_version }}_linux_arm64.zip"
-    dest: /usr/local/bin
-    remote_src: yes
-  when:
-    - "ansible_architecture == 'aarch64'"
-  notify:
-    - restart nomad
 
 - name: "Create Nomad configuration directory"
   file: path=/etc/nomad/ state=directory
 
 - name: "Deploy Nomad configuration"
   template: src=nomad.hcl.j2 dest=/etc/nomad/nomad.hcl
-  notify:
-    - restart nomad
 
 - name: "Deploy Nomad systemd service"
   copy: src=nomad.service dest=/etc/systemd/system/nomad.service
-  notify:
-    - restart nomad
 
 - name: "Enable Nomad systemd service at boot"
   service: name=nomad state=started enabled=yes daemon_reload=yes