Compare commits

10 commits:

- edb0a3737a
- 24118ab426
- 65af077d5a
- d3ada90d83
- 3bf830713f
- 207d1fa278
- bee7e10256
- a4f9aa2d98
- 1a16fc7f9e
- 3174179100

22 changed files with 419 additions and 38 deletions

@@ -13,3 +13,59 @@ For each machine, **one by one** do:

- Reboot
- Check that cluster is healthy

## New configuration with Wireguard

This configuration is used to make all of the cluster nodes appear in a single
virtual private network, enabling them to communicate on all ports even if they
are behind NATs at different locations. The VPN also provides a layer of
security, encrypting all communications that occur over the internet.

### Prerequisites

Nodes must all have two publicly accessible ports (potentially routed through a NAT); a reachability spot-check is shown after the list:

- A port that maps to the SSH port (port 22) of the machine, allowing TCP connections
- A port that maps to the Wireguard port (port 51820) of the machine, allowing UDP connections
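
For example, against ovh1's public IP from the inventory below (Wireguard stays silent on its UDP port, so the second probe only confirms the port is not rejected):

```
nc -zv 51.75.4.20 22        # TCP: the SSH mapping should connect
nc -zvu 51.75.4.20 51820    # UDP: best-effort probe of the Wireguard mapping
```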

### Configuration

The network role sets up a Wireguard interface, called `wgdeuxfleurs`, and
establishes a full mesh between all cluster machines. The following
configuration variables are necessary in the node list:

- `ansible_host`: hostname to which Ansible connects, usually the same as `public_ip`
- `ansible_user`: username Ansible connects as to run commands through SSH
- `ansible_port`: if SSH is not bound publicly on port 22, set the port here
- `public_ip`: the public IP of the machine, or of the NATting router behind which the machine is
- `public_vpn_port`: the public port number on `public_ip` that maps to port 51820 of the machine
- `vpn_ip`: the IP address to assign to the node on the VPN (each node must have a different one)
- `dns_server`: any DNS resolver, typically your ISP's DNS or a public one such as OpenDNS

The new iptables configuration now prevents direct communication between
cluster machines, except on port 51820 which is used to transmit VPN packets.
All intra-cluster communications must now go through the VPN interface (thus
machines refer to one another using their VPN IP addresses and never their
public or LAN addresses).

### Restarting Nomad

When switching to the Wireguard configuration, machines will stop using their
LAN addresses and switch to using their VPN addresses. Consul seems to handle
this correctly; Nomad, however, does not. To make Nomad able to restart
correctly, its Raft protocol module must be informed of the new IP addresses of
the cluster members. This is done by creating on all nodes the file
`/var/lib/nomad/server/raft/peers.json` that contains the list of IP addresses
of the cluster. Here is an example of such a file:

```
["10.68.70.11:4647","10.68.70.12:4647","10.68.70.13:4647"]
```

Once this file is created and identical on all nodes, restart Nomad on all
nodes. The cluster should resume normal operation.

The same procedure can also be applied to fix Consul; however, my tests showed
that it didn't break when IP addresses changed (it just took a bit longer to
come back up).
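
A minimal sketch of that recovery procedure, assuming Nomad runs as the systemd unit `nomad` (the unit name is an assumption; the path and addresses come from the text above):

```
# Run on every server node: write the same peers file, then restart Nomad.
systemctl stop nomad
cat > /var/lib/nomad/server/raft/peers.json <<'EOF'
["10.68.70.11:4647","10.68.70.12:4647","10.68.70.13:4647"]
EOF
systemctl start nomad
nomad server members   # verify that all servers rejoined the Raft quorum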

ansible/lxvm (new file, +6)
@@ -0,0 +1,6 @@
[cluster_nodes]
#ubuntu1 ansible_host=192.168.42.10
debian1 ansible_host=192.168.42.20 ansible_user=root public_ip=192.168.42.20 dns_server=208.67.222.222 vpn_ip=10.68.70.11 public_vpn_port=51820 datacenter=belair interface=enp1s0
debian2 ansible_host=192.168.42.21 ansible_user=root public_ip=192.168.42.21 dns_server=208.67.222.222 vpn_ip=10.68.70.12 public_vpn_port=51820 datacenter=belair interface=enp1s0
debian3 ansible_host=192.168.42.22 ansible_user=root public_ip=192.168.42.22 dns_server=208.67.222.222 vpn_ip=10.68.70.13 public_vpn_port=51820 datacenter=belair interface=enp1s0
ovh1 ansible_host=51.75.4.20 ansible_user=debian ansible_become=yes public_ip=51.75.4.20 dns_server=208.67.222.222 vpn_ip=10.68.70.20 public_vpn_port=51820 datacenter=saturne interface=eth0

@@ -3,6 +3,7 @@
    that:
      - "ansible_architecture == 'aarch64' or ansible_architecture == 'armv7l' or ansible_architecture == 'x86_64'"
      - "ansible_os_family == 'Debian'"
      - "ansible_distribution_version == '10'"

- name: "Upgrade system"
  apt:

@@ -15,6 +16,7 @@
- name: "Install base tools"
  apt:
    name:
      - sudo
      - vim
      - htop
      - screen

@@ -1,17 +1,18 @@
 {
   "datacenter": "deuxfleurs",
   "data_dir": "/var/lib/consul",
   "bind_addr": "0.0.0.0",
-  "advertise_addr": "{{ public_ip }}",
+  "advertise_addr": "{{ vpn_ip }}",
   "addresses": {
     "dns": "0.0.0.0",
     "http": "0.0.0.0"
   },
   "retry_join": [
-    {% for selected_host in groups['cluster_nodes']|reject("sameas", ansible_fqdn) %}{# @FIXME: Reject doesn't work #}
-    "{{ hostvars[selected_host]['private_ip'] }}" {{ "," if not loop.last else "" }}
+    {% for selected_host in groups['cluster_nodes']|difference([inventory_hostname]) %}{# @FIXME: Reject doesn't work #}
+    "{{ hostvars[selected_host]['vpn_ip'] }}" {{ "," if not loop.last else "" }}
     {% endfor %}
   ],
-  "bootstrap_expect": 3,
+  "bootstrap_expect": {{ groups['cluster_nodes']|length }},
   "server": true,
   "ui": true,
   "ports": {

@@ -1,2 +1,2 @@
-nameserver {{ private_ip }}
+nameserver {{ vpn_ip }}
 nameserver {{ dns_server }}

ansible/roles/network/handlers/main.yml (new file, +5)
@@ -0,0 +1,5 @@
---
- name: reload wireguard
  service:
    name: wg-quick@wgdeuxfleurs
    state: restarted

@@ -1,3 +1,6 @@
- name: "Create iptables configuration directory"
  file: path=/etc/iptables/ state=directory

- name: "Deploy iptablesv4 configuration"
  template: src=rules.v4.j2 dest=/etc/iptables/rules.v4

@@ -9,3 +12,48 @@
    name: net.ipv4.ip_forward
    value: "1"
    sysctl_set: yes

# Wireguard configuration
- name: "Enable backports repository"
  apt_repository:
    repo: deb http://deb.debian.org/debian buster-backports main
    state: present

- name: "Install wireguard"
  apt:
    name:
      - wireguard
      - wireguard-tools
      - "linux-headers-{{ ansible_kernel }}"
    state: present

- name: "Create wireguard configuration directory"
  file: path=/etc/wireguard/ state=directory

- name: "Check if wireguard private key exists"
  stat: path=/etc/wireguard/privkey
  register: wireguard_privkey

- name: "Create wireguard private key"
  shell: wg genkey > /etc/wireguard/privkey
  when: wireguard_privkey.stat.exists == false
  notify:
    - reload wireguard

- name: "Secure wireguard private key"
  file: path=/etc/wireguard/privkey mode=0600

- name: "Retrieve wireguard public key"
  shell: wg pubkey < /etc/wireguard/privkey
  register: wireguard_pubkey

- name: "Deploy wireguard configuration"
  template: src=wireguard.conf.j2 dest=/etc/wireguard/wgdeuxfleurs.conf mode=0600
  notify:
    - reload wireguard

- name: "Enable Wireguard systemd service at boot"
  service: name=wg-quick@wgdeuxfleurs state=started enabled=yes daemon_reload=yes

- name: "Create /tmp/wgdeuxfleurs.template.conf example configuration file for external nodes"
  local_action: template src=wireguard_external.conf.j2 dest=/tmp/wgdeuxfleurs.template.conf
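
For reference, the key handling these tasks automate corresponds roughly to the following manual commands on a node (`wg` and `wg-quick` come from the `wireguard-tools` package installed above):

```
umask 077
wg genkey > /etc/wireguard/privkey            # only if no key exists yet
wg pubkey < /etc/wireguard/privkey            # the public key shared with peers
systemctl enable --now wg-quick@wgdeuxfleurs  # bring the interface up now and at boot
```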

@@ -7,14 +7,21 @@
 -A INPUT -p tcp --dport 22 -j ACCEPT

 # Cluster
--A INPUT -s 192.168.1.254 -j ACCEPT
--A INPUT -s 82.253.205.190 -j ACCEPT
 {% for selected_host in groups['cluster_nodes'] %}
--A INPUT -s {{ hostvars[selected_host]['public_ip'] }} -j ACCEPT
--A INPUT -s {{ hostvars[selected_host]['private_ip'] }} -j ACCEPT
+-A INPUT -s {{ hostvars[selected_host]['public_ip'] }} -p udp --dport 51820 -j ACCEPT
+-A INPUT -s {{ hostvars[selected_host]['vpn_ip'] }} -j ACCEPT
 {% endfor %}
+{% for host in other_vpn_nodes %}
+-A INPUT -s {{ host.public_ip }} -p udp --dport 51820 -j ACCEPT
+-A INPUT -s {{ host.vpn_ip }} -j ACCEPT
+{% endfor %}

 # Local
+# Rennes
+-A INPUT -s 93.2.173.168 -j ACCEPT
+-A INPUT -s 82.253.205.190 -j ACCEPT
+# router
+-A INPUT -s 192.168.1.254 -j ACCEPT

 -A INPUT -i docker0 -j ACCEPT
 -A INPUT -s 127.0.0.1/8 -j ACCEPT
 -A INPUT -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT
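
Once rendered to /etc/iptables/rules.v4, the rules can be loaded and checked by hand:

```
iptables-restore < /etc/iptables/rules.v4
iptables -L INPUT -n --line-numbers   # confirm which rules are actually active
```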

ansible/roles/network/templates/wireguard.conf.j2 (new file, +20)
@@ -0,0 +1,20 @@
[Interface]
Address = {{ vpn_ip }}
PostUp = wg set %i private-key <(cat /etc/wireguard/privkey)
ListenPort = 51820

{% for selected_host in groups['cluster_nodes']|difference([inventory_hostname]) %}
[Peer]
PublicKey = {{ hostvars[selected_host].wireguard_pubkey.stdout }}
Endpoint = {{ hostvars[selected_host].public_ip }}:{{ hostvars[selected_host].public_vpn_port }}
AllowedIPs = {{ hostvars[selected_host].vpn_ip }}/32
PersistentKeepalive = 25
{% endfor %}

{% for host in other_vpn_nodes %}
[Peer]
PublicKey = {{ host.pubkey }}
Endpoint = {{ host.public_ip }}:{{ host.public_vpn_port }}
AllowedIPs = {{ host.vpn_ip }}/32
PersistentKeepalive = 25
{% endfor %}
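
With the interface up on each node, the mesh can be verified from any machine (the peer address is debian2's vpn_ip from the inventory above):

```
wg show wgdeuxfleurs    # lists peers, endpoints and last handshake times
ping -c 3 10.68.70.12   # reach another node over the VPN
```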

ansible/roles/network/templates/wireguard_external.conf.j2 (new file, +27)
@@ -0,0 +1,27 @@
# Template configuration file for VPN nodes that are not in the cluster
# The private key should be stored as /etc/wireguard/privkey
# External nodes should be registered in network/vars/main.yml

[Interface]
Address = <INSERT YOUR IP HERE, IT SHOULD MATCH THE ONE IN vars/main.yml>
PostUp = wg set %i private-key <(cat /etc/wireguard/privkey)
ListenPort = 51820

# Cluster nodes
{% for selected_host in groups['cluster_nodes'] %}
[Peer]
PublicKey = {{ hostvars[selected_host].wireguard_pubkey.stdout }}
Endpoint = {{ hostvars[selected_host].public_ip }}:{{ hostvars[selected_host].public_vpn_port }}
AllowedIPs = {{ hostvars[selected_host].vpn_ip }}/32
PersistentKeepalive = 25
{% endfor %}

# External nodes
# TODO: remove yourself from here
{% for host in other_vpn_nodes %}
[Peer]
PublicKey = {{ host.pubkey }}
Endpoint = {{ host.public_ip }}:{{ host.public_vpn_port }}
AllowedIPs = {{ host.vpn_ip }}/32
PersistentKeepalive = 25
{% endfor %}
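
On the external node itself, bringing the VPN up from the generated template might look like this (the template is produced as /tmp/wgdeuxfleurs.template.conf by the tasks above; filling in the Address placeholder remains manual):

```
install -m 0600 wgdeuxfleurs.template.conf /etc/wireguard/wgdeuxfleurs.conf
wg-quick up wgdeuxfleurs
```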

ansible/roles/network/vars/main.yml (new file, +6)
@@ -0,0 +1,6 @@
---
other_vpn_nodes:
  - pubkey: "QUiUNMk70TEQ75Ut7Uqikr5uGVSXmx8EGNkGM6tANlg="
    public_ip: "37.187.118.206"
    public_vpn_port: "51820"
    vpn_ip: "10.68.70.101"

@@ -1,7 +1,11 @@
-- name: "Set nomad version"
+- name: "Set Nomad version"
   set_fact:
     nomad_version: 0.12.0-beta2

+- name: "Set CNI version"
+  set_fact:
+    cni_plugins_version: 0.8.6
+
 - name: "Download and install Nomad for x86_64"
   unarchive:
     src: "https://releases.hashicorp.com/nomad/{{ nomad_version }}/nomad_{{ nomad_version }}_linux_amd64.zip"

@@ -10,6 +14,19 @@
   when:
     - "ansible_architecture == 'x86_64'"

+- name: "Create /opt/cni/bin"
+  file: path=/opt/cni/bin state=directory
+
+- name: "Download and install CNI plugins for x86_64"
+  unarchive:
+    src: "https://github.com/containernetworking/plugins/releases/download/v{{ cni_plugins_version }}/cni-plugins-linux-amd64-v{{ cni_plugins_version }}.tgz"
+    dest: /opt/cni/bin
+    remote_src: yes
+  when:
+    - "ansible_architecture == 'x86_64'"
+  notify:
+    - restart nomad
+
 - name: "Create Nomad configuration directory"
   file: path=/etc/nomad/ state=directory

@@ -1,3 +1,5 @@
+datacenter = "{{ datacenter }}"
+
 addresses {
   http = "0.0.0.0"
   rpc = "0.0.0.0"

@@ -5,16 +7,16 @@ addresses {
 }

 advertise {
-  http = "{{ public_ip }}"
-  rpc = "{{ public_ip }}"
-  serf = "{{ public_ip }}"
+  http = "{{ vpn_ip }}"
+  rpc = "{{ vpn_ip }}"
+  serf = "{{ vpn_ip }}"
 }

 data_dir = "/var/lib/nomad"

 server {
   enabled = true
-  bootstrap_expect = 3
+  bootstrap_expect = {{ groups['cluster_nodes']|length }}
 }

 consul {

@@ -25,10 +27,20 @@ client {
   enabled = true
   #cpu_total_compute = 4000
   servers = ["127.0.0.1:4648"]
-  network_interface = "{{ interface }}"
   options {
     docker.privileged.enabled = "true"
     docker.volumes.enabled = "true"
   }

+  network_interface = "wgdeuxfleurs"
+
+  host_network "default" {
+    #cidr = "{{ vpn_ip }}/24"
+    interface = "wgdeuxfleurs"
+  }
+  host_network "public" {
+    #cidr = "{{ public_ip }}/32"
+    interface = "{{ interface }}"
+  }
 }
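
With the advertise addresses switched to the VPN, membership can be verified once Nomad is back up:

```
nomad server members   # servers should advertise their 10.68.70.x addresses
nomad node status      # all client nodes should be listed as ready
```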

@@ -48,7 +48,7 @@
     nfs.export-volumes: "off"
     cluster.lookup-optimize: "on"

-  cluster: "{% for selected_host in groups['cluster_nodes'] %}{{ hostvars[selected_host]['private_ip'] }}{{ ',' if not loop.last else '' }}{% endfor %}"
+  cluster: "{% for selected_host in groups['cluster_nodes'] %}{{ hostvars[selected_host]['vpn_ip'] }}{{ ',' if not loop.last else '' }}{% endfor %}"
   run_once: true

- name: "Create mountpoint"

@@ -61,7 +61,7 @@
   tags: gluster-fstab
   mount:
     path: /mnt/glusterfs
-    src: "{{ private_ip }}:/donnees"
+    src: "{{ vpn_ip }}:/donnees"
     fstype: glusterfs
     opts: "defaults,_netdev,noauto,x-systemd.automount"
     state: present

@@ -6,6 +6,7 @@ $CONFIG = array (
   'secret' => '{{ key "secrets/nextcloud/secret" | trimSpace }}',
   'trusted_domains' => array (
     0 => 'nextcloud.deuxfleurs.fr',
+    1 => 'nextcloud.devx.adnab.me',
   ),
   'memcache.local' => '\\OC\\Memcache\\APCu',

@@ -1,5 +1,5 @@
 job "directory2" {
-  datacenters = ["dc1"]
+  datacenters = ["dc1", "saturne", "belair"]
   type = "service"

   constraint {
@ -94,7 +94,7 @@ job "directory2" {
|
|||
"guichet",
|
||||
"traefik.enable=true",
|
||||
"traefik.frontend.entryPoints=https,http",
|
||||
"traefik.frontend.rule=Host:guichet.deuxfleurs.fr",
|
||||
"traefik.frontend.rule=Host:guichet.devx.adnab.me",
|
||||
]
|
||||
port = "web_port"
|
||||
address_mode = "host"
|
||||
|
|
|
@ -59,7 +59,7 @@ job "garage" {
|
|||
"garage_api",
|
||||
"traefik.enable=true",
|
||||
"traefik.frontend.entryPoints=https,http",
|
||||
"traefik.frontend.rule=Host:garage.deuxfleurs.fr"
|
||||
"traefik.frontend.rule=Host:garage.deuxfleurs.fr,garage.devx.adnab.me"
|
||||
]
|
||||
port = "api_port"
|
||||
address_mode = "host"
|
||||
|
|

nomad/gitea.hcl (new file, +72)
@@ -0,0 +1,72 @@
job "git" {
|
||||
datacenters = ["dc1"]
|
||||
type = "service"
|
||||
|
||||
constraint {
|
||||
attribute = "${attr.cpu.arch}"
|
||||
value = "amd64"
|
||||
}
|
||||
|
||||
constraint {
|
||||
attribute = "${attr.unique.hostname}"
|
||||
value = "debian1"
|
||||
}
|
||||
|
||||
group "gitea" {
|
||||
task "server" {
|
||||
driver = "docker"
|
||||
config {
|
||||
image = "gitea/gitea:1.10.3"
|
||||
port_map {
|
||||
web_port = 3000
|
||||
ssh_port = 222
|
||||
}
|
||||
volumes = [
|
||||
"/etc/timezone:/etc/timezone:ro",
|
||||
"/etc/localtime:/etc/localtime:ro",
|
||||
"/srv/gitea:/data",
|
||||
]
|
||||
}
|
||||
|
||||
env {
|
||||
"USER_UID" = "1000"
|
||||
"USER_GID" = "1000"
|
||||
"LOG_LEVEL" = "trace"
|
||||
}
|
||||
|
||||
resources {
|
||||
memory = 100
|
||||
cpu = 100
|
||||
network {
|
||||
port "web_port" {}
|
||||
port "ssh_port" {
|
||||
static = 2222
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
service {
|
||||
tags = [
|
||||
"gitea",
|
||||
"traefik.enable=true",
|
||||
"traefik.frontend.entryPoints=https,http",
|
||||
"traefik.frontend.rule=Host:git.deuxfleurs.fr,git.deuxfleurs.org"
|
||||
]
|
||||
port = "web_port"
|
||||
address_mode = "host"
|
||||
name = "gitea"
|
||||
check {
|
||||
type = "tcp"
|
||||
port = "web_port"
|
||||
interval = "60s"
|
||||
timeout = "5s"
|
||||
check_restart {
|
||||
limit = 3
|
||||
grace = "90s"
|
||||
ignore_warnings = false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
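
A job file like this is submitted in the usual way:

```
nomad job run nomad/gitea.hcl
nomad job status git   # watch the allocation come up
```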

@@ -45,7 +45,7 @@ job "nextcloud" {
   "nextcloud",
   "traefik.enable=true",
   "traefik.frontend.entryPoints=https,http",
-  "traefik.frontend.rule=Host:nextcloud.deuxfleurs.fr",
+  "traefik.frontend.rule=Host:nextcloud.deuxfleurs.fr,nextcloud.devx.adnab.me",
 ]
 port = "web_port"
 address_mode = "host"

@@ -1,5 +1,5 @@
 job "postgres" {
-  datacenters = ["dc1"]
+  datacenters = ["dc1", "belair"]
   type = "system"
   priority = 90
@ -113,7 +113,7 @@ job "postgres" {
|
|||
}
|
||||
|
||||
resources {
|
||||
memory = 500
|
||||
memory = 200
|
||||
network {
|
||||
port "psql_port" {
|
||||
static = "5433"
|
||||
|
|

nomad/traefik-netv2.hcl (new file, +77)
@@ -0,0 +1,77 @@
job "frontend" {
|
||||
datacenters = ["dc1", "saturne"]
|
||||
type = "service"
|
||||
|
||||
group "traefik" {
|
||||
network {
|
||||
mode = "bridge"
|
||||
port "http" {
|
||||
static = 80
|
||||
host_network = "public"
|
||||
}
|
||||
port "https" {
|
||||
static = 443
|
||||
host_network = "public"
|
||||
}
|
||||
port "admin" {
|
||||
static = 8082
|
||||
}
|
||||
}
|
||||
|
||||
task "server" {
|
||||
driver = "docker"
|
||||
|
||||
config {
|
||||
image = "amd64/traefik:1.7.20"
|
||||
readonly_rootfs = true
|
||||
volumes = [
|
||||
"secrets/traefik.toml:/etc/traefik/traefik.toml",
|
||||
]
|
||||
}
|
||||
|
||||
resources {
|
||||
memory = 265
|
||||
}
|
||||
|
||||
template {
|
||||
data = "{{ key \"configuration/traefik/traefik.toml\" }}"
|
||||
destination = "secrets/traefik.toml"
|
||||
}
|
||||
}
|
||||
|
||||
service {
|
||||
tags = [
|
||||
"http",
|
||||
"frontend",
|
||||
"(diplonat (tcp_port 80))"
|
||||
]
|
||||
port = "http"
|
||||
name = "traefik"
|
||||
}
|
||||
|
||||
service {
|
||||
tags = [
|
||||
"https",
|
||||
"frontend",
|
||||
"(diplonat (tcp_port 443))"
|
||||
]
|
||||
port = "https"
|
||||
name = "traefik"
|
||||
|
||||
check {
|
||||
type = "http"
|
||||
protocol = "http"
|
||||
port = "http"
|
||||
path = "/"
|
||||
interval = "60s"
|
||||
timeout = "5s"
|
||||
check_restart {
|
||||
limit = 3
|
||||
grace = "90s"
|
||||
ignore_warnings = false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}

@@ -1,5 +1,5 @@
 job "frontend" {
-  datacenters = ["dc1"]
+  datacenters = ["dc1", "saturne"]
   type = "service"

   group "traefik" {

@@ -9,6 +9,7 @@ job "frontend" {
   config {
     image = "amd64/traefik:1.7.20"
+    readonly_rootfs = true
     network_mode = "host"
     port_map {
       https_port = 443
       http_port = 80
@ -24,30 +25,35 @@ job "frontend" {
|
|||
network {
|
||||
port "https_port" {
|
||||
static = "443"
|
||||
host_network = "public"
|
||||
}
|
||||
port "http_port" {
|
||||
static = "80"
|
||||
host_network = "public"
|
||||
}
|
||||
port "adm_port" {
|
||||
static = "8082"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template {
|
||||
data = "{{ key \"configuration/traefik/traefik.toml\" }}"
|
||||
destination = "secrets/traefik.toml"
|
||||
}
|
||||
|
||||
service {
|
||||
tags = [
|
||||
"https",
|
||||
"http",
|
||||
"frontend",
|
||||
"(diplonat (tcp_port 80 443))"
|
||||
"(diplonat (tcp_port 80))"
|
||||
]
|
||||
port = "https_port"
|
||||
address_mode = "host"
|
||||
name = "traefik"
|
||||
port = "http_port"
|
||||
name = "traefik-http"
|
||||
|
||||
check {
|
||||
type = "http"
|
||||
protocol = "http"
|
||||
port = "adm_port"
|
||||
path = "/ping"
|
||||
type = "tcp"
|
||||
port = "http_port"
|
||||
interval = "60s"
|
||||
timeout = "5s"
|
||||
check_restart {
|
||||
|
@ -57,10 +63,28 @@ job "frontend" {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
template {
|
||||
data = "{{ key \"configuration/traefik/traefik.toml\" }}"
|
||||
destination = "secrets/traefik.toml"
|
||||
|
||||
service {
|
||||
tags = [
|
||||
"https",
|
||||
"frontend",
|
||||
"(diplonat (tcp_port 443))"
|
||||
]
|
||||
port = "https_port"
|
||||
address_mode = "host"
|
||||
name = "traefik-https"
|
||||
|
||||
check {
|
||||
type = "tcp"
|
||||
port = "https_port"
|
||||
interval = "60s"
|
||||
timeout = "5s"
|
||||
check_restart {
|
||||
limit = 3
|
||||
grace = "90s"
|
||||
ignore_warnings = false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}