commit c095a355197c454f1e09eb792ed1b6ad5aa81c8b
Author: ADRN
Date:   Mon Dec 11 13:08:38 2023 +0100

    initial commit

diff --git a/README.md b/README.md
new file mode 100644
index 0000000..81b2452
--- /dev/null
+++ b/README.md
@@ -0,0 +1,21 @@
+# RVN - TP2.2: Scheduling services on a cluster
+
+1. Provision VMs **on our WAN** with [Vagrant](https://developer.hashicorp.com/vagrant/docs)
+2. Deploy a [Consul](https://developer.hashicorp.com/consul/docs) cluster
+3. Deploy a service with Docker and monitor it with Consul
+4. Deploy services with [Nomad](https://developer.hashicorp.com/nomad/docs)
+
+
+## 1. Provision VMs
+
+| Continent | Continent IP   | VM IP           |
+| --------- | -------------- | --------------- |
+| America   | `10.13.1.3/24` | `10.13.1.31/24` |
+| Europe    | `10.13.2.1/24` | `10.13.2.11/24` |
+| Asia      | `10.13.3.4/24` | `10.13.3.41/24` |
+
+You can take inspiration from:
+
+* [./Vagrantfile](./Vagrantfile)
+* [./install_nomad_consul.sh](./install_nomad_consul.sh)
+
diff --git a/Vagrantfile b/Vagrantfile
new file mode 100644
index 0000000..c1ac7ed
--- /dev/null
+++ b/Vagrantfile
@@ -0,0 +1,119 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+# Provisioning
+$script = <<-'SCRIPT'
+echo "## Provisioning VM ##"
+echo "Installing dependencies..."
+sudo apt-get update
+sudo apt-get install -y unzip curl jq dnsutils
+echo "Determining Consul version to install ..."
+CHECKPOINT_URL="https://checkpoint-api.hashicorp.com/v1/check"
+if [ -z "$CONSUL_VERSION" ]; then
+    CONSUL_VERSION=$(curl -s "${CHECKPOINT_URL}"/consul | jq .current_version | tr -d '"')
+fi
+if [ -z "$NOMAD_VERSION" ]; then
+    NOMAD_VERSION=$(curl -s "${CHECKPOINT_URL}"/nomad | jq .current_version | tr -d '"')
+fi
+echo "Fetching Consul version ${CONSUL_VERSION} ..."
+cd /tmp/
+curl -s https://releases.hashicorp.com/consul/${CONSUL_VERSION}/consul_${CONSUL_VERSION}_linux_amd64.zip -o consul.zip
+echo "Installing Consul version ${CONSUL_VERSION} ..."
+unzip consul.zip
+sudo chmod +x consul
+sudo mv consul /usr/bin/consul
+sudo mkdir /etc/consul.d
+sudo chmod a+w /etc/consul.d
+
+echo "Fetching Nomad version ${NOMAD_VERSION} ..."
+cd /tmp/
+curl -s https://releases.hashicorp.com/nomad/${NOMAD_VERSION}/nomad_${NOMAD_VERSION}_linux_amd64.zip -o nomad.zip
+echo "Installing Nomad version ${NOMAD_VERSION} ..."
+unzip nomad.zip
+sudo chmod +x nomad
+sudo mv nomad /usr/bin/nomad
+sudo mkdir /etc/nomad.d
+sudo chmod a+w /etc/nomad.d
+SCRIPT
+
+# Specify a Consul version
+CONSUL_VERSION = ENV['CONSUL_VERSION']
+
+Vagrant.configure("2") do |config|
+
+  config.vm.define "rvn11" do |rvn11|
+    rvn11.vm.box = "ubuntu/lunar64"
+
+    rvn11.vm.hostname = "rvn11.machine.local"
+    rvn11.vm.network "public_network", ip: "172.24.1.1/16", bridge: "tap0"
+  end
+
+  #config.vm.define "rvn12" do |rvn12|
+  #  rvn12.vm.box = "ubuntu/lunar64"
+
+  #  rvn12.vm.hostname = "rvn12.machine.local"
+  #  rvn12.vm.network "public_network", ip: "172.24.1.2", bridge: "tap0"
+  #end
+
+  config.vm.provision "shell",
+    inline: $script,
+    env: {'CONSUL_VERSION' => CONSUL_VERSION}
+
+  config.vm.box_check_update = false
+
+  # Create a forwarded port mapping which allows access to a specific port
+  # within the machine from a port on the host machine. In the example below,
+  # accessing "localhost:8080" will access port 80 on the guest machine.
+  # NOTE: This will enable public access to the opened port
+  # config.vm.network "forwarded_port", guest: 80, host: 8080
+
+  # Create a forwarded port mapping which allows access to a specific port
+  # within the machine from a port on the host machine and only allow access
+  # via 127.0.0.1 to disable public access
+  # config.vm.network "forwarded_port", guest: 80, host: 8080, host_ip: "127.0.0.1"
+
+  # Create a private network, which allows host-only access to the machine
+  # using a specific IP.
+  # config.vm.network "private_network", ip: "192.168.33.10"
+
+  # Create a public network, which generally matches a bridged network.
+  # Bridged networks make the machine appear as another physical device on
+  # your network.
+  # config.vm.network "public_network"
+
+  # Share an additional folder to the guest VM. The first argument is
+  # the path on the host to the actual folder. The second argument is
+  # the path on the guest to mount the folder. And the optional third
+  # argument is a set of non-required options.
+  # config.vm.synced_folder "../data", "/vagrant_data"
+
+  # Disable the default share of the current code directory. Doing this
+  # provides improved isolation between the vagrant box and your host
+  # by making sure your Vagrantfile isn't accessible to the vagrant box.
+  # If you use this you may want to enable additional shared subfolders as
+  # shown above.
+  # config.vm.synced_folder ".", "/vagrant", disabled: true
+
+  # Provider-specific configuration so you can fine-tune various
+  # backing providers for Vagrant. These expose provider-specific options.
+  # Example for VirtualBox:
+  #
+  # config.vm.provider "virtualbox" do |vb|
+  #   # Display the VirtualBox GUI when booting the machine
+  #   vb.gui = true
+  #
+  #   # Customize the amount of memory on the VM:
+  #   vb.memory = "1024"
+  # end
+  #
+  # View the documentation for the provider you are using for more
+  # information on available options.
+
+  # Enable provisioning with a shell script. Additional provisioners such as
+  # Ansible, Chef, Docker, Puppet and Salt are also available. Please see the
+  # documentation for more information about their specific syntax and use.
+  # config.vm.provision "shell", inline: <<-SHELL
+  #   apt-get update
+  #   apt-get install -y apache2
+  # SHELL
+end
diff --git a/garage.hcl b/garage.hcl
new file mode 100644
index 0000000..d3645a7
--- /dev/null
+++ b/garage.hcl
@@ -0,0 +1,221 @@
+job "garage" {
+  datacenters = [ "dc1" ]
+  type        = "system"
+  priority    = 80
+
+  update {
+    max_parallel     = 10
+    min_healthy_time = "60s"
+  }
+
+  group "garage" {
+    network {
+      port "s3"    { static = 3900 }
+      port "rpc"   { static = 3901 }
+      port "web"   { static = 3902 }
+      port "admin" { static = 3903 }
+      port "k2v"   { static = 3904 }
+    }
+
+    update {
+      max_parallel     = 10
+      min_healthy_time = "30s"
+      healthy_deadline = "5m"
+    }
+
+    task "server" {
+      driver = "docker"
+      config {
+        image        = "dxflrs/garage:v0.9.0"
+        command      = "/garage"
+        args         = [ "server" ]
+        network_mode = "host"
+        volumes = [
+          "/mnt/storage/garage/data:/data",
+          "/mnt/ssd/garage/meta:/meta",
+          "secrets/garage.toml:/etc/garage.toml",
+          "secrets:/etc/garage",
+        ]
+        logging {
+          type = "journald"
+        }
+      }
+
+      template {
+        data        = file("../config/garage.toml")
+        destination = "secrets/garage.toml"
+      }
+
+      template {
+        data        = "{{ key \"secrets/consul/consul-ca.crt\" }}"
+        destination = "secrets/consul-ca.crt"
+      }
+
+      template {
+        data        = "{{ key \"secrets/consul/consul-client.crt\" }}"
+        destination = "secrets/consul-client.crt"
+      }
+
+      template {
+        data        = "{{ key \"secrets/consul/consul-client.key\" }}"
+        destination = "secrets/consul-client.key"
+      }
+
+      resources {
+        memory     = 1000
+        memory_max = 3000
+        cpu        = 1000
+      }
+
+      kill_timeout = "20s"
+
+      restart {
+        interval = "30m"
+        attempts = 10
+        delay    = "15s"
+        mode     = "delay"
+      }
+
+      #### Configuration for service ports: admin port (internal use only)
+
+      service {
+        port         = "admin"
+        address_mode = "host"
+        name         = "garage-admin"
+        # Check that Garage is alive and answering TCP connections
+        check {
+          type     = "tcp"
+          interval = "60s"
+          timeout  = "5s"
+          check_restart {
+            limit           = 3
+            grace           = "90s"
+            ignore_warnings = false
+          }
+        }
+      }
+
+      #### Configuration for service ports: externally available ports (API, web)
+
+      service {
+        tags = [
+          "garage_api",
+          "tricot garage.deuxfleurs.fr",
+          "tricot *.garage.deuxfleurs.fr",
+          "tricot-site-lb",
+        ]
+        port         = "s3"
+        address_mode = "host"
+        name         = "garage-api"
+        # Check 1: Garage is alive and answering TCP connections
+        check {
+          name     = "garage-api-live"
+          type     = "tcp"
+          interval = "60s"
+          timeout  = "5s"
+          check_restart {
+            limit           = 3
+            grace           = "90s"
+            ignore_warnings = false
+          }
+        }
+        # Check 2: Garage is in a healthy state and requests should be routed here
+        check {
+          name     = "garage-api-healthy"
+          port     = "admin"
+          type     = "http"
+          path     = "/health"
+          interval = "60s"
+          timeout  = "5s"
+        }
+      }
+
+      service {
+        tags = [
+          "garage-web",
+          "tricot * 1",
+          "tricot-add-header Strict-Transport-Security max-age=63072000; includeSubDomains; preload",
+          "tricot-add-header X-Frame-Options SAMEORIGIN",
+          "tricot-add-header X-XSS-Protection 1; mode=block",
+          "tricot-add-header X-Content-Type-Options nosniff",
+          "tricot-on-demand-tls-ask http://garage-admin.service.prod.consul:3903/check",
+          "tricot-site-lb",
+        ]
+        port         = "web"
+        address_mode = "host"
+        name         = "garage-web"
+        # Check 1: Garage is alive and answering TCP connections
+        check {
+          name     = "garage-web-live"
+          type     = "tcp"
+          interval = "60s"
+          timeout  = "5s"
+          check_restart {
+            limit           = 3
+            grace           = "90s"
+            ignore_warnings = false
+          }
+        }
+        # Check 2: Garage is in a healthy state and requests should be routed here
+        check {
+          name     = "garage-web-healthy"
+          port     = "admin"
+          type     = "http"
+          path     = "/health"
+          interval = "60s"
+          timeout  = "5s"
+        }
+      }
+
+      service {
+        tags = [
+          "garage-redirect-dummy",
+          "tricot www.deuxfleurs.fr 2",
+          "tricot osuny.org 2",
+          "tricot www.degrowth.net 2",
+          "tricot-add-redirect www.deuxfleurs.fr deuxfleurs.fr 301",
+          "tricot-add-redirect osuny.org www.osuny.org 301",
+          "tricot-add-redirect www.degrowth.net degrowth.net 301",
+        ]
+        name         = "garage-redirect-dummy"
+        address_mode = "host"
+        port         = "web"
+        on_update    = "ignore"
+      }
+
+
+      service {
+        tags = [
+          "garage_k2v",
+          "tricot k2v.deuxfleurs.fr",
+          "tricot-site-lb",
+        ]
+        port         = "k2v"
+        address_mode = "host"
+        name         = "garage-k2v"
+        # Check 1: Garage is alive and answering TCP connections
+        check {
+          name     = "garage-k2v-live"
+          type     = "tcp"
+          interval = "60s"
+          timeout  = "5s"
+          check_restart {
+            limit           = 3
+            grace           = "90s"
+            ignore_warnings = false
+          }
+        }
+        # Check 2: Garage is in a healthy state and requests should be routed here
+        check {
+          name     = "garage-k2v-healthy"
+          port     = "admin"
+          type     = "http"
+          path     = "/health"
+          interval = "60s"
+          timeout  = "5s"
+        }
+      }
+    }
+  }
+}
+
diff --git a/install_nomad_consul.sh b/install_nomad_consul.sh
new file mode 100644
index 0000000..3f789fe
--- /dev/null
+++ b/install_nomad_consul.sh
@@ -0,0 +1,171 @@
+#!/usr/bin/env bash
+
+if [ -z "$BIND_ADDR" ]; then
+    BIND_ADDR=$(ip -4 -br addr show dev ip0 | awk '{print $NF}' | awk -F "/" '{print $1}' )
+fi
+
+if [ -z "$NODE_NAME" ]; then
+    read -p "Provide node_name: " NODE_NAME
+fi
+#read -p "node_name='$NODE_NAME', BIND_ADDR='$BIND_ADDR', ok?"
+
+### CONSUL ###
+
+echo "Installing dependencies..."
+sudo apt-get update
+sudo apt-get install -y unzip curl jq dnsutils
+
+if [ ! -f /usr/bin/consul ]; then
+    CHECKPOINT_URL="https://checkpoint-api.hashicorp.com/v1/check"
+    if [ -z "$CONSUL_VERSION" ]; then
+        CONSUL_VERSION=$(curl -s "${CHECKPOINT_URL}"/consul | jq .current_version | tr -d '"')
+    fi
+    echo "Fetching Consul version ${CONSUL_VERSION} ..."
+    cd /tmp/
+    curl -s https://releases.hashicorp.com/consul/${CONSUL_VERSION}/consul_${CONSUL_VERSION}_linux_amd64.zip -o consul.zip
+    echo "Installing Consul version ${CONSUL_VERSION} ..."
+    unzip consul.zip
+    sudo chmod +x consul
+    sudo mv consul /usr/bin/consul
+fi
+# useradd: -U creates a group with the same name as the user, -r creates a system user
+sudo mkdir /etc/consul.d
+sudo chmod a+w /etc/consul.d
+sudo useradd -Urd /var/consul/ consul
+sudo mkdir -p /var/consul/data
+sudo chown -R consul /var/consul/data
+
+cat <
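A rough usage sketch for the TP steps listed in the README, assuming the defaults in this commit: the node name and bind address below are illustrative values taken from the README table (Europe VM), and the remainder of install_nomad_consul.sh is truncated above, so it is only assumed that it configures and starts the agents.

    # On the host: create the VM defined in the Vagrantfile and run its inline provisioning script
    vagrant up rvn11
    vagrant ssh rvn11

    # Inside the VM: run the installer from the default synced folder;
    # it prompts for NODE_NAME if unset and derives BIND_ADDR from the ip0 interface
    NODE_NAME=rvn11 BIND_ADDR=10.13.2.11 bash /vagrant/install_nomad_consul.sh

    # If the (truncated) rest of the script starts the agents, these should answer:
    consul members
    nomad node status
    dig @127.0.0.1 -p 8600 consul.service.consul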