Compare commits

...
This repository has been archived on 2023-03-15. You can view files and clone it, but cannot push or open issues or pull requests.

11 commits

13 changed files with 732 additions and 0 deletions

69
hammerhead/README.md Normal file
View file

@ -0,0 +1,69 @@
# Hammerhead Configuration
## Roadmap
0. Prior
* The OS is fully installed and configured using the `os/config` Ansible scripts.
* Nomad and Consul on HammerHead have custom configurations compared to the rest of the cluster. The configuration files `os/config/nomad.hcl` and `os/config/consul.json` need to be in sync on the server at `/etc/nomad/nomad.hcl` and `/etc/consul/consul.json` respectively.
1. Base components: things that need to be installed before services
* [x] Dummy HTTP server to have something to work with.
* [x] Reverse-proxy/load-balancer: nginx is a good match for a one-node deployment. Installing it with Nomad/Consul will make me practice Consul Template etc.
SSL using nginx is a pain. I understand the interest of traefik or fabio in that sense: their close collaboration with Nomad allows them to automate certificate generation.
Consequently, SSL is not supported at the moment. (It would be manual using nginx.)
* [x] Generate services configuration outside the nginx service definition.
Can't do because of *separation of concerns*: files needed by nginx need to be defined in the nginx job specification.
Solution: each new web service needs:
* an nginx configuration template at `app/nginx/config`
* a template stanza in `app/nginx/deploy/nginx.hcl` to interpret the above template configuration. Which is lame.
2. Gitea installation
* [x] persistent data -> `host_volume`
* [x] Postgres database
* [x] Persistent data volume - using `host_volume` in the `client` config of Nomad (requires a restart, and it's not so fun to add volumes there).
* [x] How can Postgres be its own job, while not exposing it publicly and still letting it talk to other jobs? With Consul Connect!
* [ ] Avoid exposing gitea publicly (on port 3000). Can't without heavy configuration of nginx, to leverage sidecars. Adding another service would be even more painful than it already is.
* [ ] SSL. Can't without heavy-lifting, again due to nginx.
Conclusion: Don't use nginx.
2. Wiki installation
* Postgres database
3. Gitea migration
* Postgres database: needs to be its own Nomad job.
* Gitea: setting it up on Nomad.
* Migrating data from Serenity, where the DB is MySQL. Expect fun times.
* Database & files periodic backups
4. Synapse migration
* Postgres already setup
* Migrating from a Postgres on Serenity (easier)
* Backups
5. [Own/Next]cloud: Adrien needs it for himself.
* Compare distribution capabilities / S3-compatibility between the two solutions. The assumption is that Owncloud's Go rewrite is the better fit.
* Do the things.

View file

@ -0,0 +1,65 @@
# Demo job from the official Consul Connect tutorial: a counting API plus a
# dashboard that reaches it exclusively through Envoy sidecars (service mesh).
job "countdash" {
datacenters = ["dc1"]
group "api" {
network {
# Bridge networking is required for Consul Connect sidecars.
mode = "bridge"
}
service {
name = "count-api"
# No port is published in the network stanza: the API is only
# reachable through the mesh.
port = "9001"
connect {
# Empty sidecar_service block: accept the default Envoy proxy settings.
sidecar_service {}
}
}
task "web" {
driver = "docker"
config {
image = "hashicorpnomad/counter-api:v3"
}
}
}
group "dashboard" {
network {
mode = "bridge"
# The dashboard UI itself is published on the host at 9002.
port "http" {
static = 9002
to = 9002
}
}
service {
name = "count-dashboard"
port = "9002"
connect {
sidecar_service {
proxy {
# Outbound side of the mesh: bind the remote count-api service
# to localhost:8080 inside this group's network namespace.
upstreams {
destination_name = "count-api"
local_bind_port = 8080
}
}
}
}
}
task "dashboard" {
driver = "docker"
env {
# Nomad injects the upstream's local address (127.0.0.1:8080) here.
COUNTING_SERVICE_URL = "http://${NOMAD_UPSTREAM_ADDR_count_api}"
}
config {
image = "hashicorpnomad/counter-dashboard:v3"
}
}
}
}

View file

@ -0,0 +1,37 @@
# Dummy HTTP server: five replicas of hashicorp/http-echo on dynamic ports.
# First workload used to exercise Nomad + Consul + the nginx reverse proxy.
job "dummy-http-server" {
datacenters = ["dc1"]
group "server-group" {
# Several instances so the nginx upstream has something to balance across.
count = 5
network {
# Empty port block: Nomad picks a dynamic host port, exposed to the
# task as NOMAD_PORT_http.
port "http" {}
}
service {
name = "dummy-http-server"
# Register the dynamically assigned "http" port with Consul.
port = "http"
# Only instances passing this check are returned by Consul service
# queries (and thus rendered into the nginx upstream template).
check {
type = "http"
path = "/health"
interval = "2s"
timeout = "2s"
}
tags = [
# Public hostname this service should be served under.
# NOTE(review): looks informational only -- the nginx template keys on
# the service name, not this tag; confirm.
"url=dummy.hammerhead.luxeylab.net"
]
}
task "server" {
driver = "docker"
config {
ports = ["http"]
image = "hashicorp/http-echo:latest"
# http-echo answers every request with the -text value.
args = [
"-listen", ":${NOMAD_PORT_http}",
"-text", "Hello and welcome to ${NOMAD_IP_http}:${NOMAD_PORT_http}",
]
}
}
}
}

View file

@ -0,0 +1,113 @@
# Gitea (self-hosted git forge), backed by the "postgres" job through a
# Consul Connect sidecar. Persistent data lives in the "gitea-data" host volume.
job "gitea" {
  datacenters = ["dc1"]

  group "gitea" {
    count = 1

    # Persistent storage; the host volume itself is declared in the Nomad
    # client configuration (os/config/nomad.hcl).
    volume "gitea-data" {
      type      = "host"
      read_only = false
      source    = "gitea-data"
    }

    network {
      # Bridge mode is required for Consul Connect sidecars.
      mode = "bridge"
      # git-over-SSH is published directly on the host.
      port "ssh" {
        static = 22
      }
      # HTTP (3000) is intentionally not published: the web UI is reached
      # through the Connect mesh (see the nginx job's upstream).
      # port "http" {
      #   static = 3000
      #   to = 3000
      # }
    }

    service {
      name = "gitea-frontend"
      port = "3000"
      connect {
        # Inbound side of the mesh; default Envoy settings.
        sidecar_service {}
      }
      # check {
      #   name = "alive"
      #   type = "tcp"
      #   interval = "10s"
      #   timeout = "2s"
      # }
    }

    service {
      name = "gitea-ssh"
      port = "ssh"
      # check {
      #   name = "alive"
      #   type = "tcp"
      #   interval = "10s"
      #   timeout = "2s"
      # }
    }

    # Outbound side of the mesh: binds the remote "postgres" service to
    # 127.0.0.1:5432 inside this group's network namespace.
    service {
      name = "gitea-postgres-connector"
      connect {
        sidecar_service {
          proxy {
            upstreams {
              # Required
              destination_name = "postgres"
              # Integer, as the upstream schema specifies (was the string
              # "5432"; the countdash job already uses the integer form).
              local_bind_port = 5432
              # Optional
              local_bind_address = "127.0.0.1"
            }
          }
        }
      }
    }

    task "gitea" {
      driver = "docker"

      config {
        ports = ["ssh"]
        image = "gitea/gitea:1.14.2"
        # Share the host's timezone with the container.
        volumes = [
          "/etc/timezone:/etc/timezone:ro",
          "/etc/localtime:/etc/localtime:ro"
        ]
      }

      volume_mount {
        volume      = "gitea-data"
        destination = "/data"
        read_only   = false
      }

      template {
        # Consul Template only works in template stanza.
        # We need it to fetch secret values from Consul.
        # The "env = true" parameter sets the environment with the data.
        # "destination" key is required but its value doesn't matter.
        data = <<EOH
DB_TYPE = "postgres"
DB_USER = "{{ key "secrets/postgres/gitea/user" }}"
DB_PASSWD = "{{ key "secrets/postgres/gitea/password" }}"
DB_NAME = "{{ key "secrets/postgres/gitea/db_name" }}"
EOH
        destination = "secrets/env.env"
        env         = true
        change_mode = "restart"
      }

      env {
        DOMAIN     = "gitea.hammerhead.luxeylab.net"
        SSH_DOMAIN = "gitea.hammerhead.luxeylab.net"
        # Address of the postgres upstream bound by the connector service.
        DB_HOST = "${NOMAD_UPSTREAM_ADDR_postgres}"
      }
    }
  }
}

View file

@ -0,0 +1,17 @@
# Consul Template: build the upstream from every healthy instance of the
# dummy-http-server service registered in Consul.
upstream dummy-http-server-backend {
{{ range service "dummy-http-server" }}
server {{ .Address }}:{{ .Port }};
{{ else }}
# No healthy instance: point at a closed port so nginx returns 502
# instead of refusing to start on an empty upstream block.
server 127.0.0.1:65535; # force a 502
{{ end }}
}
server {
listen 80;
listen [::]:80;
server_name dummy.hammerhead.luxeylab.net;
location / {
proxy_pass http://dummy-http-server-backend;
}
}

View file

@ -0,0 +1,27 @@
# Vhost for gitea. The upstream is the local end of the Consul Connect
# sidecar declared in the nginx job (gitea-frontend bound to 127.0.0.1:3000).
upstream gitea-frontend {
server 127.0.0.1:3000;
}
server {
listen 80;
listen [::]:80;
server_name gitea.hammerhead.luxeylab.net;
# Security headers added to responses (HSTS stays disabled while SSL
# is not set up).
# add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
add_header X-Frame-Options SAMEORIGIN;
add_header X-Content-Type-Options nosniff;
add_header X-XSS-Protection "1; mode=block";
location / {
# Forward client/protocol information from nginx to the upstream.
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-Host $http_host;
proxy_set_header X-Forwarded-Port $server_port;
proxy_pass http://gitea-frontend;
}
}

View file

@ -0,0 +1,73 @@
# nginx reverse proxy: publishes HTTP/HTTPS on the host and forwards gitea
# traffic into the Consul Connect mesh.
job "nginx" {
  datacenters = ["dc1"]

  group "nginx" {
    count = 1

    network {
      mode = "bridge"
      port "http" {
        static = 80
      }
      port "https" {
        static = 443
      }
    }

    # volume "certs" {
    #   type = "host"
    #   source = "ca-certificates"
    #   # read_only = true
    # }

    service {
      name = "nginx"
      port = "http"
    }

    # Outbound side of the mesh: exposes the "gitea-frontend" service on
    # 127.0.0.1:3000 inside this group's network namespace, which is where
    # the rendered gitea.conf upstream points (see ../config/gitea.tpl).
    service {
      name = "nginx-gitea-frontend-connector"
      connect {
        sidecar_service {
          proxy {
            upstreams {
              # Required
              destination_name = "gitea-frontend"
              # Integer, as the upstream schema specifies (was the string
              # "3000"; the countdash job already uses the integer form).
              local_bind_port = 3000
              # Optional
              local_bind_address = "127.0.0.1"
            }
          }
        }
      }
    }

    task "nginx" {
      driver = "docker"

      config {
        ports = ["http", "https"]
        image = "nginx"
        # Mount the rendered templates over nginx's default vhost directory.
        volumes = [
          "local:/etc/nginx/conf.d",
          #"certs:..."
        ]
      }

      # template {
      #   data = file("../config/dummy-http-server.tpl")
      #   destination = "local/dummy-http-server.conf"
      #   change_mode = "signal"
      #   change_signal = "SIGHUP"
      # }

      template {
        data        = file("../config/gitea.tpl")
        destination = "local/gitea.conf"
        # Reload nginx in place (SIGHUP) instead of restarting the task
        # whenever the template's Consul data changes.
        change_mode   = "signal"
        change_signal = "SIGHUP"
      }
    }
  }
}

View file

@ -0,0 +1,86 @@
# Example PostgreSQL job file: https://github.com/GuyBarros/nomad_jobs/blob/master/postgresSQL.nomad
# Single-instance PostgreSQL, reachable only through the Consul Connect mesh.
job "postgres" {
datacenters = ["dc1"]
type = "service"
group "postgres" {
count = 1
# Persistent storage; the host volume itself is declared in the Nomad
# client configuration (os/config/nomad.hcl).
volume "postgres-data" {
type = "host"
read_only = false
source = "postgres-data"
}
network {
# Bridge mode with no published port: only the Envoy sidecar can reach
# 5432, so the database is never exposed on the host.
mode = "bridge"
# port "db" {
# static = 5432
# to = 5432
# }
}
service {
name = "postgres"
port = "5432"
# check {
# name = "alive"
# type = "tcp"
# interval = "10s"
# timeout = "2s"
# }
connect {
# Inbound side of the mesh; consumers (e.g. the gitea job) declare
# "postgres" as an upstream.
sidecar_service {}
}
}
task "postgres" {
driver = "docker"
config {
# ports = ["db"]
image = "postgres"
}
volume_mount {
volume = "postgres-data"
destination = "/var/lib/postgresql/data"
read_only = false
}
template {
# Consul Template only works in template stanza.
# We need it to fetch secret values from Consul.
# The "env = true" parameter sets the environment with the data.
# "destination" key is required but its value doesn't matter.
data = <<EOH
POSTGRES_USER = "{{ key "secrets/postgres/user" }}"
POSTGRES_PASSWORD = "{{ key "secrets/postgres/password" }}"
EOH
destination = "secrets/env.env"
env = true
change_mode = "restart"
}
env {
# Keep the database cluster inside the mounted volume.
PGDATA = "/var/lib/postgresql/data"
}
}
# NOTE(review): this commented-out resources stanza sits at group level;
# if re-enabled, resources belongs inside the task stanza.
# resources {
# # cpu = 1000
# # memory = 1024
# network {
# # mbits = 10
# port "db" {
# static = 5432
# }
# }
# }
}
}

View file

@ -0,0 +1,50 @@
# Traefik 1.7 static configuration, rendered into the traefik job via a
# template stanza.
# Do not verify backend TLS certificates (self-signed backends).
InsecureSkipVerify = true
defaultEntryPoints = ["http", "https"]
[entryPoints]
# Admin entry point: /ping health check and the dashboard/API.
[entryPoints.admin]
address = ":8082"
[entryPoints.http]
address = ":80"
# Force HTTP -> HTTPS redirection.
[entryPoints.http.redirect]
entryPoint = "https"
[entryPoints.https]
address = ":443"
compress = true
[entryPoints.https.tls]
# Health-check endpoint served on the admin entry point.
[ping]
# NOTE(review): other sections spell this key "entryPoint"; confirm the
# config decoder accepts the lowercase form here.
entrypoint = "admin"
[retry]
# Let's Encrypt: account and certificates stored in Consul KV.
[acme]
email = "adrien@luxeylab.net"
storage = "traefik/acme/account"
entryPoint = "https"
onHostRule = true
[acme.httpChallenge]
entryPoint = "http"
[api]
entryPoint = "admin"
dashboard = true
# Consul KV backend (172.17.0.1 = docker0 gateway, i.e. the host agent).
[consul]
endpoint = "172.17.0.1:8500"
watch = true
prefix = "traefik"
# Discover backends from the Consul service catalog; only services tagged
# for traefik are exposed (exposedByDefault = false).
[consulCatalog]
endpoint = "172.17.0.1:8500"
prefix = "traefik"
# domain = "web.deuxfleurs.fr"
exposedByDefault = false
# [metrics]
# [metrics.prometheus]
# # -- below is for traefik 1.7 see https://doc.traefik.io/traefik/v1.7/configuration/metrics/
# entryPoint = "admin"

View file

@ -0,0 +1,72 @@
# Traefik 1.7 edge router (candidate replacement for nginx, see README),
# running with host networking.
job "traefik" {
datacenters = ["dc1"]
type = "service"
# Schedule ahead of ordinary services.
priority = 80
group "traefik" {
network {
port "http_port" { static = 80 }
port "https_port" { static = 443 }
port "admin_port" { static = 8082 }
}
task "server" {
driver = "docker"
config {
image = "amd64/traefik:1.7.20"
readonly_rootfs = true
network_mode = "host"
# Static configuration rendered by the template stanza below.
volumes = [
"secrets/traefik.toml:/etc/traefik/traefik.toml",
]
ports = [ "http_port", "https_port", "admin_port" ]
}
resources {
# NOTE(review): 265 MB looks like a typo for 256 -- confirm intent.
memory = 265
}
template {
data = file("../config/traefik.toml")
destination = "secrets/traefik.toml"
}
service {
name = "traefik-http"
port = "http_port"
# tags = [ "(diplonat (tcp_port 80))" ]
address_mode = "host"
}
service {
name = "traefik-https"
port = "https_port"
# tags = [ "(diplonat (tcp_port 443))" ]
address_mode = "host"
}
service {
name = "traefik-admin"
port = "admin_port"
address_mode = "host"
# Poll Traefik's /ping endpoint; restart the task after 3 consecutive
# failures, with a 90s grace period after (re)start.
check {
type = "http"
protocol = "http"
port = 8082
address_mode = "driver"
path = "/ping"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
}
}
}

View file

@ -0,0 +1,24 @@
{
"data_dir": "/var/lib/consul",
"bind_addr": "[::]",
"advertise_addr": "2001:41d0:8:ba0b::1",
"addresses": {
"dns": "[::]",
"http": "[::]",
"grpc": "[::]"
},
"bootstrap_expect": 1,
"server": true,
"ui_config": {
"enabled": true
},
"ports": {
"dns": 53,
"grpc": 8502
},
"encrypt": "2B2vxbfCRzu3Q29LEJAZBg==",
"domain": "hammerhead.deuxfleurs.fr",
"connect": {
"enabled": true
}
}

View file

@ -0,0 +1,40 @@
{
"data_dir": "/var/lib/consul",
"bind_addr": "[::]",
"advertise_addr": "2001:41d0:8:ba0b::1",
"addresses": {
"dns": "[::]",
"http": "[::]"
},
"retry_join": [
"2001:41d0:8:ba0b::1"
],
"bootstrap_expect": 1,
"server": true,
"ui": {
"enabled": true
},
"acl": {
"enabled": true,
"default_policy": "deny",
"enable_token_persistence": true
},
"ports": {
"dns": 53,
"grpc": 8502
},
"recursors": [
"213.186.33.99",
"172.104.136.243"
],
"encrypt": "2B2vxbfCRzu3Q29LEJAZBg==",
"domain": "hammerhead.deuxfleurs.fr",
"performance": {
"raft_multiplier": 10,
"rpc_hold_timeout": "30s",
"leave_drain_time": "30s"
},
"connect": {
"enabled": true
}
}

View file

@ -0,0 +1,59 @@
# Nomad agent configuration for HammerHead: a single node acting as both
# server and client, listening on IPv6.
addresses {
http = "::"
rpc = "::"
serf = "::"
}
advertise {
http = "2001:41d0:8:ba0b::1"
rpc = "2001:41d0:8:ba0b::1"
serf = "2001:41d0:8:ba0b::1"
}
bind_addr = "[::]"
data_dir = "/var/lib/nomad"
server {
enabled = true
# Single-node cluster: elect itself leader without waiting for peers.
bootstrap_expect = 1
}
consul {
# Local Consul agent; the gRPC address (8502) is required for Connect
# sidecar support.
address = "[::1]:8500"
grpc_address = "[::1]:8502"
}
client {
enabled = true
# Join the server running in this same agent.
servers = ["[::1]:4648"]
network_interface = "eno1"
options {
# Allow privileged containers and arbitrary host volume mounts from
# Docker job specs.
docker.privileged.enabled = "true"
docker.volumes.enabled = "true"
}
# Host volumes consumed by the postgres and gitea jobs; adding one here
# requires an agent restart (see README).
host_volume "postgres-data" {
path = "/opt/postgres/data"
read_only = false
}
host_volume "gitea-data" {
path = "/opt/gitea/data"
read_only = false
}
}
plugin "raw_exec" {
config {
enabled = true
}
}
telemetry {
# Prometheus-compatible metrics, including per-allocation and per-node
# series.
collection_interval = "1s"
disable_hostname = false
prometheus_metrics = true
publish_allocation_metrics = true
publish_node_metrics = true
}