Plume backup + WIP consul

Quentin 2022-01-27 16:32:57 +01:00
parent 00d7106a18
commit 3baa511fce
14 changed files with 187 additions and 97 deletions


@@ -1,19 +1,23 @@
 job "backup_daily" {
   datacenters = ["dc1"]
   type = "batch"
+  priority = "60"

   periodic {
     cron = "@daily"
     // Do not allow overlapping runs.
     prohibit_overlap = true
   }

-  task "backup-dovecot" {
+  group "backup-dovecot" {
     constraint {
       attribute = "${attr.unique.hostname}"
       operator = "="
       value = "digitale"
     }

+    task "main" {
       driver = "docker"

       config {
@@ -25,7 +29,6 @@ job "backup_daily" {
       ]
     }

     template {
       data = <<EOH
 AWS_ACCESS_KEY_ID={{ key "secrets/email/dovecot/backup_aws_access_key_id" }}
@@ -39,6 +42,7 @@ EOH
       }

       resources {
+        cpu = 500
         memory = 200
       }
@@ -49,4 +53,115 @@ EOH
         mode = "fail"
       }
     }
+  }
+
+  group "backup-plume" {
+    constraint {
+      attribute = "${attr.unique.hostname}"
+      operator = "="
+      value = "digitale"
+    }
+
+    task "main" {
+      driver = "docker"
+      config {
+        image = "restic/restic:0.12.1"
+        entrypoint = [ "/bin/sh", "-c" ]
+        args = [ "restic backup /plume && restic forget --keep-within 1m1d --keep-within-weekly 3m --keep-within-monthly 1y ; restic prune --max-unused 50% --max-repack-size 2G ; restic check" ]
+        volumes = [
+          "/mnt/ssd/plume/media:/plume"
+        ]
+      }
+      template {
+        data = <<EOH
+AWS_ACCESS_KEY_ID={{ key "secrets/plume/backup_aws_access_key_id" }}
+AWS_SECRET_ACCESS_KEY={{ key "secrets/plume/backup_aws_secret_access_key" }}
+RESTIC_REPOSITORY={{ key "secrets/plume/backup_restic_repository" }}
+RESTIC_PASSWORD={{ key "secrets/plume/backup_restic_password" }}
+EOH
+        destination = "secrets/env_vars"
+        env = true
+      }
+      resources {
+        cpu = 500
+        memory = 200
+      }
+      restart {
+        attempts = 2
+        interval = "30m"
+        delay = "15s"
+        mode = "fail"
+      }
+    }
+  }
+
+  group "backup-consul" {
+    task "export-kv-store" {
+      driver = "docker"
+      lifecycle {
+        hook = "prestart"
+        sidecar = false
+      }
+      config {
+        image = "consul:1.11.2"
+        entrypoint = [ "/bin/sh", "-c" ]
+        network_mode = "host"
+        args = [ "/bin/consul kv export > $NOMAD_ALLOC_DIR/consul.json" ]
+      }
+      resources {
+        cpu = 200
+        memory = 200
+      }
+      restart {
+        attempts = 2
+        interval = "30m"
+        delay = "15s"
+        mode = "fail"
+      }
+    }
+
+    task "upload-kv-store" {
+      driver = "docker"
+      config {
+        image = "restic/restic:0.12.1"
+        entrypoint = [ "/bin/sh", "-c" ]
+        args = [ "restic backup $NOMAD_ALLOC_DIR && restic forget --keep-within 1m1d --keep-within-weekly 3m --keep-within-monthly 1y ; restic prune --max-unused 50% --max-repack-size 2G ; restic check" ]
+      }
+      template {
+        data = <<EOH
+AWS_ACCESS_KEY_ID={{ key "secrets/plume/backup_aws_access_key_id" }}
+AWS_SECRET_ACCESS_KEY={{ key "secrets/plume/backup_aws_secret_access_key" }}
+RESTIC_REPOSITORY={{ key "secrets/plume/backup_restic_repository" }}
+RESTIC_PASSWORD={{ key "secrets/plume/backup_restic_password" }}
+EOH
+        destination = "secrets/env_vars"
+        env = true
+      }
+      resources {
+        cpu = 200
+        memory = 200
+      }
+      restart {
+        attempts = 2
+        interval = "30m"
+        delay = "15s"
+        mode = "fail"
+      }
+    }
+  }
 }
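Both new groups run the same restic maintenance chain inside the container. A minimal manual sketch of that pipeline, assuming the four AWS_*/RESTIC_* variables from the templates above are exported in the shell, a local Consul agent is reachable, and /tmp/consul.json stands in for Nomad's $NOMAD_ALLOC_DIR:

```bash
# Dump the Consul KV store (the job does this in a prestart task with host networking).
consul kv export > /tmp/consul.json

# Back it up, then apply the retention policy: keep everything from the last
# month (plus a day), weekly snapshots for 3 months, monthly snapshots for a year.
restic backup /tmp/consul.json \
  && restic forget --keep-within 1m1d --keep-within-weekly 3m --keep-within-monthly 1y

# Reclaim space, tolerating up to 50% unused data and repacking at most 2G per run.
restic prune --max-unused 50% --max-repack-size 2G

# Verify repository integrity.
restic check
```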


@@ -1,62 +0,0 @@
-job "backup_manual_matrix" {
-  datacenters = ["dc1"]
-  type = "batch"
-
-  task "backup-matrix" {
-    driver = "docker"
-    config {
-      image = "superboum/backup_matrix:4"
-      volumes = [
-        "secrets/id_ed25519:/root/.ssh/id_ed25519",
-        "secrets/id_ed25519.pub:/root/.ssh/id_ed25519.pub",
-        "secrets/known_hosts:/root/.ssh/known_hosts",
-        "/mnt/glusterfs/chat/matrix/synapse/media:/mnt/glusterfs/chat/matrix/synapse/media"
-      ]
-      network_mode = "host"
-    }
-
-    env {
-      CONSUL_HTTP_ADDR = "http://consul.service.2.cluster.deuxfleurs.fr:8500"
-    }
-
-    template {
-      data = <<EOH
-TARGET_SSH_USER={{ key "secrets/backup/target_ssh_user" }}
-TARGET_SSH_PORT={{ key "secrets/backup/target_ssh_port" }}
-TARGET_SSH_HOST={{ key "secrets/backup/target_ssh_host" }}
-TARGET_SSH_DIR={{ key "secrets/backup/target_ssh_dir" }}
-REPL_PSQL_USER={{ key "secrets/postgres/keeper/pg_repl_username" }}
-REPL_PSQL_PWD={{ key "secrets/postgres/keeper/pg_repl_pwd" }}
-EOH
-      destination = "secrets/env_vars"
-      env = true
-    }
-
-    template {
-      data = "{{ key \"secrets/backup/id_ed25519\" }}"
-      destination = "secrets/id_ed25519"
-    }
-
-    template {
-      data = "{{ key \"secrets/backup/id_ed25519.pub\" }}"
-      destination = "secrets/id_ed25519.pub"
-    }
-
-    template {
-      data = "{{ key \"secrets/backup/target_ssh_fingerprint\" }}"
-      destination = "secrets/known_hosts"
-    }
-
-    resources {
-      memory = 200
-    }
-
-    restart {
-      attempts = 2
-      interval = "30m"
-      delay = "15s"
-      mode = "fail"
-    }
-  }
-}


@@ -1 +0,0 @@
-USER AWS access key ID, eg. GKxxxx


@@ -1 +0,0 @@
-USER AWS Endpoint, eg. s3.garage.tld


@@ -1 +0,0 @@
-USER AWS Secret Access Key


@@ -0,0 +1 @@
+USER Backup AWS access key ID


@@ -0,0 +1 @@
+USER Backup AWS secret access key


@@ -0,0 +1 @@
+USER Restic password to encrypt backups


@@ -0,0 +1 @@
+USER Restic repository, eg. s3:https://s3.garage.tld


@@ -3,9 +3,12 @@ Add the admin account as `deuxfleurs` to your `~/.mc/config` file

 You need to choose some names/identifiers:

 ```bash
-export BUCKET_NAME=example
-export NEW_ACCESS_KEY_ID=hello
+export ENDPOINT="https://s3.garage.tld"
+export SERVICE_NAME="example"
+export BUCKET_NAME="backups-${SERVICE_NAME}"
+export NEW_ACCESS_KEY_ID="key-${SERVICE_NAME}"
 export NEW_SECRET_ACCESS_KEY=$(openssl rand -base64 32)
 export POLICY_NAME="policy-$BUCKET_NAME"
 ```
@@ -22,7 +25,19 @@ Create a new user:
 mc admin user add deuxfleurs $NEW_ACCESS_KEY_ID $NEW_SECRET_ACCESS_KEY
 ```

-Add this new user to your `~/.mc/config.json` file, as `backup-user` for example.
+Add this new user to your `~/.mc/config.json`. Run this command first to generate the snippet to copy/paste:
+
+```
+cat > /dev/stdout <<EOF
+  "$NEW_ACCESS_KEY_ID": {
+    "url": "$ENDPOINT",
+    "accessKey": "$NEW_ACCESS_KEY_ID",
+    "secretKey": "$NEW_SECRET_ACCESS_KEY",
+    "api": "S3v4",
+    "path": "auto"
+  },
+EOF
+```
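Depending on your `mc` version, the `alias` subcommand can register the credentials without hand-editing the JSON; a minimal sketch, assuming the variables above are still exported:

```bash
# Let mc write its own config entry for the new user.
mc alias set backup-user "$ENDPOINT" "$NEW_ACCESS_KEY_ID" "$NEW_SECRET_ACCESS_KEY"
```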
 ---

@@ -79,8 +94,6 @@ mc ls backup-user/

 Now we need to initialize the repository with restic.

 ```bash
-export ENDPOINT="https://garage.tld"
-
 export AWS_ACCESS_KEY_ID=$NEW_ACCESS_KEY_ID
 export AWS_SECRET_ACCESS_KEY=$NEW_SECRET_ACCESS_KEY
 export RESTIC_REPOSITORY="s3:$ENDPOINT/$BUCKET_NAME"
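With those variables exported, initializing the repository typically amounts to a plain `restic init` (a sketch; restic reads the repository, password, and AWS credentials from the environment):

```bash
restic init        # creates the repository layout in the bucket
restic snapshots   # should succeed and list no snapshots yet
```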
@@ -101,16 +114,39 @@ See your snapshots with:
 restic snapshots
 ```

+Also check these useful commands:
+
+```
+restic ls
+restic diff
+restic help
+```
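For reference, `restic ls` and `restic diff` both expect snapshot IDs; a quick usage sketch (the short IDs are illustrative):

```bash
restic ls latest                # list files in the most recent snapshot
restic diff 5845b002 2ab627a6   # compare two snapshots by short ID
```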
 ---

 Add the secrets to Consul, near your service secrets.
 The idea is that the backup service is a component of the global running service.
-You must add:
-
-- `backup_aws_access_key_id`
-- `backup_aws_secret_access_key`
-- `backup_restic_repository`
-- `backup_restic_password`
+In `app/<name>/secrets/<subpath>`, run:
+
+```bash
+echo "USER Backup AWS access key ID" > backup_aws_access_key_id
+echo "USER Backup AWS secret access key" > backup_aws_secret_access_key
+echo "USER Restic repository, eg. s3:https://s3.garage.tld" > backup_restic_repository
+echo "USER Restic password to encrypt backups" > backup_restic_password
+```
+
+Then run secretmgr:
+
+```bash
+# Spawning a nix shell is an easy way to get all the dependencies you need
+nix-shell
+# Check that secretmgr works for you
+python3 secretmgr.py check <name>
+# Now interactively feed the secrets
+python3 secretmgr.py gen <name>
+```
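To verify the secrets actually landed in Consul, querying one of the keys is a quick check; a sketch assuming the key layout used by the job templates above (replace `<name>`):

```bash
consul kv get "secrets/<name>/backup_restic_repository"
```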
 ---