Garage backup to SFTP target hosted by Max

This commit is contained in:
Alex 2023-04-20 12:10:07 +02:00
parent 57aa2ce1d2
commit af82308e84
4 changed files with 108 additions and 81 deletions

View File

@ -1,6 +1,6 @@
FROM alpine:3.17
RUN apk add rclone btrfs-progs curl bash jq
RUN apk add rclone curl bash jq
COPY do-backup.sh /do-backup.sh

View File

@ -1,34 +1,53 @@
#!/usr/bin/env bash
# DEPENDENCIES: btrfs-progs curl rclone jq
# DESCRIPTION:
# Script to backup all buckets on a Garage cluster using rclone.
#
# REQUIREMENTS:
# An access key for the backup script must be created in Garage beforehand.
# This script will use the Garage administration API to grant read access
# to this key on all buckets.
#
# A rclone configuration file is expected to be located at `/etc/secrets/rclone.conf`,
# which contains credentials to the following two remotes:
# garage: the Garage server, for read access (using the backup access key)
# backup: the backup location
#
# DEPENDENCIES: (see Dockerfile)
# curl
# jq
# rclone
#
# PARAMETERS (environment variables)
# $BACKUP_BASEDIR => where to store backups and btrfs snapshots
# $GARAGE_ADMIN_API_URL => Garage administration API URL (e.g. http://localhost:3903)
# $GARAGE_ADMIN_TOKEN => Garage administration access token
# $GARAGE_ACCESS_KEY => Garage access key
# $GARAGE_SECRET_KEY => Garage secret key
# $GARAGE_ACCESS_KEY => Garage access key ID
# $TARGET_BACKUP_DIR => Folder on the backup remote where to store buckets
if [ -z "$BACKUP_BASEDIR" -o -z "$GARAGE_ACCESS_KEY" -o -z "$GARAGE_ADMIN_TOKEN" ]; then
# Abort early when a required parameter is missing: without the exit the
# script previously kept running with an incomplete configuration.
# (Separate [ ] tests joined with || replace the obsolescent `test -o`.)
if [ -z "$GARAGE_ACCESS_KEY" ] || [ -z "$GARAGE_ADMIN_TOKEN" ] || [ -z "$GARAGE_ADMIN_API_URL" ]; then
echo "Missing parameters"
exit 1
fi
if [ ! -d "$BACKUP_BASEDIR/buckets" ]; then
btrfs subvolume create "$BACKUP_BASEDIR/buckets"
fi
# copy potentially immutable file to a mutable location,
# otherwise rclone complains
mkdir -p /root/.config/rclone
cp /etc/secrets/rclone.conf /root/.config/rclone/rclone.conf
# Helper: authenticated curl against the Garage admin API.
# "$@" (quoted) forwards every argument verbatim; the original unquoted $@
# word-split arguments and glob-expanded them, which can corrupt URLs
# containing spaces or shell metacharacters.
function gcurl {
curl -s -H "Authorization: Bearer $GARAGE_ADMIN_TOKEN" "$@"
}
BUCKETS=$(gcurl "http://localhost:3903/v0/bucket" | jq -r '.[].id')
BUCKETS=$(gcurl "$GARAGE_ADMIN_API_URL/v0/bucket" | jq -r '.[].id')
mkdir -p /tmp/buckets-info
for BUCKET in $BUCKETS; do
echo "==== BUCKET $BUCKET ===="
gcurl "http://localhost:3903/v0/bucket?id=$BUCKET" > "$BACKUP_BASEDIR/buckets/$BUCKET.json"
gcurl "http://localhost:3903/v0/bucket?id=$BUCKET" > "/tmp/buckets-info/$BUCKET.json"
rclone copy "/tmp/buckets-info/$BUCKET.json" "backup:$TARGET_BACKUP_DIR/" 2>&1
ALIASES=$(jq -r '.globalAliases[]' < "$BACKUP_BASEDIR/buckets/$BUCKET.json")
ALIASES=$(jq -r '.globalAliases[]' < "/tmp/buckets-info/$BUCKET.json")
echo "(aka. $ALIASES)"
case $ALIASES in
@ -41,10 +60,6 @@ for BUCKET in $BUCKETS; do
*)
echo "Backing up $BUCKET"
if [ ! -d "$BACKUP_BASEDIR/buckets/$BUCKET" ]; then
mkdir "$BACKUP_BASEDIR/buckets/$BUCKET"
fi
gcurl -X POST -H "Content-Type: application/json" --data @- "http://localhost:3903/v0/bucket/allow" >/dev/null <<EOF
{
"bucketId": "$BUCKET",
@ -53,32 +68,16 @@ for BUCKET in $BUCKETS; do
}
EOF
rclone sync --s3-endpoint http://localhost:3900 \
--s3-access-key-id $GARAGE_ACCESS_KEY \
--s3-secret-access-key $GARAGE_SECRET_KEY \
--s3-region garage \
--s3-force-path-style \
--transfers 32 \
rclone sync \
--transfers 32 \
--fast-list \
--stats-one-line \
--stats 10s \
--stats-log-level NOTICE \
":s3:$BUCKET" "$BACKUP_BASEDIR/buckets/$BUCKET" 2>&1
"garage:$BUCKET" "backup:$TARGET_BACKUP_DIR/$BUCKET" 2>&1
;;
esac
done
echo "========= DONE SYNCHRONIZING =========="
if [ ! -d "$BACKUP_BASEDIR/snapshots" ]; then
mkdir "$BACKUP_BASEDIR/snapshots"
fi
SNAPSHOT="$BACKUP_BASEDIR/snapshots/buckets-$(date +%F)"
if [ ! -e "$SNAPSHOT" ]; then
echo "Making snapshot: $SNAPSHOT"
btrfs subvolume snapshot "$BACKUP_BASEDIR/buckets" "$SNAPSHOT"
btrfs prop set "$SNAPSHOT" ro true
fi

View File

@ -239,48 +239,4 @@ EOH
}
}
}
group "backup-garage" {
constraint {
attribute = "${attr.unique.hostname}"
operator = "="
value = "abricot"
}
task "main" {
driver = "docker"
config {
image = "lxpz/backup_garage:4"
network_mode = "host"
volumes = [
"/mnt/storage/backup/garage.deuxfleurs.fr:/backup"
]
}
template {
data = <<EOH
BACKUP_BASEDIR=/backup
GARAGE_ADMIN_TOKEN={{ key "secrets/garage/admin_token" }}
GARAGE_ACCESS_KEY={{ key "secrets/backup/garage/s3_access_key_id" }}
GARAGE_SECRET_KEY={{ key "secrets/backup/garage/s3_secret_access_key" }}
EOH
destination = "secrets/env_vars"
env = true
}
resources {
cpu = 500
memory = 200
memory_max = 4000
}
restart {
attempts = 2
interval = "30m"
delay = "15s"
mode = "fail"
}
}
}
}

View File

@ -0,0 +1,72 @@
# Nomad batch job: daily backup of all Garage buckets to an SFTP target,
# driven by the do-backup.sh script shipped in the backup_garage image.
job "backup-garage" {
datacenters = ["neptune", "bespin"]
type = "batch"
priority = "60"
periodic {
cron = "@daily"
// Do not allow overlapping runs.
prohibit_overlap = true
}
group "backup-garage" {
task "main" {
driver = "docker"
config {
image = "lxpz/backup_garage:9"
# Host networking so the container can reach the node-local Garage
# endpoints referenced below (localhost:3903 admin, localhost:3900 S3).
network_mode = "host"
volumes = [
# rclone credentials rendered by the second template below.
"secrets/rclone.conf:/etc/secrets/rclone.conf"
]
}
# Environment variables consumed by the backup script (Consul-templated).
template {
data = <<EOH
GARAGE_ADMIN_TOKEN={{ key "secrets/garage/admin_token" }}
GARAGE_ADMIN_API_URL=http://localhost:3903
GARAGE_ACCESS_KEY={{ key "secrets/backup/garage/s3_access_key_id" }}
TARGET_BACKUP_DIR={{ key "secrets/backup/garage/target_sftp_directory" }}
EOH
destination = "secrets/env_vars"
env = true
}
# rclone configuration: the "garage" remote reads buckets over S3,
# the "backup" remote is the SFTP destination.
template {
data = <<EOH
[garage]
type = s3
provider = Other
env_auth = false
access_key_id = {{ key "secrets/backup/garage/s3_access_key_id" }}
secret_access_key = {{ key "secrets/backup/garage/s3_secret_access_key" }}
endpoint = http://localhost:3900
region = garage
[backup]
type = sftp
host = {{ key "secrets/backup/garage/target_sftp_host" }}
user = {{ key "secrets/backup/garage/target_sftp_user" }}
port = {{ key "secrets/backup/garage/target_sftp_port" }}
key_pem = {{ key "secrets/backup/garage/target_sftp_key_pem" | replaceAll "\n" "\\n" }}
shell_type = unix
EOH
destination = "secrets/rclone.conf"
}
resources {
cpu = 500
memory = 200
# memory_max allows bursting above the 200 MB soft limit during syncs.
memory_max = 4000
}
restart {
attempts = 2
interval = "30m"
delay = "15s"
mode = "fail"
}
}
}
}