Create a backup script
This commit is contained in:
parent
1183583fdf
commit
9701b863fd
5 changed files with 143 additions and 62 deletions
|
@ -1,22 +0,0 @@
|
||||||
# Stage 1: build the `age` encryption tool from source in a throwaway Go image.
FROM golang:buster as builder

WORKDIR /root

RUN git clone https://filippo.io/age && cd age/cmd/age && go build -o age .

# Stage 2: minimal Debian runtime with the backup tooling.
FROM amd64/debian:buster

# Only the static `age` binary is carried over from the build stage.
COPY --from=builder /root/age/cmd/age/age /usr/local/bin/age

# Install backup dependencies, then trim apt caches to keep the image small.
# NOTE: `rm -rf /var/lib/apt/lists/*` (was `rm -f .../*_*`) removes ALL list
# files, including names without underscores and the partial/ directory.
RUN apt-get update && \
    apt-get -qq -y full-upgrade && \
    apt-get install -y rsync wget openssh-client postgresql-client && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

# SSH key material is expected to be mounted into /root/.ssh at runtime.
RUN mkdir -p /root/.ssh
WORKDIR /root

COPY do_backup.sh /root/do_backup.sh

# Exec form: run the script directly, without an intermediate `sh -c` wrapper,
# so signals (e.g. SIGTERM on container stop) reach the script itself.
CMD ["/root/do_backup.sh"]
|
|
||||||
|
|
|
@ -1,40 +0,0 @@
|
||||||
#!/bin/sh
# Stream an encrypted PostgreSQL base backup and the Matrix media store to a
# remote backup host over SSH. Both streams are encrypted with `age` against
# the host's own ed25519 public key before leaving the machine.
#
# Required environment: TARGET_SSH_HOST, TARGET_SSH_PORT, TARGET_SSH_USER,
# TARGET_SSH_DIR, REPL_PSQL_USER, REPL_PSQL_PWD.

# Exit on first error. Deliberately NOT `set -x`: command tracing would echo
# the `export PGPASSWORD=...` line and leak the replication password to logs.
set -e

cd /root

# SSH refuses keys with loose permissions.
chmod 0600 .ssh/id_ed25519

# Generate an SSH alias `backuphost` from the environment.
cat > .ssh/config <<EOF
Host backuphost
HostName $TARGET_SSH_HOST
Port $TARGET_SSH_PORT
User $TARGET_SSH_USER
EOF

echo "export sql"
export PGPASSWORD="$REPL_PSQL_PWD"
# Stream the base backup (tar on stdout), encrypt, and write it remotely.
# --max-rate avoids saturating the link; --wal-method=none keeps it small.
pg_basebackup \
    --pgdata=- \
    --format=tar \
    --max-rate=1M \
    --no-slot \
    --wal-method=none \
    --gzip \
    --compress=8 \
    --checkpoint=spread \
    --progress \
    --verbose \
    --status-interval=10 \
    --username="$REPL_PSQL_USER" \
    --port=5432 \
    --host=psql-proxy.service.2.cluster.deuxfleurs.fr | \
  age -r "$(cat /root/.ssh/id_ed25519.pub)" | \
  ssh backuphost "cat > $TARGET_SSH_DIR/matrix/db-$(date --iso-8601=minute).gz.age"

MATRIX_MEDIA="/mnt/glusterfs/chat/matrix/synapse/media"
echo "export local_content"
# Same pipeline for the media directory: tar+gzip, encrypt, ship over SSH.
tar -vzcf - "${MATRIX_MEDIA}" | \
  age -r "$(cat /root/.ssh/id_ed25519.pub)" | \
  ssh backuphost "cat > $TARGET_SSH_DIR/matrix/media-$(date --iso-8601=minute).gz.age"
|
|
16
app/backup/build/backup-psql/default.nix
Normal file
16
app/backup/build/backup-psql/default.nix
Normal file
|
@ -0,0 +1,16 @@
|
||||||
|
{ pkgs ? import <nixpkgs> {} }:

let
  # Python 3 interpreter bundled with the minio client library,
  # as required by do_backup.py.
  pythonEnv = pkgs.python3.withPackages (p: with p; [
    minio
  ]);
in
pkgs.mkShell {
  # Everything the backup script shells out to: age for encryption,
  # postgresql_14 for pg_basebackup.
  buildInputs = [
    pythonEnv
    pkgs.age
    pkgs.postgresql_14
  ];
  # Expose the wrapped site-packages so plain `python3` finds minio too.
  shellHook = ''
    PYTHONPATH=${pythonEnv}/${pythonEnv.sitePackages}
  '';
}
|
101
app/backup/build/backup-psql/do_backup.py
Executable file
101
app/backup/build/backup-psql/do_backup.py
Executable file
|
@ -0,0 +1,101 @@
|
||||||
|
#!/usr/bin/env python3
"""Perform a full PostgreSQL base backup, encrypt it with age, upload to S3.

Pipeline: pg_basebackup writes tar files into a working directory, each file
is encrypted with `age` against CRYPT_PUBLIC_KEY, the ciphertexts are uploaded
to an S3 bucket under a timestamped prefix, and finally all local artifacts
are removed (on success and on failure alike).

Required environment: AWS_BUCKET, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY,
AWS_ENDPOINT, CRYPT_PUBLIC_KEY, PSQL_HOST, PSQL_USER, PGPASSWORD.
Optional: CACHE_DIR (working directory, defaults to ".").
"""

import datetime
import os
import shlex
import shutil
import sys

import minio

# Directory pg_basebackup writes into; must be empty and have enough space.
working_directory = "."
if 'CACHE_DIR' in os.environ: working_directory = os.environ['CACHE_DIR']
# Refuse to start without 20 GiB free, so a half-written backup can't fill the disk.
required_space_in_bytes = 20 * 1024 * 1024 * 1024
bucket = os.environ['AWS_BUCKET']
key = os.environ['AWS_ACCESS_KEY_ID']
secret = os.environ['AWS_SECRET_ACCESS_KEY']
endpoint = os.environ['AWS_ENDPOINT']
pubkey = os.environ['CRYPT_PUBLIC_KEY']
psql_host = os.environ['PSQL_HOST']
psql_user = os.environ['PSQL_USER']

# All objects of one run share a timestamp prefix in the bucket.
s3_prefix = str(datetime.datetime.now())
files = [ "backup_manifest", "base.tar.gz", "pg_wal.tar.gz" ]
clear_paths = [ os.path.join(working_directory, f) for f in files ]
crypt_paths = [ os.path.join(working_directory, f) + ".age" for f in files ]
s3_keys = [ s3_prefix + "/" + f for f in files ]


def abort(msg):
    """Remove all local backup artifacts, then exit with *msg*.

    Called with msg=None at the end of a successful run to clean up
    without signalling an error.
    """
    for p in clear_paths + crypt_paths:
        if os.path.exists(p):
            print(f"Remove {p}")
            os.remove(p)

    if msg: sys.exit(msg)
    else: print("success")


# Check we have enough space on disk
if shutil.disk_usage(working_directory).free < required_space_in_bytes:
    abort(f"Not enough space on disk at path {working_directory} to perform a backup, aborting")

# Check postgres password is set (pg_basebackup reads it from the environment)
if 'PGPASSWORD' not in os.environ:
    abort("You must pass postgres' password through the environment variable PGPASSWORD")

# Check our working directory is empty, so we never clobber or upload stale files
if len(os.listdir(working_directory)) != 0:
    abort(f"Working directory {working_directory} is not empty, aborting")

# Check Minio: fail fast before spending time on the backup itself
client = minio.Minio(endpoint, key, secret)
if not client.bucket_exists(bucket):
    abort(f"Bucket {bucket} does not exist or its access is forbidden, aborting")

# Perform the backup locally.
# shlex.quote guards against word splitting / shell injection via env values.
ret = os.system(f"""
pg_basebackup \
  --host={shlex.quote(psql_host)} \
  --username={shlex.quote(psql_user)} \
  --pgdata={shlex.quote(working_directory)} \
  --format=tar \
  --wal-method=stream \
  --gzip \
  --compress=6 \
  --progress \
  --max-rate=5M
""")
# NOTE: os.system returns the raw wait status; 0 still means success.
if ret != 0:
    abort(f"pg_basebackup exit code is {ret}, 0 expected. aborting")

# Check that the expected files are here
for p in clear_paths:
    print(f"Checking that {p} exists locally")
    if not os.path.exists(p):
        abort(f"File {p} expected but not found, aborting")

# Cipher them with age against the configured public key
for c, e in zip(clear_paths, crypt_paths):
    print(f"Ciphering {c} to {e}")
    ret = os.system(f"age -r {shlex.quote(pubkey)} -o {shlex.quote(e)} {shlex.quote(c)}")
    if ret != 0:
        abort(f"age exit code is {ret}, 0 expected. aborting")

# Upload the backup to S3
for p, k in zip(crypt_paths, s3_keys):
    try:
        print(f"Uploading {p} to {k}")
        result = client.fput_object(bucket, k, p)
        print(
            "created {0} object; etag: {1}, version-id: {2}".format(
                result.object_name, result.etag, result.version_id,
            ),
        )
    except Exception as e:
        abort(f"Exception {e} occurred while uploading {p}. aborting")

# Check that the files have been uploaded before declaring victory
for k in s3_keys:
    try:
        print(f"Checking that {k} exists remotely")
        result = client.stat_object(bucket, k)
        print(
            "last-modified: {0}, size: {1}".format(
                result.last_modified, result.size,
            ),
        )
    except Exception as e:
        abort(f"{k} not found on S3. {e}. aborting")

# Success path: clean up local artifacts and print "success".
abort(None)
|
26
op_guide/stolon/nomad_full_backup.md
Normal file
26
op_guide/stolon/nomad_full_backup.md
Normal file
|
@ -0,0 +1,26 @@
|
||||||
|
Start by following the setup instructions in `../backup-minio` first.
|
||||||
|
|
||||||
|
## Garbage collect old backups
|
||||||
|
|
||||||
|
```
|
||||||
|
mc ilm import deuxfleurs/${BUCKET_NAME} <<EOF
|
||||||
|
{
|
||||||
|
"Rules": [
|
||||||
|
{
|
||||||
|
"Expiration": {
|
||||||
|
"Days": 62
|
||||||
|
},
|
||||||
|
"ID": "PurgeOldBackups",
|
||||||
|
"Status": "Enabled"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
EOF
|
||||||
|
```
|
||||||
|
|
||||||
|
Check that the lifecycle rule has been applied to the bucket:
|
||||||
|
|
||||||
|
```
|
||||||
|
mc ilm ls deuxfleurs/${BUCKET_NAME}
|
||||||
|
```
|
||||||
|
|
Reference in a new issue