infra/roles/backup/templates/backup.sh.j2

#!/usr/bin/env bash
# Generated by Ansible — do not edit manually
# Creates a single dated archive: main_data_YYYY-MM-DD_HH-MM.tar.gz
# Structure:
#   data/
#     databases/ — PostgreSQL dumps (restore with psql)
#     volumes/   — Docker volume contents (restore by copying)
# Runs every 6 hours (00:00, 06:00, 12:00, 18:00); retains {{ backup_retention_days }} days of archives.
# Uploads to S3: s3://{{ s3_bucket }}/data/
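#
# A cron entry matching the schedule above might look like this (the script
# path and log location are assumptions, not defined by this template):
#   0 0,6,12,18 * * * root /usr/local/bin/backup.sh >> /var/log/backup.log 2>&1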
set -euo pipefail
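# -e: abort on any command failure; -u: treat unset variables as errors;
# -o pipefail: a pipeline fails if any stage fails, so a failed pg_dump
# is not masked by a successful gzip.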
BACKUP_ROOT="{{ backup_dir }}"
DATE=$(date +%Y-%m-%d_%H-%M)
WORK_DIR="${BACKUP_ROOT}/tmp_${DATE}"
ARCHIVE="${BACKUP_ROOT}/main_data_${DATE}.tar.gz"
KEEP_DAYS="{{ backup_retention_days }}"
log() { echo "[$(date '+%Y-%m-%d %H:%M:%S')] $*"; }
log "=== Backup started: ${DATE} ==="
mkdir -p "${WORK_DIR}/data/databases" "${WORK_DIR}/data/volumes"
# ── PostgreSQL: Forgejo ──────────────────────────────────────────────────────
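# pg_dump runs inside the database container and writes to stdout, so the dump
# streams straight through gzip with no temporary file inside the container.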
log "Dumping forgejo-db..."
docker exec forgejo-db pg_dump -U forgejo forgejo \
  | gzip > "${WORK_DIR}/data/databases/forgejo.sql.gz"
log " → databases/forgejo.sql.gz ($(du -sh "${WORK_DIR}/data/databases/forgejo.sql.gz" | cut -f1))"
# ── PostgreSQL: Plane ────────────────────────────────────────────────────────
log "Dumping plane-db..."
docker exec plane-db pg_dump -U plane plane \
  | gzip > "${WORK_DIR}/data/databases/plane.sql.gz"
log " → databases/plane.sql.gz ($(du -sh "${WORK_DIR}/data/databases/plane.sql.gz" | cut -f1))"
# ── Forgejo data volume (repos, attachments, LFS) ───────────────────────────
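# --volumes-from mounts the service container's volumes into a throwaway
# alpine container, so tar can archive them without installing anything on the
# host. Note that the service keeps running during the copy, so the snapshot
# is not guaranteed to be point-in-time consistent.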
log "Backing up Forgejo data..."
docker run --rm \
  --volumes-from forgejo \
  -v "${WORK_DIR}/data/volumes:/backup" \
  alpine:3 \
  tar czf /backup/forgejo.tar.gz /data
log " → volumes/forgejo.tar.gz ($(du -sh "${WORK_DIR}/data/volumes/forgejo.tar.gz" | cut -f1))"
# ── Uptime Kuma ──────────────────────────────────────────────────────────────
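# Uptime Kuma keeps its SQLite database under /app/data; a copy taken while
# the service is running can catch a mid-write state, so restores from this
# archive may need an integrity check.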
log "Backing up Uptime Kuma..."
docker run --rm \
  --volumes-from uptime-kuma \
  -v "${WORK_DIR}/data/volumes:/backup" \
  alpine:3 \
  tar czf /backup/uptime-kuma.tar.gz /app/data
log " → volumes/uptime-kuma.tar.gz ($(du -sh "${WORK_DIR}/data/volumes/uptime-kuma.tar.gz" | cut -f1))"
# ── Add restore instructions ─────────────────────────────────────────────────
cat > "${WORK_DIR}/data/RESTORE.md" << 'RESTORE_EOF'
# Restore Instructions
## Prerequisites
- Docker and docker compose running
- Services stack deployed via Ansible (`ansible-playbook playbooks/deploy.yml`)
## Step 1 — Stop services that need data restored
Leave the database containers (`forgejo-db`, `plane-db`) running: Step 2 restores
the dumps via `docker exec`, which only works against a running container.
```bash
cd /opt/services
docker compose stop forgejo plane-api plane-web plane-admin plane-space plane-worker plane-beat uptime-kuma
```
## Step 2 — Restore PostgreSQL databases
```bash
# Forgejo DB
zcat data/databases/forgejo.sql.gz | docker exec -i forgejo-db psql -U forgejo forgejo
# Plane DB
zcat data/databases/plane.sql.gz | docker exec -i plane-db psql -U plane plane
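# Note: psql replays the dump into the existing database. If objects already
# exist you will see errors; for a clean restore, drop and recreate the
# database first (dropdb/createdb inside the container) before replaying.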
```
## Step 3 — Restore volume data
```bash
# Forgejo — extracts /data/ into the container
docker run --rm --volumes-from forgejo -v "$(pwd)/data/volumes:/backup" \
  alpine:3 sh -c "cd / && tar xzf /backup/forgejo.tar.gz"
# Uptime Kuma — extracts /app/data/ into the container
docker run --rm --volumes-from uptime-kuma -v "$(pwd)/data/volumes:/backup" \
  alpine:3 sh -c "cd / && tar xzf /backup/uptime-kuma.tar.gz"
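# --volumes-from also works while the target container is stopped; the
# container only needs to exist so its volumes can be referenced. Extraction
# overwrites files in place but does not delete files created after the backup.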
```
## Step 4 — Restart services
```bash
cd /opt/services
docker compose up -d
```
RESTORE_EOF
# ── Pack everything into one archive ─────────────────────────────────────────
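# tar -C changes into WORK_DIR before archiving, so paths inside the tarball
# start at data/ rather than embedding the absolute backup directory.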
log "Creating archive: ${ARCHIVE}..."
tar czf "${ARCHIVE}" -C "${WORK_DIR}" data
rm -rf "${WORK_DIR}"
log " → Archive: $(du -sh "${ARCHIVE}" | cut -f1)"
# ── Upload to S3 ─────────────────────────────────────────────────────────────
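# Credentials are scoped to the single aws invocation via environment
# variables, so they never persist in the shell environment; --endpoint-url
# points the CLI at the configured S3-compatible endpoint.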
log "Uploading to S3 (s3://{{ s3_bucket }}/data/)..."
AWS_ACCESS_KEY_ID="{{ s3_access_key }}" \
AWS_SECRET_ACCESS_KEY="{{ s3_secret_key }}" \
aws s3 cp "${ARCHIVE}" "s3://{{ s3_bucket }}/data/$(basename "${ARCHIVE}")" \
  --endpoint-url "{{ s3_endpoint }}" \
  --no-progress \
  && log " → S3 upload complete" \
  || { log " ⚠ S3 upload FAILED"; exit 1; }
# ── Cleanup old local backups ─────────────────────────────────────────────────
log "Removing local backups older than ${KEEP_DAYS} days..."
find "${BACKUP_ROOT}" -name "main_data_*.tar.gz" -mtime +${KEEP_DAYS} -delete
# ── Prune old S3 objects ──────────────────────────────────────────────────────
log "Pruning S3 objects older than ${KEEP_DAYS} days..."
CUTOFF=$(date -d "-${KEEP_DAYS} days" +%Y-%m-%d 2>/dev/null \
  || date -v-${KEEP_DAYS}d +%Y-%m-%d)
AWS_ACCESS_KEY_ID="{{ s3_access_key }}" \
AWS_SECRET_ACCESS_KEY="{{ s3_secret_key }}" \
aws s3 ls "s3://{{ s3_bucket }}/data/" \
  --endpoint-url "{{ s3_endpoint }}" \
  | awk '{print $4}' \
  | while read -r obj; do
      # Filenames look like main_data_YYYY-MM-DD_HH-MM.tar.gz, so anchor the
      # date match after the main_data_ prefix (a ^-anchored match never fires).
      obj_date=$(echo "$obj" | grep -oP 'main_data_\K\d{4}-\d{2}-\d{2}' || true)
      if [[ -n "$obj_date" && "$obj_date" < "$CUTOFF" ]]; then
        AWS_ACCESS_KEY_ID="{{ s3_access_key }}" \
        AWS_SECRET_ACCESS_KEY="{{ s3_secret_key }}" \
        aws s3 rm "s3://{{ s3_bucket }}/data/$obj" \
          --endpoint-url "{{ s3_endpoint }}" \
          && log " → Deleted old S3 object: data/$obj"
      fi
    done
log "=== Backup completed: $(du -sh "${ARCHIVE}" | cut -f1) ==="