Backup (backup.sh.j2):
- Creates a single data_YYYY-MM-DD_HH-MM.tar.gz archive
- Unified data/ layout: databases/ (pg_dump .sql.gz) + volumes/ (docker volumes)
- Includes RESTORE.md with step-by-step instructions inside the archive
- S3 uploads to main/ prefix instead of flat root

Outline (tools role):
- Add SMTP_HOST/PORT/FROM vars to env.j2 template (required for email magic-link auth to activate)
- Add outline_smtp_* defaults to roles/tools/defaults/main.yml
- Without SMTP_HOST, the email auth plugin is disabled and clicking login does nothing

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
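A minimal sketch of the Outline SMTP change described above; the `outline_smtp_*` variable names and the exact Outline setting names are assumptions, with the authoritative values in `roles/tools/defaults/main.yml` and the tools role's `env.j2`:

```
# env.j2 (excerpt): enables Outline's email magic-link sign-in (names illustrative)
SMTP_HOST={{ outline_smtp_host }}
SMTP_PORT={{ outline_smtp_port }}
SMTP_FROM_EMAIL={{ outline_smtp_from }}
```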
#!/usr/bin/env bash
# Generated by Ansible — do not edit manually
# Creates a single dated archive: main_data_YYYY-MM-DD_HH-MM.tar.gz
# Structure:
#   data/
#     databases/ — PostgreSQL dumps (restore with psql)
#     volumes/   — Docker volume contents (restore by copying)
# Runs daily at 03:00, keeps {{ backup_retention_days }} days.
# Uploads to S3: s3://{{ s3_bucket }}/main/
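#
# Example contents of a finished archive (sizes and dates vary):
#   data/RESTORE.md
#   data/databases/forgejo.sql.gz
#   data/databases/plane.sql.gz
#   data/volumes/vaultwarden.tar.gz
#   data/volumes/forgejo.tar.gz
#   data/volumes/uptime-kuma.tar.gz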
set -euo pipefail

BACKUP_ROOT="{{ backup_dir }}"
DATE=$(date +%Y-%m-%d_%H-%M)
WORK_DIR="${BACKUP_ROOT}/tmp_${DATE}"
ARCHIVE="${BACKUP_ROOT}/main_data_${DATE}.tar.gz"
KEEP_DAYS="{{ backup_retention_days }}"

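# Example (backup_dir value illustrative): a run at 2024-05-01 03:00 with
# backup_dir=/var/backups yields ARCHIVE=/var/backups/main_data_2024-05-01_03-00.tar.gz.
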
log() { echo "[$(date '+%Y-%m-%d %H:%M:%S')] $*"; }

log "=== Backup started: ${DATE} ==="
|
|
mkdir -p "${WORK_DIR}/data/databases" "${WORK_DIR}/data/volumes"
|
|
|
|
# ── PostgreSQL: Forgejo ──────────────────────────────────────────────────────
log "Dumping forgejo-db..."
docker exec forgejo-db pg_dump -U forgejo forgejo \
  | gzip > "${WORK_DIR}/data/databases/forgejo.sql.gz"
log " → databases/forgejo.sql.gz ($(du -sh "${WORK_DIR}/data/databases/forgejo.sql.gz" | cut -f1))"

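# A dump can be sanity-checked in place without restoring it, e.g.:
#   zcat "${WORK_DIR}/data/databases/forgejo.sql.gz" | head   # plain-SQL dumps start with "-- PostgreSQL database dump"
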
# ── PostgreSQL: Plane ────────────────────────────────────────────────────────
log "Dumping plane-db..."
docker exec plane-db pg_dump -U plane plane \
  | gzip > "${WORK_DIR}/data/databases/plane.sql.gz"
log " → databases/plane.sql.gz ($(du -sh "${WORK_DIR}/data/databases/plane.sql.gz" | cut -f1))"

# ── Vaultwarden data volume ──────────────────────────────────────────────────
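# A throwaway alpine container shares the service's volumes (--volumes-from) and mounts
# the work dir at /backup, so tar reads the live data path directly; tar drops the
# leading "/" from member names, which is why the restore extracts from "/" (see RESTORE.md).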
log "Backing up Vaultwarden data..."
|
|
docker run --rm \
|
|
--volumes-from vaultwarden \
|
|
-v "${WORK_DIR}/data/volumes:/backup" \
|
|
alpine:3 \
|
|
tar czf /backup/vaultwarden.tar.gz /data
|
|
log " → volumes/vaultwarden.tar.gz ($(du -sh "${WORK_DIR}/data/volumes/vaultwarden.tar.gz" | cut -f1))"
|
|
|
|
# ── Forgejo data volume (repos, attachments, LFS) ───────────────────────────
log "Backing up Forgejo data..."
docker run --rm \
  --volumes-from forgejo \
  -v "${WORK_DIR}/data/volumes:/backup" \
  alpine:3 \
  tar czf /backup/forgejo.tar.gz /data
log " → volumes/forgejo.tar.gz ($(du -sh "${WORK_DIR}/data/volumes/forgejo.tar.gz" | cut -f1))"

# ── Uptime Kuma ──────────────────────────────────────────────────────────────
log "Backing up Uptime Kuma..."
docker run --rm \
  --volumes-from uptime-kuma \
  -v "${WORK_DIR}/data/volumes:/backup" \
  alpine:3 \
  tar czf /backup/uptime-kuma.tar.gz /app/data
log " → volumes/uptime-kuma.tar.gz ($(du -sh "${WORK_DIR}/data/volumes/uptime-kuma.tar.gz" | cut -f1))"

# ── Add restore instructions ─────────────────────────────────────────────────
cat > "${WORK_DIR}/data/RESTORE.md" << 'RESTORE_EOF'
# Restore Instructions

## Prerequisites
- Docker and docker compose running
- Services stack deployed via Ansible (`ansible-playbook playbooks/deploy.yml`)
- This archive extracted on the host (`tar xzf main_data_<DATE>.tar.gz`); Steps 2 and 3 use paths relative to the extracted `data/` directory, so run them from the directory that contains it

## Step 1 — Stop services that need data restored
```bash
cd /opt/services
docker compose stop forgejo plane-api plane-web plane-admin plane-space plane-worker plane-beat uptime-kuma vaultwarden
# Keep forgejo-db and plane-db running: Step 2 restores into them with docker exec, which needs running containers
```

## Step 2 — Restore PostgreSQL databases
```bash
# Forgejo DB
zcat data/databases/forgejo.sql.gz | docker exec -i forgejo-db psql -U forgejo forgejo

# Plane DB
zcat data/databases/plane.sql.gz | docker exec -i plane-db psql -U plane plane
```

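If the target database already contains objects, the plain-SQL dump can fail on conflicts. One option (assuming the `forgejo`/`plane` roles are superusers in their containers, the default for the stock postgres image) is to drop and recreate the database first, e.g. for Forgejo:

```bash
docker exec forgejo-db psql -U forgejo -d postgres \
  -c "DROP DATABASE IF EXISTS forgejo;" -c "CREATE DATABASE forgejo OWNER forgejo;"
```
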
## Step 3 — Restore volume data
```bash
# Vaultwarden — extracts /data/ into the container
docker run --rm --volumes-from vaultwarden -v "$(pwd)/data/volumes:/backup" \
  alpine:3 sh -c "cd / && tar xzf /backup/vaultwarden.tar.gz"

# Forgejo — extracts /data/ into the container
docker run --rm --volumes-from forgejo -v "$(pwd)/data/volumes:/backup" \
  alpine:3 sh -c "cd / && tar xzf /backup/forgejo.tar.gz"

# Uptime Kuma — extracts /app/data/ into the container
docker run --rm --volumes-from uptime-kuma -v "$(pwd)/data/volumes:/backup" \
  alpine:3 sh -c "cd / && tar xzf /backup/uptime-kuma.tar.gz"
```

## Step 4 — Restart services
```bash
cd /opt/services
docker compose up -d
```
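
Once everything is back up, container state and recent logs can be checked with, for example:

```bash
docker compose ps
docker compose logs --tail=50 forgejo vaultwarden uptime-kuma
```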
RESTORE_EOF

# ── Pack everything into one archive ─────────────────────────────────────────
log "Creating archive: ${ARCHIVE}..."
tar czf "${ARCHIVE}" -C "${WORK_DIR}" data
rm -rf "${WORK_DIR}"
log " → Archive: $(du -sh "${ARCHIVE}" | cut -f1)"

# ── Upload to S3 ─────────────────────────────────────────────────────────────
log "Uploading to S3 (s3://{{ s3_bucket }}/main/)..."
AWS_ACCESS_KEY_ID="{{ s3_access_key }}" \
AWS_SECRET_ACCESS_KEY="{{ s3_secret_key }}" \
aws s3 cp "${ARCHIVE}" "s3://{{ s3_bucket }}/main/$(basename "${ARCHIVE}")" \
  --endpoint-url "{{ s3_endpoint }}" \
  --storage-class STANDARD \
  --no-progress \
  && log " → S3 upload complete" \
  || log " ⚠ S3 upload failed (local backup still intact)"

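# A backup can be pulled back down later with the same credentials and endpoint, e.g.:
#   aws s3 cp "s3://{{ s3_bucket }}/main/main_data_<DATE>.tar.gz" . --endpoint-url "{{ s3_endpoint }}"
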
# ── Cleanup old local backups ─────────────────────────────────────────────────
log "Removing local backups older than ${KEEP_DAYS} days..."
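# -mtime +N matches files whose age, counted in whole 24-hour periods, exceeds N,
# so local archives strictly older than KEEP_DAYS days are removed.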
find "${BACKUP_ROOT}" -name "main_data_*.tar.gz" -mtime +${KEEP_DAYS} -delete
|
|
|
|
# ── Prune old S3 objects ──────────────────────────────────────────────────────
log "Pruning S3 objects older than ${KEEP_DAYS} days..."
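# CUTOFF and the date embedded in object names (main_data_YYYY-MM-DD_HH-MM.tar.gz) share
# the same YYYY-MM-DD format, so the string comparison below is also a chronological one.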
CUTOFF=$(date -d "-${KEEP_DAYS} days" +%Y-%m-%d 2>/dev/null \
  || date -v-${KEEP_DAYS}d +%Y-%m-%d)
AWS_ACCESS_KEY_ID="{{ s3_access_key }}" \
AWS_SECRET_ACCESS_KEY="{{ s3_secret_key }}" \
aws s3 ls "s3://{{ s3_bucket }}/main/" \
  --endpoint-url "{{ s3_endpoint }}" \
  | awk '{print $4}' \
  | while read -r obj; do
      # Match the date anywhere in the name (it is prefixed with "main_data_", so an anchored pattern would never match)
      obj_date=$(echo "$obj" | grep -oP '\d{4}-\d{2}-\d{2}' || true)
      if [[ -n "$obj_date" && "$obj_date" < "$CUTOFF" ]]; then
        AWS_ACCESS_KEY_ID="{{ s3_access_key }}" \
        AWS_SECRET_ACCESS_KEY="{{ s3_secret_key }}" \
        aws s3 rm "s3://{{ s3_bucket }}/main/$obj" \
          --endpoint-url "{{ s3_endpoint }}" \
          && log " → Deleted old S3 object: main/$obj"
      fi
    done

log "=== Backup completed: $(du -sh "${ARCHIVE}" | cut -f1) ==="
|