fix: redesign backup archive structure + enable Outline email auth

Backup (backup.sh.j2):
- Creates a single main_data_YYYY-MM-DD_HH-MM.tar.gz archive
- Unified data/ layout: databases/ (pg_dump .sql.gz) + volumes/ (Docker volumes)
- Includes RESTORE.md with step-by-step instructions inside the archive
- S3 uploads go to the main/ prefix instead of the flat bucket root (see the example listing below)
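
For orientation, listing a finished archive under the new layout would look roughly like this (a minimal sketch; the timestamp and the ${BACKUP_ROOT} value are illustrative, standing in for whatever backup_dir is set to):

```bash
# List the contents of a finished backup archive (name/timestamp illustrative)
tar tzf "${BACKUP_ROOT}/main_data_2026-03-22_03-00.tar.gz"
# data/RESTORE.md
# data/databases/forgejo.sql.gz
# data/databases/plane.sql.gz
# data/volumes/vaultwarden.tar.gz
# data/volumes/forgejo.tar.gz
# data/volumes/uptime-kuma.tar.gz
```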

Outline (tools role):
- Add SMTP_HOST/PORT/FROM vars to the env.j2 template (required for email magic-link auth to activate)
- Add outline_smtp_* defaults to roles/tools/defaults/main.yml
- Without SMTP_HOST, the email auth plugin stays disabled and clicking the login button does nothing (a quick connectivity check is sketched below)
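
Before testing the login flow, one can confirm the relay answers and offers STARTTLS on the submission port; a minimal check, assuming the role defaults below (swap in the vault overrides where set):

```bash
# Check that the SMTP endpoint answers and offers STARTTLS on port 587
openssl s_client -connect smtp.csrx.ru:587 -starttls smtp -brief </dev/null
```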

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
jack 2026-03-22 16:20:11 +07:00
parent 2b5524f258
commit bf59b75c8f
3 changed files with 122 additions and 40 deletions

backup.sh.j2

@@ -1,83 +1,149 @@
 #!/usr/bin/env bash
 # Generated by Ansible — do not edit manually
-# Backs up PostgreSQL databases and Vaultwarden data.
-# Runs daily at 03:00, keeps {{ backup_retention_days }} days of backups.
+# Creates a single dated archive: main_data_YYYY-MM-DD_HH-MM.tar.gz
+# Structure:
+#   data/
+#     databases/ — PostgreSQL dumps (restore with psql)
+#     volumes/   — Docker volume contents (restore by copying)
+# Runs daily at 03:00, keeps {{ backup_retention_days }} days.
+# Upload to S3: s3://{{ s3_bucket }}/main/
 set -euo pipefail
 
-BACKUP_DIR="{{ backup_dir }}"
-DATE=$(date +%Y-%m-%d_%H-%M-%S)
+BACKUP_ROOT="{{ backup_dir }}"
+DATE=$(date +%Y-%m-%d_%H-%M)
+WORK_DIR="${BACKUP_ROOT}/tmp_${DATE}"
+ARCHIVE="${BACKUP_ROOT}/main_data_${DATE}.tar.gz"
 KEEP_DAYS="{{ backup_retention_days }}"
 
 log() { echo "[$(date '+%Y-%m-%d %H:%M:%S')] $*"; }
 
-log "=== Backup started ==="
+log "=== Backup started: ${DATE} ==="
+mkdir -p "${WORK_DIR}/data/databases" "${WORK_DIR}/data/volumes"
 
-# ── Forgejo PostgreSQL ──────────────────────────────────────────────────────
-log "Backing up forgejo-db..."
+# ── PostgreSQL: Forgejo ──────────────────────────────────────────────────────
+log "Dumping forgejo-db..."
 docker exec forgejo-db pg_dump -U forgejo forgejo \
-  | gzip > "${BACKUP_DIR}/forgejo-db_${DATE}.sql.gz"
-log "  → ${BACKUP_DIR}/forgejo-db_${DATE}.sql.gz ($(du -sh "${BACKUP_DIR}/forgejo-db_${DATE}.sql.gz" | cut -f1))"
+  | gzip > "${WORK_DIR}/data/databases/forgejo.sql.gz"
+log "  → databases/forgejo.sql.gz ($(du -sh "${WORK_DIR}/data/databases/forgejo.sql.gz" | cut -f1))"
 
-# ── Plane PostgreSQL ────────────────────────────────────────────────────────
-log "Backing up plane-db..."
+# ── PostgreSQL: Plane ────────────────────────────────────────────────────────
+log "Dumping plane-db..."
 docker exec plane-db pg_dump -U plane plane \
-  | gzip > "${BACKUP_DIR}/plane-db_${DATE}.sql.gz"
-log "  → ${BACKUP_DIR}/plane-db_${DATE}.sql.gz ($(du -sh "${BACKUP_DIR}/plane-db_${DATE}.sql.gz" | cut -f1))"
+  | gzip > "${WORK_DIR}/data/databases/plane.sql.gz"
+log "  → databases/plane.sql.gz ($(du -sh "${WORK_DIR}/data/databases/plane.sql.gz" | cut -f1))"
 
-# ── Vaultwarden data ────────────────────────────────────────────────────────
-log "Backing up Vaultwarden..."
+# ── Vaultwarden data volume ──────────────────────────────────────────────────
+log "Backing up Vaultwarden data..."
 docker run --rm \
   --volumes-from vaultwarden \
-  -v "${BACKUP_DIR}:/backup" \
+  -v "${WORK_DIR}/data/volumes:/backup" \
   alpine:3 \
-  tar czf "/backup/vaultwarden_${DATE}.tar.gz" /data
-log "  → ${BACKUP_DIR}/vaultwarden_${DATE}.tar.gz ($(du -sh "${BACKUP_DIR}/vaultwarden_${DATE}.tar.gz" | cut -f1))"
+  tar czf /backup/vaultwarden.tar.gz /data
+log "  → volumes/vaultwarden.tar.gz ($(du -sh "${WORK_DIR}/data/volumes/vaultwarden.tar.gz" | cut -f1))"
 
-# ── Forgejo repositories ────────────────────────────────────────────────────
+# ── Forgejo data volume (repos, attachments, LFS) ────────────────────────────
 log "Backing up Forgejo data..."
 docker run --rm \
   --volumes-from forgejo \
-  -v "${BACKUP_DIR}:/backup" \
+  -v "${WORK_DIR}/data/volumes:/backup" \
   alpine:3 \
-  tar czf "/backup/forgejo-data_${DATE}.tar.gz" /data
-log "  → ${BACKUP_DIR}/forgejo-data_${DATE}.tar.gz ($(du -sh "${BACKUP_DIR}/forgejo-data_${DATE}.tar.gz" | cut -f1))"
+  tar czf /backup/forgejo.tar.gz /data
+log "  → volumes/forgejo.tar.gz ($(du -sh "${WORK_DIR}/data/volumes/forgejo.tar.gz" | cut -f1))"
 
-# ── Upload to Timeweb S3 ────────────────────────────────────────────────────
-log "Uploading backups to S3 ({{ s3_bucket }})..."
+# ── Uptime Kuma ──────────────────────────────────────────────────────────────
+log "Backing up Uptime Kuma..."
+docker run --rm \
+  --volumes-from uptime-kuma \
+  -v "${WORK_DIR}/data/volumes:/backup" \
+  alpine:3 \
+  tar czf /backup/uptime-kuma.tar.gz /app/data
+log "  → volumes/uptime-kuma.tar.gz ($(du -sh "${WORK_DIR}/data/volumes/uptime-kuma.tar.gz" | cut -f1))"
+
+# ── Add restore instructions ─────────────────────────────────────────────────
+cat > "${WORK_DIR}/data/RESTORE.md" << 'RESTORE_EOF'
+# Restore Instructions
+
+## Prerequisites
+- Docker and docker compose running
+- Services stack deployed via Ansible (`ansible-playbook playbooks/deploy.yml`)
+
+## Step 1 — Stop services that need data restored
+```bash
+cd /opt/services
+docker compose stop forgejo plane-api plane-web plane-admin plane-space plane-worker plane-beat uptime-kuma vaultwarden
+# Leave forgejo-db and plane-db running; Step 2 restores through them.
+```
+
+## Step 2 — Restore PostgreSQL databases
+```bash
+# Forgejo DB
+zcat data/databases/forgejo.sql.gz | docker exec -i forgejo-db psql -U forgejo forgejo
+# Plane DB
+zcat data/databases/plane.sql.gz | docker exec -i plane-db psql -U plane plane
+```
+
+## Step 3 — Restore volume data
+```bash
+# Vaultwarden — extracts /data into the volume
+docker run --rm --volumes-from vaultwarden -v "$(pwd)/data/volumes:/backup" \
+  alpine:3 sh -c "cd / && tar xzf /backup/vaultwarden.tar.gz"
+# Forgejo — extracts /data into the volume
+docker run --rm --volumes-from forgejo -v "$(pwd)/data/volumes:/backup" \
+  alpine:3 sh -c "cd / && tar xzf /backup/forgejo.tar.gz"
+# Uptime Kuma — extracts /app/data into the volume
+docker run --rm --volumes-from uptime-kuma -v "$(pwd)/data/volumes:/backup" \
+  alpine:3 sh -c "cd / && tar xzf /backup/uptime-kuma.tar.gz"
+```
+
+## Step 4 — Restart services
+```bash
+cd /opt/services
+docker compose up -d
+```
+RESTORE_EOF
+
+# ── Pack everything into one archive ─────────────────────────────────────────
+log "Creating archive: ${ARCHIVE}..."
+tar czf "${ARCHIVE}" -C "${WORK_DIR}" data
+rm -rf "${WORK_DIR}"
+log "  → Archive: $(du -sh "${ARCHIVE}" | cut -f1)"
+
+# ── Upload to S3 ──────────────────────────────────────────────────────────────
+log "Uploading to S3 (s3://{{ s3_bucket }}/main/)..."
 AWS_ACCESS_KEY_ID="{{ s3_access_key }}" \
 AWS_SECRET_ACCESS_KEY="{{ s3_secret_key }}" \
-aws s3 sync "${BACKUP_DIR}/" "s3://{{ s3_bucket }}/" \
+aws s3 cp "${ARCHIVE}" "s3://{{ s3_bucket }}/main/$(basename "${ARCHIVE}")" \
   --endpoint-url "{{ s3_endpoint }}" \
-  --exclude "*" --include "*.gz" \
   --storage-class STANDARD \
   --no-progress \
   && log "  → S3 upload complete" \
-  || log "  ⚠ S3 upload failed (local backups still intact)"
+  || log "  ⚠ S3 upload failed (local backup still intact)"
 
-# ── Cleanup old backups ─────────────────────────────────────────────────────
-log "Removing backups older than ${KEEP_DAYS} days..."
-find "${BACKUP_DIR}" -name "*.gz" -mtime +${KEEP_DAYS} -delete
+# ── Cleanup old local backups ─────────────────────────────────────────────────
+log "Removing local backups older than ${KEEP_DAYS} days..."
+find "${BACKUP_ROOT}" -name "main_data_*.tar.gz" -mtime +${KEEP_DAYS} -delete
 
-# Remove S3 objects older than KEEP_DAYS as well
+# ── Prune old S3 objects ──────────────────────────────────────────────────────
 log "Pruning S3 objects older than ${KEEP_DAYS} days..."
-CUTOFF=$(date -d "-${KEEP_DAYS} days" +%Y-%m-%dT%H:%M:%S 2>/dev/null || date -v-${KEEP_DAYS}d +%Y-%m-%dT%H:%M:%S)
+CUTOFF=$(date -d "-${KEEP_DAYS} days" +%Y-%m-%d 2>/dev/null \
+  || date -v-${KEEP_DAYS}d +%Y-%m-%d)
 AWS_ACCESS_KEY_ID="{{ s3_access_key }}" \
 AWS_SECRET_ACCESS_KEY="{{ s3_secret_key }}" \
-aws s3 ls "s3://{{ s3_bucket }}/" \
+aws s3 ls "s3://{{ s3_bucket }}/main/" \
   --endpoint-url "{{ s3_endpoint }}" \
   | awk '{print $4}' \
   | while read -r obj; do
-      obj_date=$(echo "$obj" | grep -oP '^\d{4}-\d{2}-\d{2}' || true)
-      if [[ -n "$obj_date" && "$obj_date" < "${CUTOFF:0:10}" ]]; then
+      # The date is embedded in main_data_YYYY-MM-DD_HH-MM.tar.gz, so the
+      # pattern must not be anchored to the start of the object name.
+      obj_date=$(echo "$obj" | grep -oP '\d{4}-\d{2}-\d{2}' || true)
+      if [[ -n "$obj_date" && "$obj_date" < "$CUTOFF" ]]; then
         AWS_ACCESS_KEY_ID="{{ s3_access_key }}" \
         AWS_SECRET_ACCESS_KEY="{{ s3_secret_key }}" \
-        aws s3 rm "s3://{{ s3_bucket }}/$obj" \
+        aws s3 rm "s3://{{ s3_bucket }}/main/$obj" \
           --endpoint-url "{{ s3_endpoint }}" \
-          && log "  → Deleted old S3 object: $obj"
+          && log "  → Deleted old S3 object: main/$obj"
       fi
     done
 
-log "  → Done. Current backups:"
-du -sh "${BACKUP_DIR}"/*.gz 2>/dev/null | sort -k2 || true
-log "=== Backup completed ==="
+log "=== Backup completed: $(du -sh "${ARCHIVE}" | cut -f1) ==="

roles/tools/defaults/main.yml

@@ -4,3 +4,10 @@ outline_image: "outlinewiki/outline:0.80.2"
 outline_db_image: "postgres:15-alpine"
 outline_redis_image: "redis:7-alpine"
 n8n_image: "n8nio/n8n:1.89.2"  # https://hub.docker.com/r/n8nio/n8n/tags
+
+# SMTP for Outline magic-link auth (override in vault)
+outline_smtp_host: "smtp.csrx.ru"
+outline_smtp_port: 587
+outline_smtp_from: "noreply@csrx.ru"
+outline_smtp_username: ""
+outline_smtp_password: ""
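
The empty username/password defaults are deliberate; per the comment, real credentials belong in vault. A sketch of the override workflow, where the vault file path and the values are assumptions:

```bash
# Edit the encrypted vars file and set the SMTP credentials there
ansible-vault edit group_vars/all/vault.yml
# e.g. (hypothetical values):
#   outline_smtp_username: "outline@csrx.ru"
#   outline_smtp_password: "<secret>"
```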

env.j2 (Outline environment, tools role)

@@ -27,6 +27,15 @@ FILE_STORAGE=s3
 # Auth — local accounts (can add OIDC/Authelia later)
 AUTH_PROVIDERS=email
 
+# SMTP (required to enable email magic-link auth)
+SMTP_HOST={{ outline_smtp_host }}
+SMTP_PORT={{ outline_smtp_port | default(587) }}
+SMTP_FROM_EMAIL={{ outline_smtp_from }}
+{% if outline_smtp_username is defined and outline_smtp_username %}
+SMTP_USERNAME={{ outline_smtp_username }}
+SMTP_PASSWORD={{ outline_smtp_password }}
+{% endif %}
+
 # Outline DB password (used in docker-compose)
 OUTLINE_DB_PASSWORD={{ outline_db_password }}
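
With the role defaults above (outline_smtp_username left empty), the {% if %} guard drops the credential lines, so the rendered block reduces to:

```bash
# Rendered excerpt under the shipped defaults; SMTP_USERNAME/SMTP_PASSWORD are
# omitted because outline_smtp_username is empty
SMTP_HOST=smtp.csrx.ru
SMTP_PORT=587
SMTP_FROM_EMAIL=noreply@csrx.ru
```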