- Add Outline, outline-db, outline-redis, n8n, outline-mcp containers to main docker-compose - Add env.outline.j2 template with Resend SMTP and S3 (walava-outline bucket) - Update Traefik routes: wiki → outline:3000, auto → n8n:5678 (local, not cross-server) - Rename S3 buckets: visual-backup → walava-backup, visual-outline → walava-outline - Extend backup.sh.j2: add Outline DB, n8n, Plane MinIO to backup scope - Add outline_image, n8n_image, outline_mcp_image to services/defaults - Remove Authelia config deployment tasks from configs.yml - Add outline-internal and n8n-internal networks to docker-compose Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
170 lines
7.7 KiB
Django/Jinja
170 lines
7.7 KiB
Django/Jinja
#!/usr/bin/env bash
# Generated by Ansible — do not edit manually
#
# Creates a single dated archive: data_YYYY-MM-DD_HH-MM.tar.gz
# Structure:
#   data/
#     databases/ — PostgreSQL dumps (restore with psql)
#     volumes/   — Docker volume contents (restore by copying)
#
# Runs every 6 hours (00:00, 06:00, 12:00, 18:00), keeps {{ backup_retention_days }} days.
# Upload to S3: s3://{{ s3_bucket }}/data/
set -euo pipefail

# Rendered by Ansible — the {{ … }} placeholders are filled at deploy time.
BACKUP_ROOT="{{ backup_dir }}"
DATE=$(date +%Y-%m-%d_%H-%M)
WORK_DIR="${BACKUP_ROOT}/tmp_${DATE}"
ARCHIVE="${BACKUP_ROOT}/main_data_${DATE}.tar.gz"
KEEP_DAYS="{{ backup_retention_days }}"
readonly BACKUP_ROOT DATE WORK_DIR ARCHIVE KEEP_DAYS

# log MESSAGE... — emit a timestamped progress line on stdout.
log() { printf '[%s] %s\n' "$(date '+%Y-%m-%d %H:%M:%S')" "$*"; }
log "=== Backup started: ${DATE} ==="
mkdir -p "${WORK_DIR}/data/databases" "${WORK_DIR}/data/volumes"

# dump_db CONTAINER USER DBNAME
#   gzip-compressed pg_dump of DBNAME from CONTAINER into
#   ${WORK_DIR}/data/databases/DBNAME.sql.gz. Under `set -o pipefail` a
#   failing pg_dump aborts the script even though it feeds a pipe.
dump_db() {
  local container=$1 user=$2 db=$3
  local out="${WORK_DIR}/data/databases/${db}.sql.gz"
  log "Dumping ${container}..."
  docker exec "${container}" pg_dump -U "${user}" "${db}" | gzip > "${out}"
  log " → databases/${db}.sql.gz ($(du -sh "${out}" | cut -f1))"
}

# ── PostgreSQL dumps: Forgejo, Plane, Outline ────────────────────────────────
dump_db forgejo-db forgejo forgejo
dump_db plane-db   plane   plane
dump_db outline-db outline outline
# backup_volume CONTAINER SRC_PATH NAME LABEL
#   Tar SRC_PATH from CONTAINER's volumes into
#   ${WORK_DIR}/data/volumes/NAME.tar.gz via a throwaway alpine container.
#   LABEL is only used in the progress message.
backup_volume() {
  local container=$1 src=$2 name=$3 label=$4
  local out="${WORK_DIR}/data/volumes/${name}.tar.gz"
  log "Backing up ${label}..."
  docker run --rm \
    --volumes-from "${container}" \
    -v "${WORK_DIR}/data/volumes:/backup" \
    alpine:3 \
    tar czf "/backup/${name}.tar.gz" "${src}"
  log " → volumes/${name}.tar.gz ($(du -sh "${out}" | cut -f1))"
}

# ── Forgejo data volume (repos, attachments, LFS) ───────────────────────────
backup_volume forgejo /data forgejo "Forgejo data"

# ── Uptime Kuma ──────────────────────────────────────────────────────────────
backup_volume uptime-kuma /app/data uptime-kuma "Uptime Kuma"

# ── n8n workflows + credentials ──────────────────────────────────────────────
backup_volume n8n /home/node/.n8n n8n "n8n"

# ── Plane MinIO (uploaded files / attachments) ───────────────────────────────
backup_volume plane-minio /data plane-minio "Plane MinIO"
# ── Add restore instructions ─────────────────────────────────────────────────
# Quoted delimiter: the $(pwd) examples below must land in RESTORE.md verbatim.
cat > "${WORK_DIR}/data/RESTORE.md" << 'RESTORE_EOF'
# Restore Instructions

## Prerequisites
- Docker and docker compose running
- Services stack deployed via Ansible (`ansible-playbook playbooks/deploy.yml`)

## Step 1 — Stop services that need data restored
Keep the database containers (forgejo-db, plane-db, outline-db) **running** —
Step 2 restores into them with `docker exec … psql`.
```bash
cd /opt/services
docker compose stop forgejo plane-api plane-web plane-admin plane-space plane-worker plane-beat outline n8n uptime-kuma plane-minio
```

## Step 2 — Restore PostgreSQL databases
```bash
# Forgejo DB
zcat data/databases/forgejo.sql.gz | docker exec -i forgejo-db psql -U forgejo forgejo

# Plane DB
zcat data/databases/plane.sql.gz | docker exec -i plane-db psql -U plane plane

# Outline DB
zcat data/databases/outline.sql.gz | docker exec -i outline-db psql -U outline outline
```

## Step 3 — Restore volume data
```bash
# Forgejo — extracts /data/ into the container
docker run --rm --volumes-from forgejo -v $(pwd)/data/volumes:/backup \
  alpine:3 sh -c "cd / && tar xzf /backup/forgejo.tar.gz"

# Uptime Kuma
docker run --rm --volumes-from uptime-kuma -v $(pwd)/data/volumes:/backup \
  alpine:3 sh -c "cd / && tar xzf /backup/uptime-kuma.tar.gz"

# n8n
docker run --rm --volumes-from n8n -v $(pwd)/data/volumes:/backup \
  alpine:3 sh -c "cd / && tar xzf /backup/n8n.tar.gz"

# Plane MinIO (uploaded files)
docker run --rm --volumes-from plane-minio -v $(pwd)/data/volumes:/backup \
  alpine:3 sh -c "cd / && tar xzf /backup/plane-minio.tar.gz"
```

## Step 4 — Restart services
```bash
cd /opt/services
docker compose up -d
```
RESTORE_EOF
# ── Pack everything into one archive ─────────────────────────────────────────
log "Creating archive: ${ARCHIVE}..."
tar czf "${ARCHIVE}" -C "${WORK_DIR}" data
# :? aborts if WORK_DIR is ever empty, so this can never become `rm -rf /`.
rm -rf -- "${WORK_DIR:?}"
log " → Archive: $(du -sh "${ARCHIVE}" | cut -f1)"
# ── Upload to S3 ─────────────────────────────────────────────────────────────
# Explicit if/else instead of `cmd && ok || fail`: with the old form a failure
# of the success-side `log` would also have triggered the FAILED branch.
log "Uploading to S3 (s3://{{ s3_bucket }}/data/)..."
if AWS_ACCESS_KEY_ID="{{ s3_access_key }}" \
   AWS_SECRET_ACCESS_KEY="{{ s3_secret_key }}" \
   aws s3 cp "${ARCHIVE}" "s3://{{ s3_bucket }}/data/$(basename "${ARCHIVE}")" \
     --endpoint-url "{{ s3_endpoint }}" \
     --no-progress; then
  log " → S3 upload complete"
else
  log " ⚠ S3 upload FAILED"
  exit 1
fi
# ── Cleanup old local backups ─────────────────────────────────────────────────
log "Removing local backups older than ${KEEP_DAYS} days..."
# -mtime +N matches files last modified more than (N+1)*24h ago.
# Quote the Jinja-supplied retention value (SC2086) and match files only.
find "${BACKUP_ROOT}" -type f -name "main_data_*.tar.gz" -mtime "+${KEEP_DAYS}" -delete
# ── Prune old S3 objects ──────────────────────────────────────────────────────
log "Pruning S3 objects older than ${KEEP_DAYS} days..."
# GNU date (Linux) first, BSD date (macOS) as fallback.
CUTOFF=$(date -d "-${KEEP_DAYS} days" +%Y-%m-%d 2>/dev/null \
  || date -v-"${KEEP_DAYS}"d +%Y-%m-%d)

AWS_ACCESS_KEY_ID="{{ s3_access_key }}" \
AWS_SECRET_ACCESS_KEY="{{ s3_secret_key }}" \
aws s3 ls "s3://{{ s3_bucket }}/data/" \
  --endpoint-url "{{ s3_endpoint }}" \
  | awk '{print $4}' \
  | while read -r obj; do
      # Objects are named main_data_YYYY-MM-DD_HH-MM.tar.gz, so extract the
      # date AFTER the prefix. (The previous anchored '^\d{4}-\d{2}-\d{2}'
      # never matched, silently making this prune a no-op.)
      obj_date=$(echo "$obj" | grep -oP 'main_data_\K\d{4}-\d{2}-\d{2}' || true)
      if [[ -n "$obj_date" && "$obj_date" < "$CUTOFF" ]]; then
        # Log-and-continue on delete failure: without the trailing `|| log`,
        # one failed `aws s3 rm` would abort the whole script under set -e.
        AWS_ACCESS_KEY_ID="{{ s3_access_key }}" \
        AWS_SECRET_ACCESS_KEY="{{ s3_secret_key }}" \
        aws s3 rm "s3://{{ s3_bucket }}/data/$obj" \
          --endpoint-url "{{ s3_endpoint }}" \
          && log " → Deleted old S3 object: data/$obj" \
          || log " ⚠ Failed to delete S3 object: data/$obj (continuing)"
      fi
    done

log "=== Backup completed: $(du -sh "${ARCHIVE}" | cut -f1) ==="