infra/roles/services/templates/docker-compose.yml.j2
jack 44ccdf4882
Some checks failed
CI/CD / syntax-check (push) Successful in 1m30s
CI/CD / deploy (push) Failing after 6m8s
refactor: remove tools server, Vaultwarden, monitoring stack; rename plane→hub
- Remove tools server entirely (roles/tools, playbooks/tools.yml, CI deploy step)
- Remove Vaultwarden (already absent from compose, clean up vars)
- Remove node-exporter, cadvisor, promtail from main stack
- Remove grafana/uptime-kuma Traefik routes (pointed to tools)
- Remove monitoring network from docker-compose
- Remove tools vault vars (grafana_admin_password, alertmanager telegram)
- Rename domain_plane: plane.walava.io → hub.walava.io
- Update CI workflow to only deploy main server
- Update STATUS.md and BACKLOG.md to reflect current state
2026-03-27 19:05:19 +07:00

567 lines
18 KiB
Django/Jinja
Raw Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

# Docker Compose stack — generated by Ansible
# Do not edit manually; re-run ansible-playbook deploy.yml
#
# NOTE: Traefik uses the file provider (routes.yml.j2) — Docker labels on
# containers are intentionally absent. Adding labels here has no effect.
networks:
  # proxy — the only public network, attached to Traefik: provides outbound
  # internet access (ACME Let's Encrypt, external services). backend is
  # internal: true, so services on it have no direct outbound internet access.
  proxy:
    driver: bridge
  backend:
    driver: bridge
    internal: true
  # Per-service isolation networks; internal: true blocks outbound traffic.
  forgejo-db:
    driver: bridge
    internal: true
  # NOTE(review): forgejo-ssh is deliberately NOT internal — presumably because
  # the published SSH port (host 2222 → forgejo:22) needs routing; confirm.
  forgejo-ssh:
    driver: bridge
  plane-internal:
    driver: bridge
    internal: true
  # runner-jobs: CI job containers run here (no internal: true, so they keep
  # internet access — see the act_runner comment below).
  runner-jobs:
    driver: bridge
  docmost-internal:
    driver: bridge
    internal: true
  n8n-internal:
    driver: bridge
    internal: true
# Named volumes — persistent data that survives container re-creation.
volumes:
  forgejo_data:
  forgejo_db_data:
  plane_pgdata:
  plane_redis_data:
  plane_minio_data:
  plane_media:
  act_runner_data:
  crowdsec_data:
  docmost_db_data:
  docmost_redis_data:
  n8n_data:
services:
  # ── Traefik ────────────────────────────────────────────────────────────────
  # proxy — for ACME (outbound internet), backend — for routing to services.
  traefik:
    image: {{ traefik_image }}
    container_name: traefik
    restart: unless-stopped
    security_opt:
      - no-new-privileges:true
    # The only service publishing HTTP/HTTPS on the host.
    ports:
      - "80:80"
      - "443:443"
    networks:
      - proxy
      - backend
    volumes:
      # Static config, file-provider dynamic config, ACME cert store, access logs
      # (logs are also consumed read-only by crowdsec below).
      - {{ services_root }}/traefik/traefik.yml:/etc/traefik/traefik.yml:ro
      - {{ services_root }}/traefik/dynamic:/etc/traefik/dynamic:ro
      - {{ services_root }}/traefik/acme.json:/acme/acme.json
      - {{ services_root }}/traefik/logs:/var/log/traefik
    # env_file passes CLOUDFLARE_DNS_API_TOKEN (and all secrets) to Traefik
    env_file: .env
    logging:
      driver: json-file
      options:
        max-size: "10m"
        max-file: "3"
    # NOTE(review): `traefik healthcheck --ping` requires the ping endpoint to
    # be enabled in traefik.yml — confirm against the traefik.yml template.
    healthcheck:
      test: ["CMD", "traefik", "healthcheck", "--ping"]
      interval: 30s
      timeout: 5s
      retries: 3
  # ── Forgejo ────────────────────────────────────────────────────────────────
  # Git hosting. HTTP (:3000) is reached via Traefik over backend; SSH is
  # published directly on host port 2222.
  forgejo:
    image: {{ forgejo_image }}
    container_name: forgejo
    restart: unless-stopped
    security_opt:
      - no-new-privileges:true
    # Wait for Postgres to pass its healthcheck before starting.
    depends_on:
      forgejo-db:
        condition: service_healthy
    networks:
      - backend
      - forgejo-db
      - forgejo-ssh
    volumes:
      - forgejo_data:/data
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    environment:
      - USER_UID=1000
      - USER_GID=1000
      - FORGEJO__database__DB_TYPE=postgres
      - FORGEJO__database__HOST=forgejo-db:5432
      - FORGEJO__database__NAME=forgejo
      - FORGEJO__database__USER=forgejo
      - FORGEJO__database__PASSWD=${FORGEJO_DB_PASSWORD}
      - FORGEJO__server__DOMAIN={{ domain_git }}
      - FORGEJO__server__ROOT_URL=https://{{ domain_git }}
      - FORGEJO__server__SSH_DOMAIN={{ domain_git }}
      # SSH_PORT is the port advertised in clone URLs; it matches the host
      # port published below (2222 → container 22).
      - FORGEJO__server__SSH_PORT=2222
      - FORGEJO__service__DISABLE_REGISTRATION=true
    ports:
      - "2222:22"
    healthcheck:
      test: ["CMD", "wget", "-qO-", "http://localhost:3000/"]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 60s
  # Postgres for Forgejo — reachable only on the isolated forgejo-db network.
  forgejo-db:
    image: {{ forgejo_db_image }}
    container_name: forgejo-db
    restart: unless-stopped
    networks:
      - forgejo-db
    volumes:
      - forgejo_db_data:/var/lib/postgresql/data
    environment:
      - POSTGRES_USER=forgejo
      - POSTGRES_PASSWORD=${FORGEJO_DB_PASSWORD}
      - POSTGRES_DB=forgejo
      - PGDATA=/var/lib/postgresql/data/pgdata
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U forgejo"]
      interval: 10s
      timeout: 5s
      retries: 5
    mem_limit: 512m
  # ── Plane ──────────────────────────────────────────────────────────────────
  # Routing via Traefik:
  #   /api/* and /auth/* → plane-api:8000 (Django, on backend + plane-internal)
  #   everything else    → plane-web:3000 (Next.js, on backend + plane-internal)
  # The rule with the longer PathPrefix automatically gets higher priority in Traefik.
  #
  # NOTE: Plane does not publish specific version tags — we use :stable.
  # Watch for updates: https://github.com/makeplane/plane/releases
  plane-web:
    image: {{ plane_frontend_image }}
    container_name: plane-web
    restart: unless-stopped
    depends_on:
      - plane-api
    environment:
      - WEB_URL=https://{{ domain_plane }}
      - NEXT_PUBLIC_API_BASE_URL=https://{{ domain_plane }}
    networks:
      - backend
      - plane-internal
    healthcheck:
      test: ["CMD", "wget", "-qO-", "http://127.0.0.1:3000/"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 30s
  # Admin console UI — presumably served under /god-mode (see ADMIN_BASE_URL
  # on plane-api); routing is in Traefik's file provider, not labels.
  plane-admin:
    image: {{ plane_admin_image }}
    container_name: plane-admin
    restart: unless-stopped
    depends_on:
      - plane-api
      - plane-web
    networks:
      - backend
      - plane-internal
    healthcheck:
      test: ["CMD", "wget", "-qO-", "http://127.0.0.1:3000/"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 30s
  # Public "spaces" UI — healthcheck probes its /spaces/ base path.
  plane-space:
    image: {{ plane_space_image }}
    container_name: plane-space
    restart: unless-stopped
    depends_on:
      - plane-api
      - plane-web
    networks:
      - backend
      - plane-internal
    healthcheck:
      test: ["CMD", "wget", "-qO-", "http://127.0.0.1:3000/spaces/"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 30s
  # Plane backend API (Django/gunicorn on :8000).
  plane-api:
    image: {{ plane_backend_image }}
    container_name: plane-api
    restart: unless-stopped
    mem_limit: 512m
    command: ./bin/docker-entrypoint-api.sh
    depends_on:
      plane-db:
        condition: service_healthy
      plane-redis:
        condition: service_started
      plane-minio:
        condition: service_healthy
    networks:
      - backend
      - plane-internal
    volumes:
      # Shared with plane-worker so both can read/write uploaded media.
      - plane_media:/app/media
    environment:
      - DATABASE_URL=postgresql://plane:${PLANE_DB_PASSWORD}@plane-db:5432/plane
      - REDIS_URL=redis://plane-redis:6379/
      # NOTE(review): AMQP_URL points at Redis — presumably Plane uses this as
      # the Celery broker URL and accepts a redis:// scheme; confirm, since the
      # variable name suggests RabbitMQ.
      - AMQP_URL=redis://plane-redis:6379/
      - SECRET_KEY=${PLANE_SECRET_KEY}
      - DEBUG=0
      - DJANGO_SETTINGS_MODULE=plane.settings.production
      - WEB_URL=https://{{ domain_plane }}
      # Upload limit in bytes (5 MiB).
      - FILE_SIZE_LIMIT=5242880
      # S3-compatible storage via the local MinIO container.
      - USE_MINIO=1
      - AWS_REGION=us-east-1
      - AWS_ACCESS_KEY_ID=plane-minio
      - AWS_SECRET_ACCESS_KEY=${PLANE_MINIO_PASSWORD}
      - AWS_S3_ENDPOINT_URL=http://plane-minio:9000
      - AWS_S3_BUCKET_NAME=uploads
      - MINIO_ROOT_USER=plane-minio
      - MINIO_ROOT_PASSWORD=${PLANE_MINIO_PASSWORD}
      - GUNICORN_WORKERS=2
      - APP_BASE_URL=https://{{ domain_plane }}
      - ADMIN_BASE_URL=https://{{ domain_plane }}/god-mode
      - SPACE_BASE_URL=https://{{ domain_plane }}/spaces
    # Generous start_period: entrypoint runs migrations before serving.
    # NOTE(review): "runs migrations" is inferred from the entrypoint name —
    # confirm against the image.
    healthcheck:
      test: ["CMD", "wget", "-qO-", "http://127.0.0.1:8000/"]
      interval: 30s
      timeout: 10s
      retries: 10
      start_period: 120s
  # Background worker — same image/env as plane-api, different entrypoint.
  # Only on plane-internal: it never receives traffic from Traefik.
  plane-worker:
    image: {{ plane_backend_image }}
    container_name: plane-worker
    restart: unless-stopped
    mem_limit: 512m
    command: ./bin/docker-entrypoint-worker.sh
    depends_on:
      - plane-api
    networks:
      - plane-internal
    volumes:
      # Shared media volume (same mount as plane-api).
      - plane_media:/app/media
    environment:
      - DATABASE_URL=postgresql://plane:${PLANE_DB_PASSWORD}@plane-db:5432/plane
      - REDIS_URL=redis://plane-redis:6379/
      - AMQP_URL=redis://plane-redis:6379/
      - SECRET_KEY=${PLANE_SECRET_KEY}
      - DEBUG=0
      - DJANGO_SETTINGS_MODULE=plane.settings.production
      - USE_MINIO=1
      - AWS_REGION=us-east-1
      - AWS_ACCESS_KEY_ID=plane-minio
      - AWS_SECRET_ACCESS_KEY=${PLANE_MINIO_PASSWORD}
      - AWS_S3_ENDPOINT_URL=http://plane-minio:9000
      - AWS_S3_BUCKET_NAME=uploads
      - MINIO_ROOT_USER=plane-minio
      - MINIO_ROOT_PASSWORD=${PLANE_MINIO_PASSWORD}
  # Scheduler (beat entrypoint) — same image/env as plane-worker but no media
  # volume and a smaller memory cap.
  plane-beat:
    image: {{ plane_backend_image }}
    container_name: plane-beat
    restart: unless-stopped
    mem_limit: 256m
    command: ./bin/docker-entrypoint-beat.sh
    depends_on:
      - plane-api
    networks:
      - plane-internal
    environment:
      - DATABASE_URL=postgresql://plane:${PLANE_DB_PASSWORD}@plane-db:5432/plane
      - REDIS_URL=redis://plane-redis:6379/
      - AMQP_URL=redis://plane-redis:6379/
      - SECRET_KEY=${PLANE_SECRET_KEY}
      - DEBUG=0
      - DJANGO_SETTINGS_MODULE=plane.settings.production
      - USE_MINIO=1
      - AWS_REGION=us-east-1
      - AWS_ACCESS_KEY_ID=plane-minio
      - AWS_SECRET_ACCESS_KEY=${PLANE_MINIO_PASSWORD}
      - AWS_S3_ENDPOINT_URL=http://plane-minio:9000
      - AWS_S3_BUCKET_NAME=uploads
      - MINIO_ROOT_USER=plane-minio
      - MINIO_ROOT_PASSWORD=${PLANE_MINIO_PASSWORD}
  # Postgres for Plane — isolated on plane-internal only.
  plane-db:
    image: {{ plane_db_image }}
    container_name: plane-db
    restart: unless-stopped
    mem_limit: 512m
    networks:
      - plane-internal
    volumes:
      - plane_pgdata:/var/lib/postgresql/data
    environment:
      - POSTGRES_USER=plane
      - POSTGRES_PASSWORD=${PLANE_DB_PASSWORD}
      - POSTGRES_DB=plane
      - PGDATA=/var/lib/postgresql/data/pgdata
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U plane"]
      interval: 10s
      timeout: 5s
      retries: 5
  # Redis for Plane (cache + broker URL targets above). AOF persistence enabled
  # so queued data survives restarts.
  plane-redis:
    image: {{ plane_redis_image }}
    container_name: plane-redis
    restart: unless-stopped
    networks:
      - plane-internal
    volumes:
      - plane_redis_data:/data
    command: redis-server --appendonly yes
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 3s
      retries: 5
  # MinIO — S3-compatible object storage for Plane uploads.
  # API on :9000; console on :9001 (neither published on the host).
  plane-minio:
    image: {{ plane_minio_image }}
    container_name: plane-minio
    restart: unless-stopped
    mem_limit: 512m
    networks:
      - plane-internal
    volumes:
      - plane_minio_data:/data
    environment:
      - MINIO_ROOT_USER=plane-minio
      - MINIO_ROOT_PASSWORD=${PLANE_MINIO_PASSWORD}
    command: server /data --console-address ":9001"
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 30s
      timeout: 20s
      retries: 3
  # One-shot init job: creates the "uploads" bucket and allows anonymous
  # downloads, then exits (restart: "no"). $$ escapes $ for Compose so the
  # container shell expands PLANE_MINIO_PASSWORD at runtime.
  # NOTE(review): `minio/mc` is the only unpinned, non-templated image in this
  # stack — consider moving it to a {{ ... }} variable like the others.
  plane-createbuckets:
    image: minio/mc
    container_name: plane-createbuckets
    restart: "no"
    depends_on:
      plane-minio:
        condition: service_healthy
    entrypoint: >
      /bin/sh -c "
      mc alias set myminio http://plane-minio:9000 plane-minio $${PLANE_MINIO_PASSWORD} &&
      mc mb --ignore-existing myminio/uploads &&
      mc anonymous set download myminio/uploads;
      exit 0;
      "
    environment:
      - PLANE_MINIO_PASSWORD=${PLANE_MINIO_PASSWORD}
    networks:
      - plane-internal
  # ── Forgejo Actions Runner ─────────────────────────────────────────────────
  # backend — to reach Forgejo over the internal network (http://forgejo:3000)
  # runner-jobs — network with internet access for job containers
  # NOTE(review): mounting /var/run/docker.sock gives the runner root-equivalent
  # control of the host Docker daemon — accepted trade-off for CI, but worth
  # flagging.
  act_runner:
    image: {{ act_runner_image }}
    container_name: act_runner
    restart: unless-stopped
    depends_on:
      - forgejo
    environment:
      # GITEA_* variable names are what the runner expects, even for Forgejo.
      - GITEA_INSTANCE_URL=https://{{ domain_git }}
      - GITEA_RUNNER_REGISTRATION_TOKEN=${FORGEJO_RUNNER_TOKEN}
      - GITEA_RUNNER_NAME=vps-runner
      - CONFIG_FILE=/data/config.yaml
    volumes:
      - act_runner_data:/data
      - /var/run/docker.sock:/var/run/docker.sock
      - {{ services_root }}/act_runner/config.yaml:/data/config.yaml:ro
    networks:
      - backend
      - runner-jobs
  # ── Security Stack ─────────────────────────────────────────────────────────
  # CrowdSec: analyses Traefik logs and bans offending IPs.
  # Uses community reputation plus local behaviour analysis.
  # On proxy for outbound access (community blocklist/hub updates).
  crowdsec:
    image: {{ crowdsec_image }}
    container_name: crowdsec
    restart: unless-stopped
    networks:
      - proxy
    environment:
      # Parser/scenario collections to install on startup.
      - COLLECTIONS=crowdsecurity/traefik crowdsecurity/http-cve crowdsecurity/linux
      - GID=1000
    volumes:
      - crowdsec_data:/var/lib/crowdsec/data
      # acquis.yaml defines which of the mounted log files below are ingested.
      - {{ services_root }}/crowdsec/acquis.yaml:/etc/crowdsec/acquis.yaml:ro
      - {{ services_root }}/traefik/logs:/var/log/traefik:ro
      - /var/log/auth.log:/var/log/auth.log:ro
      - /var/log/syslog:/var/log/syslog:ro
# ── Discord Bot ────────────────────────────────────────────────────────────
# NOTE: disabled until image is built & pushed to Forgejo registry
# discord-bot:
# image: git.{{ domain_base }}/jack/discord-bot:latest
# container_name: discord-bot
# restart: unless-stopped
# environment:
# DISCORD_TOKEN: "${DISCORD_BOT_TOKEN}"
# DISCORD_APP_ID: "{{ discord_bot_app_id }}"
# FORGEJO_TOKEN: "${FORGEJO_RUNNER_TOKEN}"
# FORGEJO_URL: "https://{{ domain_git }}"
# FORGEJO_REPO: "jack/infra"
# volumes:
# - /var/run/docker.sock:/var/run/docker.sock:ro
# networks:
# - proxy
# ── Walava Landing ─────────────────────────────────────────────────────────
# NOTE: disabled until image is built & pushed to Forgejo registry
# walava-web:
# image: git.{{ domain_base }}/jack/walava-web:latest
# container_name: walava-web
# restart: unless-stopped
# networks:
# - proxy
  # ── Docmost wiki ─────────────────────────────────────────────────────────────
  # On backend (Traefik routing), docmost-internal (db/redis), and proxy for
  # outbound access (external S3 storage and SMTP relay).
  docmost:
    image: {{ docmost_image }}
    container_name: docmost
    restart: unless-stopped
    environment:
      APP_URL: "https://{{ domain_wiki }}"
      APP_SECRET: "{{ docmost_app_secret }}"
      DATABASE_URL: "postgresql://docmost:{{ docmost_db_password }}@docmost-db:5432/docmost?schema=public"
      REDIS_URL: "redis://docmost-redis:6379"
      # Outbound mail via Resend's SMTP relay; STARTTLS on 587, hence
      # SMTP_SECURE "false" (no implicit TLS).
      MAIL_DRIVER: smtp
      SMTP_HOST: smtp.resend.com
      SMTP_PORT: "587"
      SMTP_USERNAME: resend
      SMTP_PASSWORD: "{{ resend_api_key }}"
      SMTP_SECURE: "false"
      MAIL_FROM_ADDRESS: "noreply@{{ domain_base }}"
      MAIL_FROM_NAME: "Visual Wiki"
      # Attachments stored in external S3-compatible storage (not local MinIO).
      STORAGE_DRIVER: s3
      AWS_S3_ACCESS_KEY_ID: "{{ s3_access_key }}"
      AWS_S3_SECRET_ACCESS_KEY: "{{ s3_secret_key }}"
      AWS_S3_BUCKET: walava-docmost
      AWS_S3_REGION: ru-1
      AWS_S3_ENDPOINT: "https://s3.twcstorage.ru"
      AWS_S3_FORCE_PATH_STYLE: "true"
    networks:
      - docmost-internal
      - backend
      - proxy
    depends_on:
      docmost-db:
        condition: service_healthy
      docmost-redis:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "curl", "-sf", "http://127.0.0.1:3000/api/health"]
      interval: 30s
      timeout: 10s
      retries: 10
      start_period: 60s
    logging:
      driver: json-file
      options:
        max-size: "10m"
        max-file: "3"
  # Postgres for Docmost — isolated on docmost-internal only.
  docmost-db:
    image: {{ docmost_db_image }}
    container_name: docmost-db
    restart: unless-stopped
    environment:
      POSTGRES_DB: docmost
      POSTGRES_USER: docmost
      POSTGRES_PASSWORD: "{{ docmost_db_password }}"
    networks:
      - docmost-internal
    volumes:
      - docmost_db_data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U docmost"]
      interval: 10s
      timeout: 5s
      retries: 5
    logging:
      driver: json-file
      options:
        max-size: "10m"
        max-file: "3"
  # Redis for Docmost — isolated on docmost-internal only.
  docmost-redis:
    image: {{ docmost_redis_image }}
    container_name: docmost-redis
    restart: unless-stopped
    networks:
      - docmost-internal
    volumes:
      - docmost_redis_data:/data
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 5s
      retries: 5
    logging:
      driver: json-file
      options:
        max-size: "10m"
        max-file: "3"
  # ── n8n workflow automation ──────────────────────────────────────────────────
  n8n:
    image: {{ n8n_image }}
    container_name: n8n
    restart: unless-stopped
    networks:
      - n8n-internal
      - backend
      - proxy # needs outbound internet for workflow API calls
    volumes:
      # SQLite data, credentials and workflow storage.
      # NOTE(review): "SQLite" is inferred from the absence of DB_* env vars —
      # confirm against n8n defaults.
      - n8n_data:/home/node/.n8n
    environment:
      - N8N_HOST={{ domain_n8n }}
      - N8N_PORT=5678
      - N8N_PROTOCOL=https
      - WEBHOOK_URL=https://{{ domain_n8n }}/
      - N8N_ENCRYPTION_KEY={{ n8n_encryption_key }}
      - N8N_USER_MANAGEMENT_JWT_SECRET={{ n8n_jwt_secret }}
      - GENERIC_TIMEZONE=Europe/Moscow
      - TZ=Europe/Moscow
      - N8N_METRICS=false
      - N8N_LOG_LEVEL=warn
      # Prune old execution data; MAX_AGE is in hours per n8n docs (336 ≈ 14
      # days) — confirm units against the pinned n8n version.
      - EXECUTIONS_DATA_PRUNE=true
      - EXECUTIONS_DATA_MAX_AGE=336
      - N8N_DIAGNOSTICS_ENABLED=false
      - N8N_VERSION_NOTIFICATIONS_ENABLED=false
    # NOTE(review): no start_period here, unlike the other web services —
    # consider adding one if n8n flaps on slow cold starts.
    healthcheck:
      test: ["CMD", "wget", "-qO-", "http://127.0.0.1:5678/healthz"]
      interval: 30s
      timeout: 5s
      retries: 3
    logging:
      driver: json-file
      options:
        max-size: "10m"
        max-file: "3"