-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathdocker-compose.infrastructure.yml
More file actions
101 lines (96 loc) · 3.22 KB
/
docker-compose.infrastructure.yml
File metadata and controls
101 lines (96 loc) · 3.22 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
# Shared logging configuration, merged into every service below via the
# *default-logging alias. Rotation is tunable per-deployment through the
# LOG_MAX_SIZE / LOG_MAX_FILE environment variables.
x-logging: &default-logging
  driver: "local"
  options:
    max-size: "${LOG_MAX_SIZE:-150m}"
    max-file: "${LOG_MAX_FILE:-5}"
services:
  # Public-facing nginx reverse proxy / static file server.
  webserver:
    image: nginx:1.29.7
    restart: always
    logging: *default-logging
    env_file: ${BPP_CONFIGS_DIR}/.env
    environment:
      NGINX_ENVSUBST_FILTER: "DJANGO_BPP_"
    ports:
      # Quoted: unquoted HOST:CONTAINER pairs are ambiguous scalars in YAML
      # (the 1.1 sexagesimal trap); Compose docs recommend quoting them.
      - "80:80"
      - "443:443"
      - "443:443/udp" # HTTP/3 (QUIC)
    healthcheck:
      test: ["CMD-SHELL", "curl -f -s -o /dev/null http://127.0.0.1:80/healthz || exit 1"]
      start_period: 10s
      interval: 10s
      timeout: 5s
      retries: 3
    volumes:
      - staticfiles:/var/www/html/staticroot
      - media:/mediaroot
      - ${BPP_CONFIGS_DIR}/ssl:/etc/ssl/private:ro
      - ./defaults/webserver/default.conf.template:/etc/nginx/templates/default.conf.template:ro
      - ./defaults/webserver/security-headers.conf:/etc/nginx/conf.d/security-headers.conf:ro
      - ./defaults/webserver/maintenance.html:/usr/share/nginx/html/maintenance.html:ro
    deploy:
      resources:
        limits:
          # Observed idle usage is ~32 MiB, but proxy_buffers 16x16k means
          # 256 KB per connection when buffering Django HTML. At 500
          # concurrent requests that is realistically +110 MB RAM. 256m
          # leaves headroom for bursts, HTTP/3 QUIC and large uploads.
          memory: ${WEBSERVER_MEM_LIMIT:-256m}
          cpus: "${WEBSERVER_CPU_LIMIT:-2.0}"
redis:
image: redis:8.6.2
restart: always
logging: *default-logging
# --maxmemory + allkeys-lru: Redis sam ewiktuje klucze gdy zbliza sie
# do wlasnego limitu, zamiast czekac az Docker zabije caly kontener.
# Wewnetrzny limit musi byc mniejszy niz zewnetrzny, zeby eviction
# mial szanse zadzialac przed hard-OOM-kill-em.
command:
- redis-server
- --maxmemory
- ${REDIS_MAXMEMORY:-200mb}
- --maxmemory-policy
- allkeys-lru
healthcheck:
test: ["CMD", "redis-cli", "ping"]
start_period: 5s
interval: 15s
timeout: 5s
retries: 3
volumes:
- redis:/data
deploy:
resources:
limits:
memory: ${REDIS_MEM_LIMIT:-256m}
cpus: "${REDIS_CPU_LIMIT:-0.5}"
  # AMQP message broker with the management plugin enabled.
  rabbitmq:
    image: rabbitmq:4.2.5-management-alpine
    restart: always
    logging: *default-logging
    env_file: ${BPP_CONFIGS_DIR}/.env
    # Ports are commented out, so the broker is not published to the host;
    # uncomment for direct AMQP / management-UI access.
    #ports:
    # - 5672:5672 # AMQP protocol port
    # - 15672:15672 # Management UI port
    environment:
      RABBITMQ_DEFAULT_USER: ${DJANGO_BPP_RABBITMQ_USER:-bpp}
      RABBITMQ_DEFAULT_PASS: ${DJANGO_BPP_RABBITMQ_PASS:-bpp}
      # Raise the log threshold to error for the listed subsystems and the
      # console sink, cutting routine connection/channel noise.
      RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS: >-
        -rabbit log_levels [{connection,error},{channel,error},{mirroring,error},{federation,error},{upgrade,error}]
        log [{console,[{level,error}]}]
    healthcheck:
      # await_startup blocks until boot completes; authenticate_user then
      # verifies the default credentials actually work. The $$ escapes
      # Compose interpolation so the variables expand inside the container.
      test: ["CMD-SHELL", "rabbitmqctl await_startup && rabbitmqctl authenticate_user \"$$RABBITMQ_DEFAULT_USER\" \"$$RABBITMQ_DEFAULT_PASS\""]
      start_period: 60s
      interval: 10s
      timeout: 30s
      retries: 5
    volumes:
      # NOTE(review): only the mnesia subdirectory is persisted; anything
      # RabbitMQ writes directly under /var/lib/rabbitmq (e.g. the Erlang
      # cookie) is lost on container recreation — confirm this is intended.
      - rabbitmq_data:/var/lib/rabbitmq/mnesia
      - ${BPP_CONFIGS_DIR}/rabbitmq:/etc/rabbitmq:ro
    deploy:
      resources:
        limits:
          memory: ${RABBITMQ_MEM_LIMIT:-512m}
          cpus: "${RABBITMQ_CPU_LIMIT:-1.0}"
# Named volumes used by the services above. staticfiles and media are
# mounted by the webserver service but were previously undeclared here;
# Compose rejects references to undeclared named volumes unless another
# merged compose file declares them, so they are declared explicitly.
volumes:
  staticfiles:
  media:
  redis:
  rabbitmq_data: