-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathdocker-compose.workers.yml
More file actions
150 lines (143 loc) · 4.84 KB
/
docker-compose.workers.yml
File metadata and controls
150 lines (143 loc) · 4.84 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
# Shared logging config: the "local" driver with bounded size/rotation so
# worker logs cannot fill the host disk. Aliased into every service below.
x-logging: &default-logging
  driver: "local"
  options:
    max-size: "${LOG_MAX_SIZE:-150m}"
    max-file: "${LOG_MAX_FILE:-5}"
services:
  # General-purpose Celery worker (default queue).
  workerserver-general:
    image: iplweb/bpp_workerserver:${DOCKER_VERSION:-latest}
    restart: always
    logging: *default-logging
    env_file: ${BPP_CONFIGS_DIR}/.env
    volumes:
      - staticfiles:/staticroot
      - media:/mediaroot
    depends_on:
      # Must wait for appserver, because appserver may migrate the database —
      # or even build it in the first place.
      appserver:
        condition: service_healthy
    deploy:
      resources:
        limits:
          memory: ${WORKER_GENERAL_MEM_LIMIT:-1g}
          cpus: "${WORKER_GENERAL_CPU_LIMIT:-2.0}"
    labels:
      ofelia.enabled: "true"
      # Nightly restart clears the accumulated memory leak of the Celery worker
      # (long-running Python process). kill 1 -> SIGTERM to PID 1 -> graceful
      # shutdown -> restart:always resurrects the container.
      ofelia.job-exec.restart_self.schedule: "0 5 5 * * *"
      ofelia.job-exec.restart_self.command: "kill 1"
workerserver-denorm:
image: iplweb/bpp_workerserver:${DOCKER_VERSION:-latest}
restart: always
logging: *default-logging
env_file: ${BPP_CONFIGS_DIR}/.env
environment:
CELERY_QUEUE: "denorm"
volumes:
- staticfiles:/staticroot
- media:/mediaroot
depends_on:
# Musi czekać na appserver, gdyż appserver aktualizuje ewentualnie baze danych.
# Czy nawet w ogóle ją buduje...
appserver:
condition: service_healthy
deploy:
resources:
limits:
memory: ${WORKER_DENORM_MEM_LIMIT:-1g}
cpus: "${WORKER_DENORM_CPU_LIMIT:-1.0}"
labels:
ofelia.enabled: "true"
ofelia.job-exec.restart_self.schedule: "0 10 5 * * *"
ofelia.job-exec.restart_self.command: "kill 1"
denorm-queue:
# run only SINGLE service, this is only a mechanism to pass
# PostgreSQL LISTEN to Celery queue
image: iplweb/bpp_denorm_queue:${DOCKER_VERSION:-latest}
logging: *default-logging
env_file: ${BPP_CONFIGS_DIR}/.env
# entrypoint: ["python", "src/manage.py", "denorm_queue"]
# healthcheck:
# test: ["CMD-SHELL", "pgrep -f 'denorm_queue' > /dev/null"]
# start_period: 10s
# interval: 30s
# timeout: 5s
# retries: 3
restart: always
depends_on:
workerserver-denorm:
condition: service_healthy
deploy:
resources:
limits:
memory: ${DENORM_QUEUE_MEM_LIMIT:-320m}
cpus: "${DENORM_QUEUE_CPU_LIMIT:-1.0}"
labels:
ofelia.enabled: "true"
# Restart najpozniej w staggered secie - po wszystkich pozostalych, zeby NOTIFY
# ze swiezo podniesionych worker-denormow mialy do czego trafic.
ofelia.job-exec.restart_self.schedule: "0 25 5 * * *"
ofelia.job-exec.restart_self.command: "kill 1"
workerserver-status:
image: iplweb/bpp_workerserver:${DOCKER_VERSION:-latest}
logging: *default-logging
env_file: ${BPP_CONFIGS_DIR}/.env
entrypoint: celery -A django_bpp.celery_tasks status
depends_on:
workerserver-general:
condition: service_healthy
workerserver-denorm:
condition: service_healthy
profiles: ['manual']
celerybeat:
image: iplweb/bpp_beatserver:${DOCKER_VERSION:-latest}
logging: *default-logging
env_file: ${BPP_CONFIGS_DIR}/.env
restart: always
volumes:
- staticfiles:/staticroot
- media:/mediaroot
depends_on:
redis:
condition: service_healthy
rabbitmq:
condition: service_healthy
appserver:
condition: service_started
deploy:
resources:
limits:
memory: ${CELERYBEAT_MEM_LIMIT:-320m}
cpus: "${CELERYBEAT_CPU_LIMIT:-0.25}"
labels:
ofelia.enabled: "true"
ofelia.job-exec.restart_self.schedule: "0 20 5 * * *"
ofelia.job-exec.restart_self.command: "kill 1"
flower:
image: mher/flower:2.0.1
restart: always
logging: *default-logging
environment:
- CELERY_BROKER_URL=amqp://${DJANGO_BPP_RABBITMQ_USER:-bpp}:${DJANGO_BPP_RABBITMQ_PASS:-bpp}@rabbitmq:${DJANGO_BPP_RABBITMQ_PORT:-5672}//
- FLOWER_PORT=5555
- FLOWER_URL_PREFIX=/flower
depends_on:
rabbitmq:
condition: service_healthy
expose:
- 5555
deploy:
resources:
limits:
memory: ${FLOWER_MEM_LIMIT:-768m}
cpus: "${FLOWER_CPU_LIMIT:-0.5}"
labels:
ofelia.enabled: "true"
# Flower akumuluje historie zadan Celery; nocny restart zbija konsumpcje pamieci.
# mher/flower 2.0.1 opiera sie na python:3.11-slim - ma /bin/kill z procps,
# ale Docker exec uruchamia go jako root przez PID namespace kontenera,
# wiec "kill 1" wysyla SIGTERM do PID 1 bezposrednio (bez shell-a).
ofelia.job-exec.restart_self.schedule: "0 15 5 * * *"
ofelia.job-exec.restart_self.command: "kill 1"