-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathMakefile
More file actions
668 lines (519 loc) · 26.7 KB
/
Makefile
File metadata and controls
668 lines (519 loc) · 26.7 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
# BPP Makefile
#
# Version Management Workflow:
# ---------------------------
# This project uses CalVer (Calendar Versioning) with the pattern: YYYYMM.BUILD[-TAG[TAGNUM]]
# Example versions: 202510.1274, 202510.1275-dev1, 202510.1275-dev2, 202510.1275
#
# Development Workflow:
# 1. After releasing v202510.1274, start development on next version:
# make bump-dev
# This creates: v202510.1275-dev1
#
# 2. During development, build and tag Docker images:
# docker compose build
# This tags images as: 202510.1275.dev1 and latest
#
# 3. Ready for release? Remove -dev tag:
# make bump-release
# This creates: v202510.1275 (final release version)
#
# 4. Or combine steps 3 and 1 in a single command:
# make bump-and-start-dev
# This releases current version and immediately starts next dev cycle
#
# Docker Version:
# DOCKER_VERSION variable is automatically updated by bumpver
# Used by both Makefile docker builds and docker-compose.yml
# Set DOCKER_VERSION environment variable to override for docker-compose
# Current git branch name, resolved once at parse time.
# The previous definition stored a literal backtick expression
# (`git branch | sed -n '/\* /s///p'`) with recursive "=", so git only ran
# when the variable was expanded inside a shell recipe — and re-ran on every
# use. Simple expansion with $(shell ...) yields the same value, computed once.
BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
# Command-style targets declared phony so same-named files can never shadow them.
# NOTE(review): a number of runnable targets (e.g. help, all, assets, yarn,
# clean-coverage, full-tests, refresh, the celery-*/compose-* targets) are
# missing from this list — verify and consider adding them.
.PHONY: help clean distclean tests release tests-without-playwright tests-only-playwright docker destroy-test-databases coveralls-upload clean-coverage combine-coverage cache-delete buildx-cache-stats buildx-cache-prune buildx-cache-prune-aggressive buildx-cache-prune-registry buildx-cache-export buildx-cache-import buildx-cache-list bump-dev bump-release bump-and-start-dev migrate new-worktree clean-worktree generate-500-page build build-force build-base build-app-services build-appserver-base build-appserver build-workerserver build-beatserver build-authserver build-denorm-queue build-servers check-clean-tree prepare-claude prepare-developer-machine prepare-developer-machine-linux
# Running bare `make` prints the help screen instead of building anything.
.DEFAULT_GOAL := help
##@ Pomoc
# Self-documenting help: the awk script scans this Makefile for "##@ Section"
# headers and "target: ## description" annotations and pretty-prints them.
# The "## " strings below are therefore runtime data for this target, not
# plain comments — do not reword them casually.
help: ## Wyświetl tę listę celów
	@awk 'BEGIN {FS = ":.*?## "; \
	printf "\nUżycie:\n make \033[36m<cel>\033[0m\n"} \
	/^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } \
	/^[a-zA-Z_][a-zA-Z0-9_-]*:.*?## / { \
	printf " \033[36m%-32s\033[0m %s\n", $$1, $$2 }' \
	$(MAKEFILE_LIST)
# Python interpreter used by targets that do not go through uv.
PYTHON=python3
# Platform detection for developer machine setup
OS := $(shell uname -s)
ifeq ($(OS),Darwin)
# macOS Homebrew ships the binary as "yarn"...
YARN_CMD := yarn
else
# ...while Debian/Ubuntu package it as "yarnpkg".
YARN_CMD := yarnpkg
endif
##@ Konfiguracja maszyny deweloperskiej
# Full machine bootstrap PLUS a production release — intentionally noisy
# warning in the help text; do not run casually.
all: prepare-developer-machine release ## UWAGA: pełna konfiguracja + release (uruchamia release!)
# NOTE(review): this target is absent from the .PHONY list above, unlike its
# -linux sibling — verify and consider adding it.
prepare-developer-machine-macos: ## Zainstaluj zależności systemowe na macOS (brew + uv sync)
	uv sync --all-extras
	brew install cairo pango gdk-pixbuf libffi gobject-introspection gtk+3
# Symlinks expose Homebrew's GObject/Pango/HarfBuzz/Fontconfig dylibs under
# /usr/local/lib so ctypes-based loaders (e.g. WeasyPrint) can find them.
	sudo ln -sf /opt/homebrew/opt/glib/lib/libgobject-2.0.0.dylib /usr/local/lib/gobject-2.0
	sudo ln -sf /opt/homebrew/opt/pango/lib/libpango-1.0.dylib /usr/local/lib/pango-1.0
	sudo ln -sf /opt/homebrew/opt/harfbuzz/lib/libharfbuzz.dylib /usr/local/lib/harfbuzz
	sudo ln -sf /opt/homebrew/opt/fontconfig/lib/libfontconfig.1.dylib /usr/local/lib/fontconfig-1
	sudo ln -sf /opt/homebrew/opt/pango/lib/libpangoft2-1.0.dylib /usr/local/lib/pangoft2-1.0
prepare-developer-machine-linux: ## Zainstaluj zależności systemowe na Linuksie (apt + uv sync)
	sudo apt update
	sudo apt install -y yarnpkg python3-dev libpq-dev libcairo2-dev \
	libpango1.0-dev libgdk-pixbuf2.0-dev libffi-dev \
	libgirepository1.0-dev libgtk-3-dev
	uv sync --all-extras
# Dispatcher: the ifeq/else blocks are evaluated at parse time against the
# OS variable detected above, so only one $(MAKE) line becomes the recipe.
prepare-developer-machine: ## Zainstaluj zależności systemowe (auto-detekcja macOS/Linux)
ifeq ($(OS),Darwin)
	$(MAKE) prepare-developer-machine-macos
else ifeq ($(OS),Linux)
	$(MAKE) prepare-developer-machine-linux
else
	@echo "Unsupported platform: $(OS)"
	@echo "Supported: Darwin (macOS), Linux"
	@exit 1
endif
# Purely informational: prints install instructions and reports whether the
# claude-mem plugin directory exists; makes no changes to the machine.
prepare-claude: ## Pokaż instrukcję instalacji wtyczki claude-mem w Claude Code
	@echo "Setting up Claude Code with claude-mem plugin..."
	@echo ""
	@echo "NOTE: claude-mem stores memory data locally in ~/.claude/"
	@echo " Memory data is machine-specific and cannot be shared."
	@echo ""
	@echo "To install claude-mem:"
	@echo " 1. Open Claude Code"
	@echo " 2. Run: /install-plugin thedotmack/claude-mem"
	@echo " 3. Restart Claude Code"
	@echo ""
	@echo "Current installation status:"
	@if [ -d "$$HOME/.claude/plugins/cache/thedotmack/claude-mem" ]; then \
	echo " claude-mem: INSTALLED"; \
	ls -1 "$$HOME/.claude/plugins/cache/thedotmack/claude-mem" | head -1 | \
	xargs -I{} echo " Version: {}"; \
	else \
	echo " claude-mem: NOT INSTALLED"; \
	fi
##@ Czyszczenie
# -print0 | xargs -0 handles filenames with spaces/newlines safely.
cleanup-pycs: ## Usuń __pycache__, *.pyc, *.log i pliki tymczasowe
	find . -name __pycache__ -type d -print0 | xargs -0 rm -rf
	find . -name \*~ -print0 | xargs -0 rm -f
	find . -name \*pyc -print0 | xargs -0 rm -f
	find . -name \*\\.log -print0 | xargs -0 rm -f
	rm -rf build __pycache__ *.log
# Narrower variant used as a prerequisite by `clean` and `tests`.
clean-pycache: ## Usuń __pycache__, *.pyc oraz .eggs/.cache
	find . -name __pycache__ -type d -print0 | xargs -0 rm -rf
	find . -name \*pyc -print0 | xargs -0 rm -f
	rm -rf .eggs .cache
# Broader cleanup on top of clean-pycache: egg-info, profiles, logs, build
# artefacts, compressed static cache, tox envs and scratch data files.
clean: clean-pycache ## Szersze czyszczenie: egg-info, logi, build, dist, staticroot/CACHE, .tox
	find . -type d -name \*egg-info -print0 | xargs -0 rm -rf
	find . -name \*~ -print0 | xargs -0 rm -f
	find . -name \*.prof -print0 | xargs -0 rm -f
	rm -rf prof/
# The *.log find below was duplicated verbatim; the second copy was removed.
	find . -name \*\\.log -print0 | xargs -0 rm -f
	find . -name \#\* -print0 | xargs -0 rm -f
	rm -rf build dist/*django_bpp*whl dist/*bpp_iplweb*whl *.log dist
	rm -rf src/django_bpp/staticroot/CACHE
	rm -rf .tox
	rm -rf *xlsx pbn_json_data/
# Everything `clean` does, plus node_modules, static/media roots and compiled
# SCSS output — i.e. back to a freshly-cloned tree.
distclean: clean ## Pełne czyszczenie: + node_modules, staticroot, media, dist, skompilowane CSS
	rm -rf src/django_bpp/staticroot
	rm -rf *backup .pytest-cache
	rm -rf node_modules src/node_modules src/django_bpp/staticroot
	rm -rf .vagrant src/components/bower_components src/media
	rm -rf dist
# -f added: without it, `rm` fails (and aborts distclean) whenever no
# compiled *.css/*.map files exist yet — the unexpanded glob is passed to rm.
	rm -f src/bpp/static/scss/*.css
	rm -f src/bpp/static/scss/*.map
#yarn:
# export PUPPETEER_SKIP_CHROME_DOWNLOAD=true PUPPETEER_SKIP_CHROME_HEADLESS_SHELL_DOWNLOAD=true && yarn install --no-progress --emoji false -s
##@ Frontend / Assety
grunt-build: ## Uruchom `grunt build` (SCSS → CSS, bundling JS)
	grunt build
# CSS output files (targets)
CSS_TARGETS := src/bpp/static/scss/app-blue.css src/bpp/static/scss/app-green.css src/bpp/static/scss/app-orange.css
# SCSS source files
SCSS_SOURCES := $(wildcard src/bpp/static/scss/*.scss)
# Node modules dependency — a sentinel file stamped after `yarn install`,
# so reinstall only happens when package.json/yarn.lock change.
NODE_MODULES := node_modules/.installed
# Translation files
PO_FILES := $(shell find src -name "*.po" -type f)
MO_FILES := $(PO_FILES:.po=.mo)
$(NODE_MODULES): package.json yarn.lock
# PUPPETEER_SKIP_* avoids downloading Chrome during plain dependency install.
	export PUPPETEER_SKIP_CHROME_DOWNLOAD=true PUPPETEER_SKIP_CHROME_HEADLESS_SHELL_DOWNLOAD=true && $(YARN_CMD) install --no-progress --emoji false -s
	touch $(NODE_MODULES)
# NOTE(review): a rule header with several targets defines an independent rule
# per target, so `grunt build` can run once per outdated CSS file (and is not
# safe under make -j). Presumably harmless here since grunt rebuilds all three
# at once — confirm, or consider a stamp file.
$(CSS_TARGETS): $(SCSS_SOURCES) $(NODE_MODULES)
	grunt build
# Same multi-target caveat applies; compilemessages compiles all locales in
# one invocation.
$(MO_FILES): $(PO_FILES)
# cd src && django-admin compilemessages
	uv run python src/manage.py compilemessages --locale=pl --ignore=site-packages
assets: $(CSS_TARGETS) $(MO_FILES) ## Zbuduj frontend (CSS + .mo); uruchamia `yarn install` jeśli trzeba
yarn: $(NODE_MODULES) ## Zainstaluj zależności Node.js (yarn install)
production-assets: distclean assets ## Pełny clean + build assetów pod produkcję
# Strip packages from staticroot that are not needed in production
# (the Poetry pyproject.toml exclude does not fully cover this).
	rm -rf src/django_bpp/staticroot/{qunit,sinon}
	rm -rf src/django_bpp/staticroot/sitemap-*
	rm -rf src/django_bpp/staticroot/grappelli/tinymce/
	rm -rf src/django_bpp/staticroot/autocomplete_light/vendor/select2/tests/
	rm -rf src/django_bpp/staticroot/vendor/select2/tests/
	rm -rf src/django_bpp/staticroot/rest_framework/docs
	rm -rf src/django_bpp/staticroot/vendor/select2/docs
	rm -rf src/django_bpp/staticroot/scss/*.scss
# Alias: compile Django translations only (*.po -> *.mo).
compilemessages: $(MO_FILES) ## Skompiluj tłumaczenia Django (*.po → *.mo)
# bdist_wheel target removed - no longer using wheel distribution
#upload:
# twine upload dist/*whl
# JS unit tests need optional deps and a Chrome download for Puppeteer,
# which the plain `assets` install deliberately skips.
js-tests: assets ## Uruchom testy JS (QUnit via Puppeteer)
	$(YARN_CMD) install --optional
	npx puppeteer browsers install chrome
	grunt qunit
##@ Dokumentacja
# cel: live-docs
# Uruchom sphinx-autobuild
live-docs: ## Uruchom sphinx-autobuild na porcie 8080 (live-reload docs)
# sphinx-autobuild is deliberately NOT added to requirements_dev.in;
# it is installed ad hoc here on purpose.
	uv pip install --upgrade sphinx-autobuild
	uv run sphinx-autobuild --port 8080 -D language=pl docs/ docs/_build
##@ Microsoft Auth
# Writes dummy credentials to ~/.env.local (">" truncates, so any previous
# contents of that file are replaced) and installs the auth package.
enable-microsoft-auth: ## Włącz django_microsoft_auth (dla testów integracyjnych)
	echo MICROSOFT_AUTH_CLIENT_ID=foobar > ~/.env.local
	echo MICROSOFT_AUTH_CLIENT_SECRET=foobar >> ~/.env.local
	uv pip install django_microsoft_auth
disable-microsoft-auth: ## Wyłącz django_microsoft_auth
	rm -f ~/.env.local
	uv pip uninstall django_microsoft_auth
##@ Czyszczenie
# Drop all coverage artefacts (data files, XML report, HTML report dir) in
# one forced, recursive rm — -rf on plain files behaves exactly like -f.
clean-coverage: ## Usuń pliki pokrycia kodu (.coverage, cov.xml, cov_html)
	rm -rf .coverage .coverage.* cov.xml cov_html
##@ Testy
tests-without-playwright: ## Szybkie testy bez Playwright (xdist -n auto, maxfail=50)
	uv run pytest -n auto -m "not playwright" --maxfail 50
# NOTE(review): recipe is a verbatim copy of tests-without-playwright.
# It must stay a separate target (NOT a dependency): full-tests runs the
# suite twice — once with MS Auth and once without — and a shared
# prerequisite would be considered up to date the second time.
tests-without-playwright-with-microsoft-auth: ## tests-without-playwright z aktywnym Microsoft Auth
	uv run pytest -n auto -m "not playwright" --maxfail 50
# NOTE(review): relies on serial left-to-right prerequisite order
# (enable -> test -> disable); would break under `make -j`.
tests-with-microsoft-auth: enable-microsoft-auth tests-without-playwright-with-microsoft-auth disable-microsoft-auth ## Włącz MS Auth, uruchom testy, wyłącz
tests-only-playwright: ## Tylko testy Playwright (wolne)
	uv run pytest -n auto -m "playwright"
# Merge per-worker .coverage.* files, then emit XML and HTML reports.
combine-coverage: ## Scal pokrycie: coverage combine + xml + html
	uv run coverage combine
	uv run coverage xml
	uv run coverage html
coveralls-upload: ## Wyślij raport pokrycia do Coveralls
	uv run coveralls
uv-sync: ## uv sync --all-extras (synchronizacja zależności Pythona)
	uv sync --all-extras
# Same serial-order caveat as above applies to this prerequisite chain.
tests: clean-pycache clean-coverage uv-sync tests-without-playwright tests-only-playwright combine-coverage js-tests coveralls-upload ## Pełny test suite (coverage + JS + Coveralls)
# Same as `tests` but forces a full DB rebuild from scratch instead of
# reusing the schema produced by the baseline + delta migrate. Use when
# you suspect schema corruption or need to validate migrations from zero.
tests-fresh: destroy-test-databases tests ## Jak `tests`, ale od zera (destroy-test-databases + tests)
# Regenerate src/baseline-sql/baseline.sql by spinning up an isolated
# postgres (via testcontainers), running migrate, dumping, and writing
# baseline.meta.json. Commit the refreshed files to git.
rebuild-baseline: ## Regeneruj baseline.sql do przyspieszenia testów (commit efektu!)
	DJANGO_BPP_SKIP_DOTENV=1 uv run python src/manage.py baseline_rebuild
	@echo ""
	@echo "Baseline regenerated. Files:"
	@ls -lh src/baseline-sql/baseline.sql src/baseline-sql/baseline.meta.json
	@echo ""
	@echo "Don't forget to commit:"
	@echo " git add src/baseline-sql/baseline.sql src/baseline-sql/baseline.meta.json"
# Run tests against pre-existing docker-compose containers (no testcontainers).
tests-no-containers: ## Testy przeciwko istniejącym kontenerom docker-compose (bez testcontainers)
	uv run pytest --no-testcontainers -n auto -m "not playwright" --maxfail 50
# Stop reusable testcontainers (bpp-tc-pg, bpp-tc-redis, bpp-tc-rabbitmq).
# Leading "-" ignores failures when the containers are already gone.
tests-stop-containers: ## Zatrzymaj i usuń reużywalne testcontainers (bpp-tc-*)
	-docker stop bpp-tc-pg bpp-tc-redis bpp-tc-rabbitmq 2>/dev/null
	-docker rm bpp-tc-pg bpp-tc-redis bpp-tc-rabbitmq 2>/dev/null
	@echo "Testcontainers stopped and removed."
# Remove ALL orphaned testcontainers (including Ryuks from crashed pytest runs).
clean-testcontainers: ## Usuń wszystkie osierocone testcontainers (PG/Redis/Rabbit/Ryuk + reuse bpp-tc-*)
	@echo "Removing all containers labeled org.testcontainers=true ..."
	-@docker ps -aq --filter "label=org.testcontainers=true" | xargs -r docker rm -f
	-@docker rm -f bpp-tc-pg bpp-tc-redis bpp-tc-rabbitmq 2>/dev/null || true
	@echo "Done."
# Run tests with ephemeral containers (destroyed after run).
tests-ephemeral: ## Testy w efemerycznych testcontainers (usuwane po teście)
	BPP_TESTCONTAINERS_REUSE=0 uv run pytest -n auto -m "not playwright" --maxfail 50
# Build the test image, start backing services, run the suite inside the
# container, then tear everything down.
tests-in-docker: ## Testy w pełni w Dockerze (docker-compose.test.yml)
	docker compose -f docker-compose.test.yml build test-runner
	docker compose -f docker-compose.test.yml up -d db redis rabbitmq
	docker compose -f docker-compose.test.yml run --rm test-runner \
	uv run pytest -n auto -m "not playwright" --maxfail 50
	docker compose -f docker-compose.test.yml down
# Same setup but drops into a shell instead of running pytest; no teardown —
# use tests-in-docker-down afterwards.
tests-in-docker-interactive: ## tests-in-docker z interaktywnym bashem w kontenerze
	docker compose -f docker-compose.test.yml build test-runner
	docker compose -f docker-compose.test.yml up -d db redis rabbitmq
	docker compose -f docker-compose.test.yml run --rm test-runner bash
tests-in-docker-down: ## Zatrzymaj i usuń środowisko tests-in-docker (wraz z volume)
	docker compose -f docker-compose.test.yml down -v
destroy-test-databases: ## Drop wszystkich baz testowych (local Postgres)
	-./bin/drop-test-databases.sh
# Runs the whole suite TWICE: with MS Auth enabled, then without
# (prerequisite order matters; serial make only).
full-tests: destroy-test-databases clean-coverage tests-with-microsoft-auth destroy-test-databases tests-without-playwright tests-only-playwright js-tests ## Ekstremalnie pełny test suite (drop DB + MS Auth + Playwright + JS)
##@ PBN — integracja
# NOTE(review): these recipes call bare `python`, unlike the rest of the file
# which uses `uv run python` — presumably intentional (system/venv python),
# but verify; they will fail where python != python3 or deps live in uv's env.
integration-start-from-match: ## PBN integrator od etapu 15 (matching)
	python src/manage.py pbn_integrator --enable-all --start-from-stage=15
integration-start-from-download: ## PBN integrator od etapu 12 (pobieranie)
	python src/manage.py pbn_integrator --enable-all --start-from-stage=12
integration-start-from-match-single-thread: ## integration-start-from-match bez multiprocessingu
	python src/manage.py pbn_integrator --enable-all --start-from-stage=15 --disable-multiprocessing
# NOTE(review): prerequisite `remove-pbn-integracja-publikacji-dane` is not
# defined in this file — confirm it exists in an included makefile.
restart-pbn-from-download: remove-pbn-integracja-publikacji-dane integration-start-from-download ## Wymaż dane integracji i zacznij od pobierania
##@ Wersjonowanie i release
# git-flow release: compute current and next CalVer version, open a release
# branch, bump with bumpver, build the changelog with towncrier, then finish
# and push the release branch.
upgrade-version: ## git-flow release + bumpver + towncrier (podbij wersję i zamknij release branch)
# $(eval ...) runs at recipe-expansion time, so versions reflect repo state
# at invocation, not at Makefile parse time.
	$(eval CUR_VERSION=v$(shell ./bin/bpp-version.py))
	$(eval NEW_VERSION=$(shell bumpver test $(CUR_VERSION) 'vYYYY0M.BUILD[-TAGNUM]' |head -1|cut -d: -f2))
	git flow release start $(NEW_VERSION)
	uv run bumpver update --commit
	-uv run towncrier build --draft > /tmp/towncrier.txt
	-uv run towncrier build --yes
	-git add uv.lock
	-git commit -F /tmp/towncrier.txt
# afplay is macOS-only; the "-" prefix ignores its failure so a release run
# on Linux no longer aborts on the audible notification.
	-@afplay /System/Library/Sounds/Funk.aiff
	GIT_MERGE_AUTOEDIT=no git flow release finish "$(NEW_VERSION)" -p -m "Release $(NEW_VERSION)"
# Refresh uv.lock and commit it; "-" tolerates a no-op commit when the
# lockfile did not change.
uv-lock: ## uv lock + commit uv.lock
	uv lock
	-git commit -m "Update lockfile" uv.lock
##@ GitHub Actions
gh-run-watch: ## `gh run watch` — obserwuj najnowszy run CI
	gh run watch
# "$$(...)" passes a literal $(...) command substitution to the shell.
gh-run-watch-docker-images: ## Obserwuj najnowszy run workflow "Docker - oficjalne obrazy"
	gh run watch $$(gh run list --workflow="Docker - oficjalne obrazy" --limit=1 --json databaseId --jq '.[0].databaseId')
gh-run-watch-docker-images-alt: ## Alternatywna wersja gh-run-watch-docker-images (pipe)
	gh run list --workflow="Docker - oficjalne obrazy" --limit=1 --json databaseId --jq '.[0].databaseId' | xargs gh run watch
##@ Wersjonowanie i release
# Small delay so the freshly-pushed tag's workflow appears before we watch it.
sleep-3: ## `sleep 3` (helper używany w pipeline release)
	sleep 3
##@ Django — zarządzanie
generate-500-page: ## Wygeneruj statyczną stronę 500.html z szablonu 50x.html
	uv run python src/manage.py generate_500_page
##@ Wersjonowanie i release
# Serial prerequisite chain: lock deps, bump/release, wait, watch CI.
new-release: uv-lock upgrade-version sleep-3 gh-run-watch-docker-images ## Pełny pipeline release'u (uv-lock + bumpver + watch CI)
# Pre-release guard: refuse to proceed with uncommitted changes.
check-clean-tree: ## Zawołaj błąd, jeśli working tree brudne (pre-release guard)
	@if [ -n "$$(git status --porcelain)" ]; then \
	echo "Error: Working tree is dirty. Commit or stash changes before releasing."; \
	exit 1; \
	fi
release: check-clean-tree full-tests new-release ## Pełny release (tree clean + full-tests + new-release)
# Derive a PEP 440-ish version from `git describe` (first "-" -> ".",
# second "-" -> "+") and set it without committing.
set-version-from-vcs: ## Ustaw wersję bumpver na podstawie git describe
	$(eval CUR_VERSION_VCS=$(shell git describe | sed s/\-/\./ | sed s/\-/\+/))
	bumpver update --no-commit --set-version=$(CUR_VERSION_VCS)
# Version management targets for development workflow
bump-dev: ## Podbij wersję do kolejnego -devN (tag=dev)
	@echo "Bumping to next development version..."
	uv run bumpver update --tag dev --tag-num
	@echo "New development version created. Build with: docker compose build"
bump-release: ## Zdejmij -dev z wersji (tag=final)
	@echo "Creating release version (removing -dev tag)..."
	uv run bumpver update --tag final
	@echo "Release version created. You may want to run: make bump-dev"
# bump-release + bump-dev in one shot.
bump-and-start-dev: ## Release bieżącej + od razu nowy cykl dev
	@echo "Releasing current version and starting next development cycle..."
	uv run bumpver update --tag final
	@echo "Released. Now bumping to next dev version..."
	uv run bumpver update --tag dev --tag-num
	@echo "Ready for development. Build with: docker compose build"
.PHONY: check-git-clean
# `git diff --quiet` exits non-zero on unstaged changes, failing the target.
# (Staged-but-uncommitted changes pass; check-clean-tree is the strict guard.)
check-git-clean: ## Wyrzuć błąd jeśli `git diff` pokazuje zmiany
	git diff --quiet
# Build a wheel/sdist with a VCS-derived version, then hard-reset the
# bumpver-modified files.
test-package-from-vcs: check-git-clean uv-sync set-version-from-vcs ## Przetestuj uv build z wersją z VCS (reset --hard po!)
	uv build
	ls -lash dist
	git reset --hard
##@ Różne
# Depends on `clean` so generated artefacts do not inflate the counts.
loc: clean ## Pokaż statystyki liczby linii (pygount)
	pygount -N ... -F "...,staticroot,migrations,fixtures" src --format=summary
# Image tag for docker builds; this line is rewritten automatically by bumpver
# (see the header comment at the top of this file) — do not edit by hand.
DOCKER_VERSION=202604.1359
# Cache configuration for docker buildx bake
# - local: use local cache (default for local builds)
# - registry: use Docker Hub registry cache (for CI/CD)
#
# Usage:
# make build # parallel build with local cache
# DOCKER_CACHE_TYPE=registry make build # parallel build with registry cache
# PUSH_TO_REGISTRY=true make build # build and push to registry
DOCKER_CACHE_TYPE ?= local
# Platform detection: use ARM64 on Apple Silicon, AMD64 otherwise
ARCH := $(shell uname -m)
ifeq ($(ARCH),arm64)
DOCKER_PLATFORM ?= linux/arm64
else
DOCKER_PLATFORM ?= linux/amd64
endif
# Build arguments for docker buildx bake
# Use --file to explicitly use only docker-bake.hcl (avoids merge with docker-compose.yml)
# Variables are passed via environment, platform via --set
# --allow grants filesystem access to cache directory (avoids interactive prompt)
BAKE_ARGS = --file docker-bake.hcl --set '*.platform=$(DOCKER_PLATFORM)' --allow=fs.read=/tmp --allow=fs.write=/tmp
# Export variables for bake (HCL variables read from environment)
export DOCKER_VERSION
export CACHE_TYPE := $(DOCKER_CACHE_TYPE)
# Use environment GIT_SHA if provided (e.g., from CI), otherwise compute from git
GIT_SHA ?= $(shell git rev-parse --short HEAD)
export GIT_SHA
# Only export PUSH when explicitly requested, so bake's default applies otherwise.
ifeq ($(PUSH_TO_REGISTRY),true)
export PUSH := true
endif
##@ Docker build (buildx bake)
# Main build target - parallel builds using docker buildx bake
# This builds all images in parallel where possible:
# - base: builds first
# - appserver, workerserver, beatserver, authserver, denorm-queue: wait for base
# Obraz dbservera (iplweb/bpp_dbserver) jest budowany w osobnym repo:
# https://github.com/iplweb/bpp-dbserver
build: ## Równoległy build wszystkich obrazów (buildx bake)
	docker buildx bake $(BAKE_ARGS)
# Force rebuild all images (ignores cache)
build-force: ## Pełny rebuild ignorujący cache
	docker buildx bake $(BAKE_ARGS) --no-cache
# Build only the base image
build-base: ## Zbuduj tylko obraz `base`
	docker buildx bake $(BAKE_ARGS) base
# Build app services only (requires base image to exist)
build-app-services: ## Zbuduj tylko app-services (wymaga istniejącego `base`)
	docker buildx bake $(BAKE_ARGS) app-services
# Individual build targets (for debugging or specific rebuilds)
build-appserver-base: ## Alias do build-base (buduje base dla appservera)
	docker buildx bake $(BAKE_ARGS) base
build-appserver: ## Zbuduj tylko appserver
	docker buildx bake $(BAKE_ARGS) appserver
build-workerserver: ## Zbuduj tylko workerserver
	docker buildx bake $(BAKE_ARGS) workerserver
build-beatserver: ## Zbuduj tylko beatserver
	docker buildx bake $(BAKE_ARGS) beatserver
build-authserver: ## Zbuduj tylko authserver
	docker buildx bake $(BAKE_ARGS) authserver
build-denorm-queue: ## Zbuduj tylko denorm-queue
	docker buildx bake $(BAKE_ARGS) denorm-queue
# Alias for backward compatibility
build-servers: build ## Alias do `build` (kompatybilność wsteczna)
# =============================================================================
# Budowanie obrazów z brancha na Docker Build Cloud
# =============================================================================
#
# Użycie:
# git push
# make build-branch
#
# Obrazy trafiają na Docker Hub z tagiem = sanityzowana nazwa brancha.
# Tag "latest" NIE jest ustawiany (tylko buildy z mastera mają "latest").
#
# Na serwerze docelowym:
# export DOCKER_VERSION=feature-nowe-zglos-publikacje
# docker compose pull && docker compose up -d
#
# =============================================================================
# UWAGA: Celowo budujemy TYLKO dla platformy linux/amd64 (x86_64).
#
# Wszystkie nasze serwery produkcyjne działają na architekturze x86_64,
# więc nie ma potrzeby budowania obrazów ARM. Budowanie na dwie platformy
# trwa dłużej i zużywa więcej zasobów Docker Build Cloud.
#
# Jeśli w przyszłości pojawi się potrzeba budowania również na ARM
# (np. dla serwerów ARM lub lokalnego testowania na Apple Silicon),
# wystarczy zmienić BRANCH_BUILD_PLATFORM na linux/amd64,linux/arm64
# — wtedy Docker Build Cloud zbuduje obrazy na obie platformy
# automatycznie.
# =============================================================================
# Docker Build Cloud builder name and target platform for branch builds
# (amd64 only — see the rationale in the comment block above).
CLOUD_BUILDER = cloud-iplweb-bpp
BRANCH_BUILD_PLATFORM = linux/amd64
build-branch: ## Zbuduj i wypchnij obrazy z aktualnego brancha do Docker Hub (linux/amd64)
# Sanitize the branch name into a valid lowercase Docker tag.
	$(eval BRANCH_TAG := $(shell git rev-parse --abbrev-ref HEAD \
	| sed 's/[^a-zA-Z0-9._-]/-/g' \
	| tr '[:upper:]' '[:lower:]'))
	@echo "========================================"
	@echo "Building branch: $(BRANCH_TAG)"
	@echo "Builder: $(CLOUD_BUILDER)"
	@echo "Platform: $(BRANCH_BUILD_PLATFORM)"
	@echo "========================================"
# TAG_LATEST=false: only master builds may move the "latest" tag.
	DOCKER_VERSION=$(BRANCH_TAG) TAG_LATEST=false PUSH=true \
	docker buildx bake \
	--builder=$(CLOUD_BUILDER) \
	--file docker-bake.hcl \
	--set '*.platform=$(BRANCH_BUILD_PLATFORM)' \
	--allow=fs.read=/tmp \
	--allow=fs.write=/tmp
##@ Docker buildx cache
buildx-cache-stats: ## Pokaż `docker buildx du` (rozmiar cache)
	docker buildx du
buildx-cache-prune: ## Wyczyść cache buildx (ostrożnie)
	docker buildx prune
buildx-cache-prune-aggressive: ## Wyczyść cache buildx, zostaw tylko 5GB
	docker buildx prune --keep-storage 5GB
# Informational only: registry-side caches cannot be pruned from here.
buildx-cache-prune-registry: ## Instrukcja usuwania cache z Docker Hub (manual)
	@echo "Note: Registry caches on Docker Hub must be pruned manually."
	@echo "Use 'docker rmi iplweb/bpp_*:cache' to remove local copies of registry caches."
# Export the buildx cache to a local directory by driving a trivial one-line
# Dockerfile through `docker buildx build --cache-to`.
buildx-cache-export: ## Wyeksportuj build cache do /tmp/docker-buildx-cache-backup
	@echo "Exporting build cache to local directory..."
	mkdir -p /tmp/docker-buildx-cache-backup
# Fixes: "<<<" is a bash here-string and fails under make's default /bin/sh;
# pipe the Dockerfile via echo instead. "--target=scratch" named a build
# stage that does not exist in this Dockerfile (FROM scratch is unnamed),
# so it was dropped.
	echo "FROM scratch" | docker buildx build --cache-to=type=local,dest=/tmp/docker-buildx-cache-backup,mode=max --load -f- .
# Only verifies that the backup directory exists; the actual import happens
# when a build is run with --cache-from pointing at it.
buildx-cache-import: ## Zaimportuj build cache z /tmp/docker-buildx-cache-backup
	@echo "Importing build cache from local directory..."
	if [ -d /tmp/docker-buildx-cache-backup ]; then \
	echo "Cache backup found at /tmp/docker-buildx-cache-backup"; \
	else \
	echo "No cache backup found. Run 'make buildx-cache-export' first."; \
	exit 1; \
	fi
# Hard-coded reference list of registry cache image names.
buildx-cache-list: ## Wypisz znane nazwy cache'y rejestru na Docker Hub
	@echo "Registry caches on Docker Hub:"
	@echo " - iplweb/bpp_base:cache"
	@echo " - iplweb/bpp_appserver:cache"
	@echo " - iplweb/bpp_workerserver:cache"
	@echo " - iplweb/bpp_beatserver:cache"
	@echo " - iplweb/bpp_denorm_queue:cache"
##@ Docker compose
compose-restart: ## Restart stacka docker-compose (stop + rm + up --force-recreate)
	docker compose stop
	docker compose rm -f
	docker compose up --force-recreate
compose-dbshell: ## Bash w kontenerze bazy danych (docker compose exec db bash)
	docker compose exec db /bin/bash
##@ Celery
celery-worker-run: ## Uruchom celery worker (pool=threads, concurrency=0)
	uv run celery -A django_bpp.celery_tasks worker --pool=threads --concurrency=0
# Purges BOTH the denorm and default celery queues without confirmation (-f).
celery-purge: ## Wyczyść kolejki denorm i celery (purge -f)
	DJANGO_SETTINGS_MODULE=django_bpp.settings.local uv run celery -A django_bpp.celery_tasks purge -Q denorm,celery -f
celery-worker-normal: ## Worker solo dla normalnych kolejek
	uv run celery --app=django_bpp.celery_tasks worker --concurrency=1 --loglevel=INFO -P solo --without-gossip --without-mingle --without-heartbeat
celery-worker-denorm: ## Worker solo tylko dla kolejki denorm
	uv run celery --app=django_bpp.celery_tasks worker -Q denorm --concurrency=1 --loglevel=INFO -P solo --without-gossip --without-mingle --without-heartbeat
##@ Django — zarządzanie
denorm-queue: ## Uruchom `manage.py denorm_queue`
	uv run python src/manage.py denorm_queue
migrate: ## Uruchom `manage.py migrate`
	uv run python src/manage.py migrate
# NOTE(review): uses bare `python` unlike the `uv run python` used elsewhere
# in this file — verify this is intentional.
cache-delete: ## `manage.py clear_cache` — wyczyść cache Django
	python src/manage.py clear_cache
##@ Celery
docker-celery-inspect: ## Wykonaj celery inspect (active, active_queues, stats) na workerserver-general
	docker compose exec workerserver-general uv run celery -A django_bpp.celery_tasks inspect active
	docker compose exec workerserver-general uv run celery -A django_bpp.celery_tasks inspect active_queues
	docker compose exec workerserver-general uv run celery -A django_bpp.celery_tasks inspect stats | grep max-concurrency
##@ Docker compose
# Rebuild images (via `build`), then recreate the compose stack, pruning
# dangling Docker objects before and after.
refresh: build ## Rebuild + restart całego stacka compose + prune
	docker system prune -f
	docker compose down
	docker compose up -d
	docker system prune -f
##@ Django — zarządzanie
remove-denorms: ## Opróżnij tabelę denorm_dirtyinstance
	echo "DELETE FROM denorm_dirtyinstance;" | uv run python src/manage.py dbshell
##@ Czyszczenie
# DESTRUCTIVE: wipes all builder cache, unused images AND volumes.
clean-docker-cache: ## Wyczyść cały cache Docker buildera i volumy (agresywne!)
	docker builder prune
	docker builder prune --all
	docker system prune -a --volumes
	rm -rf /tmp/.buildx-cache*
##@ Django — zarządzanie
# NOTE(review): runs manage.py directly via `uv run src/manage.py` (no
# `python`), unlike sibling targets — confirm this works as intended.
invalidate: ## Unieważnij cały cache template fragments (`manage.py invalidate all`)
	uv run src/manage.py invalidate all
##@ Docker compose
prune-orphan-volumes: ## docker volume prune -f
	docker volume prune -f
# Interactive: pick a volume with fzf, mount it in an alpine container, list
# its contents, then drop into a shell inside it.
open-docker-volume: prune-orphan-volumes ## Wybierz volume przez fzf i wejdź do niego shellem
	@VOLUME=$$(docker volume ls --format '{{.Name}}' | fzf --prompt="Select volume: ") && \
	docker run --rm -it -v "$$VOLUME":/volume -w /volume alpine:latest /bin/sh -c "ls -las; exec /bin/sh"
# Mount every "<CONTEXT_NAME>_*" volume under /volumes/<suffix> in one alpine
# container. NOTE(review): CONTEXT_NAME is not defined anywhere in this file —
# it must be supplied on the command line (make CONTEXT_NAME=... ) or by an
# included makefile; with it empty the grep matches every volume. Verify.
open-all-docker-volumes: prune-orphan-volumes ## Zamontuj wszystkie volumy kontekstu w alpinie
	@MOUNTS=$$(docker volume ls --format '{{.Name}}' | grep "^$(CONTEXT_NAME)_" | while read vol; do \
	name=$${vol#$(CONTEXT_NAME)_}; \
	echo "-v $$vol:/volumes/$$name"; \
	done | tr '\n' ' ') && \
	docker run --rm -it $$MOUNTS -w /volumes alpine:latest /bin/sh -c "ls -las; exec /bin/sh"