# ============================================================
# Code Documentation Assistant — Docker Compose (dev branch)
# ============================================================
# Changes from main docker-compose.yml:
# - MLflow tracking server (always-on, not behind profile)
# - vLLM service (GPU required, behind 'vllm' profile)
# - HITL_ENABLED, OUTPUT_REVIEW_MODE, QUALITY_GATE_THRESHOLD
# - INFERENCE_BACKEND: "ollama" (default) | "vllm"
# - CONFIDENCE_THRESHOLD, MAX_RETRIEVAL_ATTEMPTS configurable
# - src/ bind-mount for live-reload during development
#
# Usage:
# docker compose -f docker-compose.dev.yml up --build
# HITL_ENABLED=false docker compose -f docker-compose.dev.yml up
# OUTPUT_REVIEW_MODE=supervisor docker compose -f docker-compose.dev.yml up
# INFERENCE_BACKEND=vllm docker compose -f docker-compose.dev.yml --profile vllm up
# MODEL_TIER=lightweight docker compose -f docker-compose.dev.yml up
#
# Access:
# Streamlit UI: http://localhost:8501
# MLflow UI: http://localhost:5000
# ============================================================
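# Tip: instead of prefixing each command, the variables above can be put in a
# .env file next to this compose file (docker compose reads it from the
# project directory automatically). The values below are the defaults already
# used in this file:
#   MODEL_TIER=full
#   INFERENCE_BACKEND=ollama
#   HITL_ENABLED=true
#   OUTPUT_REVIEW_MODE=human
#   QUALITY_GATE_THRESHOLD=6.0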
services:
  # --- Ollama (default inference backend) ---
  ollama:
    image: ollama/ollama:latest
    container_name: code-doc-ollama
    ports:
      - "11434:11434"
    volumes:
      - ollama_data:/root/.ollama
    environment:
      - OLLAMA_HOST=0.0.0.0
    healthcheck:
      test: ["CMD", "ollama", "list"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 15s
    # GPU: docker compose -f docker-compose.dev.yml -f docker-compose.gpu.yml up
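    # That override file is not part of this compose file; a minimal sketch of
    # what it could contain (mirroring the deploy block of the vllm service
    # further down) is:
    #
    #   services:
    #     ollama:
    #       deploy:
    #         resources:
    #           reservations:
    #             devices:
    #               - driver: nvidia
    #                 count: all
    #                 capabilities: [gpu]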
  # --- Model bootstrap ---
  ollama-bootstrap:
    image: ollama/ollama:latest
    container_name: code-doc-bootstrap
    depends_on:
      ollama:
        condition: service_healthy
    environment:
      - OLLAMA_HOST=ollama:11434
      - MODEL_TIER=${MODEL_TIER:-full}
      - EMBEDDING_MODEL=${EMBEDDING_MODEL:-nomic-embed-text}
    # Folded scalar ('>') joins lines with spaces, so each command is
    # terminated with ';' to keep the inline script valid.
    entrypoint: >
      bash -c '
      case "$${MODEL_TIER}" in
      full) MODEL="mistral-nemo" ;;
      balanced) MODEL="qwen2.5-coder:7b" ;;
      lightweight) MODEL="phi3.5" ;;
      *) MODEL="mistral-nemo" ;;
      esac;
      echo "Pulling LLM: $${MODEL}";
      ollama pull "$${MODEL}";
      echo "Pulling embedding model: $${EMBEDDING_MODEL}";
      ollama pull "$${EMBEDDING_MODEL}";
      echo "Bootstrap complete."
      '
    restart: "no"
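  # To confirm the bootstrap finished, check its logs and list the pulled models:
  #   docker logs code-doc-bootstrap
  #   docker exec code-doc-ollama ollama list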
  # --- vLLM (alternative inference backend, GPU required) ---
  # Enable with: INFERENCE_BACKEND=vllm docker compose -f docker-compose.dev.yml --profile vllm up
  vllm:
    image: vllm/vllm-openai:latest
    container_name: code-doc-vllm
    profiles: ["vllm"]
    ports:
      - "8080:8000"
    volumes:
      - vllm_model_cache:/root/.cache/huggingface
    environment:
      - HUGGING_FACE_HUB_TOKEN=${HF_TOKEN:-}
    command:
      - "--model"
      - "${VLLM_MODEL:-mistralai/Mistral-7B-Instruct-v0.3}"
      - "--tensor-parallel-size"
      - "${VLLM_TP:-1}"
      - "--max-model-len"
      - "32768"
      - "--gpu-memory-utilization"
      - "0.90"
    healthcheck:
      test: ["CMD", "python3", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:8000/health')"]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 60s
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
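  # vLLM serves an OpenAI-compatible API; with the 8080:8000 port mapping above,
  # a quick smoke test from the host once the model has loaded is:
  #   curl http://localhost:8080/v1/models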
  # --- ChromaDB ---
  chromadb:
    image: chromadb/chroma:0.6.3
    container_name: code-doc-chromadb
    ports:
      - "8000:8000"
    volumes:
      - chroma_data:/chroma/chroma
    environment:
      - IS_PERSISTENT=TRUE
      - PERSIST_DIRECTORY=/chroma/chroma
      - ANONYMIZED_TELEMETRY=false
    healthcheck:
      test: ["CMD", "python3", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:8000/api/v1/heartbeat')"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 10s
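  # The same heartbeat endpoint used by the healthcheck is reachable from the
  # host through the 8000:8000 mapping:
  #   curl http://localhost:8000/api/v1/heartbeat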
  # --- MLflow tracking server (always-on in dev) ---
  mlflow:
    image: ghcr.io/mlflow/mlflow:latest
    container_name: code-doc-mlflow
    ports:
      - "5000:5000"
    volumes:
      - mlflow_data:/mlflow
    command:
      - mlflow
      - server
      - --host=0.0.0.0
      - --port=5000
      - --backend-store-uri=sqlite:////mlflow/mlflow.db
      - --default-artifact-root=/mlflow/artifacts
    healthcheck:
      test: ["CMD", "python3", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:5000/health')"]
      interval: 15s
      timeout: 5s
      retries: 5
      start_period: 10s
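  # Both the SQLite backend store and the artifact root sit under /mlflow,
  # which is backed by the mlflow_data volume, so experiment runs survive
  # container restarts; browse them at http://localhost:5000 (see the Access
  # notes at the top of this file).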
  # --- Application (dev branch — LangGraph agent + Streamlit) ---
  app:
    build:
      context: .
      dockerfile: Dockerfile
    container_name: code-doc-app
    ports:
      - "8501:8501"
    depends_on:
      chromadb:
        condition: service_healthy
      mlflow:
        condition: service_healthy
    environment:
      # Inference backend
      - INFERENCE_BACKEND=${INFERENCE_BACKEND:-ollama}
      - OLLAMA_HOST=http://ollama:11434
      - VLLM_HOST=http://vllm:8000
      - OLLAMA_MODEL=${OLLAMA_MODEL:-mistral-nemo}
      # Vector DB
      - CHROMA_HOST=http://chromadb:8000
      # MLflow
      - MLFLOW_TRACKING_URI=http://mlflow:5000
      # Model / embedding
      - MODEL_TIER=${MODEL_TIER:-full}
      - EMBEDDING_MODEL=${EMBEDDING_MODEL:-nomic-embed-text}
      # Agent behaviour
      - HITL_ENABLED=${HITL_ENABLED:-true}
      - OUTPUT_REVIEW_MODE=${OUTPUT_REVIEW_MODE:-human}
      - QUALITY_GATE_THRESHOLD=${QUALITY_GATE_THRESHOLD:-6.0}
      - CONFIDENCE_THRESHOLD=${CONFIDENCE_THRESHOLD:-0.45}
      - MAX_RETRIEVAL_ATTEMPTS=${MAX_RETRIEVAL_ATTEMPTS:-3}
      - MAX_GENERATION_ATTEMPTS=${MAX_GENERATION_ATTEMPTS:-3}
      - MAX_CONTEXT_TOKENS=${MAX_CONTEXT_TOKENS:-8000}
      - LOG_LEVEL=${LOG_LEVEL:-info}
    volumes:
      - ${REPO_PATH:-./repos}:/data/repos:ro
      - ./src:/app/src  # live-reload during development
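  # The ./src bind-mount makes host edits visible inside the container at
  # /app/src without rebuilding the image; whether Streamlit reruns on its own
  # depends on the file-watcher settings baked into the image (not shown here).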
volumes:
  ollama_data:
    driver: local
  vllm_model_cache:
    driver: local
  chroma_data:
    driver: local
  mlflow_data:
    driver: local