-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path: docker-compose.yml
More file actions
74 lines (61 loc) · 1.65 KB
/
docker-compose.yml
File metadata and controls
74 lines (61 loc) · 1.65 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
---
# SecureFix scanner stack: the securefix service plus an optional Ollama
# backend (enabled via `--profile ollama`).
# NOTE(review): `version` is ignored by Compose v2 (it warns that the
# attribute is obsolete); kept only for legacy docker-compose v1 users.
version: '3.8'

services:
  securefix:
    build:
      context: .
      dockerfile: Dockerfile
    image: securefix:latest
    container_name: securefix
    # Mount volumes for persistence and model access
    volumes:
      # Scan target (mount your code here) — read-only so the scanner
      # cannot modify the code under analysis
      - ./vulnerable:/scan:ro
      # Persistent data (reports, generated fixes, vector store)
      - ./data/reports:/data/reports
      - ./data/fixes:/data/fixes
      - ./data/chroma_db:/data/chroma_db
      # LlamaCPP models (mount your GGUF models here), read-only
      - ./models:/models:ro
      # Optional: Custom corpus
      # - ./custom_corpus:/securefix/remediation/corpus:ro
    # Environment variables
    environment:
      # LLM Configuration
      - MODE=local
      # `:-` default keeps the value empty (and Compose quiet) when the
      # host variable is unset
      - GOOGLE_API_KEY=${GOOGLE_API_KEY:-}
      # LlamaCPP Settings (if using llamacpp mode)
      - LLAMACPP_MODEL_PATH=/models/qwen-sast-q4_k_m.gguf
      - LLAMACPP_N_CTX=2048
      - LLAMACPP_N_THREADS=14
      - LLAMACPP_N_GPU_LAYERS=0
      - LLAMACPP_N_BATCH=512
      # Model name (for ollama mode)
      - MODEL_NAME=llama3.2:3b
      # Vector DB settings
      - VECTOR_K=4
      - VECTOR_FETCH_K=12
      - BM25_TOP_K=2
    # For interactive usage
    stdin_open: true
    tty: true
    # Resource limits (adjust based on your hardware).
    # NOTE(review): `deploy.resources` is a Swarm-oriented section; verify
    # your Compose version applies these limits under `docker compose up`.
    deploy:
      resources:
        limits:
          cpus: '14'
          memory: 16G
        reservations:
          cpus: '8'
          memory: 8G

  # Optional: Ollama service (if using Ollama mode)
  ollama:
    image: ollama/ollama:latest
    container_name: ollama
    ports:
      - "11434:11434"
    volumes:
      - ollama_data:/root/.ollama
    profiles:
      - ollama  # Only start with --profile ollama

# Named volume backing Ollama's model cache
volumes:
  ollama_data: