# YA-PapersWithCode Backend Configuration Template
# Copy this file to .env and configure for your deployment mode
# ====================
# DEPLOYMENT MODE
# ====================
# Options: local, model_only, api_mode
# - local: Local AI model + PapersWithCode database (full functionality)
# - model_only: Only deploy AI model services (no database)
# - api_mode: External AI API + PapersWithCode database (hybrid mode)
DEPLOYMENT_MODE=local
# ====================
# LOCAL MODE SETTINGS
# ====================
# Path to model checkpoints (relative or absolute)
MODEL_PATH=checkpoints
# Device for model inference (cpu, cuda, mps)
DEVICE=cpu
# PyTorch data type (float32, float16, bfloat16)
TORCH_DTYPE=float32
# Embedding model for semantic search
EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2
# ====================
# API MODE SETTINGS (for api_mode)
# ====================
# OpenAI API Configuration
OPENAI_API_KEY=your-openai-api-key-here
# API base URL (for OpenAI-compatible services)
API_BASE=https://api.openai.com/v1
# Model name (gpt-4o-mini-2025-08-07, gpt-4, claude-3-sonnet, etc.)
MODEL_NAME=gpt-4o-mini-2025-08-07
# ====================
# MODEL ONLY MODE SETTINGS (for model_only)
# ====================
# Enable only AI model endpoints
ENABLE_DB_ENDPOINTS=false
# ====================
# COMMON SETTINGS
# ====================
# Maximum tokens to generate
MAX_TOKENS=512
# Temperature for text generation (0.0-1.0)
TEMPERATURE=0.7
# Request timeout in seconds
TIMEOUT=30
# Log level (DEBUG, INFO, WARNING, ERROR)
LOG_LEVEL=INFO
# ====================
# DATABASE SETTINGS
# ====================
# Database path (SQLite)
DATABASE_PATH=paperswithcode.db
# ====================
# PORT CONFIGURATION
# ====================
# Backend API server
BACKEND_HOST=0.0.0.0
BACKEND_PORT=8000
# Frontend development server
FRONTEND_HOST=localhost
FRONTEND_PORT=5173
# Model-only service port (for model_only mode)
MODEL_SERVICE_PORT=8001
# ====================
# API SERVER SETTINGS
# ====================
# Server host and port (deprecated - use BACKEND_HOST/BACKEND_PORT)
HOST=0.0.0.0
PORT=8000
# CORS allowed origins (comma-separated)
# Note: Update these URLs if you change FRONTEND_HOST or FRONTEND_PORT above
CORS_ORIGINS=http://localhost:5173,http://localhost:3000
# ====================
# SEARCH SETTINGS
# ====================
# Enable agent search
ENABLE_AGENT_SEARCH=true
# Similarity threshold for semantic search (0.0-1.0)
SIMILARITY_THRESHOLD=0.7
# Maximum search results
MAX_SEARCH_RESULTS=50
# ====================
# CACHE SETTINGS
# ====================
# Enable result caching
ENABLE_CACHE=true
# Cache TTL in seconds
CACHE_TTL=3600
# ====================
# FEATURE FLAGS
# ====================
# Enable fallback chains (PASA → API → Semantic → SQL)
ENABLE_FALLBACK=true
# Enable health checks
ENABLE_HEALTH_CHECKS=true
# Enable metrics collection
ENABLE_METRICS=false
# Use mock models instead of real AI models (for testing)
USE_MOCK_MODELS=false