#!/bin/bash
# Voxtral + Kokoro TTS Deployment Script for RunPod
# Ultra-Low Latency Voice AI System (<500ms end-to-end)
# Optimized for RunPod HTTP/TCP-only infrastructure
set -e # Exit on any error
echo "π Starting Voxtral + Kokoro TTS Deployment on RunPod"
echo "=================================================="
# Color codes for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Function to print colored output
print_status() {
    echo -e "${BLUE}[INFO]${NC} $1"
}

print_success() {
    echo -e "${GREEN}[SUCCESS]${NC} $1"
}

print_warning() {
    echo -e "${YELLOW}[WARNING]${NC} $1"
}

print_error() {
    echo -e "${RED}[ERROR]${NC} $1"
}
# Check if running on RunPod
if [ ! -d "/workspace" ]; then
    print_warning "Not running on RunPod. Creating workspace directory..."
    mkdir -p /workspace
fi
cd /workspace
# Step 1: System Dependencies
print_status "Installing system dependencies..."
export DEBIAN_FRONTEND=noninteractive  # avoid interactive prompts (e.g. tzdata) hanging the script
apt-get update && apt-get upgrade -y
apt-get install -y git curl wget build-essential python3-dev python3-pip ffmpeg libsndfile1 portaudio19-dev
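# ffmpeg handles audio transcoding, libsndfile1 backs the Python soundfile
# package, and portaudio19-dev is required to build PyAudio from source.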
# Step 2: Verify GPU
print_status "Verifying GPU access..."
if command -v nvidia-smi &> /dev/null; then
    nvidia-smi
    print_success "GPU detected and accessible"
else
    print_error "No GPU detected. This application requires a CUDA-compatible GPU."
    exit 1
fi
# Step 3: Environment Variables
print_status "Setting up environment variables..."
export CUDA_VISIBLE_DEVICES=0
export PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:512
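# max_split_size_mb:512 stops the CUDA caching allocator from splitting blocks
# larger than 512 MB, which reduces fragmentation-related OOMs on long runs.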
# Check for HuggingFace token
if [ -z "$HF_TOKEN" ]; then
    print_warning "HF_TOKEN not set. Some models may require authentication."
    print_warning "Set the HF_TOKEN environment variable if needed."
fi
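# Example (the token value below is a placeholder, not a real credential):
#   export HF_TOKEN=hf_xxxxxxxxxxxxxxxxxxxx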
# Step 4: Create directories
print_status "Creating required directories..."
mkdir -p model_cache logs temp_audio
chmod 755 model_cache logs temp_audio
# Step 5: Python Dependencies
print_status "Upgrading pip..."
python3 -m pip install --upgrade pip
print_status "Installing PyTorch with CUDA 12.1 support..."
pip install torch==2.4.1+cu121 torchaudio==2.4.1+cu121 torchvision==0.19.1+cu121 --extra-index-url https://download.pytorch.org/whl/cu121
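# torch, torchaudio, and torchvision are version-locked to the same PyTorch
# release; mixing wheels from different releases typically fails at import time.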
print_status "Installing project dependencies..."
[ -f requirements.txt ] || { print_error "requirements.txt not found in /workspace. Copy the project files here first."; exit 1; }
pip install -r requirements.txt
# Step 6: Verify Installation
print_status "Verifying installation..."
python3 -c "
import torch
print(f'PyTorch: {torch.__version__}')
print(f'CUDA Available: {torch.cuda.is_available()}')
if torch.cuda.is_available():
print(f'CUDA Device: {torch.cuda.get_device_name(0)}')
print(f'CUDA Memory: {torch.cuda.get_device_properties(0).total_memory / 1024**3:.1f} GB')
else:
print('No CUDA device detected')
"
# Step 7: Model Pre-loading (Optional but recommended)
print_status "Pre-loading models (this may take several minutes)..."
python3 -c "
import os
if os.environ.get('HF_TOKEN'):
os.environ['HUGGINGFACE_HUB_TOKEN'] = os.environ['HF_TOKEN']
try:
from transformers import VoxtralForConditionalGeneration, AutoProcessor
print('Downloading Voxtral model...')
model = VoxtralForConditionalGeneration.from_pretrained(
'mistralai/Voxtral-Mini-3B-2507',
cache_dir='./model_cache',
torch_dtype='float16',
device_map='auto'
)
processor = AutoProcessor.from_pretrained(
'mistralai/Voxtral-Mini-3B-2507',
cache_dir='./model_cache'
)
print('Voxtral model downloaded successfully')
except Exception as e:
print(f'Warning: Could not pre-load Voxtral model: {e}')
try:
import kokoro
print('Kokoro TTS is available')
except Exception as e:
print(f'Warning: Kokoro TTS not available: {e}')
"
# Step 8: Create startup script
print_status "Creating startup script..."
cat > start_voxtral.sh << 'EOF'
#!/bin/bash
# Voxtral + Kokoro TTS Startup Script
echo "π€ Starting Voxtral + Kokoro TTS Voice AI System"
# Set environment variables
export CUDA_VISIBLE_DEVICES=0
export PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:512
# Start services in background
echo "Starting Health Check Server (port 8005)..."
python3 -m src.api.health_check &
HEALTH_PID=$!
echo "Starting Main UI Server (port 8000)..."
python3 -m src.api.ui_server_realtime &
UI_PID=$!
echo "Starting TCP Server (port 8766)..."
python3 -m src.streaming.tcp_server &
TCP_PID=$!
# Wait a moment for services to start
sleep 5
echo "β
All services started!"
echo "π Web Interface: https://[POD_ID]-8000.proxy.runpod.net"
echo "π₯ Health Check: https://[POD_ID]-8005.proxy.runpod.net/health"
echo "π TCP Server: Port 8766"
echo ""
echo "Press Ctrl+C to stop all services"
# Function to cleanup on exit
cleanup() {
    echo "Stopping services..."
    kill $HEALTH_PID $UI_PID $TCP_PID 2>/dev/null
    exit 0
}
trap cleanup SIGINT SIGTERM
# Wait for any process to exit
wait
EOF
chmod +x start_voxtral.sh
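# To keep the stack alive after the SSH session closes, one option is:
#   nohup ./start_voxtral.sh > logs/startup.log 2>&1 &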
# Step 9: Create health check script
print_status "Creating health check script..."
cat > health_check.sh << 'EOF'
#!/bin/bash
# Quick health check for Voxtral system
echo "π₯ Voxtral System Health Check"
echo "=============================="
# Check if services are running
if pgrep -f "src.api.ui_server_realtime" > /dev/null; then
echo "β
UI Server: Running"
else
echo "β UI Server: Not running"
fi
if pgrep -f "src.api.health_check" > /dev/null; then
echo "β
Health Check Server: Running"
else
echo "β Health Check Server: Not running"
fi
if pgrep -f "src.streaming.tcp_server" > /dev/null; then
echo "β
TCP Server: Running"
else
echo "β TCP Server: Not running"
fi
# Check GPU
echo ""
echo "π₯οΈ GPU Status:"
nvidia-smi --query-gpu=name,memory.used,memory.total --format=csv,noheader,nounits
# Check ports
echo ""
echo "π Port Status:"
netstat -tlnp | grep -E ":(8000|8005|8766)" || echo "No services listening on expected ports"
EOF
chmod +x health_check.sh
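# Once the services are up, the health endpoint can also be probed directly
# (path taken from the URL printed by start_voxtral.sh):
#   curl -sf http://localhost:8005/health && echo "health server OK"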
# Step 10: Final setup
print_status "Final setup and verification..."
# Create logs directory if it doesn't exist
mkdir -p logs
# Set proper permissions
if [ -d src ]; then
    chmod -R 755 src/
fi
chmod +x *.sh
print_success "π Deployment completed successfully!"
echo ""
echo "π Next Steps:"
echo "1. Start the system: ./start_voxtral.sh"
echo "2. Check health: ./health_check.sh"
echo "3. Access web interface: https://[POD_ID]-8000.proxy.runpod.net"
echo ""
echo "π§ Manual Commands:"
echo "- Start UI: python3 -m src.api.ui_server_realtime"
echo "- Start Health: python3 -m src.api.health_check"
echo "- Start TCP: python3 -m src.streaming.tcp_server"
echo ""
print_success "Ready for ultra-low latency voice conversations! π€β¨"