-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathstt_server.py
More file actions
104 lines (81 loc) · 2.73 KB
/
stt_server.py
File metadata and controls
104 lines (81 loc) · 2.73 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
"""STT API server using faster-whisper."""
import tempfile
from pathlib import Path
import av
import numpy as np
import soundfile as sf
from fastapi import FastAPI, File, Form, UploadFile
from faster_whisper import WhisperModel
from config import STT_BEAM_SIZE, STT_COMPUTE_TYPE, STT_DEVICE, STT_MODEL
# ASGI application instance; served by uvicorn in the __main__ block below.
app = FastAPI(title="GPU STT Server", version="1.0.0")
# Lazily-loaded process-wide model singleton; populated by get_model() on first use.
model: WhisperModel | None = None
def get_model() -> WhisperModel:
    """Return the shared WhisperModel, loading it on first use.

    The model is cached in the module-level ``model`` global so the
    (expensive) weight load happens at most once per process.
    """
    global model
    if model is None:
        model = WhisperModel(
            STT_MODEL,
            device=STT_DEVICE,
            compute_type=STT_COMPUTE_TYPE,
        )
    return model
@app.get("/health")
def health():
    """Liveness probe: report server status plus the configured model and device."""
    payload = {
        "status": "ok",
        "model": STT_MODEL,
        "device": STT_DEVICE,
    }
    return payload
@app.post("/stt")
async def transcribe(
    audio: UploadFile = File(...),
    language: str | None = Form(None),
    beam_size: int = Form(STT_BEAM_SIZE),
):
    """Transcribe uploaded audio to text with faster-whisper.

    Non-WAV uploads (browser mic recordings typically arrive as WebM/Opus)
    are decoded with PyAV and re-written as a mono WAV before transcription.

    Args:
        audio: Uploaded audio file (WAV passed through; anything else decoded).
        language: Optional language hint; ``None`` lets the model auto-detect.
        beam_size: Beam-search width for decoding.

    Returns:
        Dict with detected language, its probability, the full transcript,
        and per-segment start/end timings.
    """
    whisper = get_model()
    content = await audio.read()
    filename = audio.filename or ""

    tmp = tempfile.NamedTemporaryFile(suffix=".wav", delete=False)
    try:
        if filename.endswith(".wav"):
            tmp.write(content)
            tmp.flush()
            tmp.close()
        else:
            tmp.close()
            _decode_to_wav(content, tmp.name)

        segments, info = whisper.transcribe(
            tmp.name,
            beam_size=beam_size,
            language=language,
        )

        results = []
        full_text_parts: list[str] = []
        for seg in segments:
            text = seg.text.strip()
            results.append({
                "start": round(seg.start, 2),
                "end": round(seg.end, 2),
                "text": text,
            })
            full_text_parts.append(text)

        return {
            "language": info.language,
            "language_probability": round(info.language_probability, 3),
            "text": " ".join(full_text_parts),
            "segments": results,
        }
    finally:
        # Was only unlinked on the happy path before; a decode or transcribe
        # failure leaked one WAV per request. Always clean up.
        Path(tmp.name).unlink(missing_ok=True)


def _decode_to_wav(content: bytes, wav_path: str) -> None:
    """Decode compressed audio bytes (e.g. WebM/Opus) and write a mono WAV.

    Raises whatever PyAV raises on undecodable input; temp files and the
    container are cleaned up regardless.
    """
    raw = tempfile.NamedTemporaryFile(suffix=".webm", delete=False)
    try:
        raw.write(content)
        raw.flush()
        raw.close()
        container = av.open(raw.name)
        try:
            frames = []
            src_rate = 48000  # Opus default; overwritten by the actual frame rate below
            for frame in container.decode(audio=0):
                frames.append(frame.to_ndarray())
                src_rate = frame.sample_rate
        finally:
            container.close()
    finally:
        Path(raw.name).unlink(missing_ok=True)

    if not frames:
        # A stream with zero audio frames would crash np.concatenate on an
        # empty list; emit a single-sample silent WAV instead.
        audio_data = np.zeros((1, 1), dtype=np.float32)
    else:
        audio_data = np.concatenate(frames, axis=1)
        if audio_data.shape[0] > 1:
            # Downmix multi-channel audio to mono by averaging channels.
            audio_data = audio_data.mean(axis=0, keepdims=True)
    sf.write(wav_path, audio_data.squeeze(), src_rate)
if __name__ == "__main__":
import uvicorn
from config import HOST, PORT
get_model()
uvicorn.run(app, host=HOST, port=int(PORT))