import json
import logging
import time
import uuid
import app.api.globals as cms_globals
from typing import Union, Iterable, AsyncGenerator, List
from typing_extensions import Annotated
from functools import partial
from fastapi import APIRouter, Depends, Request, Body, Query
from fastapi.encoders import jsonable_encoder
from fastapi.responses import PlainTextResponse, StreamingResponse, JSONResponse
from starlette.status import HTTP_200_OK, HTTP_400_BAD_REQUEST, HTTP_500_INTERNAL_SERVER_ERROR
from app.domain import (
    Tags,
    TagsGenerative,
    OpenAIChatRequest,
    OpenAIChatResponse,
    OpenAIEmbeddingsRequest,
    OpenAIEmbeddingsResponse,
    PromptMessage,
    PromptRole,
)
from app.model_services.base import AbstractModelService
from app.utils import get_settings, get_prompt_from_messages
from app.api.utils import get_rate_limiter
from app.api.dependencies import validate_tracking_id
from app.management.prometheus_metrics import cms_prompt_tokens, cms_completion_tokens, cms_total_tokens

PATH_GENERATE = "/generate"
PATH_GENERATE_ASYNC = "/stream/generate"
PATH_OPENAI_COMPLETIONS = "/v1/chat/completions"
PATH_OPENAI_EMBEDDINGS = "/v1/embeddings"

router = APIRouter()
config = get_settings()
limiter = get_rate_limiter(config)
logger = logging.getLogger("cms")

assert cms_globals.props is not None, "Current active user dependency not injected"
assert cms_globals.model_service_dep is not None, "Model service dependency not injected"


@router.post(
    PATH_GENERATE,
    tags=[TagsGenerative.Generative],
    response_class=PlainTextResponse,
    dependencies=[Depends(cms_globals.props.current_active_user)],
    description="Generate text",
)
def generate_text(
    request: Request,
    prompt: Annotated[str, Body(description="The prompt to be sent to the model", media_type="text/plain")],
    max_tokens: Annotated[int, Query(description="The maximum number of tokens to generate", gt=0)] = 512,
    temperature: Annotated[float, Query(description="The temperature of the generated text", ge=0.0)] = 0.7,
    top_p: Annotated[float, Query(description="The Top-P value for nucleus sampling", ge=0.0, le=1.0)] = 0.9,
    stop_sequences: Annotated[List[str], Query(description="The list of sequences used to stop the generation")] = [],
    tracking_id: Union[str, None] = Depends(validate_tracking_id),
    model_service: AbstractModelService = Depends(cms_globals.model_service_dep)
) -> PlainTextResponse:
    """
    Generates text based on the prompt provided.

    Args:
        request (Request): The request object.
        prompt (str): The prompt to be sent to the model.
        max_tokens (int): The maximum number of tokens to generate.
        temperature (float): The temperature of the generated text.
        top_p (float): The Top-P value for nucleus sampling.
        stop_sequences (List[str]): The list of sequences used to stop the generation.
        tracking_id (Union[str, None]): An optional tracking ID of the requested task.
        model_service (AbstractModelService): The model service dependency.

    Returns:
        PlainTextResponse: A response containing the generated text.
    """
    tracking_id = tracking_id or str(uuid.uuid4())
    if prompt:
        return PlainTextResponse(
            model_service.generate(
                prompt,
                max_tokens=max_tokens,
                temperature=temperature,
                top_p=top_p,
                stop_sequences=stop_sequences,
                report_tokens=partial(_send_usage_metrics, handler=PATH_GENERATE),
            ),
            headers={"x-cms-tracking-id": tracking_id},
            status_code=HTTP_200_OK,
        )
    else:
        return PlainTextResponse(
            _empty_prompt_error(),
            headers={"x-cms-tracking-id": tracking_id},
            status_code=HTTP_400_BAD_REQUEST,
        )
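

# Illustrative usage sketch (not part of the service): how a client might call the
# /generate endpoint above. The host, port, bearer token, and prompt text are
# assumptions for demonstration only; the real deployment details may differ.
def _example_generate_client() -> None:
    import requests  # assumed to be available on the client side

    response = requests.post(
        "http://localhost:8000/generate",  # assumed host and port
        params={"max_tokens": 128, "temperature": 0.2},
        data="Summarise the patient's presenting complaint.",
        headers={"Content-Type": "text/plain", "Authorization": "Bearer <token>"},  # auth scheme assumed
    )
    print(response.headers.get("x-cms-tracking-id"), response.text)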
@router.post(
PATH_GENERATE_ASYNC,
tags=[TagsGenerative.Generative],
response_class=StreamingResponse,
dependencies=[Depends(cms_globals.props.current_active_user)],
description="Generate a stream of texts",
)
async def generate_text_stream(
request: Request,
prompt: Annotated[str, Body(description="The prompt to be sent to the model", media_type="text/plain")],
max_tokens: Annotated[int, Query(description="The maximum number of tokens to generate", gt=0)] = 512,
temperature: Annotated[float, Query(description="The temperature of the generated text", ge=0.0)] = 0.7,
top_p: Annotated[float, Query(description="The Top-P value for nucleus sampling", ge=0.0, le=1.0)] = 0.9,
stop_sequences: Annotated[List[str], Query(description="The list of sequences used to stop the generation")] = [],
tracking_id: Union[str, None] = Depends(validate_tracking_id),
model_service: AbstractModelService = Depends(cms_globals.model_service_dep)
) -> StreamingResponse:
"""
Generates a stream of texts in near real-time.
Args:
request (Request): The request object.
prompt (str): The prompt to be sent to the model.
max_tokens (int): The maximum number of tokens to generate.
temperature (float): The temperature of the generated text.
top_p (float): The Top-P value for nucleus sampling.
stop_sequences (List[str]): The list of sequences used to stop the generation.
tracking_id (Union[str, None]): An optional tracking ID of the requested task.
model_service (AbstractModelService): The model service dependency.
Returns:
StreamingResponse: A streaming response containing the text generated in near real-time.
"""
tracking_id = tracking_id or str(uuid.uuid4())
if prompt:
return StreamingResponse(
model_service.generate_async(
prompt,
max_tokens=max_tokens,
temperature=temperature,
top_p=top_p,
stop_sequences=stop_sequences,
report_tokens=partial(_send_usage_metrics, handler=PATH_GENERATE_ASYNC),
),
media_type="text/event-stream",
headers={"x-cms-tracking-id": tracking_id},
status_code=HTTP_200_OK,
)
else:
return StreamingResponse(
_empty_prompt_error(),
media_type="text/event-stream",
headers={"x-cms-tracking-id": tracking_id},
status_code=HTTP_400_BAD_REQUEST,
)
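

# Illustrative usage sketch: consuming the /stream/generate endpoint above as a stream,
# printing chunks as they arrive. The URL and credentials are assumptions.
def _example_stream_generate_client() -> None:
    import requests  # assumed to be available on the client side

    with requests.post(
        "http://localhost:8000/stream/generate",  # assumed host and port
        data="List three differential diagnoses for chest pain.",
        headers={"Content-Type": "text/plain", "Authorization": "Bearer <token>"},  # auth scheme assumed
        stream=True,
    ) as response:
        for chunk in response.iter_content(chunk_size=None, decode_unicode=True):
            print(chunk, end="", flush=True)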
@router.post(
PATH_OPENAI_COMPLETIONS,
tags=[Tags.OpenAICompatible],
response_model=None,
dependencies=[Depends(cms_globals.props.current_active_user)],
description="Generate chat response based on messages, similar to OpenAI's /v1/chat/completions",
)
def generate_chat_completions(
request: Request,
request_data: Annotated[OpenAIChatRequest, Body(
description="OpenAI-like completion request", media_type="application/json"
)],
tracking_id: Union[str, None] = Depends(validate_tracking_id),
model_service: AbstractModelService = Depends(cms_globals.model_service_dep)
) -> Union[StreamingResponse, JSONResponse]:
"""
Generates chat response based on messages, mimicking OpenAI's /v1/chat/completions endpoint.
Args:
request (Request): The request object.
request_data (OpenAIChatRequest): The request data containing model, messages, stream, temperature, top_p, and stop_sequences.
tracking_id (Union[str, None]): An optional tracking ID of the requested task.
model_service (AbstractModelService): The model service dependency.
Returns:
StreamingResponse: A OpenAI-like response containing the text generated in near real-time.
JSONResponse: A response containing an error message if the prompt messages are empty.
"""
messages = request_data.messages
model = model_service.model_name if request_data.model != model_service.model_name else request_data.model
stream = request_data.stream
max_tokens = request_data.max_tokens
temperature = request_data.temperature
top_p = request_data.top_p
stop_sequences = request_data.stop_sequences
tracking_id = tracking_id or str(uuid.uuid4())
if not messages:
error_response = {
"error": {
"message": "No prompt messages provided",
"type": "invalid_request_error",
"param": "messages",
"code": "missing_field",
}
}
return JSONResponse(
content=error_response,
status_code=HTTP_400_BAD_REQUEST,
headers={"x-cms-tracking-id": tracking_id},
)
async def _stream(prompt: str, max_tokens: int, temperature: float, top_p: float, stop_sequences: List[str]) -> AsyncGenerator:
data = {
"id": tracking_id,
"object": "chat.completion.chunk",
"choices": [{"delta": {"role": PromptRole.ASSISTANT.value}}],
}
yield f"data: {json.dumps(data)}\n\n"
async for chunk in model_service.generate_async(
prompt,
max_tokens=max_tokens,
temperature=temperature,
top_p=top_p,
stop_sequences=stop_sequences,
report_tokens=partial(_send_usage_metrics, handler=PATH_OPENAI_COMPLETIONS)
):
data = {
"choices": [
{
"delta": {"content": chunk}
}
],
"object": "chat.completion.chunk",
}
yield f"data: {json.dumps(data)}\n\n"
yield "data: [DONE]\n\n"
assert hasattr(model_service, "tokenizer"), "Model service doesn't have a tokenizer"
prompt = get_prompt_from_messages(model_service.tokenizer, messages)
if stream:
return StreamingResponse(
_stream(prompt, max_tokens, temperature, top_p, stop_sequences or []),
media_type="text/event-stream",
headers={"x-cms-tracking-id": tracking_id},
)
else:
generated_text = model_service.generate(
prompt,
max_tokens=max_tokens,
temperature=temperature,
top_p=top_p,
stop_sequences=stop_sequences or [],
send_metrics=partial(_send_usage_metrics, handler=PATH_OPENAI_COMPLETIONS),
)
completion = OpenAIChatResponse(
id=tracking_id,
object="chat.completion",
created=int(time.time()),
model=model,
choices=[
{
"index": 0,
"message": PromptMessage(
role=PromptRole.ASSISTANT,
content=generated_text,
),
"finish_reason": "stop",
}
],
)
return JSONResponse(content=jsonable_encoder(completion), headers={"x-cms-tracking-id": tracking_id})
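

# Illustrative usage sketch: because the endpoint above mimics OpenAI's /v1/chat/completions,
# an OpenAI-style client can be pointed at this service. The base_url, api_key handling,
# and model name below are assumptions for demonstration only.
def _example_chat_completions_client() -> None:
    from openai import OpenAI  # assumed to be installed on the client side

    client = OpenAI(base_url="http://localhost:8000/v1", api_key="unused")  # assumed endpoint
    completion = client.chat.completions.create(
        model="cms-model",  # hypothetical model name; the service reports its own model name back
        messages=[{"role": "user", "content": "What is hypertension?"}],
    )
    print(completion.choices[0].message.content)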
@router.post(
PATH_OPENAI_EMBEDDINGS,
tags=[Tags.OpenAICompatible],
response_model=None,
dependencies=[Depends(cms_globals.props.current_active_user)],
description="Create embeddings based on text(s), similar to OpenAI's /v1/embeddings endpoint",
)
def embed_texts(
request: Request,
request_data: Annotated[OpenAIEmbeddingsRequest, Body(
description="Text(s) to be embedded", media_type="application/json"
)],
tracking_id: Union[str, None] = Depends(validate_tracking_id),
model_service: AbstractModelService = Depends(cms_globals.model_service_dep)
) -> JSONResponse:
"""
Embeds text or a list of texts, mimicking OpenAI's /v1/embeddings endpoint.
Args:
request (Request): The request object.
request_data (OpenAIEmbeddingsRequest): The request data containing model and input text(s).
tracking_id (Union[str, None]): An optional tracking ID of the requested task.
model_service (AbstractModelService): The model service dependency.
Returns:
JSONResponse: A response containing the embeddings of the text(s).
"""
tracking_id = tracking_id or str(uuid.uuid4())
if not hasattr(model_service, "create_embeddings"):
error_response = {
"error": {
"message": "Model does not support embeddings",
"type": "invalid_request_error",
"param": "model",
"code": "model_not_supported",
}
}
return JSONResponse(
content=error_response,
status_code=HTTP_500_INTERNAL_SERVER_ERROR,
headers={"x-cms-tracking-id": tracking_id},
)
input_text = request_data.input
model = model_service.model_name if request_data.model != model_service.model_name else request_data.model
if isinstance(input_text, str):
input_texts = [input_text]
else:
input_texts = input_text
try:
embeddings_data = []
for i, embedding in enumerate(model_service.create_embeddings(input_texts)):
embeddings_data.append({
"object": "embedding",
"embedding": embedding,
"index": i,
})
response = OpenAIEmbeddingsResponse(object="list", data=embeddings_data, model=model)
return JSONResponse(
content=jsonable_encoder(response),
headers={"x-cms-tracking-id": tracking_id},
)
except Exception as e:
logger.error("Failed to create embeddings")
logger.exception(e)
error_response = {
"error": {
"message": f"Failed to create embeddings: {str(e)}",
"type": "server_error",
"code": "internal_error",
}
}
return JSONResponse(
content=error_response,
status_code=HTTP_500_INTERNAL_SERVER_ERROR,
headers={"x-cms-tracking-id": tracking_id},
)
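

# Illustrative usage sketch: calling the OpenAI-style /v1/embeddings endpoint above with a
# list of texts. The URL, auth, and payload values are assumptions for demonstration only.
def _example_embeddings_client() -> None:
    import requests  # assumed to be available on the client side

    response = requests.post(
        "http://localhost:8000/v1/embeddings",  # assumed host and port
        json={"model": "cms-model", "input": ["chest pain", "shortness of breath"]},
        headers={"Authorization": "Bearer <token>"},  # auth scheme assumed
    )
    for item in response.json()["data"]:
        print(item["index"], len(item["embedding"]))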


def _empty_prompt_error() -> Iterable[str]:
    yield "ERROR: No prompt text provided\n"


def _send_usage_metrics(handler: str, prompt_token_num: int, completion_token_num: int) -> None:
    cms_prompt_tokens.labels(handler=handler).observe(prompt_token_num)
    logger.debug("Sent prompt tokens usage: %s", prompt_token_num)
    cms_completion_tokens.labels(handler=handler).observe(completion_token_num)
    logger.debug("Sent completion tokens usage: %s", completion_token_num)
    cms_total_tokens.labels(handler=handler).observe(prompt_token_num + completion_token_num)
    logger.debug("Sent total tokens usage: %s", prompt_token_num + completion_token_num)