"""LiteLLM model provider.
- Docs: https://docs.litellm.ai/
"""
import json
import logging
from collections.abc import AsyncGenerator
from typing import Any, TypedDict, TypeVar, cast
import litellm
from litellm.exceptions import ContextWindowExceededError
from litellm.utils import supports_response_schema
from pydantic import BaseModel
from typing_extensions import Unpack, override
from ..tools import convert_pydantic_to_tool_spec
from ..types.content import ContentBlock, Messages, SystemContentBlock
from ..types.event_loop import Usage
from ..types.exceptions import ContextWindowOverflowException
from ..types.streaming import MetadataEvent, StreamEvent
from ..types.tools import ToolChoice, ToolSpec, ToolUse
from ._validation import validate_config_keys
from .openai import OpenAIModel
logger = logging.getLogger(__name__)
# Separator used by LiteLLM to embed thought signatures inside tool call IDs.
# See: https://ai.google.dev/gemini-api/docs/thought-signatures
_THOUGHT_SIGNATURE_SEPARATOR = "__thought__"
T = TypeVar("T", bound=BaseModel)
class LiteLLMModel(OpenAIModel):
"""LiteLLM model provider implementation."""
class LiteLLMConfig(TypedDict, total=False):
"""Configuration options for LiteLLM models.
Attributes:
model_id: Model ID (e.g., "openai/gpt-4o", "anthropic/claude-3-sonnet").
For a complete list of supported models, see https://docs.litellm.ai/docs/providers.
params: Model parameters (e.g., max_tokens).
For a complete list of supported parameters, see
https://docs.litellm.ai/docs/completion/input#input-params-1.
"""
model_id: str
params: dict[str, Any] | None
def __init__(self, client_args: dict[str, Any] | None = None, **model_config: Unpack[LiteLLMConfig]) -> None:
"""Initialize provider instance.
Args:
client_args: Arguments for the LiteLLM client.
For a complete list of supported arguments, see
https://github.com/BerriAI/litellm/blob/main/litellm/main.py.
**model_config: Configuration options for the LiteLLM model.
"""
self.client_args = client_args or {}
validate_config_keys(model_config, self.LiteLLMConfig)
self.config = dict(model_config)
self._apply_proxy_prefix()
logger.debug("config=<%s> | initializing", self.config)
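# Illustrative usage (a minimal sketch; the model_id, params, and api key shown here
# are assumptions, not defaults):
#
#   model = LiteLLMModel(
#       client_args={"api_key": "<your-key>"},
#       model_id="openai/gpt-4o",
#       params={"max_tokens": 1024, "temperature": 0.3},
#   )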
@override
def update_config(self, **model_config: Unpack[LiteLLMConfig]) -> None: # type: ignore[override]
"""Update the LiteLLM model configuration with the provided arguments.
Args:
**model_config: Configuration overrides.
"""
validate_config_keys(model_config, self.LiteLLMConfig)
self.config.update(model_config)
self._apply_proxy_prefix()
@override
def get_config(self) -> LiteLLMConfig:
"""Get the LiteLLM model configuration.
Returns:
The LiteLLM model configuration.
"""
return cast(LiteLLMModel.LiteLLMConfig, self.config)
@override
@classmethod
def format_request_message_content(cls, content: ContentBlock, **kwargs: Any) -> dict[str, Any]:
"""Format a LiteLLM content block.
Args:
content: Message content.
**kwargs: Additional keyword arguments for future extensibility.
Returns:
LiteLLM formatted content block.
Raises:
TypeError: If the content block type cannot be converted to a LiteLLM-compatible format.
"""
if "reasoningContent" in content:
return {
"signature": content["reasoningContent"]["reasoningText"]["signature"],
"thinking": content["reasoningContent"]["reasoningText"]["text"],
"type": "thinking",
}
if "video" in content:
return {
"type": "video_url",
"video_url": {
"detail": "auto",
"url": content["video"]["source"]["bytes"],
},
}
return super().format_request_message_content(content)
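# Sketch of the mapping above (the block values are made-up examples): a reasoning block
# becomes a "thinking" entry, a video block becomes a "video_url" entry, and everything
# else is delegated to the OpenAI formatting in the parent class.
#
#   LiteLLMModel.format_request_message_content(
#       {"reasoningContent": {"reasoningText": {"signature": "sig", "text": "thought"}}}
#   )
#   # -> {"signature": "sig", "thinking": "thought", "type": "thinking"}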
@classmethod
@override
def format_request_message_tool_call(cls, tool_use: ToolUse, **kwargs: Any) -> dict[str, Any]:
"""Format a LiteLLM compatible tool call, encoding thought signatures into the tool call ID.
Gemini thinking models attach a thought_signature to each function call. LiteLLM's OpenAI-compatible
interface embeds this signature inside the tool call ID using the ``__thought__`` separator. When
``reasoningSignature`` is present and the tool call ID does not already contain the separator, this
method encodes it so LiteLLM can reconstruct the Gemini-native format on the next request.
Args:
tool_use: Tool use requested by the model.
**kwargs: Additional keyword arguments for future extensibility.
Returns:
LiteLLM compatible tool call dict with thought signature encoded in the ID when present.
"""
tool_call = super().format_request_message_tool_call(tool_use, **kwargs)
reasoning_signature = tool_use.get("reasoningSignature")
if reasoning_signature and _THOUGHT_SIGNATURE_SEPARATOR not in tool_call["id"]:
tool_call["id"] = f"{tool_call['id']}{_THOUGHT_SIGNATURE_SEPARATOR}{reasoning_signature}"
return tool_call
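# Sketch of the ID encoding (values are made up): a tool use with toolUseId "call_1" and
# reasoningSignature "abc" produces a tool call whose "id" is roughly
# "call_1__thought__abc", so LiteLLM can split on the separator and restore the
# Gemini-native thought signature on the next request.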
def _stream_switch_content(self, data_type: str, prev_data_type: str | None) -> tuple[list[StreamEvent], str]:
"""Handle switching to a new content stream.
Args:
data_type: The next content data type.
prev_data_type: The previous content data type.
Returns:
Tuple containing:
- Stop chunk for the previous content (if any) and start chunk for the next content.
- The next content data type.
"""
chunks = []
if data_type != prev_data_type:
if prev_data_type is not None:
chunks.append(self.format_chunk({"chunk_type": "content_stop", "data_type": prev_data_type}))
chunks.append(self.format_chunk({"chunk_type": "content_start", "data_type": data_type}))
return chunks, data_type
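# Illustrative behavior: switching from "text" to "reasoning_content" yields a
# content_stop chunk for "text" followed by a content_start chunk for
# "reasoning_content"; if the data type is unchanged, no chunks are emitted.
#
#   chunks, data_type = self._stream_switch_content("reasoning_content", "text")
#   # chunks == [content_stop("text"), content_start("reasoning_content")]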
@override
@classmethod
def _format_system_messages(
cls,
system_prompt: str | None = None,
*,
system_prompt_content: list[SystemContentBlock] | None = None,
**kwargs: Any,
) -> list[dict[str, Any]]:
"""Format system messages for LiteLLM with cache point support.
Args:
system_prompt: System prompt to provide context to the model.
system_prompt_content: System prompt content blocks to provide context to the model.
**kwargs: Additional keyword arguments for future extensibility.
Returns:
List of formatted system messages.
"""
# Handle backward compatibility: if system_prompt is provided but system_prompt_content is None
if system_prompt and system_prompt_content is None:
system_prompt_content = [{"text": system_prompt}]
system_content: list[dict[str, Any]] = []
for block in system_prompt_content or []:
if "text" in block:
system_content.append({"type": "text", "text": block["text"]})
elif "cachePoint" in block and block["cachePoint"].get("type") == "default":
# Apply cache control to the immediately preceding content block
# for LiteLLM/Anthropic compatibility
if system_content:
system_content[-1]["cache_control"] = {"type": "ephemeral"}
# Create a single system message with a content array rather than multiple system messages
return [{"role": "system", "content": system_content}] if system_content else []
@override
@classmethod
def format_request_messages(
cls,
messages: Messages,
system_prompt: str | None = None,
*,
system_prompt_content: list[SystemContentBlock] | None = None,
**kwargs: Any,
) -> list[dict[str, Any]]:
"""Format a LiteLLM compatible messages array with cache point support.
Args:
messages: List of message objects to be processed by the model.
system_prompt: System prompt to provide context to the model (for legacy compatibility).
system_prompt_content: System prompt content blocks to provide context to the model.
**kwargs: Additional keyword arguments for future extensibility.
Returns:
A LiteLLM compatible messages array.
"""
formatted_messages = cls._format_system_messages(system_prompt, system_prompt_content=system_prompt_content)
formatted_messages.extend(cls._format_regular_messages(messages))
return [message for message in formatted_messages if "content" in message or "tool_calls" in message]
@override
def format_chunk(self, event: dict[str, Any], **kwargs: Any) -> StreamEvent:
"""Format a LiteLLM response event into a standardized message chunk.
Extends OpenAI's format_chunk to:
1. Handle metadata with prompt caching support.
2. Extract thought signatures that LiteLLM embeds in tool call IDs for Gemini thinking models.
Args:
event: A response event from the LiteLLM model.
**kwargs: Additional keyword arguments for future extensibility.
Returns:
The formatted chunk.
Raises:
RuntimeError: If chunk_type is not recognized.
"""
# Handle metadata case with prompt caching support
if event["chunk_type"] == "metadata":
usage_data: Usage = {
"inputTokens": event["data"].prompt_tokens,
"outputTokens": event["data"].completion_tokens,
"totalTokens": event["data"].total_tokens,
}
# Only LiteLLM over Anthropic supports cache write tokens
# Waiting until a more general approach is available to set cacheWriteInputTokens
if tokens_details := getattr(event["data"], "prompt_tokens_details", None):
if cached := getattr(tokens_details, "cached_tokens", None):
usage_data["cacheReadInputTokens"] = cached
if creation := getattr(event["data"], "cache_creation_input_tokens", None):
usage_data["cacheWriteInputTokens"] = creation
return StreamEvent(
metadata=MetadataEvent(
metrics={
"latencyMs": 0, # TODO
},
usage=usage_data,
)
)
# Extract thought signature from tool call content_start events.
# LiteLLM embeds Gemini thought signatures in the tool call ID using the __thought__ separator.
# We extract it into reasoningSignature so the streaming layer can preserve it through to
# the internal ToolUse representation. The full encoded ID is kept in toolUseId so that
# tool result messages (which reference toolUseId) continue to match the assistant message.
if event["chunk_type"] == "content_start" and event.get("data_type") == "tool":
data = event.get("data")
tool_call_id = getattr(data, "id", None) or ""
if not isinstance(tool_call_id, str):
tool_call_id = ""
# Also check provider_specific_fields for the signature (non-streaming responses)
psf = getattr(data, "provider_specific_fields", None) or {}
if isinstance(psf, dict):
psf_signature = psf.get("thought_signature")
else:
psf_signature = None
# Extract from encoded ID as fallback
id_signature = None
if _THOUGHT_SIGNATURE_SEPARATOR in tool_call_id:
_, id_signature = tool_call_id.split(_THOUGHT_SIGNATURE_SEPARATOR, 1)
# Also check function-level provider_specific_fields
func = getattr(data, "function", None)
func_psf = getattr(func, "provider_specific_fields", None) or {}
if isinstance(func_psf, dict):
func_signature = func_psf.get("thought_signature")
else:
func_signature = None
signature = psf_signature or func_signature or id_signature
chunk = super().format_chunk(event, **kwargs)
if signature:
tool_use = chunk.get("contentBlockStart", {}).get("start", {}).get("toolUse", {})
tool_use["reasoningSignature"] = signature
return chunk
# For all other cases, use the parent implementation
return super().format_chunk(event)
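# Sketch of the metadata mapping above (token counts are made up): a usage payload with
# prompt_tokens=120, completion_tokens=30, total_tokens=150,
# prompt_tokens_details.cached_tokens=100, and cache_creation_input_tokens=20 maps to
#
#   {"inputTokens": 120, "outputTokens": 30, "totalTokens": 150,
#    "cacheReadInputTokens": 100, "cacheWriteInputTokens": 20}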
@override
async def stream(
self,
messages: Messages,
tool_specs: list[ToolSpec] | None = None,
system_prompt: str | None = None,
*,
tool_choice: ToolChoice | None = None,
system_prompt_content: list[SystemContentBlock] | None = None,
**kwargs: Any,
) -> AsyncGenerator[StreamEvent, None]:
"""Stream conversation with the LiteLLM model.
Args:
messages: List of message objects to be processed by the model.
tool_specs: List of tool specifications to make available to the model.
system_prompt: System prompt to provide context to the model.
tool_choice: Selection strategy for tool invocation.
system_prompt_content: System prompt content blocks to provide context to the model.
**kwargs: Additional keyword arguments for future extensibility.
Yields:
Formatted message chunks from the model.
"""
logger.debug("formatting request")
request = self.format_request(
messages, tool_specs, system_prompt, tool_choice, system_prompt_content=system_prompt_content
)
logger.debug("request=<%s>", request)
# Check if streaming is disabled in the params
config = self.get_config()
params = config.get("params") or {}
is_streaming = params.get("stream", True)
litellm_request = {**request}
litellm_request["stream"] = is_streaming
logger.debug("invoking model with stream=%s", litellm_request.get("stream"))
try:
if is_streaming:
async for chunk in self._handle_streaming_response(litellm_request):
yield chunk
else:
async for chunk in self._handle_non_streaming_response(litellm_request):
yield chunk
except ContextWindowExceededError as e:
logger.warning("litellm client raised context window overflow")
raise ContextWindowOverflowException(e) from e
logger.debug("finished processing response from model")
@override
async def structured_output(
self, output_model: type[T], prompt: Messages, system_prompt: str | None = None, **kwargs: Any
) -> AsyncGenerator[dict[str, T | Any], None]:
"""Get structured output from the model.
Some models do not support native structured output via response_format.
When running behind a proxy we may not be able to determine support, so we
fall back to tool calling to achieve structured output.
Args:
output_model: The output model to use for the agent.
prompt: The prompt messages to use for the agent.
system_prompt: System prompt to provide context to the model.
**kwargs: Additional keyword arguments for future extensibility.
Yields:
Model events with the last being the structured output.
"""
if supports_response_schema(self.get_config()["model_id"]):
logger.debug("structuring output using response schema")
result = await self._structured_output_using_response_schema(output_model, prompt, system_prompt)
else:
logger.debug("model does not support response schema, structuring output using tool approach")
result = await self._structured_output_using_tool(output_model, prompt, system_prompt)
yield {"output": result}
async def _structured_output_using_response_schema(
self, output_model: type[T], prompt: Messages, system_prompt: str | None = None
) -> T:
"""Get structured output using native response_format support."""
response = await litellm.acompletion(
**self.client_args,
model=self.get_config()["model_id"],
messages=self.format_request(prompt, system_prompt=system_prompt)["messages"],
response_format=output_model,
)
if len(response.choices) > 1:
raise ValueError("Multiple choices found in the response.")
if not response.choices:
raise ValueError("No choices found in response")
choice = response.choices[0]
try:
# Parse the message content as JSON
tool_call_data = json.loads(choice.message.content)
# Instantiate the output model with the parsed data
return output_model(**tool_call_data)
except ContextWindowExceededError as e:
logger.warning("litellm client raised context window overflow in structured_output")
raise ContextWindowOverflowException(e) from e
except (json.JSONDecodeError, TypeError, ValueError) as e:
raise ValueError(f"Failed to parse or load content into model: {e}") from e
async def _structured_output_using_tool(
self, output_model: type[T], prompt: Messages, system_prompt: str | None = None
) -> T:
"""Get structured output using tool calling fallback."""
tool_spec = convert_pydantic_to_tool_spec(output_model)
request = self.format_request(prompt, [tool_spec], system_prompt, cast(ToolChoice, {"any": {}}))
args = {**self.client_args, **request, "stream": False}
response = await litellm.acompletion(**args)
if len(response.choices) > 1:
raise ValueError("Multiple choices found in the response.")
if not response.choices or response.choices[0].finish_reason != "tool_calls":
raise ValueError("No tool_calls found in response")
choice = response.choices[0]
try:
# Parse the tool call content as JSON
tool_call = choice.message.tool_calls[0]
tool_call_data = json.loads(tool_call.function.arguments)
# Instantiate the output model with the parsed data
return output_model(**tool_call_data)
except ContextWindowExceededError as e:
logger.warning("litellm client raised context window overflow in structured_output")
raise ContextWindowOverflowException(e) from e
except (json.JSONDecodeError, TypeError, ValueError) as e:
raise ValueError(f"Failed to parse or load content into model: {e}") from e
async def _process_choice_content(
self, choice: Any, data_type: str | None, tool_calls: dict[int, list[Any]], is_streaming: bool = True
) -> AsyncGenerator[tuple[str | None, StreamEvent], None]:
"""Process content from a choice object (streaming or non-streaming).
Args:
choice: The choice object from the response.
data_type: Current data type being processed.
tool_calls: Dictionary to collect tool calls.
is_streaming: Whether this is from a streaming response.
Yields:
Tuples of (updated_data_type, stream_event).
"""
# Get the content source - this is the only difference between streaming/non-streaming
# We use duck typing here: both choice.delta and choice.message have the same interface
# (reasoning_content, content, tool_calls attributes) but different object structures
content_source = choice.delta if is_streaming else choice.message
# Process reasoning content
if hasattr(content_source, "reasoning_content") and content_source.reasoning_content:
chunks, data_type = self._stream_switch_content("reasoning_content", data_type)
for chunk in chunks:
yield data_type, chunk
chunk = self.format_chunk(
{
"chunk_type": "content_delta",
"data_type": "reasoning_content",
"data": content_source.reasoning_content,
}
)
yield data_type, chunk
# Process text content
if hasattr(content_source, "content") and content_source.content:
chunks, data_type = self._stream_switch_content("text", data_type)
for chunk in chunks:
yield data_type, chunk
chunk = self.format_chunk(
{
"chunk_type": "content_delta",
"data_type": "text",
"data": content_source.content,
}
)
yield data_type, chunk
# Process tool calls
if hasattr(content_source, "tool_calls") and content_source.tool_calls:
if is_streaming:
# Streaming: tool calls carry an index attribute for out-of-order delivery
for tool_call in content_source.tool_calls:
tool_calls.setdefault(tool_call.index, []).append(tool_call)
else:
# Non-streaming: tool calls arrive in order, use enumerated index
for i, tool_call in enumerate(content_source.tool_calls):
tool_calls.setdefault(i, []).append(tool_call)
async def _process_tool_calls(self, tool_calls: dict[int, list[Any]]) -> AsyncGenerator[StreamEvent, None]:
"""Process and yield tool call events.
Args:
tool_calls: Dictionary of tool calls indexed by their position.
Yields:
Formatted tool call chunks.
"""
for tool_deltas in tool_calls.values():
yield self.format_chunk({"chunk_type": "content_start", "data_type": "tool", "data": tool_deltas[0]})
for tool_delta in tool_deltas:
yield self.format_chunk({"chunk_type": "content_delta", "data_type": "tool", "data": tool_delta})
yield self.format_chunk({"chunk_type": "content_stop", "data_type": "tool"})
async def _handle_non_streaming_response(
self, litellm_request: dict[str, Any]
) -> AsyncGenerator[StreamEvent, None]:
"""Handle non-streaming response from LiteLLM.
Args:
litellm_request: The formatted request for LiteLLM.
Yields:
Formatted message chunks from the model.
"""
response = await litellm.acompletion(**self.client_args, **litellm_request)
logger.debug("got non-streaming response from model")
yield self.format_chunk({"chunk_type": "message_start"})
tool_calls: dict[int, list[Any]] = {}
data_type: str | None = None
finish_reason: str | None = None
if hasattr(response, "choices") and response.choices and len(response.choices) > 0:
choice = response.choices[0]
if hasattr(choice, "message") and choice.message:
# Process content using shared logic
async for updated_data_type, chunk in self._process_choice_content(
choice, data_type, tool_calls, is_streaming=False
):
data_type = updated_data_type
yield chunk
if hasattr(choice, "finish_reason"):
finish_reason = choice.finish_reason
# Stop the current content block if we have one
if data_type:
yield self.format_chunk({"chunk_type": "content_stop", "data_type": data_type})
# Process tool calls
async for chunk in self._process_tool_calls(tool_calls):
yield chunk
yield self.format_chunk({"chunk_type": "message_stop", "data": finish_reason})
# Add usage information if available
if hasattr(response, "usage"):
yield self.format_chunk({"chunk_type": "metadata", "data": response.usage})
async def _handle_streaming_response(self, litellm_request: dict[str, Any]) -> AsyncGenerator[StreamEvent, None]:
"""Handle streaming response from LiteLLM.
Args:
litellm_request: The formatted request for LiteLLM.
Yields:
Formatted message chunks from the model.
"""
# For streaming, use the streaming API
response = await litellm.acompletion(**self.client_args, **litellm_request)
logger.debug("got response from model")
yield self.format_chunk({"chunk_type": "message_start"})
tool_calls: dict[int, list[Any]] = {}
data_type: str | None = None
finish_reason: str | None = None
async for event in response:
# Defensive: skip events with empty or missing choices
if not getattr(event, "choices", None):
continue
choice = event.choices[0]
# Process content using shared logic
async for updated_data_type, chunk in self._process_choice_content(
choice, data_type, tool_calls, is_streaming=True
):
data_type = updated_data_type
yield chunk
if choice.finish_reason:
finish_reason = choice.finish_reason
if data_type:
yield self.format_chunk({"chunk_type": "content_stop", "data_type": data_type})
break
# Process tool calls
async for chunk in self._process_tool_calls(tool_calls):
yield chunk
yield self.format_chunk({"chunk_type": "message_stop", "data": finish_reason})
# Drain the remaining events; only the final usage payload is of interest
async for event in response:
_ = event
if usage := getattr(event, "usage", None):
yield self.format_chunk({"chunk_type": "metadata", "data": usage})
logger.debug("finished streaming response from model")
def _apply_proxy_prefix(self) -> None:
"""Apply litellm_proxy/ prefix to model_id when use_litellm_proxy is True.
This is a workaround for https://github.com/BerriAI/litellm/issues/13454
where the use_litellm_proxy parameter is not honored.
"""
if self.client_args.get("use_litellm_proxy") and "model_id" in self.config:
model_id = self.get_config()["model_id"]
if not model_id.startswith("litellm_proxy/"):
self.config["model_id"] = f"litellm_proxy/{model_id}"