-
Notifications
You must be signed in to change notification settings - Fork 7
Expand file tree
/
Copy pathlitellm.py
More file actions
240 lines (217 loc) · 9.45 KB
/
litellm.py
File metadata and controls
240 lines (217 loc) · 9.45 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
from __future__ import annotations
from datetime import timedelta
from collections.abc import AsyncGenerator
from temporalio.common import RetryPolicy
from agentex.lib.utils.logging import make_logger
from agentex.lib.utils.temporal import in_temporal_workflow
from agentex.types.task_message import TaskMessage
from agentex.lib.types.llm_messages import LLMConfig, Completion
from agentex.lib.core.tracing.tracer import AsyncTracer
from agentex.lib.adk.utils._modules.client import create_async_agentex_client
from agentex.lib.core.services.adk.streaming import StreamingService
from agentex.lib.core.adapters.llm.adapter_litellm import LiteLLMGateway
from agentex.lib.core.adapters.streams.adapter_redis import RedisStreamRepository
from agentex.lib.core.services.adk.providers.litellm import LiteLLMService
from agentex.lib.core.temporal.activities.activity_helpers import ActivityHelpers
from agentex.lib.core.temporal.activities.adk.providers.litellm_activities import (
LiteLLMActivityName,
ChatCompletionParams,
ChatCompletionAutoSendParams,
ChatCompletionStreamAutoSendParams,
)
# Module-level logger, named after this module for filterable log output.
logger = make_logger(__name__)
# Default retry policy for all LiteLLM operations
# Retries with exponential backoff: 1s, 2s, 4s, ... up to 30s between attempts
DEFAULT_RETRY_POLICY = RetryPolicy(
    maximum_attempts=3,
    initial_interval=timedelta(seconds=1),
    backoff_coefficient=2.0,
    maximum_interval=timedelta(seconds=30),
)
class LiteLLMModule:
    """
    Module for managing LiteLLM agent operations in Agentex.

    Provides high-level methods for chat completion and streaming. Each
    method transparently dispatches either to a Temporal activity (when
    called from inside a Temporal workflow) or directly to the underlying
    ``LiteLLMService`` (when called from plain async code).
    """

    def __init__(
        self,
        litellm_service: LiteLLMService | None = None,
    ):
        """
        Initialize the module.

        Args:
            litellm_service (Optional[LiteLLMService]): An existing service to
                delegate to. When None, a default service is assembled from an
                Agentex client, a Redis-backed stream repository, a LiteLLM
                gateway, and a tracer.
        """
        if litellm_service is None:
            # Create default service wiring: client -> streaming -> gateway -> tracer.
            agentex_client = create_async_agentex_client()
            stream_repository = RedisStreamRepository()
            streaming_service = StreamingService(
                agentex_client=agentex_client,
                stream_repository=stream_repository,
            )
            litellm_gateway = LiteLLMGateway()
            tracer = AsyncTracer(agentex_client)
            self._litellm_service = LiteLLMService(
                agentex_client=agentex_client,
                llm_gateway=litellm_gateway,
                streaming_service=streaming_service,
                tracer=tracer,
            )
        else:
            self._litellm_service = litellm_service

    async def chat_completion(
        self,
        llm_config: LLMConfig,
        trace_id: str | None = None,
        parent_span_id: str | None = None,
        start_to_close_timeout: timedelta = timedelta(seconds=120),
        heartbeat_timeout: timedelta = timedelta(seconds=120),
        retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY,
    ) -> Completion:
        """
        Perform a chat completion using LiteLLM.

        Args:
            llm_config (LLMConfig): The configuration for the LLM.
            trace_id (Optional[str]): The trace ID for tracing.
            parent_span_id (Optional[str]): The parent span ID for tracing.
            start_to_close_timeout (timedelta): The start to close timeout
                (only used when running inside a Temporal workflow).
            heartbeat_timeout (timedelta): The heartbeat timeout
                (only used when running inside a Temporal workflow).
            retry_policy (RetryPolicy): The retry policy
                (only used when running inside a Temporal workflow).

        Returns:
            Completion: An OpenAI compatible Completion object
        """
        if in_temporal_workflow():
            # Inside a workflow the LLM call must run as an activity so it is
            # durable and retryable; the timeouts/retry policy apply here.
            params = ChatCompletionParams(
                trace_id=trace_id, parent_span_id=parent_span_id, llm_config=llm_config
            )
            return await ActivityHelpers.execute_activity(
                activity_name=LiteLLMActivityName.CHAT_COMPLETION,
                request=params,
                response_type=Completion,
                start_to_close_timeout=start_to_close_timeout,
                heartbeat_timeout=heartbeat_timeout,
                retry_policy=retry_policy,
            )
        else:
            # Outside a workflow, call the service directly.
            return await self._litellm_service.chat_completion(
                llm_config=llm_config,
                trace_id=trace_id,
                parent_span_id=parent_span_id,
            )

    async def chat_completion_auto_send(
        self,
        task_id: str,
        llm_config: LLMConfig,
        trace_id: str | None = None,
        parent_span_id: str | None = None,
        start_to_close_timeout: timedelta = timedelta(seconds=120),
        heartbeat_timeout: timedelta = timedelta(seconds=120),
        retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY,
    ) -> TaskMessage | None:
        """
        Chat completion with automatic TaskMessage creation.

        Args:
            task_id (str): The ID of the task.
            llm_config (LLMConfig): The configuration for the LLM (must have stream=False).
            trace_id (Optional[str]): The trace ID for tracing.
            parent_span_id (Optional[str]): The parent span ID for tracing.
            start_to_close_timeout (timedelta): The start to close timeout
                (only used when running inside a Temporal workflow).
            heartbeat_timeout (timedelta): The heartbeat timeout
                (only used when running inside a Temporal workflow).
            retry_policy (RetryPolicy): The retry policy
                (only used when running inside a Temporal workflow).

        Returns:
            TaskMessage: The final TaskMessage
        """
        if in_temporal_workflow():
            # Use streaming activity with stream=False for non-streaming auto-send
            params = ChatCompletionAutoSendParams(
                trace_id=trace_id,
                parent_span_id=parent_span_id,
                task_id=task_id,
                llm_config=llm_config,
            )
            return await ActivityHelpers.execute_activity(
                activity_name=LiteLLMActivityName.CHAT_COMPLETION_AUTO_SEND,
                request=params,
                response_type=TaskMessage,
                start_to_close_timeout=start_to_close_timeout,
                heartbeat_timeout=heartbeat_timeout,
                retry_policy=retry_policy,
            )
        else:
            return await self._litellm_service.chat_completion_auto_send(
                task_id=task_id,
                llm_config=llm_config,
                trace_id=trace_id,
                parent_span_id=parent_span_id,
            )

    async def chat_completion_stream(
        self,
        llm_config: LLMConfig,
        trace_id: str | None = None,
        parent_span_id: str | None = None,
    ) -> AsyncGenerator[Completion, None]:
        """
        Stream chat completion chunks using LiteLLM.

        DEFAULT: Returns raw streaming chunks for manual handling.

        NOTE: This method does NOT work in Temporal workflows!
        Temporal activities cannot return generators. Use chat_completion_stream_auto_send() instead.

        Args:
            llm_config (LLMConfig): The configuration for the LLM (must have stream=True).
            trace_id (Optional[str]): The trace ID for tracing.
            parent_span_id (Optional[str]): The parent span ID for tracing.

        Yields:
            Completion: Individual completion chunks as they arrive.

        Raises:
            ValueError: If called from within a Temporal workflow
        """
        # Delegate to service - it handles temporal workflow checks
        async for chunk in self._litellm_service.chat_completion_stream(
            llm_config=llm_config,
            trace_id=trace_id,
            parent_span_id=parent_span_id,
        ):
            yield chunk

    async def chat_completion_stream_auto_send(
        self,
        task_id: str,
        llm_config: LLMConfig,
        trace_id: str | None = None,
        parent_span_id: str | None = None,
        start_to_close_timeout: timedelta = timedelta(seconds=120),
        heartbeat_timeout: timedelta = timedelta(seconds=120),
        retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY,
    ) -> TaskMessage | None:
        """
        Stream chat completion with automatic TaskMessage creation and streaming.

        Args:
            task_id (str): The ID of the task to run the agent for.
            llm_config (LLMConfig): The configuration for the LLM (must have stream=True).
            trace_id (Optional[str]): The trace ID for tracing.
            parent_span_id (Optional[str]): The parent span ID for tracing.
            start_to_close_timeout (timedelta): The start to close timeout
                (only used when running inside a Temporal workflow).
            heartbeat_timeout (timedelta): The heartbeat timeout
                (only used when running inside a Temporal workflow).
            retry_policy (RetryPolicy): The retry policy
                (only used when running inside a Temporal workflow).

        Returns:
            TaskMessage: The final TaskMessage after streaming is complete
        """
        if in_temporal_workflow():
            # The activity consumes the stream internally and returns only the
            # final TaskMessage, since activities cannot return generators.
            params = ChatCompletionStreamAutoSendParams(
                trace_id=trace_id,
                parent_span_id=parent_span_id,
                task_id=task_id,
                llm_config=llm_config,
            )
            return await ActivityHelpers.execute_activity(
                activity_name=LiteLLMActivityName.CHAT_COMPLETION_STREAM_AUTO_SEND,
                request=params,
                response_type=TaskMessage,
                start_to_close_timeout=start_to_close_timeout,
                heartbeat_timeout=heartbeat_timeout,
                retry_policy=retry_policy,
            )
        else:
            return await self._litellm_service.chat_completion_stream_auto_send(
                task_id=task_id,
                llm_config=llm_config,
                trace_id=trace_id,
                parent_span_id=parent_span_id,
            )