base_llm_flow.py (forked from google/adk-python)
# Copyright 2026 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from abc import ABC
import asyncio
import contextlib
import datetime
import inspect
import logging
from typing import AsyncGenerator
from typing import cast
from typing import Optional
from typing import TYPE_CHECKING
from google.genai import types
from websockets.exceptions import ConnectionClosed
from websockets.exceptions import ConnectionClosedOK
from . import _output_schema_processor
from . import functions
from ...agents.base_agent import BaseAgent
from ...agents.callback_context import CallbackContext
from ...agents.invocation_context import InvocationContext
from ...agents.live_request_queue import LiveRequestQueue
from ...agents.readonly_context import ReadonlyContext
from ...agents.run_config import StreamingMode
from ...agents.transcription_entry import TranscriptionEntry
from ...auth.auth_handler import AuthHandler
from ...auth.auth_tool import AuthConfig
from ...auth.credential_manager import CredentialManager
from ...events.event import Event
from ...models.base_llm_connection import BaseLlmConnection
from ...models.llm_request import LlmRequest
from ...models.llm_response import LlmResponse
from ...telemetry import tracing
from ...telemetry.tracing import trace_call_llm
from ...telemetry.tracing import trace_send_data
from ...telemetry.tracing import tracer
from ...tools.base_toolset import BaseToolset
from ...tools.google_search_tool import google_search
from ...tools.tool_context import ToolContext
from ...utils.context_utils import Aclosing
from ...utils.telemetry_utils import is_telemetry_enabled
from .audio_cache_manager import AudioCacheManager
from .functions import build_auth_request_event
from .functions import REQUEST_EUC_FUNCTION_CALL_NAME
# Prefix used by toolset auth credential IDs
TOOLSET_AUTH_CREDENTIAL_ID_PREFIX = '_adk_toolset_auth_'
if TYPE_CHECKING:
from ...agents.llm_agent import LlmAgent
from ...models.base_llm import BaseLlm
from ._base_llm_processor import BaseLlmRequestProcessor
from ._base_llm_processor import BaseLlmResponseProcessor
logger = logging.getLogger('google_adk.' + __name__)
_ADK_AGENT_NAME_LABEL_KEY = 'adk_agent_name'
# Timing configuration
DEFAULT_TRANSFER_AGENT_DELAY = 1.0
DEFAULT_TASK_COMPLETION_DELAY = 1.0
# Statistics configuration
DEFAULT_ENABLE_CACHE_STATISTICS = False
def _finalize_model_response_event(
llm_request: LlmRequest,
llm_response: LlmResponse,
model_response_event: Event,
) -> Event:
"""Finalize and build the model response event from LLM response.
Merges the LLM response data into the model response event and
populates function call IDs and long-running tool information.
Args:
llm_request: The original LLM request.
llm_response: The LLM response from the model.
model_response_event: The base event to populate.
Returns:
The finalized Event with LLM response data merged in.
"""
finalized_event = Event.model_validate({
**model_response_event.model_dump(exclude_none=True),
**llm_response.model_dump(exclude_none=True),
})
if finalized_event.content:
function_calls = finalized_event.get_function_calls()
if function_calls:
functions.populate_client_function_call_id(finalized_event)
finalized_event.long_running_tool_ids = (
functions.get_long_running_function_calls(
function_calls, llm_request.tools_dict
)
)
return finalized_event
class BaseLlmFlow(ABC):
"""A basic flow that calls the LLM in a loop until a final response is generated.
This flow ends when it transfers to another agent.
"""
def __init__(self):
self.request_processors: list[BaseLlmRequestProcessor] = []
self.response_processors: list[BaseLlmResponseProcessor] = []
# Initialize configuration and managers
self.audio_cache_manager = AudioCacheManager()
async def run_live(
self,
invocation_context: InvocationContext,
) -> AsyncGenerator[Event, None]:
"""Runs the flow using live api."""
llm_request = LlmRequest()
event_id = Event.new_id()
# Preprocess before calling the LLM.
async with Aclosing(
self._preprocess_async(invocation_context, llm_request)
) as agen:
async for event in agen:
yield event
if invocation_context.end_invocation:
return
llm = self.__get_llm(invocation_context)
logger.debug(
'Establishing live connection for agent: %s with llm request: %s',
invocation_context.agent.name,
llm_request,
)
attempt = 1
while True:
try:
# On subsequent attempts, use the saved token to reconnect
if invocation_context.live_session_resumption_handle:
logger.info('Attempting to reconnect (Attempt %s)...', attempt)
attempt += 1
if not llm_request.live_connect_config:
llm_request.live_connect_config = types.LiveConnectConfig()
if not llm_request.live_connect_config.session_resumption:
llm_request.live_connect_config.session_resumption = (
types.SessionResumptionConfig()
)
llm_request.live_connect_config.session_resumption.handle = (
invocation_context.live_session_resumption_handle
)
llm_request.live_connect_config.session_resumption.transparent = True
logger.info(
'Establishing live connection for agent: %s',
invocation_context.agent.name,
)
async with llm.connect(llm_request) as llm_connection:
if llm_request.contents:
# Sends the conversation history to the model.
span_context = contextlib.nullcontext()
if is_telemetry_enabled(invocation_context.agent):
span_context = tracer.start_as_current_span('send_data')
with span_context as span:
logger.debug('Sending history to model: %s', llm_request.contents)
await llm_connection.send_history(llm_request.contents)
if span:
trace_send_data(
invocation_context, event_id, llm_request.contents
)
send_task = asyncio.create_task(
self._send_to_model(llm_connection, invocation_context)
)
try:
async with Aclosing(
self._receive_from_model(
llm_connection,
event_id,
invocation_context,
llm_request,
)
) as agen:
async for event in agen:
# Empty event means the queue is closed.
if not event:
break
logger.debug('Receive new event: %s', event)
yield event
# Send the function response back to the model.
if event.get_function_responses():
logger.debug(
'Sending back last function response event: %s', event
)
invocation_context.live_request_queue.send_content(
event.content
)
# We handle agent transfer here in `run_live` rather than in
# `_postprocess_live` to prevent duplicate processing of function
# responses. If agent transfer were handled in `_postprocess_live`,
# events yielded from the child agent's `run_live` would bubble up to
# the parent agent's `run_live`, causing `event.get_function_responses()`
# to be true in both the child and the parent, and `send_content()` to be
# called twice for the same function response. By handling agent transfer
# here, we ensure that only the child agent processes its own function
# responses after the transfer.
if (
event.content
and event.content.parts
and event.content.parts[0].function_response
and event.content.parts[0].function_response.name
== 'transfer_to_agent'
):
await asyncio.sleep(DEFAULT_TRANSFER_AGENT_DELAY)
# Cancel the send task that belongs to the closed connection.
send_task.cancel()
logger.debug('Closing live connection')
await llm_connection.close()
logger.debug('Live connection closed.')
# transfer to the sub agent.
transfer_to_agent = event.actions.transfer_to_agent
if transfer_to_agent:
logger.debug('Transferring to agent: %s', transfer_to_agent)
agent_to_run = self._get_agent_to_run(
invocation_context, transfer_to_agent
)
async with Aclosing(
agent_to_run.run_live(invocation_context)
) as agen:
async for item in agen:
yield item
if (
event.content
and event.content.parts
and event.content.parts[0].function_response
and event.content.parts[0].function_response.name
== 'task_completed'
):
# This is used by sequential agents to signal the end of the current agent.
await asyncio.sleep(DEFAULT_TASK_COMPLETION_DELAY)
# Cancel the send task that belongs to the closed connection.
send_task.cancel()
return
finally:
# Clean up
if not send_task.done():
send_task.cancel()
try:
await send_task
except asyncio.CancelledError:
pass
except (ConnectionClosed, ConnectionClosedOK) as e:
# When the session times out, the connection simply closes without raising
# an exception, so this branch handles the abnormal cases.
logger.error('Connection closed: %s.', e)
raise
except Exception as e:
logger.error(
'An unexpected error occurred in live flow: %s', e, exc_info=True
)
raise
async def _send_to_model(
self,
llm_connection: BaseLlmConnection,
invocation_context: InvocationContext,
):
"""Sends data to model."""
while True:
live_request_queue = invocation_context.live_request_queue
live_request = await live_request_queue.get()
# duplicate the live_request to all the active streams
logger.debug(
'Sending live request %s to active streams: %s',
live_request,
invocation_context.active_streaming_tools,
)
if invocation_context.active_streaming_tools:
for active_streaming_tool in (
invocation_context.active_streaming_tools
).values():
if active_streaming_tool.stream:
active_streaming_tool.stream.send(live_request)
# Yield to event loop for cooperative multitasking
await asyncio.sleep(0)
if live_request.close:
await llm_connection.close()
return
if live_request.activity_start:
await llm_connection.send_realtime(types.ActivityStart())
elif live_request.activity_end:
await llm_connection.send_realtime(types.ActivityEnd())
elif live_request.blob:
# Cache input audio chunks before flushing
self.audio_cache_manager.cache_audio(
invocation_context, live_request.blob, cache_type='input'
)
await llm_connection.send_realtime(live_request.blob)
if live_request.content:
content = live_request.content
# Persist user text content to session (similar to non-live mode)
# Skip function responses - they are already handled separately
is_function_response = content.parts and any(
part.function_response for part in content.parts
)
if not is_function_response:
if not content.role:
content.role = 'user'
user_content_event = Event(
id=Event.new_id(),
invocation_id=invocation_context.invocation_id,
author='user',
content=content,
)
await invocation_context.session_service.append_event(
session=invocation_context.session,
event=user_content_event,
)
await llm_connection.send_content(live_request.content)
async def _receive_from_model(
self,
llm_connection: BaseLlmConnection,
event_id: str,
invocation_context: InvocationContext,
llm_request: LlmRequest,
) -> AsyncGenerator[Event, None]:
"""Receive data from model and process events using BaseLlmConnection."""
def get_author_for_event(llm_response):
"""Get the author of the event.
When the model returns a transcription, the author is "user". Otherwise,
the author is the agent name (not 'model').
Args:
llm_response: The LLM response from the LLM call.
"""
if (
llm_response
and llm_response.content
and llm_response.content.role == 'user'
):
return 'user'
else:
return invocation_context.agent.name
try:
while True:
async with Aclosing(llm_connection.receive()) as agen:
async for llm_response in agen:
if llm_response.live_session_resumption_update:
logger.info(
'Update session resumption handle:'
f' {llm_response.live_session_resumption_update}.'
)
invocation_context.live_session_resumption_handle = (
llm_response.live_session_resumption_update.new_handle
)
model_response_event = Event(
id=Event.new_id(),
invocation_id=invocation_context.invocation_id,
author=get_author_for_event(llm_response),
)
async with Aclosing(
self._postprocess_live(
invocation_context,
llm_request,
llm_response,
model_response_event,
)
) as agen:
async for event in agen:
# Cache output audio chunks from model responses
# TODO: support video data
if (
invocation_context.run_config.save_live_blob
and event.content
and event.content.parts
and event.content.parts[0].inline_data
and event.content.parts[0].inline_data.mime_type.startswith(
'audio/'
)
):
audio_blob = types.Blob(
data=event.content.parts[0].inline_data.data,
mime_type=event.content.parts[0].inline_data.mime_type,
)
self.audio_cache_manager.cache_audio(
invocation_context, audio_blob, cache_type='output'
)
yield event
# Give opportunity for other tasks to run.
await asyncio.sleep(0)
except ConnectionClosedOK:
pass
async def run_async(
self, invocation_context: InvocationContext
) -> AsyncGenerator[Event, None]:
"""Runs the flow."""
while True:
last_event = None
async with Aclosing(self._run_one_step_async(invocation_context)) as agen:
async for event in agen:
last_event = event
yield event
if not last_event or last_event.is_final_response() or last_event.partial:
if last_event and last_event.partial:
logger.warning('The last event is partial, which is not expected.')
break
async def _run_one_step_async(
self,
invocation_context: InvocationContext,
) -> AsyncGenerator[Event, None]:
"""One step means one LLM call."""
llm_request = LlmRequest()
# Preprocess before calling the LLM.
async with Aclosing(
self._preprocess_async(invocation_context, llm_request)
) as agen:
async for event in agen:
yield event
if invocation_context.end_invocation:
return
# Resume the LLM agent based on the last event from the current branch.
# 1. User content: continue the normal flow
# 2. Function call: call the tool and get the response event.
events = invocation_context._get_events(
current_invocation=True, current_branch=True
)
# Long running tool calls should have been handled before this point.
# If there are still long running tool calls, it means the agent was
# paused earlier and its branch hasn't been resumed yet.
if (
invocation_context.is_resumable
and events
and len(events) > 1
# TODO: here we are using the last 2 events to decide whether to pause
# the invocation. But this is optimistic; we should find a way to pause
# when the long running tool call is followed by more than one text
# response.
and (
invocation_context.should_pause_invocation(events[-1])
or invocation_context.should_pause_invocation(events[-2])
)
):
return
if (
invocation_context.is_resumable
and events
and events[-1].get_function_calls()
):
model_response_event = events[-1]
async with Aclosing(
self._postprocess_handle_function_calls_async(
invocation_context, model_response_event, llm_request
)
) as agen:
async for event in agen:
event.id = Event.new_id()
yield event
return
# Calls the LLM.
model_response_event = Event(
id=Event.new_id(),
invocation_id=invocation_context.invocation_id,
author=invocation_context.agent.name,
branch=invocation_context.branch,
)
async with Aclosing(
self._call_llm_async(
invocation_context, llm_request, model_response_event
)
) as agen:
async for llm_response in agen:
# Postprocess after calling the LLM.
async with Aclosing(
self._postprocess_async(
invocation_context,
llm_request,
llm_response,
model_response_event,
)
) as agen:
async for event in agen:
# Update the mutable event id to avoid conflict
model_response_event.id = Event.new_id()
model_response_event.timestamp = datetime.datetime.now().timestamp()
yield event
async def _preprocess_async(
self, invocation_context: InvocationContext, llm_request: LlmRequest
) -> AsyncGenerator[Event, None]:
from ...agents.llm_agent import LlmAgent
agent = invocation_context.agent
if not isinstance(agent, LlmAgent):
raise TypeError(
f'Expected agent to be an LlmAgent, but got {type(agent)}'
)
# Runs processors.
for processor in self.request_processors:
async with Aclosing(
processor.run_async(invocation_context, llm_request)
) as agen:
async for event in agen:
yield event
# Resolve toolset authentication before tool listing.
# This ensures credentials are ready before get_tools() is called.
async with Aclosing(
self._resolve_toolset_auth(invocation_context, agent)
) as agen:
async for event in agen:
yield event
if invocation_context.end_invocation:
return
# Run processors for tools.
# We may need to wrap some built-in tools if there are other tools
# because the built-in tools cannot be used together with other tools.
# TODO(b/448114567): Remove once the workaround is no longer needed.
if not agent.tools:
return
multiple_tools = len(agent.tools) > 1
model = agent.canonical_model
for tool_union in agent.tools:
tool_context = ToolContext(invocation_context)
# If it's a toolset, process it first
if isinstance(tool_union, BaseToolset):
await tool_union.process_llm_request(
tool_context=tool_context, llm_request=llm_request
)
from ...agents.llm_agent import _convert_tool_union_to_tools
# Then process all tools from this tool union
tools = await _convert_tool_union_to_tools(
tool_union,
ReadonlyContext(invocation_context),
model,
multiple_tools,
)
for tool in tools:
await tool.process_llm_request(
tool_context=tool_context, llm_request=llm_request
)
async def _resolve_toolset_auth(
self,
invocation_context: InvocationContext,
agent: LlmAgent,
) -> AsyncGenerator[Event, None]:
"""Resolves authentication for toolsets before tool listing.
For each toolset with auth configured via get_auth_config():
- If a credential is available, populate auth_config.exchanged_auth_credential
- If no credential is available, yield an auth request event and interrupt
Args:
invocation_context: The invocation context.
agent: The LLM agent.
Yields:
Auth request events if any toolset needs authentication.
"""
if not agent.tools:
return
pending_auth_requests: dict[str, AuthConfig] = {}
callback_context = CallbackContext(invocation_context)
for tool_union in agent.tools:
if not isinstance(tool_union, BaseToolset):
continue
auth_config = tool_union.get_auth_config()
if not auth_config:
continue
try:
credential = await CredentialManager(auth_config).get_auth_credential(
callback_context
)
except ValueError as e:
# Validation errors from CredentialManager should be logged but not
# block the flow - the toolset may still work without auth
logger.warning(
'Failed to get auth credential for toolset %s: %s',
type(tool_union).__name__,
e,
)
credential = None
if credential:
# Populate in-place for toolset to use in get_tools()
auth_config.exchanged_auth_credential = credential
else:
# Need auth - will interrupt
toolset_id = (
f'{TOOLSET_AUTH_CREDENTIAL_ID_PREFIX}{type(tool_union).__name__}'
)
pending_auth_requests[toolset_id] = auth_config
if not pending_auth_requests:
return
# Build auth requests dict with generated auth requests
auth_requests = {
credential_id: AuthHandler(auth_config).generate_auth_request()
for credential_id, auth_config in pending_auth_requests.items()
}
# Yield event with auth requests using the shared helper
yield build_auth_request_event(
invocation_context,
auth_requests,
author=agent.name,
)
# Interrupt invocation
invocation_context.end_invocation = True
async def _postprocess_async(
self,
invocation_context: InvocationContext,
llm_request: LlmRequest,
llm_response: LlmResponse,
model_response_event: Event,
) -> AsyncGenerator[Event, None]:
"""Postprocess after calling the LLM.
Args:
invocation_context: The invocation context.
llm_request: The original LLM request.
llm_response: The LLM response from the LLM call.
model_response_event: A mutable event for the LLM response.
Yields:
A generator of events.
"""
# Runs processors.
async with Aclosing(
self._postprocess_run_processors_async(invocation_context, llm_response)
) as agen:
async for event in agen:
yield event
# Skip the model response event if there is no content and no error code.
# This is needed for the code executor to trigger another loop.
if (
not llm_response.content
and not llm_response.error_code
and not llm_response.interrupted
):
return
# Builds the event.
model_response_event = self._finalize_model_response_event(
llm_request, llm_response, model_response_event
)
yield model_response_event
# Handles function calls.
if model_response_event.get_function_calls():
# Skip partial function call events - they should not trigger execution
# since partial events are not saved to session (see runners.py).
# Only execute function calls in the non-partial events.
if model_response_event.partial:
return
async with Aclosing(
self._postprocess_handle_function_calls_async(
invocation_context, model_response_event, llm_request
)
) as agen:
async for event in agen:
yield event
async def _postprocess_live(
self,
invocation_context: InvocationContext,
llm_request: LlmRequest,
llm_response: LlmResponse,
model_response_event: Event,
) -> AsyncGenerator[Event, None]:
"""Postprocess after calling the LLM asynchronously.
Args:
invocation_context: The invocation context.
llm_request: The original LLM request.
llm_response: The LLM response from the LLM call.
model_response_event: A mutable event for the LLM response.
Yields:
A generator of events.
"""
# Runs processors.
async with Aclosing(
self._postprocess_run_processors_async(invocation_context, llm_response)
) as agen:
async for event in agen:
yield event
# Skip the model response event if there is no content and no error code.
# This is needed for the code executor to trigger another loop.
# But don't skip control events like turn_complete or transcription events.
if (
not llm_response.content
and not llm_response.error_code
and not llm_response.interrupted
and not llm_response.turn_complete
and not llm_response.input_transcription
and not llm_response.output_transcription
and not llm_response.usage_metadata
):
return
# Handle transcription events ONCE per llm_response, outside the event loop
if llm_response.input_transcription:
model_response_event.input_transcription = (
llm_response.input_transcription
)
model_response_event.partial = llm_response.partial
yield model_response_event
return
if llm_response.output_transcription:
model_response_event.output_transcription = (
llm_response.output_transcription
)
model_response_event.partial = llm_response.partial
yield model_response_event
return
# Flush audio caches based on control events using configurable settings
if invocation_context.run_config.save_live_blob:
flushed_events = await self._handle_control_event_flush(
invocation_context, llm_response
)
for event in flushed_events:
yield event
if flushed_events:
# NOTE: the return below is OK for now, because currently we only flush
# events on interrupted or turn_complete. turn_complete is a pure control
# event, and although an interrupted response may carry content, that
# content is ignorable because the model is already interrupted. If in the
# future we flush events in other cases that are not pure control events,
# we should not return here.
return
# Builds the event.
model_response_event = self._finalize_model_response_event(
llm_request, llm_response, model_response_event
)
yield model_response_event
# Handles function calls.
if model_response_event.get_function_calls():
function_response_event = await functions.handle_function_calls_live(
invocation_context, model_response_event, llm_request.tools_dict
)
# Always yield the function response event first
yield function_response_event
# Check if this is a set_model_response function response
if json_response := _output_schema_processor.get_structured_model_response(
function_response_event
):
# Create and yield a final model response event
final_event = (
_output_schema_processor.create_final_model_response_event(
invocation_context, json_response
)
)
yield final_event
async def _postprocess_run_processors_async(
self, invocation_context: InvocationContext, llm_response: LlmResponse
) -> AsyncGenerator[Event, None]:
for processor in self.response_processors:
async with Aclosing(
processor.run_async(invocation_context, llm_response)
) as agen:
async for event in agen:
yield event
async def _postprocess_handle_function_calls_async(
self,
invocation_context: InvocationContext,
function_call_event: Event,
llm_request: LlmRequest,
) -> AsyncGenerator[Event, None]:
if function_response_event := await functions.handle_function_calls_async(
invocation_context, function_call_event, llm_request.tools_dict
):
auth_event = functions.generate_auth_event(
invocation_context, function_response_event
)
if auth_event:
yield auth_event
tool_confirmation_event = functions.generate_request_confirmation_event(
invocation_context, function_call_event, function_response_event
)
if tool_confirmation_event:
yield tool_confirmation_event
# Always yield the function response event first
yield function_response_event
# Check if this is a set_model_response function response
if json_response := _output_schema_processor.get_structured_model_response(
function_response_event
):
# Create and yield a final model response event
final_event = (
_output_schema_processor.create_final_model_response_event(
invocation_context, json_response
)
)
yield final_event
transfer_to_agent = function_response_event.actions.transfer_to_agent
if transfer_to_agent:
agent_to_run = self._get_agent_to_run(
invocation_context, transfer_to_agent
)
async with Aclosing(agent_to_run.run_async(invocation_context)) as agen:
async for event in agen:
yield event
def _get_agent_to_run(
self, invocation_context: InvocationContext, agent_name: str
) -> BaseAgent:
root_agent = invocation_context.agent.root_agent
agent_to_run = root_agent.find_agent(agent_name)
if not agent_to_run:
raise ValueError(f'Agent {agent_name} not found in the agent tree.')
return agent_to_run
async def _call_llm_async(
self,
invocation_context: InvocationContext,
llm_request: LlmRequest,
model_response_event: Event,
) -> AsyncGenerator[LlmResponse, None]:
# Runs before_model_callback if it exists.
if response := await self._handle_before_model_callback(
invocation_context, llm_request, model_response_event
):
yield response
return
llm_request.config = llm_request.config or types.GenerateContentConfig()
llm_request.config.labels = llm_request.config.labels or {}
# Add agent name as a label to the llm_request. This will help with slicing
# the billing reports on a per-agent basis.
if _ADK_AGENT_NAME_LABEL_KEY not in llm_request.config.labels:
llm_request.config.labels[_ADK_AGENT_NAME_LABEL_KEY] = (
invocation_context.agent.name
)
# Calls the LLM.
llm = self.__get_llm(invocation_context)
async def _call_llm_with_tracing() -> AsyncGenerator[LlmResponse, None]:
span_context = contextlib.nullcontext()
if is_telemetry_enabled(invocation_context.agent):
span_context = tracer.start_as_current_span('call_llm')
with span_context as span:
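# When run_config.support_cfc is enabled, the call is routed through the
# live (bidi) pipeline via run_live(); otherwise a regular
# generate_content_async call is made below.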
if invocation_context.run_config.support_cfc:
invocation_context.live_request_queue = LiveRequestQueue()
responses_generator = self.run_live(invocation_context)
async with Aclosing(
self._run_and_handle_error(
responses_generator,
invocation_context,
llm_request,
model_response_event,
)
) as agen:
async for llm_response in agen:
# Runs after_model_callback if it exists.
if altered_llm_response := await self._handle_after_model_callback(
invocation_context, llm_response, model_response_event
):
llm_response = altered_llm_response
# Partial responses are only yielded in SSE streaming mode; complete
# responses are always yielded.
if (
invocation_context.run_config.streaming_mode
== StreamingMode.SSE
or not llm_response.partial
):
yield llm_response
if llm_response.turn_complete:
invocation_context.live_request_queue.close()
else:
# Check whether we can make this LLM call. If the current call pushes
# the counter beyond the configured maximum, execution stops right here
# and an exception is thrown.
invocation_context.increment_llm_call_count()
responses_generator = llm.generate_content_async(
llm_request,
stream=invocation_context.run_config.streaming_mode
== StreamingMode.SSE,
)
async with Aclosing(
self._run_and_handle_error(
responses_generator,
invocation_context,
llm_request,
model_response_event,
)
) as agen:
async for llm_response in agen:
trace_call_llm(
invocation_context,
model_response_event.id,
llm_request,
llm_response,
span,
)
# Runs after_model_callback if it exists.
if altered_llm_response := await self._handle_after_model_callback(
invocation_context, llm_response, model_response_event
):
llm_response = altered_llm_response
yield llm_response
async with Aclosing(_call_llm_with_tracing()) as agen:
async for event in agen:
yield event
async def _handle_before_model_callback(
self,
invocation_context: InvocationContext,
llm_request: LlmRequest,
model_response_event: Event,
) -> Optional[LlmResponse]:
from ...agents.llm_agent import LlmAgent
agent = invocation_context.agent
callback_context = CallbackContext(
invocation_context, event_actions=model_response_event.actions
)
# First run callbacks from the plugins.
callback_response = (
await invocation_context.plugin_manager.run_before_model_callback(
callback_context=callback_context,
llm_request=llm_request,
)
)
if callback_response:
return callback_response