forked from google/adk-python
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathfunctions.py
More file actions
1197 lines (1028 loc) · 38.5 KB
/
functions.py
File metadata and controls
1197 lines (1028 loc) · 38.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# Copyright 2026 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handles function calling for LLM flow."""
from __future__ import annotations
import asyncio
import base64
import binascii
from concurrent.futures import ThreadPoolExecutor
import copy
import functools
import inspect
import logging
import threading
from typing import Any
from typing import AsyncGenerator
from typing import cast
from typing import Dict
from typing import Optional
from typing import TYPE_CHECKING
import uuid
from google.adk.tools.computer_use.computer_use_tool import ComputerUseTool
from google.genai import types
from ...agents.active_streaming_tool import ActiveStreamingTool
from ...agents.live_request_queue import LiveRequestQueue
from ...auth.auth_tool import AuthConfig
from ...auth.auth_tool import AuthToolArguments
from ...events.event import Event
from ...events.event_actions import EventActions
from ...telemetry.tracing import trace_merged_tool_calls
from ...telemetry.tracing import trace_tool_call
from ...telemetry.tracing import tracer
from ...tools.base_tool import BaseTool
from ...tools.tool_confirmation import ToolConfirmation
from ...tools.tool_context import ToolContext
from ...utils.context_utils import Aclosing
if TYPE_CHECKING:
from ...agents.invocation_context import InvocationContext
from ...agents.llm_agent import LlmAgent
# Prefix for function-call IDs generated on the client side; such IDs are
# stripped again before content is sent back to the model.
AF_FUNCTION_CALL_ID_PREFIX = 'adk-'
# Reserved function-call names used for ADK's own request/response protocol.
REQUEST_EUC_FUNCTION_CALL_NAME = 'adk_request_credential'
REQUEST_CONFIRMATION_FUNCTION_CALL_NAME = 'adk_request_confirmation'
REQUEST_INPUT_FUNCTION_CALL_NAME = 'adk_request_input'
# Module-level logger in the 'google_adk' hierarchy.
logger = logging.getLogger('google_adk.' + __name__)
# Global thread pool executors for running tools in background threads.
# This prevents blocking tools from blocking the event loop in Live API mode.
# Key is max_workers, value is the executor.
_TOOL_THREAD_POOLS: dict[int, ThreadPoolExecutor] = {}
# Guards creation of entries in _TOOL_THREAD_POOLS.
_TOOL_THREAD_POOL_LOCK = threading.Lock()
def _is_live_request_queue_annotation(param: inspect.Parameter) -> bool:
  """Returns True when a parameter is annotated as LiveRequestQueue.

  Recognizes both the class object itself and the string form produced by
  ``from __future__ import annotations``.
  """
  annotation = param.annotation
  if annotation is LiveRequestQueue:
    return True
  return isinstance(annotation, str) and annotation == 'LiveRequestQueue'
def _get_tool_thread_pool(max_workers: int = 4) -> ThreadPoolExecutor:
  """Gets or creates a thread pool executor for tool execution.

  Executors are cached per max_workers value and shared across calls.

  Args:
    max_workers: Maximum number of worker threads in the pool.

  Returns:
    A ThreadPoolExecutor with the specified max_workers.
  """
  # Lock-free fast path; re-checked under the lock before creating.
  executor = _TOOL_THREAD_POOLS.get(max_workers)
  if executor is None:
    with _TOOL_THREAD_POOL_LOCK:
      executor = _TOOL_THREAD_POOLS.get(max_workers)
      if executor is None:
        executor = ThreadPoolExecutor(
            max_workers=max_workers, thread_name_prefix='adk_tool_executor'
        )
        _TOOL_THREAD_POOLS[max_workers] = executor
  return executor
def _is_sync_tool(tool: BaseTool) -> bool:
"""Checks if a tool's underlying function is synchronous."""
if not hasattr(tool, 'func'):
return False
func = tool.func
return not (
inspect.iscoroutinefunction(func)
or inspect.isasyncgenfunction(func)
or (
hasattr(func, '__call__')
and inspect.iscoroutinefunction(func.__call__)
)
)
async def _call_tool_in_thread_pool(
    tool: BaseTool,
    args: dict[str, Any],
    tool_context: ToolContext,
    max_workers: int = 4,
) -> Any:
  """Runs a tool in a thread pool to avoid blocking the event loop.

  For sync tools, this runs the tool's function directly in a background thread.
  For async tools, this creates a new event loop in the background thread and
  runs the async function there. This helps catch blocking I/O (like time.sleep,
  network calls, file I/O) that was mistakenly used inside async functions.

  Note: Due to Python's GIL, this does NOT help with pure Python CPU-bound code.
  Thread pool only helps when the GIL is released (blocking I/O, C extensions).

  Args:
    tool: The tool to execute.
    args: Arguments to pass to the tool.
    tool_context: The tool context.
    max_workers: Maximum number of worker threads in the pool.

  Returns:
    The result of running the tool.
  """
  from ...tools.function_tool import FunctionTool

  loop = asyncio.get_running_loop()
  executor = _get_tool_thread_pool(max_workers)
  if _is_sync_tool(tool):
    # Sentinel distinguishing "tool type unsupported in the thread pool" from
    # a tool that legitimately returned None. The previous implementation
    # compared the result against None, so a sync FunctionTool returning None
    # fell through to run_async below and was executed a SECOND time,
    # duplicating its side effects.
    unsupported = object()

    def run_sync_tool():
      if isinstance(tool, FunctionTool):
        args_to_call = tool._preprocess_args(args)
        signature = inspect.signature(tool.func)
        valid_params = {param for param in signature.parameters}
        if 'tool_context' in valid_params:
          args_to_call['tool_context'] = tool_context
        # Drop any args the function does not accept.
        args_to_call = {
            k: v for k, v in args_to_call.items() if k in valid_params
        }
        return tool.func(**args_to_call)
      # Other sync tool types can't be unwrapped here; signal fallback.
      return unsupported

    result = await loop.run_in_executor(executor, run_sync_tool)
    if result is not unsupported:
      return result
  else:
    # For async tools, run them in a new event loop in a background thread.
    # This helps when async functions contain blocking I/O (common user
    # mistake) that would otherwise block the main event loop.
    def run_async_tool_in_new_loop():
      # Create a new event loop for this thread.
      return asyncio.run(tool.run_async(args=args, tool_context=tool_context))

    return await loop.run_in_executor(executor, run_async_tool_in_new_loop)
  # Fall back to normal async execution for non-FunctionTool sync tools.
  return await tool.run_async(args=args, tool_context=tool_context)
def generate_client_function_call_id() -> str:
  """Returns a fresh client-side function call ID with the ADK prefix."""
  return AF_FUNCTION_CALL_ID_PREFIX + str(uuid.uuid4())
def populate_client_function_call_id(
    model_response_event: Event,
    function_call_id_cache: Optional[dict[str, str]] = None,
) -> None:
  """Assigns client-generated IDs to function calls that lack one.

  When a cache is supplied, IDs are stable across repeated invocations for
  the same response: the cache is keyed by (name, index) so two calls to the
  same function within a single response keep separate IDs.
  """
  function_calls = model_response_event.get_function_calls()
  if not function_calls:
    return
  for idx, function_call in enumerate(function_calls):
    if function_call.id:
      continue
    cache_key = f'{function_call.name}:{idx}'
    if (
        function_call_id_cache is not None
        and cache_key in function_call_id_cache
    ):
      function_call.id = function_call_id_cache[cache_key]
    else:
      function_call.id = generate_client_function_call_id()
      if function_call_id_cache is not None:
        function_call_id_cache[cache_key] = function_call.id
def remove_client_function_call_id(content: Optional[types.Content]) -> None:
  """Removes ADK-generated function call IDs from content before sending to LLM.

  Strips client-side function call/response IDs that start with 'adk-' prefix
  to avoid sending internal tracking IDs to the model.

  Args:
    content: Content containing function calls/responses to clean.
  """
  if not content or not content.parts:
    return
  for part in content.parts:
    call = part.function_call
    if call and call.id and call.id.startswith(AF_FUNCTION_CALL_ID_PREFIX):
      call.id = None
    response = part.function_response
    if (
        response
        and response.id
        and response.id.startswith(AF_FUNCTION_CALL_ID_PREFIX)
    ):
      response.id = None
def get_long_running_function_calls(
    function_calls: list[types.FunctionCall],
    tools_dict: dict[str, BaseTool],
) -> set[str]:
  """Returns the IDs of calls whose tools are marked as long-running.

  Calls referencing tools not present in tools_dict are ignored.
  """
  return {
      function_call.id
      for function_call in function_calls
      if function_call.name in tools_dict
      and tools_dict[function_call.name].is_long_running
  }
def build_auth_request_event(
    invocation_context: InvocationContext,
    auth_requests: Dict[str, AuthConfig],
    *,
    author: Optional[str] = None,
    role: Optional[str] = None,
) -> Event:
  """Builds an auth request event with function calls for each auth request.

  This is a shared helper used by both tool-level auth (when a tool requests
  auth during execution) and toolset-level auth (before tool listing).

  Args:
    invocation_context: The invocation context.
    auth_requests: Dict mapping function_call_id to AuthConfig.
    author: The event author. Defaults to agent name.
    role: The content role. Defaults to None.

  Returns:
    Event with auth request function calls.
  """
  parts = []
  long_running_tool_ids = set()
  for function_call_id, auth_config in auth_requests.items():
    # One adk_request_credential call per pending auth request; each gets a
    # client-generated ID and is flagged long-running.
    auth_call = types.FunctionCall(
        name=REQUEST_EUC_FUNCTION_CALL_NAME,
        id=generate_client_function_call_id(),
        args=AuthToolArguments(
            function_call_id=function_call_id,
            auth_config=auth_config,
        ).model_dump(exclude_none=True, by_alias=True),
    )
    long_running_tool_ids.add(auth_call.id)
    parts.append(types.Part(function_call=auth_call))
  return Event(
      invocation_id=invocation_context.invocation_id,
      author=author or invocation_context.agent.name,
      branch=invocation_context.branch,
      content=types.Content(parts=parts, role=role),
      long_running_tool_ids=long_running_tool_ids,
  )
def generate_auth_event(
    invocation_context: InvocationContext,
    function_response_event: Event,
) -> Optional[Event]:
  """Generates an auth request event from a function response event.

  This is used for tool-level auth where a tool requests credentials during
  execution.

  Args:
    invocation_context: The invocation context.
    function_response_event: The function response event with auth requests.

  Returns:
    Event with auth request function calls, or None if no auth requested.
  """
  requested = function_response_event.actions.requested_auth_configs
  if not requested:
    return None
  return build_auth_request_event(
      invocation_context,
      requested,
      role=function_response_event.content.role,
  )
def generate_request_confirmation_event(
    invocation_context: InvocationContext,
    function_call_event: Event,
    function_response_event: Event,
) -> Optional[Event]:
  """Generates a request confirmation event from a function response event."""
  requested = function_response_event.actions.requested_tool_confirmations
  if not requested:
    return None
  parts = []
  long_running_tool_ids = set()
  function_calls = function_call_event.get_function_calls()
  for function_call_id, tool_confirmation in requested.items():
    # Find the first original call matching this ID; skip unknown IDs.
    original_function_call = None
    for candidate in function_calls:
      if candidate.id == function_call_id:
        original_function_call = candidate
        break
    if original_function_call is None:
      continue
    confirmation_call = types.FunctionCall(
        name=REQUEST_CONFIRMATION_FUNCTION_CALL_NAME,
        id=generate_client_function_call_id(),
        args={
            'originalFunctionCall': original_function_call.model_dump(
                exclude_none=True, by_alias=True
            ),
            'toolConfirmation': tool_confirmation.model_dump(
                by_alias=True, exclude_none=True
            ),
        },
    )
    long_running_tool_ids.add(confirmation_call.id)
    parts.append(types.Part(function_call=confirmation_call))
  return Event(
      invocation_id=invocation_context.invocation_id,
      author=invocation_context.agent.name,
      branch=invocation_context.branch,
      content=types.Content(
          parts=parts, role=function_response_event.content.role
      ),
      long_running_tool_ids=long_running_tool_ids,
  )
async def handle_function_calls_async(
    invocation_context: InvocationContext,
    function_call_event: Event,
    tools_dict: dict[str, BaseTool],
    filters: Optional[set[str]] = None,
    tool_confirmation_dict: Optional[dict[str, ToolConfirmation]] = None,
) -> Optional[Event]:
  """Calls the functions and returns the function response event."""
  # Thin wrapper: extract the calls from the event and delegate.
  return await handle_function_call_list_async(
      invocation_context,
      function_call_event.get_function_calls(),
      tools_dict,
      filters,
      tool_confirmation_dict,
  )
async def handle_function_call_list_async(
    invocation_context: InvocationContext,
    function_calls: list[types.FunctionCall],
    tools_dict: dict[str, BaseTool],
    filters: Optional[set[str]] = None,
    tool_confirmation_dict: Optional[dict[str, ToolConfirmation]] = None,
) -> Optional[Event]:
  """Calls the functions in parallel and returns the merged response event.

  Args:
    invocation_context: The invocation context.
    function_calls: The function calls to execute.
    tools_dict: Mapping of tool name to tool instance.
    filters: Optional set of function-call IDs to execute; None/empty means
      execute all.
    tool_confirmation_dict: Optional mapping of function-call ID to its
      ToolConfirmation; may cover only a subset of the calls.

  Returns:
    The merged function response event, or None if nothing was executed.
  """
  agent = invocation_context.agent
  # Filter function calls (an empty or None filter keeps everything).
  filtered_calls = [
      fc for fc in function_calls if not filters or fc.id in filters
  ]
  if not filtered_calls:
    return None
  # Create tasks for parallel execution.
  tasks = [
      asyncio.create_task(
          _execute_single_function_call_async(
              invocation_context,
              function_call,
              tools_dict,
              agent,
              # Use .get() rather than indexing: the confirmation dict may
              # hold entries for only some of the calls, and a missing entry
              # must not raise KeyError.
              tool_confirmation_dict.get(function_call.id)
              if tool_confirmation_dict
              else None,
          )
      )
      for function_call in filtered_calls
  ]
  # Wait for all tasks to complete.
  function_response_events = await asyncio.gather(*tasks)
  # Filter out None results (e.g. long-running tools with no response yet).
  function_response_events = [
      event for event in function_response_events if event is not None
  ]
  if not function_response_events:
    return None
  merged_event = merge_parallel_function_response_events(
      function_response_events
  )
  if len(function_response_events) > 1:
    # This is needed for debug traces of parallel calls. The individual
    # response with tool.name is traced in __build_response_event; we drop
    # tool.name from the span name here as this is the merged event.
    with tracer.start_as_current_span('execute_tool (merged)'):
      trace_merged_tool_calls(
          response_event_id=merged_event.id,
          function_response_event=merged_event,
      )
  return merged_event
async def _execute_single_function_call_async(
    invocation_context: InvocationContext,
    function_call: types.FunctionCall,
    tools_dict: dict[str, BaseTool],
    agent: LlmAgent,
    tool_confirmation: Optional[ToolConfirmation] = None,
) -> Optional[Event]:
  """Execute a single function call with thread safety for state modifications.

  Response resolution order: plugin before_tool_callback, then the agent's
  canonical before_tool_callbacks, then the tool itself; afterwards the
  plugin after_tool_callback and the agent's canonical after_tool_callbacks
  may replace the result. Tool errors (including a failed tool lookup) are
  offered to on_tool_error callbacks before being re-raised.

  Args:
    invocation_context: The invocation context.
    function_call: The function call emitted by the model.
    tools_dict: Mapping of tool name to tool instance.
    agent: The agent whose canonical callbacks are applied.
    tool_confirmation: Optional confirmation attached to this call.

  Returns:
    The function response event, or None for a long-running tool that
    produced no response.
  """

  async def _run_on_tool_error_callbacks(
      *,
      tool: BaseTool,
      tool_args: dict[str, Any],
      tool_context: ToolContext,
      error: Exception,
  ) -> Optional[dict[str, Any]]:
    """Runs the on_tool_error_callbacks for the given tool."""
    # Plugin callbacks take precedence; first non-None response wins.
    error_response = (
        await invocation_context.plugin_manager.run_on_tool_error_callback(
            tool=tool,
            tool_args=tool_args,
            tool_context=tool_context,
            error=error,
        )
    )
    if error_response is not None:
      return error_response
    # Agent-level callbacks run in declaration order; callbacks may be sync
    # or async, so awaitables are awaited before inspection.
    for callback in agent.canonical_on_tool_error_callbacks:
      error_response = callback(
          tool=tool,
          args=tool_args,
          tool_context=tool_context,
          error=error,
      )
      if inspect.isawaitable(error_response):
        error_response = await error_response
      if error_response is not None:
        return error_response
    return None

  # Do not use "args" as the variable name, because it is a reserved keyword
  # in python debugger.
  # Make a deep copy to avoid being modified.
  function_args = (
      copy.deepcopy(function_call.args) if function_call.args else {}
  )
  tool_context = _create_tool_context(
      invocation_context, function_call, tool_confirmation
  )
  try:
    tool = _get_tool(function_call, tools_dict)
  except ValueError as tool_error:
    # Tool lookup failed (e.g. the model hallucinated the name). Offer the
    # error to the error callbacks with a placeholder tool; re-raise if no
    # callback produced a recovery response.
    tool = BaseTool(name=function_call.name, description='Tool not found')
    error_response = await _run_on_tool_error_callbacks(
        tool=tool,
        tool_args=function_args,
        tool_context=tool_context,
        error=tool_error,
    )
    if error_response is not None:
      return __build_response_event(
          tool, error_response, tool_context, invocation_context
      )
    else:
      raise tool_error

  async def _run_with_trace():
    nonlocal function_args
    # Step 1: Check if plugin before_tool_callback overrides the function
    # response.
    function_response = (
        await invocation_context.plugin_manager.run_before_tool_callback(
            tool=tool, tool_args=function_args, tool_context=tool_context
        )
    )
    # Step 2: If no overrides are provided from the plugins, further run the
    # canonical callback.
    if function_response is None:
      for callback in agent.canonical_before_tool_callbacks:
        function_response = callback(
            tool=tool, args=function_args, tool_context=tool_context
        )
        if inspect.isawaitable(function_response):
          function_response = await function_response
        if function_response:
          break
    # Step 3: Otherwise, proceed calling the tool normally.
    if function_response is None:
      try:
        function_response = await __call_tool_async(
            tool, args=function_args, tool_context=tool_context
        )
      except Exception as tool_error:
        # Tool execution failed; error callbacks may supply a recovery
        # response, otherwise the original error propagates.
        error_response = await _run_on_tool_error_callbacks(
            tool=tool,
            tool_args=function_args,
            tool_context=tool_context,
            error=tool_error,
        )
        if error_response is not None:
          function_response = error_response
        else:
          raise tool_error
    # Step 4: Check if plugin after_tool_callback overrides the function
    # response.
    altered_function_response = (
        await invocation_context.plugin_manager.run_after_tool_callback(
            tool=tool,
            tool_args=function_args,
            tool_context=tool_context,
            result=function_response,
        )
    )
    # Step 5: If no overrides are provided from the plugins, further run the
    # canonical after_tool_callbacks.
    if altered_function_response is None:
      for callback in agent.canonical_after_tool_callbacks:
        altered_function_response = callback(
            tool=tool,
            args=function_args,
            tool_context=tool_context,
            tool_response=function_response,
        )
        if inspect.isawaitable(altered_function_response):
          altered_function_response = await altered_function_response
        if altered_function_response:
          break
    # Step 6: If alternative response exists from after_tool_callback, use it
    # instead of the original function response.
    if altered_function_response is not None:
      function_response = altered_function_response
    if tool.is_long_running:
      # Allow long-running function to return None to not provide function
      # response.
      if not function_response:
        return None
    # Note: State deltas are not applied here - they are collected in
    # tool_context.actions.state_delta and applied later when the session
    # service processes the events
    # Builds the function response event.
    function_response_event = __build_response_event(
        tool, function_response, tool_context, invocation_context
    )
    return function_response_event

  with tracer.start_as_current_span(f'execute_tool {tool.name}'):
    function_response_event = None
    try:
      function_response_event = await _run_with_trace()
      return function_response_event
    finally:
      # Trace in finally so the call is recorded even when the tool raised
      # (function_response_event stays None in that case).
      trace_tool_call(
          tool=tool,
          args=function_args,
          function_response_event=function_response_event,
      )
async def handle_function_calls_live(
    invocation_context: InvocationContext,
    function_call_event: Event,
    tools_dict: dict[str, BaseTool],
) -> Optional[Event]:
  """Calls the functions and returns the function response event.

  Live-mode counterpart of handle_function_calls_async; streaming-tool state
  (invocation_context.active_streaming_tools) is guarded by a shared lock.

  Returns:
    The merged function response event, or None if there were no function
    calls or no responses.
  """
  from ...agents.llm_agent import LlmAgent

  agent = cast(LlmAgent, invocation_context.agent)
  function_calls = function_call_event.get_function_calls()
  if not function_calls:
    return None
  # Create async lock for active_streaming_tools modifications.
  streaming_lock = asyncio.Lock()
  # Create tasks for parallel execution.
  tasks = [
      asyncio.create_task(
          _execute_single_function_call_live(
              invocation_context,
              function_call,
              tools_dict,
              agent,
              streaming_lock,
          )
      )
      for function_call in function_calls
  ]
  # Wait for all tasks to complete.
  function_response_events = await asyncio.gather(*tasks)
  # Filter out None results.
  function_response_events = [
      event for event in function_response_events if event is not None
  ]
  if not function_response_events:
    return None
  merged_event = merge_parallel_function_response_events(
      function_response_events
  )
  if len(function_response_events) > 1:
    # this is needed for debug traces of parallel calls
    # individual response with tool.name is traced in __build_response_event
    # (we drop tool.name from span name here as this is merged event)
    with tracer.start_as_current_span('execute_tool (merged)'):
      trace_merged_tool_calls(
          response_event_id=merged_event.id,
          function_response_event=merged_event,
      )
  return merged_event
async def _execute_single_function_call_live(
    invocation_context: InvocationContext,
    function_call: types.FunctionCall,
    tools_dict: dict[str, BaseTool],
    agent: LlmAgent,
    streaming_lock: asyncio.Lock,
) -> Optional[Event]:
  """Execute a single function call for live mode with thread safety.

  Unlike the async variant, the before-tool stage runs only the agent's
  canonical callbacks (no plugin before callback) and the tool itself is
  invoked via _process_function_live_helper, which handles streaming tools.

  Args:
    invocation_context: The invocation context.
    function_call: The function call emitted by the model.
    tools_dict: Mapping of tool name to tool instance.
    agent: The agent whose canonical callbacks are applied.
    streaming_lock: Lock guarding active_streaming_tools mutations.

  Returns:
    The function response event, or None for a long-running tool that
    produced no response.
  """

  async def _run_on_tool_error_callbacks(
      *,
      tool: BaseTool,
      tool_args: dict[str, Any],
      tool_context: ToolContext,
      error: Exception,
  ) -> Optional[dict[str, Any]]:
    """Runs the on_tool_error_callbacks for the given tool."""
    # Plugin callbacks take precedence; first non-None response wins.
    error_response = (
        await invocation_context.plugin_manager.run_on_tool_error_callback(
            tool=tool,
            tool_args=tool_args,
            tool_context=tool_context,
            error=error,
        )
    )
    if error_response is not None:
      return error_response
    for callback in agent.canonical_on_tool_error_callbacks:
      error_response = callback(
          tool=tool,
          args=tool_args,
          tool_context=tool_context,
          error=error,
      )
      if inspect.isawaitable(error_response):
        error_response = await error_response
      if error_response is not None:
        return error_response
    return None

  # Do not use "args" as the variable name, because it is a reserved keyword
  # in python debugger.
  # Make a deep copy to avoid being modified.
  function_args = (
      copy.deepcopy(function_call.args) if function_call.args else {}
  )
  tool_context = _create_tool_context(invocation_context, function_call)
  try:
    tool = _get_tool(function_call, tools_dict)
  except ValueError as tool_error:
    # Tool lookup failed; offer the error to callbacks with a placeholder
    # tool, otherwise re-raise.
    tool = BaseTool(name=function_call.name, description='Tool not found')
    error_response = await _run_on_tool_error_callbacks(
        tool=tool,
        tool_args=function_args,
        tool_context=tool_context,
        error=tool_error,
    )
    if error_response is not None:
      return __build_response_event(
          tool, error_response, tool_context, invocation_context
      )
    raise tool_error

  async def _run_with_trace():
    nonlocal function_args
    # Do not use "args" as the variable name, because it is a reserved keyword
    # in python debugger.
    # Make a deep copy to avoid being modified.
    function_response = None
    # Handle before_tool_callbacks - iterate through the canonical callback
    # list
    for callback in agent.canonical_before_tool_callbacks:
      function_response = callback(
          tool=tool, args=function_args, tool_context=tool_context
      )
      if inspect.isawaitable(function_response):
        function_response = await function_response
      if function_response:
        break
    if function_response is None:
      function_response = await _process_function_live_helper(
          tool,
          tool_context,
          function_call,
          function_args,
          invocation_context,
          streaming_lock,
      )
    # Calls after_tool_callback if it exists.
    altered_function_response = None
    for callback in agent.canonical_after_tool_callbacks:
      altered_function_response = callback(
          tool=tool,
          args=function_args,
          tool_context=tool_context,
          tool_response=function_response,
      )
      if inspect.isawaitable(altered_function_response):
        altered_function_response = await altered_function_response
      if altered_function_response:
        break
    if altered_function_response is not None:
      function_response = altered_function_response
    if tool.is_long_running:
      # Allow async function to return None to not provide function response.
      if not function_response:
        return None
    # Note: State deltas are not applied here - they are collected in
    # tool_context.actions.state_delta and applied later when the session
    # service processes the events
    # Builds the function response event.
    function_response_event = __build_response_event(
        tool, function_response, tool_context, invocation_context
    )
    return function_response_event

  with tracer.start_as_current_span(f'execute_tool {tool.name}'):
    function_response_event = None
    try:
      function_response_event = await _run_with_trace()
      return function_response_event
    finally:
      # Trace in finally so the call is recorded even on error.
      trace_tool_call(
          tool=tool,
          args=function_args,
          function_response_event=function_response_event,
      )
async def _process_function_live_helper(
    tool,
    tool_context,
    function_call,
    function_args,
    invocation_context,
    streaming_lock: asyncio.Lock,
):
  """Executes a function call in live (bidi streaming) mode.

  Handles three cases:
    1. The special `stop_streaming` call: cancels the named streaming-tool
       task and clears its registration.
    2. Streaming tools (async generator functions): started as a background
       task that pushes results onto the live request queue while an
       immediate "pending" response is returned to the model.
    3. Regular tools: executed directly, optionally in a thread pool when
       configured, to keep blocking tools off the event loop.

  Returns:
    The function response dict for the call.
  """
  function_response = None
  # Check if this is a stop_streaming function call.
  if (
      function_call.name == 'stop_streaming'
      and 'function_name' in function_args
  ):
    function_name = function_args['function_name']
    # Thread-safe access to active_streaming_tools.
    async with streaming_lock:
      active_tasks = invocation_context.active_streaming_tools
      if (
          active_tasks
          and function_name in active_tasks
          and active_tasks[function_name].task
          and not active_tasks[function_name].task.done()
      ):
        task = active_tasks[function_name].task
      else:
        task = None
    if task:
      task.cancel()
      try:
        # Wait for the task to be cancelled
        await asyncio.wait_for(task, timeout=1.0)
      except (asyncio.CancelledError, asyncio.TimeoutError):
        # Log the specific condition through the module logger (the previous
        # code called the root `logging` module directly, bypassing the
        # 'google_adk' logger hierarchy used everywhere else in this file).
        if task.cancelled():
          logger.info('Task %s was cancelled successfully', function_name)
        elif task.done():
          logger.info('Task %s completed during cancellation', function_name)
        else:
          logger.warning(
              'Task %s might still be running after cancellation timeout',
              function_name,
          )
          function_response = {
              'status': f'The task is not cancelled yet for {function_name}.'
          }
    if not function_response:
      # Clean up the reference under lock.
      async with streaming_lock:
        if (
            invocation_context.active_streaming_tools
            and function_name in invocation_context.active_streaming_tools
        ):
          invocation_context.active_streaming_tools[function_name].task = None
          invocation_context.active_streaming_tools[function_name].stream = (
              None
          )
          function_response = {
              'status': (
                  f'Successfully stopped streaming function {function_name}'
              )
          }
        else:
          function_response = {
              'status': (
                  f'No active streaming function named {function_name} found'
              )
          }
  elif hasattr(tool, 'func') and inspect.isasyncgenfunction(tool.func):
    # for streaming tool use case
    # we require the function to be an async generator function
    async def run_tool_and_update_queue(tool, function_args, tool_context):
      try:
        async with Aclosing(
            __call_tool_live(
                tool=tool,
                args=function_args,
                tool_context=tool_context,
                invocation_context=invocation_context,
            )
        ) as agen:
          async for result in agen:
            updated_content = types.Content(
                role='user',
                parts=[
                    types.Part.from_text(
                        text=f'Function {tool.name} returned: {result}'
                    )
                ],
            )
            invocation_context.live_request_queue.send_content(updated_content)
      except asyncio.CancelledError:
        raise  # Re-raise to properly propagate the cancellation

    task = asyncio.create_task(
        run_tool_and_update_queue(tool, function_args, tool_context)
    )
    async with streaming_lock:
      if invocation_context.active_streaming_tools is None:
        invocation_context.active_streaming_tools = {}
      if tool.name in invocation_context.active_streaming_tools:
        invocation_context.active_streaming_tools[tool.name].task = task
      else:
        # Register the streaming tool lazily when the model calls it.
        invocation_context.active_streaming_tools[tool.name] = (
            ActiveStreamingTool(task=task)
        )
        logger.debug('Lazily registered streaming tool: %s', tool.name)
      # For input-streaming tools (those with `input_stream:
      # LiveRequestQueue`), create a dedicated LiveRequestQueue so
      # _send_to_model starts duplicating data to it. This also
      # handles re-invocation after stop_streaming reset .stream
      # to None.
      sig = inspect.signature(tool.func)
      if (
          'input_stream' in sig.parameters
          and _is_live_request_queue_annotation(sig.parameters['input_stream'])
      ):
        invocation_context.active_streaming_tools[tool.name].stream = (
            LiveRequestQueue()
        )
    # Immediately return a pending response.
    # This is required by current live model.
    function_response = {
        'status': (
            'The function is running asynchronously and the results are'
            ' pending.'
        )
    }
  else:
    # Check if we should run tools in thread pool to avoid blocking event loop
    thread_pool_config = invocation_context.run_config.tool_thread_pool_config
    if thread_pool_config is not None:
      function_response = await _call_tool_in_thread_pool(
          tool,
          args=function_args,
          tool_context=tool_context,
          max_workers=thread_pool_config.max_workers,
      )
    else:
      function_response = await __call_tool_async(
          tool, args=function_args, tool_context=tool_context
      )
  return function_response
def _get_tool(
function_call: types.FunctionCall, tools_dict: dict[str, BaseTool]
):
"""Returns the tool corresponding to the function call."""
if function_call.name not in tools_dict:
available = list(tools_dict.keys())
error_msg = (
f"Tool '{function_call.name}' not found.\nAvailable tools:"
f" {', '.join(available)}\n\nPossible causes:\n 1. LLM hallucinated"
' the function name - review agent instruction clarity\n 2. Tool not'
' registered - verify agent.tools list\n 3. Name mismatch - check for'
' typos\n\nSuggested fixes:\n - Review agent instruction to ensure'
' tool usage is clear\n - Verify tool is included in agent.tools'
' list\n - Check for typos in function name'
)
raise ValueError(error_msg)
return tools_dict[function_call.name]
def _create_tool_context(
    invocation_context: InvocationContext,
    function_call: types.FunctionCall,
    tool_confirmation: Optional[ToolConfirmation] = None,
):
  """Creates a ToolContext bound to this invocation and function call."""
  context = ToolContext(
      invocation_context=invocation_context,
      function_call_id=function_call.id,
      tool_confirmation=tool_confirmation,
  )
  return context
def _get_tool_and_context(
invocation_context: InvocationContext,
function_call: types.FunctionCall,
tools_dict: dict[str, BaseTool],
tool_confirmation: Optional[ToolConfirmation] = None,
):
"""Returns the tool and tool context corresponding to the function call."""
tool = _get_tool(function_call, tools_dict)