
Commit 021378e

refactor: Extract _set_finish_reason helper to eliminate code duplication
Extract the finish_reason mapping logic into a reusable helper function to address code duplication feedback from the Gemini Code Assist review on PR google#3698.

Changes:
- Added a _set_finish_reason(response, finish_reason) helper function
- Replaced three duplicate mapping blocks with a single helper call:
  * Non-streaming path (line ~880)
  * Streaming tool-call path (line ~1387)
  * Streaming text-only path (line ~1409)
- Preserved all existing comments and behavior
- Improved maintainability: a single source of truth for the mapping logic

Addresses: https://github.com/google/adk-python/pull/3698#discussion_r18xxxxx
1 parent 69bb67f commit 021378e
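The diff below references `_FINISH_REASON_MAPPING`, which is defined elsewhere in lite_llm.py and is not part of this commit. As a minimal sketch of the shape the new helper assumes (the keys and enum values here are illustrative assumptions, not the module's actual table):

```python
from google.genai import types

# Illustrative sketch only: the real _FINISH_REASON_MAPPING lives in
# lite_llm.py and is not shown in this diff. Keys are lowercased LiteLLM
# finish_reason strings; values are google.genai FinishReason enums.
_FINISH_REASON_MAPPING = {
    "stop": types.FinishReason.STOP,
    "length": types.FinishReason.MAX_TOKENS,
    "content_filter": types.FinishReason.SAFETY,
}
```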

1 file changed

src/google/adk/models/lite_llm.py

Lines changed: 22 additions & 21 deletions
@@ -87,6 +87,25 @@
 )
 
 
+def _set_finish_reason(
+    response: types.LlmResponse, finish_reason: Any
+) -> None:
+  """Sets the finish reason on the LlmResponse, mapping from string if necessary.
+
+  Args:
+    response: The LlmResponse object to update.
+    finish_reason: The finish reason value, either a FinishReason enum or a string
+      that needs to be mapped.
+  """
+  if isinstance(finish_reason, types.FinishReason):
+    response.finish_reason = finish_reason
+  else:
+    finish_reason_str = str(finish_reason).lower()
+    response.finish_reason = _FINISH_REASON_MAPPING.get(
+        finish_reason_str, types.FinishReason.OTHER
+    )
+
+
 def _decode_inline_text_data(raw_bytes: bytes) -> str:
   """Decodes inline file bytes that represent textual content."""
   try:
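A hedged usage sketch of the new helper, assuming the `types` import used in lite_llm.py and a response object with a writable `finish_reason` field (none of these lines appear in the commit):

```python
response = types.LlmResponse()

# Enum path: a FinishReason already produced by LiteLLM (e.g., for Gemini)
# is assigned as-is.
_set_finish_reason(response, types.FinishReason.STOP)
assert response.finish_reason == types.FinishReason.STOP

# String path: provider strings are lowercased and looked up in
# _FINISH_REASON_MAPPING; anything absent from the table becomes OTHER.
_set_finish_reason(response, "STOP")  # "stop" -> FinishReason.STOP, assuming it is mapped
_set_finish_reason(response, "no_such_reason")
assert response.finish_reason == types.FinishReason.OTHER
```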
@@ -861,13 +880,7 @@ def _model_response_to_generate_content_response(
   if finish_reason:
     # If LiteLLM already provides a FinishReason enum (e.g., for Gemini), use
     # it directly. Otherwise, map the finish_reason string to the enum.
-    if isinstance(finish_reason, types.FinishReason):
-      llm_response.finish_reason = finish_reason
-    else:
-      finish_reason_str = str(finish_reason).lower()
-      llm_response.finish_reason = _FINISH_REASON_MAPPING.get(
-          finish_reason_str, types.FinishReason.OTHER
-      )
+    _set_finish_reason(llm_response, finish_reason)
   if response.get("usage", None):
     llm_response.usage_metadata = types.GenerateContentResponseUsageMetadata(
         prompt_token_count=response["usage"].get("prompt_tokens", 0),
@@ -1371,13 +1384,7 @@ async def generate_content_async(
         # to ensure consistent behavior across both streaming and non-streaming modes.
         # Without this, Claude and other models via LiteLLM would hit stop conditions
         # that the agent couldn't properly handle.
-        if isinstance(finish_reason, types.FinishReason):
-          aggregated_llm_response_with_tool_call.finish_reason = finish_reason
-        else:
-          finish_reason_str = str(finish_reason).lower()
-          aggregated_llm_response_with_tool_call.finish_reason = _FINISH_REASON_MAPPING.get(
-              finish_reason_str, types.FinishReason.OTHER
-          )
+        _set_finish_reason(aggregated_llm_response_with_tool_call, finish_reason)
         text = ""
         reasoning_parts = []
         function_calls.clear()
@@ -1399,13 +1406,7 @@
         # to ensure consistent behavior across both streaming and non-streaming modes.
         # Without this, Claude and other models via LiteLLM would hit stop conditions
        # that the agent couldn't properly handle.
-        if isinstance(finish_reason, types.FinishReason):
-          aggregated_llm_response.finish_reason = finish_reason
-        else:
-          finish_reason_str = str(finish_reason).lower()
-          aggregated_llm_response.finish_reason = _FINISH_REASON_MAPPING.get(
-              finish_reason_str, types.FinishReason.OTHER
-          )
+        _set_finish_reason(aggregated_llm_response, finish_reason)
         text = ""
         reasoning_parts = []
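With the mapping consolidated, one parametrized test can cover behavior that previously lived in three places. A hypothetical pytest sketch (not part of this commit; the import paths and the "stop" mapping are assumptions):

```python
import pytest
from google.genai import types

# Assumed import locations; adjust to the actual adk-python module layout.
from google.adk.models.lite_llm import _set_finish_reason
from google.adk.models.llm_response import LlmResponse


@pytest.mark.parametrize(
    "raw, expected",
    [
        (types.FinishReason.STOP, types.FinishReason.STOP),  # enum passthrough
        ("STOP", types.FinishReason.STOP),  # lowercased, then looked up (assumes "stop" is mapped)
        ("definitely_unknown", types.FinishReason.OTHER),  # fallback
    ],
)
def test_set_finish_reason(raw, expected):
  response = LlmResponse()
  _set_finish_reason(response, raw)
  assert response.finish_reason == expected
```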
