Skip to content

Commit d2c4f48

Browse files
committed
fix: prompt token may be None in streaming mode
1 parent 352dd99 commit d2c4f48

2 files changed

Lines changed: 47 additions & 4 deletions

File tree

src/google/adk/telemetry/tracing.py

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -293,10 +293,11 @@ def trace_call_llm(
293293
span.set_attribute('gcp.vertex.agent.llm_response', {})
294294

295295
if llm_response.usage_metadata is not None:
296-
span.set_attribute(
297-
'gen_ai.usage.input_tokens',
298-
llm_response.usage_metadata.prompt_token_count,
299-
)
296+
if llm_response.usage_metadata.prompt_token_count is not None:
297+
span.set_attribute(
298+
'gen_ai.usage.input_tokens',
299+
llm_response.usage_metadata.prompt_token_count,
300+
)
300301
if llm_response.usage_metadata.candidates_token_count is not None:
301302
span.set_attribute(
302303
'gen_ai.usage.output_tokens',

tests/unittests/telemetry/test_spans.py

Lines changed: 42 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -155,6 +155,48 @@ async def test_trace_call_llm(monkeypatch, mock_span_fixture):
155155
)
156156

157157

158+
@pytest.mark.asyncio
159+
async def test_trace_call_llm_with_no_usage_metadata(monkeypatch, mock_span_fixture):
160+
"""Test trace_call_llm handles usage_metadata with no token counts without error."""
161+
monkeypatch.setattr(
162+
'opentelemetry.trace.get_current_span', lambda: mock_span_fixture
163+
)
164+
165+
agent = LlmAgent(name='test_agent')
166+
invocation_context = await _create_invocation_context(agent)
167+
llm_request = LlmRequest(
168+
model='gemini-pro',
169+
contents=[
170+
types.Content(
171+
role='user',
172+
parts=[types.Part(text='Hello, how are you?')],
173+
),
174+
],
175+
config=types.GenerateContentConfig(
176+
top_p=0.95,
177+
max_output_tokens=1024,
178+
),
179+
)
180+
llm_response = LlmResponse(
181+
turn_complete=True,
182+
finish_reason=types.FinishReason.STOP,
183+
usage_metadata=types.GenerateContentResponseUsageMetadata(),
184+
)
185+
trace_call_llm(invocation_context, 'test_event_id', llm_request, llm_response)
186+
187+
expected_calls = [
188+
mock.call('gen_ai.system', 'gcp.vertex.agent'),
189+
mock.call('gen_ai.request.top_p', 0.95),
190+
mock.call('gen_ai.request.max_tokens', 1024),
191+
mock.call('gcp.vertex.agent.llm_response', mock.ANY),
192+
mock.call('gen_ai.response.finish_reasons', ['stop']),
193+
]
194+
assert mock_span_fixture.set_attribute.call_count == 10
195+
mock_span_fixture.set_attribute.assert_has_calls(
196+
expected_calls, any_order=True
197+
)
198+
199+
158200
@pytest.mark.asyncio
159201
async def test_trace_call_llm_with_binary_content(
160202
monkeypatch, mock_span_fixture

0 commit comments

Comments
 (0)