@@ -155,6 +155,48 @@ async def test_trace_call_llm(monkeypatch, mock_span_fixture):
155155 )
156156
157157
@pytest.mark.asyncio
async def test_trace_call_llm_with_no_usage_metadata(
    monkeypatch, mock_span_fixture
):
  """Test trace_call_llm sets telemetry attributes when usage metadata is empty.

  The LlmResponse carries an empty GenerateContentResponseUsageMetadata, so no
  token counts are available; the request/response attributes listed below must
  still be recorded on the current span.
  """
  # Route the tracer's current-span lookup to the mock so every
  # set_attribute call is captured for inspection.
  monkeypatch.setattr(
      'opentelemetry.trace.get_current_span', lambda: mock_span_fixture
  )

  agent = LlmAgent(name='test_agent')
  invocation_context = await _create_invocation_context(agent)
  llm_request = LlmRequest(
      model='gemini-pro',
      contents=[
          types.Content(
              role='user',
              parts=[types.Part(text='Hello, how are you?')],
          ),
      ],
      config=types.GenerateContentConfig(
          top_p=0.95,
          max_output_tokens=1024,
      ),
  )
  llm_response = LlmResponse(
      turn_complete=True,
      finish_reason=types.FinishReason.STOP,
      # Deliberately empty: exercises the no-token-counts path.
      usage_metadata=types.GenerateContentResponseUsageMetadata(),
  )
  trace_call_llm(invocation_context, 'test_event_id', llm_request, llm_response)

  expected_calls = [
      mock.call('gen_ai.system', 'gcp.vertex.agent'),
      mock.call('gen_ai.request.top_p', 0.95),
      mock.call('gen_ai.request.max_tokens', 1024),
      mock.call('gcp.vertex.agent.llm_response', mock.ANY),
      mock.call('gen_ai.response.finish_reasons', ['stop']),
  ]
  # 10 attributes are set in total; the five above must be among them,
  # in any order.
  assert mock_span_fixture.set_attribute.call_count == 10
  mock_span_fixture.set_attribute.assert_has_calls(
      expected_calls, any_order=True
  )
199+
158200@pytest .mark .asyncio
159201async def test_trace_call_llm_with_binary_content (
160202 monkeypatch , mock_span_fixture
0 commit comments