@@ -12,32 +12,52 @@
 
 class MockOpenAIResponse:
     """Mock OpenAI API response"""
-    def __init__(self, response_id="chatcmpl-123", model="gpt-4o-mini", content="Hello!", input_tokens=10, output_tokens=20):
+
+    def __init__(
+        self,
+        response_id="chatcmpl-123",
+        model="gpt-4o-mini",
+        content="Hello!",
+        input_tokens=10,
+        output_tokens=20,
+    ):
         self.id = response_id
         self.model = model
         self.choices = [
-            type('Choice', (), {
-                'message': type('Message', (), {'content': content})(),
-                'finish_reason': 'stop'
-            })()
+            type(
+                "Choice",
+                (),
+                {"message": type("Message", (), {"content": content})(), "finish_reason": "stop"},
+            )()
         ]
-        self.usage = type('Usage', (), {
-            'prompt_tokens': input_tokens,
-            'completion_tokens': output_tokens,
-            'total_tokens': input_tokens + output_tokens
-        })()
+        self.usage = type(
+            "Usage",
+            (),
+            {
+                "prompt_tokens": input_tokens,
+                "completion_tokens": output_tokens,
+                "total_tokens": input_tokens + output_tokens,
+            },
+        )()
 
 
 class MockAnthropicResponse:
     """Mock Anthropic API response"""
-    def __init__(self, response_id="msg_123", model="claude-3-haiku", content="Hello!", input_tokens=10, output_tokens=20):
+
+    def __init__(
+        self,
+        response_id="msg_123",
+        model="claude-3-haiku",
+        content="Hello!",
+        input_tokens=10,
+        output_tokens=20,
+    ):
         self.id = response_id
         self.model = model
-        self.content = [type('Content', (), {'text': content})()]
-        self.usage = type('Usage', (), {
-            'input_tokens': input_tokens,
-            'output_tokens': output_tokens
-        })()
+        self.content = [type("Content", (), {"text": content})()]
+        self.usage = type(
+            "Usage", (), {"input_tokens": input_tokens, "output_tokens": output_tokens}
+        )()
 
 
 class TestObserveDecorator:
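Aside, not part of the diff: both mocks duck-type the provider SDK response objects by building throwaway classes with `type()`, so the `@observe` decorator under test can read the same attributes a real OpenAI or Anthropic client would return. A minimal sketch of the shape they expose, using only what is defined above:

    resp = MockOpenAIResponse(content="Hi", input_tokens=3, output_tokens=4)
    assert resp.choices[0].message.content == "Hi"   # OpenAI-style payload
    assert resp.choices[0].finish_reason == "stop"
    assert resp.usage.total_tokens == 7              # prompt_tokens + completion_tokens

    claude = MockAnthropicResponse(content="Hi")
    assert claude.content[0].text == "Hi"            # Anthropic-style payload
    assert claude.usage.output_tokens == 20          # default from __init__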
@@ -176,7 +196,7 @@ def call_openai(prompt: str): |
                 model="gpt-4o-mini",
                 content="Hello from OpenAI!",
                 input_tokens=10,
-                output_tokens=25
+                output_tokens=25,
             )
 
         result = call_openai("Say hello")
@@ -209,7 +229,7 @@ def call_anthropic(prompt: str): |
                 model="claude-3-haiku",
                 content="Hello from Claude!",
                 input_tokens=15,
-                output_tokens=30
+                output_tokens=30,
             )
 
         result = call_anthropic("Say hello")
@@ -375,11 +395,7 @@ def test_decorator_cost_calculation(self, tracer_setup): |
 
         @observe(pricing=pricing)
         def expensive_call():
-            return MockOpenAIResponse(
-                model="gpt-4o",
-                input_tokens=1000,
-                output_tokens=500
-            )
+            return MockOpenAIResponse(model="gpt-4o", input_tokens=1000, output_tokens=500)
 
         expensive_call()
 
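Context for the cost hunk above, hedged: the `pricing` fixture is defined earlier in the test file and not shown in this diff, so the rates below are purely illustrative. With per-token rates, the expected cost for `expensive_call()` is simple arithmetic over the mocked usage:

    # Illustrative rates only -- NOT the real fixture values, which this diff does not show.
    pricing = {"gpt-4o": {"input": 0.005 / 1000, "output": 0.015 / 1000}}  # USD per token

    rates = pricing["gpt-4o"]
    cost = 1000 * rates["input"] + 500 * rates["output"]  # token counts from the mock above
    # 1000 * 0.000005 + 500 * 0.000015 == 0.0125 USD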
@@ -443,10 +459,7 @@ async def test_async_decorator_with_llm_response(self, tracer_setup): |
         @observe(pricing=pricing, tags=["async", "test"])
         async def async_llm_call(prompt: str):
             # Simulate async API call
-            return MockOpenAIResponse(
-                response_id="chatcmpl-async123",
-                content="Async response"
-            )
+            return MockOpenAIResponse(response_id="chatcmpl-async123", content="Async response")
 
         result = await async_llm_call("test prompt")
         assert result.id == "chatcmpl-async123"
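A final aside, and an assumption rather than this library's confirmed internals: for the same decorator to wrap both `call_openai` and the coroutine `async_llm_call`, `@observe` presumably checks whether its target is a coroutine function and returns a matching wrapper. The standard pattern for such a dual-mode decorator looks like:

    import functools
    import inspect

    def observe_sketch(func):
        # Hypothetical stand-in for @observe; argument handling (pricing, tags) omitted.
        if inspect.iscoroutinefunction(func):
            @functools.wraps(func)
            async def async_wrapper(*args, **kwargs):
                result = await func(*args, **kwargs)
                # a real implementation would record the span / token usage here
                return result
            return async_wrapper

        @functools.wraps(func)
        def sync_wrapper(*args, **kwargs):
            result = func(*args, **kwargs)
            # a real implementation would record the span / token usage here
            return result
        return sync_wrapper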
|