Skip to content

Commit 86bf46d

Browse files
Format code with Black to fix CI linting failures
All tests were passing, but CI was failing on the Black format check. Formatted 5 files to comply with Black style: examples/send_to_last9.py, tests/test_context.py, tests/test_decorators.py, tests/test_e2e.py, and tests/test_core.py. Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
1 parent 98fc331 commit 86bf46d

5 files changed

Lines changed: 121 additions & 82 deletions

File tree

examples/send_to_last9.py

Lines changed: 10 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -30,6 +30,7 @@
3030
# Import OTLP exporter
3131
try:
3232
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
33+
3334
OTLP_AVAILABLE = True
3435
except ImportError:
3536
print("❌ OTLP exporter not installed!")
@@ -74,11 +75,13 @@ def setup_last9_export():
7475
print(f" Authentication: {'✅ Configured' if api_key else '❌ Missing'}")
7576

7677
# Create resource with service identification
77-
resource = Resource.create({
78-
SERVICE_NAME: "last9-genai-python-example",
79-
SERVICE_VERSION: "1.0.0",
80-
"deployment.environment": os.getenv("ENVIRONMENT", "development"),
81-
})
78+
resource = Resource.create(
79+
{
80+
SERVICE_NAME: "last9-genai-python-example",
81+
SERVICE_VERSION: "1.0.0",
82+
"deployment.environment": os.getenv("ENVIRONMENT", "development"),
83+
}
84+
)
8285

8386
# Create tracer provider with resource
8487
provider = TracerProvider(resource=resource)
@@ -231,10 +234,7 @@ def check_account():
231234

232235
# LLM call
233236
print(" 🤖 Generating support response...")
234-
response = chat_with_llm(
235-
"gpt-4o",
236-
f"Generate support response for {account_info}"
237-
)
237+
response = chat_with_llm("gpt-4o", f"Generate support response for {account_info}")
238238

239239
print(f"\n🤖 Support response: {response['content'][:80]}...\n")
240240

@@ -283,6 +283,7 @@ def main():
283283
except Exception as e:
284284
print(f"\n❌ Error: {e}")
285285
import traceback
286+
286287
traceback.print_exc()
287288
sys.exit(1)
288289

tests/test_context.py

Lines changed: 3 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -353,10 +353,9 @@ def thread_function(thread_id: int):
353353
with conversation_context(conversation_id=f"conv_{thread_id}"):
354354
with tracer.start_as_current_span(f"span_{thread_id}"):
355355
context = get_current_context()
356-
results.append({
357-
"thread_id": thread_id,
358-
"conversation_id": context.get("conversation_id")
359-
})
356+
results.append(
357+
{"thread_id": thread_id, "conversation_id": context.get("conversation_id")}
358+
)
360359

361360
# Create multiple threads with different contexts
362361
threads = []

tests/test_core.py

Lines changed: 5 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -655,9 +655,10 @@ def test_add_tool_call_events(self, tracer_setup):
655655
call_event = events[0]
656656
assert call_event.name == EventNames.GEN_AI_TOOL_CALL
657657
assert call_event.attributes[GenAIAttributes.TOOL_NAME] == "search"
658-
assert '"query": "python tutorial"' in call_event.attributes[
659-
Last9Attributes.FUNCTION_CALL_ARGUMENTS
660-
]
658+
assert (
659+
'"query": "python tutorial"'
660+
in call_event.attributes[Last9Attributes.FUNCTION_CALL_ARGUMENTS]
661+
)
661662

662663
# Check tool result event
663664
result_event = events[1]
@@ -737,9 +738,7 @@ def test_create_tool_span_with_workflow(self, tracer_setup):
737738
"""Test creating tool span with workflow"""
738739
tracer, memory_exporter = tracer_setup
739740

740-
span = create_tool_span(
741-
tracer, "api_call", tool_type="external_api", workflow_id="wf_tool"
742-
)
741+
span = create_tool_span(tracer, "api_call", tool_type="external_api", workflow_id="wf_tool")
743742
span.end()
744743

745744
spans = memory_exporter.get_finished_spans()

tests/test_decorators.py

Lines changed: 40 additions & 27 deletions
Original file line number | Diff line number | Diff line change
@@ -12,32 +12,52 @@
1212

1313
class MockOpenAIResponse:
1414
"""Mock OpenAI API response"""
15-
def __init__(self, response_id="chatcmpl-123", model="gpt-4o-mini", content="Hello!", input_tokens=10, output_tokens=20):
15+
16+
def __init__(
17+
self,
18+
response_id="chatcmpl-123",
19+
model="gpt-4o-mini",
20+
content="Hello!",
21+
input_tokens=10,
22+
output_tokens=20,
23+
):
1624
self.id = response_id
1725
self.model = model
1826
self.choices = [
19-
type('Choice', (), {
20-
'message': type('Message', (), {'content': content})(),
21-
'finish_reason': 'stop'
22-
})()
27+
type(
28+
"Choice",
29+
(),
30+
{"message": type("Message", (), {"content": content})(), "finish_reason": "stop"},
31+
)()
2332
]
24-
self.usage = type('Usage', (), {
25-
'prompt_tokens': input_tokens,
26-
'completion_tokens': output_tokens,
27-
'total_tokens': input_tokens + output_tokens
28-
})()
33+
self.usage = type(
34+
"Usage",
35+
(),
36+
{
37+
"prompt_tokens": input_tokens,
38+
"completion_tokens": output_tokens,
39+
"total_tokens": input_tokens + output_tokens,
40+
},
41+
)()
2942

3043

3144
class MockAnthropicResponse:
3245
"""Mock Anthropic API response"""
33-
def __init__(self, response_id="msg_123", model="claude-3-haiku", content="Hello!", input_tokens=10, output_tokens=20):
46+
47+
def __init__(
48+
self,
49+
response_id="msg_123",
50+
model="claude-3-haiku",
51+
content="Hello!",
52+
input_tokens=10,
53+
output_tokens=20,
54+
):
3455
self.id = response_id
3556
self.model = model
36-
self.content = [type('Content', (), {'text': content})()]
37-
self.usage = type('Usage', (), {
38-
'input_tokens': input_tokens,
39-
'output_tokens': output_tokens
40-
})()
57+
self.content = [type("Content", (), {"text": content})()]
58+
self.usage = type(
59+
"Usage", (), {"input_tokens": input_tokens, "output_tokens": output_tokens}
60+
)()
4161

4262

4363
class TestObserveDecorator:
@@ -176,7 +196,7 @@ def call_openai(prompt: str):
176196
model="gpt-4o-mini",
177197
content="Hello from OpenAI!",
178198
input_tokens=10,
179-
output_tokens=25
199+
output_tokens=25,
180200
)
181201

182202
result = call_openai("Say hello")
@@ -209,7 +229,7 @@ def call_anthropic(prompt: str):
209229
model="claude-3-haiku",
210230
content="Hello from Claude!",
211231
input_tokens=15,
212-
output_tokens=30
232+
output_tokens=30,
213233
)
214234

215235
result = call_anthropic("Say hello")
@@ -375,11 +395,7 @@ def test_decorator_cost_calculation(self, tracer_setup):
375395

376396
@observe(pricing=pricing)
377397
def expensive_call():
378-
return MockOpenAIResponse(
379-
model="gpt-4o",
380-
input_tokens=1000,
381-
output_tokens=500
382-
)
398+
return MockOpenAIResponse(model="gpt-4o", input_tokens=1000, output_tokens=500)
383399

384400
expensive_call()
385401

@@ -443,10 +459,7 @@ async def test_async_decorator_with_llm_response(self, tracer_setup):
443459
@observe(pricing=pricing, tags=["async", "test"])
444460
async def async_llm_call(prompt: str):
445461
# Simulate async API call
446-
return MockOpenAIResponse(
447-
response_id="chatcmpl-async123",
448-
content="Async response"
449-
)
462+
return MockOpenAIResponse(response_id="chatcmpl-async123", content="Async response")
450463

451464
result = await async_llm_call("test prompt")
452465
assert result.id == "chatcmpl-async123"

0 commit comments

Comments (0)