Skip to content

Commit aefab79

Browse files
Hooman Mehr and claude
authored and committed
Fix CI: ruff formatting in DeepEval test files
Apply ruff auto-formatting to conftest.py and test_deepeval_analysis.py. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
1 parent cc853b8 commit aefab79

File tree

2 files changed

+16
-1
lines changed

2 files changed

+16
-1
lines changed

tests/conftest.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,25 +1,31 @@
11
"""Shared test fixtures and markers for baft tests."""
2+
23
import os
34

45
import pytest
56

67
os.environ.setdefault("DEEPEVAL_TELEMETRY_OPT_OUT", "YES")
78

9+
810
def _ollama_available() -> bool:
911
try:
1012
import httpx
13+
1114
r = httpx.get("http://localhost:11434/api/tags", timeout=2.0)
1215
return r.status_code == 200
1316
except Exception:
1417
return False
1518

19+
1620
def _deepeval_available() -> bool:
1721
try:
1822
import deepeval # noqa: F401
23+
1924
return True
2025
except ImportError:
2126
return False
2227

28+
2329
skip_no_deepeval = pytest.mark.skipif(
2430
not _deepeval_available() or not _ollama_available(),
2531
reason="DeepEval or Ollama not available",

tests/test_deepeval_analysis.py

Lines changed: 10 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,24 +8,29 @@
88
Run: uv run pytest tests/test_deepeval_analysis.py -v
99
Skip: uv run pytest tests/ -m "not deepeval"
1010
"""
11+
1112
import pytest
1213

1314
from tests.conftest import skip_no_deepeval
1415

1516
pytestmark = [pytest.mark.deepeval, skip_no_deepeval]
1617

18+
1719
@pytest.fixture(scope="module")
1820
def judge_model():
1921
from deepeval.models import OllamaModel
22+
2023
return OllamaModel(
2124
model="command-r7b:latest",
2225
base_url="http://localhost:11434",
2326
)
2427

28+
2529
@pytest.fixture(scope="module")
2630
def claim_extraction_quality(judge_model):
2731
from deepeval.metrics import GEval
2832
from deepeval.test_case import LLMTestCaseParams
33+
2934
return GEval(
3035
name="Claim Extraction Quality",
3136
criteria=(
@@ -46,10 +51,12 @@ def claim_extraction_quality(judge_model):
4651
threshold=0.6,
4752
)
4853

54+
4955
@pytest.fixture(scope="module")
5056
def synthesis_faithfulness(judge_model):
5157
from deepeval.metrics import GEval
5258
from deepeval.test_case import LLMTestCaseParams
59+
5360
return GEval(
5461
name="Synthesis Faithfulness",
5562
criteria=(
@@ -69,6 +76,7 @@ def synthesis_faithfulness(judge_model):
6976
threshold=0.6,
7077
)
7178

79+
7280
def test_sp_claim_extraction(judge_model, claim_extraction_quality):
7381
"""SP worker should extract relevant claims with proper epistemic tagging."""
7482
from deepeval import assert_test
@@ -89,11 +97,12 @@ def test_sp_claim_extraction(judge_model, claim_extraction_quality):
8997
' "epistemic_tag": "inference", "source_tier": 3},'
9098
' {"text": "Some economists skeptical about effectiveness", '
9199
' "epistemic_tag": "uncertain", "source_tier": 3}'
92-
']}'
100+
"]}"
93101
),
94102
)
95103
assert_test(test_case, [claim_extraction_quality])
96104

105+
97106
def test_as_synthesis_faithfulness(judge_model, synthesis_faithfulness):
98107
"""AS synthesis should faithfully represent audit node outputs."""
99108
from deepeval import assert_test

0 commit comments

Comments (0)