Commit 55023c3

Rename TestIdentifiers to ResourceIdentifiers to silence warning
1 parent 0e520ab commit 55023c3
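
The warning being silenced is almost certainly pytest's collection warning: by default pytest treats any class whose name matches the "Test*" pattern as a test class, and it emits a PytestCollectionWarning (roughly "cannot collect test class ... because it has a __init__ constructor") when such a class defines __init__, which every @dataclass does. A minimal sketch of the before and after follows; the field names come from the diff below, while the comments describe assumed pytest behaviour rather than output from this repository:

from dataclasses import dataclass


@dataclass
class TestIdentifiers:  # matches pytest's default "Test*" collection pattern;
    file_id: str        # pytest tries to collect it and warns, because the
    file_path: str      # dataclass-generated __init__ makes it uncollectable


@dataclass
class ResourceIdentifiers:  # renamed: no longer matches "Test*", so pytest
    file_id: str            # ignores the class and the warning goes away
    file_path: str

Renaming is the least invasive fix; alternatives would be setting __test__ = False on the class or narrowing the python_classes option in the pytest configuration, but those change pytest's collection behaviour rather than just the helper's name.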

File tree

2 files changed: +37 −35 lines

tests/custom/integration/conftest.py
tests/custom/integration/test_evals.py


tests/custom/integration/conftest.py

Lines changed: 11 additions & 9 deletions
@@ -8,12 +8,11 @@
 import uuid
 import pytest
 import dotenv
-from humanloop.client import Humanloop
 from tests.custom.types import GetHumanloopClientFn
 
 
 @dataclass
-class TestIdentifiers:
+class ResourceIdentifiers:
     file_id: str
     file_path: str
 
@@ -40,6 +39,7 @@ def openai_key() -> str:
 @pytest.fixture(scope="function")
 def sdk_test_dir(get_humanloop_client: GetHumanloopClientFn) -> Generator[str, None, None]:
     humanloop_client = get_humanloop_client()
+
     def cleanup_directory(directory_id: str):
         directory_response = humanloop_client.directories.get(id=directory_id)
         for subdirectory in directory_response.subdirectories:
@@ -95,7 +95,9 @@ def test_prompt_config() -> dict[str, Any]:
 
 
 @pytest.fixture(scope="function")
-def eval_dataset(get_humanloop_client: GetHumanloopClientFn, sdk_test_dir: str) -> Generator[TestIdentifiers, None, None]:
+def eval_dataset(
+    get_humanloop_client: GetHumanloopClientFn, sdk_test_dir: str
+) -> Generator[ResourceIdentifiers, None, None]:
     humanloop_client = get_humanloop_client()
     dataset_path = f"{sdk_test_dir}/eval_dataset"
     try:
@@ -119,7 +121,7 @@ def eval_dataset(get_humanloop_client: GetHumanloopClientFn, sdk_test_dir: str)
                 },
             ],
         )
-        yield TestIdentifiers(file_id=response.id, file_path=response.path)
+        yield ResourceIdentifiers(file_id=response.id, file_path=response.path)
         humanloop_client.datasets.delete(id=response.id)
     except Exception as e:
         pytest.fail(f"Failed to create dataset {dataset_path}: {e}")
@@ -128,15 +130,15 @@ def eval_dataset(get_humanloop_client: GetHumanloopClientFn, sdk_test_dir: str)
 @pytest.fixture(scope="function")
 def eval_prompt(
     get_humanloop_client: GetHumanloopClientFn, sdk_test_dir: str, openai_key: str, test_prompt_config: dict[str, Any]
-) -> Generator[TestIdentifiers, None, None]:
+) -> Generator[ResourceIdentifiers, None, None]:
     humanloop_client = get_humanloop_client()
     prompt_path = f"{sdk_test_dir}/eval_prompt"
     try:
         response = humanloop_client.prompts.upsert(
             path=prompt_path,
             **test_prompt_config,
         )
-        yield TestIdentifiers(file_id=response.id, file_path=response.path)
+        yield ResourceIdentifiers(file_id=response.id, file_path=response.path)
         humanloop_client.prompts.delete(id=response.id)
     except Exception as e:
         pytest.fail(f"Failed to create prompt {prompt_path}: {e}")
@@ -145,7 +147,7 @@ def eval_prompt(
 @pytest.fixture(scope="function")
 def output_not_null_evaluator(
     get_humanloop_client: GetHumanloopClientFn, sdk_test_dir: str
-) -> Generator[TestIdentifiers, None, None]:
+) -> Generator[ResourceIdentifiers, None, None]:
     humanloop_client = get_humanloop_client()
     evaluator_path = f"{sdk_test_dir}/output_not_null_evaluator"
     try:
@@ -161,14 +163,14 @@ def output_not_null(log: dict) -> bool:
                 "evaluator_type": "python",
             },
         )
-        yield TestIdentifiers(file_id=response.id, file_path=response.path)
+        yield ResourceIdentifiers(file_id=response.id, file_path=response.path)
         humanloop_client.evaluators.delete(id=response.id)
     except Exception as e:
         pytest.fail(f"Failed to create evaluator {evaluator_path}: {e}")
 
 
 @pytest.fixture(scope="function")
-def id_for_staging_environment(get_humanloop_client: GetHumanloopClientFn, eval_prompt: TestIdentifiers) -> str:
+def id_for_staging_environment(get_humanloop_client: GetHumanloopClientFn, eval_prompt: ResourceIdentifiers) -> str:
     humanloop_client = get_humanloop_client()
     response = humanloop_client.prompts.list_environments(id=eval_prompt.file_id)
     for environment in response:

tests/custom/integration/test_evals.py

Lines changed: 26 additions & 26 deletions
@@ -3,15 +3,15 @@
 
 import pytest
 from humanloop.error import HumanloopRuntimeError
-from tests.custom.integration.conftest import TestIdentifiers
+from tests.custom.integration.conftest import ResourceIdentifiers
 from tests.custom.types import GetHumanloopClientFn
 
 
 def test_eval_run_works_on_online_files(
     get_humanloop_client: GetHumanloopClientFn,
-    output_not_null_evaluator: TestIdentifiers,
-    eval_dataset: TestIdentifiers,
-    eval_prompt: TestIdentifiers,
+    output_not_null_evaluator: ResourceIdentifiers,
+    eval_dataset: ResourceIdentifiers,
+    eval_prompt: ResourceIdentifiers,
 ) -> None:
     humanloop_client = get_humanloop_client()
     humanloop_client.evaluations.run(  # type: ignore [attr-defined]
@@ -39,9 +39,9 @@ def test_eval_run_works_on_online_files(
 
 def test_eval_run_version_id(
     get_humanloop_client: GetHumanloopClientFn,
-    output_not_null_evaluator: TestIdentifiers,
-    eval_dataset: TestIdentifiers,
-    eval_prompt: TestIdentifiers,
+    output_not_null_evaluator: ResourceIdentifiers,
+    eval_dataset: ResourceIdentifiers,
+    eval_prompt: ResourceIdentifiers,
     test_prompt_config: dict[str, Any],
 ) -> None:
     humanloop_client = get_humanloop_client()
@@ -88,9 +88,9 @@ def test_eval_run_version_id(
 
 def test_eval_run_environment(
     get_humanloop_client: GetHumanloopClientFn,
-    output_not_null_evaluator: TestIdentifiers,
-    eval_dataset: TestIdentifiers,
-    eval_prompt: TestIdentifiers,
+    output_not_null_evaluator: ResourceIdentifiers,
+    eval_dataset: ResourceIdentifiers,
+    eval_prompt: ResourceIdentifiers,
     test_prompt_config: dict[str, Any],
     id_for_staging_environment: str,
 ) -> None:
@@ -141,9 +141,9 @@ def test_eval_run_environment(
 @pytest.mark.parametrize("version_lookup", ["version_id", "environment"])
 def test_eval_run_version_lookup_fails_with_path(
     get_humanloop_client: GetHumanloopClientFn,
-    eval_prompt: TestIdentifiers,
-    eval_dataset: TestIdentifiers,
-    output_not_null_evaluator: TestIdentifiers,
+    eval_prompt: ResourceIdentifiers,
+    eval_dataset: ResourceIdentifiers,
+    output_not_null_evaluator: ResourceIdentifiers,
     version_lookup: str,
 ):
     # GIVEN an eval run where we try to evaluate a non-default version
@@ -172,9 +172,9 @@ def test_eval_run_version_lookup_fails_with_path(
 
 def test_eval_run_with_version_upsert(
     get_humanloop_client: GetHumanloopClientFn,
-    eval_prompt: TestIdentifiers,
-    eval_dataset: TestIdentifiers,
-    output_not_null_evaluator: TestIdentifiers,
+    eval_prompt: ResourceIdentifiers,
+    eval_dataset: ResourceIdentifiers,
+    output_not_null_evaluator: ResourceIdentifiers,
     test_prompt_config: dict[str, Any],
 ):
     humanloop_client = get_humanloop_client()
@@ -210,8 +210,8 @@ def test_eval_run_with_version_upsert(
 
 def test_flow_eval_does_not_work_without_callable(
     get_humanloop_client: GetHumanloopClientFn,
-    eval_dataset: TestIdentifiers,
-    output_not_null_evaluator: TestIdentifiers,
+    eval_dataset: ResourceIdentifiers,
+    output_not_null_evaluator: ResourceIdentifiers,
 ):
     with pytest.raises(HumanloopRuntimeError) as e:
         humanloop_client = get_humanloop_client()
@@ -241,8 +241,8 @@ def test_flow_eval_does_not_work_without_callable(
 
 def test_flow_eval_works_with_callable(
     get_humanloop_client: GetHumanloopClientFn,
-    eval_dataset: TestIdentifiers,
-    output_not_null_evaluator: TestIdentifiers,
+    eval_dataset: ResourceIdentifiers,
+    output_not_null_evaluator: ResourceIdentifiers,
     sdk_test_dir: str,
 ):
     humanloop_client = get_humanloop_client()
@@ -290,8 +290,8 @@ def test_flow_eval_works_with_callable(
 
 def test_cannot_evaluate_agent_with_callable(
     get_humanloop_client: GetHumanloopClientFn,
-    eval_dataset: TestIdentifiers,
-    output_not_null_evaluator: TestIdentifiers,
+    eval_dataset: ResourceIdentifiers,
+    output_not_null_evaluator: ResourceIdentifiers,
 ):
     with pytest.raises(ValueError) as e:
         humanloop_client = get_humanloop_client()
@@ -316,8 +316,8 @@ def test_cannot_evaluate_agent_with_callable(
 
 def test_flow_eval_resolves_to_default_with_callable(
     get_humanloop_client: GetHumanloopClientFn,
-    output_not_null_evaluator: TestIdentifiers,
-    eval_dataset: TestIdentifiers,
+    output_not_null_evaluator: ResourceIdentifiers,
+    eval_dataset: ResourceIdentifiers,
     sdk_test_dir: str,
 ) -> None:
     humanloop_client = get_humanloop_client()
@@ -361,8 +361,8 @@ def test_flow_eval_resolves_to_default_with_callable(
 @pytest.mark.skip(reason="Skip until agents are in prod")
 def test_agent_eval_works_upserting(
     get_humanloop_client: GetHumanloopClientFn,
-    eval_dataset: TestIdentifiers,
-    output_not_null_evaluator: TestIdentifiers,
+    eval_dataset: ResourceIdentifiers,
+    output_not_null_evaluator: ResourceIdentifiers,
     sdk_test_dir: str,
 ):
     humanloop_client = get_humanloop_client()
