```diff
@@ -10,16 +10,16 @@
 from opentelemetry.trace import Tracer
 
 from humanloop.core.client_wrapper import SyncClientWrapper
-from humanloop.decorators.types import DecoratorPromptKernelRequestParams
+from humanloop.utilities.types import DecoratorPromptKernelRequestParams
 from humanloop.eval_utils.context import EVALUATION_CONTEXT_VARIABLE_NAME, EvaluationContext
 
 from humanloop.eval_utils import log_with_evaluation_context, run_eval
 from humanloop.eval_utils.types import Dataset, Evaluator, EvaluatorCheck, File
 
 from humanloop.base_client import AsyncBaseHumanloop, BaseHumanloop
-from humanloop.decorators.flow import flow as flow_decorator_factory
-from humanloop.decorators.prompt import prompt as prompt_decorator_factory
-from humanloop.decorators.tool import tool as tool_decorator_factory
+from humanloop.utilities.flow import flow as flow_decorator_factory
+from humanloop.utilities.prompt import prompt as prompt_decorator_factory
+from humanloop.utilities.tool import tool as tool_decorator_factory
 from humanloop.environment import HumanloopEnvironment
 from humanloop.evaluations.client import EvaluationsClient
 from humanloop.otel import instrument_provider
@@ -49,7 +49,6 @@ def run(
     name: Optional[str],
     dataset: Dataset,
     evaluators: Optional[Sequence[Evaluator]] = None,
-    # logs: typing.Sequence[dict] | None = None,
     workers: int = 4,
 ) -> List[EvaluatorCheck]:
     """Evaluate your function for a given `Dataset` and set of `Evaluators`.
```
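The first hunk is a pure module-path rename (`humanloop.decorators` → `humanloop.utilities`); the second only drops a commented-out `logs` placeholder from the `run()` signature. For context, a hedged sketch of a call against the parameters visible in that second hunk — the client construction, the attribute path to `run()`, any parameters declared above line 49 (the `def run(` line itself sits outside this hunk), and the example dataset/evaluator values are all assumptions for illustration, not taken from this diff:

```python
# Hedged usage sketch; only name, dataset, evaluators, and workers are
# taken from the signature shown in the diff above.
from typing import List

from humanloop import Humanloop  # assumed top-level client export
from humanloop.eval_utils.types import Dataset, Evaluator, EvaluatorCheck

client = Humanloop(api_key="YOUR_API_KEY")  # hypothetical construction

# Hypothetical values; the exact shape of Dataset and Evaluator is defined
# in humanloop.eval_utils.types, not in this hunk.
my_dataset: Dataset = {
    "path": "datasets/qa-smoke-test",
    "datapoints": [
        {"inputs": {"question": "What is 2 + 2?"}, "target": {"answer": "4"}},
    ],
}
my_evaluator: Evaluator = {"path": "evaluators/exact-match"}

checks: List[EvaluatorCheck] = client.evaluations.run(  # attribute path assumed
    name="qa-smoke-eval",       # Optional[str]
    dataset=my_dataset,         # Dataset
    evaluators=[my_evaluator],  # Optional[Sequence[Evaluator]]
    workers=8,                  # overrides the default of 4
)
```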