1010from opentelemetry .trace import Tracer
1111
1212from humanloop .core .client_wrapper import SyncClientWrapper
13- from humanloop .decorators .types import DecoratorPromptKernelRequestParams
13+ from humanloop .utilities .types import DecoratorPromptKernelRequestParams
1414from humanloop .eval_utils .context import EVALUATION_CONTEXT_VARIABLE_NAME , EvaluationContext
1515
1616from humanloop .eval_utils import log_with_evaluation_context , run_eval
1717from humanloop .eval_utils .types import Dataset , Evaluator , EvaluatorCheck , File
1818
1919from humanloop .base_client import AsyncBaseHumanloop , BaseHumanloop
20- from humanloop .decorators .flow import flow as flow_decorator_factory
21- from humanloop .decorators .prompt import prompt as prompt_decorator_factory
22- from humanloop .decorators .tool import tool as tool_decorator_factory
20+ from humanloop .utilities .flow import flow as flow_decorator_factory
21+ from humanloop .utilities .prompt import prompt as prompt_decorator_factory
22+ from humanloop .utilities .tool import tool as tool_decorator_factory
2323from humanloop .environment import HumanloopEnvironment
2424from humanloop .evaluations .client import EvaluationsClient
2525from humanloop .otel import instrument_provider
@@ -49,7 +49,6 @@ def run(
4949 name : Optional [str ],
5050 dataset : Dataset ,
5151 evaluators : Optional [Sequence [Evaluator ]] = None ,
52- # logs: typing.Sequence[dict] | None = None,
5352 workers : int = 4 ,
5453 ) -> List [EvaluatorCheck ]:
5554 """Evaluate your function for a given `Dataset` and set of `Evaluators`.
@@ -175,14 +174,14 @@ def prompt(
175174 path : Optional [str ] = None ,
176175 ** prompt_kernel : Unpack [DecoratorPromptKernelRequestParams ], # type: ignore
177176 ):
178- """Decorator for declaring a (Prompt)[ https://humanloop.com/docs/explanation/prompts] in code.
177+ """Decorator for declaring a [Prompt]( https://humanloop.com/docs/explanation/prompts) in code.
179178
180179 The decorator intercepts calls to LLM provider APIs and creates
181180 a new Prompt file based on the hyperparameters used in the call.
182181 If a hyperparameter is specified in the `@prompt` decorator, then
183182 they override any value intercepted from the LLM provider call.
184183
185- If the (Prompt)[ https://humanloop.com/docs/explanation/prompts] already exists
184+ If the [Prompt]( https://humanloop.com/docs/explanation/prompts) already exists
186185 on the specified path, a new version will be upserted when any of the above change.
187186
188187 Here's an example of declaring a [Prompt](https://humanloop.com/docs/explanation/prompts) in code:
@@ -200,7 +199,7 @@ def call_llm(messages):
200199 ).choices[0].message.content
201200 ```
202201
203- This will create a (Prompt)[ https://humanloop.com/docs/explanation/prompts] with the following attributes:
203- 202+ This will create a [Prompt](https://humanloop.com/docs/explanation/prompts) with the following attributes:
204203
205204 ```python
206205 {
@@ -342,9 +341,9 @@ def flow(
342341 ):
343342 """Decorator for declaring a [Flow](https://humanloop.com/docs/explanation/flows) in code.
344343
345- A [Flow](https://humanloop.com/docs/explanation/flows) decorator should be added
346- at the entrypoint of your LLM feature. Call other functions decorated with
347- Humanloop SDK decorators to create a Trace of Logs on Humanloop.
344+ A [Flow](https://humanloop.com/docs/explanation/flows) wrapped callable should
345+ be used as the entrypoint of your LLM feature. Call other functions wrapped with
346+ Humanloop decorators to create a trace of Logs on Humanloop.
348347
349348 Here's an example of declaring a [Flow](https://humanloop.com/docs/explanation/flows) in code:
350349 ```python
0 commit comments