
Commit 9e184d9

Author: Andrei Bratu
Release 0.8.40b6: Introduce async decorators (#75)
* Introduce async versions of the File decorators
* The decorators are compatible with the `eval.run(...)` utility
* The new decorators are available on the sync version of the Humanloop client. In the near future, we'll work on providing feature parity between the sync and async clients, and move the async decorator versions there.
1 parent 951b12a commit 9e184d9
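
In practice, the new decorators hang off the existing sync `Humanloop` client. A minimal sketch of the intended usage, assuming the environment variables are set; the Prompt path is illustrative:

```python
import asyncio
import os

from humanloop import Humanloop
from openai import AsyncOpenAI

hl = Humanloop(api_key=os.getenv("HUMANLOOP_API_KEY"))

# The a_prompt decorator auto-instruments the AsyncOpenAI call inside and
# logs it against a Prompt File at the given (illustrative) path.
@hl.a_prompt(path="My Async Prompt")
async def call_llm_async(messages):
    client = AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY"))
    response = await client.chat.completions.create(model="gpt-4o", messages=messages)
    return response.choices[0].message.content

print(asyncio.run(call_llm_async([{"role": "user", "content": "Hello!"}])))
```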

File tree

17 files changed: +1081 −169 lines

pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -3,7 +3,7 @@ name = "humanloop"

 [tool.poetry]
 name = "humanloop"
-version = "0.8.40b5"
+version = "0.8.40b6"
 description = ""
 readme = "README.md"
 authors = []

pytest.ini

Lines changed: 1 addition & 0 deletions
@@ -1,2 +1,3 @@
 [pytest]
 addopts = -n auto
+asyncio_mode = auto
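
`asyncio_mode = auto` tells pytest-asyncio to collect and run plain `async def` tests without per-test `@pytest.mark.asyncio` markers, which is presumably what the new async decorator tests rely on. A minimal illustration with a hypothetical test:

```python
# With asyncio_mode = auto, this coroutine is picked up and run as a test
# without any pytest-asyncio marker.
import asyncio

async def test_coroutine_runs():
    await asyncio.sleep(0)
    assert True
```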

src/humanloop/client.py

Lines changed: 170 additions & 2 deletions
@@ -10,8 +10,10 @@

 from humanloop.base_client import AsyncBaseHumanloop, BaseHumanloop
 from humanloop.core.client_wrapper import SyncClientWrapper
-from humanloop.decorators.flow import flow as flow_decorator_factory
-from humanloop.decorators.prompt import prompt_decorator_factory
+from humanloop.decorators.flow import a_flow_decorator_factory as a_flow_decorator_factory
+from humanloop.decorators.flow import flow_decorator_factory as flow_decorator_factory
+from humanloop.decorators.prompt import a_prompt_decorator_factory, prompt_decorator_factory
+from humanloop.decorators.tool import a_tool_decorator_factory as a_tool_decorator_factory
 from humanloop.decorators.tool import tool_decorator_factory as tool_decorator_factory
 from humanloop.environment import HumanloopEnvironment
 from humanloop.evals import run_eval
@@ -273,6 +275,50 @@ def call_llm(messages):
         """
         return prompt_decorator_factory(path=path)

+    def a_prompt(
+        self,
+        *,
+        path: str,
+    ):
+        """Auto-instrument LLM provider calls in async functions and create
+        [Prompt](https://humanloop.com/docs/explanation/prompts) Logs on Humanloop from them.
+
+        ```python
+        @a_prompt(path="My Async Prompt")
+        async def call_llm_async(messages):
+            client = AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+            response = await client.chat.completions.create(
+                model="gpt-4o",
+                temperature=0.8,
+                frequency_penalty=0.5,
+                max_tokens=200,
+                messages=messages,
+            )
+            return response.choices[0].message.content
+
+        # Calling the function above creates a new Log on Humanloop
+        # against this Prompt version:
+        {
+            provider: "openai",
+            model: "gpt-4o",
+            endpoint: "chat",
+            max_tokens: 200,
+            temperature: 0.8,
+            frequency_penalty: 0.5,
+        }
+        ```
+
+        If a different model, endpoint, or hyperparameter is used, a new
+        Prompt version is created.
+
+        :param path: The path where the Prompt is created. If not
+            provided, the function name is used as the path and the File
+            is created in the root of your Humanloop organization workspace.
+        """
+        return a_prompt_decorator_factory(path=path)
+
     def tool(
         self,
         *,
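
Because the decorated function remains a plain coroutine, calls can be fanned out concurrently. A minimal sketch, assuming `call_llm_async` is the decorated coroutine from the docstring above:

```python
import asyncio

# Each awaited call to the instrumented coroutine produces its own Prompt
# Log; asyncio.gather lets several calls run concurrently.
async def answer_many(questions):
    tasks = [
        call_llm_async([{"role": "user", "content": q}])
        for q in questions
    ]
    return await asyncio.gather(*tasks)

answers = asyncio.run(answer_many(["What is a Prompt?", "What is a Flow?"]))
```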
@@ -331,6 +377,64 @@ def calculator(a: int, b: Optional[int]) -> int:
             setup_values=setup_values,
         )

+    def a_tool(
+        self,
+        *,
+        path: str,
+        attributes: Optional[dict[str, Any]] = None,
+        setup_values: Optional[dict[str, Any]] = None,
+    ):
+        """Manage [Tool](https://humanloop.com/docs/explanation/tools) Files through code, for async functions.
+
+        The decorator inspects the wrapped async function's source code to infer the Tool's
+        JSON Schema. If the function declaration changes, a new Tool version
+        is upserted with an updated JSON Schema.
+
+        For example:
+
+        ```python
+        # Adding @a_tool on this function
+        @humanloop_client.a_tool(path="async_calculator")
+        async def async_calculator(a: int, b: Optional[int]) -> int:
+            \"\"\"Add two numbers together asynchronously.\"\"\"
+            return a + b
+
+        # Creates a Tool with this JSON Schema:
+        {
+            "strict": True,
+            "function": {
+                "name": "async_calculator",
+                "description": "Add two numbers together asynchronously.",
+                "parameters": {
+                    "type": "object",
+                    "properties": {
+                        "a": {"type": "integer"},
+                        "b": {"type": "integer"},
+                    },
+                    "required": ["a"],
+                },
+            },
+        }
+        ```
+
+        The return value of the decorated function must be JSON serializable.
+
+        If the function raises an exception, the created Log will have `output`
+        set to null, and the `error` field populated.
+
+        :param path: The path of the File in the Humanloop workspace.
+
+        :param setup_values: Values needed to set up the Tool, defined in [JSON Schema](https://json-schema.org/)
+
+        :param attributes: Additional fields to describe the Tool. Helpful to separate Tool versions from each other with details on how they were created or used.
+        """
+        return a_tool_decorator_factory(
+            opentelemetry_tracer=self._opentelemetry_tracer,
+            path=path,
+            attributes=attributes,
+            setup_values=setup_values,
+        )
+
     def flow(
         self,
         *,
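
As with `a_prompt`, the decorated tool stays awaitable. A minimal sketch of the schema inference described above, using a hypothetical `weather_lookup` tool and an assumed `humanloop_client` instance; per the docstring, only `city` should be inferred as required, since `country` is annotated Optional:

```python
import asyncio
from typing import Optional

# Hypothetical async tool; the inferred JSON Schema should mark only `city`
# as required, and the dict return value is JSON serializable.
@humanloop_client.a_tool(path="weather_lookup")
async def weather_lookup(city: str, country: Optional[str] = None) -> dict:
    """Look up a (stubbed) forecast for a city."""
    return {"city": city, "country": country, "forecast": "sunny"}

result = asyncio.run(weather_lookup("Paris"))  # JSON-serializable return
```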
@@ -394,6 +498,70 @@ def agent():
             attributes=attributes,
         )

+    def a_flow(
+        self,
+        *,
+        path: str,
+        attributes: Optional[dict[str, Any]] = None,
+    ):
+        """Trace SDK logging calls through [Flows](https://humanloop.com/docs/explanation/flows), for async functions.
+
+        Use it as the entrypoint of your async LLM feature. Logging calls like `prompts.call(...)`,
+        `tools.call(...)`, or other Humanloop decorators will be automatically added to the trace.
+
+        For example:
+
+        ```python
+        @a_prompt(path="My Async Prompt")
+        async def call_llm_async(messages):
+            client = AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+            response = await client.chat.completions.create(
+                model="gpt-4o",
+                temperature=0.8,
+                frequency_penalty=0.5,
+                max_tokens=200,
+                messages=messages,
+            )
+            return response.choices[0].message.content
+
+        @a_flow(path="Async Agent", attributes={"version": "v1"})
+        async def async_agent():
+            messages = []
+            while True:
+                user_input = input("You: ")
+                if user_input == "exit":
+                    break
+                messages.append({"role": "user", "content": user_input})
+                response = await call_llm_async(messages)
+                messages.append({"role": "assistant", "content": response})
+                print(f"Assistant: {response}")
+        ```
+
+        Each call to `async_agent` will create a trace corresponding to the conversation
+        session. Multiple Prompt Logs will be created as the LLM is called. They
+        will be added to the trace, allowing you to see the whole conversation
+        in the UI.
+
+        If the function returns a ChatMessage-like object, the Log will
+        populate the `output_message` field. Otherwise, it will serialize
+        the return value and populate the `output` field.
+
+        If an exception is raised, the output fields will be set to None
+        and the error message will be set in the Log's `error` field.
+
+        :param path: The path to the Flow. If not provided, the function name
+            will be used as the path and the File will be created in the root
+            of your organization workspace.
+
+        :param attributes: Additional fields to describe the Flow. Helpful to separate Flow versions from each other with details on how they were created or used.
+        """
+        return a_flow_decorator_factory(
+            client=self,
+            opentelemetry_tracer=self._opentelemetry_tracer,
+            path=path,
+            attributes=attributes,
+        )
+
     def pull(self, path: Optional[str] = None, environment: Optional[str] = None) -> Tuple[List[str], List[str]]:
         """Pull Prompt and Agent files from Humanloop to local filesystem.
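
Finally, the commit message notes the new decorators are compatible with the eval run utility. A hedged sketch of what that pairing might look like; the `evaluations.run(...)` argument shape, dataset path, and evaluator path are assumptions to verify against the Humanloop evals docs:

```python
# Hedged sketch: running the a_prompt-decorated coroutine under an Evaluation.
# `hl` is a Humanloop client instance; the File path matches the decorator's
# path argument. The Dataset and Evaluator paths below are illustrative.
hl.evaluations.run(
    name="async-prompt-smoke-test",
    file={"path": "My Async Prompt", "callable": call_llm_async},
    dataset={"path": "My Dataset"},
    evaluators=[{"path": "Exact match"}],
)
```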

src/humanloop/context.py

Lines changed: 3 additions & 2 deletions
@@ -1,13 +1,14 @@
+import threading
 from contextlib import contextmanager
 from dataclasses import dataclass
-import threading
 from typing import Any, Callable, Generator, Literal, Optional
+
 from opentelemetry import context as context_api

 from humanloop.error import HumanloopRuntimeError
 from humanloop.otel.constants import (
-    HUMANLOOP_CONTEXT_EVALUATION,
     HUMANLOOP_CONTEXT_DECORATOR,
+    HUMANLOOP_CONTEXT_EVALUATION,
     HUMANLOOP_CONTEXT_TRACE_ID,
 )

src/humanloop/core/client_wrapper.py

Lines changed: 2 additions & 2 deletions
@@ -14,10 +14,10 @@ def __init__(self, *, api_key: str, base_url: str, timeout: typing.Optional[floa

     def get_headers(self) -> typing.Dict[str, str]:
         headers: typing.Dict[str, str] = {
-            "User-Agent": "humanloop/0.8.40b5",
+            "User-Agent": "humanloop/0.8.40b6",
             "X-Fern-Language": "Python",
             "X-Fern-SDK-Name": "humanloop",
-            "X-Fern-SDK-Version": "0.8.40b5",
+            "X-Fern-SDK-Version": "0.8.40b6",
         }
         headers["X-API-KEY"] = self.api_key
         return headers
