From c65635aceb338181b745df5ce39d1e497a2cc5bb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Halber?= Date: Tue, 24 Mar 2026 22:25:41 +0000 Subject: [PATCH 01/14] ref(langchain): start to migrate to integrations API --- integrations/langchain-py/README.md | 28 +- integrations/langchain-py/pyproject.toml | 7 +- .../src/braintrust_langchain/__init__.py | 30 +- .../src/braintrust_langchain/callbacks.py | 655 +------- .../src/braintrust_langchain/context.py | 34 +- py/Makefile | 2 +- py/examples/langchain/auto.py | 24 + py/examples/langchain/manual_patching.py | 27 + py/noxfile.py | 18 + py/src/braintrust/auto.py | 7 +- py/src/braintrust/integrations/__init__.py | 3 +- .../auto_test_scripts/test_auto_langchain.py | 21 + .../integrations/langchain/__init__.py | 47 + .../cassettes/test_async_langchain_invoke | 276 ++++ .../cassettes/test_chain_with_memory | 332 ++++ .../langchain/cassettes/test_global_handler | 225 +++ .../test_langchain_anthropic_integration | 300 ++++ .../cassettes/test_langgraph_state_management | 327 ++++ .../langchain/cassettes/test_llm_calls | 333 ++++ .../cassettes/test_parallel_execution | 234 +++ .../cassettes/test_prompt_caching_tokens | 324 ++++ .../langchain/cassettes/test_streaming_ttft | 298 ++++ .../langchain/cassettes/test_tool_usage | 350 +++++ .../integrations/langchain/conftest.py | 58 + .../integrations/langchain/integration.py | 34 + .../integrations/langchain/test_langchain.py | 1380 +++++++++++++++++ .../integrations/langchain/tracing.py | 701 +++++++++ py/src/braintrust/wrappers/langchain.py | 150 -- .../braintrust/wrappers/langchain/__init__.py | 21 + 29 files changed, 5420 insertions(+), 826 deletions(-) create mode 100644 py/examples/langchain/auto.py create mode 100644 py/examples/langchain/manual_patching.py create mode 100644 py/src/braintrust/integrations/auto_test_scripts/test_auto_langchain.py create mode 100644 py/src/braintrust/integrations/langchain/__init__.py create mode 100644 
py/src/braintrust/integrations/langchain/cassettes/test_async_langchain_invoke create mode 100644 py/src/braintrust/integrations/langchain/cassettes/test_chain_with_memory create mode 100644 py/src/braintrust/integrations/langchain/cassettes/test_global_handler create mode 100644 py/src/braintrust/integrations/langchain/cassettes/test_langchain_anthropic_integration create mode 100644 py/src/braintrust/integrations/langchain/cassettes/test_langgraph_state_management create mode 100644 py/src/braintrust/integrations/langchain/cassettes/test_llm_calls create mode 100644 py/src/braintrust/integrations/langchain/cassettes/test_parallel_execution create mode 100644 py/src/braintrust/integrations/langchain/cassettes/test_prompt_caching_tokens create mode 100644 py/src/braintrust/integrations/langchain/cassettes/test_streaming_ttft create mode 100644 py/src/braintrust/integrations/langchain/cassettes/test_tool_usage create mode 100644 py/src/braintrust/integrations/langchain/conftest.py create mode 100644 py/src/braintrust/integrations/langchain/integration.py create mode 100644 py/src/braintrust/integrations/langchain/test_langchain.py create mode 100644 py/src/braintrust/integrations/langchain/tracing.py delete mode 100644 py/src/braintrust/wrappers/langchain.py create mode 100644 py/src/braintrust/wrappers/langchain/__init__.py diff --git a/integrations/langchain-py/README.md b/integrations/langchain-py/README.md index ec00daef..19717270 100644 --- a/integrations/langchain-py/README.md +++ b/integrations/langchain-py/README.md @@ -1,9 +1,35 @@ -# braintrust-langchain +# braintrust-langchain (DEPRECATED) [![PyPI version](https://img.shields.io/pypi/v/braintrust-langchain.svg)](https://pypi.org/project/braintrust-langchain/) SDK for integrating [Braintrust](https://braintrust.dev) with [LangChain](https://langchain.com/). This package provides a callback handler to automatically log LangChain executions to Braintrust. 
+> **This package is deprecated.** The LangChain integration is now included in the main [`braintrust`](https://pypi.org/project/braintrust/) package. + +## Migration + +1. Remove `braintrust-langchain` from your dependencies +2. Install or upgrade `braintrust`: + ```bash + pip install --upgrade braintrust + ``` +3. Update your imports: + ```python + # Before + from braintrust_langchain import BraintrustCallbackHandler, set_global_handler + + # After (option 1: auto-instrument langchain library) + import braintrust + braintrust.auto_instrument() + + # After (option 2: explicit) + from braintrust.wrappers.langchain import BraintrustCallbackHandler, set_global_handler + ``` + +The API is identical - no code changes needed beyond the import path. + +--- + ## Installation ```bash diff --git a/integrations/langchain-py/pyproject.toml b/integrations/langchain-py/pyproject.toml index 9bbf9d7a..9d620604 100644 --- a/integrations/langchain-py/pyproject.toml +++ b/integrations/langchain-py/pyproject.toml @@ -1,7 +1,7 @@ [project] name = "braintrust-langchain" version = "0.2.1" -description = "Integration for LangChain and Braintrust Tracing" +description = "DEPRECATED: LangChain integration is now included in the main braintrust package. Install braintrust instead." 
readme = "README.md" requires-python = ">=3.10" dependencies = [ @@ -10,9 +10,9 @@ dependencies = [ ] license = "MIT" authors = [{ name = "Braintrust", email = "info@braintrust.dev" }] -keywords = ["braintrust", "langchain", "llm", "tracing", "ai", "agents"] +keywords = ["braintrust", "langchain", "llm", "tracing", "ai", "agents", "deprecated"] classifiers = [ - "Development Status :: 4 - Beta", + "Development Status :: 7 - Inactive", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", @@ -73,4 +73,3 @@ known-third-party = ["braintrust", "langchain"] [tool.pytest.ini_options] testpaths = ["src/tests"] python_files = ["test_*.py"] -addopts = "-v" diff --git a/integrations/langchain-py/src/braintrust_langchain/__init__.py b/integrations/langchain-py/src/braintrust_langchain/__init__.py index 2feeb7bc..9f9fb04a 100644 --- a/integrations/langchain-py/src/braintrust_langchain/__init__.py +++ b/integrations/langchain-py/src/braintrust_langchain/__init__.py @@ -1,4 +1,28 @@ -from .callbacks import BraintrustCallbackHandler -from .context import set_global_handler +""" +DEPRECATED: braintrust-langchain is now part of the main braintrust package. -__all__ = ["BraintrustCallbackHandler", "set_global_handler"] +Install `braintrust` and use `braintrust.integrations.langchain` or +`braintrust.auto_instrument()` instead. This package now re-exports from +`braintrust.integrations.langchain` for backward compatibility. +""" + +import warnings + +warnings.warn( + "braintrust-langchain is deprecated. The LangChain integration is now included in the " + "main 'braintrust' package. Use 'from braintrust.integrations.langchain import " + "BraintrustCallbackHandler' or 'braintrust.auto_instrument()' instead. 
" + "This package will be removed in a future release.", + DeprecationWarning, + stacklevel=2, +) + +# Re-export public API from the new location for backward compatibility +from braintrust.integrations.langchain import ( # noqa: E402, F401 + BraintrustCallbackHandler, + BraintrustTracer, + clear_global_handler, + set_global_handler, +) + +__all__ = ["BraintrustCallbackHandler", "BraintrustTracer", "set_global_handler", "clear_global_handler"] diff --git a/integrations/langchain-py/src/braintrust_langchain/callbacks.py b/integrations/langchain-py/src/braintrust_langchain/callbacks.py index 016a1268..1adfae9d 100644 --- a/integrations/langchain-py/src/braintrust_langchain/callbacks.py +++ b/integrations/langchain-py/src/braintrust_langchain/callbacks.py @@ -1,648 +1,15 @@ -import json -import logging -import re -import time -from collections.abc import Mapping, Sequence -from re import Pattern -from typing import ( - Any, - TypedDict, - Union, -) -from uuid import UUID - -import braintrust -from braintrust import NOOP_SPAN, Logger, Span, SpanAttributes, SpanTypeAttribute, current_span, init_logger -from braintrust.version import VERSION as sdk_version -from langchain_core.agents import AgentAction, AgentFinish -from langchain_core.callbacks.base import BaseCallbackHandler -from langchain_core.documents import Document -from langchain_core.messages import BaseMessage -from langchain_core.outputs.llm_result import LLMResult -from tenacity import RetryCallState -from typing_extensions import NotRequired - -from braintrust_langchain.version import version - -_logger = logging.getLogger("braintrust_langchain") - - -class LogEvent(TypedDict): - input: NotRequired[Any] - output: NotRequired[Any] - expected: NotRequired[Any] - error: NotRequired[str] - tags: NotRequired[Sequence[str] | None] - scores: NotRequired[Mapping[str, int | float]] - metadata: NotRequired[Mapping[str, Any]] - metrics: NotRequired[Mapping[str, int | float]] - id: NotRequired[str] - dataset_record_id: 
NotRequired[str] - - -class BraintrustCallbackHandler(BaseCallbackHandler): - root_run_id: UUID | None = None - - def __init__( - self, - logger: Logger | Span | None = None, - debug: bool = False, - exclude_metadata_props: Pattern[str] | None = None, - ): - self.logger = logger - self.spans: dict[UUID, Span] = {} - self.debug = debug # DEPRECATED - self.exclude_metadata_props = exclude_metadata_props or re.compile( - r"^(l[sc]_|langgraph_|__pregel_|checkpoint_ns)" - ) - self.skipped_runs: set[UUID] = set() - # Set run_inline=True to avoid thread executor in async contexts - # This ensures memory logger context is preserved - self.run_inline = True - - self._start_times: dict[UUID, float] = {} - self._first_token_times: dict[UUID, float] = {} - self._ttft_ms: dict[UUID, float] = {} - - def _start_span( - self, - parent_run_id: UUID | None, - run_id: UUID, - name: str | None = None, - type: SpanTypeAttribute | None = SpanTypeAttribute.TASK, - span_attributes: SpanAttributes | Mapping[str, Any] | None = None, - start_time: float | None = None, - set_current: bool | None = None, - parent: str | None = None, - event: LogEvent | None = None, - ) -> Any: - if run_id in self.spans: - # XXX: See graph test case of an example where this _may_ be intended. 
- _logger.warning(f"Span already exists for run_id {run_id} (this is likely a bug)") - return - - if not parent_run_id: - self.root_run_id = run_id - - current_parent = current_span() - parent_span = None - if parent_run_id and parent_run_id in self.spans: - parent_span = self.spans[parent_run_id] - elif current_parent != NOOP_SPAN: - parent_span = current_parent - elif self.logger is not None: - parent_span = self.logger - else: - parent_span = braintrust - - if event is None: - event = {} - - tags = event.get("tags") or [] - event = { - **event, - "tags": None, - "metadata": { - **({"tags": tags}), - **(event.get("metadata") or {}), - "run_id": run_id, - "parent_run_id": parent_run_id, - "braintrust": { - "integration_name": "langchain-py", - "integration_version": version, - "sdk_version": sdk_version, - "language": "python", - }, - }, - } - - span = parent_span.start_span( - name=name, - type=type, - span_attributes=span_attributes, - start_time=start_time, - set_current=set_current, - parent=parent, - **event, - ) - - if self.logger != NOOP_SPAN and span == NOOP_SPAN: - _logger.warning( - "Braintrust logging not configured. Pass a `logger`, call `init_logger`, or run an experiment to configure Braintrust logging. Setting up a default." 
- ) - span = init_logger().start_span( - name=name, - type=type, - span_attributes=span_attributes, - start_time=start_time, - set_current=set_current, - parent=parent, - **event, - ) - - span.set_current() - - self.spans[run_id] = span - return span - - def _end_span( - self, - run_id: UUID, - parent_run_id: UUID | None = None, - input: Any | None = None, - output: Any | None = None, - expected: Any | None = None, - error: str | None = None, - tags: Sequence[str] | None = None, - scores: Mapping[str, int | float] | None = None, - metadata: Mapping[str, Any] | None = None, - metrics: Mapping[str, int | float] | None = None, - dataset_record_id: str | None = None, - ) -> Any: - if run_id not in self.spans: - return - - if run_id in self.skipped_runs: - self.skipped_runs.discard(run_id) - return - - span = self.spans.pop(run_id) - - if self.root_run_id == run_id: - self.root_run_id = None - - span.log( - input=input, - output=output, - expected=expected, - error=error, - tags=None, - scores=scores, - metadata={ - **({"tags": tags} if tags else {}), - **(metadata or {}), - }, - metrics=metrics, - dataset_record_id=dataset_record_id, - ) - - # In async workflows, callbacks may execute in different async contexts. - # The span's context variable token may have been created in a different - # context, causing ValueError when trying to reset it. We catch and ignore - # this specific error since the span hierarchy is maintained via self.spans. 
- try: - span.unset_current() - except ValueError as e: - if "was created in a different Context" in str(e): - pass - else: - raise - - span.end() - - def on_llm_error( - self, - error: BaseException, - *, - run_id: UUID, - parent_run_id: UUID | None = None, - **kwargs: Any, # TODO: response= - ) -> Any: - self._end_span(run_id, error=str(error), metadata={**kwargs}) - - self._start_times.pop(run_id, None) - self._first_token_times.pop(run_id, None) - self._ttft_ms.pop(run_id, None) - - def on_chain_error( - self, - error: BaseException, - *, - run_id: UUID, - parent_run_id: UUID | None = None, - **kwargs: Any, # TODO: some metadata - ) -> Any: - self._end_span(run_id, error=str(error), metadata={**kwargs}) - - def on_tool_error( - self, - error: BaseException, - *, - run_id: UUID, - parent_run_id: UUID | None = None, - **kwargs: Any, - ) -> Any: - self._end_span(run_id, error=str(error), metadata={**kwargs}) - - def on_retriever_error( - self, - error: BaseException, - *, - run_id: UUID, - parent_run_id: UUID | None = None, - **kwargs: Any, - ) -> Any: - self._end_span(run_id, error=str(error), metadata={**kwargs}) - - # Agent Methods - def on_agent_action( - self, - action: AgentAction, - *, - run_id: UUID, - parent_run_id: UUID | None = None, - **kwargs: Any, - ) -> Any: - self._start_span( - parent_run_id, - run_id, - type=SpanTypeAttribute.LLM, - name=action.tool, - event={"input": action, "metadata": {**kwargs}}, - ) - - def on_agent_finish( - self, - finish: AgentFinish, - *, - run_id: UUID, - parent_run_id: UUID | None = None, - **kwargs: Any, - ) -> Any: - self._end_span(run_id, output=finish, metadata={**kwargs}) - - def on_chain_start( - self, - serialized: dict[str, Any], - inputs: dict[str, Any], - *, - run_id: UUID, - parent_run_id: UUID | None = None, - tags: list[str] | None = None, - name: str | None = None, - metadata: dict[str, Any] | None = None, - **kwargs: Any, - ) -> Any: - tags = tags or [] - - # avoids extra logs that seem not as useful 
esp. with langgraph - if "langsmith:hidden" in tags: - self.skipped_runs.add(run_id) - return - - metadata = metadata or {} - resolved_name = ( - name - or metadata.get("langgraph_node") - or serialized.get("name") - or last_item(serialized.get("id") or []) - or "Chain" - ) - - self._start_span( - parent_run_id, - run_id, - name=resolved_name, - event={ - "input": inputs, - "tags": tags, - "metadata": { - "serialized": serialized, - "name": name, - "metadata": metadata, - **kwargs, - }, - }, - ) - - def on_chain_end( - self, - outputs: dict[str, Any], - *, - run_id: UUID, - parent_run_id: UUID | None = None, - tags: list[str] | None = None, - **kwargs: Any, - ) -> Any: - self._end_span(run_id, output=outputs, tags=tags, metadata={**kwargs}) +""" +DEPRECATED: Import from braintrust.wrappers.langchain instead. +""" - def on_llm_start( - self, - serialized: dict[str, Any], - prompts: list[str], - *, - run_id: UUID, - parent_run_id: UUID | None = None, - tags: list[str] | None = None, - metadata: dict[str, Any] | None = None, - name: str | None = None, - **kwargs: Any, - ) -> Any: - self._start_times[run_id] = time.perf_counter() - self._first_token_times.pop(run_id, None) - self._ttft_ms.pop(run_id, None) +import warnings - name = name or serialized.get("name") or last_item(serialized.get("id") or []) or "LLM" - self._start_span( - parent_run_id, - run_id, - name=name, - type=SpanTypeAttribute.LLM, - event={ - "input": prompts, - "tags": tags, - "metadata": { - "serialized": serialized, - "name": name, - "metadata": metadata, - **kwargs, - }, - }, - ) - - def on_chat_model_start( - self, - serialized: dict[str, Any], - messages: list[list["BaseMessage"]], - *, - run_id: UUID, - parent_run_id: UUID | None = None, - tags: list[str] | None = None, - metadata: dict[str, Any] | None = None, - name: str | None = None, - invocation_params: dict[str, Any] | None = None, - **kwargs: Any, - ) -> Any: - self._start_times[run_id] = time.perf_counter() - 
self._first_token_times.pop(run_id, None) - self._ttft_ms.pop(run_id, None) - - invocation_params = invocation_params or {} - self._start_span( - parent_run_id, - run_id, - name=name or serialized.get("name") or last_item(serialized.get("id") or []) or "Chat Model", - type=SpanTypeAttribute.LLM, - event={ - "input": messages, - "tags": tags, - "metadata": ( - { - "serialized": serialized, - "invocation_params": invocation_params, - "metadata": metadata or {}, - "name": name, - **kwargs, - } - ), - }, - ) - - def on_llm_end( - self, - response: LLMResult, - *, - run_id: UUID, - parent_run_id: UUID | None = None, - tags: list[str] | None = None, - **kwargs: Any, - ) -> Any: - if run_id not in self.spans: - return - - metrics = _get_metrics_from_response(response) - - ttft = self._ttft_ms.pop(run_id, None) - if ttft is not None: - metrics["time_to_first_token"] = ttft - - model_name = _get_model_name_from_response(response) - - self._start_times.pop(run_id, None) - self._first_token_times.pop(run_id, None) - - self._end_span( - run_id, - output=response, - metrics=metrics, - tags=tags, - metadata={ - "model": model_name, - **kwargs, - }, - ) - - def on_tool_start( - self, - serialized: dict[str, Any], - input_str: str, - *, - run_id: UUID, - parent_run_id: UUID | None = None, - tags: list[str] | None = None, - metadata: dict[str, Any] | None = None, - inputs: dict[str, Any] | None = None, - name: str | None = None, - **kwargs: Any, - ) -> Any: - self._start_span( - parent_run_id, - run_id, - name=name or serialized.get("name") or last_item(serialized.get("id") or []) or "Tool", - type=SpanTypeAttribute.TOOL, - event={ - "input": inputs or safe_parse_serialized_json(input_str), - "tags": tags, - "metadata": { - "metadata": metadata, - "serialized": serialized, - "input_str": input_str, - "input": safe_parse_serialized_json(input_str), - "inputs": inputs, - "name": name, - **kwargs, - }, - }, - ) - - def on_tool_end( - self, - output: Any, - *, - run_id: UUID, - 
parent_run_id: UUID | None = None, - **kwargs: Any, - ) -> Any: - self._end_span(run_id, output=output, metadata={**kwargs}) - - def on_retriever_start( - self, - serialized: dict[str, Any], - query: str, - *, - run_id: UUID, - parent_run_id: UUID | None = None, - tags: list[str] | None = None, - metadata: dict[str, Any] | None = None, - name: str | None = None, - **kwargs: Any, - ) -> Any: - self._start_span( - parent_run_id, - run_id, - name=name or serialized.get("name") or last_item(serialized.get("id") or []) or "Retriever", - type=SpanTypeAttribute.FUNCTION, - event={ - "input": query, - "tags": tags, - "metadata": { - "serialized": serialized, - "metadata": metadata, - "name": name, - **kwargs, - }, - }, - ) - - def on_retriever_end( - self, - documents: Sequence[Document], - *, - run_id: UUID, - parent_run_id: UUID | None = None, - **kwargs: Any, - ) -> Any: - self._end_span(run_id, output=documents, metadata={**kwargs}) - - def on_llm_new_token( - self, - token: str, - *, - chunk: Union["GenerationChunk", "ChatGenerationChunk"] | None = None, # type: ignore - run_id: UUID, - parent_run_id: UUID | None = None, - **kwargs: Any, - ) -> Any: - if run_id not in self._first_token_times: - now = time.perf_counter() - self._first_token_times[run_id] = now - start = self._start_times.get(run_id) - if start is not None: - self._ttft_ms[run_id] = now - start - - def on_text( - self, - text: str, - *, - run_id: UUID, - parent_run_id: UUID | None = None, - **kwargs: Any, - ) -> Any: - pass - - def on_retry( - self, - retry_state: RetryCallState, - *, - run_id: UUID, - parent_run_id: UUID | None = None, - **kwargs: Any, - ) -> Any: - pass - - def on_custom_event( - self, - name: str, - data: Any, - *, - run_id: UUID, - tags: list[str] | None = None, - metadata: dict[str, Any] | None = None, - **kwargs: Any, - ) -> Any: - pass - - -def clean_object(obj: dict[str, Any]) -> dict[str, Any]: - return { - k: v - for k, v in obj.items() - if v is not None and not 
(isinstance(v, list) and not v) and not (isinstance(v, dict) and not v) - } - - -def safe_parse_serialized_json(input_str: str) -> Any: - try: - return json.loads(input_str) - except: - return input_str - - -def last_item(items: list[Any]) -> Any: - return items[-1] if items else None - - -def _walk_generations(response: LLMResult): - for generations in response.generations or []: - yield from generations or [] - - -def _get_model_name_from_response(response: LLMResult) -> str | None: - model_name = None - for generation in _walk_generations(response): - message = getattr(generation, "message", None) - if not message: - continue - - response_metadata = getattr(message, "response_metadata", None) - if response_metadata and isinstance(response_metadata, dict): - model_name = response_metadata.get("model_name") - - if model_name: - break - - if not model_name: - llm_output: dict[str, Any] = response.llm_output or {} - model_name = llm_output.get("model_name") or llm_output.get("model") or "" - - return model_name - - -def _get_metrics_from_response(response: LLMResult): - metrics = {} - - for generation in _walk_generations(response): - message = getattr(generation, "message", None) - if not message: - continue - - usage_metadata = getattr(message, "usage_metadata", None) - - if usage_metadata and isinstance(usage_metadata, dict): - metrics.update( - clean_object( - { - "total_tokens": usage_metadata.get("total_tokens"), - "prompt_tokens": usage_metadata.get("input_tokens"), - "completion_tokens": usage_metadata.get("output_tokens"), - } - ) - ) - - # Extract cache tokens from nested input_token_details (LangChain format) - # Maps to Braintrust's standard cache token metric names - input_token_details = usage_metadata.get("input_token_details") - if input_token_details and isinstance(input_token_details, dict): - cache_read = input_token_details.get("cache_read") - cache_creation = input_token_details.get("cache_creation") - - if cache_read is not None: - 
metrics["prompt_cached_tokens"] = cache_read - if cache_creation is not None: - metrics["prompt_cache_creation_tokens"] = cache_creation +warnings.warn( + "braintrust_langchain.callbacks is deprecated. Import from 'braintrust.wrappers.langchain' instead.", + DeprecationWarning, + stacklevel=2, +) - if not metrics or not any(metrics.values()): - llm_output: dict[str, Any] = response.llm_output or {} - metrics = llm_output.get("token_usage") or llm_output.get("estimatedTokens") or {} +from braintrust.integrations.langchain import BraintrustCallbackHandler # noqa: F401 - return clean_object(metrics) +__all__ = ["BraintrustCallbackHandler"] diff --git a/integrations/langchain-py/src/braintrust_langchain/context.py b/integrations/langchain-py/src/braintrust_langchain/context.py index 5c6bb4e8..c11385d1 100644 --- a/integrations/langchain-py/src/braintrust_langchain/context.py +++ b/integrations/langchain-py/src/braintrust_langchain/context.py @@ -1,26 +1,18 @@ -from contextvars import ContextVar +""" +DEPRECATED: Import from braintrust.wrappers.langchain instead. +""" -from langchain_core.tracers.context import register_configure_hook +import warnings -from braintrust_langchain.callbacks import BraintrustCallbackHandler - -__all__ = ["set_global_handler", "clear_global_handler"] - - -braintrust_callback_handler_var: ContextVar[BraintrustCallbackHandler | None] = ContextVar( - "braintrust_callback_handler", default=None +warnings.warn( + "braintrust_langchain.context is deprecated. 
Import from 'braintrust.wrappers.langchain' instead.", + DeprecationWarning, + stacklevel=2, ) - -def set_global_handler(handler: BraintrustCallbackHandler): - braintrust_callback_handler_var.set(handler) - - -def clear_global_handler(): - braintrust_callback_handler_var.set(None) - - -register_configure_hook( - context_var=braintrust_callback_handler_var, - inheritable=True, +from braintrust.integrations.langchain import ( # noqa: F401 + clear_global_handler, + set_global_handler, ) + +__all__ = ["set_global_handler", "clear_global_handler"] diff --git a/py/Makefile b/py/Makefile index 4696d84d..f2e29b58 100644 --- a/py/Makefile +++ b/py/Makefile @@ -60,7 +60,7 @@ install-dev: install-build-deps $(UV) pip install -r requirements-dev.txt install-optional: install-dev - $(UV) pip install anthropic openai pydantic_ai litellm agno google-genai google-adk dspy langsmith + $(UV) pip install anthropic openai pydantic_ai litellm agno google-genai google-adk dspy langsmith langchain-core langchain-openai langchain-anthropic langgraph $(UV) pip install -e .[temporal,otel] .DEFAULT_GOAL := help diff --git a/py/examples/langchain/auto.py b/py/examples/langchain/auto.py new file mode 100644 index 00000000..b6776dd7 --- /dev/null +++ b/py/examples/langchain/auto.py @@ -0,0 +1,24 @@ +"""Auto-instrument LangChain with Braintrust tracing. 
+ +Usage: + export BRAINTRUST_API_KEY="your-api-key" + export OPENAI_API_KEY="your-openai-api-key" + python auto.py +""" + +import braintrust + + +# Auto-instrument all supported libraries including LangChain +braintrust.auto_instrument() + +from langchain_openai import ChatOpenAI + + +def main(): + llm = ChatOpenAI(model="gpt-4o-mini") + response = llm.invoke("What is the capital of France?") + print(response.content) + + +main() diff --git a/py/examples/langchain/manual_patching.py b/py/examples/langchain/manual_patching.py new file mode 100644 index 00000000..142bcc5a --- /dev/null +++ b/py/examples/langchain/manual_patching.py @@ -0,0 +1,27 @@ +"""Manually patch LangChain with Braintrust tracing. + +Usage: + export BRAINTRUST_API_KEY="your-api-key" + export OPENAI_API_KEY="your-openai-api-key" + python manual_patching.py +""" + +from braintrust import init_logger +from braintrust.wrappers.langchain import set_global_handler, BraintrustCallbackHandler + + +# Setup LangChain tracing with a specific project +logger = init_logger(project="my-langchain-project") +handler = BraintrustCallbackHandler(logger=logger) +set_global_handler(handler) + +from langchain_openai import ChatOpenAI + + +def main(): + llm = ChatOpenAI(model="gpt-4o-mini") + response = llm.invoke("What is the capital of France?") + print(response.content) + + +main() diff --git a/py/noxfile.py b/py/noxfile.py index bff911db..2789bfee 100644 --- a/py/noxfile.py +++ b/py/noxfile.py @@ -78,6 +78,7 @@ def _pinned_python_version(): "google.genai", "google.adk", "temporalio", + "langchain_core", ) # Test matrix @@ -101,6 +102,7 @@ def _pinned_python_version(): GENAI_VERSIONS = (LATEST,) DSPY_VERSIONS = (LATEST,) GOOGLE_ADK_VERSIONS = (LATEST, "1.14.1") +LANGCHAIN_VERSIONS = (LATEST,) # temporalio 1.19.0+ requires Python >= 3.10; skip Python 3.9 entirely TEMPORAL_VERSIONS = (LATEST, "1.20.0", "1.19.0") PYTEST_VERSIONS = (LATEST, "8.4.2") @@ -192,6 +194,19 @@ def test_google_genai(session, version): 
_run_core_tests(session) +@nox.session() +@nox.parametrize("version", LANGCHAIN_VERSIONS, ids=LANGCHAIN_VERSIONS) +def test_langchain(session, version): + """Test LangChain integration.""" + _install_test_deps(session) + _install(session, "langchain-core", version) + _install(session, "langchain-openai", version) + _install(session, "langchain-anthropic", version) + session.install("langgraph", silent=SILENT_INSTALLS) + _run_tests(session, f"{INTEGRATION_DIR}/langchain/test_langchain.py") + _run_core_tests(session) + + @nox.session() @nox.parametrize("version", GOOGLE_ADK_VERSIONS, ids=GOOGLE_ADK_VERSIONS) def test_google_adk(session, version): @@ -329,6 +344,9 @@ def pylint(session): session.install("opentelemetry.instrumentation.openai") # langsmith is needed for the wrapper module but not in VENDOR_PACKAGES session.install("langsmith") + # langchain deps are needed for the langchain wrapper (langchain-core is in VENDOR_PACKAGES; + # the rest are installed explicitly here for pylint coverage) + session.install("langchain-core", "langchain-openai", "langchain-anthropic", "langgraph") result = session.run("git", "ls-files", "**/*.py", silent=True, log=False) files = [path for path in result.strip().splitlines() if path not in GENERATED_LINT_EXCLUDES] diff --git a/py/src/braintrust/auto.py b/py/src/braintrust/auto.py index 6c15b653..fb40d088 100644 --- a/py/src/braintrust/auto.py +++ b/py/src/braintrust/auto.py @@ -9,7 +9,7 @@ import logging from contextlib import contextmanager -from braintrust.integrations import AnthropicIntegration, IntegrationPatchConfig +from braintrust.integrations import AnthropicIntegration, IntegrationPatchConfig, LangChainIntegration __all__ = ["auto_instrument"] @@ -40,6 +40,7 @@ def auto_instrument( claude_agent_sdk: bool = True, dspy: bool = True, adk: bool = True, + langchain: bool = True, ) -> dict[str, bool]: """ Auto-instrument supported AI/ML libraries for Braintrust tracing. 
@@ -61,6 +62,7 @@ def auto_instrument( claude_agent_sdk: Enable Claude Agent SDK instrumentation (default: True) dspy: Enable DSPy instrumentation (default: True) adk: Enable Google ADK instrumentation (default: True) + langchain: Enable LangChain instrumentation (default: True) Returns: Dict mapping integration name to whether it was successfully instrumented. @@ -117,6 +119,7 @@ def auto_instrument( claude_agent_sdk_enabled = _normalize_bool_option("claude_agent_sdk", claude_agent_sdk) dspy_enabled = _normalize_bool_option("dspy", dspy) adk_enabled = _normalize_bool_option("adk", adk) + langchain_enabled = _normalize_bool_option("langchain", langchain) if openai_enabled: results["openai"] = _instrument_openai() @@ -136,6 +139,8 @@ def auto_instrument( results["dspy"] = _instrument_dspy() if adk_enabled: results["adk"] = _instrument_adk() + if langchain_enabled: + results["langchain"] = _instrument_integration(LangChainIntegration) return results diff --git a/py/src/braintrust/integrations/__init__.py b/py/src/braintrust/integrations/__init__.py index 1dddbd91..74840ad7 100644 --- a/py/src/braintrust/integrations/__init__.py +++ b/py/src/braintrust/integrations/__init__.py @@ -1,5 +1,6 @@ from .anthropic import AnthropicIntegration from .base import IntegrationPatchConfig +from .langchain import LangChainIntegration -__all__ = ["AnthropicIntegration", "IntegrationPatchConfig"] +__all__ = ["AnthropicIntegration", "IntegrationPatchConfig", "LangChainIntegration"] diff --git a/py/src/braintrust/integrations/auto_test_scripts/test_auto_langchain.py b/py/src/braintrust/integrations/auto_test_scripts/test_auto_langchain.py new file mode 100644 index 00000000..a3719ef8 --- /dev/null +++ b/py/src/braintrust/integrations/auto_test_scripts/test_auto_langchain.py @@ -0,0 +1,21 @@ +"""Test auto_instrument for LangChain.""" + +from braintrust.auto import auto_instrument +from braintrust.integrations.langchain import BraintrustCallbackHandler + +# 1. 
Instrument +results = auto_instrument() +assert results.get("langchain") == True, "auto_instrument should return True for langchain" + +# 2. Idempotent +results2 = auto_instrument() +assert results2.get("langchain") == True, "auto_instrument should still return True on second call" + +# 3. Verify that a global handler was registered with LangChain +from langchain_core.callbacks import CallbackManager + +manager = CallbackManager.configure() +handler = next((h for h in manager.handlers if isinstance(h, BraintrustCallbackHandler)), None) +assert handler is not None, "BraintrustCallbackHandler should be registered globally after auto_instrument()" + +print("SUCCESS") diff --git a/py/src/braintrust/integrations/langchain/__init__.py b/py/src/braintrust/integrations/langchain/__init__.py new file mode 100644 index 00000000..9758ca95 --- /dev/null +++ b/py/src/braintrust/integrations/langchain/__init__.py @@ -0,0 +1,47 @@ +"""Braintrust integration for LangChain.""" + +from .integration import LangChainIntegration + + +def setup_langchain() -> bool: + """ + Auto-instrument LangChain for Braintrust tracing. + + Registers a global BraintrustCallbackHandler with LangChain's callback system + so that all chains, LLMs, tools, and retrievers are automatically traced. + + This is called automatically by braintrust.auto_instrument(). It is safe to + call multiple times – subsequent calls are no-ops. + + Returns: + True if setup succeeded, False if langchain_core is not installed. 
+ """ + return LangChainIntegration.setup() + + +# Lazily imported to avoid circular imports at module load time +# (tracing.py imports from braintrust, which must be fully initialized first) +_LAZY_ATTRS = frozenset(["BraintrustCallbackHandler", "BraintrustTracer", "set_global_handler", "clear_global_handler"]) + + +def __getattr__(name: str): + if name in _LAZY_ATTRS: + from .tracing import BraintrustCallbackHandler, BraintrustTracer, clear_global_handler, set_global_handler + + g = globals() + g["BraintrustCallbackHandler"] = BraintrustCallbackHandler + g["BraintrustTracer"] = BraintrustTracer + g["set_global_handler"] = set_global_handler + g["clear_global_handler"] = clear_global_handler + return g[name] + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") + + +__all__ = [ + "LangChainIntegration", + "BraintrustCallbackHandler", + "BraintrustTracer", + "set_global_handler", + "clear_global_handler", + "setup_langchain", +] diff --git a/py/src/braintrust/integrations/langchain/cassettes/test_async_langchain_invoke b/py/src/braintrust/integrations/langchain/cassettes/test_async_langchain_invoke new file mode 100644 index 00000000..3ecc362e --- /dev/null +++ b/py/src/braintrust/integrations/langchain/cassettes/test_async_langchain_invoke @@ -0,0 +1,276 @@ +interactions: +- request: + body: '{"max_tokens": 1024, "messages": [{"role": "user", "content": "What is + 1 + 2?"}], "model": "claude-sonnet-4-20250514"}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + anthropic-version: + - '2023-06-01' + connection: + - keep-alive + content-length: + - '110' + content-type: + - application/json + host: + - api.anthropic.com + user-agent: + - AsyncAnthropic/Python 0.68.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - async:asyncio + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 0.68.0 + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + 
x-stainless-runtime-version: + - 3.11.13 + x-stainless-timeout: + - NOT_GIVEN + method: POST + uri: https://api.anthropic.com/v1/messages + response: + body: + string: !!binary | + H4sIAAAAAAAAAwAAAP//dJBfSwMxEMS/yjGvpnB3bUUCvhR88aEg/gERCTFZ2uDd5kw2Ui333eWK + Rar4tLC/mWGYPYKHRp83pm4ublf5+uWT0/DYP9ysr+7XOx/uoCAfA00qytluCAopdtPD5hyyWBYo + 9NFTBw3X2eJpliMzyWwxa+t2WS+bBRRcZCEW6Kf9MVJoN5kPR6Opzqq2uqzmGJ8VssTBJLI5MjSI + vZGSGN8g01shdgTNpesUyqGa3iPwUMRIfCXO0M25grNuS8YlshIim1NBfeSJrP+PHb1TPg1b6inZ + ziz7v/of2mx/01EhFjlpN1fIlN6DIyOBEjSmPb1NHuP4BQAA//8DABaJlhKdAQAA + headers: + CF-RAY: + - 983cc1f7fda07e2d-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Tue, 23 Sep 2025 20:23:04 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Robots-Tag: + - none + anthropic-organization-id: + - 02af79b5-9b1a-4100-a05f-9235eb38bda4 + cf-cache-status: + - DYNAMIC + request-id: + - req_011CTRxS1WS9ia9upALgfUZK + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + via: + - 1.1 google + x-envoy-upstream-service-time: + - '1030' + status: + code: 200 + message: OK +- request: + body: '{"max_tokens":1024,"messages":[{"role":"user","content":"What is 1 + 2?"}],"model":"claude-sonnet-4-20250514"}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + anthropic-version: + - '2023-06-01' + connection: + - keep-alive + content-length: + - '110' + content-type: + - application/json + host: + - api.anthropic.com + user-agent: + - AsyncAnthropic/Python 0.68.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - async:asyncio + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 0.68.0 + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.10 + x-stainless-timeout: + - NOT_GIVEN + method: POST + uri: https://api.anthropic.com/v1/messages + response: + body: + string: 
!!binary | + H4sIAAAAAAAAAwAAAP//dJDdSgMxEEZfZfluTWG37YoGvLagN0VBRCSEZGhDdydrMimWsu8uWyxS + xauBOWd++I7oo6cOGq6zxdMsR2aS2XI2r+dt3TZLKAQPjT5vTN08397v1qG5WT+Ep/Z19bJfySMf + oCCHgSaLcrYbgkKK3dSwOYcslgUKLrIQC/Tb8ewLfU7kVDSa6qqaV3fVAuO7QpY4mEQ2R4YGsTdS + EuMbZPooxI6guXSdQjnd1UcEHooYiTviDN1cKzjrtmRcIishsrkU6jNPZP1/7Dw77adhSz0l25m2 + /+v/0Gb7m44KscjFdwuFTGkfHBkJlKAxheVt8hjHLwAAAP//AwBHCKHFnQEAAA== + headers: + CF-RAY: + - 99b0eabe4896b976-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 08 Nov 2025 00:22:38 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Robots-Tag: + - none + anthropic-organization-id: + - 27796668-7351-40ac-acc4-024aee8995a5 + anthropic-ratelimit-input-tokens-limit: + - '3000000' + anthropic-ratelimit-input-tokens-remaining: + - '3000000' + anthropic-ratelimit-input-tokens-reset: + - '2025-11-08T00:22:38Z' + anthropic-ratelimit-output-tokens-limit: + - '600000' + anthropic-ratelimit-output-tokens-remaining: + - '600000' + anthropic-ratelimit-output-tokens-reset: + - '2025-11-08T00:22:38Z' + anthropic-ratelimit-tokens-limit: + - '3600000' + anthropic-ratelimit-tokens-remaining: + - '3600000' + anthropic-ratelimit-tokens-reset: + - '2025-11-08T00:22:38Z' + cf-cache-status: + - DYNAMIC + request-id: + - req_011CUuU6hWk8Jg8Bh2c4Vyty + retry-after: + - '23' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-envoy-upstream-service-time: + - '1801' + status: + code: 200 + message: OK +- request: + body: '{"max_tokens":1024,"messages":[{"role":"user","content":"What is 1 + 2?"}],"model":"claude-sonnet-4-20250514"}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + anthropic-version: + - '2023-06-01' + connection: + - keep-alive + content-length: + - '110' + content-type: + - application/json + host: + - api.anthropic.com + user-agent: + - AsyncAnthropic/Python 0.68.0 + x-stainless-arch: + - arm64 + 
x-stainless-async: + - async:asyncio + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 0.68.0 + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.10 + x-stainless-timeout: + - NOT_GIVEN + method: POST + uri: https://api.anthropic.com/v1/messages + response: + body: + string: !!binary | + H4sIAAAAAAAAA3SQTUvDQBCG/0p4r24gaRvRBQ9CDyJ4rBeRZbs7tNFkNu7OBkvJf5cUi1TxNDDP + Mx+8R/TBUwcN19nsqUyBmaRclYtq0VRNvYJC66HRp52p6qd1/7DZ3o8yjIe355v1o9tsm1soyGGg + 2aKU7I6gEEM3N2xKbRLLAgUXWIgF+uV49oU+Z3IqGnVxVSyKu2KJ6VUhSRhMJJsCQ4PYG8mR8Q0S + fWRiR9Ccu04hn+7qI1oeshgJ78QJur5WcNbtybhIVtrA5lKozjyS9f+x8+y8n4Y99RRtZ5r+r/9D + 6/1vOimELBffLRUSxbF1ZKSlCI05LG+jxzR9AQAA//8DAEp7u9udAQAA + headers: + CF-RAY: + - 99b0ebedd90d6897-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 08 Nov 2025 00:23:27 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Robots-Tag: + - none + anthropic-organization-id: + - 27796668-7351-40ac-acc4-024aee8995a5 + anthropic-ratelimit-input-tokens-limit: + - '3000000' + anthropic-ratelimit-input-tokens-remaining: + - '3000000' + anthropic-ratelimit-input-tokens-reset: + - '2025-11-08T00:23:26Z' + anthropic-ratelimit-output-tokens-limit: + - '600000' + anthropic-ratelimit-output-tokens-remaining: + - '600000' + anthropic-ratelimit-output-tokens-reset: + - '2025-11-08T00:23:26Z' + anthropic-ratelimit-tokens-limit: + - '3600000' + anthropic-ratelimit-tokens-remaining: + - '3600000' + anthropic-ratelimit-tokens-reset: + - '2025-11-08T00:23:26Z' + cf-cache-status: + - DYNAMIC + request-id: + - req_011CUuUAHB8QqxGoW7TZyUaz + retry-after: + - '34' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-envoy-upstream-service-time: + - '1851' + status: + code: 200 + message: OK +version: 1 diff --git a/py/src/braintrust/integrations/langchain/cassettes/test_chain_with_memory 
b/py/src/braintrust/integrations/langchain/cassettes/test_chain_with_memory new file mode 100644 index 00000000..88cc8848 --- /dev/null +++ b/py/src/braintrust/integrations/langchain/cassettes/test_chain_with_memory @@ -0,0 +1,332 @@ +interactions: +- request: + body: '{"messages": [{"content": "Assistant: Hello! How can I assist you today? + User: What''s your name?", "role": "user"}], "model": "gpt-4o-mini", "stream": + false}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + connection: + - keep-alive + content-length: + - '149' + content-type: + - application/json + host: + - localhost:8000 + user-agent: + - OpenAI/Python 1.108.2 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.108.2 + x-stainless-raw-response: + - 'true' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.13 + method: POST + uri: http://localhost:8000/v1/proxy/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-CJ3pSTx8NVvtJFY51xvv7gmxKCqAO\",\n \"object\": + \"chat.completion\",\n \"created\": 1758658986,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Assistant: I don't have a personal + name, but you can call me Assistant. 
How can I help you today?\",\n \"refusal\": + null,\n \"annotations\": []\n },\n \"logprobs\": null,\n + \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 24,\n \"completion_tokens\": 23,\n \"total_tokens\": 47,\n \"prompt_tokens_details\": + {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_560af6e559\"\n}\n" + headers: + Access-Control-Allow-Credentials: + - 'true' + Access-Control-Expose-Headers: + - x-bt-cursor,x-bt-found-existing,x-bt-span-id,x-bt-span-export,x-bt-query-plan,x-bt-internal-trace-id + Connection: + - keep-alive + Date: + - Tue, 23 Sep 2025 20:23:06 GMT + Keep-Alive: + - timeout=5 + Transfer-Encoding: + - chunked + Vary: + - Origin + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + cf-ray: + - 983cc206fc1f67ef-SJC + content-type: + - application/json + openai-organization: + - braintrust-data + openai-processing-ms: + - '755' + openai-project: + - proj_vsCSXafhhByzWOThMrJcZiw9 + openai-version: + - '2020-10-01' + set-cookie: + - _cfuvid=TIArUY3FKYo9t2vz5lADo0yFHggpjc9nkMoRBVQYfbA-1758658986949-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-bt-cached: + - MISS + x-bt-internal-trace-id: + - 899e875d60ba290b68341d027600a8fd + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '775' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - '30000' + x-ratelimit-limit-tokens: + - '150000000' + x-ratelimit-remaining-requests: + - '29999' + x-ratelimit-remaining-tokens: + - '149999980' + x-ratelimit-reset-requests: + - 2ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_3b64f4bb78c14e2ea80001681e34611d + status: + 
code: 200 + message: OK +- request: + body: '{"messages":[{"content":"Assistant: Hello! How can I assist you today? + User: What''s your name?","role":"user"}],"model":"gpt-4o-mini","stream":false}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + connection: + - keep-alive + content-length: + - '149' + content-type: + - application/json + cookie: + - __cf_bm=W_Ukgb.mz8e1GW7CfhzN.QQaN09_xQq1uTHm3a.dJdU-1762561359-1.0.1.1-6IrkySxpZaL.1C65iH0iOLFfere0JxHCiasT6bak.RihYFMyJgIz2OuYJqcUey8c5vicjtorNby_Z_GJX.ZMIHa6PyzVrhqgfZZmtnnn.sA; + _cfuvid=jwWMA4k30hLPwBwTSCIdIeS5.m1TkcdYLYTt4YSTZhI-1762561359243-0.0.1.1-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.108.2 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.108.2 + x-stainless-raw-response: + - 'true' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.10 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA4ySTW/bMAyG7/4VhC67xEW+s+YyFAO2ZNhp22HYUBiMRNvaZFGT5KRBkf8+2E5i + d22BXXTgw5fiS/IxARBaiTUIWWKUlTPp+x9fxg/u4yFuv20O9ezD98/7YvPn01fFxeFOjBoF736R + jBfVjeTKGYqabYelJ4zUVJ2sltPFcjJb3LagYkWmkRUupnNOK211Oh1P5+l4lU7entUla0lBrOFn + AgDw2L5Nn1bRg1jDeHSJVBQCFiTW1yQA4dk0EYEh6BDRRjHqoWQbybat3134Grag2L6JUOKeAMGR + D2zRgMWKRrCrIxy5BokWJBoDFcFVfAMbPrRoCyUZ12ZGVnh8N/zXU14HbLzb2pgBQGs5YjO71vH9 + mZyuHg0XzvMu/CMVubY6lJknDGwbPyGyEy09JQD37SzrJ+MRznPlYhb5N7XfTeddOdFvcABnZxg5 + ounj89XohWqZoojahMEuhERZkuqV/eKwVpoHIBl4ft7MS7U739oW/1O+B1KSi6Qy50lp+dRwn+ap + ue/X0q4zbhsWgfxeS8qiJt/sQVGOtemuToRjiFRlubYFeed1d3q5yxbLMeZLWixuRXJK/gIAAP// + AwCouO6tiAMAAA== + headers: + CF-RAY: + - 99b0eacf8822aaac-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 08 Nov 2025 00:22:39 GMT + Server: + - 
cloudflare + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - braintrust-data + openai-processing-ms: + - '628' + openai-project: + - proj_vsCSXafhhByzWOThMrJcZiw9 + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '639' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - '30000' + x-ratelimit-limit-tokens: + - '150000000' + x-ratelimit-remaining-requests: + - '29999' + x-ratelimit-remaining-tokens: + - '149999980' + x-ratelimit-reset-requests: + - 2ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_1009d84201314e5aa9ccdcbafeeac4af + status: + code: 200 + message: OK +- request: + body: '{"messages":[{"content":"Assistant: Hello! How can I assist you today? + User: What''s your name?","role":"user"}],"model":"gpt-4o-mini","stream":false}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + connection: + - keep-alive + content-length: + - '149' + content-type: + - application/json + cookie: + - __cf_bm=.AxQfRhAvElThVl_Qz9zUVdqz_GtBGXwRQ0TVPIg5pc-1762561407-1.0.1.1-klsoMaFKHjzxOrHy2Zfd8Sc76RDHsMXURLAaIzORncnm47NI1MY0BqqBGOEsVXlZb.RdqeqpxzGFhl8DlRDjy.SqRfa2B4zEYdKZqQ2kVB0; + _cfuvid=0ohSoYMS21h1NkHWl4FeeVCp5aK2KHeEjclSm1NY7yY-1762561407934-0.0.1.1-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.108.2 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.108.2 + x-stainless-raw-response: + - 'true' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.10 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + 
H4sIAAAAAAAAAwAAAP//jJLBbtswDIbvfgpO53hIgiRNcxmKAcN6GNbuMmBDYTASHauRRU2imwRF + gb3GXm9PUthp4nTLgF104Mef4k/yMQNQ1qgFKF2h6Dq4/P23L6P18vJm8+Pz1e16m8pPH25u7fbe + fJ1vlmrQKnh5T1oOqrea6+BILPs91pFQqK06upiNp7PRZDjvQM2GXCtbBcknnNfW23w8HE/y4UU+ + mr+oK7aaklrA9wwA4LF72z69oa1awHBwiNSUEq5ILY5JACqyayMKU7JJ0Isa9FCzF/Jd61cHvoBr + MOx///wlUOEDAUIKpG1pNXisaQDLRmDHDWj0oNE5qAmO8jfwkTcduoaKXOgyhQ3u3p3+HKlsErbu + fePcCUDvWbCdXuf57oU8HV06XoXIy/SHVJXW21QVkTCxbx0l4aA6+pQB3HXTbF4NSIXIdZBCeE3d + d+PJvpzqd3gGCgu6Pj6ZD85UKwwJWpdOtqE06opMr+xXh42xfAKyE89/N3Ou9t639av/Kd8DrSkI + mSJEMla/NtynRWov/F9pxxl3DatE8cFqKsRSbPdgqMTG7e9OpV0SqovS+hXFEO3++MpQTGdDLGc0 + nV6q7Cl7BgAA//8DAEMiDgGKAwAA + headers: + CF-RAY: + - 99b0ebffc94fed3b-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 08 Nov 2025 00:23:28 GMT + Server: + - cloudflare + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - braintrust-data + openai-processing-ms: + - '680' + openai-project: + - proj_vsCSXafhhByzWOThMrJcZiw9 + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '708' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - '30000' + x-ratelimit-limit-tokens: + - '150000000' + x-ratelimit-remaining-requests: + - '29999' + x-ratelimit-remaining-tokens: + - '149999980' + x-ratelimit-reset-requests: + - 2ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_e273cb6eb8624df78282659b4a19fffe + status: + code: 200 + message: OK +version: 1 diff --git a/py/src/braintrust/integrations/langchain/cassettes/test_global_handler b/py/src/braintrust/integrations/langchain/cassettes/test_global_handler new file mode 100644 index 00000000..ba9f4fa9 --- /dev/null +++ 
b/py/src/braintrust/integrations/langchain/cassettes/test_global_handler @@ -0,0 +1,225 @@ +interactions: +- request: + body: '{"messages": [{"content": "What is 1 + 2?", "role": "user"}], "model": + "gpt-4o-mini", "frequency_penalty": 0.0, "n": 1, "presence_penalty": 0.0, "stream": + false, "temperature": 1.0, "top_p": 1.0}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + connection: + - keep-alive + content-length: + - '177' + content-type: + - application/json + host: + - localhost:8000 + user-agent: + - OpenAI/Python 1.108.2 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.108.2 + x-stainless-raw-response: + - 'true' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.13 + method: POST + uri: http://localhost:8000/v1/proxy/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-CJ44VUVp2sk1koSWXX64CaLEy1mWy\",\n \"object\": + \"chat.completion\",\n \"created\": 1758659919,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"1 + 2 equals 3.\",\n \"refusal\": + null,\n \"annotations\": []\n },\n \"logprobs\": null,\n + \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 15,\n \"completion_tokens\": 8,\n \"total_tokens\": 23,\n \"prompt_tokens_details\": + {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_560af6e559\"\n}\n" + headers: + Access-Control-Allow-Credentials: + - 'true' + Access-Control-Expose-Headers: + - 
x-bt-cursor,x-bt-found-existing,x-bt-span-id,x-bt-span-export,x-bt-query-plan,x-bt-internal-trace-id + Connection: + - keep-alive + Date: + - Tue, 23 Sep 2025 20:38:40 GMT + Keep-Alive: + - timeout=5 + Transfer-Encoding: + - chunked + Vary: + - Origin + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + cf-ray: + - 983cd8d01c33943a-SJC + content-type: + - application/json + openai-organization: + - braintrust-data + openai-processing-ms: + - '930' + openai-project: + - proj_vsCSXafhhByzWOThMrJcZiw9 + openai-version: + - '2020-10-01' + set-cookie: + - _cfuvid=XPwF0fhMV9JwjYuWwUMNbzPKxvSJ.HOkXEftYzjXRew-1758659920459-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-bt-cached: + - MISS + x-bt-internal-trace-id: + - 93acad0503781eb98ab6ea3412173537 + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '1026' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - '30000' + x-ratelimit-limit-tokens: + - '150000000' + x-ratelimit-remaining-requests: + - '29999' + x-ratelimit-remaining-tokens: + - '149999992' + x-ratelimit-reset-requests: + - 2ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_181413148bbe4814a905514521d6dc34 + status: + code: 200 + message: OK +- request: + body: '{"messages":[{"content":"What is 1 + 2?","role":"user"}],"model":"gpt-4o-mini","frequency_penalty":0.0,"n":1,"presence_penalty":0.0,"stream":false,"temperature":1.0,"top_p":1.0}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + connection: + - keep-alive + content-length: + - '177' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.108.2 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.108.2 + x-stainless-raw-response: + - 'true' + 
x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.10 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAAwAAAP//jJJBb9swDIXv/hUCr4uL2IlTJ9et2y2HHQZ0Q2EoMm1rk0VNoocNRf77 + IDuN3a0FevGBHx/1Hs3HRAjQNRwEqE6y6p1J33/9XH7adscPRXGXnTb7+49fNse7/b2iYylhFRV0 + +o6Kn1Q3inpnkDXZCSuPkjFOzW53ebHLyiwfQU81mihrHadbSnttdZqv8226vk2z8qLuSCsMcBDf + EiGEeBy/0aet8TccxHr1VOkxBNkiHK5NQoAnEysgQ9CBpWVYzVCRZbSj9Uy8E7nAn4M0QWxull0e + myHI6NQOxiyAtJZYxqSjv4cLOV8dGWqdp1P4RwqNtjp0lUcZyMbXA5ODkZ4TIR7G5MOzMOA89Y4r + ph84PpcV0ziY9z3D8sKYWJq5nG9WLwyramSpTVgsDpRUHdazct6yHGpNC5AsIv/v5aXZU2xt27eM + n4FS6BjrynmstXqed27zGI/xtbbrikfDEND/0gor1ujjb6ixkYOZTgTCn8DYV422LXrn9XQnjauK + 3Vo2OyyKPSTn5C8AAAD//wMAcIbFgjUDAAA= + headers: + CF-RAY: + - 99b0f5db9f1cbffc-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 08 Nov 2025 00:30:12 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=vfpKl6dvzcujjwigai_kp7UkNhR2ltT1SwFsT05VrS8-1762561812-1.0.1.1-UAyuy134RWxRUzjbClH59IJarw95du8Dl347lkXcDkbXBBx7vCmRuxRccJQB2f1T6oobZSgBj7O8hdaLY4hef6ypZ2uHUshy880EnptiWEY; + path=/; expires=Sat, 08-Nov-25 01:00:12 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=N6FAUGU_qhcPvlVWdt0kvrpbt1SzTvQ0v29fL2QCNbA-1762561812358-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - braintrust-data + openai-processing-ms: + - '319' + openai-project: + - proj_vsCSXafhhByzWOThMrJcZiw9 + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '489' + x-openai-proxy-wasm: + - v0.1 + 
x-ratelimit-limit-requests: + - '30000' + x-ratelimit-limit-tokens: + - '150000000' + x-ratelimit-remaining-requests: + - '29999' + x-ratelimit-remaining-tokens: + - '149999992' + x-ratelimit-reset-requests: + - 2ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_3e940a310adf4d9a88c8da6b70645bb7 + status: + code: 200 + message: OK +version: 1 diff --git a/py/src/braintrust/integrations/langchain/cassettes/test_langchain_anthropic_integration b/py/src/braintrust/integrations/langchain/cassettes/test_langchain_anthropic_integration new file mode 100644 index 00000000..6c396d02 --- /dev/null +++ b/py/src/braintrust/integrations/langchain/cassettes/test_langchain_anthropic_integration @@ -0,0 +1,300 @@ +interactions: +- request: + body: '{"max_tokens": 1024, "messages": [{"role": "user", "content": "What is + 1 + 2?"}], "model": "claude-sonnet-4-20250514"}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + anthropic-version: + - '2023-06-01' + connection: + - keep-alive + content-length: + - '110' + content-type: + - application/json + host: + - api.anthropic.com + user-agent: + - Anthropic/Python 0.68.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 0.68.0 + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.13 + x-stainless-timeout: + - NOT_GIVEN + method: POST + uri: https://api.anthropic.com/v1/messages + response: + body: + string: !!binary | + H4sIAAAAAAAAAwAAAP//dJBRSwMxEIT/yjGvpnDX9hQDvggegn9AEAkxWdvg3eZMNsVa7r/LFYtU + 8Wlhv5lhmAOCh8aQN6Zu7nZX3IXr+8/X9cNt213vu5fucYSC7EeaVZSz3RAUUuznh805ZLEsUBii + px4arrfF0yJHZpLFerGsl23dNmsouMhCLNBPh1Ok0MdsPh6NprqoltVNtcL0rJAljiaRzZGhQeyN + lMT4BpneC7EjaC59r1CO1fQBgcciRuIbcYZuLhWcdVsyLpGVENmcC+oTT2T9f+zknfNp3NJAyfam + Hf7qf2iz/U0nhVjkrN1KIVPaBUdGAiVozHt6mzym6QsAAP//AwD8n6CUnQEAAA== + headers: + CF-RAY: + - 
983cc09c5f361679-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Tue, 23 Sep 2025 20:22:09 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Robots-Tag: + - none + anthropic-organization-id: + - 02af79b5-9b1a-4100-a05f-9235eb38bda4 + anthropic-ratelimit-input-tokens-limit: + - '30000' + anthropic-ratelimit-input-tokens-remaining: + - '30000' + anthropic-ratelimit-input-tokens-reset: + - '2025-09-23T20:22:09Z' + anthropic-ratelimit-output-tokens-limit: + - '8000' + anthropic-ratelimit-output-tokens-remaining: + - '8000' + anthropic-ratelimit-output-tokens-reset: + - '2025-09-23T20:22:09Z' + anthropic-ratelimit-requests-limit: + - '50' + anthropic-ratelimit-requests-remaining: + - '49' + anthropic-ratelimit-requests-reset: + - '2025-09-23T20:22:09Z' + anthropic-ratelimit-tokens-limit: + - '38000' + anthropic-ratelimit-tokens-remaining: + - '38000' + anthropic-ratelimit-tokens-reset: + - '2025-09-23T20:22:09Z' + cf-cache-status: + - DYNAMIC + request-id: + - req_011CTRxMui53W9h6eXYGxUJb + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + via: + - 1.1 google + x-envoy-upstream-service-time: + - '1110' + status: + code: 200 + message: OK +- request: + body: '{"max_tokens":1024,"messages":[{"role":"user","content":"What is 1 + 2?"}],"model":"claude-sonnet-4-20250514"}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + anthropic-version: + - '2023-06-01' + connection: + - keep-alive + content-length: + - '110' + content-type: + - application/json + host: + - api.anthropic.com + user-agent: + - Anthropic/Python 0.68.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 0.68.0 + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.10 + x-stainless-timeout: + - NOT_GIVEN + 
method: POST + uri: https://api.anthropic.com/v1/messages + response: + body: + string: !!binary | + H4sIAAAAAAAAAwAAAP//dJDLasMwEEV/xdxtFfAjCa2g29JVoetShJCmsRt75Eij9BH878WhpqSl + q4E5Zx7cE4bgqYeG6232tEqBmWS1XtVlvSk31RoKnYfGkHamrB5z87bPfPfwGa+Ph9f7401wL1so + yMdIs0Up2R1BIYZ+btiUuiSWBQousBAL9NNp8YXeZ3IuGlVxVdTFbdFgelZIEkYTyabA0CD2RnJk + fINEh0zsCJpz3yvk8119QsdjFiNhT5ygq62Cs64l4yJZ6QKbS6FceCTr/2PL7LyfxpYGirY3m+Gv + /0Or9jedFEKWi+8ahUTx2Dky0lGExhyWt9Fjmr4AAAD//wMARZkZqp0BAAA= + headers: + CF-RAY: + - 99b0eab2783f1758-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 08 Nov 2025 00:22:36 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Robots-Tag: + - none + anthropic-organization-id: + - 27796668-7351-40ac-acc4-024aee8995a5 + anthropic-ratelimit-input-tokens-limit: + - '3000000' + anthropic-ratelimit-input-tokens-remaining: + - '3000000' + anthropic-ratelimit-input-tokens-reset: + - '2025-11-08T00:22:36Z' + anthropic-ratelimit-output-tokens-limit: + - '600000' + anthropic-ratelimit-output-tokens-remaining: + - '600000' + anthropic-ratelimit-output-tokens-reset: + - '2025-11-08T00:22:36Z' + anthropic-ratelimit-tokens-limit: + - '3600000' + anthropic-ratelimit-tokens-remaining: + - '3600000' + anthropic-ratelimit-tokens-reset: + - '2025-11-08T00:22:36Z' + cf-cache-status: + - DYNAMIC + request-id: + - req_011CUuU6ZRKcH4CRrH5o4j6b + retry-after: + - '24' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-envoy-upstream-service-time: + - '1694' + status: + code: 200 + message: OK +- request: + body: '{"max_tokens":1024,"messages":[{"role":"user","content":"What is 1 + 2?"}],"model":"claude-sonnet-4-20250514"}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + anthropic-version: + - '2023-06-01' + connection: + - keep-alive + content-length: + - '110' + content-type: + - application/json + host: + - api.anthropic.com + 
user-agent: + - Anthropic/Python 0.68.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 0.68.0 + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.10 + x-stainless-timeout: + - NOT_GIVEN + method: POST + uri: https://api.anthropic.com/v1/messages + response: + body: + string: !!binary | + H4sIAAAAAAAAAwAAAP//dJBNS8QwEIb/SnmvptDudhUCHvQkC4oieBEJIRl3y7aTmkz8Kv3v0sVF + VvE0MM8zH7wj+uCpg4brbPZUpsBMUjblolqsqlXdQKH10OjTxlT1fXPxcHXzubwOZ2vKb7fPu7vL + 9Q4K8jHQbFFKdkNQiKGbGzalNollgYILLMQC/TgefKH3meyLRl2cFIvivFhielJIEgYTyabA0CD2 + RnJkfINEL5nYETTnrlPI+7t6RMtDFiNhR5yg61MFZ92WjItkpQ1sjoXqwCNZ/x87zM77adhST9F2 + ZtX/9X9ovf1NJ4WQ5ei7pUKi+No6MtJShMYclrfRY5q+AAAA//8DAAqaanadAQAA + headers: + CF-RAY: + - 99b0ebe2db3d67ca-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 08 Nov 2025 00:23:24 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Robots-Tag: + - none + anthropic-organization-id: + - 27796668-7351-40ac-acc4-024aee8995a5 + anthropic-ratelimit-input-tokens-limit: + - '3000000' + anthropic-ratelimit-input-tokens-remaining: + - '3000000' + anthropic-ratelimit-input-tokens-reset: + - '2025-11-08T00:23:24Z' + anthropic-ratelimit-output-tokens-limit: + - '600000' + anthropic-ratelimit-output-tokens-remaining: + - '600000' + anthropic-ratelimit-output-tokens-reset: + - '2025-11-08T00:23:24Z' + anthropic-ratelimit-tokens-limit: + - '3600000' + anthropic-ratelimit-tokens-remaining: + - '3600000' + anthropic-ratelimit-tokens-reset: + - '2025-11-08T00:23:24Z' + cf-cache-status: + - DYNAMIC + request-id: + - req_011CUuUA9cTfN1Yz5PMKHD5d + retry-after: + - '37' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-envoy-upstream-service-time: + - '1556' + status: + code: 200 + message: OK +version: 1 diff --git 
a/py/src/braintrust/integrations/langchain/cassettes/test_langgraph_state_management b/py/src/braintrust/integrations/langchain/cassettes/test_langgraph_state_management new file mode 100644 index 00000000..20ffc04b --- /dev/null +++ b/py/src/braintrust/integrations/langchain/cassettes/test_langgraph_state_management @@ -0,0 +1,327 @@ +interactions: +- request: + body: '{"messages": [{"content": "Say hello", "role": "user"}], "model": "gpt-4o-mini", + "frequency_penalty": 0.0, "n": 1, "presence_penalty": 0.0, "stream": false, + "temperature": 1.0, "top_p": 1.0}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + connection: + - keep-alive + content-length: + - '172' + content-type: + - application/json + host: + - localhost:8000 + user-agent: + - OpenAI/Python 1.108.2 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.108.2 + x-stainless-raw-response: + - 'true' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.13 + method: POST + uri: http://localhost:8000/v1/proxy/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-CJ3xSBjbTuwYXAmP3RRw0GoHz5Ooy\",\n \"object\": + \"chat.completion\",\n \"created\": 1758659482,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Hello! 
How can I assist you today?\",\n + \ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": + null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 9,\n \"completion_tokens\": 9,\n \"total_tokens\": 18,\n \"prompt_tokens_details\": + {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_51db84afab\"\n}\n" + headers: + Access-Control-Allow-Credentials: + - 'true' + Access-Control-Expose-Headers: + - x-bt-cursor,x-bt-found-existing,x-bt-span-id,x-bt-span-export,x-bt-query-plan,x-bt-internal-trace-id + Connection: + - keep-alive + Date: + - Tue, 23 Sep 2025 20:31:22 GMT + Keep-Alive: + - timeout=5 + Transfer-Encoding: + - chunked + Vary: + - Origin + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + cf-ray: + - 983cce247c46cf2f-SJC + content-type: + - application/json + openai-organization: + - braintrust-data + openai-processing-ms: + - '381' + openai-project: + - proj_vsCSXafhhByzWOThMrJcZiw9 + openai-version: + - '2020-10-01' + set-cookie: + - _cfuvid=Y9Om0gYdHB3h9aUHhUUY9eEia6Y3wmSARFX9Xq907Ho-1758659482810-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-bt-cached: + - MISS + x-bt-internal-trace-id: + - ebcf889942216eb0b613f43f2cdb11b1 + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '397' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - '30000' + x-ratelimit-limit-tokens: + - '150000000' + x-ratelimit-remaining-requests: + - '29999' + x-ratelimit-remaining-tokens: + - '149999995' + x-ratelimit-reset-requests: + - 2ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_75709538073646e4bd7355c91bc2ce52 + status: + 
code: 200 + message: OK +- request: + body: '{"messages":[{"content":"Say hello","role":"user"}],"model":"gpt-4o-mini","frequency_penalty":0.0,"n":1,"presence_penalty":0.0,"stream":false,"temperature":1.0,"top_p":1.0}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + connection: + - keep-alive + content-length: + - '172' + content-type: + - application/json + cookie: + - __cf_bm=W_Ukgb.mz8e1GW7CfhzN.QQaN09_xQq1uTHm3a.dJdU-1762561359-1.0.1.1-6IrkySxpZaL.1C65iH0iOLFfere0JxHCiasT6bak.RihYFMyJgIz2OuYJqcUey8c5vicjtorNby_Z_GJX.ZMIHa6PyzVrhqgfZZmtnnn.sA; + _cfuvid=jwWMA4k30hLPwBwTSCIdIeS5.m1TkcdYLYTt4YSTZhI-1762561359243-0.0.1.1-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.108.2 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.108.2 + x-stainless-raw-response: + - 'true' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.10 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA4xSwW7UMBC95ysGnzcoWbppu5eqqoSKgAu9oKIq8tqTrMHxGHuydKn235GTdpPS + InHxYd685/dm5iEDEEaLNQi1law6b/Or2y/F70p//Pz+UprNzf2uuO2uvrY/d9XN8pNYJAZtvqPi + J9ZbRZ23yIbcCKuAkjGplqfVclWV76pyADrSaBOt9ZyfUN4ZZ/JlsTzJi9O8PHtkb8kojGIN3zIA + gIfhTT6dxnuxhmLxVOkwRtmiWB+bAEQgmypCxmgiS8diMYGKHKMbrF+jtfQGrukXKOngA4wE2FMP + TFruL+bEgE0fZTLvemtngHSOWKbwg+W7R+RwNGmp9YE28S+qaIwzcVsHlJFcMhSZvBjQQwZwNwyj + f5ZP+ECd55rpBw7fnY9qYtrAS4yJpZ3K5dniFa1aI0tj42yUQkm1RT0xp7nLXhuaAdks8Usvr2mP + qY1r/0d+ApRCz6hrH1Ab9Tzv1BYwnee/2o4THgyLiGFnFNZsMKQtaGxkb8ejEXEfGbu6Ma7F4IMZ + L6fx9aoqZFPhanUuskP2BwAA//8DABw5ElFHAwAA + headers: + CF-RAY: + - 99b0eadaea14aaac-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 08 Nov 2025 00:22:41 GMT + Server: + - cloudflare + 
Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - braintrust-data + openai-processing-ms: + - '328' + openai-project: + - proj_vsCSXafhhByzWOThMrJcZiw9 + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '342' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - '30000' + x-ratelimit-limit-tokens: + - '150000000' + x-ratelimit-remaining-requests: + - '29999' + x-ratelimit-remaining-tokens: + - '149999995' + x-ratelimit-reset-requests: + - 2ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_68644fc1eb1a4533b2f98192dc918822 + status: + code: 200 + message: OK +- request: + body: '{"messages":[{"content":"Say hello","role":"user"}],"model":"gpt-4o-mini","frequency_penalty":0.0,"n":1,"presence_penalty":0.0,"stream":false,"temperature":1.0,"top_p":1.0}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + connection: + - keep-alive + content-length: + - '172' + content-type: + - application/json + cookie: + - __cf_bm=.AxQfRhAvElThVl_Qz9zUVdqz_GtBGXwRQ0TVPIg5pc-1762561407-1.0.1.1-klsoMaFKHjzxOrHy2Zfd8Sc76RDHsMXURLAaIzORncnm47NI1MY0BqqBGOEsVXlZb.RdqeqpxzGFhl8DlRDjy.SqRfa2B4zEYdKZqQ2kVB0; + _cfuvid=0ohSoYMS21h1NkHWl4FeeVCp5aK2KHeEjclSm1NY7yY-1762561407934-0.0.1.1-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.108.2 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.108.2 + x-stainless-raw-response: + - 'true' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.10 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + 
H4sIAAAAAAAAAwAAAP//jFLBbtswDL37Kzid48EOkrTJZYcd1g1osRVFDy0KQ5FoW5ssChK9LSjy + 74PsNnbXDthFBz6+p/dIPmYAwmixA6FayarzNv94d13am9ubQJf79nB5W9/Z609XV1+V/BK+iUVi + 0P47Kn5mvVfUeYtsyI2wCigZk2p5tlmuN+Wq2A5ARxptojWe8xXlnXEmXxbLVV6c5eX5E7slozCK + HdxnAACPw5t8Oo2/xQ6KxXOlwxhlg2J3agIQgWyqCBmjiSwdi8UEKnKMbrB+gdbSO7igX6Ckg88w + EuBAPTBpefgwJwas+yiTeddbOwOkc8QyhR8sPzwhx5NJS40PtI9/UUVtnIltFVBGcslQZPJiQI8Z + wMMwjP5FPuEDdZ4rph84fLcd1cS0gdcYE0s7lcvzxRtalUaWxsbZKIWSqkU9Mae5y14bmgHZLPFr + L29pj6mNa/5HfgKUQs+oKx9QG/Uy79QWMJ3nv9pOEx4Mi4jhp1FYscGQtqCxlr0dj0bEQ2Tsqtq4 + BoMPZryc2lfrTSHrDa7XW5Edsz8AAAD//wMAVD8AOUcDAAA= + headers: + CF-RAY: + - 99b0ec0acb26ed3b-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 08 Nov 2025 00:23:30 GMT + Server: + - cloudflare + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - braintrust-data + openai-processing-ms: + - '589' + openai-project: + - proj_vsCSXafhhByzWOThMrJcZiw9 + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '607' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - '30000' + x-ratelimit-limit-tokens: + - '150000000' + x-ratelimit-remaining-requests: + - '29999' + x-ratelimit-remaining-tokens: + - '149999995' + x-ratelimit-reset-requests: + - 2ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_67359745154e404899e3fd81a37cf26a + status: + code: 200 + message: OK +version: 1 diff --git a/py/src/braintrust/integrations/langchain/cassettes/test_llm_calls b/py/src/braintrust/integrations/langchain/cassettes/test_llm_calls new file mode 100644 index 00000000..cea55348 --- /dev/null +++ b/py/src/braintrust/integrations/langchain/cassettes/test_llm_calls @@ -0,0 +1,333 @@ +interactions: +- request: + 
body: '{"messages": [{"content": "What is 1 + 2?", "role": "user"}], "model": + "gpt-4o-mini", "frequency_penalty": 0.0, "n": 1, "presence_penalty": 0.0, "stream": + false, "temperature": 1.0, "top_p": 1.0}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + connection: + - keep-alive + content-length: + - '177' + content-type: + - application/json + host: + - localhost:8000 + user-agent: + - OpenAI/Python 1.108.2 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.108.2 + x-stainless-raw-response: + - 'true' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.13 + method: POST + uri: http://localhost:8000/v1/proxy/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-CJ3pRI2shpJIGYKUU8RFWUyB6W5O1\",\n \"object\": + \"chat.completion\",\n \"created\": 1758658985,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"1 + 2 equals 3.\",\n \"refusal\": + null,\n \"annotations\": []\n },\n \"logprobs\": null,\n + \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 15,\n \"completion_tokens\": 8,\n \"total_tokens\": 23,\n \"prompt_tokens_details\": + {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_560af6e559\"\n}\n" + headers: + Access-Control-Allow-Credentials: + - 'true' + Access-Control-Expose-Headers: + - x-bt-cursor,x-bt-found-existing,x-bt-span-id,x-bt-span-export,x-bt-query-plan,x-bt-internal-trace-id + Connection: + - keep-alive + Date: + - Tue, 23 Sep 2025 20:23:06 GMT + Keep-Alive: + - timeout=5 
+ Transfer-Encoding: + - chunked + Vary: + - Origin + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + cf-ray: + - 983cc2032f2967ef-SJC + content-type: + - application/json + openai-organization: + - braintrust-data + openai-processing-ms: + - '441' + openai-project: + - proj_vsCSXafhhByzWOThMrJcZiw9 + openai-version: + - '2020-10-01' + set-cookie: + - _cfuvid=uhF3qDlYXbYwV7mlgYhl_d7MyPH3FwQHxL6cek.ONAQ-1758658986041-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-bt-cached: + - MISS + x-bt-internal-trace-id: + - f4e0a5413e529acf383233e54ad00e99 + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '454' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - '30000' + x-ratelimit-limit-tokens: + - '150000000' + x-ratelimit-remaining-requests: + - '29999' + x-ratelimit-remaining-tokens: + - '149999995' + x-ratelimit-reset-requests: + - 2ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_023ebefb1f6b4dec8910b8cb4d7421f5 + status: + code: 200 + message: OK +- request: + body: '{"messages":[{"content":"What is 1 + 2?","role":"user"}],"model":"gpt-4o-mini","frequency_penalty":0.0,"n":1,"presence_penalty":0.0,"stream":false,"temperature":1.0,"top_p":1.0}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + connection: + - keep-alive + content-length: + - '177' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.108.2 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.108.2 + x-stainless-raw-response: + - 'true' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.10 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + 
string: !!binary | + H4sIAAAAAAAAAwAAAP//jJI/b9wwDMV3fwqBa8+B7Yudy61dunRJtxaBoZNon1JZVCQ6/RPcdy9k + X85OmgJdPPBHPr1H8zkTAoyGvQB1lKwGb/OPX++KH7eh6XcPvz8Psm++6O1d8+kRnw5+B5s0QYcH + VPwydaVo8BbZkJuxCigZk2p501R1U27r3QQG0mjTWO85v6Z8MM7kVVFd58VNXp7F1ZGMwgh78S0T + Qojn6Zt8Oo0/YS+KzUtlwBhlj7C/NAkBgWyqgIzRRJaOYbNARY7RTdZL8UFUAh9HaaPYXq27AnZj + lMmpG61dAekcsUxJJ3/3Z3K6OLLU+0CH+GYUOuNMPLYBZSSXXo9MHiZ6yoS4n5KPr8KADzR4bpm+ + 4/RcWc9ysOx7gbszY2Jpl3K13bwj1mpkaWxcLQ6UVEfUy+SyZTlqQyuQrSL/7eU97Tm2cf3/yC9A + KfSMuvUBtVGv8y5tAdMx/qvtsuLJMEQMT0ZhywZD+g0aOzna+UQg/oqMQ9sZ12Pwwcx30vm2bgrZ + NVjXt5Cdsj8AAAD//wMAbYrr4zUDAAA= + headers: + CF-RAY: + - 99b0eacc1d35aaac-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 08 Nov 2025 00:22:39 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=W_Ukgb.mz8e1GW7CfhzN.QQaN09_xQq1uTHm3a.dJdU-1762561359-1.0.1.1-6IrkySxpZaL.1C65iH0iOLFfere0JxHCiasT6bak.RihYFMyJgIz2OuYJqcUey8c5vicjtorNby_Z_GJX.ZMIHa6PyzVrhqgfZZmtnnn.sA; + path=/; expires=Sat, 08-Nov-25 00:52:39 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=jwWMA4k30hLPwBwTSCIdIeS5.m1TkcdYLYTt4YSTZhI-1762561359243-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - braintrust-data + openai-processing-ms: + - '300' + openai-project: + - proj_vsCSXafhhByzWOThMrJcZiw9 + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '430' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - '30000' + x-ratelimit-limit-tokens: + - '150000000' + x-ratelimit-remaining-requests: + - '29999' + x-ratelimit-remaining-tokens: + - '149999995' + x-ratelimit-reset-requests: + - 2ms + 
x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_24854ba725b942179830d357f1af2add + status: + code: 200 + message: OK +- request: + body: '{"messages":[{"content":"What is 1 + 2?","role":"user"}],"model":"gpt-4o-mini","frequency_penalty":0.0,"n":1,"presence_penalty":0.0,"stream":false,"temperature":1.0,"top_p":1.0}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + connection: + - keep-alive + content-length: + - '177' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.108.2 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.108.2 + x-stainless-raw-response: + - 'true' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.10 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAAwAAAP//jJJBb9swDIXv/hUEr4sL242TLtdhu+wy7BRsKAxFoh2lsqRKdLGtyH8f + ZKexu3XALj7w46Peo/mcAaBWuAOUR8Gy9yb/8O1reeq+rJ/osdvvf32sDqftA3/+NFC33+MqKdzh + RJJfVDfS9d4Qa2cnLAMJpjS13G6qelOui+0IeqfIJFnnOV+7vNdW51VRrfNim5d3F/XRaUkRd/A9 + AwB4Hr/Jp1X0A3dQrF4qPcUoOsLdtQkAgzOpgiJGHVlYxtUMpbNMdrRewjuogB4HYSLc3iy7ArVD + FMmpHYxZAGGtY5GSjv7uL+R8dWRc54M7xD+k2Gqr47EJJKKz6fXIzuNIzxnA/Zh8eBUGfXC954bd + A43PlfU0Dud9z/DuwtixMHO5ul29MaxRxEKbuFgcSiGPpGblvGUxKO0WIFtE/tvLW7On2Np2/zN+ + BlKSZ1KND6S0fJ13bguUjvFfbdcVj4YxUnjSkhrWFNJvUNSKwUwngvFnZOqbVtuOgg96upPWN/Wm + EO2G6vo9ZufsNwAAAP//AwDHwDA2NQMAAA== + headers: + CF-RAY: + - 99b0ebfc4e5ced3b-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 08 Nov 2025 00:23:27 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=.AxQfRhAvElThVl_Qz9zUVdqz_GtBGXwRQ0TVPIg5pc-1762561407-1.0.1.1-klsoMaFKHjzxOrHy2Zfd8Sc76RDHsMXURLAaIzORncnm47NI1MY0BqqBGOEsVXlZb.RdqeqpxzGFhl8DlRDjy.SqRfa2B4zEYdKZqQ2kVB0; + 
path=/; expires=Sat, 08-Nov-25 00:53:27 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=0ohSoYMS21h1NkHWl4FeeVCp5aK2KHeEjclSm1NY7yY-1762561407934-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - braintrust-data + openai-processing-ms: + - '269' + openai-project: + - proj_vsCSXafhhByzWOThMrJcZiw9 + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '435' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - '30000' + x-ratelimit-limit-tokens: + - '150000000' + x-ratelimit-remaining-requests: + - '29999' + x-ratelimit-remaining-tokens: + - '149999995' + x-ratelimit-reset-requests: + - 2ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_617bc8e11f2a43a98a0658e7e91298fd + status: + code: 200 + message: OK +version: 1 diff --git a/py/src/braintrust/integrations/langchain/cassettes/test_parallel_execution b/py/src/braintrust/integrations/langchain/cassettes/test_parallel_execution new file mode 100644 index 00000000..aec3440e --- /dev/null +++ b/py/src/braintrust/integrations/langchain/cassettes/test_parallel_execution @@ -0,0 +1,234 @@ +interactions: +- request: + body: '{"messages": [{"content": "Tell me a joke about bear", "role": "user"}], + "model": "gpt-4o-mini", "frequency_penalty": 0.0, "n": 1, "presence_penalty": + 0.0, "stream": false, "temperature": 1.0, "top_p": 1.0}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + connection: + - keep-alive + content-length: + - '188' + content-type: + - application/json + host: + - localhost:8000 + user-agent: + - OpenAI/Python 1.108.2 + x-stainless-arch: + - arm64 + x-stainless-async: + - 
'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.108.2 + x-stainless-raw-response: + - 'true' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.13 + method: POST + uri: http://localhost:8000/v1/proxy/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-CJ3vA6tl1z95spYoDxT9RtqqzDF8n\",\n \"object\": + \"chat.completion\",\n \"created\": 1758659340,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Why don\u2019t bears ever get lost?\\n\\nBecause + they always take the bear necessities! \U0001F43B\",\n \"refusal\": + null,\n \"annotations\": []\n },\n \"logprobs\": null,\n + \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 13,\n \"completion_tokens\": 19,\n \"total_tokens\": 32,\n \"prompt_tokens_details\": + {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_51db84afab\"\n}\n" + headers: + Access-Control-Allow-Credentials: + - 'true' + Access-Control-Expose-Headers: + - x-bt-cursor,x-bt-found-existing,x-bt-span-id,x-bt-span-export,x-bt-query-plan,x-bt-internal-trace-id + Connection: + - keep-alive + Date: + - Tue, 23 Sep 2025 20:29:00 GMT + Keep-Alive: + - timeout=5 + Transfer-Encoding: + - chunked + Vary: + - Origin + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + cf-ray: + - 983ccaa98d189e59-SJC + content-type: + - application/json + openai-organization: + - braintrust-data + openai-processing-ms: + - '742' + openai-project: + - proj_vsCSXafhhByzWOThMrJcZiw9 + openai-version: + - '2020-10-01' + set-cookie: + - 
_cfuvid=h4eOl14etTzzF9eOjCE9SDq4Y79ZdPOJeIYnqb.tN3E-1758659340929-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-bt-cached: + - MISS + x-bt-internal-trace-id: + - ba7859db365b14edae0dc1d75360d5cb + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '912' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - '30000' + x-ratelimit-limit-tokens: + - '150000000' + x-ratelimit-remaining-requests: + - '29999' + x-ratelimit-remaining-tokens: + - '149999990' + x-ratelimit-reset-requests: + - 2ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_31748d3aea8d488c9f1b1b7764b3a5d7 + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"content": "write a 2-line poem about bear", "role": "user"}], + "model": "gpt-4o-mini", "frequency_penalty": 0.0, "n": 1, "presence_penalty": + 0.0, "stream": false, "temperature": 1.0, "top_p": 1.0}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + connection: + - keep-alive + content-length: + - '193' + content-type: + - application/json + host: + - localhost:8000 + user-agent: + - OpenAI/Python 1.108.2 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.108.2 + x-stainless-raw-response: + - 'true' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.13 + method: POST + uri: http://localhost:8000/v1/proxy/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-CJ3vAwrz88GjVnlchECG5UbilcrZG\",\n \"object\": + \"chat.completion\",\n \"created\": 1758659340,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"In forest shadows, a bear roams free, + \ \\nMajestic 
guardian of the ancient tree.\",\n \"refusal\": null,\n + \ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": + \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 15,\n \"completion_tokens\": + 19,\n \"total_tokens\": 34,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_560af6e559\"\n}\n" + headers: + Access-Control-Allow-Credentials: + - 'true' + Access-Control-Expose-Headers: + - x-bt-cursor,x-bt-found-existing,x-bt-span-id,x-bt-span-export,x-bt-query-plan,x-bt-internal-trace-id + Connection: + - keep-alive + Date: + - Tue, 23 Sep 2025 20:29:01 GMT + Keep-Alive: + - timeout=5 + Transfer-Encoding: + - chunked + Vary: + - Origin + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + cf-ray: + - 983ccaa99f09cecd-SJC + content-type: + - application/json + openai-organization: + - braintrust-data + openai-processing-ms: + - '909' + openai-project: + - proj_vsCSXafhhByzWOThMrJcZiw9 + openai-version: + - '2020-10-01' + set-cookie: + - _cfuvid=I8TMI8qNGmqspYd_94RtBiCEVRDIffMScd.j_yw35Es-1758659341697-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-bt-cached: + - MISS + x-bt-internal-trace-id: + - 7d350d2a8b4d267107b257e3a1989c5a + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '1375' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - '30000' + x-ratelimit-limit-tokens: + - '150000000' + x-ratelimit-remaining-requests: + - '29999' + x-ratelimit-remaining-tokens: + - '149999990' + x-ratelimit-reset-requests: + - 2ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_891af1935bbf49c39105d7299babb315 + status: 
+ code: 200 + message: OK +version: 1 diff --git a/py/src/braintrust/integrations/langchain/cassettes/test_prompt_caching_tokens b/py/src/braintrust/integrations/langchain/cassettes/test_prompt_caching_tokens new file mode 100644 index 00000000..441128e9 --- /dev/null +++ b/py/src/braintrust/integrations/langchain/cassettes/test_prompt_caching_tokens @@ -0,0 +1,324 @@ +interactions: +- request: + body: '{"max_tokens":1024,"messages":[{"role":"user","content":"What is the first + type of testing mentioned in section 1.2?"}],"model":"claude-sonnet-4-5-20250929","system":[{"type":"text","text":"\n# + Comprehensive Guide to Software Testing Methods!\n\n## Chapter 1: Introduction + to Testing\n\nSoftware testing is a critical component of the software development + lifecycle. It ensures that applications\nfunction correctly, meet requirements, + and provide a positive user experience. This guide covers various\ntesting methodologies, + best practices, and tools used in modern software development.\n\n### 1.1 The + Importance of Testing\n\nTesting helps identify defects early in the development + process, reducing the cost of fixing issues later.\nStudies have shown that + the cost of fixing a bug increases exponentially as it progresses through the\ndevelopment + lifecycle. 
A bug found during requirements gathering might cost $1 to fix, while + the same bug\nfound in production could cost $100 or more.\n\n### 1.2 Types + of Testing\n\nThere are many types of testing, including:\n- Unit Testing: Testing + individual components or functions in isolation\n- Integration Testing: Testing + how components work together\n- End-to-End Testing: Testing the entire application + flow\n- Performance Testing: Testing application speed and scalability\n- Security + Testing: Testing for vulnerabilities and security issues\n- Usability Testing: + Testing user experience and interface design\n\n## Chapter 2: Unit Testing Best + Practices\n\nUnit testing focuses on testing the smallest testable parts of + an application. Here are some best practices:\n\n### 2.1 Write Tests First (TDD)\n\nTest-Driven + Development (TDD) is a methodology where tests are written before the actual + code. The process\nfollows a simple cycle: Red (write a failing test), Green + (write code to pass the test), Refactor (improve\nthe code while keeping tests + passing).\n\n### 2.2 Keep Tests Independent\n\nEach test should be independent + of others. Tests should not rely on the state created by previous tests.\nThis + ensures that tests can be run in any order and that failures are isolated and + easy to debug.\n\n### 2.3 Use Meaningful Names\n\nTest names should clearly + describe what is being tested and what the expected outcome is. A good test + name\nmight be \"test_user_registration_with_valid_email_succeeds\" rather than + just \"test_registration\".\n\n### 2.4 Test Edge Cases\n\nDon''t just test the + happy path. 
Consider edge cases like:\n- Empty inputs\n- Null or undefined values\n- + Very large inputs\n- Invalid formats\n- Boundary conditions\n\n## Chapter 3: + Integration Testing\n\nIntegration testing verifies that different modules or + services work together correctly.\n\n### 3.1 Database Integration\n\nWhen testing + database interactions, consider using:\n- Test databases separate from production\n- + Database transactions that roll back after each test\n- Mock data that represents + realistic scenarios\n\n### 3.2 API Integration\n\nAPI integration tests should + verify:\n- Correct HTTP status codes\n- Response format and schema\n- Error + handling\n- Authentication and authorization\n\n## Chapter 4: Performance Testing\n\nPerformance + testing ensures your application can handle expected load and scale appropriately.\n\n### + 4.1 Load Testing\n\nLoad testing simulates multiple users accessing the application + simultaneously. Key metrics include:\n- Response time under load\n- Throughput + (requests per second)\n- Error rates\n- Resource utilization (CPU, memory, network)\n\n### + 4.2 Stress Testing\n\nStress testing pushes the application beyond normal operational + capacity to find breaking points and\nunderstand how the system fails gracefully.\n\n## + Chapter 5: Continuous Integration and Testing\n\nModern development practices + integrate testing into the CI/CD pipeline.\n\n### 5.1 Automated Test Runs\n\nTests + should run automatically on every code change. This includes:\n- Running unit + tests on every commit\n- Running integration tests on pull requests\n- Running + end-to-end tests before deployment\n\n### 5.2 Test Coverage\n\nTest coverage + metrics help identify untested code. While 100% coverage isn''t always practical + or necessary,\nmaintaining good coverage helps ensure code quality. 
Focus on + critical paths and business logic.\n\n## Chapter 6: Testing Tools and Frameworks\n\nMany + tools exist to support testing efforts:\n\n### 6.1 Python Testing\n- pytest: + Feature-rich testing framework\n- unittest: Built-in Python testing module\n- + mock: Library for mocking objects\n\n### 6.2 JavaScript Testing\n- Jest: Popular + testing framework\n- Mocha: Flexible testing framework\n- Cypress: End-to-end + testing tool\n\n### 6.3 Other Tools\n- Selenium: Browser automation\n- JMeter: + Performance testing\n- Postman: API testing\n\n## Conclusion\n\nEffective testing + is essential for delivering high-quality software. By following best practices + and using\nappropriate tools, teams can catch bugs early, improve code quality, + and deliver better products to users.\n\nRemember: Testing is not just about + finding bugs, it''s about building confidence in your code.\n","cache_control":{"type":"ephemeral"}}]}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + anthropic-version: + - '2023-06-01' + connection: + - keep-alive + content-length: + - '5160' + content-type: + - application/json + host: + - api.anthropic.com + user-agent: + - Anthropic/Python 0.76.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 0.76.0 + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.10.19 + x-stainless-timeout: + - NOT_GIVEN + method: POST + uri: https://api.anthropic.com/v1/messages + response: + body: + string: !!binary | + H4sIAAAAAAAA/22RzU7rMBCFX8WaZZWiJGqvaHYXsQIhsaBsKIqMPW0sEjt4xgVU9d0ZFyp+V4nn + fHOOZ7yDIVjsoQHT62RxSsF75OlsOp/WZT0vF/UCCnBWiIE2bVndrsO/s+1ptYzXl+fz04tN0nZ5 + JQy/jpgpJNIblEIMfS5oIkesPUvJBM8of83d7sgzvmTl8GngvzEhWuc3ioMiNOyCV9VJrVZwIzyp + sFY3SCxEsQLFHaq1i8Qqu2WR30U1SIz0olWO1GSy9I6PjZNJoZ47Z7osWSQT3YNwmnLIR7vz1m2d + 
TbpXJgyjGHmW8KjWyR8uRYJIf+h1Pp2sAPb3BRCHsY2oZYsyDHrbcooePgTCp4TeyNQ+9X0B6bCo + ZgfOj4lbDo/oCZq6lEVp02FrxCrbt9+BqpwtjogQ9of8qz1H4NjhgFH37Xz40+4TqLqfhvsCQuKv + pZmEEMatM9iywyjT5je2OlrY798Az+eCZFYCAAA= + headers: + CF-RAY: + - 9c1a60c71c9c67cb-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Wed, 21 Jan 2026 22:51:47 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Robots-Tag: + - none + anthropic-organization-id: + - 27796668-7351-40ac-acc4-024aee8995a5 + anthropic-ratelimit-input-tokens-limit: + - '3000000' + anthropic-ratelimit-input-tokens-remaining: + - '3000000' + anthropic-ratelimit-input-tokens-reset: + - '2026-01-21T22:51:46Z' + anthropic-ratelimit-output-tokens-limit: + - '600000' + anthropic-ratelimit-output-tokens-remaining: + - '600000' + anthropic-ratelimit-output-tokens-reset: + - '2026-01-21T22:51:47Z' + anthropic-ratelimit-tokens-limit: + - '3600000' + anthropic-ratelimit-tokens-remaining: + - '3600000' + anthropic-ratelimit-tokens-reset: + - '2026-01-21T22:51:46Z' + cf-cache-status: + - DYNAMIC + request-id: + - req_011CXMLqXaFZ4xWZExkXJyyb + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-envoy-upstream-service-time: + - '2088' + status: + code: 200 + message: OK +- request: + body: '{"max_tokens":1024,"messages":[{"role":"user","content":"What is the first + type of testing mentioned in section 1.2?"},{"role":"assistant","content":"According + to section 1.2 \"Types of Testing,\" the first type of testing mentioned is + **Unit Testing**, which is described as \"Testing individual components or functions + in isolation.\""},{"role":"user","content":"What testing framework is mentioned + for Python?"}],"model":"claude-sonnet-4-5-20250929","system":[{"type":"text","text":"\n# + Comprehensive Guide to Software Testing Methods!\n\n## Chapter 1: Introduction + to Testing\n\nSoftware testing is a critical component of the software development + 
lifecycle. It ensures that applications\nfunction correctly, meet requirements, + and provide a positive user experience. This guide covers various\ntesting methodologies, + best practices, and tools used in modern software development.\n\n### 1.1 The + Importance of Testing\n\nTesting helps identify defects early in the development + process, reducing the cost of fixing issues later.\nStudies have shown that + the cost of fixing a bug increases exponentially as it progresses through the\ndevelopment + lifecycle. A bug found during requirements gathering might cost $1 to fix, while + the same bug\nfound in production could cost $100 or more.\n\n### 1.2 Types + of Testing\n\nThere are many types of testing, including:\n- Unit Testing: Testing + individual components or functions in isolation\n- Integration Testing: Testing + how components work together\n- End-to-End Testing: Testing the entire application + flow\n- Performance Testing: Testing application speed and scalability\n- Security + Testing: Testing for vulnerabilities and security issues\n- Usability Testing: + Testing user experience and interface design\n\n## Chapter 2: Unit Testing Best + Practices\n\nUnit testing focuses on testing the smallest testable parts of + an application. Here are some best practices:\n\n### 2.1 Write Tests First (TDD)\n\nTest-Driven + Development (TDD) is a methodology where tests are written before the actual + code. The process\nfollows a simple cycle: Red (write a failing test), Green + (write code to pass the test), Refactor (improve\nthe code while keeping tests + passing).\n\n### 2.2 Keep Tests Independent\n\nEach test should be independent + of others. Tests should not rely on the state created by previous tests.\nThis + ensures that tests can be run in any order and that failures are isolated and + easy to debug.\n\n### 2.3 Use Meaningful Names\n\nTest names should clearly + describe what is being tested and what the expected outcome is. 
A good test + name\nmight be \"test_user_registration_with_valid_email_succeeds\" rather than + just \"test_registration\".\n\n### 2.4 Test Edge Cases\n\nDon''t just test the + happy path. Consider edge cases like:\n- Empty inputs\n- Null or undefined values\n- + Very large inputs\n- Invalid formats\n- Boundary conditions\n\n## Chapter 3: + Integration Testing\n\nIntegration testing verifies that different modules or + services work together correctly.\n\n### 3.1 Database Integration\n\nWhen testing + database interactions, consider using:\n- Test databases separate from production\n- + Database transactions that roll back after each test\n- Mock data that represents + realistic scenarios\n\n### 3.2 API Integration\n\nAPI integration tests should + verify:\n- Correct HTTP status codes\n- Response format and schema\n- Error + handling\n- Authentication and authorization\n\n## Chapter 4: Performance Testing\n\nPerformance + testing ensures your application can handle expected load and scale appropriately.\n\n### + 4.1 Load Testing\n\nLoad testing simulates multiple users accessing the application + simultaneously. Key metrics include:\n- Response time under load\n- Throughput + (requests per second)\n- Error rates\n- Resource utilization (CPU, memory, network)\n\n### + 4.2 Stress Testing\n\nStress testing pushes the application beyond normal operational + capacity to find breaking points and\nunderstand how the system fails gracefully.\n\n## + Chapter 5: Continuous Integration and Testing\n\nModern development practices + integrate testing into the CI/CD pipeline.\n\n### 5.1 Automated Test Runs\n\nTests + should run automatically on every code change. This includes:\n- Running unit + tests on every commit\n- Running integration tests on pull requests\n- Running + end-to-end tests before deployment\n\n### 5.2 Test Coverage\n\nTest coverage + metrics help identify untested code. 
While 100% coverage isn''t always practical + or necessary,\nmaintaining good coverage helps ensure code quality. Focus on + critical paths and business logic.\n\n## Chapter 6: Testing Tools and Frameworks\n\nMany + tools exist to support testing efforts:\n\n### 6.1 Python Testing\n- pytest: + Feature-rich testing framework\n- unittest: Built-in Python testing module\n- + mock: Library for mocking objects\n\n### 6.2 JavaScript Testing\n- Jest: Popular + testing framework\n- Mocha: Flexible testing framework\n- Cypress: End-to-end + testing tool\n\n### 6.3 Other Tools\n- Selenium: Browser automation\n- JMeter: + Performance testing\n- Postman: API testing\n\n## Conclusion\n\nEffective testing + is essential for delivering high-quality software. By following best practices + and using\nappropriate tools, teams can catch bugs early, improve code quality, + and deliver better products to users.\n\nRemember: Testing is not just about + finding bugs, it''s about building confidence in your code.\n","cache_control":{"type":"ephemeral"}}]}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + anthropic-version: + - '2023-06-01' + connection: + - keep-alive + content-length: + - '5456' + content-type: + - application/json + host: + - api.anthropic.com + user-agent: + - Anthropic/Python 0.76.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 0.76.0 + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.10.19 + x-stainless-timeout: + - NOT_GIVEN + method: POST + uri: https://api.anthropic.com/v1/messages + response: + body: + string: !!binary | + H4sIAAAAAAAA/2VSzU7DMAx+lSjHqUXtYEzrDYQQB5A4IA2JoiokZg1rnZI4wDTt3XHK/zgl+X4c + +0u2sncGOllJ3aloIA8OESg/ymf5tJjOisV0ITNpDSv6sGqKsqSr5fX87OVC6RbncblY3y7PkDW0 + GSCpIAS1Aga86xKgQrCBFBJD2iEB76q77Zee4C0x41LJE62dNxZXgpwIoMk6FMcHpajl9YZaPtxA + 
IOazWgpqPYCgD0A8etXDq/PrIJQH0fM9bAYjHp0XH+aqxhrLAzGZDJtkm0xELs5BUfSQe6vb/8Vq + nCZ9REvfjtNoO8otflb9NnGUsYMaD5Ojd3o9qi/tg1d+M7aRwKR0D088W5C7+0wGckPjQXHyHACg + abidFOhIBHiOgJqTwth1mYxjuNVWWhwiNeTWgEFW85LD5ReBRnOpNHjzV1B88UybPa4sjhb79nQF + DC304FXXzPr/5X7Yst1nd5l0kX5Dx+wI4F+shoYseB41fQqjvJG73TumCh7LhwIAAA== + headers: + CF-RAY: + - 9c1a60d4ab5e67cb-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Wed, 21 Jan 2026 22:51:49 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Robots-Tag: + - none + anthropic-organization-id: + - 27796668-7351-40ac-acc4-024aee8995a5 + anthropic-ratelimit-input-tokens-limit: + - '3000000' + anthropic-ratelimit-input-tokens-remaining: + - '3000000' + anthropic-ratelimit-input-tokens-reset: + - '2026-01-21T22:51:48Z' + anthropic-ratelimit-output-tokens-limit: + - '600000' + anthropic-ratelimit-output-tokens-remaining: + - '600000' + anthropic-ratelimit-output-tokens-reset: + - '2026-01-21T22:51:49Z' + anthropic-ratelimit-tokens-limit: + - '3600000' + anthropic-ratelimit-tokens-remaining: + - '3600000' + anthropic-ratelimit-tokens-reset: + - '2026-01-21T22:51:48Z' + cf-cache-status: + - DYNAMIC + request-id: + - req_011CXMLqgrrchykwCdY7YRKM + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-envoy-upstream-service-time: + - '2016' + status: + code: 200 + message: OK +version: 1 diff --git a/py/src/braintrust/integrations/langchain/cassettes/test_streaming_ttft b/py/src/braintrust/integrations/langchain/cassettes/test_streaming_ttft new file mode 100644 index 00000000..1ee7a837 --- /dev/null +++ b/py/src/braintrust/integrations/langchain/cassettes/test_streaming_ttft @@ -0,0 +1,298 @@ +interactions: +- request: + body: '{"messages":[{"content":"Count from 1 to 5.","role":"user"}],"model":"gpt-4o-mini","max_completion_tokens":50,"stream":true}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + 
connection: + - keep-alive + content-length: + - '124' + content-type: + - application/json + cookie: + - __cf_bm=W_Ukgb.mz8e1GW7CfhzN.QQaN09_xQq1uTHm3a.dJdU-1762561359-1.0.1.1-6IrkySxpZaL.1C65iH0iOLFfere0JxHCiasT6bak.RihYFMyJgIz2OuYJqcUey8c5vicjtorNby_Z_GJX.ZMIHa6PyzVrhqgfZZmtnnn.sA; + _cfuvid=jwWMA4k30hLPwBwTSCIdIeS5.m1TkcdYLYTt4YSTZhI-1762561359243-0.0.1.1-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.108.2 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.108.2 + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.10 + method: POST + uri: http://localhost:8000/v1/proxy/chat/completions + response: + body: + string: 'data: {"id":"chatcmpl-CZR0zJXGi0lsnYkPoiga2R6HChxps","object":"chat.completion.chunk","created":1762561361,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_560af6e559","choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}],"obfuscation":"uoycSw"} + + + data: {"id":"chatcmpl-CZR0zJXGi0lsnYkPoiga2R6HChxps","object":"chat.completion.chunk","created":1762561361,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_560af6e559","choices":[{"index":0,"delta":{"content":"1"},"logprobs":null,"finish_reason":null}],"obfuscation":"7R9sCOG"} + + + data: {"id":"chatcmpl-CZR0zJXGi0lsnYkPoiga2R6HChxps","object":"chat.completion.chunk","created":1762561361,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_560af6e559","choices":[{"index":0,"delta":{"content":","},"logprobs":null,"finish_reason":null}],"obfuscation":"jNZOnCU"} + + + data: 
{"id":"chatcmpl-CZR0zJXGi0lsnYkPoiga2R6HChxps","object":"chat.completion.chunk","created":1762561361,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_560af6e559","choices":[{"index":0,"delta":{"content":" + "},"logprobs":null,"finish_reason":null}],"obfuscation":"NTkR0fq"} + + + data: {"id":"chatcmpl-CZR0zJXGi0lsnYkPoiga2R6HChxps","object":"chat.completion.chunk","created":1762561361,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_560af6e559","choices":[{"index":0,"delta":{"content":"2"},"logprobs":null,"finish_reason":null}],"obfuscation":"KhfgFBA"} + + + data: {"id":"chatcmpl-CZR0zJXGi0lsnYkPoiga2R6HChxps","object":"chat.completion.chunk","created":1762561361,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_560af6e559","choices":[{"index":0,"delta":{"content":","},"logprobs":null,"finish_reason":null}],"obfuscation":"u5zk4uv"} + + + data: {"id":"chatcmpl-CZR0zJXGi0lsnYkPoiga2R6HChxps","object":"chat.completion.chunk","created":1762561361,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_560af6e559","choices":[{"index":0,"delta":{"content":" + "},"logprobs":null,"finish_reason":null}],"obfuscation":"yQyBcA4"} + + + data: {"id":"chatcmpl-CZR0zJXGi0lsnYkPoiga2R6HChxps","object":"chat.completion.chunk","created":1762561361,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_560af6e559","choices":[{"index":0,"delta":{"content":"3"},"logprobs":null,"finish_reason":null}],"obfuscation":"HhGcZch"} + + + data: {"id":"chatcmpl-CZR0zJXGi0lsnYkPoiga2R6HChxps","object":"chat.completion.chunk","created":1762561361,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_560af6e559","choices":[{"index":0,"delta":{"content":","},"logprobs":null,"finish_reason":null}],"obfuscation":"GNLE7Ci"} + + + data: 
{"id":"chatcmpl-CZR0zJXGi0lsnYkPoiga2R6HChxps","object":"chat.completion.chunk","created":1762561361,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_560af6e559","choices":[{"index":0,"delta":{"content":" + "},"logprobs":null,"finish_reason":null}],"obfuscation":"d0EKjlZ"} + + + data: {"id":"chatcmpl-CZR0zJXGi0lsnYkPoiga2R6HChxps","object":"chat.completion.chunk","created":1762561361,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_560af6e559","choices":[{"index":0,"delta":{"content":"4"},"logprobs":null,"finish_reason":null}],"obfuscation":"YytmIuX"} + + + data: {"id":"chatcmpl-CZR0zJXGi0lsnYkPoiga2R6HChxps","object":"chat.completion.chunk","created":1762561361,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_560af6e559","choices":[{"index":0,"delta":{"content":","},"logprobs":null,"finish_reason":null}],"obfuscation":"Umbehc1"} + + + data: {"id":"chatcmpl-CZR0zJXGi0lsnYkPoiga2R6HChxps","object":"chat.completion.chunk","created":1762561361,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_560af6e559","choices":[{"index":0,"delta":{"content":" + "},"logprobs":null,"finish_reason":null}],"obfuscation":"3xi8C7o"} + + + data: {"id":"chatcmpl-CZR0zJXGi0lsnYkPoiga2R6HChxps","object":"chat.completion.chunk","created":1762561361,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_560af6e559","choices":[{"index":0,"delta":{"content":"5"},"logprobs":null,"finish_reason":null}],"obfuscation":"N0uOsTp"} + + + data: {"id":"chatcmpl-CZR0zJXGi0lsnYkPoiga2R6HChxps","object":"chat.completion.chunk","created":1762561361,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_560af6e559","choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}],"obfuscation":"RilMN7a"} + + + data: 
{"id":"chatcmpl-CZR0zJXGi0lsnYkPoiga2R6HChxps","object":"chat.completion.chunk","created":1762561361,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_560af6e559","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}],"obfuscation":"oF"} + + + data: [DONE] + + + ' + headers: + CF-RAY: + - 99b0eaddeca8aaac-SJC + Connection: + - keep-alive + Content-Type: + - text/event-stream; charset=utf-8 + Date: + - Sat, 08 Nov 2025 00:22:42 GMT + Server: + - cloudflare + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - braintrust-data + openai-processing-ms: + - '275' + openai-project: + - proj_vsCSXafhhByzWOThMrJcZiw9 + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '519' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - '30000' + x-ratelimit-limit-tokens: + - '150000000' + x-ratelimit-remaining-requests: + - '29999' + x-ratelimit-remaining-tokens: + - '149999992' + x-ratelimit-reset-requests: + - 2ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_05aebff8dd644228befd59a7372d3c93 + status: + code: 200 + message: OK +- request: + body: '{"messages":[{"content":"Count from 1 to 5.","role":"user"}],"model":"gpt-4o-mini","max_completion_tokens":50,"stream":true}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + connection: + - keep-alive + content-length: + - '124' + content-type: + - application/json + cookie: + - __cf_bm=.AxQfRhAvElThVl_Qz9zUVdqz_GtBGXwRQ0TVPIg5pc-1762561407-1.0.1.1-klsoMaFKHjzxOrHy2Zfd8Sc76RDHsMXURLAaIzORncnm47NI1MY0BqqBGOEsVXlZb.RdqeqpxzGFhl8DlRDjy.SqRfa2B4zEYdKZqQ2kVB0; + _cfuvid=0ohSoYMS21h1NkHWl4FeeVCp5aK2KHeEjclSm1NY7yY-1762561407934-0.0.1.1-604800000 + host: + - api.openai.com + 
user-agent: + - OpenAI/Python 1.108.2 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.108.2 + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.10 + method: POST + uri: http://localhost:8000/v1/proxy/chat/completions + response: + body: + string: 'data: {"id":"chatcmpl-CZR1mouRDQnH9qWlT2zp6Fs0nW1Uq","object":"chat.completion.chunk","created":1762561410,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_560af6e559","choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}],"obfuscation":"ov7JiI"} + + + data: {"id":"chatcmpl-CZR1mouRDQnH9qWlT2zp6Fs0nW1Uq","object":"chat.completion.chunk","created":1762561410,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_560af6e559","choices":[{"index":0,"delta":{"content":"1"},"logprobs":null,"finish_reason":null}],"obfuscation":"eXpmCqg"} + + + data: {"id":"chatcmpl-CZR1mouRDQnH9qWlT2zp6Fs0nW1Uq","object":"chat.completion.chunk","created":1762561410,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_560af6e559","choices":[{"index":0,"delta":{"content":","},"logprobs":null,"finish_reason":null}],"obfuscation":"C8QZXu8"} + + + data: {"id":"chatcmpl-CZR1mouRDQnH9qWlT2zp6Fs0nW1Uq","object":"chat.completion.chunk","created":1762561410,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_560af6e559","choices":[{"index":0,"delta":{"content":" + "},"logprobs":null,"finish_reason":null}],"obfuscation":"xdqGFpo"} + + + data: 
{"id":"chatcmpl-CZR1mouRDQnH9qWlT2zp6Fs0nW1Uq","object":"chat.completion.chunk","created":1762561410,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_560af6e559","choices":[{"index":0,"delta":{"content":"2"},"logprobs":null,"finish_reason":null}],"obfuscation":"O3SLgWG"} + + + data: {"id":"chatcmpl-CZR1mouRDQnH9qWlT2zp6Fs0nW1Uq","object":"chat.completion.chunk","created":1762561410,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_560af6e559","choices":[{"index":0,"delta":{"content":","},"logprobs":null,"finish_reason":null}],"obfuscation":"0aoEi42"} + + + data: {"id":"chatcmpl-CZR1mouRDQnH9qWlT2zp6Fs0nW1Uq","object":"chat.completion.chunk","created":1762561410,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_560af6e559","choices":[{"index":0,"delta":{"content":" + "},"logprobs":null,"finish_reason":null}],"obfuscation":"2oO8rJa"} + + + data: {"id":"chatcmpl-CZR1mouRDQnH9qWlT2zp6Fs0nW1Uq","object":"chat.completion.chunk","created":1762561410,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_560af6e559","choices":[{"index":0,"delta":{"content":"3"},"logprobs":null,"finish_reason":null}],"obfuscation":"jOHTEGa"} + + + data: {"id":"chatcmpl-CZR1mouRDQnH9qWlT2zp6Fs0nW1Uq","object":"chat.completion.chunk","created":1762561410,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_560af6e559","choices":[{"index":0,"delta":{"content":","},"logprobs":null,"finish_reason":null}],"obfuscation":"qGeoxr1"} + + + data: {"id":"chatcmpl-CZR1mouRDQnH9qWlT2zp6Fs0nW1Uq","object":"chat.completion.chunk","created":1762561410,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_560af6e559","choices":[{"index":0,"delta":{"content":" + "},"logprobs":null,"finish_reason":null}],"obfuscation":"uvMar7j"} + + + data: 
{"id":"chatcmpl-CZR1mouRDQnH9qWlT2zp6Fs0nW1Uq","object":"chat.completion.chunk","created":1762561410,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_560af6e559","choices":[{"index":0,"delta":{"content":"4"},"logprobs":null,"finish_reason":null}],"obfuscation":"4dFvFfq"} + + + data: {"id":"chatcmpl-CZR1mouRDQnH9qWlT2zp6Fs0nW1Uq","object":"chat.completion.chunk","created":1762561410,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_560af6e559","choices":[{"index":0,"delta":{"content":","},"logprobs":null,"finish_reason":null}],"obfuscation":"GdoZztm"} + + + data: {"id":"chatcmpl-CZR1mouRDQnH9qWlT2zp6Fs0nW1Uq","object":"chat.completion.chunk","created":1762561410,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_560af6e559","choices":[{"index":0,"delta":{"content":" + "},"logprobs":null,"finish_reason":null}],"obfuscation":"NHxpCPR"} + + + data: {"id":"chatcmpl-CZR1mouRDQnH9qWlT2zp6Fs0nW1Uq","object":"chat.completion.chunk","created":1762561410,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_560af6e559","choices":[{"index":0,"delta":{"content":"5"},"logprobs":null,"finish_reason":null}],"obfuscation":"mfV8KdT"} + + + data: {"id":"chatcmpl-CZR1mouRDQnH9qWlT2zp6Fs0nW1Uq","object":"chat.completion.chunk","created":1762561410,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_560af6e559","choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}],"obfuscation":"EkPlssM"} + + + data: {"id":"chatcmpl-CZR1mouRDQnH9qWlT2zp6Fs0nW1Uq","object":"chat.completion.chunk","created":1762561410,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_560af6e559","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}],"obfuscation":"fj"} + + + data: [DONE] + + + ' + headers: + CF-RAY: + - 99b0ec0f7961ed3b-SJC + Connection: + - keep-alive + Content-Type: 
+ - text/event-stream; charset=utf-8 + Date: + - Sat, 08 Nov 2025 00:23:30 GMT + Server: + - cloudflare + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - braintrust-data + openai-processing-ms: + - '149' + openai-project: + - proj_vsCSXafhhByzWOThMrJcZiw9 + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '171' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - '30000' + x-ratelimit-limit-tokens: + - '150000000' + x-ratelimit-remaining-requests: + - '29999' + x-ratelimit-remaining-tokens: + - '149999992' + x-ratelimit-reset-requests: + - 2ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_8afec9e4717b433e9c6900220b2dbd93 + status: + code: 200 + message: OK +version: 1 diff --git a/py/src/braintrust/integrations/langchain/cassettes/test_tool_usage b/py/src/braintrust/integrations/langchain/cassettes/test_tool_usage new file mode 100644 index 00000000..e21d44cc --- /dev/null +++ b/py/src/braintrust/integrations/langchain/cassettes/test_tool_usage @@ -0,0 +1,350 @@ +interactions: +- request: + body: '{"messages": [{"content": "What is 3 * 12", "role": "user"}], "model": + "gpt-4o-mini", "frequency_penalty": 0.0, "n": 1, "presence_penalty": 0.0, "stream": + false, "temperature": 1.0, "tools": [{"type": "function", "function": {"name": + "calculator", "description": "Can perform mathematical operations.", "parameters": + {"properties": {"input": {"properties": {"operation": {"description": "The type + of operation to execute.", "enum": ["add", "subtract", "multiply", "divide"], + "type": "string"}, "number1": {"description": "The first number to operate on.", + "type": "number"}, "number2": {"description": "The second number to operate + on.", "type": "number"}}, "required": 
["operation", "number1", "number2"], "type": + "object"}}, "required": ["input"], "type": "object"}}}], "top_p": 1.0}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + connection: + - keep-alive + content-length: + - '725' + content-type: + - application/json + host: + - localhost:8000 + user-agent: + - OpenAI/Python 1.108.2 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.108.2 + x-stainless-raw-response: + - 'true' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.13 + method: POST + uri: http://localhost:8000/v1/proxy/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-CJ3pT0xTT4C4WwCqA5bvyrihLFrbd\",\n \"object\": + \"chat.completion\",\n \"created\": 1758658987,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n + \ \"id\": \"call_faZyqlGfMGsX50e2EuExUqK0\",\n \"type\": + \"function\",\n \"function\": {\n \"name\": \"calculator\",\n + \ \"arguments\": \"{\\\"input\\\":{\\\"operation\\\":\\\"multiply\\\",\\\"number1\\\":3,\\\"number2\\\":12}}\"\n + \ }\n }\n ],\n \"refusal\": null,\n \"annotations\": + []\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n + \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 97,\n \"completion_tokens\": + 26,\n \"total_tokens\": 123,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_51db84afab\"\n}\n" + headers: + Access-Control-Allow-Credentials: + - 'true' + Access-Control-Expose-Headers: + - 
x-bt-cursor,x-bt-found-existing,x-bt-span-id,x-bt-span-export,x-bt-query-plan,x-bt-internal-trace-id + Connection: + - keep-alive + Date: + - Tue, 23 Sep 2025 20:23:07 GMT + Keep-Alive: + - timeout=5 + Transfer-Encoding: + - chunked + Vary: + - Origin + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + cf-ray: + - 983cc20cabc267ef-SJC + content-type: + - application/json + openai-organization: + - braintrust-data + openai-processing-ms: + - '648' + openai-project: + - proj_vsCSXafhhByzWOThMrJcZiw9 + openai-version: + - '2020-10-01' + set-cookie: + - _cfuvid=inx7Y1lMFCkI1jONo8plrYH7k2d1EAvkr2WlMIyrK.s-1758658987739-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-bt-cached: + - MISS + x-bt-internal-trace-id: + - 475d214543543ac965368ac2a190850f + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '663' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - '30000' + x-ratelimit-limit-tokens: + - '150000000' + x-ratelimit-remaining-requests: + - '29999' + x-ratelimit-remaining-tokens: + - '149999992' + x-ratelimit-reset-requests: + - 2ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_f6bcef66199c4bcaa6ad5864f7d1d9fb + status: + code: 200 + message: OK +- request: + body: '{"messages":[{"content":"What is 3 * 12","role":"user"}],"model":"gpt-4o-mini","frequency_penalty":0.0,"n":1,"presence_penalty":0.0,"stream":false,"temperature":1.0,"tools":[{"type":"function","function":{"name":"calculator","description":"Can + perform mathematical operations.","parameters":{"properties":{"input":{"properties":{"operation":{"description":"The + type of operation to execute.","enum":["add","subtract","multiply","divide"],"type":"string"},"number1":{"description":"The + first number to operate on.","type":"number"},"number2":{"description":"The + second number to operate 
on.","type":"number"}},"required":["operation","number1","number2"],"type":"object"}},"required":["input"],"type":"object"}}}],"top_p":1.0}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + connection: + - keep-alive + content-length: + - '725' + content-type: + - application/json + cookie: + - __cf_bm=W_Ukgb.mz8e1GW7CfhzN.QQaN09_xQq1uTHm3a.dJdU-1762561359-1.0.1.1-6IrkySxpZaL.1C65iH0iOLFfere0JxHCiasT6bak.RihYFMyJgIz2OuYJqcUey8c5vicjtorNby_Z_GJX.ZMIHa6PyzVrhqgfZZmtnnn.sA; + _cfuvid=jwWMA4k30hLPwBwTSCIdIeS5.m1TkcdYLYTt4YSTZhI-1762561359243-0.0.1.1-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.108.2 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.108.2 + x-stainless-raw-response: + - 'true' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.10 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA4xT0W6bMBR95yus+xymQAppedum7SFKNXXSqmqjQo65EG/G9myzLY3y7xMmBZKm + UnlAcI/PucfH1/uAEOAlZATYljrWaBF+/P51vnsyt/dSfzCrMmFyeXu3fr96untwX2DWMdTmJzL3 + zHrHVKMFOq5kDzOD1GGnGi3TOEmjRTr3QKNKFB2t1i68UmHDJQ/jeXwVzpdhdH1kbxVnaCEjPwJC + CNn7d+dTlvgPMuK1fKVBa2mNkA2LCAGjRFcBai23jkoHsxFkSjqUnXXZCjEBnFKiYFSIsXH/7Cff + Y1hUiALZUupvWK/u/z4k5e9PuNafV+vrab9eeqe9oaqVbAhpgg/17KwZISBpg8eGrBXUKXPGJgSo + qdsGpeucwz4HLnXrcsj2OSiNhnbaOWQ5NK1wXItdDrMcZNts0EQ5ZIvhL84hi+LDAU5aHIJL34+T + 8AxWraXiZapUSuW8AR/r4xE5DCcoVK2N2tgzKlRccrstDFLrg5meT/BsxFuA9mQEQBvVaFc49Qt9 + 05tlLwrjlI5gnB5BpxwVYz2KF7MLckWJjnI/IsNUMsq2WI7UcTppW3I1AYLJ1l+6uaTdb5/L+i3y + I8AYaodloQ2WnJ3ueFxmsLvEry0bQvaGwaL5wxkWjqPpjqPEiraiH3WwO+uwKSouazTacH+/oNJF + ks5plWKS3EBwCP4DAAD//wMAguKIhm0EAAA= + headers: + CF-RAY: + - 99b0ead42b8caaac-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 
08 Nov 2025 00:22:40 GMT + Server: + - cloudflare + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - braintrust-data + openai-processing-ms: + - '557' + openai-project: + - proj_vsCSXafhhByzWOThMrJcZiw9 + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '702' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - '30000' + x-ratelimit-limit-tokens: + - '150000000' + x-ratelimit-remaining-requests: + - '29999' + x-ratelimit-remaining-tokens: + - '149999995' + x-ratelimit-reset-requests: + - 2ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_edb893697ec245fbb710a31d27a3ed78 + status: + code: 200 + message: OK +- request: + body: '{"messages":[{"content":"What is 3 * 12","role":"user"}],"model":"gpt-4o-mini","frequency_penalty":0.0,"n":1,"presence_penalty":0.0,"stream":false,"temperature":1.0,"tools":[{"type":"function","function":{"name":"calculator","description":"Can + perform mathematical operations.","parameters":{"properties":{"input":{"properties":{"operation":{"description":"The + type of operation to execute.","enum":["add","subtract","multiply","divide"],"type":"string"},"number1":{"description":"The + first number to operate on.","type":"number"},"number2":{"description":"The + second number to operate on.","type":"number"}},"required":["operation","number1","number2"],"type":"object"}},"required":["input"],"type":"object"}}}],"top_p":1.0}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + connection: + - keep-alive + content-length: + - '725' + content-type: + - application/json + cookie: + - 
__cf_bm=.AxQfRhAvElThVl_Qz9zUVdqz_GtBGXwRQ0TVPIg5pc-1762561407-1.0.1.1-klsoMaFKHjzxOrHy2Zfd8Sc76RDHsMXURLAaIzORncnm47NI1MY0BqqBGOEsVXlZb.RdqeqpxzGFhl8DlRDjy.SqRfa2B4zEYdKZqQ2kVB0; + _cfuvid=0ohSoYMS21h1NkHWl4FeeVCp5aK2KHeEjclSm1NY7yY-1762561407934-0.0.1.1-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.108.2 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.108.2 + x-stainless-raw-response: + - 'true' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.10 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAA4xTTY/aMBC951dYc4YqCV+7udEPqSdaVeqh26wiY0/AXce2/MHCIv57lQSSwFKp + OUT2PM+b5+eZY0QICA4ZAbalnlVGjj89/UjkfIlfVm+/4o9P4dW/2a/S0dfl5PAAozpDr/8g85es + D0xXRqIXWrUws0g91qzJYp7O5sk0fmyASnOUddrG+PFUjyuhxDiN0+k4XoyTMznbasHQQUZ+R4QQ + cmz+tU7FcQ8ZiUeXSIXO0Q1C1h0iBKyWdQSoc8J5qjyMepBp5VHV0lWQcgB4rWXBqJR94fY7Dta9 + WVTK4udkP9upwHY7/nm1XH2Pefi2f4n5oF5LfTCNoDIo1pk0wLt4dlOMEFC0wnNBFiT12t5kEwLU + bkKFytfK4ZiDUCb4HLJjDtqgpTV3DlkOVZBeGHnIYZSDCtUabZJDNul2aQ5Zkp5OcFXiFN1bPw/M + s1gGR+V7V6lS2jcCGlufz8ipe0GpN8bqtbtJhVIo4baFReoaY4bvE12ENBIgXLUAGKsr4wuvX7Ap + +rhoSaHv0h5M52fQa09lH0/SyegOXcHRU9G0SNeVjLIt8j61704auNADIBpc/b2ae9zt9YXa/A99 + DzCGxiMvjEUu2PWN+2MW6yH+17HO5EYwOLQ7wbDwAm39HBxLGmQ7WuAOzmNVlEJt0BormvmC0hSz + hK8fprSka4hO0V8AAAD//wMAMU2sv20EAAA= + headers: + CF-RAY: + - 99b0ec04f9abed3b-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 08 Nov 2025 00:23:29 GMT + Server: + - cloudflare + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + 
openai-organization: + - braintrust-data + openai-processing-ms: + - '614' + openai-project: + - proj_vsCSXafhhByzWOThMrJcZiw9 + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '756' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - '30000' + x-ratelimit-limit-tokens: + - '150000000' + x-ratelimit-remaining-requests: + - '29999' + x-ratelimit-remaining-tokens: + - '149999995' + x-ratelimit-reset-requests: + - 2ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_b741763f424444f38ded6343a488e723 + status: + code: 200 + message: OK +version: 1 diff --git a/py/src/braintrust/integrations/langchain/conftest.py b/py/src/braintrust/integrations/langchain/conftest.py new file mode 100644 index 00000000..078502b2 --- /dev/null +++ b/py/src/braintrust/integrations/langchain/conftest.py @@ -0,0 +1,58 @@ +import os +from pathlib import Path + +import pytest +from braintrust.logger import ( + TEST_API_KEY, + Logger, + _internal_reset_global_state, + _internal_with_memory_background_logger, + _MemoryBackgroundLogger, +) +from braintrust.test_helpers import init_test_logger + +from braintrust.wrappers.langchain import clear_global_handler + + +@pytest.fixture(autouse=True) +def setup_braintrust_langchain(): + os.environ["BRAINTRUST_SYNC_FLUSH"] = "1" + os.environ["BRAINTRUST_API_URL"] = "http://localhost:8000" + os.environ["BRAINTRUST_APP_URL"] = "http://localhost:3000" + os.environ["BRAINTRUST_API_KEY"] = TEST_API_KEY + os.environ["ANTHROPIC_API_KEY"] = "your_anthropic_api_key_here" + os.environ["OPENAI_API_KEY"] = "your_openai_api_key_here" + os.environ["OPENAI_BASE_URL"] = "http://localhost:8000/v1/proxy" + + _internal_reset_global_state() + clear_global_handler() + yield + + +@pytest.fixture(scope="module") +def vcr_config(): + record_mode = "none" if (os.environ.get("CI") or os.environ.get("GITHUB_ACTIONS")) else "once" + + return { + "filter_headers": [ + "authorization", + "x-goog-api-key", + "x-api-key", + "api-key", + 
"openai-api-key", + ], + "record_mode": record_mode, + "match_on": ["uri", "method", "body"], + "cassette_library_dir": str(Path(__file__).parent / "cassettes"), + "path_transformer": lambda path: path.replace(".yaml", ""), + } + + +@pytest.fixture +def logger_memory_logger(): + logger = init_test_logger("langchain-py") + with _internal_with_memory_background_logger() as bgl: + yield (logger, bgl) + + +LoggerMemoryLogger = tuple[Logger, _MemoryBackgroundLogger] diff --git a/py/src/braintrust/integrations/langchain/integration.py b/py/src/braintrust/integrations/langchain/integration.py new file mode 100644 index 00000000..e22cdc0f --- /dev/null +++ b/py/src/braintrust/integrations/langchain/integration.py @@ -0,0 +1,34 @@ +"""LangChain integration orchestration.""" + +from typing import Any + +from braintrust.integrations.base import BasePatcher, BaseIntegration + + +class LangChainCallbackPatcher(BasePatcher): + """Patcher that registers a global BraintrustCallbackHandler with LangChain.""" + + name = "langchain_callback" + _patched: bool = False + + @classmethod + def is_patched(cls, module: Any | None, version: str | None, *, target: Any | None = None) -> bool: + return cls._patched + + @classmethod + def patch(cls, module: Any | None, version: str | None, *, target: Any | None = None) -> bool: + from .tracing import BraintrustCallbackHandler, _ensure_hook_registered, set_global_handler + + _ensure_hook_registered() + handler = BraintrustCallbackHandler() + set_global_handler(handler) + cls._patched = True + return True + + +class LangChainIntegration(BaseIntegration): + """Braintrust instrumentation for LangChain.""" + + name = "langchain" + import_names = ("langchain_core",) + patchers = (LangChainCallbackPatcher,) diff --git a/py/src/braintrust/integrations/langchain/test_langchain.py b/py/src/braintrust/integrations/langchain/test_langchain.py new file mode 100644 index 00000000..827cf777 --- /dev/null +++ 
b/py/src/braintrust/integrations/langchain/test_langchain.py @@ -0,0 +1,1380 @@ +# pyright: reportTypedDictNotRequiredAccess=none +import uuid +from typing import Any, Dict, List, Sequence, Union, cast +from unittest.mock import ANY + +import pytest +from braintrust.logger import flush +from langchain_anthropic import ChatAnthropic +from langchain_core.callbacks import BaseCallbackHandler, CallbackManager +from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage +from langchain_core.prompts import ChatPromptTemplate +from langchain_core.prompts.prompt import PromptTemplate +from langchain_core.runnables import RunnableMap, RunnableSerializable +from langchain_core.tools import tool +from langchain_openai import ChatOpenAI +from pydantic import BaseModel, Field + +from braintrust.integrations.langchain import BraintrustCallbackHandler, set_global_handler +from braintrust.wrappers.test_utils import verify_autoinstrument_script + +from .conftest import LoggerMemoryLogger + +# --------------------------------------------------------------------------- +# Helpers (inlined from the integration package) +# --------------------------------------------------------------------------- + + +def assert_matches_object(actual: Any, expected: Any, ignore_order: bool = False) -> None: + """Assert that actual contains all key-value pairs from expected.""" + if isinstance(expected, (list, tuple)): + assert isinstance(actual, (list, tuple)), f"Expected sequence but got {type(actual)}" + assert len(actual) >= len(expected), ( + f"Expected sequence of length >= {len(expected)} but got length {len(actual)}" + ) + if not ignore_order: + for i, expected_item in enumerate(expected): + assert_matches_object(actual[i], expected_item) + else: + for expected_item in expected: + matched = False + for actual_item in actual: + try: + assert_matches_object(actual_item, expected_item) + matched = True + except Exception: + pass + assert matched, ( + f"Expected {expected_item} in 
unordered sequence but couldn't find match in {actual}" + ) + elif isinstance(expected, dict): + assert isinstance(actual, dict), f"Expected dict but got {type(actual)}" + for k, v in expected.items(): + assert k in actual, f"Missing key {k}" + if v is ANY: + continue + if isinstance(v, (dict, list, tuple)): + assert_matches_object(actual[k], v) + else: + assert actual[k] == v, f"Key {k}: expected {v} but got {actual[k]}" + else: + assert actual == expected, f"Expected {expected} but got {actual}" + + +def find_spans_by_attributes(spans: List[Any], **attributes: Any) -> List[Any]: + """Find all spans matching the given span_attributes.""" + matching = [] + for span in spans: + if "span_attributes" not in span: + continue + if all(span["span_attributes"].get(k) == v for k, v in attributes.items()): + matching.append(span) + return matching + + +# --------------------------------------------------------------------------- +# Tests +# --------------------------------------------------------------------------- + + +@pytest.mark.vcr +def test_llm_calls(logger_memory_logger: LoggerMemoryLogger): + logger, memory_logger = logger_memory_logger + assert not memory_logger.pop() + + handler = BraintrustCallbackHandler(logger=logger) + prompt = ChatPromptTemplate.from_template("What is 1 + {number}?") + model = ChatOpenAI( + model="gpt-4o-mini", + temperature=1, + top_p=1, + frequency_penalty=0, + presence_penalty=0, + n=1, + ) + chain: RunnableSerializable[Dict[str, str], BaseMessage] = prompt.pipe(model) + chain.invoke({"number": "2"}, config={"callbacks": [cast(BaseCallbackHandler, handler)]}) + + spans = memory_logger.pop() + assert len(spans) == 3 + + root_span_id = spans[0]["span_id"] + + assert_matches_object( + spans, + [ + { + "span_attributes": { + "name": "RunnableSequence", + "type": "task", + }, + "input": {"number": "2"}, + "output": { + "content": ANY, + "additional_kwargs": ANY, + "response_metadata": ANY, + "type": "ai", + "name": ANY, + "id": ANY, + 
"example": ANY, + "tool_calls": ANY, + "invalid_tool_calls": ANY, + "usage_metadata": ANY, + }, + "metadata": {"tags": []}, + "span_id": root_span_id, + "root_span_id": root_span_id, + }, + { + "span_attributes": {"name": "ChatPromptTemplate"}, + "input": {"number": "2"}, + "output": { + "messages": [ + { + "content": ANY, + "additional_kwargs": {}, + "response_metadata": {}, + "type": "human", + "name": None, + "id": None, + } + ] + }, + "metadata": {"tags": ["seq:step:1"]}, + "root_span_id": root_span_id, + "span_parents": [root_span_id], + }, + { + "span_attributes": {"name": "ChatOpenAI", "type": "llm"}, + "input": [ + [ + { + "content": ANY, + "additional_kwargs": {}, + "response_metadata": {}, + "type": "human", + "name": None, + "id": None, + "example": ANY, + } + ] + ], + "output": { + "generations": [ + [ + { + "text": ANY, + "generation_info": ANY, + "type": "ChatGeneration", + "message": { + "content": ANY, + "additional_kwargs": ANY, + "response_metadata": ANY, + "type": "ai", + "name": None, + "id": ANY, + }, + } + ] + ], + "llm_output": { + "token_usage": { + "completion_tokens": ANY, + "prompt_tokens": ANY, + "total_tokens": ANY, + }, + "model_name": "gpt-4o-mini-2024-07-18", + }, + "run": None, + "type": "LLMResult", + }, + "metrics": { + "start": ANY, + "total_tokens": ANY, + "prompt_tokens": ANY, + "completion_tokens": ANY, + "end": ANY, + }, + "metadata": { + "tags": ["seq:step:2"], + "model": "gpt-4o-mini-2024-07-18", + }, + "root_span_id": root_span_id, + "span_parents": [root_span_id], + }, + ], + ) + + +@pytest.mark.vcr +def test_global_handler(logger_memory_logger: LoggerMemoryLogger): + logger, memory_logger = logger_memory_logger + assert not memory_logger.pop() + + handler = BraintrustCallbackHandler(logger=logger, debug=True) + set_global_handler(handler) + + # Make sure the handler is registered in the LangChain library + manager = CallbackManager.configure() + assert next((h for h in manager.handlers if isinstance(h, 
BraintrustCallbackHandler)), None) == handler + + prompt = ChatPromptTemplate.from_template("What is 1 + {number}?") + model = ChatOpenAI( + model="gpt-4o-mini", + temperature=1, + top_p=1, + frequency_penalty=0, + presence_penalty=0, + n=1, + ) + chain: RunnableSerializable[Dict[str, str], BaseMessage] = prompt.pipe(model) + + message = chain.invoke({"number": "2"}) + + spans = memory_logger.pop() + assert len(spans) > 0 + + root_span_id = spans[0]["span_id"] + + assert_matches_object( + spans, + [ + { + "span_attributes": { + "name": "RunnableSequence", + "type": "task", + }, + "input": {"number": "2"}, + "output": { + "content": ANY, + "additional_kwargs": ANY, + "response_metadata": ANY, + "type": "ai", + "name": ANY, + "id": ANY, + "example": ANY, + "tool_calls": ANY, + "invalid_tool_calls": ANY, + "usage_metadata": ANY, + }, + "metadata": {"tags": []}, + "span_id": root_span_id, + "root_span_id": root_span_id, + }, + { + "span_attributes": {"name": "ChatPromptTemplate"}, + "input": {"number": "2"}, + "output": { + "messages": [ + { + "content": ANY, + "additional_kwargs": {}, + "response_metadata": {}, + "type": "human", + "name": None, + "id": None, + } + ] + }, + "metadata": {"tags": ["seq:step:1"]}, + "root_span_id": root_span_id, + "span_parents": [root_span_id], + }, + { + "span_attributes": {"name": "ChatOpenAI", "type": "llm"}, + "input": [ + [ + { + "content": ANY, + "additional_kwargs": {}, + "response_metadata": {}, + "type": "human", + "name": None, + "id": None, + "example": ANY, + } + ] + ], + "output": { + "generations": [ + [ + { + "text": ANY, + "generation_info": ANY, + "type": "ChatGeneration", + "message": { + "content": ANY, + "additional_kwargs": ANY, + "response_metadata": ANY, + "type": "ai", + "name": None, + "id": ANY, + }, + } + ] + ], + "llm_output": { + "token_usage": { + "completion_tokens": ANY, + "prompt_tokens": ANY, + "total_tokens": ANY, + }, + "model_name": "gpt-4o-mini-2024-07-18", + }, + "run": None, + "type": "LLMResult", 
+ }, + "metrics": { + "start": ANY, + "total_tokens": ANY, + "prompt_tokens": ANY, + "completion_tokens": ANY, + "end": ANY, + }, + "metadata": { + "tags": ["seq:step:2"], + "model": "gpt-4o-mini-2024-07-18", + }, + "root_span_id": root_span_id, + "span_parents": [root_span_id], + }, + ], + ) + + assert message.content == "1 + 2 equals 3." + + +@pytest.mark.vcr +def test_chain_with_memory(logger_memory_logger: LoggerMemoryLogger): + logger, memory_logger = logger_memory_logger + assert not memory_logger.pop() + + handler = BraintrustCallbackHandler(logger=logger) + prompt = ChatPromptTemplate.from_template("{history} User: {input}") + model = ChatOpenAI(model="gpt-4o-mini") + chain: RunnableSerializable[Dict[str, str], BaseMessage] = prompt.pipe(model) + + memory = {"history": "Assistant: Hello! How can I assist you today?"} + chain.invoke( + {"input": "What's your name?", **memory}, + config={"callbacks": [cast(BaseCallbackHandler, handler)], "tags": ["test"]}, + ) + + spans = memory_logger.pop() + assert len(spans) == 3 + + root_span_id = spans[0]["span_id"] + + assert_matches_object( + spans, + [ + { + "span_attributes": { + "name": "RunnableSequence", + "type": "task", + }, + "input": {"input": "What's your name?", "history": "Assistant: Hello! How can I assist you today?"}, + "output": { + "content": ANY, + "additional_kwargs": ANY, + "response_metadata": ANY, + "type": "ai", + }, + "metadata": {"tags": ["test"]}, + "span_id": root_span_id, + "root_span_id": root_span_id, + }, + { + "span_attributes": {"name": "ChatPromptTemplate"}, + "input": {"input": "What's your name?", "history": "Assistant: Hello! 
How can I assist you today?"}, + "output": { + "messages": [ + { + "content": ANY, + "additional_kwargs": {}, + "response_metadata": {}, + "type": "human", + "name": None, + "id": None, + } + ] + }, + "metadata": {"tags": ["seq:step:1", "test"]}, + "root_span_id": root_span_id, + "span_parents": [root_span_id], + }, + { + "span_attributes": {"name": "ChatOpenAI", "type": "llm"}, + "input": [ + [ + { + "content": ANY, + "additional_kwargs": {}, + "response_metadata": {}, + "type": "human", + "name": None, + "id": None, + "example": ANY, + } + ] + ], + "output": { + "generations": [ + [ + { + "text": ANY, + "generation_info": ANY, + "type": "ChatGeneration", + "message": { + "content": ANY, + "additional_kwargs": ANY, + "response_metadata": ANY, + "type": "ai", + "name": None, + "id": ANY, + }, + } + ] + ], + "llm_output": { + "token_usage": { + "completion_tokens": ANY, + "prompt_tokens": ANY, + "total_tokens": ANY, + }, + "model_name": "gpt-4o-mini-2024-07-18", + }, + "run": None, + "type": "LLMResult", + }, + "metrics": { + "start": ANY, + "total_tokens": ANY, + "prompt_tokens": ANY, + "completion_tokens": ANY, + "end": ANY, + }, + "metadata": { + "tags": ["seq:step:2", "test"], + "model": "gpt-4o-mini-2024-07-18", + }, + "root_span_id": root_span_id, + "span_parents": [root_span_id], + }, + ], + ) + + +@pytest.mark.vcr +def test_tool_usage(logger_memory_logger: LoggerMemoryLogger): + logger, memory_logger = logger_memory_logger + assert not memory_logger.pop() + + handler = BraintrustCallbackHandler(logger=logger) + + class CalculatorInput(BaseModel): + operation: str = Field( + description="The type of operation to execute.", + json_schema_extra={"enum": ["add", "subtract", "multiply", "divide"]}, + ) + number1: float = Field(description="The first number to operate on.") + number2: float = Field(description="The second number to operate on.") + + @tool + def calculator(input: CalculatorInput) -> str: + """Can perform mathematical operations.""" + if 
input.operation == "add": + return str(input.number1 + input.number2) + elif input.operation == "subtract": + return str(input.number1 - input.number2) + elif input.operation == "multiply": + return str(input.number1 * input.number2) + elif input.operation == "divide": + return str(input.number1 / input.number2) + else: + raise ValueError("Invalid operation.") + + model = ChatOpenAI( + model="gpt-4o-mini", + temperature=1, + top_p=1, + frequency_penalty=0, + presence_penalty=0, + n=1, + ) + model_with_tools = model.bind_tools([calculator]) + model_with_tools.invoke("What is 3 * 12", config={"callbacks": [cast(BaseCallbackHandler, handler)]}) + + spans = memory_logger.pop() + root_span_id = spans[0]["span_id"] + + assert_matches_object( + spans, + [ + { + "span_id": root_span_id, + "root_span_id": root_span_id, + "span_attributes": { + "name": "ChatOpenAI", + "type": "llm", + }, + "input": [ + [ + { + "content": ANY, + "additional_kwargs": {}, + "response_metadata": {}, + "type": "human", + "name": None, + "id": None, + "example": ANY, + } + ] + ], + "metadata": { + "tags": [], + "model": "gpt-4o-mini-2024-07-18", + "invocation_params": { + "tools": [ + { + "type": "function", + "function": { + "name": "calculator", + "description": "Can perform mathematical operations.", + "parameters": ANY, + }, + } + ], + }, + }, + "output": { + "generations": [ + [ + { + "generation_info": ANY, + "type": "ChatGeneration", + "message": { + "content": ANY, + "type": "ai", + "additional_kwargs": { + "tool_calls": ANY, + }, + "response_metadata": ANY, + "name": None, + "id": ANY, + }, + } + ] + ], + "llm_output": { + "token_usage": { + "completion_tokens": ANY, + "prompt_tokens": ANY, + "total_tokens": ANY, + }, + "model_name": "gpt-4o-mini-2024-07-18", + }, + "run": None, + "type": "LLMResult", + }, + "metrics": { + "start": ANY, + "total_tokens": ANY, + "prompt_tokens": ANY, + "completion_tokens": ANY, + "end": ANY, + }, + } + ], + ) + + +@pytest.mark.vcr 
+@pytest.mark.skip(reason="Not yet working with VCR.") +def test_parallel_execution(logger_memory_logger: LoggerMemoryLogger): + logger, memory_logger = logger_memory_logger + assert not memory_logger.pop() + + handler = BraintrustCallbackHandler(logger=logger) + + model = ChatOpenAI( + model="gpt-4o-mini", + temperature=1, + top_p=1, + frequency_penalty=0, + presence_penalty=0, + n=1, + ) + + joke_chain = PromptTemplate.from_template("Tell me a joke about {topic}").pipe(model) + poem_chain = PromptTemplate.from_template("write a 2-line poem about {topic}").pipe(model) + + map_chain = RunnableMap( + { + "joke": joke_chain, + "poem": poem_chain, + } + ) + + map_chain.invoke({"topic": "bear"}, config={"callbacks": [cast(BaseCallbackHandler, handler)]}) + + spans = memory_logger.pop() + + llm_spans = find_spans_by_attributes(spans, name="ChatOpenAI") + assert len(llm_spans) == 2 + + for span in llm_spans: + assert_matches_object( + span, + { + "span_attributes": {"name": "ChatOpenAI", "type": "llm"}, + "metadata": { + "tags": ["seq:step:2"], + "model": "gpt-4o-mini-2024-07-18", + }, + "input": [ + [ + { + "content": ANY, + "additional_kwargs": {}, + "response_metadata": {}, + "type": "human", + } + ] + ], + "output": { + "generations": [ + [ + { + "text": ANY, + "generation_info": ANY, + "type": "ChatGeneration", + "message": { + "content": ANY, + "type": "ai", + }, + } + ] + ], + "llm_output": { + "token_usage": { + "completion_tokens": ANY, + "prompt_tokens": ANY, + "total_tokens": ANY, + }, + "model_name": "gpt-4o-mini-2024-07-18", + }, + "type": "LLMResult", + }, + "metrics": { + "start": ANY, + "total_tokens": ANY, + "prompt_tokens": ANY, + "completion_tokens": ANY, + "end": ANY, + }, + }, + ) + + +@pytest.mark.vcr +def test_langgraph_state_management(logger_memory_logger: LoggerMemoryLogger): + logger, memory_logger = logger_memory_logger + assert not memory_logger.pop() + + try: + from langgraph.graph import END, START, StateGraph + except ImportError: + 
pytest.skip("langgraph not installed") + + handler = BraintrustCallbackHandler(logger=logger) + model = ChatOpenAI( + model="gpt-4o-mini", + temperature=1, + top_p=1, + frequency_penalty=0, + presence_penalty=0, + n=1, + ) + + def say_hello(state: Dict[str, str]): + response = model.invoke("Say hello") + return cast(Union[str, List[str], Dict[str, str]], response.content) + + def say_bye(state: Dict[str, str]): + print("From the 'sayBye' node: Bye world!") + return "Bye" + + workflow = ( + StateGraph(state_schema=Dict[str, str]) + .add_node("sayHello", say_hello) + .add_node("sayBye", say_bye) + .add_edge(START, "sayHello") + .add_edge("sayHello", "sayBye") + .add_edge("sayBye", END) + ) + + graph = workflow.compile() + graph.invoke({}, config={"callbacks": [handler]}) + + spans = memory_logger.pop() + + langgraph_spans = find_spans_by_attributes(spans, name="LangGraph") + say_hello_spans = find_spans_by_attributes(spans, name="sayHello") + say_bye_spans = find_spans_by_attributes(spans, name="sayBye") + llm_spans = find_spans_by_attributes(spans, name="ChatOpenAI") + + assert len(langgraph_spans) == 1 + assert len(say_hello_spans) == 1 + assert len(say_bye_spans) == 1 + assert len(llm_spans) == 1 + + assert_matches_object( + langgraph_spans[0], + { + "span_attributes": { + "name": "LangGraph", + "type": "task", + }, + "input": {}, + "metadata": { + "tags": [], + }, + "output": "Bye", + }, + ) + + assert_matches_object( + say_hello_spans[0], + { + "span_attributes": { + "name": "sayHello", + }, + "input": {}, + "metadata": { + "tags": ["graph:step:1"], + }, + "output": ANY, + }, + ) + + assert_matches_object( + llm_spans[0], + { + "span_attributes": { + "name": "ChatOpenAI", + "type": "llm", + }, + "input": [ + [ + { + "content": ANY, + "additional_kwargs": {}, + "response_metadata": {}, + "type": "human", + "name": None, + "id": None, + "example": ANY, + } + ] + ], + "metadata": { + "model": "gpt-4o-mini-2024-07-18", + "tags": [], + }, + "output": { + 
"generations": [ + [ + { + "text": ANY, + "generation_info": ANY, + "type": "ChatGeneration", + "message": { + "content": ANY, + "additional_kwargs": ANY, + "response_metadata": ANY, + "type": "ai", + "name": None, + "id": ANY, + }, + } + ] + ], + "llm_output": { + "token_usage": { + "completion_tokens": ANY, + "prompt_tokens": ANY, + "total_tokens": ANY, + }, + "model_name": "gpt-4o-mini-2024-07-18", + }, + "run": None, + "type": "LLMResult", + }, + "metrics": { + "start": ANY, + "total_tokens": ANY, + "prompt_tokens": ANY, + "completion_tokens": ANY, + "end": ANY, + }, + }, + ) + + assert_matches_object( + say_bye_spans[0], + { + "span_attributes": { + "name": "sayBye", + }, + "input": ANY, + "metadata": { + "tags": ["graph:step:2"], + }, + "output": "Bye", + }, + ) + + +@pytest.mark.vcr +def test_chain_null_values(logger_memory_logger: LoggerMemoryLogger): + logger, memory_logger = logger_memory_logger + assert not memory_logger.pop() + + handler = BraintrustCallbackHandler(logger=logger) + + run_id = uuid.UUID("f81d4fae-7dec-11d0-a765-00a0c91e6bf6") + + handler.on_chain_start( + {"id": ["TestChain"], "lc": 1, "type": "not_implemented"}, + {"input1": "value1", "input2": None, "input3": None}, + run_id=run_id, + parent_run_id=None, + tags=["test"], + ) + + handler.on_chain_end( + {"output1": "value1", "output2": None, "output3": None}, + run_id=run_id, + parent_run_id=None, + tags=["test"], + ) + + flush() + + spans = memory_logger.pop() + root_span_id = spans[0]["span_id"] + + assert_matches_object( + spans, + [ + { + "root_span_id": root_span_id, + "span_attributes": { + "name": "TestChain", + "type": "task", + }, + "input": { + "input1": "value1", + "input2": None, + "input3": None, + }, + "metadata": { + "tags": ["test"], + }, + "output": { + "output1": "value1", + "output2": None, + "output3": None, + }, + }, + ], + ) + + +def test_consecutive_eval_calls(logger_memory_logger: LoggerMemoryLogger): + from braintrust import Eval + + logger, memory_logger = 
logger_memory_logger + assert not memory_logger.pop() + + def task_fn(input, hooks): + handler = BraintrustCallbackHandler(logger=logger) + + run_id = uuid.uuid4() + + handler.on_chain_start( + {"id": ["RunnableSequence"], "lc": 1, "type": "not_implemented"}, + {"number": str(input)}, + run_id=run_id, + parent_run_id=None, + ) + + output = f"Result for {input}" + + handler.on_chain_end( + {"content": output}, + run_id=run_id, + parent_run_id=None, + ) + + return output + + with logger.start_span(name="test-consecutive-eval", span_attributes={"type": "eval"}) as parent_span: + Eval( + "test-consecutive-eval", + data=[{"input": 1, "expected": "Result for 1"}, {"input": 2, "expected": "Result for 2"}], + task=task_fn, + scores=[], + parent=parent_span.id, + ) + + flush() + + spans = memory_logger.pop() + + assert len(spans) == 5, f"Expected 5 spans, got {len(spans)}" + + root_eval_span = [s for s in spans if s.get("span_attributes", {}).get("name") == "test-consecutive-eval"][0] + root_eval_span_id = root_eval_span["span_id"] + + eval_record_spans = [ + s + for s in spans + if s.get("span_attributes", {}).get("name") == "eval" and root_eval_span_id in (s.get("span_parents") or []) + ] + assert len(eval_record_spans) == 2, f"Expected 2 eval record spans, got {len(eval_record_spans)}" + + eval_record_spans_sorted = sorted(eval_record_spans, key=lambda s: s.get("input", 0)) + eval_record_1 = eval_record_spans_sorted[0] + eval_record_2 = eval_record_spans_sorted[1] + + task_spans = [s for s in spans if s.get("span_attributes", {}).get("name") == "task"] + assert len(task_spans) == 2, f"Expected 2 task spans, got {len(task_spans)}" + + task_spans_sorted = sorted(task_spans, key=lambda s: s.get("input", 0)) + task_1_span = task_spans_sorted[0] + task_2_span = task_spans_sorted[1] + + assert_matches_object( + [root_eval_span], + [ + { + "span_id": root_eval_span_id, + "root_span_id": root_eval_span_id, + "span_attributes": { + "name": "test-consecutive-eval", + "type": 
"eval", + }, + } + ], + ) + + assert_matches_object( + [eval_record_1], + [ + { + "root_span_id": root_eval_span_id, + "span_parents": [root_eval_span_id], + "span_attributes": {"name": "eval"}, + "input": 1, + "output": "Result for 1", + } + ], + ) + + assert_matches_object( + [eval_record_2], + [ + { + "root_span_id": root_eval_span_id, + "span_parents": [root_eval_span_id], + "span_attributes": {"name": "eval"}, + "input": 2, + "output": "Result for 2", + } + ], + ) + + assert_matches_object( + [task_1_span], + [ + { + "root_span_id": root_eval_span_id, + "span_parents": [eval_record_1["span_id"]], + "span_attributes": {"name": "task"}, + "input": 1, + "output": "Result for 1", + } + ], + ) + + assert_matches_object( + [task_2_span], + [ + { + "root_span_id": root_eval_span_id, + "span_parents": [eval_record_2["span_id"]], + "span_attributes": {"name": "task"}, + "input": 2, + "output": "Result for 2", + } + ], + ) + + +@pytest.mark.vcr +def test_streaming_ttft(logger_memory_logger: LoggerMemoryLogger): + logger, memory_logger = logger_memory_logger + assert not memory_logger.pop() + + handler = BraintrustCallbackHandler(logger=logger) + prompt = ChatPromptTemplate.from_template("Count from 1 to 5.") + model = ChatOpenAI( + model="gpt-4o-mini", + max_completion_tokens=50, + streaming=True, + ) + chain: RunnableSerializable[Dict[str, str], BaseMessage] = prompt.pipe(model) + + chunks: List[str] = [] + for chunk in chain.stream({}, config={"callbacks": [cast(BaseCallbackHandler, handler)]}): + if chunk.content: + chunks.append(str(chunk.content)) + + assert len(chunks) > 0, "Expected to receive streaming chunks" + + spans = memory_logger.pop() + assert len(spans) == 3 + + llm_spans = find_spans_by_attributes(spans, name="ChatOpenAI", type="llm") + assert len(llm_spans) == 1 + llm_span = llm_spans[0] + + assert_matches_object( + [llm_span], + [ + { + "id": ANY, + "input": [ + [ + { + "additional_kwargs": {}, + "content": "Count from 1 to 5.", + "example": False, + 
"id": None, + "name": None, + "response_metadata": {}, + "type": "human", + } + ] + ], + "metadata": { + "braintrust": { + "integration_name": "langchain-py", + } + }, + "metrics": { + "time_to_first_token": ANY, + }, + "output": { + "generations": [ + [ + { + "generation_info": { + "finish_reason": "stop", + "model_name": ANY, + }, + "message": { + "content": "1, 2, 3, 4, 5.", + "type": "AIMessageChunk", + }, + "text": "1, 2, 3, 4, 5.", + "type": "ChatGenerationChunk", + } + ] + ], + "type": "LLMResult", + }, + "project_id": "langchain-py", + "span_attributes": {"name": "ChatOpenAI", "type": "llm"}, + } + ], + ) + + +@pytest.mark.vcr +def test_prompt_caching_tokens(logger_memory_logger: LoggerMemoryLogger): + logger, memory_logger = logger_memory_logger + assert not memory_logger.pop() + + handler = BraintrustCallbackHandler(logger=logger) + + model = ChatAnthropic(model="claude-sonnet-4-5-20250929") + + # XXX: if you need to change the cassette or test, you'll want to change the text below to invalidate the stored cache. + + # Anthropic prompt caching requires a minimum of 1024 tokens for Claude Sonnet models. + # This static text (~1500 tokens) ensures we meet that threshold consistently. + # See: https://platform.claude.com/docs/en/build-with-claude/prompt-caching + long_text_for_caching = """ +# Comprehensive Guide to Software Testing Methods! + +## Chapter 1: Introduction to Testing + +Software testing is a critical component of the software development lifecycle. It ensures that applications +function correctly, meet requirements, and provide a positive user experience. This guide covers various +testing methodologies, best practices, and tools used in modern software development. + +### 1.1 The Importance of Testing + +Testing helps identify defects early in the development process, reducing the cost of fixing issues later. +Studies have shown that the cost of fixing a bug increases exponentially as it progresses through the +development lifecycle. 
A bug found during requirements gathering might cost $1 to fix, while the same bug +found in production could cost $100 or more. + +### 1.2 Types of Testing + +There are many types of testing, including: +- Unit Testing: Testing individual components or functions in isolation +- Integration Testing: Testing how components work together +- End-to-End Testing: Testing the entire application flow +- Performance Testing: Testing application speed and scalability +- Security Testing: Testing for vulnerabilities and security issues +- Usability Testing: Testing user experience and interface design + +## Chapter 2: Unit Testing Best Practices + +Unit testing focuses on testing the smallest testable parts of an application. Here are some best practices: + +### 2.1 Write Tests First (TDD) + +Test-Driven Development (TDD) is a methodology where tests are written before the actual code. The process +follows a simple cycle: Red (write a failing test), Green (write code to pass the test), Refactor (improve +the code while keeping tests passing). + +### 2.2 Keep Tests Independent + +Each test should be independent of others. Tests should not rely on the state created by previous tests. +This ensures that tests can be run in any order and that failures are isolated and easy to debug. + +### 2.3 Use Meaningful Names + +Test names should clearly describe what is being tested and what the expected outcome is. A good test name +might be "test_user_registration_with_valid_email_succeeds" rather than just "test_registration". + +### 2.4 Test Edge Cases + +Don't just test the happy path. Consider edge cases like: +- Empty inputs +- Null or undefined values +- Very large inputs +- Invalid formats +- Boundary conditions + +## Chapter 3: Integration Testing + +Integration testing verifies that different modules or services work together correctly. 
+ +### 3.1 Database Integration + +When testing database interactions, consider using: +- Test databases separate from production +- Database transactions that roll back after each test +- Mock data that represents realistic scenarios + +### 3.2 API Integration + +API integration tests should verify: +- Correct HTTP status codes +- Response format and schema +- Error handling +- Authentication and authorization + +## Chapter 4: Performance Testing + +Performance testing ensures your application can handle expected load and scale appropriately. + +### 4.1 Load Testing + +Load testing simulates multiple users accessing the application simultaneously. Key metrics include: +- Response time under load +- Throughput (requests per second) +- Error rates +- Resource utilization (CPU, memory, network) + +### 4.2 Stress Testing + +Stress testing pushes the application beyond normal operational capacity to find breaking points and +understand how the system fails gracefully. + +## Chapter 5: Continuous Integration and Testing + +Modern development practices integrate testing into the CI/CD pipeline. + +### 5.1 Automated Test Runs + +Tests should run automatically on every code change. This includes: +- Running unit tests on every commit +- Running integration tests on pull requests +- Running end-to-end tests before deployment + +### 5.2 Test Coverage + +Test coverage metrics help identify untested code. While 100% coverage isn't always practical or necessary, +maintaining good coverage helps ensure code quality. Focus on critical paths and business logic. 
+ +## Chapter 6: Testing Tools and Frameworks + +Many tools exist to support testing efforts: + +### 6.1 Python Testing +- pytest: Feature-rich testing framework +- unittest: Built-in Python testing module +- mock: Library for mocking objects + +### 6.2 JavaScript Testing +- Jest: Popular testing framework +- Mocha: Flexible testing framework +- Cypress: End-to-end testing tool + +### 6.3 Other Tools +- Selenium: Browser automation +- JMeter: Performance testing +- Postman: API testing + +## Conclusion + +Effective testing is essential for delivering high-quality software. By following best practices and using +appropriate tools, teams can catch bugs early, improve code quality, and deliver better products to users. + +Remember: Testing is not just about finding bugs, it's about building confidence in your code. +""" + + messages: list[BaseMessage] = [ + SystemMessage( + content=[ + { + "type": "text", + "text": long_text_for_caching, + "cache_control": {"type": "ephemeral"}, + } + ] + ), + HumanMessage(content="What is the first type of testing mentioned in section 1.2?"), + ] + + res = model.invoke(messages, config={"callbacks": [cast(BaseCallbackHandler, handler)]}) + + spans = memory_logger.pop() + assert len(spans) > 0 + + llm_spans = find_spans_by_attributes(spans, name="ChatAnthropic", type="llm") + assert len(llm_spans) == 1 + first_span = llm_spans[0] + + assert "metrics" in first_span + first_metrics = first_span["metrics"] + assert "prompt_tokens" in first_metrics + assert first_metrics["prompt_tokens"] > 0 + + assert "prompt_cache_creation_tokens" in first_metrics + assert first_metrics["prompt_cache_creation_tokens"] > 0 + assert first_metrics["prompt_cached_tokens"] == 0 + + res = model.invoke( + messages + [res, HumanMessage(content="What testing framework is mentioned for Python?")], + config={"callbacks": [cast(BaseCallbackHandler, handler)]}, + ) + + spans = memory_logger.pop() + assert len(spans) > 0 + + llm_spans = 
find_spans_by_attributes(spans, name="ChatAnthropic", type="llm") + + assert len(llm_spans) == 1 + second_span = llm_spans[0] + + assert "metrics" in second_span + second_metrics = second_span["metrics"] + + assert "prompt_cached_tokens" in second_metrics + assert second_metrics["prompt_cached_tokens"] > 0 + + assert "prompt_tokens" in second_metrics + assert second_metrics["prompt_tokens"] > 0 + + +@pytest.mark.vcr +def test_langchain_anthropic_integration(logger_memory_logger: LoggerMemoryLogger): + logger, memory_logger = logger_memory_logger + assert not memory_logger.pop() + + MODEL = "claude-sonnet-4-20250514" + + handler = BraintrustCallbackHandler(logger=logger) + set_global_handler(handler) + + prompt = ChatPromptTemplate.from_template("What is 1 + {number}?") + model = ChatAnthropic(model_name=MODEL) + + chain = prompt | model + + result = chain.invoke({"number": "2"}) + + flush() + + assert isinstance(result.content, str) + assert "3" in result.content.lower() + + spans = memory_logger.pop() + assert len(spans) > 0 + + llm_spans = [span for span in spans if span["span_attributes"].get("type") == "llm"] + assert len(llm_spans) > 0, "Should have at least one LLM call" + + llm_span = llm_spans[0] + assert llm_span["metadata"]["model"] == MODEL + + assert_matches_object( + llm_span["metrics"], + { + "completion_tokens": 13, + "end": ANY, + "prompt_tokens": 16, + "start": ANY, + "total_tokens": 29, + }, + ) + + +def test_auto_instrument_langchain(): + """Test that auto_instrument registers a global LangChain callback handler.""" + verify_autoinstrument_script("test_auto_langchain.py") + + +@pytest.mark.vcr +@pytest.mark.asyncio +async def test_async_langchain_invoke(logger_memory_logger: LoggerMemoryLogger): + logger, memory_logger = logger_memory_logger + assert not memory_logger.pop() + + MODEL = "claude-sonnet-4-20250514" + + handler = BraintrustCallbackHandler(logger=logger) + set_global_handler(handler) + + prompt = ChatPromptTemplate.from_template("What 
is 1 + {number}?") + model = ChatAnthropic(model_name=MODEL) + + chain = prompt | model + + result = await chain.ainvoke({"number": "2"}) + + flush() + + assert isinstance(result.content, str) + assert "3" in result.content.lower() + + spans = memory_logger.pop() + assert len(spans) > 0 diff --git a/py/src/braintrust/integrations/langchain/tracing.py b/py/src/braintrust/integrations/langchain/tracing.py new file mode 100644 index 00000000..08458314 --- /dev/null +++ b/py/src/braintrust/integrations/langchain/tracing.py @@ -0,0 +1,701 @@ +""" +Braintrust tracing implementation for LangChain. + +Contains BraintrustCallbackHandler and supporting utilities for tracing LangChain +chains, LLMs, tools, and retrievers. +""" + +import json +import logging +import re +import time +from collections.abc import Mapping, Sequence +from contextvars import ContextVar +from re import Pattern +from typing import ( + Any, + TypedDict, + Union, +) +from uuid import UUID + +import braintrust +from braintrust import NOOP_SPAN, Logger, Span, SpanAttributes, SpanTypeAttribute, current_span, init_logger +from braintrust.version import VERSION as sdk_version +from typing_extensions import NotRequired + +_logger = logging.getLogger(__name__) + +# integration_name stays "langchain-py" for backward compatibility with existing traces +_INTEGRATION_NAME = "langchain-py" + +# Global handler context variable – registered with LangChain's configure hook system +# so that all LangChain invocations in the process are automatically traced. 
_braintrust_callback_handler_var: ContextVar["BraintrustCallbackHandler | None"] = ContextVar(
    "braintrust_callback_handler", default=None
)

_hook_registered = False


def _ensure_hook_registered() -> None:
    """Register our context var with LangChain's configure-hook system, once."""
    global _hook_registered
    if not _hook_registered:
        from langchain_core.tracers.context import register_configure_hook

        register_configure_hook(
            context_var=_braintrust_callback_handler_var,
            inheritable=True,
        )
        _hook_registered = True


def set_global_handler(handler: "BraintrustCallbackHandler") -> None:
    """Install *handler* as the process-wide LangChain callback handler.

    The configure hook is registered lazily here so that every subsequent
    LangChain invocation in this context picks the handler up automatically.
    """
    _ensure_hook_registered()
    _braintrust_callback_handler_var.set(handler)


def clear_global_handler() -> None:
    """Uninstall any previously registered global BraintrustCallbackHandler."""
    _braintrust_callback_handler_var.set(None)


class LogEvent(TypedDict):
    """Keyword payload forwarded to Braintrust `start_span` / `Span.log`."""

    input: NotRequired[Any]
    output: NotRequired[Any]
    expected: NotRequired[Any]
    error: NotRequired[str]
    tags: NotRequired[Sequence[str] | None]
    scores: NotRequired[Mapping[str, int | float]]
    metadata: NotRequired[Mapping[str, Any]]
    metrics: NotRequired[Mapping[str, int | float]]
    id: NotRequired[str]
    dataset_record_id: NotRequired[str]


class BraintrustCallbackHandler:
    """LangChain callback handler that traces chains, LLMs, tools, and retrievers in Braintrust."""

    # Run id of the outermost (parentless) run currently being traced.
    root_run_id: UUID | None = None

    def __init__(
        self,
        logger: Logger | Span | None = None,
        debug: bool = False,
        exclude_metadata_props: Pattern[str] | None = None,
    ):
        self.logger = logger
        # Open Braintrust spans keyed by LangChain run id.
        self.spans: dict[UUID, Span] = {}
        self.debug = debug  # DEPRECATED
        # Metadata keys matching this pattern are excluded from logging
        # (LangSmith/LangGraph internals by default).
        self.exclude_metadata_props = exclude_metadata_props or re.compile(
            r"^(l[sc]_|langgraph_|__pregel_|checkpoint_ns)"
        )
        # Runs intentionally not traced (e.g. tagged `langsmith:hidden`).
        self.skipped_runs: set[UUID] = set()
        # Run callbacks inline (no thread executor) so contextvars — and the
        # in-memory logger used in tests — survive across async boundaries.
        self.run_inline = True

        # Time-to-first-token bookkeeping, all keyed by run id.
        self._start_times: dict[UUID, float] = {}
        self._first_token_times: dict[UUID, float] = {}
        self._ttft_ms: dict[UUID, float] = {}

    def _start_span(
        self,
        parent_run_id: UUID | None,
        run_id: UUID,
        name: str | None = None,
        type: SpanTypeAttribute | None = SpanTypeAttribute.TASK,
        span_attributes: SpanAttributes | Mapping[str, Any] | None = None,
        start_time: float | None = None,
        set_current: bool | None = None,
        parent: str | None = None,
        event: LogEvent | None = None,
    ) -> Any:
        """Open a Braintrust span for `run_id`, parented per LangChain's run tree."""
        # A duplicate run id indicates a callback bug (or an intentional graph
        # re-entry); never clobber the span we already opened.
        if run_id in self.spans:
            # XXX: See graph test case of an example where this _may_ be intended.
            _logger.warning(f"Span already exists for run_id {run_id} (this is likely a bug)")
            return

        if not parent_run_id:
            self.root_run_id = run_id

        # Parent resolution order: LangChain parent run -> ambient Braintrust
        # span -> explicit logger -> module-level default.
        if parent_run_id and parent_run_id in self.spans:
            target = self.spans[parent_run_id]
        else:
            ambient = current_span()
            if ambient != NOOP_SPAN:
                target = ambient
            elif self.logger is not None:
                target = self.logger
            else:
                target = braintrust

        payload: LogEvent = event if event is not None else {}
        run_tags = payload.get("tags") or []
        # Tags are folded into metadata rather than logged as Braintrust tags.
        payload = {
            **payload,
            "tags": None,
            "metadata": {
                **({"tags": run_tags}),
                **(payload.get("metadata") or {}),
                "run_id": run_id,
                "parent_run_id": parent_run_id,
                "braintrust": {
                    "integration_name": _INTEGRATION_NAME,
                    "integration_version": sdk_version,
                    "sdk_version": sdk_version,
                    "language": "python",
                },
            },
        }

        span = target.start_span(
            name=name,
            type=type,
            span_attributes=span_attributes,
            start_time=start_time,
            set_current=set_current,
            parent=parent,
            **payload,
        )

        # A NOOP span means Braintrust was never configured; fall back to a
        # default logger so the trace is not silently dropped.
        if self.logger != NOOP_SPAN and span == NOOP_SPAN:
            _logger.warning(
                "Braintrust logging not configured. Pass a `logger`, call `init_logger`, or run an experiment to configure Braintrust logging. Setting up a default."
            )
            span = init_logger().start_span(
                name=name,
                type=type,
                span_attributes=span_attributes,
                start_time=start_time,
                set_current=set_current,
                parent=parent,
                **payload,
            )

        span.set_current()

        self.spans[run_id] = span
        return span
Pass a `logger`, call `init_logger`, or run an experiment to configure Braintrust logging. Setting up a default." + ) + span = init_logger().start_span( + name=name, + type=type, + span_attributes=span_attributes, + start_time=start_time, + set_current=set_current, + parent=parent, + **event, + ) + + span.set_current() + + self.spans[run_id] = span + return span + + def _end_span( + self, + run_id: UUID, + parent_run_id: UUID | None = None, + input: Any | None = None, + output: Any | None = None, + expected: Any | None = None, + error: str | None = None, + tags: Sequence[str] | None = None, + scores: Mapping[str, int | float] | None = None, + metadata: Mapping[str, Any] | None = None, + metrics: Mapping[str, int | float] | None = None, + dataset_record_id: str | None = None, + ) -> Any: + if run_id not in self.spans: + return + + if run_id in self.skipped_runs: + self.skipped_runs.discard(run_id) + return + + span = self.spans.pop(run_id) + + if self.root_run_id == run_id: + self.root_run_id = None + + span.log( + input=input, + output=output, + expected=expected, + error=error, + tags=None, + scores=scores, + metadata={ + **({"tags": tags} if tags else {}), + **(metadata or {}), + }, + metrics=metrics, + dataset_record_id=dataset_record_id, + ) + + # In async workflows, callbacks may execute in different async contexts. + # The span's context variable token may have been created in a different + # context, causing ValueError when trying to reset it. We catch and ignore + # this specific error since the span hierarchy is maintained via self.spans. 
+ try: + span.unset_current() + except ValueError as e: + if "was created in a different Context" in str(e): + pass + else: + raise + + span.end() + + def on_llm_error( + self, + error: BaseException, + *, + run_id: UUID, + parent_run_id: UUID | None = None, + **kwargs: Any, # TODO: response= + ) -> Any: + self._end_span(run_id, error=str(error), metadata={**kwargs}) + + self._start_times.pop(run_id, None) + self._first_token_times.pop(run_id, None) + self._ttft_ms.pop(run_id, None) + + def on_chain_error( + self, + error: BaseException, + *, + run_id: UUID, + parent_run_id: UUID | None = None, + **kwargs: Any, # TODO: some metadata + ) -> Any: + self._end_span(run_id, error=str(error), metadata={**kwargs}) + + def on_tool_error( + self, + error: BaseException, + *, + run_id: UUID, + parent_run_id: UUID | None = None, + **kwargs: Any, + ) -> Any: + self._end_span(run_id, error=str(error), metadata={**kwargs}) + + def on_retriever_error( + self, + error: BaseException, + *, + run_id: UUID, + parent_run_id: UUID | None = None, + **kwargs: Any, + ) -> Any: + self._end_span(run_id, error=str(error), metadata={**kwargs}) + + # Agent Methods + def on_agent_action( + self, + action: Any, + *, + run_id: UUID, + parent_run_id: UUID | None = None, + **kwargs: Any, + ) -> Any: + self._start_span( + parent_run_id, + run_id, + type=SpanTypeAttribute.LLM, + name=action.tool, + event={"input": action, "metadata": {**kwargs}}, + ) + + def on_agent_finish( + self, + finish: Any, + *, + run_id: UUID, + parent_run_id: UUID | None = None, + **kwargs: Any, + ) -> Any: + self._end_span(run_id, output=finish, metadata={**kwargs}) + + def on_chain_start( + self, + serialized: dict[str, Any], + inputs: dict[str, Any], + *, + run_id: UUID, + parent_run_id: UUID | None = None, + tags: list[str] | None = None, + name: str | None = None, + metadata: dict[str, Any] | None = None, + **kwargs: Any, + ) -> Any: + tags = tags or [] + + # avoids extra logs that seem not as useful esp. 
with langgraph + if "langsmith:hidden" in tags: + self.skipped_runs.add(run_id) + return + + metadata = metadata or {} + resolved_name = ( + name + or metadata.get("langgraph_node") + or serialized.get("name") + or _last_item(serialized.get("id") or []) + or "Chain" + ) + + self._start_span( + parent_run_id, + run_id, + name=resolved_name, + event={ + "input": inputs, + "tags": tags, + "metadata": { + "serialized": serialized, + "name": name, + "metadata": metadata, + **kwargs, + }, + }, + ) + + def on_chain_end( + self, + outputs: dict[str, Any], + *, + run_id: UUID, + parent_run_id: UUID | None = None, + tags: list[str] | None = None, + **kwargs: Any, + ) -> Any: + self._end_span(run_id, output=outputs, tags=tags, metadata={**kwargs}) + + def on_llm_start( + self, + serialized: dict[str, Any], + prompts: list[str], + *, + run_id: UUID, + parent_run_id: UUID | None = None, + tags: list[str] | None = None, + metadata: dict[str, Any] | None = None, + name: str | None = None, + **kwargs: Any, + ) -> Any: + self._start_times[run_id] = time.perf_counter() + self._first_token_times.pop(run_id, None) + self._ttft_ms.pop(run_id, None) + + name = name or serialized.get("name") or _last_item(serialized.get("id") or []) or "LLM" + self._start_span( + parent_run_id, + run_id, + name=name, + type=SpanTypeAttribute.LLM, + event={ + "input": prompts, + "tags": tags, + "metadata": { + "serialized": serialized, + "name": name, + "metadata": metadata, + **kwargs, + }, + }, + ) + + def on_chat_model_start( + self, + serialized: dict[str, Any], + messages: list[list[Any]], + *, + run_id: UUID, + parent_run_id: UUID | None = None, + tags: list[str] | None = None, + metadata: dict[str, Any] | None = None, + name: str | None = None, + invocation_params: dict[str, Any] | None = None, + **kwargs: Any, + ) -> Any: + self._start_times[run_id] = time.perf_counter() + self._first_token_times.pop(run_id, None) + self._ttft_ms.pop(run_id, None) + + invocation_params = invocation_params or {} + 
self._start_span( + parent_run_id, + run_id, + name=name or serialized.get("name") or _last_item(serialized.get("id") or []) or "Chat Model", + type=SpanTypeAttribute.LLM, + event={ + "input": messages, + "tags": tags, + "metadata": ( + { + "serialized": serialized, + "invocation_params": invocation_params, + "metadata": metadata or {}, + "name": name, + **kwargs, + } + ), + }, + ) + + def on_llm_end( + self, + response: Any, + *, + run_id: UUID, + parent_run_id: UUID | None = None, + tags: list[str] | None = None, + **kwargs: Any, + ) -> Any: + if run_id not in self.spans: + return + + metrics = _get_metrics_from_response(response) + + ttft = self._ttft_ms.pop(run_id, None) + if ttft is not None: + metrics["time_to_first_token"] = ttft + + model_name = _get_model_name_from_response(response) + + self._start_times.pop(run_id, None) + self._first_token_times.pop(run_id, None) + + self._end_span( + run_id, + output=response, + metrics=metrics, + tags=tags, + metadata={ + "model": model_name, + **kwargs, + }, + ) + + def on_tool_start( + self, + serialized: dict[str, Any], + input_str: str, + *, + run_id: UUID, + parent_run_id: UUID | None = None, + tags: list[str] | None = None, + metadata: dict[str, Any] | None = None, + inputs: dict[str, Any] | None = None, + name: str | None = None, + **kwargs: Any, + ) -> Any: + self._start_span( + parent_run_id, + run_id, + name=name or serialized.get("name") or _last_item(serialized.get("id") or []) or "Tool", + type=SpanTypeAttribute.TOOL, + event={ + "input": inputs or _safe_parse_json(input_str), + "tags": tags, + "metadata": { + "metadata": metadata, + "serialized": serialized, + "input_str": input_str, + "input": _safe_parse_json(input_str), + "inputs": inputs, + "name": name, + **kwargs, + }, + }, + ) + + def on_tool_end( + self, + output: Any, + *, + run_id: UUID, + parent_run_id: UUID | None = None, + **kwargs: Any, + ) -> Any: + self._end_span(run_id, output=output, metadata={**kwargs}) + + def on_retriever_start( + 
self, + serialized: dict[str, Any], + query: str, + *, + run_id: UUID, + parent_run_id: UUID | None = None, + tags: list[str] | None = None, + metadata: dict[str, Any] | None = None, + name: str | None = None, + **kwargs: Any, + ) -> Any: + self._start_span( + parent_run_id, + run_id, + name=name or serialized.get("name") or _last_item(serialized.get("id") or []) or "Retriever", + type=SpanTypeAttribute.FUNCTION, + event={ + "input": query, + "tags": tags, + "metadata": { + "serialized": serialized, + "metadata": metadata, + "name": name, + **kwargs, + }, + }, + ) + + def on_retriever_end( + self, + documents: Sequence[Any], + *, + run_id: UUID, + parent_run_id: UUID | None = None, + **kwargs: Any, + ) -> Any: + self._end_span(run_id, output=documents, metadata={**kwargs}) + + def on_llm_new_token( + self, + token: str, + *, + chunk: Union[Any, None] = None, + run_id: UUID, + parent_run_id: UUID | None = None, + **kwargs: Any, + ) -> Any: + if run_id not in self._first_token_times: + now = time.perf_counter() + self._first_token_times[run_id] = now + start = self._start_times.get(run_id) + if start is not None: + self._ttft_ms[run_id] = now - start + + def on_text( + self, + text: str, + *, + run_id: UUID, + parent_run_id: UUID | None = None, + **kwargs: Any, + ) -> Any: + pass + + def on_retry( + self, + retry_state: Any, + *, + run_id: UUID, + parent_run_id: UUID | None = None, + **kwargs: Any, + ) -> Any: + pass + + def on_custom_event( + self, + name: str, + data: Any, + *, + run_id: UUID, + tags: list[str] | None = None, + metadata: dict[str, Any] | None = None, + **kwargs: Any, + ) -> Any: + pass + + +class BraintrustTracer(BraintrustCallbackHandler): + """Deprecated. Use BraintrustCallbackHandler instead.""" + + def __init__(self, *args: Any, **kwargs: Any): + _logger.warning( + "BraintrustTracer is deprecated. Use BraintrustCallbackHandler from " + "braintrust.wrappers.langchain instead." 
def _safe_parse_json(raw: str) -> Any:
    """Decode `raw` as JSON if possible; otherwise return it unchanged."""
    try:
        parsed = json.loads(raw)
    except Exception:
        return raw
    return parsed


def _last_item(items: list[Any]) -> Any:
    """Return the final element of `items`, or None for an empty list."""
    if not items:
        return None
    return items[-1]


def _walk_generations(response: Any):
    """Yield every generation across all batches of an LLMResult-like object."""
    for batch in response.generations or []:
        if not batch:
            continue
        for generation in batch:
            yield generation


def _get_model_name_from_response(response: Any) -> "str | None":
    """Pull the model name from generation metadata, falling back to llm_output."""
    for generation in _walk_generations(response):
        message = getattr(generation, "message", None)
        if not message:
            continue

        meta = getattr(message, "response_metadata", None)
        if isinstance(meta, dict) and meta:
            candidate = meta.get("model_name")
            if candidate:
                return candidate

    llm_output: dict[str, Any] = (response.llm_output or {}) if hasattr(response, "llm_output") else {}
    return llm_output.get("model_name") or llm_output.get("model") or ""


def _clean_object(obj: dict[str, Any]) -> dict[str, Any]:
    """Drop None values and empty list/dict values (0, "", and False are kept)."""
    cleaned: dict[str, Any] = {}
    for key, value in obj.items():
        if value is None:
            continue
        if isinstance(value, (list, dict)) and not value:
            continue
        cleaned[key] = value
    return cleaned


def _get_metrics_from_response(response: Any) -> dict[str, Any]:
    """Collect token-usage metrics from an LLM response.

    Prefers per-message `usage_metadata`; falls back to `llm_output` token
    counts when no (non-zero) metrics were found.
    """
    collected: dict[str, Any] = {}

    for generation in _walk_generations(response):
        message = getattr(generation, "message", None)
        if not message:
            continue

        usage = getattr(message, "usage_metadata", None)
        if not (usage and isinstance(usage, dict)):
            continue

        collected.update(
            _clean_object(
                {
                    "total_tokens": usage.get("total_tokens"),
                    "prompt_tokens": usage.get("input_tokens"),
                    "completion_tokens": usage.get("output_tokens"),
                }
            )
        )

        # Cache tokens live under nested input_token_details (LangChain
        # format); map them to Braintrust's standard cache metric names.
        details = usage.get("input_token_details")
        if details and isinstance(details, dict):
            if details.get("cache_read") is not None:
                collected["prompt_cached_tokens"] = details["cache_read"]
            if details.get("cache_creation") is not None:
                collected["prompt_cache_creation_tokens"] = details["cache_creation"]

    if not collected or not any(collected.values()):
        llm_output: dict[str, Any] = (response.llm_output or {}) if hasattr(response, "llm_output") else {}
        collected = llm_output.get("token_usage") or llm_output.get("estimatedTokens") or {}

    return _clean_object(collected)
likely a bug)" - - current_parent = langchain_parent.get() - if parent_run_id in self.spans: - parent_span = self.spans[parent_run_id] - elif current_parent is not None: - parent_span = current_parent - elif self.logger is not None: - parent_span = self.logger - else: - parent_span = braintrust - - span = parent_span.start_span(name=name, **kwargs) - langchain_parent.set(span) - self.spans[run_id] = span - return span - - def _end_span(self, run_id, **kwargs: Any) -> Any: - assert run_id in self.spans, f"No span exists for run_id {run_id} (this is likely a bug)" - span = self.spans.pop(run_id) - span.log(**kwargs) - - if langchain_parent.get() == span: - langchain_parent.set(None) - - span.end() - - def on_chain_start( - self, - serialized: dict[str, Any], - inputs: dict[str, Any], - *, - run_id: UUID, - parent_run_id: UUID | None = None, - tags: list[str] | None = None, - **kwargs: Any, - ) -> Any: - self._start_span(parent_run_id, run_id, "Chain", input=inputs, metadata={"tags": tags}) - - def on_chain_end( - self, outputs: dict[str, Any], *, run_id: UUID, parent_run_id: UUID | None = None, **kwargs: Any - ) -> Any: - self._end_span(run_id, output=outputs) - - def on_llm_start( - self, - serialized: dict[str, Any], - prompts: list[str], - *, - run_id: UUID, - parent_run_id: UUID | None = None, - tags: list[str] | None = None, - **kwargs: Any, - ) -> Any: - self._start_span( - parent_run_id, - run_id, - "LLM", - input=prompts, - metadata={"tags": tags, **kwargs["invocation_params"]}, - ) - - def on_chat_model_start( - self, - serialized: dict[str, Any], - messages: list[list[BaseMessage]], - *, - run_id: UUID, - parent_run_id: UUID | None = None, - tags: list[str] | None = None, - **kwargs: Any, - ) -> Any: - self._start_span( - parent_run_id, - run_id, - "Chat Model", - input=[[m.dict() for m in batch] for batch in messages], - metadata={"tags": tags, **kwargs["invocation_params"]}, - ) - - def on_llm_end( - self, response: LLMResult, *, run_id: UUID, 
parent_run_id: UUID | None = None, **kwargs: Any - ) -> Any: - metrics = {} - token_usage = response.llm_output.get("token_usage", {}) - if "total_tokens" in token_usage: - metrics["tokens"] = token_usage["total_tokens"] - if "prompt_tokens" in token_usage: - metrics["prompt_tokens"] = token_usage["prompt_tokens"] - if "completion_tokens" in token_usage: - metrics["completion_tokens"] = token_usage["completion_tokens"] - - self._end_span(run_id, output=[[m.dict() for m in batch] for batch in response.generations], metrics=metrics) - - def on_tool_start( - self, - serialized: dict[str, Any], - input_str: str, - *, - run_id: UUID, - parent_run_id: UUID | None = None, - tags: list[str] | None = None, - **kwargs: Any, - ) -> Any: - _logger.warning("Starting tool, but it will not be traced in braintrust (unsupported)") - - def on_tool_end(self, output: str, *, run_id: UUID, parent_run_id: UUID | None = None, **kwargs: Any) -> Any: - pass - - def on_retriever_start(self, query: str, *, run_id: UUID, parent_run_id: UUID | None = None, **kwargs: Any) -> Any: - _logger.warning("Starting retriever, but it will not be traced in braintrust (unsupported)") - - def on_retriever_end( - self, response: list[Document], *, run_id: UUID, parent_run_id: UUID | None = None, **kwargs: Any - ) -> Any: - pass diff --git a/py/src/braintrust/wrappers/langchain/__init__.py b/py/src/braintrust/wrappers/langchain/__init__.py new file mode 100644 index 00000000..0296a51a --- /dev/null +++ b/py/src/braintrust/wrappers/langchain/__init__.py @@ -0,0 +1,21 @@ +""" +Braintrust LangChain wrapper — re-exports from braintrust.integrations.langchain. 
+""" + +from braintrust.integrations.langchain import ( + BraintrustCallbackHandler, + BraintrustTracer, + LangChainIntegration, + clear_global_handler, + set_global_handler, + setup_langchain, +) + +__all__ = [ + "BraintrustCallbackHandler", + "BraintrustTracer", + "LangChainIntegration", + "set_global_handler", + "clear_global_handler", + "setup_langchain", +] From 813405d17c5b0ced466448c80ab8dc3fb3d2dfdf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Halber?= Date: Wed, 25 Mar 2026 01:26:25 +0000 Subject: [PATCH 02/14] maybe done --- integrations/langchain-py/README.md | 2 +- py/examples/langchain/manual_patching.py | 2 +- py/noxfile.py | 12 ++++++++++-- py/src/braintrust/auto.py | 2 -- .../auto_test_scripts/test_auto_langchain.py | 2 ++ py/src/braintrust/integrations/langchain/__init__.py | 4 +++- py/src/braintrust/integrations/langchain/conftest.py | 1 - .../braintrust/integrations/langchain/integration.py | 2 +- .../integrations/langchain/test_langchain.py | 12 +++++------- py/src/braintrust/integrations/langchain/tracing.py | 3 ++- py/src/braintrust/wrappers/langchain/__init__.py | 1 + 11 files changed, 26 insertions(+), 17 deletions(-) diff --git a/integrations/langchain-py/README.md b/integrations/langchain-py/README.md index 19717270..b2e1b2ab 100644 --- a/integrations/langchain-py/README.md +++ b/integrations/langchain-py/README.md @@ -23,7 +23,7 @@ SDK for integrating [Braintrust](https://braintrust.dev) with [LangChain](https: braintrust.auto_instrument() # After (option 2: explicit) - from braintrust.wrappers.langchain import BraintrustCallbackHandler, set_global_handler + from braintrust.integrations.langchain import BraintrustCallbackHandler, set_global_handler ``` The API is identical - no code changes needed beyond the import path. 
diff --git a/py/examples/langchain/manual_patching.py b/py/examples/langchain/manual_patching.py index 142bcc5a..4912e707 100644 --- a/py/examples/langchain/manual_patching.py +++ b/py/examples/langchain/manual_patching.py @@ -7,7 +7,7 @@ """ from braintrust import init_logger -from braintrust.wrappers.langchain import set_global_handler, BraintrustCallbackHandler +from braintrust.integrations.langchain import BraintrustCallbackHandler, set_global_handler # Setup LangChain tracing with a specific project diff --git a/py/noxfile.py b/py/noxfile.py index 2789bfee..1c8c7389 100644 --- a/py/noxfile.py +++ b/py/noxfile.py @@ -43,6 +43,7 @@ def _pinned_python_version(): INTEGRATION_DIR = "braintrust/integrations" INTEGRATION_AUTO_TEST_DIR = "braintrust/integrations/auto_test_scripts" ANTHROPIC_INTEGRATION_DIR = "braintrust/integrations/anthropic" +LANGCHAIN_INTEGRATION_DIR = "braintrust/integrations/langchain" CONTRIB_DIR = "braintrust/contrib" DEVSERVER_DIR = "braintrust/devserver" @@ -102,7 +103,7 @@ def _pinned_python_version(): GENAI_VERSIONS = (LATEST,) DSPY_VERSIONS = (LATEST,) GOOGLE_ADK_VERSIONS = (LATEST, "1.14.1") -LANGCHAIN_VERSIONS = (LATEST,) +LANGCHAIN_VERSIONS = (LATEST, "0.3.83") # temporalio 1.19.0+ requires Python >= 3.10; skip Python 3.9 entirely TEMPORAL_VERSIONS = (LATEST, "1.20.0", "1.19.0") PYTEST_VERSIONS = (LATEST, "8.4.2") @@ -425,7 +426,14 @@ def _run_core_tests(session): _run_tests( session, SRC_DIR, - ignore_paths=[WRAPPER_DIR, INTEGRATION_AUTO_TEST_DIR, ANTHROPIC_INTEGRATION_DIR, CONTRIB_DIR, DEVSERVER_DIR], + ignore_paths=[ + WRAPPER_DIR, + INTEGRATION_AUTO_TEST_DIR, + ANTHROPIC_INTEGRATION_DIR, + LANGCHAIN_INTEGRATION_DIR, + CONTRIB_DIR, + DEVSERVER_DIR, + ], ) diff --git a/py/src/braintrust/auto.py b/py/src/braintrust/auto.py index fb40d088..ad824726 100644 --- a/py/src/braintrust/auto.py +++ b/py/src/braintrust/auto.py @@ -4,8 +4,6 @@ Provides one-line instrumentation for supported libraries. 
""" -from __future__ import annotations - import logging from contextlib import contextmanager diff --git a/py/src/braintrust/integrations/auto_test_scripts/test_auto_langchain.py b/py/src/braintrust/integrations/auto_test_scripts/test_auto_langchain.py index a3719ef8..9cc7c771 100644 --- a/py/src/braintrust/integrations/auto_test_scripts/test_auto_langchain.py +++ b/py/src/braintrust/integrations/auto_test_scripts/test_auto_langchain.py @@ -3,6 +3,7 @@ from braintrust.auto import auto_instrument from braintrust.integrations.langchain import BraintrustCallbackHandler + # 1. Instrument results = auto_instrument() assert results.get("langchain") == True, "auto_instrument should return True for langchain" @@ -14,6 +15,7 @@ # 3. Verify that a global handler was registered with LangChain from langchain_core.callbacks import CallbackManager + manager = CallbackManager.configure() handler = next((h for h in manager.handlers if isinstance(h, BraintrustCallbackHandler)), None) assert handler is not None, "BraintrustCallbackHandler should be registered globally after auto_instrument()" diff --git a/py/src/braintrust/integrations/langchain/__init__.py b/py/src/braintrust/integrations/langchain/__init__.py index 9758ca95..e1a19352 100644 --- a/py/src/braintrust/integrations/langchain/__init__.py +++ b/py/src/braintrust/integrations/langchain/__init__.py @@ -21,7 +21,9 @@ def setup_langchain() -> bool: # Lazily imported to avoid circular imports at module load time # (tracing.py imports from braintrust, which must be fully initialized first) -_LAZY_ATTRS = frozenset(["BraintrustCallbackHandler", "BraintrustTracer", "set_global_handler", "clear_global_handler"]) +_LAZY_ATTRS = frozenset( + ["BraintrustCallbackHandler", "BraintrustTracer", "set_global_handler", "clear_global_handler"] +) def __getattr__(name: str): diff --git a/py/src/braintrust/integrations/langchain/conftest.py b/py/src/braintrust/integrations/langchain/conftest.py index 078502b2..a8f56176 100644 --- 
a/py/src/braintrust/integrations/langchain/conftest.py +++ b/py/src/braintrust/integrations/langchain/conftest.py @@ -10,7 +10,6 @@ _MemoryBackgroundLogger, ) from braintrust.test_helpers import init_test_logger - from braintrust.wrappers.langchain import clear_global_handler diff --git a/py/src/braintrust/integrations/langchain/integration.py b/py/src/braintrust/integrations/langchain/integration.py index e22cdc0f..0f29b0da 100644 --- a/py/src/braintrust/integrations/langchain/integration.py +++ b/py/src/braintrust/integrations/langchain/integration.py @@ -2,7 +2,7 @@ from typing import Any -from braintrust.integrations.base import BasePatcher, BaseIntegration +from braintrust.integrations.base import BaseIntegration, BasePatcher class LangChainCallbackPatcher(BasePatcher): diff --git a/py/src/braintrust/integrations/langchain/test_langchain.py b/py/src/braintrust/integrations/langchain/test_langchain.py index 827cf777..889fe505 100644 --- a/py/src/braintrust/integrations/langchain/test_langchain.py +++ b/py/src/braintrust/integrations/langchain/test_langchain.py @@ -1,10 +1,12 @@ # pyright: reportTypedDictNotRequiredAccess=none import uuid -from typing import Any, Dict, List, Sequence, Union, cast +from typing import Any, Dict, List, Union, cast from unittest.mock import ANY import pytest +from braintrust.integrations.langchain import BraintrustCallbackHandler, set_global_handler from braintrust.logger import flush +from braintrust.wrappers.test_utils import verify_autoinstrument_script from langchain_anthropic import ChatAnthropic from langchain_core.callbacks import BaseCallbackHandler, CallbackManager from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage @@ -15,11 +17,9 @@ from langchain_openai import ChatOpenAI from pydantic import BaseModel, Field -from braintrust.integrations.langchain import BraintrustCallbackHandler, set_global_handler -from braintrust.wrappers.test_utils import verify_autoinstrument_script - from .conftest import 
LoggerMemoryLogger + # --------------------------------------------------------------------------- # Helpers (inlined from the integration package) # --------------------------------------------------------------------------- @@ -44,9 +44,7 @@ def assert_matches_object(actual: Any, expected: Any, ignore_order: bool = False matched = True except Exception: pass - assert matched, ( - f"Expected {expected_item} in unordered sequence but couldn't find match in {actual}" - ) + assert matched, f"Expected {expected_item} in unordered sequence but couldn't find match in {actual}" elif isinstance(expected, dict): assert isinstance(actual, dict), f"Expected dict but got {type(actual)}" for k, v in expected.items(): diff --git a/py/src/braintrust/integrations/langchain/tracing.py b/py/src/braintrust/integrations/langchain/tracing.py index 08458314..85044bf1 100644 --- a/py/src/braintrust/integrations/langchain/tracing.py +++ b/py/src/braintrust/integrations/langchain/tracing.py @@ -24,6 +24,7 @@ from braintrust.version import VERSION as sdk_version from typing_extensions import NotRequired + _logger = logging.getLogger(__name__) # integration_name stays "langchain-py" for backward compatibility with existing traces @@ -611,7 +612,7 @@ class BraintrustTracer(BraintrustCallbackHandler): def __init__(self, *args: Any, **kwargs: Any): _logger.warning( "BraintrustTracer is deprecated. Use BraintrustCallbackHandler from " - "braintrust.wrappers.langchain instead." + "braintrust.integrations.langchain instead." 
) super().__init__(*args, **kwargs) diff --git a/py/src/braintrust/wrappers/langchain/__init__.py b/py/src/braintrust/wrappers/langchain/__init__.py index 0296a51a..72f30ca4 100644 --- a/py/src/braintrust/wrappers/langchain/__init__.py +++ b/py/src/braintrust/wrappers/langchain/__init__.py @@ -11,6 +11,7 @@ setup_langchain, ) + __all__ = [ "BraintrustCallbackHandler", "BraintrustTracer", From 1968511e74935b0d97f2f0557614182f267abea2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Halber?= Date: Wed, 25 Mar 2026 17:40:59 +0000 Subject: [PATCH 03/14] rename the cassettes --- ...st_async_langchain_invoke => test_async_langchain_invoke.yaml} | 0 .../{test_chain_with_memory => test_chain_with_memory.yaml} | 0 .../cassettes/{test_global_handler => test_global_handler.yaml} | 0 ...opic_integration => test_langchain_anthropic_integration.yaml} | 0 ...raph_state_management => test_langgraph_state_management.yaml} | 0 .../langchain/cassettes/{test_llm_calls => test_llm_calls.yaml} | 0 .../{test_parallel_execution => test_parallel_execution.yaml} | 0 ...test_prompt_caching_tokens => test_prompt_caching_tokens.yaml} | 0 .../cassettes/{test_streaming_ttft => test_streaming_ttft.yaml} | 0 .../langchain/cassettes/{test_tool_usage => test_tool_usage.yaml} | 0 10 files changed, 0 insertions(+), 0 deletions(-) rename py/src/braintrust/integrations/langchain/cassettes/{test_async_langchain_invoke => test_async_langchain_invoke.yaml} (100%) rename py/src/braintrust/integrations/langchain/cassettes/{test_chain_with_memory => test_chain_with_memory.yaml} (100%) rename py/src/braintrust/integrations/langchain/cassettes/{test_global_handler => test_global_handler.yaml} (100%) rename py/src/braintrust/integrations/langchain/cassettes/{test_langchain_anthropic_integration => test_langchain_anthropic_integration.yaml} (100%) rename py/src/braintrust/integrations/langchain/cassettes/{test_langgraph_state_management => test_langgraph_state_management.yaml} (100%) rename 
py/src/braintrust/integrations/langchain/cassettes/{test_llm_calls => test_llm_calls.yaml} (100%) rename py/src/braintrust/integrations/langchain/cassettes/{test_parallel_execution => test_parallel_execution.yaml} (100%) rename py/src/braintrust/integrations/langchain/cassettes/{test_prompt_caching_tokens => test_prompt_caching_tokens.yaml} (100%) rename py/src/braintrust/integrations/langchain/cassettes/{test_streaming_ttft => test_streaming_ttft.yaml} (100%) rename py/src/braintrust/integrations/langchain/cassettes/{test_tool_usage => test_tool_usage.yaml} (100%) diff --git a/py/src/braintrust/integrations/langchain/cassettes/test_async_langchain_invoke b/py/src/braintrust/integrations/langchain/cassettes/test_async_langchain_invoke.yaml similarity index 100% rename from py/src/braintrust/integrations/langchain/cassettes/test_async_langchain_invoke rename to py/src/braintrust/integrations/langchain/cassettes/test_async_langchain_invoke.yaml diff --git a/py/src/braintrust/integrations/langchain/cassettes/test_chain_with_memory b/py/src/braintrust/integrations/langchain/cassettes/test_chain_with_memory.yaml similarity index 100% rename from py/src/braintrust/integrations/langchain/cassettes/test_chain_with_memory rename to py/src/braintrust/integrations/langchain/cassettes/test_chain_with_memory.yaml diff --git a/py/src/braintrust/integrations/langchain/cassettes/test_global_handler b/py/src/braintrust/integrations/langchain/cassettes/test_global_handler.yaml similarity index 100% rename from py/src/braintrust/integrations/langchain/cassettes/test_global_handler rename to py/src/braintrust/integrations/langchain/cassettes/test_global_handler.yaml diff --git a/py/src/braintrust/integrations/langchain/cassettes/test_langchain_anthropic_integration b/py/src/braintrust/integrations/langchain/cassettes/test_langchain_anthropic_integration.yaml similarity index 100% rename from py/src/braintrust/integrations/langchain/cassettes/test_langchain_anthropic_integration rename 
to py/src/braintrust/integrations/langchain/cassettes/test_langchain_anthropic_integration.yaml diff --git a/py/src/braintrust/integrations/langchain/cassettes/test_langgraph_state_management b/py/src/braintrust/integrations/langchain/cassettes/test_langgraph_state_management.yaml similarity index 100% rename from py/src/braintrust/integrations/langchain/cassettes/test_langgraph_state_management rename to py/src/braintrust/integrations/langchain/cassettes/test_langgraph_state_management.yaml diff --git a/py/src/braintrust/integrations/langchain/cassettes/test_llm_calls b/py/src/braintrust/integrations/langchain/cassettes/test_llm_calls.yaml similarity index 100% rename from py/src/braintrust/integrations/langchain/cassettes/test_llm_calls rename to py/src/braintrust/integrations/langchain/cassettes/test_llm_calls.yaml diff --git a/py/src/braintrust/integrations/langchain/cassettes/test_parallel_execution b/py/src/braintrust/integrations/langchain/cassettes/test_parallel_execution.yaml similarity index 100% rename from py/src/braintrust/integrations/langchain/cassettes/test_parallel_execution rename to py/src/braintrust/integrations/langchain/cassettes/test_parallel_execution.yaml diff --git a/py/src/braintrust/integrations/langchain/cassettes/test_prompt_caching_tokens b/py/src/braintrust/integrations/langchain/cassettes/test_prompt_caching_tokens.yaml similarity index 100% rename from py/src/braintrust/integrations/langchain/cassettes/test_prompt_caching_tokens rename to py/src/braintrust/integrations/langchain/cassettes/test_prompt_caching_tokens.yaml diff --git a/py/src/braintrust/integrations/langchain/cassettes/test_streaming_ttft b/py/src/braintrust/integrations/langchain/cassettes/test_streaming_ttft.yaml similarity index 100% rename from py/src/braintrust/integrations/langchain/cassettes/test_streaming_ttft rename to py/src/braintrust/integrations/langchain/cassettes/test_streaming_ttft.yaml diff --git 
a/py/src/braintrust/integrations/langchain/cassettes/test_tool_usage b/py/src/braintrust/integrations/langchain/cassettes/test_tool_usage.yaml similarity index 100% rename from py/src/braintrust/integrations/langchain/cassettes/test_tool_usage rename to py/src/braintrust/integrations/langchain/cassettes/test_tool_usage.yaml From edd29ebed7ca4d150460d0cac8b333c4ead107aa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Halber?= Date: Wed, 25 Mar 2026 17:42:48 +0000 Subject: [PATCH 04/14] and update the tests to use the renamed cassettes --- py/src/braintrust/integrations/langchain/conftest.py | 1 - 1 file changed, 1 deletion(-) diff --git a/py/src/braintrust/integrations/langchain/conftest.py b/py/src/braintrust/integrations/langchain/conftest.py index a8f56176..a10ab496 100644 --- a/py/src/braintrust/integrations/langchain/conftest.py +++ b/py/src/braintrust/integrations/langchain/conftest.py @@ -43,7 +43,6 @@ def vcr_config(): "record_mode": record_mode, "match_on": ["uri", "method", "body"], "cassette_library_dir": str(Path(__file__).parent / "cassettes"), - "path_transformer": lambda path: path.replace(".yaml", ""), } From ab5c901876241c2a8e012914f5bc295ae8010591 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Halber?= Date: Wed, 25 Mar 2026 17:48:43 +0000 Subject: [PATCH 05/14] remove unecessary python patterns used by python < 3.10 --- .../integrations/langchain/test_langchain.py | 22 +++++++++---------- .../integrations/langchain/tracing.py | 6 ++--- 2 files changed, 13 insertions(+), 15 deletions(-) diff --git a/py/src/braintrust/integrations/langchain/test_langchain.py b/py/src/braintrust/integrations/langchain/test_langchain.py index 889fe505..5698fdb7 100644 --- a/py/src/braintrust/integrations/langchain/test_langchain.py +++ b/py/src/braintrust/integrations/langchain/test_langchain.py @@ -1,6 +1,6 @@ # pyright: reportTypedDictNotRequiredAccess=none import uuid -from typing import Any, Dict, List, Union, cast +from typing import Any, cast from 
unittest.mock import ANY import pytest @@ -59,7 +59,7 @@ def assert_matches_object(actual: Any, expected: Any, ignore_order: bool = False assert actual == expected, f"Expected {expected} but got {actual}" -def find_spans_by_attributes(spans: List[Any], **attributes: Any) -> List[Any]: +def find_spans_by_attributes(spans: list[Any], **attributes: Any) -> list[Any]: """Find all spans matching the given span_attributes.""" matching = [] for span in spans: @@ -90,7 +90,7 @@ def test_llm_calls(logger_memory_logger: LoggerMemoryLogger): presence_penalty=0, n=1, ) - chain: RunnableSerializable[Dict[str, str], BaseMessage] = prompt.pipe(model) + chain: RunnableSerializable[dict[str, str], BaseMessage] = prompt.pipe(model) chain.invoke({"number": "2"}, config={"callbacks": [cast(BaseCallbackHandler, handler)]}) spans = memory_logger.pop() @@ -225,7 +225,7 @@ def test_global_handler(logger_memory_logger: LoggerMemoryLogger): presence_penalty=0, n=1, ) - chain: RunnableSerializable[Dict[str, str], BaseMessage] = prompt.pipe(model) + chain: RunnableSerializable[dict[str, str], BaseMessage] = prompt.pipe(model) message = chain.invoke({"number": "2"}) @@ -350,7 +350,7 @@ def test_chain_with_memory(logger_memory_logger: LoggerMemoryLogger): handler = BraintrustCallbackHandler(logger=logger) prompt = ChatPromptTemplate.from_template("{history} User: {input}") model = ChatOpenAI(model="gpt-4o-mini") - chain: RunnableSerializable[Dict[str, str], BaseMessage] = prompt.pipe(model) + chain: RunnableSerializable[dict[str, str], BaseMessage] = prompt.pipe(model) memory = {"history": "Assistant: Hello! 
How can I assist you today?"} chain.invoke( @@ -695,16 +695,16 @@ def test_langgraph_state_management(logger_memory_logger: LoggerMemoryLogger): n=1, ) - def say_hello(state: Dict[str, str]): + def say_hello(state: dict[str, str]): response = model.invoke("Say hello") - return cast(Union[str, List[str], Dict[str, str]], response.content) + return cast(str | list[str] | dict[str, str], response.content) - def say_bye(state: Dict[str, str]): + def say_bye(state: dict[str, str]): print("From the 'sayBye' node: Bye world!") return "Bye" workflow = ( - StateGraph(state_schema=Dict[str, str]) + StateGraph(state_schema=dict[str, str]) .add_node("sayHello", say_hello) .add_node("sayBye", say_bye) .add_edge(START, "sayHello") @@ -1033,9 +1033,9 @@ def test_streaming_ttft(logger_memory_logger: LoggerMemoryLogger): max_completion_tokens=50, streaming=True, ) - chain: RunnableSerializable[Dict[str, str], BaseMessage] = prompt.pipe(model) + chain: RunnableSerializable[dict[str, str], BaseMessage] = prompt.pipe(model) - chunks: List[str] = [] + chunks: list[str] = [] for chunk in chain.stream({}, config={"callbacks": [cast(BaseCallbackHandler, handler)]}): if chunk.content: chunks.append(str(chunk.content)) diff --git a/py/src/braintrust/integrations/langchain/tracing.py b/py/src/braintrust/integrations/langchain/tracing.py index 85044bf1..1a142e6b 100644 --- a/py/src/braintrust/integrations/langchain/tracing.py +++ b/py/src/braintrust/integrations/langchain/tracing.py @@ -11,11 +11,9 @@ import time from collections.abc import Mapping, Sequence from contextvars import ContextVar -from re import Pattern from typing import ( Any, TypedDict, - Union, ) from uuid import UUID @@ -90,7 +88,7 @@ def __init__( self, logger: Logger | Span | None = None, debug: bool = False, - exclude_metadata_props: Pattern[str] | None = None, + exclude_metadata_props: re.Pattern[str] | None = None, ): self.logger = logger self.spans: dict[UUID, Span] = {} @@ -561,7 +559,7 @@ def on_llm_new_token( self, 
token: str, *, - chunk: Union[Any, None] = None, + chunk: Any | None = None, run_id: UUID, parent_run_id: UUID | None = None, **kwargs: Any, From 45710117a8c1282227d85d230baca10385b75aa8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Halber?= Date: Wed, 25 Mar 2026 18:47:52 +0000 Subject: [PATCH 06/14] rerecord cassettes --- .../test_async_langchain_invoke.yaml | 201 +++++++++++++++++- .../test_langchain_anthropic_integration.yaml | 196 ++++++++++++++++- .../cassettes/test_prompt_caching_tokens.yaml | 4 +- .../integrations/langchain/conftest.py | 4 +- .../integrations/langchain/test_langchain.py | 49 +---- .../integrations/langchain/tracing.py | 26 ++- 6 files changed, 418 insertions(+), 62 deletions(-) diff --git a/py/src/braintrust/integrations/langchain/cassettes/test_async_langchain_invoke.yaml b/py/src/braintrust/integrations/langchain/cassettes/test_async_langchain_invoke.yaml index 3ecc362e..2ffcb4e8 100644 --- a/py/src/braintrust/integrations/langchain/cassettes/test_async_langchain_invoke.yaml +++ b/py/src/braintrust/integrations/langchain/cassettes/test_async_langchain_invoke.yaml @@ -1,6 +1,6 @@ interactions: - request: - body: '{"max_tokens": 1024, "messages": [{"role": "user", "content": "What is + body: '{"max_tokens": 64000, "messages": [{"role": "user", "content": "What is 1 + 2?"}], "model": "claude-sonnet-4-20250514"}' headers: accept: @@ -80,7 +80,7 @@ interactions: code: 200 message: OK - request: - body: '{"max_tokens":1024,"messages":[{"role":"user","content":"What is 1 + 2?"}],"model":"claude-sonnet-4-20250514"}' + body: '{"max_tokens":64000,"messages":[{"role":"user","content":"What is 1 + 2?"}],"model":"claude-sonnet-4-20250514"}' headers: accept: - application/json @@ -177,7 +177,7 @@ interactions: code: 200 message: OK - request: - body: '{"max_tokens":1024,"messages":[{"role":"user","content":"What is 1 + 2?"}],"model":"claude-sonnet-4-20250514"}' + body: '{"max_tokens":64000,"messages":[{"role":"user","content":"What is 1 + 
2?"}],"model":"claude-sonnet-4-20250514"}' headers: accept: - application/json @@ -273,4 +273,199 @@ interactions: status: code: 200 message: OK +- request: + body: '{"max_tokens":64000,"messages":[{"role":"user","content":"What is 1 + 2?"}],"model":"claude-sonnet-4-20250514"}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate, zstd + Connection: + - keep-alive + Content-Length: + - '111' + Content-Type: + - application/json + Host: + - api.anthropic.com + User-Agent: + - langchain-anthropic/1.4.0 + X-Stainless-Arch: + - x64 + X-Stainless-Async: + - async:asyncio + X-Stainless-Lang: + - python + X-Stainless-OS: + - Linux + X-Stainless-Package-Version: + - 0.86.0 + X-Stainless-Runtime: + - CPython + X-Stainless-Runtime-Version: + - 3.14.3 + anthropic-version: + - '2023-06-01' + x-stainless-retry-count: + - '0' + x-stainless-timeout: + - NOT_GIVEN + method: POST + uri: https://api.anthropic.com/v1/messages + response: + body: + string: !!binary | + H4sIAAAAAAAAA6pWKqksSFWyUkotKsovUtKB0lbVMPHE0pKM1LySzOTEksz8vHiYstzU4uLEdJCC + zLyyxJzMFIUK3cSCTN3s1EqlWh2lotTC0tTikvjMFCUrECfewNDQOSrQvSzPx6K8zDjCNCOyMDI1 + qiRKqRYAAAD//wMAzB6OOoIAAAA= + headers: + CF-RAY: + - 9e1ffdb81a020555-CMH + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Security-Policy: + - default-src 'none'; frame-ancestors 'none' + Content-Type: + - application/json + Date: + - Wed, 25 Mar 2026 18:31:11 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Robots-Tag: + - none + cf-cache-status: + - DYNAMIC + request-id: + - req_011CZQGvnL8wv3X5hYqYeZtZ + server-timing: + - x-originResponse;dur=13 + set-cookie: + - _cfuvid=gyUUSAZvGwkONX58EfvF0btj1CdyxvDfcMYIpZzdC_s-1774463471.3759444-1.0.1.1-Y5yl2w0p0ULztM1yD9IfmXjoPl60ueMNw9C.Pdj8r9E; + HttpOnly; SameSite=None; Secure; Path=/; Domain=api.anthropic.com + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + vary: + - Accept-Encoding + x-envoy-upstream-service-time: + - '11' + 
x-should-retry: + - 'false' + status: + code: 401 + message: Unauthorized +- request: + body: '{"max_tokens":64000,"messages":[{"role":"user","content":"What is 1 + 2?"}],"model":"claude-sonnet-4-20250514"}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate, zstd + Connection: + - keep-alive + Content-Length: + - '111' + Content-Type: + - application/json + Host: + - api.anthropic.com + User-Agent: + - langchain-anthropic/1.4.0 + X-Stainless-Arch: + - x64 + X-Stainless-Async: + - async:asyncio + X-Stainless-Lang: + - python + X-Stainless-OS: + - Linux + X-Stainless-Package-Version: + - 0.86.0 + X-Stainless-Runtime: + - CPython + X-Stainless-Runtime-Version: + - 3.14.3 + anthropic-version: + - '2023-06-01' + x-stainless-retry-count: + - '0' + x-stainless-timeout: + - NOT_GIVEN + method: POST + uri: https://api.anthropic.com/v1/messages + response: + body: + string: !!binary | + H4sIAAAAAAAAA3SQTUsDMRCG/8ryXk1htx+2BDx4EEF68KxIiMnYLs1OtslsaSn73yXFIiqeBuZ5 + 5vOMLnoK0HDBDp4mOTKTTOaTaT1d1ItmDoXWQ6PLG1M3y+XDav1y79ar07579k+7R79KRyjIqadi + Uc52Q1BIMZSEzbnNYlmg4CILsUC/nq++0LGQS9BoqptqWt1VM4xvCllibxLZHBkaxN7IkBhfINN+ + IHYEzUMICsNlrj6j5X4QI3FHnKGbWwVn3ZaMS2SljWx+CvWVJ7L+P3atLf2p31JHyQaz6P7637TZ + /qajQhzkx3YzhUzp0Doy0lKCRnmWt8mXv/MHpXKj2VCEBkcx9mDbYN8DYRw/AQAA//8DAJRQPdG9 + AQAA + headers: + CF-RAY: + - 9e1ffe8f48166bf7-CMH + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Security-Policy: + - default-src 'none'; frame-ancestors 'none' + Content-Type: + - application/json + Date: + - Wed, 25 Mar 2026 18:31:46 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Robots-Tag: + - none + anthropic-organization-id: + - 27796668-7351-40ac-acc4-024aee8995a5 + anthropic-ratelimit-input-tokens-limit: + - '3000000' + anthropic-ratelimit-input-tokens-remaining: + - '3000000' + anthropic-ratelimit-input-tokens-reset: + - '2026-03-25T18:31:46Z' + anthropic-ratelimit-output-tokens-limit: + - '600000' + 
anthropic-ratelimit-output-tokens-remaining: + - '600000' + anthropic-ratelimit-output-tokens-reset: + - '2026-03-25T18:31:46Z' + anthropic-ratelimit-requests-limit: + - '20000' + anthropic-ratelimit-requests-remaining: + - '19999' + anthropic-ratelimit-requests-reset: + - '2026-03-25T18:31:45Z' + anthropic-ratelimit-tokens-limit: + - '3600000' + anthropic-ratelimit-tokens-remaining: + - '3600000' + anthropic-ratelimit-tokens-reset: + - '2026-03-25T18:31:46Z' + cf-cache-status: + - DYNAMIC + request-id: + - req_011CZQGyKYAWfDXkbs6TYmFj + server-timing: + - x-originResponse;dur=1115 + set-cookie: + - _cfuvid=GkeOmu_nJ3IH0wFr_Ysu_15x_mE_eZFWx7SyJP8s9wY-1774463505.8047376-1.0.1.1-yT6hue7f9O0MUMAxxAf.Cl6UVJAog65JmUFJd1pdxEw; + HttpOnly; SameSite=None; Secure; Path=/; Domain=api.anthropic.com + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + vary: + - Accept-Encoding + x-envoy-upstream-service-time: + - '1114' + status: + code: 200 + message: OK version: 1 diff --git a/py/src/braintrust/integrations/langchain/cassettes/test_langchain_anthropic_integration.yaml b/py/src/braintrust/integrations/langchain/cassettes/test_langchain_anthropic_integration.yaml index 6c396d02..516f74f8 100644 --- a/py/src/braintrust/integrations/langchain/cassettes/test_langchain_anthropic_integration.yaml +++ b/py/src/braintrust/integrations/langchain/cassettes/test_langchain_anthropic_integration.yaml @@ -1,6 +1,6 @@ interactions: - request: - body: '{"max_tokens": 1024, "messages": [{"role": "user", "content": "What is + body: '{"max_tokens": 64000, "messages": [{"role": "user", "content": "What is 1 + 2?"}], "model": "claude-sonnet-4-20250514"}' headers: accept: @@ -104,7 +104,7 @@ interactions: code: 200 message: OK - request: - body: '{"max_tokens":1024,"messages":[{"role":"user","content":"What is 1 + 2?"}],"model":"claude-sonnet-4-20250514"}' + body: '{"max_tokens":64000,"messages":[{"role":"user","content":"What is 1 + 
2?"}],"model":"claude-sonnet-4-20250514"}' headers: accept: - application/json @@ -201,7 +201,7 @@ interactions: code: 200 message: OK - request: - body: '{"max_tokens":1024,"messages":[{"role":"user","content":"What is 1 + 2?"}],"model":"claude-sonnet-4-20250514"}' + body: '{"max_tokens":64000,"messages":[{"role":"user","content":"What is 1 + 2?"}],"model":"claude-sonnet-4-20250514"}' headers: accept: - application/json @@ -297,4 +297,194 @@ interactions: status: code: 200 message: OK +- request: + body: '{"max_tokens":64000,"messages":[{"role":"user","content":"What is 1 + 2?"}],"model":"claude-sonnet-4-20250514"}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate, zstd + Connection: + - keep-alive + Content-Length: + - '111' + Content-Type: + - application/json + Cookie: + - _cfuvid=173XVm8c6LwLnZGSZjw3nktrd.OcW9mUj4Ct6Nzco6M-1774463470.6692584-1.0.1.1-Z8FXX.hH2DDJmupKJNi.NwPEp.ON4Sm.8PzX9hNNFu4 + Host: + - api.anthropic.com + User-Agent: + - langchain-anthropic/1.4.0 + X-Stainless-Arch: + - x64 + X-Stainless-Async: + - 'false' + X-Stainless-Lang: + - python + X-Stainless-OS: + - Linux + X-Stainless-Package-Version: + - 0.86.0 + X-Stainless-Runtime: + - CPython + X-Stainless-Runtime-Version: + - 3.14.3 + anthropic-version: + - '2023-06-01' + x-stainless-retry-count: + - '0' + x-stainless-timeout: + - NOT_GIVEN + method: POST + uri: https://api.anthropic.com/v1/messages + response: + body: + string: !!binary | + H4sIAAAAAAAAA6pWKqksSFWyUkotKsovUtKB0lbVMPHE0pKM1LySzOTEksz8vHiYstzU4uLEdJCC + zLyyxJzMFIUK3cSCTN3s1EqlWh2lotTC0tTikvjMFCUrECfewNDQOSrQvSw738/c0CKlqsIlONXX + 1CNTqRYAAAD//wMAPzaTsIIAAAA= + headers: + CF-RAY: + - 9e1ffdb5cf75e6b0-CMH + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Security-Policy: + - default-src 'none'; frame-ancestors 'none' + Content-Type: + - application/json + Date: + - Wed, 25 Mar 2026 18:31:11 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Robots-Tag: + - none + 
cf-cache-status: + - DYNAMIC + request-id: + - req_011CZQGvkoN718dzxDSeM5Hi + server-timing: + - x-originResponse;dur=32 + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + vary: + - Accept-Encoding + x-envoy-upstream-service-time: + - '28' + x-should-retry: + - 'false' + status: + code: 401 + message: Unauthorized +- request: + body: '{"max_tokens":64000,"messages":[{"role":"user","content":"What is 1 + 2?"}],"model":"claude-sonnet-4-20250514"}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate, zstd + Connection: + - keep-alive + Content-Length: + - '111' + Content-Type: + - application/json + Cookie: + - _cfuvid=PQKr9HJ20fMN78IoIWNmzDAMdKfcxl7b4a28wiRRrOA-1774463501.3608425-1.0.1.1-vWPsSIFWMsbw3tO25QWBZA3vmGBN5garPTOk7LK_8Y4 + Host: + - api.anthropic.com + User-Agent: + - langchain-anthropic/1.4.0 + X-Stainless-Arch: + - x64 + X-Stainless-Async: + - 'false' + X-Stainless-Lang: + - python + X-Stainless-OS: + - Linux + X-Stainless-Package-Version: + - 0.86.0 + X-Stainless-Runtime: + - CPython + X-Stainless-Runtime-Version: + - 3.14.3 + anthropic-version: + - '2023-06-01' + x-stainless-retry-count: + - '0' + x-stainless-timeout: + - NOT_GIVEN + method: POST + uri: https://api.anthropic.com/v1/messages + response: + body: + string: !!binary | + H4sIAAAAAAAA/3WQT0vEMBDFv0qZq11ou1vRgjePHoS9KRKyzdgG00lNJovr0u/uZHGRVTxleL+X + N3+OMHmDDjronU4GV9ETIa82q6Zq2qqtN1CCNcKnOKiqbh83djek7efN/cN869fzE46HrXj4MGN2 + YYx6QBGCd1nQMdrImlik3hOjVN3z8exn/Mjk9HRQF1dFU9wVa1heSojsZxVQy0jCkIziFAi+QcT3 + hNRLCCXnSkinvt0RLM2JFfs3pAhdfS19dT+i6iWKrSd1aajOXLD5j53/5nycR5wwaKfa6a//h9bj + b7qU4BNfTLeWbTDsbY+KLQbZMx/L6GDy3ekVQ95RDegFkWel99o6vZPbLssXLJhatL0BAAA= + headers: + CF-RAY: + - 9e1ffe873bf7d04f-CMH + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Security-Policy: + - default-src 'none'; frame-ancestors 'none' + Content-Type: + - application/json + Date: + - Wed, 25 Mar 2026 18:31:45 GMT + Server: + - cloudflare + 
Transfer-Encoding: + - chunked + X-Robots-Tag: + - none + anthropic-organization-id: + - 27796668-7351-40ac-acc4-024aee8995a5 + anthropic-ratelimit-input-tokens-limit: + - '3000000' + anthropic-ratelimit-input-tokens-remaining: + - '3000000' + anthropic-ratelimit-input-tokens-reset: + - '2026-03-25T18:31:45Z' + anthropic-ratelimit-output-tokens-limit: + - '600000' + anthropic-ratelimit-output-tokens-remaining: + - '600000' + anthropic-ratelimit-output-tokens-reset: + - '2026-03-25T18:31:45Z' + anthropic-ratelimit-requests-limit: + - '20000' + anthropic-ratelimit-requests-remaining: + - '19999' + anthropic-ratelimit-requests-reset: + - '2026-03-25T18:31:44Z' + anthropic-ratelimit-tokens-limit: + - '3600000' + anthropic-ratelimit-tokens-remaining: + - '3600000' + anthropic-ratelimit-tokens-reset: + - '2026-03-25T18:31:45Z' + cf-cache-status: + - DYNAMIC + request-id: + - req_011CZQGyE516m7mENVa2xVhJ + server-timing: + - x-originResponse;dur=1219 + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-envoy-upstream-service-time: + - '1218' + status: + code: 200 + message: OK version: 1 diff --git a/py/src/braintrust/integrations/langchain/cassettes/test_prompt_caching_tokens.yaml b/py/src/braintrust/integrations/langchain/cassettes/test_prompt_caching_tokens.yaml index 441128e9..dd57682c 100644 --- a/py/src/braintrust/integrations/langchain/cassettes/test_prompt_caching_tokens.yaml +++ b/py/src/braintrust/integrations/langchain/cassettes/test_prompt_caching_tokens.yaml @@ -1,6 +1,6 @@ interactions: - request: - body: '{"max_tokens":1024,"messages":[{"role":"user","content":"What is the first + body: '{"max_tokens":64000,"messages":[{"role":"user","content":"What is the first type of testing mentioned in section 1.2?"}],"model":"claude-sonnet-4-5-20250929","system":[{"type":"text","text":"\n# Comprehensive Guide to Software Testing Methods!\n\n## Chapter 1: Introduction to Testing\n\nSoftware testing is a critical component of the software 
development @@ -159,7 +159,7 @@ interactions: code: 200 message: OK - request: - body: '{"max_tokens":1024,"messages":[{"role":"user","content":"What is the first + body: '{"max_tokens":64000,"messages":[{"role":"user","content":"What is the first type of testing mentioned in section 1.2?"},{"role":"assistant","content":"According to section 1.2 \"Types of Testing,\" the first type of testing mentioned is **Unit Testing**, which is described as \"Testing individual components or functions diff --git a/py/src/braintrust/integrations/langchain/conftest.py b/py/src/braintrust/integrations/langchain/conftest.py index a10ab496..e369fffd 100644 --- a/py/src/braintrust/integrations/langchain/conftest.py +++ b/py/src/braintrust/integrations/langchain/conftest.py @@ -19,8 +19,8 @@ def setup_braintrust_langchain(): os.environ["BRAINTRUST_API_URL"] = "http://localhost:8000" os.environ["BRAINTRUST_APP_URL"] = "http://localhost:3000" os.environ["BRAINTRUST_API_KEY"] = TEST_API_KEY - os.environ["ANTHROPIC_API_KEY"] = "your_anthropic_api_key_here" - os.environ["OPENAI_API_KEY"] = "your_openai_api_key_here" + os.environ.setdefault("ANTHROPIC_API_KEY", "your_anthropic_api_key_here") + os.environ.setdefault("OPENAI_API_KEY", "your_openai_api_key_here") os.environ["OPENAI_BASE_URL"] = "http://localhost:8000/v1/proxy" _internal_reset_global_state() diff --git a/py/src/braintrust/integrations/langchain/test_langchain.py b/py/src/braintrust/integrations/langchain/test_langchain.py index 5698fdb7..9bb711d5 100644 --- a/py/src/braintrust/integrations/langchain/test_langchain.py +++ b/py/src/braintrust/integrations/langchain/test_langchain.py @@ -112,9 +112,6 @@ def test_llm_calls(logger_memory_logger: LoggerMemoryLogger): "additional_kwargs": ANY, "response_metadata": ANY, "type": "ai", - "name": ANY, - "id": ANY, - "example": ANY, "tool_calls": ANY, "invalid_tool_calls": ANY, "usage_metadata": ANY, @@ -133,8 +130,6 @@ def test_llm_calls(logger_memory_logger: LoggerMemoryLogger): 
"additional_kwargs": {}, "response_metadata": {}, "type": "human", - "name": None, - "id": None, } ] }, @@ -151,9 +146,6 @@ def test_llm_calls(logger_memory_logger: LoggerMemoryLogger): "additional_kwargs": {}, "response_metadata": {}, "type": "human", - "name": None, - "id": None, - "example": ANY, } ] ], @@ -169,8 +161,6 @@ def test_llm_calls(logger_memory_logger: LoggerMemoryLogger): "additional_kwargs": ANY, "response_metadata": ANY, "type": "ai", - "name": None, - "id": ANY, }, } ] @@ -183,7 +173,6 @@ def test_llm_calls(logger_memory_logger: LoggerMemoryLogger): }, "model_name": "gpt-4o-mini-2024-07-18", }, - "run": None, "type": "LLMResult", }, "metrics": { @@ -248,9 +237,6 @@ def test_global_handler(logger_memory_logger: LoggerMemoryLogger): "additional_kwargs": ANY, "response_metadata": ANY, "type": "ai", - "name": ANY, - "id": ANY, - "example": ANY, "tool_calls": ANY, "invalid_tool_calls": ANY, "usage_metadata": ANY, @@ -269,8 +255,6 @@ def test_global_handler(logger_memory_logger: LoggerMemoryLogger): "additional_kwargs": {}, "response_metadata": {}, "type": "human", - "name": None, - "id": None, } ] }, @@ -287,9 +271,6 @@ def test_global_handler(logger_memory_logger: LoggerMemoryLogger): "additional_kwargs": {}, "response_metadata": {}, "type": "human", - "name": None, - "id": None, - "example": ANY, } ] ], @@ -305,8 +286,6 @@ def test_global_handler(logger_memory_logger: LoggerMemoryLogger): "additional_kwargs": ANY, "response_metadata": ANY, "type": "ai", - "name": None, - "id": ANY, }, } ] @@ -319,7 +298,6 @@ def test_global_handler(logger_memory_logger: LoggerMemoryLogger): }, "model_name": "gpt-4o-mini-2024-07-18", }, - "run": None, "type": "LLMResult", }, "metrics": { @@ -392,8 +370,6 @@ def test_chain_with_memory(logger_memory_logger: LoggerMemoryLogger): "additional_kwargs": {}, "response_metadata": {}, "type": "human", - "name": None, - "id": None, } ] }, @@ -410,9 +386,6 @@ def test_chain_with_memory(logger_memory_logger: LoggerMemoryLogger): 
"additional_kwargs": {}, "response_metadata": {}, "type": "human", - "name": None, - "id": None, - "example": ANY, } ] ], @@ -428,8 +401,6 @@ def test_chain_with_memory(logger_memory_logger: LoggerMemoryLogger): "additional_kwargs": ANY, "response_metadata": ANY, "type": "ai", - "name": None, - "id": ANY, }, } ] @@ -442,7 +413,6 @@ def test_chain_with_memory(logger_memory_logger: LoggerMemoryLogger): }, "model_name": "gpt-4o-mini-2024-07-18", }, - "run": None, "type": "LLMResult", }, "metrics": { @@ -523,9 +493,6 @@ def calculator(input: CalculatorInput) -> str: "additional_kwargs": {}, "response_metadata": {}, "type": "human", - "name": None, - "id": None, - "example": ANY, } ] ], @@ -554,12 +521,8 @@ def calculator(input: CalculatorInput) -> str: "message": { "content": ANY, "type": "ai", - "additional_kwargs": { - "tool_calls": ANY, - }, + "additional_kwargs": ANY, "response_metadata": ANY, - "name": None, - "id": ANY, }, } ] @@ -572,7 +535,6 @@ def calculator(input: CalculatorInput) -> str: }, "model_name": "gpt-4o-mini-2024-07-18", }, - "run": None, "type": "LLMResult", }, "metrics": { @@ -770,9 +732,6 @@ def say_bye(state: dict[str, str]): "additional_kwargs": {}, "response_metadata": {}, "type": "human", - "name": None, - "id": None, - "example": ANY, } ] ], @@ -792,8 +751,6 @@ def say_bye(state: dict[str, str]): "additional_kwargs": ANY, "response_metadata": ANY, "type": "ai", - "name": None, - "id": ANY, }, } ] @@ -806,7 +763,6 @@ def say_bye(state: dict[str, str]): }, "model_name": "gpt-4o-mini-2024-07-18", }, - "run": None, "type": "LLMResult", }, "metrics": { @@ -1059,9 +1015,6 @@ def test_streaming_ttft(logger_memory_logger: LoggerMemoryLogger): { "additional_kwargs": {}, "content": "Count from 1 to 5.", - "example": False, - "id": None, - "name": None, "response_metadata": {}, "type": "human", } diff --git a/py/src/braintrust/integrations/langchain/tracing.py b/py/src/braintrust/integrations/langchain/tracing.py index 1a142e6b..efe2fd61 100644 --- 
a/py/src/braintrust/integrations/langchain/tracing.py +++ b/py/src/braintrust/integrations/langchain/tracing.py @@ -84,6 +84,17 @@ class BraintrustCallbackHandler: root_run_id: UUID | None = None + # Duck-typing attributes required by LangChain's callback manager. + # These mirror BaseCallbackHandler without requiring inheritance. + raise_error: bool = False + ignore_llm: bool = False + ignore_retry: bool = False + ignore_chain: bool = False + ignore_agent: bool = False + ignore_retriever: bool = False + ignore_chat_model: bool = False + ignore_custom_event: bool = False + def __init__( self, logger: Logger | Span | None = None, @@ -97,8 +108,8 @@ def __init__( r"^(l[sc]_|langgraph_|__pregel_|checkpoint_ns)" ) self.skipped_runs: set[UUID] = set() - # Set run_inline=True to avoid thread executor in async contexts - # This ensures memory logger context is preserved + # run_inline=True avoids thread executor in async contexts, + # ensuring the ContextVar state is preserved across callbacks. self.run_inline = True self._start_times: dict[UUID, float] = {} @@ -686,11 +697,18 @@ def _get_metrics_from_response(response: Any) -> dict[str, Any]: input_token_details = usage_metadata.get("input_token_details") if input_token_details and isinstance(input_token_details, dict): cache_read = input_token_details.get("cache_read") - cache_creation = input_token_details.get("cache_creation") + # langchain-anthropic >=1.4 sets cache_creation=0 when ephemeral + # breakdown keys are present, so sum those up as the true total. 
+ cache_creation = input_token_details.get("cache_creation") or 0 + cache_creation += sum( + v + for k, v in input_token_details.items() + if k.startswith("ephemeral_") and k.endswith("_input_tokens") and v + ) if cache_read is not None: metrics["prompt_cached_tokens"] = cache_read - if cache_creation is not None: + if cache_creation: metrics["prompt_cache_creation_tokens"] = cache_creation if not metrics or not any(metrics.values()): From 2d50659bd7e912726d6763f25f1ac44b052a05a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Halber?= Date: Wed, 25 Mar 2026 19:16:34 +0000 Subject: [PATCH 07/14] chore: maintain compatibility for test with pre v1 langchain --- py/noxfile.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/py/noxfile.py b/py/noxfile.py index 1c8c7389..d2042088 100644 --- a/py/noxfile.py +++ b/py/noxfile.py @@ -104,6 +104,7 @@ def _pinned_python_version(): DSPY_VERSIONS = (LATEST,) GOOGLE_ADK_VERSIONS = (LATEST, "1.14.1") LANGCHAIN_VERSIONS = (LATEST, "0.3.83") +LANGCHAIN_VERSION_IDS = (LATEST, "langchain-core-0.3.83") # temporalio 1.19.0+ requires Python >= 3.10; skip Python 3.9 entirely TEMPORAL_VERSIONS = (LATEST, "1.20.0", "1.19.0") PYTEST_VERSIONS = (LATEST, "8.4.2") @@ -196,14 +197,14 @@ def test_google_genai(session, version): @nox.session() -@nox.parametrize("version", LANGCHAIN_VERSIONS, ids=LANGCHAIN_VERSIONS) +@nox.parametrize("version", LANGCHAIN_VERSIONS, ids=LANGCHAIN_VERSION_IDS) def test_langchain(session, version): """Test LangChain integration.""" _install_test_deps(session) _install(session, "langchain-core", version) - _install(session, "langchain-openai", version) - _install(session, "langchain-anthropic", version) - session.install("langgraph", silent=SILENT_INSTALLS) + _install(session, "langchain-openai") + _install(session, "langchain-anthropic") + _install(session, "langgraph") _run_tests(session, f"{INTEGRATION_DIR}/langchain/test_langchain.py") _run_core_tests(session) From 
ab1c01b7fcdb7c3d1df12ffe0d2b542ac46593dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Halber?= Date: Wed, 25 Mar 2026 20:25:57 +0000 Subject: [PATCH 08/14] chore: fix pylint error and test suite for older langchain --- py/noxfile.py | 11 +++++++--- .../integrations/langchain/__init__.py | 21 +------------------ 2 files changed, 9 insertions(+), 23 deletions(-) diff --git a/py/noxfile.py b/py/noxfile.py index d2042088..f0e0af64 100644 --- a/py/noxfile.py +++ b/py/noxfile.py @@ -103,8 +103,12 @@ def _pinned_python_version(): GENAI_VERSIONS = (LATEST,) DSPY_VERSIONS = (LATEST,) GOOGLE_ADK_VERSIONS = (LATEST, "1.14.1") -LANGCHAIN_VERSIONS = (LATEST, "0.3.83") -LANGCHAIN_VERSION_IDS = (LATEST, "langchain-core-0.3.83") +LANGCHAIN_VERSIONS = ( + ("langchain-core", LATEST), + ("langchain-core", "0.3.83"), + ("langchain", "0.3.28"), +) +LANGCHAIN_VERSION_IDS = (LATEST, "langchain-core-0.3.83", "langchain-0.3.28") # temporalio 1.19.0+ requires Python >= 3.10; skip Python 3.9 entirely TEMPORAL_VERSIONS = (LATEST, "1.20.0", "1.19.0") PYTEST_VERSIONS = (LATEST, "8.4.2") @@ -200,8 +204,9 @@ def test_google_genai(session, version): @nox.parametrize("version", LANGCHAIN_VERSIONS, ids=LANGCHAIN_VERSION_IDS) def test_langchain(session, version): """Test LangChain integration.""" + pkg, version = version _install_test_deps(session) - _install(session, "langchain-core", version) + _install(session, pkg, version) _install(session, "langchain-openai") _install(session, "langchain-anthropic") _install(session, "langgraph") diff --git a/py/src/braintrust/integrations/langchain/__init__.py b/py/src/braintrust/integrations/langchain/__init__.py index e1a19352..390e1c13 100644 --- a/py/src/braintrust/integrations/langchain/__init__.py +++ b/py/src/braintrust/integrations/langchain/__init__.py @@ -1,6 +1,7 @@ """Braintrust integration for LangChain.""" from .integration import LangChainIntegration +from .tracing import BraintrustCallbackHandler, BraintrustTracer, 
clear_global_handler, set_global_handler def setup_langchain() -> bool: @@ -19,26 +20,6 @@ def setup_langchain() -> bool: return LangChainIntegration.setup() -# Lazily imported to avoid circular imports at module load time -# (tracing.py imports from braintrust, which must be fully initialized first) -_LAZY_ATTRS = frozenset( - ["BraintrustCallbackHandler", "BraintrustTracer", "set_global_handler", "clear_global_handler"] -) - - -def __getattr__(name: str): - if name in _LAZY_ATTRS: - from .tracing import BraintrustCallbackHandler, BraintrustTracer, clear_global_handler, set_global_handler - - g = globals() - g["BraintrustCallbackHandler"] = BraintrustCallbackHandler - g["BraintrustTracer"] = BraintrustTracer - g["set_global_handler"] = set_global_handler - g["clear_global_handler"] = clear_global_handler - return g[name] - raise AttributeError(f"module {__name__!r} has no attribute {name!r}") - - __all__ = [ "LangChainIntegration", "BraintrustCallbackHandler", From 0a72054e3f57b09cdf8fbfa7a91c9843e1fbed0a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Halber?= Date: Wed, 25 Mar 2026 13:59:39 -0700 Subject: [PATCH 09/14] chore: fix CI broken by merge --- py/src/braintrust/auto.py | 1 - py/src/braintrust/integrations/__init__.py | 8 +++++++- py/src/braintrust/integrations/langchain/tracing.py | 2 +- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/py/src/braintrust/auto.py b/py/src/braintrust/auto.py index 86a6e8db..c8192a45 100644 --- a/py/src/braintrust/auto.py +++ b/py/src/braintrust/auto.py @@ -106,7 +106,6 @@ def auto_instrument( """ results = {} - if openai: results["openai"] = _instrument_openai() if anthropic: diff --git a/py/src/braintrust/integrations/__init__.py b/py/src/braintrust/integrations/__init__.py index eb3b7291..0c948847 100644 --- a/py/src/braintrust/integrations/__init__.py +++ b/py/src/braintrust/integrations/__init__.py @@ -5,4 +5,10 @@ from .langchain import LangChainIntegration -__all__ = ["ADKIntegration", 
"AgnoIntegration", "AnthropicIntegration", "ClaudeAgentSDKIntegration", "LangChainIntegration"] +__all__ = [ + "ADKIntegration", + "AgnoIntegration", + "AnthropicIntegration", + "ClaudeAgentSDKIntegration", + "LangChainIntegration", +] diff --git a/py/src/braintrust/integrations/langchain/tracing.py b/py/src/braintrust/integrations/langchain/tracing.py index efe2fd61..38179ba6 100644 --- a/py/src/braintrust/integrations/langchain/tracing.py +++ b/py/src/braintrust/integrations/langchain/tracing.py @@ -18,7 +18,7 @@ from uuid import UUID import braintrust -from braintrust import NOOP_SPAN, Logger, Span, SpanAttributes, SpanTypeAttribute, current_span, init_logger +from braintrust.logger import NOOP_SPAN, Logger, Span, SpanAttributes, SpanTypeAttribute, current_span, init_logger from braintrust.version import VERSION as sdk_version from typing_extensions import NotRequired From 7fa465f57a1cd1dbae79b4a7fad77a28b7813d93 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Halber?= Date: Wed, 25 Mar 2026 14:16:26 -0700 Subject: [PATCH 10/14] chore: fix ci? 
and regenerate uv.lock --- integrations/langchain-py/pyproject.toml | 3 + integrations/langchain-py/uv.lock | 369 +++++++++++++---------- 2 files changed, 221 insertions(+), 151 deletions(-) diff --git a/integrations/langchain-py/pyproject.toml b/integrations/langchain-py/pyproject.toml index 9d620604..c940ae12 100644 --- a/integrations/langchain-py/pyproject.toml +++ b/integrations/langchain-py/pyproject.toml @@ -42,6 +42,9 @@ members = [ ".", ] +[tool.uv.sources] +braintrust = { path = "../../py", editable = true } + [dependency-groups] dev = [ "build", diff --git a/integrations/langchain-py/uv.lock b/integrations/langchain-py/uv.lock index 93361087..fc0b51f0 100644 --- a/integrations/langchain-py/uv.lock +++ b/integrations/langchain-py/uv.lock @@ -58,6 +58,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a7/fa/e01228c2938de91d47b307831c62ab9e4001e747789d0b05baf779a6488c/async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028", size = 5721, upload-time = "2023-08-10T16:35:55.203Z" }, ] +[[package]] +name = "attrs" +version = "26.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/9a/8e/82a0fe20a541c03148528be8cac2408564a6c9a0cc7e9171802bc1d26985/attrs-26.1.0.tar.gz", hash = "sha256:d03ceb89cb322a8fd706d4fb91940737b6642aa36998fe130a9bc96c985eff32", size = 952055, upload-time = "2026-03-19T14:22:25.026Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/64/b4/17d4b0b2a2dc85a6df63d1157e028ed19f90d4cd97c36717afef2bc2f395/attrs-26.1.0-py3-none-any.whl", hash = "sha256:c647aa4a12dfbad9333ca4e71fe62ddc36f4e63b2d260a37a8b83d2f043ac309", size = 67548, upload-time = "2026-03-19T14:22:23.645Z" }, +] + [[package]] name = "backports-asyncio-runner" version = "1.2.0" @@ -76,60 +85,65 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/b9/fa/123043af240e49752f1c4bd24da5053b6bd00cad78c2be53c0d1e8b975bc/backports.tarfile-1.2.0-py3-none-any.whl", hash = "sha256:77e284d754527b01fb1e6fa8a1afe577858ebe4e9dad8919e34c862cb399bc34", size = 30181, upload-time = "2024-05-28T17:01:53.112Z" }, ] -[[package]] -name = "black" -version = "25.9.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "click" }, - { name = "mypy-extensions" }, - { name = "packaging" }, - { name = "pathspec" }, - { name = "platformdirs" }, - { name = "pytokens" }, - { name = "tomli", marker = "python_full_version < '3.11'" }, - { name = "typing-extensions", marker = "python_full_version < '3.11'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/4b/43/20b5c90612d7bdb2bdbcceeb53d588acca3bb8f0e4c5d5c751a2c8fdd55a/black-25.9.0.tar.gz", hash = "sha256:0474bca9a0dd1b51791fcc507a4e02078a1c63f6d4e4ae5544b9848c7adfb619", size = 648393, upload-time = "2025-09-19T00:27:37.758Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/25/40/dbe31fc56b218a858c8fc6f5d8d3ba61c1fa7e989d43d4a4574b8b992840/black-25.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ce41ed2614b706fd55fd0b4a6909d06b5bab344ffbfadc6ef34ae50adba3d4f7", size = 1715605, upload-time = "2025-09-19T00:36:13.483Z" }, - { url = "https://files.pythonhosted.org/packages/92/b2/f46800621200eab6479b1f4c0e3ede5b4c06b768e79ee228bc80270bcc74/black-25.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2ab0ce111ef026790e9b13bd216fa7bc48edd934ffc4cbf78808b235793cbc92", size = 1571829, upload-time = "2025-09-19T00:32:42.13Z" }, - { url = "https://files.pythonhosted.org/packages/4e/64/5c7f66bd65af5c19b4ea86062bb585adc28d51d37babf70969e804dbd5c2/black-25.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f96b6726d690c96c60ba682955199f8c39abc1ae0c3a494a9c62c0184049a713", size = 1631888, upload-time = "2025-09-19T00:30:54.212Z" }, - { url = 
"https://files.pythonhosted.org/packages/3b/64/0b9e5bfcf67db25a6eef6d9be6726499a8a72ebab3888c2de135190853d3/black-25.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:d119957b37cc641596063cd7db2656c5be3752ac17877017b2ffcdb9dfc4d2b1", size = 1327056, upload-time = "2025-09-19T00:31:08.877Z" }, - { url = "https://files.pythonhosted.org/packages/b7/f4/7531d4a336d2d4ac6cc101662184c8e7d068b548d35d874415ed9f4116ef/black-25.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:456386fe87bad41b806d53c062e2974615825c7a52159cde7ccaeb0695fa28fa", size = 1698727, upload-time = "2025-09-19T00:31:14.264Z" }, - { url = "https://files.pythonhosted.org/packages/28/f9/66f26bfbbf84b949cc77a41a43e138d83b109502cd9c52dfc94070ca51f2/black-25.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a16b14a44c1af60a210d8da28e108e13e75a284bf21a9afa6b4571f96ab8bb9d", size = 1555679, upload-time = "2025-09-19T00:31:29.265Z" }, - { url = "https://files.pythonhosted.org/packages/bf/59/61475115906052f415f518a648a9ac679d7afbc8da1c16f8fdf68a8cebed/black-25.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:aaf319612536d502fdd0e88ce52d8f1352b2c0a955cc2798f79eeca9d3af0608", size = 1617453, upload-time = "2025-09-19T00:30:42.24Z" }, - { url = "https://files.pythonhosted.org/packages/7f/5b/20fd5c884d14550c911e4fb1b0dae00d4abb60a4f3876b449c4d3a9141d5/black-25.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:c0372a93e16b3954208417bfe448e09b0de5cc721d521866cd9e0acac3c04a1f", size = 1333655, upload-time = "2025-09-19T00:30:56.715Z" }, - { url = "https://files.pythonhosted.org/packages/fb/8e/319cfe6c82f7e2d5bfb4d3353c6cc85b523d677ff59edc61fdb9ee275234/black-25.9.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:1b9dc70c21ef8b43248f1d86aedd2aaf75ae110b958a7909ad8463c4aa0880b0", size = 1742012, upload-time = "2025-09-19T00:33:08.678Z" }, - { url = 
"https://files.pythonhosted.org/packages/94/cc/f562fe5d0a40cd2a4e6ae3f685e4c36e365b1f7e494af99c26ff7f28117f/black-25.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8e46eecf65a095fa62e53245ae2795c90bdecabd53b50c448d0a8bcd0d2e74c4", size = 1581421, upload-time = "2025-09-19T00:35:25.937Z" }, - { url = "https://files.pythonhosted.org/packages/84/67/6db6dff1ebc8965fd7661498aea0da5d7301074b85bba8606a28f47ede4d/black-25.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9101ee58ddc2442199a25cb648d46ba22cd580b00ca4b44234a324e3ec7a0f7e", size = 1655619, upload-time = "2025-09-19T00:30:49.241Z" }, - { url = "https://files.pythonhosted.org/packages/10/10/3faef9aa2a730306cf469d76f7f155a8cc1f66e74781298df0ba31f8b4c8/black-25.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:77e7060a00c5ec4b3367c55f39cf9b06e68965a4f2e61cecacd6d0d9b7ec945a", size = 1342481, upload-time = "2025-09-19T00:31:29.625Z" }, - { url = "https://files.pythonhosted.org/packages/48/99/3acfea65f5e79f45472c45f87ec13037b506522719cd9d4ac86484ff51ac/black-25.9.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0172a012f725b792c358d57fe7b6b6e8e67375dd157f64fa7a3097b3ed3e2175", size = 1742165, upload-time = "2025-09-19T00:34:10.402Z" }, - { url = "https://files.pythonhosted.org/packages/3a/18/799285282c8236a79f25d590f0222dbd6850e14b060dfaa3e720241fd772/black-25.9.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3bec74ee60f8dfef564b573a96b8930f7b6a538e846123d5ad77ba14a8d7a64f", size = 1581259, upload-time = "2025-09-19T00:32:49.685Z" }, - { url = "https://files.pythonhosted.org/packages/f1/ce/883ec4b6303acdeca93ee06b7622f1fa383c6b3765294824165d49b1a86b/black-25.9.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b756fc75871cb1bcac5499552d771822fd9db5a2bb8db2a7247936ca48f39831", size = 1655583, upload-time = "2025-09-19T00:30:44.505Z" }, - { url = 
"https://files.pythonhosted.org/packages/21/17/5c253aa80a0639ccc427a5c7144534b661505ae2b5a10b77ebe13fa25334/black-25.9.0-cp313-cp313-win_amd64.whl", hash = "sha256:846d58e3ce7879ec1ffe816bb9df6d006cd9590515ed5d17db14e17666b2b357", size = 1343428, upload-time = "2025-09-19T00:32:13.839Z" }, - { url = "https://files.pythonhosted.org/packages/1b/46/863c90dcd3f9d41b109b7f19032ae0db021f0b2a81482ba0a1e28c84de86/black-25.9.0-py3-none-any.whl", hash = "sha256:474b34c1342cdc157d307b56c4c65bce916480c4a8f6551fdc6bf9b486a7c4ae", size = 203363, upload-time = "2025-09-19T00:27:35.724Z" }, -] - [[package]] name = "braintrust" -version = "0.2.9" -source = { registry = "https://pypi.org/simple" } +version = "0.10.0" +source = { editable = "../../py" } dependencies = [ { name = "chevron" }, { name = "exceptiongroup" }, { name = "gitpython" }, + { name = "jsonschema" }, + { name = "packaging" }, { name = "python-dotenv" }, { name = "python-slugify" }, { name = "requests" }, { name = "sseclient-py" }, { name = "tqdm" }, { name = "typing-extensions" }, + { name = "wrapt" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/32/40/d9931b0233f36fcf41316941022c44e8664d97a2ea6e8b973dee4ebf0749/braintrust-0.2.9.tar.gz", hash = "sha256:6874ab7aae8f9463c63ae8297927995f745807e2aed25c90f4c28dd11a5a90b6", size = 185157, upload-time = "2025-09-22T23:28:01.975Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/39/31/8a5e6534b53bf0a2e81ec008cce6d8e1cccd53912c404c008214b29684f5/braintrust-0.2.9-py3-none-any.whl", hash = "sha256:b0b9c50900f6cc44d997b58f33f3f1e4f2cd82e40f4557a625156bd31d042c78", size = 214728, upload-time = "2025-09-22T23:28:00.415Z" }, + +[package.metadata] +requires-dist = [ + { name = "boto3", marker = "extra == 'all'" }, + { name = "boto3", marker = "extra == 'cli'" }, + { name = "chevron" }, + { name = "exceptiongroup", specifier = ">=1.2.0" }, + { name = "gitpython" }, + { name = "jsonschema" }, + { name = "openai-agents", marker = "extra == 'all'" }, + 
{ name = "openai-agents", marker = "extra == 'openai-agents'" }, + { name = "opentelemetry-api", marker = "extra == 'all'" }, + { name = "opentelemetry-api", marker = "extra == 'otel'" }, + { name = "opentelemetry-exporter-otlp-proto-http", marker = "extra == 'all'" }, + { name = "opentelemetry-exporter-otlp-proto-http", marker = "extra == 'otel'" }, + { name = "opentelemetry-sdk", marker = "extra == 'all'" }, + { name = "opentelemetry-sdk", marker = "extra == 'otel'" }, + { name = "orjson", marker = "platform_python_implementation != 'PyPy' and extra == 'all'" }, + { name = "orjson", marker = "platform_python_implementation != 'PyPy' and extra == 'performance'" }, + { name = "packaging" }, + { name = "psycopg2-binary", marker = "extra == 'all'" }, + { name = "psycopg2-binary", marker = "extra == 'cli'" }, + { name = "pydoc-markdown", marker = "extra == 'all'" }, + { name = "pydoc-markdown", marker = "extra == 'doc'" }, + { name = "python-dotenv" }, + { name = "python-slugify" }, + { name = "requests" }, + { name = "sseclient-py" }, + { name = "starlette", marker = "extra == 'all'" }, + { name = "starlette", marker = "extra == 'cli'" }, + { name = "temporalio", marker = "python_full_version >= '3.10' and extra == 'all'", specifier = ">=1.19.0" }, + { name = "temporalio", marker = "python_full_version >= '3.10' and extra == 'temporal'", specifier = ">=1.19.0" }, + { name = "tqdm" }, + { name = "typing-extensions", specifier = ">=4.1.0" }, + { name = "uv", marker = "extra == 'all'" }, + { name = "uv", marker = "extra == 'cli'" }, + { name = "uvicorn", marker = "extra == 'all'" }, + { name = "uvicorn", marker = "extra == 'cli'" }, + { name = "wrapt" }, ] +provides-extras = ["cli", "doc", "openai-agents", "otel", "performance", "temporal", "all"] [[package]] name = "braintrust-langchain" @@ -142,12 +156,8 @@ dependencies = [ [package.dev-dependencies] dev = [ - { name = "black" }, { name = "build" }, - { name = "flake8" }, - { name = "flake8-isort" }, { name = "httpx" 
}, - { name = "isort" }, { name = "langchain-anthropic" }, { name = "langchain-openai" }, { name = "langgraph" }, @@ -162,18 +172,14 @@ dev = [ [package.metadata] requires-dist = [ - { name = "braintrust", specifier = ">=0.2.1" }, + { name = "braintrust", editable = "../../py" }, { name = "langchain", specifier = ">=0.3.27" }, ] [package.metadata.requires-dev] dev = [ - { name = "black" }, { name = "build" }, - { name = "flake8" }, - { name = "flake8-isort" }, { name = "httpx" }, - { name = "isort", specifier = "==5.12.0" }, { name = "langchain-anthropic", specifier = ">=0.3.20" }, { name = "langchain-openai" }, { name = "langgraph", specifier = ">=0.2.1,<0.4.0" }, @@ -346,18 +352,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/52/93/342cc62a70ab727e093ed98e02a725d85b746345f05d2b5e5034649f4ec8/chevron-0.14.0-py3-none-any.whl", hash = "sha256:fbf996a709f8da2e745ef763f482ce2d311aa817d287593a5b990d6d6e4f0443", size = 11595, upload-time = "2021-01-02T22:47:57.847Z" }, ] -[[package]] -name = "click" -version = "8.3.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "colorama", marker = "sys_platform == 'win32'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/46/61/de6cd827efad202d7057d93e0fed9294b96952e188f7384832791c7b2254/click-8.3.0.tar.gz", hash = "sha256:e7b8232224eba16f4ebe410c25ced9f7875cb5f3263ffc93cc3e8da705e229c4", size = 276943, upload-time = "2025-09-18T17:32:23.696Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/db/d3/9dcc0f5797f070ec8edf30fbadfb200e71d9db6b84d211e3b2085a7589a0/click-8.3.0-py3-none-any.whl", hash = "sha256:9b9f285302c6e3064f4330c05f05b81945b2a39544279343e6e7c5f27a9baddc", size = 107295, upload-time = "2025-09-18T17:32:22.42Z" }, -] - [[package]] name = "colorama" version = "0.4.6" @@ -473,33 +467,6 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/42/14/42b2651a2f46b022ccd948bca9f2d5af0fd8929c4eec235b8d6d844fbe67/filelock-3.19.1-py3-none-any.whl", hash = "sha256:d38e30481def20772f5baf097c122c3babc4fcdb7e14e57049eb9d88c6dc017d", size = 15988, upload-time = "2025-08-14T16:56:01.633Z" }, ] -[[package]] -name = "flake8" -version = "7.3.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "mccabe" }, - { name = "pycodestyle" }, - { name = "pyflakes" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/9b/af/fbfe3c4b5a657d79e5c47a2827a362f9e1b763336a52f926126aa6dc7123/flake8-7.3.0.tar.gz", hash = "sha256:fe044858146b9fc69b551a4b490d69cf960fcb78ad1edcb84e7fbb1b4a8e3872", size = 48326, upload-time = "2025-06-20T19:31:35.838Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/9f/56/13ab06b4f93ca7cac71078fbe37fcea175d3216f31f85c3168a6bbd0bb9a/flake8-7.3.0-py2.py3-none-any.whl", hash = "sha256:b9696257b9ce8beb888cdbe31cf885c90d31928fe202be0889a7cdafad32f01e", size = 57922, upload-time = "2025-06-20T19:31:34.425Z" }, -] - -[[package]] -name = "flake8-isort" -version = "6.1.2" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "flake8" }, - { name = "isort" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/7c/ea/2f2662d4fefa6ab335c7119cb28e5bc57c935a86a69a7f72df3ea5fe7b2c/flake8_isort-6.1.2.tar.gz", hash = "sha256:9d0452acdf0e1cd6f2d6848e3605e66b54d920e73471fb4744eef0f93df62d5d", size = 17756, upload-time = "2025-01-29T12:29:25.753Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/b3/10/295e982874f2a94f309baf7c45f852a191c87d59bd846b1701332303783f/flake8_isort-6.1.2-py3-none-any.whl", hash = "sha256:549197dedf0273502fb74f04c080beed9e62a7eb70244610413d27052e78bd3b", size = 18385, upload-time = "2025-01-29T12:29:23.46Z" }, -] - [[package]] name = "gitdb" version = "4.0.12" @@ -667,15 +634,6 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050, upload-time = "2025-03-19T20:10:01.071Z" }, ] -[[package]] -name = "isort" -version = "5.12.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/a9/c4/dc00e42c158fc4dda2afebe57d2e948805c06d5169007f1724f0683010a9/isort-5.12.0.tar.gz", hash = "sha256:8bef7dde241278824a6d83f44a544709b065191b95b6e50894bdc722fcba0504", size = 174643, upload-time = "2023-01-28T17:10:22.636Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/0a/63/4036ae70eea279c63e2304b91ee0ac182f467f24f86394ecfe726092340b/isort-5.12.0-py3-none-any.whl", hash = "sha256:f84c2818376e66cf843d497486ea8fed8700b340f308f076c6fb1229dff318b6", size = 91198, upload-time = "2023-01-28T17:10:21.149Z" }, -] - [[package]] name = "jaraco-classes" version = "3.4.0" @@ -815,6 +773,33 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/71/92/5e77f98553e9e75130c78900d000368476aed74276eb8ae8796f65f00918/jsonpointer-3.0.0-py2.py3-none-any.whl", hash = "sha256:13e088adc14fca8b6aa8177c044e12701e6ad4b28ff10e65f2267a90109c9942", size = 7595, upload-time = "2024-06-10T19:24:40.698Z" }, ] +[[package]] +name = "jsonschema" +version = "4.26.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "jsonschema-specifications" }, + { name = "referencing" }, + { name = "rpds-py" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b3/fc/e067678238fa451312d4c62bf6e6cf5ec56375422aee02f9cb5f909b3047/jsonschema-4.26.0.tar.gz", hash = "sha256:0c26707e2efad8aa1bfc5b7ce170f3fccc2e4918ff85989ba9ffa9facb2be326", size = 366583, upload-time = "2026-01-07T13:41:07.246Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/69/90/f63fb5873511e014207a475e2bb4e8b2e570d655b00ac19a9a0ca0a385ee/jsonschema-4.26.0-py3-none-any.whl", hash = "sha256:d489f15263b8d200f8387e64b4c3a75f06629559fb73deb8fdfb525f2dab50ce", size = 90630, upload-time = "2026-01-07T13:41:05.306Z" }, +] + +[[package]] +name = "jsonschema-specifications" +version = "2025.9.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "referencing" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/19/74/a633ee74eb36c44aa6d1095e7cc5569bebf04342ee146178e2d36600708b/jsonschema_specifications-2025.9.1.tar.gz", hash = "sha256:b540987f239e745613c7a9176f3edb72b832a4ac465cf02712288397832b5e8d", size = 32855, upload-time = "2025-09-08T01:34:59.186Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/41/45/1a4ed80516f02155c51f51e8cedb3c1902296743db0bbc66608a0db2814f/jsonschema_specifications-2025.9.1-py3-none-any.whl", hash = "sha256:98802fee3a11ee76ecaca44429fda8a41bff98b00a0f2838151b113f210cc6fe", size = 18437, upload-time = "2025-09-08T01:34:57.871Z" }, +] + [[package]] name = "keyring" version = "25.6.0" @@ -996,15 +981,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/94/54/e7d793b573f298e1c9013b8c4dade17d481164aa517d1d7148619c2cedbf/markdown_it_py-4.0.0-py3-none-any.whl", hash = "sha256:87327c59b172c5011896038353a81343b6754500a08cd7a4973bb48c6d578147", size = 87321, upload-time = "2025-08-11T12:57:51.923Z" }, ] -[[package]] -name = "mccabe" -version = "0.7.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e7/ff/0ffefdcac38932a54d2b5eed4e0ba8a408f215002cd178ad1df0f2806ff8/mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325", size = 9658, upload-time = "2022-01-24T01:14:51.113Z" } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/27/1a/1f68f9ba0c207934b35b86a8ca3aad8395a3d6dd7921c0686e23853ff5a9/mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e", size = 7350, upload-time = "2022-01-24T01:14:49.62Z" }, -] - [[package]] name = "mdurl" version = "0.1.2" @@ -1125,15 +1101,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/fd/69/b547032297c7e63ba2af494edba695d781af8a0c6e89e4d06cf848b21d80/multidict-6.6.4-py3-none-any.whl", hash = "sha256:27d8f8e125c07cb954e54d75d04905a9bba8a439c1d84aca94949d4d03d8601c", size = 12313, upload-time = "2025-08-11T12:08:46.891Z" }, ] -[[package]] -name = "mypy-extensions" -version = "1.1.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/a2/6e/371856a3fb9d31ca8dac321cda606860fa4548858c0cc45d9d1d4ca2628b/mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558", size = 6343, upload-time = "2025-04-22T14:54:24.164Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" }, -] - [[package]] name = "nh3" version = "0.3.0" @@ -1321,15 +1288,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, ] -[[package]] -name = "pathspec" -version = "0.12.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ca/bc/f35b8446f4531a7cb215605d100cd88b7ac6f44ab3fc94870c120ab3adbf/pathspec-0.12.1.tar.gz", 
hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712", size = 51043, upload-time = "2023-12-10T22:30:45Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191, upload-time = "2023-12-10T22:30:43.14Z" }, -] - [[package]] name = "platformdirs" version = "4.4.0" @@ -1453,15 +1411,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/cc/35/cc0aaecf278bb4575b8555f2b137de5ab821595ddae9da9d3cd1da4072c7/propcache-0.3.2-py3-none-any.whl", hash = "sha256:98f1ec44fb675f5052cccc8e609c46ed23a35a1cfd18545ad4e29002d858a43f", size = 12663, upload-time = "2025-06-09T22:56:04.484Z" }, ] -[[package]] -name = "pycodestyle" -version = "2.14.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/11/e0/abfd2a0d2efe47670df87f3e3a0e2edda42f055053c85361f19c0e2c1ca8/pycodestyle-2.14.0.tar.gz", hash = "sha256:c4b5b517d278089ff9d0abdec919cd97262a3367449ea1c8b49b91529167b783", size = 39472, upload-time = "2025-06-20T18:49:48.75Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/d7/27/a58ddaf8c588a3ef080db9d0b7e0b97215cee3a45df74f3a94dbbf5c893a/pycodestyle-2.14.0-py2.py3-none-any.whl", hash = "sha256:dd6bf7cb4ee77f8e016f9c8e74a35ddd9f67e1d5fd4184d86c3b98e07099f42d", size = 31594, upload-time = "2025-06-20T18:49:47.491Z" }, -] - [[package]] name = "pycparser" version = "2.23" @@ -1573,15 +1522,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/32/56/8a7ca5d2cd2cda1d245d34b1c9a942920a718082ae8e54e5f3e5a58b7add/pydantic_core-2.33.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:329467cecfb529c925cf2bbd4d60d2c509bc2fb52a20c1045bf09bb70971a9c1", size = 2066757, upload-time = "2025-04-23T18:33:30.645Z" }, ] -[[package]] -name = "pyflakes" -version = "3.4.0" -source = { 
registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/45/dc/fd034dc20b4b264b3d015808458391acbf9df40b1e54750ef175d39180b1/pyflakes-3.4.0.tar.gz", hash = "sha256:b24f96fafb7d2ab0ec5075b7350b3d2d2218eab42003821c06344973d3ea2f58", size = 64669, upload-time = "2025-06-20T18:45:27.834Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/c2/2f/81d580a0fb83baeb066698975cb14a618bdbed7720678566f1b046a95fe8/pyflakes-3.4.0-py2.py3-none-any.whl", hash = "sha256:f742a7dbd0d9cb9ea41e9a24a918996e8170c799fa528688d40dd582c8265f4f", size = 63551, upload-time = "2025-06-20T18:45:26.937Z" }, -] - [[package]] name = "pygments" version = "2.19.2" @@ -1666,15 +1606,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a4/62/02da182e544a51a5c3ccf4b03ab79df279f9c60c5e82d5e8bec7ca26ac11/python_slugify-8.0.4-py2.py3-none-any.whl", hash = "sha256:276540b79961052b66b7d116620b36518847f52d5fd9e3a70164fc8c50faa6b8", size = 10051, upload-time = "2024-02-08T18:32:43.911Z" }, ] -[[package]] -name = "pytokens" -version = "0.1.10" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/30/5f/e959a442435e24f6fb5a01aec6c657079ceaca1b3baf18561c3728d681da/pytokens-0.1.10.tar.gz", hash = "sha256:c9a4bfa0be1d26aebce03e6884ba454e842f186a59ea43a6d3b25af58223c044", size = 12171, upload-time = "2025-02-19T14:51:22.001Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/60/e5/63bed382f6a7a5ba70e7e132b8b7b8abbcf4888ffa6be4877698dcfbed7d/pytokens-0.1.10-py3-none-any.whl", hash = "sha256:db7b72284e480e69fb085d9f251f66b3d2df8b7166059261258ff35f50fb711b", size = 12046, upload-time = "2025-02-19T14:51:18.694Z" }, -] - [[package]] name = "pywin32-ctypes" version = "0.2.3" @@ -1742,6 +1673,20 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e1/67/921ec3024056483db83953ae8e48079ad62b92db7880013ca77632921dd0/readme_renderer-44.0-py3-none-any.whl", hash = 
"sha256:2fbca89b81a08526aadf1357a8c2ae889ec05fb03f5da67f9769c9a592166151", size = 13310, upload-time = "2024-07-08T15:00:56.577Z" }, ] +[[package]] +name = "referencing" +version = "0.37.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "rpds-py" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/22/f5/df4e9027acead3ecc63e50fe1e36aca1523e1719559c499951bb4b53188f/referencing-0.37.0.tar.gz", hash = "sha256:44aefc3142c5b842538163acb373e24cce6632bd54bdb01b21ad5863489f50d8", size = 78036, upload-time = "2025-10-13T15:30:48.871Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/58/ca301544e1fa93ed4f80d724bf5b194f6e4b945841c5bfd555878eea9fcb/referencing-0.37.0-py3-none-any.whl", hash = "sha256:381329a9f99628c9069361716891d34ad94af76e461dcb0335825aecc7692231", size = 26766, upload-time = "2025-10-13T15:30:47.625Z" }, +] + [[package]] name = "regex" version = "2025.9.18" @@ -1899,6 +1844,128 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e3/30/3c4d035596d3cf444529e0b2953ad0466f6049528a879d27534700580395/rich-14.1.0-py3-none-any.whl", hash = "sha256:536f5f1785986d6dbdea3c75205c473f970777b4a0d6c6dd1b696aa05a3fa04f", size = 243368, upload-time = "2025-07-25T07:32:56.73Z" }, ] +[[package]] +name = "rpds-py" +version = "0.30.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/20/af/3f2f423103f1113b36230496629986e0ef7e199d2aa8392452b484b38ced/rpds_py-0.30.0.tar.gz", hash = "sha256:dd8ff7cf90014af0c0f787eea34794ebf6415242ee1d6fa91eaba725cc441e84", size = 69469, upload-time = "2025-11-30T20:24:38.837Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/06/0c/0c411a0ec64ccb6d104dcabe0e713e05e153a9a2c3c2bd2b32ce412166fe/rpds_py-0.30.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = 
"sha256:679ae98e00c0e8d68a7fda324e16b90fd5260945b45d3b824c892cec9eea3288", size = 370490, upload-time = "2025-11-30T20:21:33.256Z" }, + { url = "https://files.pythonhosted.org/packages/19/6a/4ba3d0fb7297ebae71171822554abe48d7cab29c28b8f9f2c04b79988c05/rpds_py-0.30.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4cc2206b76b4f576934f0ed374b10d7ca5f457858b157ca52064bdfc26b9fc00", size = 359751, upload-time = "2025-11-30T20:21:34.591Z" }, + { url = "https://files.pythonhosted.org/packages/cd/7c/e4933565ef7f7a0818985d87c15d9d273f1a649afa6a52ea35ad011195ea/rpds_py-0.30.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:389a2d49eded1896c3d48b0136ead37c48e221b391c052fba3f4055c367f60a6", size = 389696, upload-time = "2025-11-30T20:21:36.122Z" }, + { url = "https://files.pythonhosted.org/packages/5e/01/6271a2511ad0815f00f7ed4390cf2567bec1d4b1da39e2c27a41e6e3b4de/rpds_py-0.30.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:32c8528634e1bf7121f3de08fa85b138f4e0dc47657866630611b03967f041d7", size = 403136, upload-time = "2025-11-30T20:21:37.728Z" }, + { url = "https://files.pythonhosted.org/packages/55/64/c857eb7cd7541e9b4eee9d49c196e833128a55b89a9850a9c9ac33ccf897/rpds_py-0.30.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f207f69853edd6f6700b86efb84999651baf3789e78a466431df1331608e5324", size = 524699, upload-time = "2025-11-30T20:21:38.92Z" }, + { url = "https://files.pythonhosted.org/packages/9c/ed/94816543404078af9ab26159c44f9e98e20fe47e2126d5d32c9d9948d10a/rpds_py-0.30.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:67b02ec25ba7a9e8fa74c63b6ca44cf5707f2fbfadae3ee8e7494297d56aa9df", size = 412022, upload-time = "2025-11-30T20:21:40.407Z" }, + { url = "https://files.pythonhosted.org/packages/61/b5/707f6cf0066a6412aacc11d17920ea2e19e5b2f04081c64526eb35b5c6e7/rpds_py-0.30.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:0c0e95f6819a19965ff420f65578bacb0b00f251fefe2c8b23347c37174271f3", size = 390522, upload-time = "2025-11-30T20:21:42.17Z" }, + { url = "https://files.pythonhosted.org/packages/13/4e/57a85fda37a229ff4226f8cbcf09f2a455d1ed20e802ce5b2b4a7f5ed053/rpds_py-0.30.0-cp310-cp310-manylinux_2_31_riscv64.whl", hash = "sha256:a452763cc5198f2f98898eb98f7569649fe5da666c2dc6b5ddb10fde5a574221", size = 404579, upload-time = "2025-11-30T20:21:43.769Z" }, + { url = "https://files.pythonhosted.org/packages/f9/da/c9339293513ec680a721e0e16bf2bac3db6e5d7e922488de471308349bba/rpds_py-0.30.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e0b65193a413ccc930671c55153a03ee57cecb49e6227204b04fae512eb657a7", size = 421305, upload-time = "2025-11-30T20:21:44.994Z" }, + { url = "https://files.pythonhosted.org/packages/f9/be/522cb84751114f4ad9d822ff5a1aa3c98006341895d5f084779b99596e5c/rpds_py-0.30.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:858738e9c32147f78b3ac24dc0edb6610000e56dc0f700fd5f651d0a0f0eb9ff", size = 572503, upload-time = "2025-11-30T20:21:46.91Z" }, + { url = "https://files.pythonhosted.org/packages/a2/9b/de879f7e7ceddc973ea6e4629e9b380213a6938a249e94b0cdbcc325bb66/rpds_py-0.30.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:da279aa314f00acbb803da1e76fa18666778e8a8f83484fba94526da5de2cba7", size = 598322, upload-time = "2025-11-30T20:21:48.709Z" }, + { url = "https://files.pythonhosted.org/packages/48/ac/f01fc22efec3f37d8a914fc1b2fb9bcafd56a299edbe96406f3053edea5a/rpds_py-0.30.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7c64d38fb49b6cdeda16ab49e35fe0da2e1e9b34bc38bd78386530f218b37139", size = 560792, upload-time = "2025-11-30T20:21:50.024Z" }, + { url = "https://files.pythonhosted.org/packages/e2/da/4e2b19d0f131f35b6146425f846563d0ce036763e38913d917187307a671/rpds_py-0.30.0-cp310-cp310-win32.whl", hash = "sha256:6de2a32a1665b93233cde140ff8b3467bdb9e2af2b91079f0333a0974d12d464", size = 221901, upload-time = 
"2025-11-30T20:21:51.32Z" }, + { url = "https://files.pythonhosted.org/packages/96/cb/156d7a5cf4f78a7cc571465d8aec7a3c447c94f6749c5123f08438bcf7bc/rpds_py-0.30.0-cp310-cp310-win_amd64.whl", hash = "sha256:1726859cd0de969f88dc8673bdd954185b9104e05806be64bcd87badbe313169", size = 235823, upload-time = "2025-11-30T20:21:52.505Z" }, + { url = "https://files.pythonhosted.org/packages/4d/6e/f964e88b3d2abee2a82c1ac8366da848fce1c6d834dc2132c3fda3970290/rpds_py-0.30.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a2bffea6a4ca9f01b3f8e548302470306689684e61602aa3d141e34da06cf425", size = 370157, upload-time = "2025-11-30T20:21:53.789Z" }, + { url = "https://files.pythonhosted.org/packages/94/ba/24e5ebb7c1c82e74c4e4f33b2112a5573ddc703915b13a073737b59b86e0/rpds_py-0.30.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dc4f992dfe1e2bc3ebc7444f6c7051b4bc13cd8e33e43511e8ffd13bf407010d", size = 359676, upload-time = "2025-11-30T20:21:55.475Z" }, + { url = "https://files.pythonhosted.org/packages/84/86/04dbba1b087227747d64d80c3b74df946b986c57af0a9f0c98726d4d7a3b/rpds_py-0.30.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:422c3cb9856d80b09d30d2eb255d0754b23e090034e1deb4083f8004bd0761e4", size = 389938, upload-time = "2025-11-30T20:21:57.079Z" }, + { url = "https://files.pythonhosted.org/packages/42/bb/1463f0b1722b7f45431bdd468301991d1328b16cffe0b1c2918eba2c4eee/rpds_py-0.30.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:07ae8a593e1c3c6b82ca3292efbe73c30b61332fd612e05abee07c79359f292f", size = 402932, upload-time = "2025-11-30T20:21:58.47Z" }, + { url = "https://files.pythonhosted.org/packages/99/ee/2520700a5c1f2d76631f948b0736cdf9b0acb25abd0ca8e889b5c62ac2e3/rpds_py-0.30.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:12f90dd7557b6bd57f40abe7747e81e0c0b119bef015ea7726e69fe550e394a4", size = 525830, upload-time = "2025-11-30T20:21:59.699Z" }, + { url = 
"https://files.pythonhosted.org/packages/e0/ad/bd0331f740f5705cc555a5e17fdf334671262160270962e69a2bdef3bf76/rpds_py-0.30.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:99b47d6ad9a6da00bec6aabe5a6279ecd3c06a329d4aa4771034a21e335c3a97", size = 412033, upload-time = "2025-11-30T20:22:00.991Z" }, + { url = "https://files.pythonhosted.org/packages/f8/1e/372195d326549bb51f0ba0f2ecb9874579906b97e08880e7a65c3bef1a99/rpds_py-0.30.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33f559f3104504506a44bb666b93a33f5d33133765b0c216a5bf2f1e1503af89", size = 390828, upload-time = "2025-11-30T20:22:02.723Z" }, + { url = "https://files.pythonhosted.org/packages/ab/2b/d88bb33294e3e0c76bc8f351a3721212713629ffca1700fa94979cb3eae8/rpds_py-0.30.0-cp311-cp311-manylinux_2_31_riscv64.whl", hash = "sha256:946fe926af6e44f3697abbc305ea168c2c31d3e3ef1058cf68f379bf0335a78d", size = 404683, upload-time = "2025-11-30T20:22:04.367Z" }, + { url = "https://files.pythonhosted.org/packages/50/32/c759a8d42bcb5289c1fac697cd92f6fe01a018dd937e62ae77e0e7f15702/rpds_py-0.30.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:495aeca4b93d465efde585977365187149e75383ad2684f81519f504f5c13038", size = 421583, upload-time = "2025-11-30T20:22:05.814Z" }, + { url = "https://files.pythonhosted.org/packages/2b/81/e729761dbd55ddf5d84ec4ff1f47857f4374b0f19bdabfcf929164da3e24/rpds_py-0.30.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d9a0ca5da0386dee0655b4ccdf46119df60e0f10da268d04fe7cc87886872ba7", size = 572496, upload-time = "2025-11-30T20:22:07.713Z" }, + { url = "https://files.pythonhosted.org/packages/14/f6/69066a924c3557c9c30baa6ec3a0aa07526305684c6f86c696b08860726c/rpds_py-0.30.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8d6d1cc13664ec13c1b84241204ff3b12f9bb82464b8ad6e7a5d3486975c2eed", size = 598669, upload-time = "2025-11-30T20:22:09.312Z" }, + { url = 
"https://files.pythonhosted.org/packages/5f/48/905896b1eb8a05630d20333d1d8ffd162394127b74ce0b0784ae04498d32/rpds_py-0.30.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3896fa1be39912cf0757753826bc8bdc8ca331a28a7c4ae46b7a21280b06bb85", size = 561011, upload-time = "2025-11-30T20:22:11.309Z" }, + { url = "https://files.pythonhosted.org/packages/22/16/cd3027c7e279d22e5eb431dd3c0fbc677bed58797fe7581e148f3f68818b/rpds_py-0.30.0-cp311-cp311-win32.whl", hash = "sha256:55f66022632205940f1827effeff17c4fa7ae1953d2b74a8581baaefb7d16f8c", size = 221406, upload-time = "2025-11-30T20:22:13.101Z" }, + { url = "https://files.pythonhosted.org/packages/fa/5b/e7b7aa136f28462b344e652ee010d4de26ee9fd16f1bfd5811f5153ccf89/rpds_py-0.30.0-cp311-cp311-win_amd64.whl", hash = "sha256:a51033ff701fca756439d641c0ad09a41d9242fa69121c7d8769604a0a629825", size = 236024, upload-time = "2025-11-30T20:22:14.853Z" }, + { url = "https://files.pythonhosted.org/packages/14/a6/364bba985e4c13658edb156640608f2c9e1d3ea3c81b27aa9d889fff0e31/rpds_py-0.30.0-cp311-cp311-win_arm64.whl", hash = "sha256:47b0ef6231c58f506ef0b74d44e330405caa8428e770fec25329ed2cb971a229", size = 229069, upload-time = "2025-11-30T20:22:16.577Z" }, + { url = "https://files.pythonhosted.org/packages/03/e7/98a2f4ac921d82f33e03f3835f5bf3a4a40aa1bfdc57975e74a97b2b4bdd/rpds_py-0.30.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a161f20d9a43006833cd7068375a94d035714d73a172b681d8881820600abfad", size = 375086, upload-time = "2025-11-30T20:22:17.93Z" }, + { url = "https://files.pythonhosted.org/packages/4d/a1/bca7fd3d452b272e13335db8d6b0b3ecde0f90ad6f16f3328c6fb150c889/rpds_py-0.30.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6abc8880d9d036ecaafe709079969f56e876fcf107f7a8e9920ba6d5a3878d05", size = 359053, upload-time = "2025-11-30T20:22:19.297Z" }, + { url = 
"https://files.pythonhosted.org/packages/65/1c/ae157e83a6357eceff62ba7e52113e3ec4834a84cfe07fa4b0757a7d105f/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca28829ae5f5d569bb62a79512c842a03a12576375d5ece7d2cadf8abe96ec28", size = 390763, upload-time = "2025-11-30T20:22:21.661Z" }, + { url = "https://files.pythonhosted.org/packages/d4/36/eb2eb8515e2ad24c0bd43c3ee9cd74c33f7ca6430755ccdb240fd3144c44/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a1010ed9524c73b94d15919ca4d41d8780980e1765babf85f9a2f90d247153dd", size = 408951, upload-time = "2025-11-30T20:22:23.408Z" }, + { url = "https://files.pythonhosted.org/packages/d6/65/ad8dc1784a331fabbd740ef6f71ce2198c7ed0890dab595adb9ea2d775a1/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f8d1736cfb49381ba528cd5baa46f82fdc65c06e843dab24dd70b63d09121b3f", size = 514622, upload-time = "2025-11-30T20:22:25.16Z" }, + { url = "https://files.pythonhosted.org/packages/63/8e/0cfa7ae158e15e143fe03993b5bcd743a59f541f5952e1546b1ac1b5fd45/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d948b135c4693daff7bc2dcfc4ec57237a29bd37e60c2fabf5aff2bbacf3e2f1", size = 414492, upload-time = "2025-11-30T20:22:26.505Z" }, + { url = "https://files.pythonhosted.org/packages/60/1b/6f8f29f3f995c7ffdde46a626ddccd7c63aefc0efae881dc13b6e5d5bb16/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47f236970bccb2233267d89173d3ad2703cd36a0e2a6e92d0560d333871a3d23", size = 394080, upload-time = "2025-11-30T20:22:27.934Z" }, + { url = "https://files.pythonhosted.org/packages/6d/d5/a266341051a7a3ca2f4b750a3aa4abc986378431fc2da508c5034d081b70/rpds_py-0.30.0-cp312-cp312-manylinux_2_31_riscv64.whl", hash = "sha256:2e6ecb5a5bcacf59c3f912155044479af1d0b6681280048b338b28e364aca1f6", size = 408680, upload-time = "2025-11-30T20:22:29.341Z" }, + { url = 
"https://files.pythonhosted.org/packages/10/3b/71b725851df9ab7a7a4e33cf36d241933da66040d195a84781f49c50490c/rpds_py-0.30.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a8fa71a2e078c527c3e9dc9fc5a98c9db40bcc8a92b4e8858e36d329f8684b51", size = 423589, upload-time = "2025-11-30T20:22:31.469Z" }, + { url = "https://files.pythonhosted.org/packages/00/2b/e59e58c544dc9bd8bd8384ecdb8ea91f6727f0e37a7131baeff8d6f51661/rpds_py-0.30.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:73c67f2db7bc334e518d097c6d1e6fed021bbc9b7d678d6cc433478365d1d5f5", size = 573289, upload-time = "2025-11-30T20:22:32.997Z" }, + { url = "https://files.pythonhosted.org/packages/da/3e/a18e6f5b460893172a7d6a680e86d3b6bc87a54c1f0b03446a3c8c7b588f/rpds_py-0.30.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:5ba103fb455be00f3b1c2076c9d4264bfcb037c976167a6047ed82f23153f02e", size = 599737, upload-time = "2025-11-30T20:22:34.419Z" }, + { url = "https://files.pythonhosted.org/packages/5c/e2/714694e4b87b85a18e2c243614974413c60aa107fd815b8cbc42b873d1d7/rpds_py-0.30.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7cee9c752c0364588353e627da8a7e808a66873672bcb5f52890c33fd965b394", size = 563120, upload-time = "2025-11-30T20:22:35.903Z" }, + { url = "https://files.pythonhosted.org/packages/6f/ab/d5d5e3bcedb0a77f4f613706b750e50a5a3ba1c15ccd3665ecc636c968fd/rpds_py-0.30.0-cp312-cp312-win32.whl", hash = "sha256:1ab5b83dbcf55acc8b08fc62b796ef672c457b17dbd7820a11d6c52c06839bdf", size = 223782, upload-time = "2025-11-30T20:22:37.271Z" }, + { url = "https://files.pythonhosted.org/packages/39/3b/f786af9957306fdc38a74cef405b7b93180f481fb48453a114bb6465744a/rpds_py-0.30.0-cp312-cp312-win_amd64.whl", hash = "sha256:a090322ca841abd453d43456ac34db46e8b05fd9b3b4ac0c78bcde8b089f959b", size = 240463, upload-time = "2025-11-30T20:22:39.021Z" }, + { url = 
"https://files.pythonhosted.org/packages/f3/d2/b91dc748126c1559042cfe41990deb92c4ee3e2b415f6b5234969ffaf0cc/rpds_py-0.30.0-cp312-cp312-win_arm64.whl", hash = "sha256:669b1805bd639dd2989b281be2cfd951c6121b65e729d9b843e9639ef1fd555e", size = 230868, upload-time = "2025-11-30T20:22:40.493Z" }, + { url = "https://files.pythonhosted.org/packages/ed/dc/d61221eb88ff410de3c49143407f6f3147acf2538c86f2ab7ce65ae7d5f9/rpds_py-0.30.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:f83424d738204d9770830d35290ff3273fbb02b41f919870479fab14b9d303b2", size = 374887, upload-time = "2025-11-30T20:22:41.812Z" }, + { url = "https://files.pythonhosted.org/packages/fd/32/55fb50ae104061dbc564ef15cc43c013dc4a9f4527a1f4d99baddf56fe5f/rpds_py-0.30.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e7536cd91353c5273434b4e003cbda89034d67e7710eab8761fd918ec6c69cf8", size = 358904, upload-time = "2025-11-30T20:22:43.479Z" }, + { url = "https://files.pythonhosted.org/packages/58/70/faed8186300e3b9bdd138d0273109784eea2396c68458ed580f885dfe7ad/rpds_py-0.30.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2771c6c15973347f50fece41fc447c054b7ac2ae0502388ce3b6738cd366e3d4", size = 389945, upload-time = "2025-11-30T20:22:44.819Z" }, + { url = "https://files.pythonhosted.org/packages/bd/a8/073cac3ed2c6387df38f71296d002ab43496a96b92c823e76f46b8af0543/rpds_py-0.30.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0a59119fc6e3f460315fe9d08149f8102aa322299deaa5cab5b40092345c2136", size = 407783, upload-time = "2025-11-30T20:22:46.103Z" }, + { url = "https://files.pythonhosted.org/packages/77/57/5999eb8c58671f1c11eba084115e77a8899d6e694d2a18f69f0ba471ec8b/rpds_py-0.30.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:76fec018282b4ead0364022e3c54b60bf368b9d926877957a8624b58419169b7", size = 515021, upload-time = "2025-11-30T20:22:47.458Z" }, + { url = 
"https://files.pythonhosted.org/packages/e0/af/5ab4833eadc36c0a8ed2bc5c0de0493c04f6c06de223170bd0798ff98ced/rpds_py-0.30.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:692bef75a5525db97318e8cd061542b5a79812d711ea03dbc1f6f8dbb0c5f0d2", size = 414589, upload-time = "2025-11-30T20:22:48.872Z" }, + { url = "https://files.pythonhosted.org/packages/b7/de/f7192e12b21b9e9a68a6d0f249b4af3fdcdff8418be0767a627564afa1f1/rpds_py-0.30.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9027da1ce107104c50c81383cae773ef5c24d296dd11c99e2629dbd7967a20c6", size = 394025, upload-time = "2025-11-30T20:22:50.196Z" }, + { url = "https://files.pythonhosted.org/packages/91/c4/fc70cd0249496493500e7cc2de87504f5aa6509de1e88623431fec76d4b6/rpds_py-0.30.0-cp313-cp313-manylinux_2_31_riscv64.whl", hash = "sha256:9cf69cdda1f5968a30a359aba2f7f9aa648a9ce4b580d6826437f2b291cfc86e", size = 408895, upload-time = "2025-11-30T20:22:51.87Z" }, + { url = "https://files.pythonhosted.org/packages/58/95/d9275b05ab96556fefff73a385813eb66032e4c99f411d0795372d9abcea/rpds_py-0.30.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a4796a717bf12b9da9d3ad002519a86063dcac8988b030e405704ef7d74d2d9d", size = 422799, upload-time = "2025-11-30T20:22:53.341Z" }, + { url = "https://files.pythonhosted.org/packages/06/c1/3088fc04b6624eb12a57eb814f0d4997a44b0d208d6cace713033ff1a6ba/rpds_py-0.30.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5d4c2aa7c50ad4728a094ebd5eb46c452e9cb7edbfdb18f9e1221f597a73e1e7", size = 572731, upload-time = "2025-11-30T20:22:54.778Z" }, + { url = "https://files.pythonhosted.org/packages/d8/42/c612a833183b39774e8ac8fecae81263a68b9583ee343db33ab571a7ce55/rpds_py-0.30.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ba81a9203d07805435eb06f536d95a266c21e5b2dfbf6517748ca40c98d19e31", size = 599027, upload-time = "2025-11-30T20:22:56.212Z" }, + { url = 
"https://files.pythonhosted.org/packages/5f/60/525a50f45b01d70005403ae0e25f43c0384369ad24ffe46e8d9068b50086/rpds_py-0.30.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:945dccface01af02675628334f7cf49c2af4c1c904748efc5cf7bbdf0b579f95", size = 563020, upload-time = "2025-11-30T20:22:58.2Z" }, + { url = "https://files.pythonhosted.org/packages/0b/5d/47c4655e9bcd5ca907148535c10e7d489044243cc9941c16ed7cd53be91d/rpds_py-0.30.0-cp313-cp313-win32.whl", hash = "sha256:b40fb160a2db369a194cb27943582b38f79fc4887291417685f3ad693c5a1d5d", size = 223139, upload-time = "2025-11-30T20:23:00.209Z" }, + { url = "https://files.pythonhosted.org/packages/f2/e1/485132437d20aa4d3e1d8b3fb5a5e65aa8139f1e097080c2a8443201742c/rpds_py-0.30.0-cp313-cp313-win_amd64.whl", hash = "sha256:806f36b1b605e2d6a72716f321f20036b9489d29c51c91f4dd29a3e3afb73b15", size = 240224, upload-time = "2025-11-30T20:23:02.008Z" }, + { url = "https://files.pythonhosted.org/packages/24/95/ffd128ed1146a153d928617b0ef673960130be0009c77d8fbf0abe306713/rpds_py-0.30.0-cp313-cp313-win_arm64.whl", hash = "sha256:d96c2086587c7c30d44f31f42eae4eac89b60dabbac18c7669be3700f13c3ce1", size = 230645, upload-time = "2025-11-30T20:23:03.43Z" }, + { url = "https://files.pythonhosted.org/packages/ff/1b/b10de890a0def2a319a2626334a7f0ae388215eb60914dbac8a3bae54435/rpds_py-0.30.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:eb0b93f2e5c2189ee831ee43f156ed34e2a89a78a66b98cadad955972548be5a", size = 364443, upload-time = "2025-11-30T20:23:04.878Z" }, + { url = "https://files.pythonhosted.org/packages/0d/bf/27e39f5971dc4f305a4fb9c672ca06f290f7c4e261c568f3dea16a410d47/rpds_py-0.30.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:922e10f31f303c7c920da8981051ff6d8c1a56207dbdf330d9047f6d30b70e5e", size = 353375, upload-time = "2025-11-30T20:23:06.342Z" }, + { url = 
"https://files.pythonhosted.org/packages/40/58/442ada3bba6e8e6615fc00483135c14a7538d2ffac30e2d933ccf6852232/rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cdc62c8286ba9bf7f47befdcea13ea0e26bf294bda99758fd90535cbaf408000", size = 383850, upload-time = "2025-11-30T20:23:07.825Z" }, + { url = "https://files.pythonhosted.org/packages/14/14/f59b0127409a33c6ef6f5c1ebd5ad8e32d7861c9c7adfa9a624fc3889f6c/rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:47f9a91efc418b54fb8190a6b4aa7813a23fb79c51f4bb84e418f5476c38b8db", size = 392812, upload-time = "2025-11-30T20:23:09.228Z" }, + { url = "https://files.pythonhosted.org/packages/b3/66/e0be3e162ac299b3a22527e8913767d869e6cc75c46bd844aa43fb81ab62/rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1f3587eb9b17f3789ad50824084fa6f81921bbf9a795826570bda82cb3ed91f2", size = 517841, upload-time = "2025-11-30T20:23:11.186Z" }, + { url = "https://files.pythonhosted.org/packages/3d/55/fa3b9cf31d0c963ecf1ba777f7cf4b2a2c976795ac430d24a1f43d25a6ba/rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:39c02563fc592411c2c61d26b6c5fe1e51eaa44a75aa2c8735ca88b0d9599daa", size = 408149, upload-time = "2025-11-30T20:23:12.864Z" }, + { url = "https://files.pythonhosted.org/packages/60/ca/780cf3b1a32b18c0f05c441958d3758f02544f1d613abf9488cd78876378/rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51a1234d8febafdfd33a42d97da7a43f5dcb120c1060e352a3fbc0c6d36e2083", size = 383843, upload-time = "2025-11-30T20:23:14.638Z" }, + { url = "https://files.pythonhosted.org/packages/82/86/d5f2e04f2aa6247c613da0c1dd87fcd08fa17107e858193566048a1e2f0a/rpds_py-0.30.0-cp313-cp313t-manylinux_2_31_riscv64.whl", hash = "sha256:eb2c4071ab598733724c08221091e8d80e89064cd472819285a9ab0f24bcedb9", size = 396507, upload-time = "2025-11-30T20:23:16.105Z" }, + { url = 
"https://files.pythonhosted.org/packages/4b/9a/453255d2f769fe44e07ea9785c8347edaf867f7026872e76c1ad9f7bed92/rpds_py-0.30.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6bdfdb946967d816e6adf9a3d8201bfad269c67efe6cefd7093ef959683c8de0", size = 414949, upload-time = "2025-11-30T20:23:17.539Z" }, + { url = "https://files.pythonhosted.org/packages/a3/31/622a86cdc0c45d6df0e9ccb6becdba5074735e7033c20e401a6d9d0e2ca0/rpds_py-0.30.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c77afbd5f5250bf27bf516c7c4a016813eb2d3e116139aed0096940c5982da94", size = 565790, upload-time = "2025-11-30T20:23:19.029Z" }, + { url = "https://files.pythonhosted.org/packages/1c/5d/15bbf0fb4a3f58a3b1c67855ec1efcc4ceaef4e86644665fff03e1b66d8d/rpds_py-0.30.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:61046904275472a76c8c90c9ccee9013d70a6d0f73eecefd38c1ae7c39045a08", size = 590217, upload-time = "2025-11-30T20:23:20.885Z" }, + { url = "https://files.pythonhosted.org/packages/6d/61/21b8c41f68e60c8cc3b2e25644f0e3681926020f11d06ab0b78e3c6bbff1/rpds_py-0.30.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4c5f36a861bc4b7da6516dbdf302c55313afa09b81931e8280361a4f6c9a2d27", size = 555806, upload-time = "2025-11-30T20:23:22.488Z" }, + { url = "https://files.pythonhosted.org/packages/f9/39/7e067bb06c31de48de3eb200f9fc7c58982a4d3db44b07e73963e10d3be9/rpds_py-0.30.0-cp313-cp313t-win32.whl", hash = "sha256:3d4a69de7a3e50ffc214ae16d79d8fbb0922972da0356dcf4d0fdca2878559c6", size = 211341, upload-time = "2025-11-30T20:23:24.449Z" }, + { url = "https://files.pythonhosted.org/packages/0a/4d/222ef0b46443cf4cf46764d9c630f3fe4abaa7245be9417e56e9f52b8f65/rpds_py-0.30.0-cp313-cp313t-win_amd64.whl", hash = "sha256:f14fc5df50a716f7ece6a80b6c78bb35ea2ca47c499e422aa4463455dd96d56d", size = 225768, upload-time = "2025-11-30T20:23:25.908Z" }, + { url = 
"https://files.pythonhosted.org/packages/86/81/dad16382ebbd3d0e0328776d8fd7ca94220e4fa0798d1dc5e7da48cb3201/rpds_py-0.30.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:68f19c879420aa08f61203801423f6cd5ac5f0ac4ac82a2368a9fcd6a9a075e0", size = 362099, upload-time = "2025-11-30T20:23:27.316Z" }, + { url = "https://files.pythonhosted.org/packages/2b/60/19f7884db5d5603edf3c6bce35408f45ad3e97e10007df0e17dd57af18f8/rpds_py-0.30.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:ec7c4490c672c1a0389d319b3a9cfcd098dcdc4783991553c332a15acf7249be", size = 353192, upload-time = "2025-11-30T20:23:29.151Z" }, + { url = "https://files.pythonhosted.org/packages/bf/c4/76eb0e1e72d1a9c4703c69607cec123c29028bff28ce41588792417098ac/rpds_py-0.30.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f251c812357a3fed308d684a5079ddfb9d933860fc6de89f2b7ab00da481e65f", size = 384080, upload-time = "2025-11-30T20:23:30.785Z" }, + { url = "https://files.pythonhosted.org/packages/72/87/87ea665e92f3298d1b26d78814721dc39ed8d2c74b86e83348d6b48a6f31/rpds_py-0.30.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac98b175585ecf4c0348fd7b29c3864bda53b805c773cbf7bfdaffc8070c976f", size = 394841, upload-time = "2025-11-30T20:23:32.209Z" }, + { url = "https://files.pythonhosted.org/packages/77/ad/7783a89ca0587c15dcbf139b4a8364a872a25f861bdb88ed99f9b0dec985/rpds_py-0.30.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3e62880792319dbeb7eb866547f2e35973289e7d5696c6e295476448f5b63c87", size = 516670, upload-time = "2025-11-30T20:23:33.742Z" }, + { url = "https://files.pythonhosted.org/packages/5b/3c/2882bdac942bd2172f3da574eab16f309ae10a3925644e969536553cb4ee/rpds_py-0.30.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4e7fc54e0900ab35d041b0601431b0a0eb495f0851a0639b6ef90f7741b39a18", size = 408005, upload-time = "2025-11-30T20:23:35.253Z" }, + { url = 
"https://files.pythonhosted.org/packages/ce/81/9a91c0111ce1758c92516a3e44776920b579d9a7c09b2b06b642d4de3f0f/rpds_py-0.30.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47e77dc9822d3ad616c3d5759ea5631a75e5809d5a28707744ef79d7a1bcfcad", size = 382112, upload-time = "2025-11-30T20:23:36.842Z" }, + { url = "https://files.pythonhosted.org/packages/cf/8e/1da49d4a107027e5fbc64daeab96a0706361a2918da10cb41769244b805d/rpds_py-0.30.0-cp314-cp314-manylinux_2_31_riscv64.whl", hash = "sha256:b4dc1a6ff022ff85ecafef7979a2c6eb423430e05f1165d6688234e62ba99a07", size = 399049, upload-time = "2025-11-30T20:23:38.343Z" }, + { url = "https://files.pythonhosted.org/packages/df/5a/7ee239b1aa48a127570ec03becbb29c9d5a9eb092febbd1699d567cae859/rpds_py-0.30.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4559c972db3a360808309e06a74628b95eaccbf961c335c8fe0d590cf587456f", size = 415661, upload-time = "2025-11-30T20:23:40.263Z" }, + { url = "https://files.pythonhosted.org/packages/70/ea/caa143cf6b772f823bc7929a45da1fa83569ee49b11d18d0ada7f5ee6fd6/rpds_py-0.30.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:0ed177ed9bded28f8deb6ab40c183cd1192aa0de40c12f38be4d59cd33cb5c65", size = 565606, upload-time = "2025-11-30T20:23:42.186Z" }, + { url = "https://files.pythonhosted.org/packages/64/91/ac20ba2d69303f961ad8cf55bf7dbdb4763f627291ba3d0d7d67333cced9/rpds_py-0.30.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:ad1fa8db769b76ea911cb4e10f049d80bf518c104f15b3edb2371cc65375c46f", size = 591126, upload-time = "2025-11-30T20:23:44.086Z" }, + { url = "https://files.pythonhosted.org/packages/21/20/7ff5f3c8b00c8a95f75985128c26ba44503fb35b8e0259d812766ea966c7/rpds_py-0.30.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:46e83c697b1f1c72b50e5ee5adb4353eef7406fb3f2043d64c33f20ad1c2fc53", size = 553371, upload-time = "2025-11-30T20:23:46.004Z" }, + { url = 
"https://files.pythonhosted.org/packages/72/c7/81dadd7b27c8ee391c132a6b192111ca58d866577ce2d9b0ca157552cce0/rpds_py-0.30.0-cp314-cp314-win32.whl", hash = "sha256:ee454b2a007d57363c2dfd5b6ca4a5d7e2c518938f8ed3b706e37e5d470801ed", size = 215298, upload-time = "2025-11-30T20:23:47.696Z" }, + { url = "https://files.pythonhosted.org/packages/3e/d2/1aaac33287e8cfb07aab2e6b8ac1deca62f6f65411344f1433c55e6f3eb8/rpds_py-0.30.0-cp314-cp314-win_amd64.whl", hash = "sha256:95f0802447ac2d10bcc69f6dc28fe95fdf17940367b21d34e34c737870758950", size = 228604, upload-time = "2025-11-30T20:23:49.501Z" }, + { url = "https://files.pythonhosted.org/packages/e8/95/ab005315818cc519ad074cb7784dae60d939163108bd2b394e60dc7b5461/rpds_py-0.30.0-cp314-cp314-win_arm64.whl", hash = "sha256:613aa4771c99f03346e54c3f038e4cc574ac09a3ddfb0e8878487335e96dead6", size = 222391, upload-time = "2025-11-30T20:23:50.96Z" }, + { url = "https://files.pythonhosted.org/packages/9e/68/154fe0194d83b973cdedcdcc88947a2752411165930182ae41d983dcefa6/rpds_py-0.30.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:7e6ecfcb62edfd632e56983964e6884851786443739dbfe3582947e87274f7cb", size = 364868, upload-time = "2025-11-30T20:23:52.494Z" }, + { url = "https://files.pythonhosted.org/packages/83/69/8bbc8b07ec854d92a8b75668c24d2abcb1719ebf890f5604c61c9369a16f/rpds_py-0.30.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:a1d0bc22a7cdc173fedebb73ef81e07faef93692b8c1ad3733b67e31e1b6e1b8", size = 353747, upload-time = "2025-11-30T20:23:54.036Z" }, + { url = "https://files.pythonhosted.org/packages/ab/00/ba2e50183dbd9abcce9497fa5149c62b4ff3e22d338a30d690f9af970561/rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d08f00679177226c4cb8c5265012eea897c8ca3b93f429e546600c971bcbae7", size = 383795, upload-time = "2025-11-30T20:23:55.556Z" }, + { url = 
"https://files.pythonhosted.org/packages/05/6f/86f0272b84926bcb0e4c972262f54223e8ecc556b3224d281e6598fc9268/rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5965af57d5848192c13534f90f9dd16464f3c37aaf166cc1da1cae1fd5a34898", size = 393330, upload-time = "2025-11-30T20:23:57.033Z" }, + { url = "https://files.pythonhosted.org/packages/cb/e9/0e02bb2e6dc63d212641da45df2b0bf29699d01715913e0d0f017ee29438/rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a4e86e34e9ab6b667c27f3211ca48f73dba7cd3d90f8d5b11be56e5dbc3fb4e", size = 518194, upload-time = "2025-11-30T20:23:58.637Z" }, + { url = "https://files.pythonhosted.org/packages/ee/ca/be7bca14cf21513bdf9c0606aba17d1f389ea2b6987035eb4f62bd923f25/rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5d3e6b26f2c785d65cc25ef1e5267ccbe1b069c5c21b8cc724efee290554419", size = 408340, upload-time = "2025-11-30T20:24:00.2Z" }, + { url = "https://files.pythonhosted.org/packages/c2/c7/736e00ebf39ed81d75544c0da6ef7b0998f8201b369acf842f9a90dc8fce/rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:626a7433c34566535b6e56a1b39a7b17ba961e97ce3b80ec62e6f1312c025551", size = 383765, upload-time = "2025-11-30T20:24:01.759Z" }, + { url = "https://files.pythonhosted.org/packages/4a/3f/da50dfde9956aaf365c4adc9533b100008ed31aea635f2b8d7b627e25b49/rpds_py-0.30.0-cp314-cp314t-manylinux_2_31_riscv64.whl", hash = "sha256:acd7eb3f4471577b9b5a41baf02a978e8bdeb08b4b355273994f8b87032000a8", size = 396834, upload-time = "2025-11-30T20:24:03.687Z" }, + { url = "https://files.pythonhosted.org/packages/4e/00/34bcc2565b6020eab2623349efbdec810676ad571995911f1abdae62a3a0/rpds_py-0.30.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fe5fa731a1fa8a0a56b0977413f8cacac1768dad38d16b3a296712709476fbd5", size = 415470, upload-time = "2025-11-30T20:24:05.232Z" }, + { url = 
"https://files.pythonhosted.org/packages/8c/28/882e72b5b3e6f718d5453bd4d0d9cf8df36fddeb4ddbbab17869d5868616/rpds_py-0.30.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:74a3243a411126362712ee1524dfc90c650a503502f135d54d1b352bd01f2404", size = 565630, upload-time = "2025-11-30T20:24:06.878Z" }, + { url = "https://files.pythonhosted.org/packages/3b/97/04a65539c17692de5b85c6e293520fd01317fd878ea1995f0367d4532fb1/rpds_py-0.30.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:3e8eeb0544f2eb0d2581774be4c3410356eba189529a6b3e36bbbf9696175856", size = 591148, upload-time = "2025-11-30T20:24:08.445Z" }, + { url = "https://files.pythonhosted.org/packages/85/70/92482ccffb96f5441aab93e26c4d66489eb599efdcf96fad90c14bbfb976/rpds_py-0.30.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:dbd936cde57abfee19ab3213cf9c26be06d60750e60a8e4dd85d1ab12c8b1f40", size = 556030, upload-time = "2025-11-30T20:24:10.956Z" }, + { url = "https://files.pythonhosted.org/packages/20/53/7c7e784abfa500a2b6b583b147ee4bb5a2b3747a9166bab52fec4b5b5e7d/rpds_py-0.30.0-cp314-cp314t-win32.whl", hash = "sha256:dc824125c72246d924f7f796b4f63c1e9dc810c7d9e2355864b3c3a73d59ade0", size = 211570, upload-time = "2025-11-30T20:24:12.735Z" }, + { url = "https://files.pythonhosted.org/packages/d0/02/fa464cdfbe6b26e0600b62c528b72d8608f5cc49f96b8d6e38c95d60c676/rpds_py-0.30.0-cp314-cp314t-win_amd64.whl", hash = "sha256:27f4b0e92de5bfbc6f86e43959e6edd1425c33b5e69aab0984a72047f2bcf1e3", size = 226532, upload-time = "2025-11-30T20:24:14.634Z" }, + { url = "https://files.pythonhosted.org/packages/69/71/3f34339ee70521864411f8b6992e7ab13ac30d8e4e3309e07c7361767d91/rpds_py-0.30.0-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c2262bdba0ad4fc6fb5545660673925c2d2a5d9e2e0fb603aad545427be0fc58", size = 372292, upload-time = "2025-11-30T20:24:16.537Z" }, + { url = 
"https://files.pythonhosted.org/packages/57/09/f183df9b8f2d66720d2ef71075c59f7e1b336bec7ee4c48f0a2b06857653/rpds_py-0.30.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:ee6af14263f25eedc3bb918a3c04245106a42dfd4f5c2285ea6f997b1fc3f89a", size = 362128, upload-time = "2025-11-30T20:24:18.086Z" }, + { url = "https://files.pythonhosted.org/packages/7a/68/5c2594e937253457342e078f0cc1ded3dd7b2ad59afdbf2d354869110a02/rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3adbb8179ce342d235c31ab8ec511e66c73faa27a47e076ccc92421add53e2bb", size = 391542, upload-time = "2025-11-30T20:24:20.092Z" }, + { url = "https://files.pythonhosted.org/packages/49/5c/31ef1afd70b4b4fbdb2800249f34c57c64beb687495b10aec0365f53dfc4/rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:250fa00e9543ac9b97ac258bd37367ff5256666122c2d0f2bc97577c60a1818c", size = 404004, upload-time = "2025-11-30T20:24:22.231Z" }, + { url = "https://files.pythonhosted.org/packages/e3/63/0cfbea38d05756f3440ce6534d51a491d26176ac045e2707adc99bb6e60a/rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9854cf4f488b3d57b9aaeb105f06d78e5529d3145b1e4a41750167e8c213c6d3", size = 527063, upload-time = "2025-11-30T20:24:24.302Z" }, + { url = "https://files.pythonhosted.org/packages/42/e6/01e1f72a2456678b0f618fc9a1a13f882061690893c192fcad9f2926553a/rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:993914b8e560023bc0a8bf742c5f303551992dcb85e247b1e5c7f4a7d145bda5", size = 413099, upload-time = "2025-11-30T20:24:25.916Z" }, + { url = "https://files.pythonhosted.org/packages/b8/25/8df56677f209003dcbb180765520c544525e3ef21ea72279c98b9aa7c7fb/rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58edca431fb9b29950807e301826586e5bbf24163677732429770a697ffe6738", size = 392177, upload-time = 
"2025-11-30T20:24:27.834Z" }, + { url = "https://files.pythonhosted.org/packages/4a/b4/0a771378c5f16f8115f796d1f437950158679bcd2a7c68cf251cfb00ed5b/rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_31_riscv64.whl", hash = "sha256:dea5b552272a944763b34394d04577cf0f9bd013207bc32323b5a89a53cf9c2f", size = 406015, upload-time = "2025-11-30T20:24:29.457Z" }, + { url = "https://files.pythonhosted.org/packages/36/d8/456dbba0af75049dc6f63ff295a2f92766b9d521fa00de67a2bd6427d57a/rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ba3af48635eb83d03f6c9735dfb21785303e73d22ad03d489e88adae6eab8877", size = 423736, upload-time = "2025-11-30T20:24:31.22Z" }, + { url = "https://files.pythonhosted.org/packages/13/64/b4d76f227d5c45a7e0b796c674fd81b0a6c4fbd48dc29271857d8219571c/rpds_py-0.30.0-pp311-pypy311_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:dff13836529b921e22f15cb099751209a60009731a68519630a24d61f0b1b30a", size = 573981, upload-time = "2025-11-30T20:24:32.934Z" }, + { url = "https://files.pythonhosted.org/packages/20/91/092bacadeda3edf92bf743cc96a7be133e13a39cdbfd7b5082e7ab638406/rpds_py-0.30.0-pp311-pypy311_pp73-musllinux_1_2_i686.whl", hash = "sha256:1b151685b23929ab7beec71080a8889d4d6d9fa9a983d213f07121205d48e2c4", size = 599782, upload-time = "2025-11-30T20:24:35.169Z" }, + { url = "https://files.pythonhosted.org/packages/d1/b7/b95708304cd49b7b6f82fdd039f1748b66ec2b21d6a45180910802f1abf1/rpds_py-0.30.0-pp311-pypy311_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:ac37f9f516c51e5753f27dfdef11a88330f04de2d564be3991384b2f3535d02e", size = 562191, upload-time = "2025-11-30T20:24:36.853Z" }, +] + [[package]] name = "ruff" version = "0.13.1" From 265a3a5d322035686fe26cf0438645aa9cbff294 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Halber?= Date: Wed, 25 Mar 2026 21:44:01 +0000 Subject: [PATCH 11/14] fix: no guard against closed file --- py/src/braintrust/auto.py | 8 +++++- py/src/braintrust/logger.py | 54 
+++++++++++++++++++++++++++---------- 2 files changed, 47 insertions(+), 15 deletions(-) diff --git a/py/src/braintrust/auto.py b/py/src/braintrust/auto.py index c8192a45..4312fd20 100644 --- a/py/src/braintrust/auto.py +++ b/py/src/braintrust/auto.py @@ -7,7 +7,13 @@ import logging from contextlib import contextmanager -from braintrust.integrations import ADKIntegration, AgnoIntegration, AnthropicIntegration, ClaudeAgentSDKIntegration +from braintrust.integrations import ( + ADKIntegration, + AgnoIntegration, + AnthropicIntegration, + ClaudeAgentSDKIntegration, + LangChainIntegration, +) __all__ = ["auto_instrument"] diff --git a/py/src/braintrust/logger.py b/py/src/braintrust/logger.py index f7c56d2b..59bce505 100644 --- a/py/src/braintrust/logger.py +++ b/py/src/braintrust/logger.py @@ -1142,7 +1142,11 @@ def _get_max_request_size(self) -> dict[str, Any]: if isinstance(limit, (int, float)) and int(limit) > 0: server_limit = int(limit) except Exception as e: - print(f"Failed to fetch version info for payload limit: {e}", file=self.outfile) + try: + print(f"Failed to fetch version info for payload limit: {e}", file=self.outfile) + except ValueError as ve: + if "operation on closed file" not in str(ve): + raise valid_server_limit = server_limit if server_limit is not None and server_limit > 0 else None can_use_overflow = valid_server_limit is not None max_request_size = DEFAULT_MAX_REQUEST_SIZE @@ -1266,17 +1270,26 @@ def _unwrap_lazy_values( if not is_retrying and self.sync_flush: raise Exception(errmsg) from e else: - print(errmsg, file=self.outfile) - traceback.print_exc(file=self.outfile) + try: + print(errmsg, file=self.outfile) + traceback.print_exc(file=self.outfile) + if is_retrying: + sleep_time_s = BACKGROUND_LOGGER_BASE_SLEEP_TIME_S * (2**i) + print(f"Sleeping for {sleep_time_s}s", file=self.outfile) + except ValueError as ve: + if "operation on closed file" not in str(ve): + raise if is_retrying: - sleep_time_s = BACKGROUND_LOGGER_BASE_SLEEP_TIME_S * 
(2**i) - print(f"Sleeping for {sleep_time_s}s", file=self.outfile) time.sleep(sleep_time_s) - print( - f"Failed to construct log records to flush after {self.num_tries} attempts. Dropping batch", - file=self.outfile, - ) + try: + print( + f"Failed to construct log records to flush after {self.num_tries} attempts. Dropping batch", + file=self.outfile, + ) + except ValueError as ve: + if "operation on closed file" not in str(ve): + raise return [], [] def _request_logs3_overflow_upload( @@ -1403,13 +1416,22 @@ def _submit_logs_request(self, items: Sequence[LogItemWithMeta], max_request_siz if not is_retrying and self.sync_flush: raise Exception(errmsg) else: - print(errmsg, file=self.outfile) + try: + print(errmsg, file=self.outfile) + if is_retrying: + sleep_time_s = BACKGROUND_LOGGER_BASE_SLEEP_TIME_S * (2**i) + print(f"Sleeping for {sleep_time_s}s", file=self.outfile) + except ValueError as ve: + if "operation on closed file" not in str(ve): + raise if is_retrying: - sleep_time_s = BACKGROUND_LOGGER_BASE_SLEEP_TIME_S * (2**i) - print(f"Sleeping for {sleep_time_s}s", file=self.outfile) time.sleep(sleep_time_s) - print(f"log request failed after {self.num_tries} retries. Dropping batch", file=self.outfile) + try: + print(f"log request failed after {self.num_tries} retries. 
Dropping batch", file=self.outfile) + except ValueError as ve: + if "operation on closed file" not in str(ve): + raise def _dump_dropped_events(self, wrapped_items): publish_payloads_dir = [x for x in [self.all_publish_payloads_dir, self.failed_publish_payloads_dir] if x] @@ -1455,7 +1477,11 @@ def _write_payload_to_dir(payload_dir, payload, debug_logging_adjective=None): eprint(f"Failed to write failed payload to output file {payload_file}:\n", e) def _log_failed_payloads_dir(self): - print(f"Logging failed payloads to {self.failed_publish_payloads_dir}", file=self.outfile) + try: + print(f"Logging failed payloads to {self.failed_publish_payloads_dir}", file=self.outfile) + except ValueError as ve: + if "operation on closed file" not in str(ve): + raise # Should only be called by BraintrustState. def internal_replace_api_conn(self, api_conn: HTTPConnection): From de865f138246874d4f5cf12d10520e4ac3e96c4d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Halber?= Date: Wed, 25 Mar 2026 21:49:21 +0000 Subject: [PATCH 12/14] fix: old tests not updated, asking for too many arguments --- py/src/braintrust/logger.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/py/src/braintrust/logger.py b/py/src/braintrust/logger.py index 59bce505..bd31c310 100644 --- a/py/src/braintrust/logger.py +++ b/py/src/braintrust/logger.py @@ -1270,16 +1270,16 @@ def _unwrap_lazy_values( if not is_retrying and self.sync_flush: raise Exception(errmsg) from e else: + sleep_time_s = BACKGROUND_LOGGER_BASE_SLEEP_TIME_S * (2**i) if is_retrying else None try: print(errmsg, file=self.outfile) traceback.print_exc(file=self.outfile) - if is_retrying: - sleep_time_s = BACKGROUND_LOGGER_BASE_SLEEP_TIME_S * (2**i) + if sleep_time_s is not None: print(f"Sleeping for {sleep_time_s}s", file=self.outfile) except ValueError as ve: if "operation on closed file" not in str(ve): raise - if is_retrying: + if sleep_time_s is not None: time.sleep(sleep_time_s) try: @@ -1416,15 
+1416,15 @@ def _submit_logs_request(self, items: Sequence[LogItemWithMeta], max_request_siz if not is_retrying and self.sync_flush: raise Exception(errmsg) else: + sleep_time_s = BACKGROUND_LOGGER_BASE_SLEEP_TIME_S * (2**i) if is_retrying else None try: print(errmsg, file=self.outfile) - if is_retrying: - sleep_time_s = BACKGROUND_LOGGER_BASE_SLEEP_TIME_S * (2**i) + if sleep_time_s is not None: print(f"Sleeping for {sleep_time_s}s", file=self.outfile) except ValueError as ve: if "operation on closed file" not in str(ve): raise - if is_retrying: + if sleep_time_s is not None: time.sleep(sleep_time_s) try: From b06cefd272b4c8a2c84649c1043f413ea73d15ef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Halber?= Date: Wed, 25 Mar 2026 21:52:13 +0000 Subject: [PATCH 13/14] fix: old tests not updated, asking for too many arguments --- .../langchain-py/src/tests/test_callbacks.py | 33 ------------------- .../langchain-py/src/tests/test_context.py | 11 ------- 2 files changed, 44 deletions(-) diff --git a/integrations/langchain-py/src/tests/test_callbacks.py b/integrations/langchain-py/src/tests/test_callbacks.py index 8cc9f926..8dcf9b65 100644 --- a/integrations/langchain-py/src/tests/test_callbacks.py +++ b/integrations/langchain-py/src/tests/test_callbacks.py @@ -58,9 +58,6 @@ def test_llm_calls(logger_memory_logger: LoggerMemoryLogger): "additional_kwargs": ANY, "response_metadata": ANY, "type": "ai", - "name": ANY, - "id": ANY, - "example": ANY, "tool_calls": ANY, "invalid_tool_calls": ANY, "usage_metadata": ANY, @@ -79,8 +76,6 @@ def test_llm_calls(logger_memory_logger: LoggerMemoryLogger): "additional_kwargs": {}, "response_metadata": {}, "type": "human", - "name": None, - "id": None, } ] }, @@ -97,9 +92,6 @@ def test_llm_calls(logger_memory_logger: LoggerMemoryLogger): "additional_kwargs": {}, "response_metadata": {}, "type": "human", - "name": None, - "id": None, - "example": ANY, } ] ], @@ -115,8 +107,6 @@ def test_llm_calls(logger_memory_logger: 
LoggerMemoryLogger): "additional_kwargs": ANY, "response_metadata": ANY, "type": "ai", - "name": None, - "id": ANY, }, } ] @@ -129,7 +119,6 @@ def test_llm_calls(logger_memory_logger: LoggerMemoryLogger): }, "model_name": "gpt-4o-mini-2024-07-18", }, - "run": None, "type": "LLMResult", }, "metrics": { @@ -200,8 +189,6 @@ def test_chain_with_memory(logger_memory_logger: LoggerMemoryLogger): "additional_kwargs": {}, "response_metadata": {}, "type": "human", - "name": None, - "id": None, } ] }, @@ -218,9 +205,6 @@ def test_chain_with_memory(logger_memory_logger: LoggerMemoryLogger): "additional_kwargs": {}, "response_metadata": {}, "type": "human", - "name": None, - "id": None, - "example": ANY, } ] ], @@ -236,8 +220,6 @@ def test_chain_with_memory(logger_memory_logger: LoggerMemoryLogger): "additional_kwargs": ANY, "response_metadata": ANY, "type": "ai", - "name": None, - "id": ANY, }, } ] @@ -250,7 +232,6 @@ def test_chain_with_memory(logger_memory_logger: LoggerMemoryLogger): }, "model_name": "gpt-4o-mini-2024-07-18", }, - "run": None, "type": "LLMResult", }, "metrics": { @@ -331,9 +312,6 @@ def calculator(input: CalculatorInput) -> str: "additional_kwargs": {}, "response_metadata": {}, "type": "human", - "name": None, - "id": None, - "example": ANY, } ] ], @@ -366,8 +344,6 @@ def calculator(input: CalculatorInput) -> str: "tool_calls": ANY, # Tool call details }, "response_metadata": ANY, - "name": None, - "id": ANY, }, } ] @@ -380,7 +356,6 @@ def calculator(input: CalculatorInput) -> str: }, "model_name": "gpt-4o-mini-2024-07-18", }, - "run": None, "type": "LLMResult", }, "metrics": { @@ -585,9 +560,6 @@ def say_bye(state: Dict[str, str]): "additional_kwargs": {}, "response_metadata": {}, "type": "human", - "name": None, - "id": None, - "example": ANY, } ] ], @@ -607,8 +579,6 @@ def say_bye(state: Dict[str, str]): "additional_kwargs": ANY, "response_metadata": ANY, "type": "ai", - "name": None, - "id": ANY, }, } ] @@ -910,9 +880,6 @@ def 
test_streaming_ttft(logger_memory_logger: LoggerMemoryLogger): { "additional_kwargs": {}, "content": "Count from 1 to 5.", - "example": False, - "id": None, - "name": None, "response_metadata": {}, "type": "human", } diff --git a/integrations/langchain-py/src/tests/test_context.py b/integrations/langchain-py/src/tests/test_context.py index c0567396..435ca931 100644 --- a/integrations/langchain-py/src/tests/test_context.py +++ b/integrations/langchain-py/src/tests/test_context.py @@ -61,9 +61,6 @@ def test_global_handler(logger_memory_logger: LoggerMemoryLogger): "additional_kwargs": ANY, "response_metadata": ANY, "type": "ai", - "name": ANY, - "id": ANY, - "example": ANY, "tool_calls": ANY, "invalid_tool_calls": ANY, "usage_metadata": ANY, @@ -82,8 +79,6 @@ def test_global_handler(logger_memory_logger: LoggerMemoryLogger): "additional_kwargs": {}, "response_metadata": {}, "type": "human", - "name": None, - "id": None, } ] }, @@ -100,9 +95,6 @@ def test_global_handler(logger_memory_logger: LoggerMemoryLogger): "additional_kwargs": {}, "response_metadata": {}, "type": "human", - "name": None, - "id": None, - "example": ANY, } ] ], @@ -118,8 +110,6 @@ def test_global_handler(logger_memory_logger: LoggerMemoryLogger): "additional_kwargs": ANY, "response_metadata": ANY, "type": "ai", - "name": None, - "id": ANY, }, } ] @@ -132,7 +122,6 @@ def test_global_handler(logger_memory_logger: LoggerMemoryLogger): }, "model_name": "gpt-4o-mini-2024-07-18", }, - "run": None, "type": "LLMResult", }, "metrics": { From 2e94ee8840bae04ccb87a48e9c2c98f6c7886a58 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Halber?= Date: Wed, 25 Mar 2026 21:53:02 +0000 Subject: [PATCH 14/14] fix: old tests not updated, asking for too many arguments --- integrations/langchain-py/src/tests/test_callbacks.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/integrations/langchain-py/src/tests/test_callbacks.py b/integrations/langchain-py/src/tests/test_callbacks.py index 
8dcf9b65..eed77232 100644 --- a/integrations/langchain-py/src/tests/test_callbacks.py +++ b/integrations/langchain-py/src/tests/test_callbacks.py @@ -340,9 +340,7 @@ def calculator(input: CalculatorInput) -> str: "message": { "content": ANY, # May be empty for tool calls "type": "ai", - "additional_kwargs": { - "tool_calls": ANY, # Tool call details - }, + "additional_kwargs": ANY, "response_metadata": ANY, }, } @@ -591,7 +589,6 @@ def say_bye(state: Dict[str, str]): }, "model_name": "gpt-4o-mini-2024-07-18", }, - "run": None, "type": "LLMResult", }, "metrics": {