Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions src/agents/extensions/models/any_llm_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -360,6 +360,13 @@ async def _get_response_via_responses(
prompt=prompt,
)

status = getattr(response, "status", None)
if status in {"failed", "incomplete"}:
raise response_terminal_failure_error(
f"response.{status}",
response if isinstance(response, Response) else None,
)

if _debug.DONT_LOG_MODEL_DATA:
logger.debug("LLM responded")
else:
Expand Down
7 changes: 7 additions & 0 deletions src/agents/models/openai_responses.py
Original file line number Diff line number Diff line change
Expand Up @@ -472,6 +472,13 @@ async def get_response(
prompt=prompt,
)

status = getattr(response, "status", None)
if status in {"failed", "incomplete"}:
raise response_terminal_failure_error(
f"response.{status}",
response if isinstance(response, Response) else None,
)

if _debug.DONT_LOG_MODEL_DATA:
logger.debug("LLM responded")
else:
Expand Down
29 changes: 29 additions & 0 deletions tests/models/test_any_llm_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -623,6 +623,35 @@ async def response_stream() -> AsyncIterator[ResponseErrorEvent]:
assert events[0].code == "invalid_request_error"


@pytest.mark.allow_call_model_methods
@pytest.mark.asyncio
@pytest.mark.parametrize("status", ["failed", "incomplete"])
async def test_any_llm_responses_path_rejects_failed_or_incomplete_status(
    monkeypatch,
    status: str,
) -> None:
    """A Responses-API result with a terminal non-success status must raise.

    The any-llm Responses path should surface `failed`/`incomplete` statuses
    as a ModelBehaviorError instead of returning a partial response.
    """
    # Build a fake provider that returns a response stamped with the
    # terminal status under test.
    response = _response("partial", response_id="resp-terminal")
    response.status = status  # type: ignore[assignment]
    provider = FakeAnyLLMProvider(supports_responses=True, responses_response=response)
    module, _create_calls = _import_any_llm_module(monkeypatch, provider)
    AnyLLMModel = module.AnyLLMModel

    model = AnyLLMModel(model="openai/gpt-5.4-mini")
    # `match` is a regex (re.search); escape the dot so the assertion pins
    # the literal "response.<status>" marker rather than any character.
    with pytest.raises(ModelBehaviorError, match=rf"response\.{status}"):
        await model.get_response(
            system_instructions=None,
            input="hi",
            model_settings=ModelSettings(),
            tools=[],
            output_schema=None,
            handoffs=[],
            tracing=ModelTracing.DISABLED,
            previous_response_id=None,
            conversation_id=None,
            prompt=None,
        )


@pytest.mark.allow_call_model_methods
@pytest.mark.asyncio
async def test_any_llm_responses_path_passes_transport_kwargs_via_private_provider_api(
Expand Down
28 changes: 28 additions & 0 deletions tests/models/test_openai_responses.py
Original file line number Diff line number Diff line change
Expand Up @@ -312,6 +312,34 @@ def __init__(self):
}


@pytest.mark.allow_call_model_methods
@pytest.mark.asyncio
@pytest.mark.parametrize("status", ["failed", "incomplete"])
async def test_get_response_rejects_failed_or_incomplete_response_status(status: str) -> None:
    """OpenAIResponsesModel.get_response must raise on terminal failure statuses.

    A response whose `status` is `failed` or `incomplete` should surface as a
    ModelBehaviorError rather than being returned to the caller.
    """

    class DummyResponses:
        # Stand-in for the SDK's `client.responses` namespace: returns a
        # canned response stamped with the terminal status under test.
        async def create(self, **kwargs: Any) -> Any:
            response = get_response_obj([], response_id="resp-terminal")
            response.status = status  # type: ignore[assignment]
            return response

    class DummyResponsesClient:
        # Minimal client surface: only `.responses` is accessed by the model.
        def __init__(self) -> None:
            self.responses = DummyResponses()

    model = OpenAIResponsesModel(model="gpt-4", openai_client=DummyResponsesClient())  # type: ignore[arg-type]

    # `match` is a regex (re.search); escape the dot so the assertion pins
    # the literal "response.<status>" marker rather than any character.
    with pytest.raises(ModelBehaviorError, match=rf"response\.{status}"):
        await model.get_response(
            system_instructions=None,
            input="hi",
            model_settings=ModelSettings(),
            tools=[],
            output_schema=None,
            handoffs=[],
            tracing=ModelTracing.DISABLED,
        )


def test_get_client_disables_provider_managed_retries_on_runner_retry() -> None:
class DummyResponsesClient:
def __init__(self) -> None:
Expand Down
Loading