From cfc87fb65a3a29f0584b7dffccc79f2442307fa9 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 22 Aug 2025 23:17:48 +0000
Subject: [PATCH 01/15] feat(api): api update
---
.stats.yml | 2 +-
src/codex/types/projects/query_log_list_groups_response.py | 3 +++
2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/.stats.yml b/.stats.yml
index 890def2..c5d767f 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,3 +1,3 @@
configured_endpoints: 55
-openapi_spec_hash: 7daf4896ba4932714f8fe4fff277d7c7
+openapi_spec_hash: d69252c7252423bc98ecc17807eb33ec
config_hash: bed87752f4056d0c4bf2ddf856307800
diff --git a/src/codex/types/projects/query_log_list_groups_response.py b/src/codex/types/projects/query_log_list_groups_response.py
index 7b2d44c..7dbf192 100644
--- a/src/codex/types/projects/query_log_list_groups_response.py
+++ b/src/codex/types/projects/query_log_list_groups_response.py
@@ -337,6 +337,9 @@ class QueryLogListGroupsResponse(BaseModel):
formatted_original_question: Optional[str] = None
+ impact_score: float
+ """Impact score used for prioritization sorting"""
+
is_bad_response: bool
needs_review: bool
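For reference, a rough usage sketch of the new field (not part of the patch itself): it assumes a configured Codex client, reuses the placeholder project UUID from the generated tests, and assumes the offset page is iterable like the SDK's other paginated list methods.

from codex import Codex

client = Codex()  # client construction / auth details omitted here

# Placeholder project UUID (the same value the generated tests use).
project_id = "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e"

# list_groups returns SyncOffsetPageQueryLogGroups[QueryLogListGroupsResponse];
# each item now carries impact_score, documented as the score used for
# prioritization sorting.
groups = list(client.projects.query_logs.list_groups(project_id))
for group in sorted(groups, key=lambda g: g.impact_score, reverse=True):
    print(group.impact_score, group.is_bad_response, group.needs_review)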
From 4151cf288ea4f75ac7c6eb5c07c9861d67b5ad92 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Sat, 23 Aug 2025 01:17:37 +0000
Subject: [PATCH 02/15] feat(api): api update
---
.stats.yml | 4 +-
api.md | 2 -
src/codex/resources/projects/projects.py | 22 ++--
src/codex/resources/projects/query_logs.py | 102 +---------------
src/codex/resources/tlm.py | 44 ++++---
src/codex/types/project_validate_params.py | 13 +-
src/codex/types/project_validate_response.py | 3 -
src/codex/types/projects/__init__.py | 2 -
.../query_log_add_user_feedback_params.py | 14 ---
.../query_log_add_user_feedback_response.py | 11 --
.../query_log_list_groups_response.py | 3 -
src/codex/types/tlm_prompt_params.py | 13 +-
src/codex/types/tlm_score_params.py | 13 +-
.../api_resources/projects/test_query_logs.py | 115 ------------------
tests/api_resources/test_projects.py | 2 -
tests/api_resources/test_tlm.py | 4 -
16 files changed, 57 insertions(+), 310 deletions(-)
delete mode 100644 src/codex/types/projects/query_log_add_user_feedback_params.py
delete mode 100644 src/codex/types/projects/query_log_add_user_feedback_response.py
diff --git a/.stats.yml b/.stats.yml
index c5d767f..362b30b 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,3 +1,3 @@
-configured_endpoints: 55
-openapi_spec_hash: d69252c7252423bc98ecc17807eb33ec
+configured_endpoints: 54
+openapi_spec_hash: 04e1b7aefbeff10daab249b153de147f
config_hash: bed87752f4056d0c4bf2ddf856307800
diff --git a/api.md b/api.md
index 693ee75..1646d0b 100644
--- a/api.md
+++ b/api.md
@@ -202,7 +202,6 @@ Types:
from codex.types.projects import (
QueryLogRetrieveResponse,
QueryLogListResponse,
- QueryLogAddUserFeedbackResponse,
QueryLogListByGroupResponse,
QueryLogListGroupsResponse,
QueryLogStartRemediationResponse,
@@ -213,7 +212,6 @@ Methods:
- client.projects.query_logs.retrieve(query_log_id, \*, project_id) -> QueryLogRetrieveResponse
- client.projects.query_logs.list(project_id, \*\*params) -> SyncOffsetPageQueryLogs[QueryLogListResponse]
-- client.projects.query_logs.add_user_feedback(query_log_id, \*, project_id, \*\*params) -> QueryLogAddUserFeedbackResponse
- client.projects.query_logs.list_by_group(project_id, \*\*params) -> QueryLogListByGroupResponse
- client.projects.query_logs.list_groups(project_id, \*\*params) -> SyncOffsetPageQueryLogGroups[QueryLogListGroupsResponse]
- client.projects.query_logs.start_remediation(query_log_id, \*, project_id) -> QueryLogStartRemediationResponse
diff --git a/src/codex/resources/projects/projects.py b/src/codex/resources/projects/projects.py
index 319097f..c4d5597 100644
--- a/src/codex/resources/projects/projects.py
+++ b/src/codex/resources/projects/projects.py
@@ -527,11 +527,12 @@ def validate(
"claude-3.5-sonnet", "claude-3.5-haiku", "claude-3-haiku", "nova-micro",
"nova-lite", "nova-pro"}, default = "gpt-4.1-mini"): Underlying base LLM to use
(better models yield better results, faster models yield faster results). -
- Models still in beta: "o3", "o1", "o4-mini", "o3-mini", "o1-mini",
- "gpt-4.5-preview", "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet",
- "claude-3.5-haiku". - Recommended models for accuracy: "gpt-5", "gpt-4.1",
- "o4-mini", "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models
- for low latency/costs: "gpt-4.1-nano", "nova-micro".
+ Models still in beta: "gpt-5", "gpt-5-mini", "gpt-5-nano", "o3", "o1",
+ "o4-mini", "o3-mini", "o1-mini", "gpt-4.5-preview", "claude-opus-4-0",
+ "claude-sonnet-4-0", "claude-3.7-sonnet", "claude-3.5-haiku". - Recommended
+ models for accuracy: "gpt-5", "gpt-4.1", "o4-mini", "o3", "claude-opus-4-0",
+ "claude-sonnet-4-0". - Recommended models for low latency/costs: "gpt-4.1-nano",
+ "nova-micro".
log (list[str], default = []): optionally specify additional logs or metadata that TLM should return.
For instance, include "explanation" here to get explanations of why a response is scored with low trustworthiness.
@@ -1105,11 +1106,12 @@ async def validate(
"claude-3.5-sonnet", "claude-3.5-haiku", "claude-3-haiku", "nova-micro",
"nova-lite", "nova-pro"}, default = "gpt-4.1-mini"): Underlying base LLM to use
(better models yield better results, faster models yield faster results). -
- Models still in beta: "o3", "o1", "o4-mini", "o3-mini", "o1-mini",
- "gpt-4.5-preview", "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet",
- "claude-3.5-haiku". - Recommended models for accuracy: "gpt-5", "gpt-4.1",
- "o4-mini", "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models
- for low latency/costs: "gpt-4.1-nano", "nova-micro".
+ Models still in beta: "gpt-5", "gpt-5-mini", "gpt-5-nano", "o3", "o1",
+ "o4-mini", "o3-mini", "o1-mini", "gpt-4.5-preview", "claude-opus-4-0",
+ "claude-sonnet-4-0", "claude-3.7-sonnet", "claude-3.5-haiku". - Recommended
+ models for accuracy: "gpt-5", "gpt-4.1", "o4-mini", "o3", "claude-opus-4-0",
+ "claude-sonnet-4-0". - Recommended models for low latency/costs: "gpt-4.1-nano",
+ "nova-micro".
log (list[str], default = []): optionally specify additional logs or metadata that TLM should return.
For instance, include "explanation" here to get explanations of why a response is scored with low trustworthiness.
diff --git a/src/codex/resources/projects/query_logs.py b/src/codex/resources/projects/query_logs.py
index e3b17ab..4527743 100644
--- a/src/codex/resources/projects/query_logs.py
+++ b/src/codex/resources/projects/query_logs.py
@@ -25,17 +25,11 @@
AsyncOffsetPageQueryLogGroups,
)
from ..._base_client import AsyncPaginator, make_request_options
-from ...types.projects import (
- query_log_list_params,
- query_log_list_groups_params,
- query_log_list_by_group_params,
- query_log_add_user_feedback_params,
-)
+from ...types.projects import query_log_list_params, query_log_list_groups_params, query_log_list_by_group_params
from ...types.projects.query_log_list_response import QueryLogListResponse
from ...types.projects.query_log_retrieve_response import QueryLogRetrieveResponse
from ...types.projects.query_log_list_groups_response import QueryLogListGroupsResponse
from ...types.projects.query_log_list_by_group_response import QueryLogListByGroupResponse
-from ...types.projects.query_log_add_user_feedback_response import QueryLogAddUserFeedbackResponse
from ...types.projects.query_log_start_remediation_response import QueryLogStartRemediationResponse
__all__ = ["QueryLogsResource", "AsyncQueryLogsResource"]
@@ -190,46 +184,6 @@ def list(
model=QueryLogListResponse,
)
- def add_user_feedback(
- self,
- query_log_id: str,
- *,
- project_id: str,
- key: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> QueryLogAddUserFeedbackResponse:
- """
- Add User Feedback Route
-
- Args:
- key: A key describing the criteria of the feedback, eg 'rating'
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- if not query_log_id:
- raise ValueError(f"Expected a non-empty value for `query_log_id` but received {query_log_id!r}")
- return self._post(
- f"/api/projects/{project_id}/query_logs/{query_log_id}/user_feedback",
- body=maybe_transform({"key": key}, query_log_add_user_feedback_params.QueryLogAddUserFeedbackParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=QueryLogAddUserFeedbackResponse,
- )
-
def list_by_group(
self,
project_id: str,
@@ -614,48 +568,6 @@ def list(
model=QueryLogListResponse,
)
- async def add_user_feedback(
- self,
- query_log_id: str,
- *,
- project_id: str,
- key: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> QueryLogAddUserFeedbackResponse:
- """
- Add User Feedback Route
-
- Args:
- key: A key describing the criteria of the feedback, eg 'rating'
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not project_id:
- raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
- if not query_log_id:
- raise ValueError(f"Expected a non-empty value for `query_log_id` but received {query_log_id!r}")
- return await self._post(
- f"/api/projects/{project_id}/query_logs/{query_log_id}/user_feedback",
- body=await async_maybe_transform(
- {"key": key}, query_log_add_user_feedback_params.QueryLogAddUserFeedbackParams
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=QueryLogAddUserFeedbackResponse,
- )
-
async def list_by_group(
self,
project_id: str,
@@ -901,9 +813,6 @@ def __init__(self, query_logs: QueryLogsResource) -> None:
self.list = to_raw_response_wrapper(
query_logs.list,
)
- self.add_user_feedback = to_raw_response_wrapper(
- query_logs.add_user_feedback,
- )
self.list_by_group = to_raw_response_wrapper(
query_logs.list_by_group,
)
@@ -925,9 +834,6 @@ def __init__(self, query_logs: AsyncQueryLogsResource) -> None:
self.list = async_to_raw_response_wrapper(
query_logs.list,
)
- self.add_user_feedback = async_to_raw_response_wrapper(
- query_logs.add_user_feedback,
- )
self.list_by_group = async_to_raw_response_wrapper(
query_logs.list_by_group,
)
@@ -949,9 +855,6 @@ def __init__(self, query_logs: QueryLogsResource) -> None:
self.list = to_streamed_response_wrapper(
query_logs.list,
)
- self.add_user_feedback = to_streamed_response_wrapper(
- query_logs.add_user_feedback,
- )
self.list_by_group = to_streamed_response_wrapper(
query_logs.list_by_group,
)
@@ -973,9 +876,6 @@ def __init__(self, query_logs: AsyncQueryLogsResource) -> None:
self.list = async_to_streamed_response_wrapper(
query_logs.list,
)
- self.add_user_feedback = async_to_streamed_response_wrapper(
- query_logs.add_user_feedback,
- )
self.list_by_group = async_to_streamed_response_wrapper(
query_logs.list_by_group,
)
diff --git a/src/codex/resources/tlm.py b/src/codex/resources/tlm.py
index 5d66ec0..2483e66 100644
--- a/src/codex/resources/tlm.py
+++ b/src/codex/resources/tlm.py
@@ -101,11 +101,12 @@ def prompt(
"claude-3.5-sonnet", "claude-3.5-haiku", "claude-3-haiku", "nova-micro",
"nova-lite", "nova-pro"}, default = "gpt-4.1-mini"): Underlying base LLM to use
(better models yield better results, faster models yield faster results). -
- Models still in beta: "o3", "o1", "o4-mini", "o3-mini", "o1-mini",
- "gpt-4.5-preview", "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet",
- "claude-3.5-haiku". - Recommended models for accuracy: "gpt-5", "gpt-4.1",
- "o4-mini", "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models
- for low latency/costs: "gpt-4.1-nano", "nova-micro".
+ Models still in beta: "gpt-5", "gpt-5-mini", "gpt-5-nano", "o3", "o1",
+ "o4-mini", "o3-mini", "o1-mini", "gpt-4.5-preview", "claude-opus-4-0",
+ "claude-sonnet-4-0", "claude-3.7-sonnet", "claude-3.5-haiku". - Recommended
+ models for accuracy: "gpt-5", "gpt-4.1", "o4-mini", "o3", "claude-opus-4-0",
+ "claude-sonnet-4-0". - Recommended models for low latency/costs: "gpt-4.1-nano",
+ "nova-micro".
log (list[str], default = []): optionally specify additional logs or metadata that TLM should return.
For instance, include "explanation" here to get explanations of why a response is scored with low trustworthiness.
@@ -244,11 +245,12 @@ def score(
"claude-3.5-sonnet", "claude-3.5-haiku", "claude-3-haiku", "nova-micro",
"nova-lite", "nova-pro"}, default = "gpt-4.1-mini"): Underlying base LLM to use
(better models yield better results, faster models yield faster results). -
- Models still in beta: "o3", "o1", "o4-mini", "o3-mini", "o1-mini",
- "gpt-4.5-preview", "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet",
- "claude-3.5-haiku". - Recommended models for accuracy: "gpt-5", "gpt-4.1",
- "o4-mini", "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models
- for low latency/costs: "gpt-4.1-nano", "nova-micro".
+ Models still in beta: "gpt-5", "gpt-5-mini", "gpt-5-nano", "o3", "o1",
+ "o4-mini", "o3-mini", "o1-mini", "gpt-4.5-preview", "claude-opus-4-0",
+ "claude-sonnet-4-0", "claude-3.7-sonnet", "claude-3.5-haiku". - Recommended
+ models for accuracy: "gpt-5", "gpt-4.1", "o4-mini", "o3", "claude-opus-4-0",
+ "claude-sonnet-4-0". - Recommended models for low latency/costs: "gpt-4.1-nano",
+ "nova-micro".
log (list[str], default = []): optionally specify additional logs or metadata that TLM should return.
For instance, include "explanation" here to get explanations of why a response is scored with low trustworthiness.
@@ -403,11 +405,12 @@ async def prompt(
"claude-3.5-sonnet", "claude-3.5-haiku", "claude-3-haiku", "nova-micro",
"nova-lite", "nova-pro"}, default = "gpt-4.1-mini"): Underlying base LLM to use
(better models yield better results, faster models yield faster results). -
- Models still in beta: "o3", "o1", "o4-mini", "o3-mini", "o1-mini",
- "gpt-4.5-preview", "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet",
- "claude-3.5-haiku". - Recommended models for accuracy: "gpt-5", "gpt-4.1",
- "o4-mini", "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models
- for low latency/costs: "gpt-4.1-nano", "nova-micro".
+ Models still in beta: "gpt-5", "gpt-5-mini", "gpt-5-nano", "o3", "o1",
+ "o4-mini", "o3-mini", "o1-mini", "gpt-4.5-preview", "claude-opus-4-0",
+ "claude-sonnet-4-0", "claude-3.7-sonnet", "claude-3.5-haiku". - Recommended
+ models for accuracy: "gpt-5", "gpt-4.1", "o4-mini", "o3", "claude-opus-4-0",
+ "claude-sonnet-4-0". - Recommended models for low latency/costs: "gpt-4.1-nano",
+ "nova-micro".
log (list[str], default = []): optionally specify additional logs or metadata that TLM should return.
For instance, include "explanation" here to get explanations of why a response is scored with low trustworthiness.
@@ -546,11 +549,12 @@ async def score(
"claude-3.5-sonnet", "claude-3.5-haiku", "claude-3-haiku", "nova-micro",
"nova-lite", "nova-pro"}, default = "gpt-4.1-mini"): Underlying base LLM to use
(better models yield better results, faster models yield faster results). -
- Models still in beta: "o3", "o1", "o4-mini", "o3-mini", "o1-mini",
- "gpt-4.5-preview", "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet",
- "claude-3.5-haiku". - Recommended models for accuracy: "gpt-5", "gpt-4.1",
- "o4-mini", "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models
- for low latency/costs: "gpt-4.1-nano", "nova-micro".
+ Models still in beta: "gpt-5", "gpt-5-mini", "gpt-5-nano", "o3", "o1",
+ "o4-mini", "o3-mini", "o1-mini", "gpt-4.5-preview", "claude-opus-4-0",
+ "claude-sonnet-4-0", "claude-3.7-sonnet", "claude-3.5-haiku". - Recommended
+ models for accuracy: "gpt-5", "gpt-4.1", "o4-mini", "o3", "claude-opus-4-0",
+ "claude-sonnet-4-0". - Recommended models for low latency/costs: "gpt-4.1-nano",
+ "nova-micro".
log (list[str], default = []): optionally specify additional logs or metadata that TLM should return.
For instance, include "explanation" here to get explanations of why a response is scored with low trustworthiness.
diff --git a/src/codex/types/project_validate_params.py b/src/codex/types/project_validate_params.py
index 719ad3d..4807463 100644
--- a/src/codex/types/project_validate_params.py
+++ b/src/codex/types/project_validate_params.py
@@ -130,11 +130,12 @@ class ProjectValidateParams(TypedDict, total=False):
"claude-3.5-sonnet", "claude-3.5-haiku", "claude-3-haiku", "nova-micro",
"nova-lite", "nova-pro"}, default = "gpt-4.1-mini"): Underlying base LLM to use
(better models yield better results, faster models yield faster results). -
- Models still in beta: "o3", "o1", "o4-mini", "o3-mini", "o1-mini",
- "gpt-4.5-preview", "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet",
- "claude-3.5-haiku". - Recommended models for accuracy: "gpt-5", "gpt-4.1",
- "o4-mini", "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models
- for low latency/costs: "gpt-4.1-nano", "nova-micro".
+ Models still in beta: "gpt-5", "gpt-5-mini", "gpt-5-nano", "o3", "o1",
+ "o4-mini", "o3-mini", "o1-mini", "gpt-4.5-preview", "claude-opus-4-0",
+ "claude-sonnet-4-0", "claude-3.7-sonnet", "claude-3.5-haiku". - Recommended
+ models for accuracy: "gpt-5", "gpt-4.1", "o4-mini", "o3", "claude-opus-4-0",
+ "claude-sonnet-4-0". - Recommended models for low latency/costs: "gpt-4.1-nano",
+ "nova-micro".
log (list[str], default = []): optionally specify additional logs or metadata that TLM should return.
For instance, include "explanation" here to get explanations of why a response is scored with low trustworthiness.
@@ -652,8 +653,6 @@ class MessageChatCompletionDeveloperMessageParam(TypedDict, total=False):
class Options(TypedDict, total=False):
custom_eval_criteria: Iterable[object]
- disable_persistence: bool
-
disable_trustworthiness: bool
log: List[str]
diff --git a/src/codex/types/project_validate_response.py b/src/codex/types/project_validate_response.py
index 003b676..4488311 100644
--- a/src/codex/types/project_validate_response.py
+++ b/src/codex/types/project_validate_response.py
@@ -59,9 +59,6 @@ class ProjectValidateResponse(BaseModel):
to answer, if it does not already exist.
"""
- log_id: str
- """The UUID of the query log entry created for this validation request."""
-
should_guardrail: bool
"""
True if the response should be guardrailed by the AI system, False if the
diff --git a/src/codex/types/projects/__init__.py b/src/codex/types/projects/__init__.py
index b90cbd8..cb2989f 100644
--- a/src/codex/types/projects/__init__.py
+++ b/src/codex/types/projects/__init__.py
@@ -27,8 +27,6 @@
from .remediation_edit_answer_params import RemediationEditAnswerParams as RemediationEditAnswerParams
from .query_log_list_by_group_response import QueryLogListByGroupResponse as QueryLogListByGroupResponse
from .remediation_edit_answer_response import RemediationEditAnswerResponse as RemediationEditAnswerResponse
-from .query_log_add_user_feedback_params import QueryLogAddUserFeedbackParams as QueryLogAddUserFeedbackParams
-from .query_log_add_user_feedback_response import QueryLogAddUserFeedbackResponse as QueryLogAddUserFeedbackResponse
from .query_log_start_remediation_response import QueryLogStartRemediationResponse as QueryLogStartRemediationResponse
from .remediation_edit_draft_answer_params import RemediationEditDraftAnswerParams as RemediationEditDraftAnswerParams
from .remediation_edit_draft_answer_response import (
diff --git a/src/codex/types/projects/query_log_add_user_feedback_params.py b/src/codex/types/projects/query_log_add_user_feedback_params.py
deleted file mode 100644
index e841892..0000000
--- a/src/codex/types/projects/query_log_add_user_feedback_params.py
+++ /dev/null
@@ -1,14 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Required, TypedDict
-
-__all__ = ["QueryLogAddUserFeedbackParams"]
-
-
-class QueryLogAddUserFeedbackParams(TypedDict, total=False):
- project_id: Required[str]
-
- key: Required[str]
- """A key describing the criteria of the feedback, eg 'rating'"""
diff --git a/src/codex/types/projects/query_log_add_user_feedback_response.py b/src/codex/types/projects/query_log_add_user_feedback_response.py
deleted file mode 100644
index adec25f..0000000
--- a/src/codex/types/projects/query_log_add_user_feedback_response.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from ..._models import BaseModel
-
-__all__ = ["QueryLogAddUserFeedbackResponse"]
-
-
-class QueryLogAddUserFeedbackResponse(BaseModel):
- custom_metadata: object
-
- query_log_id: str
diff --git a/src/codex/types/projects/query_log_list_groups_response.py b/src/codex/types/projects/query_log_list_groups_response.py
index 7dbf192..7b2d44c 100644
--- a/src/codex/types/projects/query_log_list_groups_response.py
+++ b/src/codex/types/projects/query_log_list_groups_response.py
@@ -337,9 +337,6 @@ class QueryLogListGroupsResponse(BaseModel):
formatted_original_question: Optional[str] = None
- impact_score: float
- """Impact score used for prioritization sorting"""
-
is_bad_response: bool
needs_review: bool
diff --git a/src/codex/types/tlm_prompt_params.py b/src/codex/types/tlm_prompt_params.py
index 821c381..aaa8b32 100644
--- a/src/codex/types/tlm_prompt_params.py
+++ b/src/codex/types/tlm_prompt_params.py
@@ -52,11 +52,12 @@ class TlmPromptParams(TypedDict, total=False):
"claude-3.5-sonnet", "claude-3.5-haiku", "claude-3-haiku", "nova-micro",
"nova-lite", "nova-pro"}, default = "gpt-4.1-mini"): Underlying base LLM to use
(better models yield better results, faster models yield faster results). -
- Models still in beta: "o3", "o1", "o4-mini", "o3-mini", "o1-mini",
- "gpt-4.5-preview", "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet",
- "claude-3.5-haiku". - Recommended models for accuracy: "gpt-5", "gpt-4.1",
- "o4-mini", "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models
- for low latency/costs: "gpt-4.1-nano", "nova-micro".
+ Models still in beta: "gpt-5", "gpt-5-mini", "gpt-5-nano", "o3", "o1",
+ "o4-mini", "o3-mini", "o1-mini", "gpt-4.5-preview", "claude-opus-4-0",
+ "claude-sonnet-4-0", "claude-3.7-sonnet", "claude-3.5-haiku". - Recommended
+ models for accuracy: "gpt-5", "gpt-4.1", "o4-mini", "o3", "claude-opus-4-0",
+ "claude-sonnet-4-0". - Recommended models for low latency/costs: "gpt-4.1-nano",
+ "nova-micro".
log (list[str], default = []): optionally specify additional logs or metadata that TLM should return.
For instance, include "explanation" here to get explanations of why a response is scored with low trustworthiness.
@@ -115,8 +116,6 @@ class TlmPromptParams(TypedDict, total=False):
class Options(TypedDict, total=False):
custom_eval_criteria: Iterable[object]
- disable_persistence: bool
-
disable_trustworthiness: bool
log: List[str]
diff --git a/src/codex/types/tlm_score_params.py b/src/codex/types/tlm_score_params.py
index d676a1d..a5a75c6 100644
--- a/src/codex/types/tlm_score_params.py
+++ b/src/codex/types/tlm_score_params.py
@@ -54,11 +54,12 @@ class TlmScoreParams(TypedDict, total=False):
"claude-3.5-sonnet", "claude-3.5-haiku", "claude-3-haiku", "nova-micro",
"nova-lite", "nova-pro"}, default = "gpt-4.1-mini"): Underlying base LLM to use
(better models yield better results, faster models yield faster results). -
- Models still in beta: "o3", "o1", "o4-mini", "o3-mini", "o1-mini",
- "gpt-4.5-preview", "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet",
- "claude-3.5-haiku". - Recommended models for accuracy: "gpt-5", "gpt-4.1",
- "o4-mini", "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models
- for low latency/costs: "gpt-4.1-nano", "nova-micro".
+ Models still in beta: "gpt-5", "gpt-5-mini", "gpt-5-nano", "o3", "o1",
+ "o4-mini", "o3-mini", "o1-mini", "gpt-4.5-preview", "claude-opus-4-0",
+ "claude-sonnet-4-0", "claude-3.7-sonnet", "claude-3.5-haiku". - Recommended
+ models for accuracy: "gpt-5", "gpt-4.1", "o4-mini", "o3", "claude-opus-4-0",
+ "claude-sonnet-4-0". - Recommended models for low latency/costs: "gpt-4.1-nano",
+ "nova-micro".
log (list[str], default = []): optionally specify additional logs or metadata that TLM should return.
For instance, include "explanation" here to get explanations of why a response is scored with low trustworthiness.
@@ -117,8 +118,6 @@ class TlmScoreParams(TypedDict, total=False):
class Options(TypedDict, total=False):
custom_eval_criteria: Iterable[object]
- disable_persistence: bool
-
disable_trustworthiness: bool
log: List[str]
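Taken together, these Options edits drop disable_persistence as an accepted key in this revision. A minimal sketch of building the remaining options (keys are the ones visible in the updated TypedDicts and tests; values are illustrative), which the tests pass as options= to the TLM prompt/score methods:

from codex.types import tlm_prompt_params

options: tlm_prompt_params.Options = {
    "custom_eval_criteria": [{}],
    "disable_trustworthiness": True,
    "log": ["explanation"],  # per the docstring, "explanation" requests trustworthiness explanations
    "max_tokens": 0,
}

The Options TypedDicts in project_validate_params and tlm_score_params are edited identically in this patch.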
diff --git a/tests/api_resources/projects/test_query_logs.py b/tests/api_resources/projects/test_query_logs.py
index 05d3f46..5f7e02c 100644
--- a/tests/api_resources/projects/test_query_logs.py
+++ b/tests/api_resources/projects/test_query_logs.py
@@ -21,7 +21,6 @@
QueryLogRetrieveResponse,
QueryLogListGroupsResponse,
QueryLogListByGroupResponse,
- QueryLogAddUserFeedbackResponse,
QueryLogStartRemediationResponse,
)
@@ -147,63 +146,6 @@ def test_path_params_list(self, client: Codex) -> None:
project_id="",
)
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_method_add_user_feedback(self, client: Codex) -> None:
- query_log = client.projects.query_logs.add_user_feedback(
- query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- key="key",
- )
- assert_matches_type(QueryLogAddUserFeedbackResponse, query_log, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_raw_response_add_user_feedback(self, client: Codex) -> None:
- response = client.projects.query_logs.with_raw_response.add_user_feedback(
- query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- key="key",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- query_log = response.parse()
- assert_matches_type(QueryLogAddUserFeedbackResponse, query_log, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_streaming_response_add_user_feedback(self, client: Codex) -> None:
- with client.projects.query_logs.with_streaming_response.add_user_feedback(
- query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- key="key",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- query_log = response.parse()
- assert_matches_type(QueryLogAddUserFeedbackResponse, query_log, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_path_params_add_user_feedback(self, client: Codex) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- client.projects.query_logs.with_raw_response.add_user_feedback(
- query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- project_id="",
- key="key",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `query_log_id` but received ''"):
- client.projects.query_logs.with_raw_response.add_user_feedback(
- query_log_id="",
- project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- key="key",
- )
-
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
def test_method_list_by_group(self, client: Codex) -> None:
@@ -509,63 +451,6 @@ async def test_path_params_list(self, async_client: AsyncCodex) -> None:
project_id="",
)
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_method_add_user_feedback(self, async_client: AsyncCodex) -> None:
- query_log = await async_client.projects.query_logs.add_user_feedback(
- query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- key="key",
- )
- assert_matches_type(QueryLogAddUserFeedbackResponse, query_log, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_raw_response_add_user_feedback(self, async_client: AsyncCodex) -> None:
- response = await async_client.projects.query_logs.with_raw_response.add_user_feedback(
- query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- key="key",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- query_log = await response.parse()
- assert_matches_type(QueryLogAddUserFeedbackResponse, query_log, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_streaming_response_add_user_feedback(self, async_client: AsyncCodex) -> None:
- async with async_client.projects.query_logs.with_streaming_response.add_user_feedback(
- query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- key="key",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- query_log = await response.parse()
- assert_matches_type(QueryLogAddUserFeedbackResponse, query_log, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_path_params_add_user_feedback(self, async_client: AsyncCodex) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
- await async_client.projects.query_logs.with_raw_response.add_user_feedback(
- query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- project_id="",
- key="key",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `query_log_id` but received ''"):
- await async_client.projects.query_logs.with_raw_response.add_user_feedback(
- query_log_id="",
- project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- key="key",
- )
-
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
async def test_method_list_by_group(self, async_client: AsyncCodex) -> None:
diff --git a/tests/api_resources/test_projects.py b/tests/api_resources/test_projects.py
index 04eef99..586b0fa 100644
--- a/tests/api_resources/test_projects.py
+++ b/tests/api_resources/test_projects.py
@@ -617,7 +617,6 @@ def test_method_validate_with_all_params(self, client: Codex) -> None:
],
options={
"custom_eval_criteria": [{}],
- "disable_persistence": True,
"disable_trustworthiness": True,
"log": ["string"],
"max_tokens": 0,
@@ -1293,7 +1292,6 @@ async def test_method_validate_with_all_params(self, async_client: AsyncCodex) -
],
options={
"custom_eval_criteria": [{}],
- "disable_persistence": True,
"disable_trustworthiness": True,
"log": ["string"],
"max_tokens": 0,
diff --git a/tests/api_resources/test_tlm.py b/tests/api_resources/test_tlm.py
index 6c8c177..fd977f6 100644
--- a/tests/api_resources/test_tlm.py
+++ b/tests/api_resources/test_tlm.py
@@ -33,7 +33,6 @@ def test_method_prompt_with_all_params(self, client: Codex) -> None:
constrain_outputs=["string"],
options={
"custom_eval_criteria": [{}],
- "disable_persistence": True,
"disable_trustworthiness": True,
"log": ["string"],
"max_tokens": 0,
@@ -94,7 +93,6 @@ def test_method_score_with_all_params(self, client: Codex) -> None:
constrain_outputs=["string"],
options={
"custom_eval_criteria": [{}],
- "disable_persistence": True,
"disable_trustworthiness": True,
"log": ["string"],
"max_tokens": 0,
@@ -161,7 +159,6 @@ async def test_method_prompt_with_all_params(self, async_client: AsyncCodex) ->
constrain_outputs=["string"],
options={
"custom_eval_criteria": [{}],
- "disable_persistence": True,
"disable_trustworthiness": True,
"log": ["string"],
"max_tokens": 0,
@@ -222,7 +219,6 @@ async def test_method_score_with_all_params(self, async_client: AsyncCodex) -> N
constrain_outputs=["string"],
options={
"custom_eval_criteria": [{}],
- "disable_persistence": True,
"disable_trustworthiness": True,
"log": ["string"],
"max_tokens": 0,
From 052599aee2e484d9cc305983da900878374db17d Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 26 Aug 2025 03:45:14 +0000
Subject: [PATCH 03/15] chore(internal): change ci workflow machines
---
.github/workflows/ci.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 4015238..49f178b 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -42,7 +42,7 @@ jobs:
permissions:
contents: read
id-token: write
- runs-on: depot-ubuntu-24.04
+ runs-on: ${{ github.repository == 'stainless-sdks/codex-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
steps:
- uses: actions/checkout@v4
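The new runs-on value relies on the Actions &&/|| short-circuit idiom as a ternary: the upstream stainless-sdks/codex-python repository keeps the Depot runner, while forks fall back to ubuntu-latest. In rough Python terms (illustrative only):

# github.repository as exposed in the workflow's expression context
repository = "stainless-sdks/codex-python"
runs_on = "depot-ubuntu-24.04" if repository == "stainless-sdks/codex-python" else "ubuntu-latest"
print(runs_on)  # "depot-ubuntu-24.04" for the upstream value above; forks would get "ubuntu-latest"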
From ecbda1ca9f1aaf40a325e105cc0659ea0e318511 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 26 Aug 2025 17:17:45 +0000
Subject: [PATCH 04/15] feat(api): api update
---
.stats.yml | 4 +-
api.md | 2 +
src/codex/resources/projects/projects.py | 22 ++--
src/codex/resources/projects/query_logs.py | 102 +++++++++++++++-
src/codex/resources/tlm.py | 44 +++----
src/codex/types/project_validate_params.py | 13 +-
src/codex/types/project_validate_response.py | 3 +
src/codex/types/projects/__init__.py | 2 +
.../query_log_add_user_feedback_params.py | 14 +++
.../query_log_add_user_feedback_response.py | 11 ++
.../query_log_list_groups_response.py | 3 +
src/codex/types/tlm_prompt_params.py | 13 +-
src/codex/types/tlm_score_params.py | 13 +-
.../api_resources/projects/test_query_logs.py | 115 ++++++++++++++++++
tests/api_resources/test_projects.py | 2 +
tests/api_resources/test_tlm.py | 4 +
16 files changed, 310 insertions(+), 57 deletions(-)
create mode 100644 src/codex/types/projects/query_log_add_user_feedback_params.py
create mode 100644 src/codex/types/projects/query_log_add_user_feedback_response.py
diff --git a/.stats.yml b/.stats.yml
index 362b30b..c5d767f 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,3 +1,3 @@
-configured_endpoints: 54
-openapi_spec_hash: 04e1b7aefbeff10daab249b153de147f
+configured_endpoints: 55
+openapi_spec_hash: d69252c7252423bc98ecc17807eb33ec
config_hash: bed87752f4056d0c4bf2ddf856307800
diff --git a/api.md b/api.md
index 1646d0b..693ee75 100644
--- a/api.md
+++ b/api.md
@@ -202,6 +202,7 @@ Types:
from codex.types.projects import (
QueryLogRetrieveResponse,
QueryLogListResponse,
+ QueryLogAddUserFeedbackResponse,
QueryLogListByGroupResponse,
QueryLogListGroupsResponse,
QueryLogStartRemediationResponse,
@@ -212,6 +213,7 @@ Methods:
- client.projects.query_logs.retrieve(query_log_id, \*, project_id) -> QueryLogRetrieveResponse
- client.projects.query_logs.list(project_id, \*\*params) -> SyncOffsetPageQueryLogs[QueryLogListResponse]
+- client.projects.query_logs.add_user_feedback(query_log_id, \*, project_id, \*\*params) -> QueryLogAddUserFeedbackResponse
- client.projects.query_logs.list_by_group(project_id, \*\*params) -> QueryLogListByGroupResponse
- client.projects.query_logs.list_groups(project_id, \*\*params) -> SyncOffsetPageQueryLogGroups[QueryLogListGroupsResponse]
- client.projects.query_logs.start_remediation(query_log_id, \*, project_id) -> QueryLogStartRemediationResponse
diff --git a/src/codex/resources/projects/projects.py b/src/codex/resources/projects/projects.py
index c4d5597..319097f 100644
--- a/src/codex/resources/projects/projects.py
+++ b/src/codex/resources/projects/projects.py
@@ -527,12 +527,11 @@ def validate(
"claude-3.5-sonnet", "claude-3.5-haiku", "claude-3-haiku", "nova-micro",
"nova-lite", "nova-pro"}, default = "gpt-4.1-mini"): Underlying base LLM to use
(better models yield better results, faster models yield faster results). -
- Models still in beta: "gpt-5", "gpt-5-mini", "gpt-5-nano", "o3", "o1",
- "o4-mini", "o3-mini", "o1-mini", "gpt-4.5-preview", "claude-opus-4-0",
- "claude-sonnet-4-0", "claude-3.7-sonnet", "claude-3.5-haiku". - Recommended
- models for accuracy: "gpt-5", "gpt-4.1", "o4-mini", "o3", "claude-opus-4-0",
- "claude-sonnet-4-0". - Recommended models for low latency/costs: "gpt-4.1-nano",
- "nova-micro".
+ Models still in beta: "o3", "o1", "o4-mini", "o3-mini", "o1-mini",
+ "gpt-4.5-preview", "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet",
+ "claude-3.5-haiku". - Recommended models for accuracy: "gpt-5", "gpt-4.1",
+ "o4-mini", "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models
+ for low latency/costs: "gpt-4.1-nano", "nova-micro".
log (list[str], default = []): optionally specify additional logs or metadata that TLM should return.
For instance, include "explanation" here to get explanations of why a response is scored with low trustworthiness.
@@ -1106,12 +1105,11 @@ async def validate(
"claude-3.5-sonnet", "claude-3.5-haiku", "claude-3-haiku", "nova-micro",
"nova-lite", "nova-pro"}, default = "gpt-4.1-mini"): Underlying base LLM to use
(better models yield better results, faster models yield faster results). -
- Models still in beta: "gpt-5", "gpt-5-mini", "gpt-5-nano", "o3", "o1",
- "o4-mini", "o3-mini", "o1-mini", "gpt-4.5-preview", "claude-opus-4-0",
- "claude-sonnet-4-0", "claude-3.7-sonnet", "claude-3.5-haiku". - Recommended
- models for accuracy: "gpt-5", "gpt-4.1", "o4-mini", "o3", "claude-opus-4-0",
- "claude-sonnet-4-0". - Recommended models for low latency/costs: "gpt-4.1-nano",
- "nova-micro".
+ Models still in beta: "o3", "o1", "o4-mini", "o3-mini", "o1-mini",
+ "gpt-4.5-preview", "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet",
+ "claude-3.5-haiku". - Recommended models for accuracy: "gpt-5", "gpt-4.1",
+ "o4-mini", "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models
+ for low latency/costs: "gpt-4.1-nano", "nova-micro".
log (list[str], default = []): optionally specify additional logs or metadata that TLM should return.
For instance, include "explanation" here to get explanations of why a response is scored with low trustworthiness.
diff --git a/src/codex/resources/projects/query_logs.py b/src/codex/resources/projects/query_logs.py
index 4527743..e3b17ab 100644
--- a/src/codex/resources/projects/query_logs.py
+++ b/src/codex/resources/projects/query_logs.py
@@ -25,11 +25,17 @@
AsyncOffsetPageQueryLogGroups,
)
from ..._base_client import AsyncPaginator, make_request_options
-from ...types.projects import query_log_list_params, query_log_list_groups_params, query_log_list_by_group_params
+from ...types.projects import (
+ query_log_list_params,
+ query_log_list_groups_params,
+ query_log_list_by_group_params,
+ query_log_add_user_feedback_params,
+)
from ...types.projects.query_log_list_response import QueryLogListResponse
from ...types.projects.query_log_retrieve_response import QueryLogRetrieveResponse
from ...types.projects.query_log_list_groups_response import QueryLogListGroupsResponse
from ...types.projects.query_log_list_by_group_response import QueryLogListByGroupResponse
+from ...types.projects.query_log_add_user_feedback_response import QueryLogAddUserFeedbackResponse
from ...types.projects.query_log_start_remediation_response import QueryLogStartRemediationResponse
__all__ = ["QueryLogsResource", "AsyncQueryLogsResource"]
@@ -184,6 +190,46 @@ def list(
model=QueryLogListResponse,
)
+ def add_user_feedback(
+ self,
+ query_log_id: str,
+ *,
+ project_id: str,
+ key: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> QueryLogAddUserFeedbackResponse:
+ """
+ Add User Feedback Route
+
+ Args:
+ key: A key describing the criteria of the feedback, eg 'rating'
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not query_log_id:
+ raise ValueError(f"Expected a non-empty value for `query_log_id` but received {query_log_id!r}")
+ return self._post(
+ f"/api/projects/{project_id}/query_logs/{query_log_id}/user_feedback",
+ body=maybe_transform({"key": key}, query_log_add_user_feedback_params.QueryLogAddUserFeedbackParams),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=QueryLogAddUserFeedbackResponse,
+ )
+
def list_by_group(
self,
project_id: str,
@@ -568,6 +614,48 @@ def list(
model=QueryLogListResponse,
)
+ async def add_user_feedback(
+ self,
+ query_log_id: str,
+ *,
+ project_id: str,
+ key: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> QueryLogAddUserFeedbackResponse:
+ """
+ Add User Feedback Route
+
+ Args:
+ key: A key describing the criteria of the feedback, eg 'rating'
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not query_log_id:
+ raise ValueError(f"Expected a non-empty value for `query_log_id` but received {query_log_id!r}")
+ return await self._post(
+ f"/api/projects/{project_id}/query_logs/{query_log_id}/user_feedback",
+ body=await async_maybe_transform(
+ {"key": key}, query_log_add_user_feedback_params.QueryLogAddUserFeedbackParams
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=QueryLogAddUserFeedbackResponse,
+ )
+
async def list_by_group(
self,
project_id: str,
@@ -813,6 +901,9 @@ def __init__(self, query_logs: QueryLogsResource) -> None:
self.list = to_raw_response_wrapper(
query_logs.list,
)
+ self.add_user_feedback = to_raw_response_wrapper(
+ query_logs.add_user_feedback,
+ )
self.list_by_group = to_raw_response_wrapper(
query_logs.list_by_group,
)
@@ -834,6 +925,9 @@ def __init__(self, query_logs: AsyncQueryLogsResource) -> None:
self.list = async_to_raw_response_wrapper(
query_logs.list,
)
+ self.add_user_feedback = async_to_raw_response_wrapper(
+ query_logs.add_user_feedback,
+ )
self.list_by_group = async_to_raw_response_wrapper(
query_logs.list_by_group,
)
@@ -855,6 +949,9 @@ def __init__(self, query_logs: QueryLogsResource) -> None:
self.list = to_streamed_response_wrapper(
query_logs.list,
)
+ self.add_user_feedback = to_streamed_response_wrapper(
+ query_logs.add_user_feedback,
+ )
self.list_by_group = to_streamed_response_wrapper(
query_logs.list_by_group,
)
@@ -876,6 +973,9 @@ def __init__(self, query_logs: AsyncQueryLogsResource) -> None:
self.list = async_to_streamed_response_wrapper(
query_logs.list,
)
+ self.add_user_feedback = async_to_streamed_response_wrapper(
+ query_logs.add_user_feedback,
+ )
self.list_by_group = async_to_streamed_response_wrapper(
query_logs.list_by_group,
)
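With the endpoint restored, a minimal call sketch (placeholder UUIDs are the ones the generated tests use, the key value follows the docstring's 'rating' example, and client construction details are assumed):

from codex import Codex

client = Codex()  # auth/config assumed to be handled elsewhere

feedback = client.projects.query_logs.add_user_feedback(
    query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
    project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
    key="rating",  # "A key describing the criteria of the feedback"
)
print(feedback.query_log_id, feedback.custom_metadata)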
diff --git a/src/codex/resources/tlm.py b/src/codex/resources/tlm.py
index 2483e66..5d66ec0 100644
--- a/src/codex/resources/tlm.py
+++ b/src/codex/resources/tlm.py
@@ -101,12 +101,11 @@ def prompt(
"claude-3.5-sonnet", "claude-3.5-haiku", "claude-3-haiku", "nova-micro",
"nova-lite", "nova-pro"}, default = "gpt-4.1-mini"): Underlying base LLM to use
(better models yield better results, faster models yield faster results). -
- Models still in beta: "gpt-5", "gpt-5-mini", "gpt-5-nano", "o3", "o1",
- "o4-mini", "o3-mini", "o1-mini", "gpt-4.5-preview", "claude-opus-4-0",
- "claude-sonnet-4-0", "claude-3.7-sonnet", "claude-3.5-haiku". - Recommended
- models for accuracy: "gpt-5", "gpt-4.1", "o4-mini", "o3", "claude-opus-4-0",
- "claude-sonnet-4-0". - Recommended models for low latency/costs: "gpt-4.1-nano",
- "nova-micro".
+ Models still in beta: "o3", "o1", "o4-mini", "o3-mini", "o1-mini",
+ "gpt-4.5-preview", "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet",
+ "claude-3.5-haiku". - Recommended models for accuracy: "gpt-5", "gpt-4.1",
+ "o4-mini", "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models
+ for low latency/costs: "gpt-4.1-nano", "nova-micro".
log (list[str], default = []): optionally specify additional logs or metadata that TLM should return.
For instance, include "explanation" here to get explanations of why a response is scored with low trustworthiness.
@@ -245,12 +244,11 @@ def score(
"claude-3.5-sonnet", "claude-3.5-haiku", "claude-3-haiku", "nova-micro",
"nova-lite", "nova-pro"}, default = "gpt-4.1-mini"): Underlying base LLM to use
(better models yield better results, faster models yield faster results). -
- Models still in beta: "gpt-5", "gpt-5-mini", "gpt-5-nano", "o3", "o1",
- "o4-mini", "o3-mini", "o1-mini", "gpt-4.5-preview", "claude-opus-4-0",
- "claude-sonnet-4-0", "claude-3.7-sonnet", "claude-3.5-haiku". - Recommended
- models for accuracy: "gpt-5", "gpt-4.1", "o4-mini", "o3", "claude-opus-4-0",
- "claude-sonnet-4-0". - Recommended models for low latency/costs: "gpt-4.1-nano",
- "nova-micro".
+ Models still in beta: "o3", "o1", "o4-mini", "o3-mini", "o1-mini",
+ "gpt-4.5-preview", "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet",
+ "claude-3.5-haiku". - Recommended models for accuracy: "gpt-5", "gpt-4.1",
+ "o4-mini", "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models
+ for low latency/costs: "gpt-4.1-nano", "nova-micro".
log (list[str], default = []): optionally specify additional logs or metadata that TLM should return.
For instance, include "explanation" here to get explanations of why a response is scored with low trustworthiness.
@@ -405,12 +403,11 @@ async def prompt(
"claude-3.5-sonnet", "claude-3.5-haiku", "claude-3-haiku", "nova-micro",
"nova-lite", "nova-pro"}, default = "gpt-4.1-mini"): Underlying base LLM to use
(better models yield better results, faster models yield faster results). -
- Models still in beta: "gpt-5", "gpt-5-mini", "gpt-5-nano", "o3", "o1",
- "o4-mini", "o3-mini", "o1-mini", "gpt-4.5-preview", "claude-opus-4-0",
- "claude-sonnet-4-0", "claude-3.7-sonnet", "claude-3.5-haiku". - Recommended
- models for accuracy: "gpt-5", "gpt-4.1", "o4-mini", "o3", "claude-opus-4-0",
- "claude-sonnet-4-0". - Recommended models for low latency/costs: "gpt-4.1-nano",
- "nova-micro".
+ Models still in beta: "o3", "o1", "o4-mini", "o3-mini", "o1-mini",
+ "gpt-4.5-preview", "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet",
+ "claude-3.5-haiku". - Recommended models for accuracy: "gpt-5", "gpt-4.1",
+ "o4-mini", "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models
+ for low latency/costs: "gpt-4.1-nano", "nova-micro".
log (list[str], default = []): optionally specify additional logs or metadata that TLM should return.
For instance, include "explanation" here to get explanations of why a response is scored with low trustworthiness.
@@ -549,12 +546,11 @@ async def score(
"claude-3.5-sonnet", "claude-3.5-haiku", "claude-3-haiku", "nova-micro",
"nova-lite", "nova-pro"}, default = "gpt-4.1-mini"): Underlying base LLM to use
(better models yield better results, faster models yield faster results). -
- Models still in beta: "gpt-5", "gpt-5-mini", "gpt-5-nano", "o3", "o1",
- "o4-mini", "o3-mini", "o1-mini", "gpt-4.5-preview", "claude-opus-4-0",
- "claude-sonnet-4-0", "claude-3.7-sonnet", "claude-3.5-haiku". - Recommended
- models for accuracy: "gpt-5", "gpt-4.1", "o4-mini", "o3", "claude-opus-4-0",
- "claude-sonnet-4-0". - Recommended models for low latency/costs: "gpt-4.1-nano",
- "nova-micro".
+ Models still in beta: "o3", "o1", "o4-mini", "o3-mini", "o1-mini",
+ "gpt-4.5-preview", "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet",
+ "claude-3.5-haiku". - Recommended models for accuracy: "gpt-5", "gpt-4.1",
+ "o4-mini", "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models
+ for low latency/costs: "gpt-4.1-nano", "nova-micro".
log (list[str], default = []): optionally specify additional logs or metadata that TLM should return.
For instance, include "explanation" here to get explanations of why a response is scored with low trustworthiness.
diff --git a/src/codex/types/project_validate_params.py b/src/codex/types/project_validate_params.py
index 4807463..719ad3d 100644
--- a/src/codex/types/project_validate_params.py
+++ b/src/codex/types/project_validate_params.py
@@ -130,12 +130,11 @@ class ProjectValidateParams(TypedDict, total=False):
"claude-3.5-sonnet", "claude-3.5-haiku", "claude-3-haiku", "nova-micro",
"nova-lite", "nova-pro"}, default = "gpt-4.1-mini"): Underlying base LLM to use
(better models yield better results, faster models yield faster results). -
- Models still in beta: "gpt-5", "gpt-5-mini", "gpt-5-nano", "o3", "o1",
- "o4-mini", "o3-mini", "o1-mini", "gpt-4.5-preview", "claude-opus-4-0",
- "claude-sonnet-4-0", "claude-3.7-sonnet", "claude-3.5-haiku". - Recommended
- models for accuracy: "gpt-5", "gpt-4.1", "o4-mini", "o3", "claude-opus-4-0",
- "claude-sonnet-4-0". - Recommended models for low latency/costs: "gpt-4.1-nano",
- "nova-micro".
+ Models still in beta: "o3", "o1", "o4-mini", "o3-mini", "o1-mini",
+ "gpt-4.5-preview", "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet",
+ "claude-3.5-haiku". - Recommended models for accuracy: "gpt-5", "gpt-4.1",
+ "o4-mini", "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models
+ for low latency/costs: "gpt-4.1-nano", "nova-micro".
log (list[str], default = []): optionally specify additional logs or metadata that TLM should return.
For instance, include "explanation" here to get explanations of why a response is scored with low trustworthiness.
@@ -653,6 +652,8 @@ class MessageChatCompletionDeveloperMessageParam(TypedDict, total=False):
class Options(TypedDict, total=False):
custom_eval_criteria: Iterable[object]
+ disable_persistence: bool
+
disable_trustworthiness: bool
log: List[str]
diff --git a/src/codex/types/project_validate_response.py b/src/codex/types/project_validate_response.py
index 4488311..003b676 100644
--- a/src/codex/types/project_validate_response.py
+++ b/src/codex/types/project_validate_response.py
@@ -59,6 +59,9 @@ class ProjectValidateResponse(BaseModel):
to answer, if it does not already exist.
"""
+ log_id: str
+ """The UUID of the query log entry created for this validation request."""
+
should_guardrail: bool
"""
True if the response should be guardrailed by the AI system, False if the
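With log_id back on the response, validation results can again be tied to query-log operations. A hypothetical helper follows (the validate call itself is outside this patch, so only the response-side linkage is sketched; treating log_id as the query_log_id that the query_logs methods expect is an assumption consistent with its docstring, not something the patch states):

from codex import Codex
from codex.types.project_validate_response import ProjectValidateResponse


def remediate_if_guardrailed(client: Codex, project_id: str, validation: ProjectValidateResponse) -> None:
    # log_id is "the UUID of the query log entry created for this validation
    # request"; here it is passed to start_remediation as the query_log_id.
    if validation.should_guardrail:
        client.projects.query_logs.start_remediation(
            validation.log_id,
            project_id=project_id,
        )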
diff --git a/src/codex/types/projects/__init__.py b/src/codex/types/projects/__init__.py
index cb2989f..b90cbd8 100644
--- a/src/codex/types/projects/__init__.py
+++ b/src/codex/types/projects/__init__.py
@@ -27,6 +27,8 @@
from .remediation_edit_answer_params import RemediationEditAnswerParams as RemediationEditAnswerParams
from .query_log_list_by_group_response import QueryLogListByGroupResponse as QueryLogListByGroupResponse
from .remediation_edit_answer_response import RemediationEditAnswerResponse as RemediationEditAnswerResponse
+from .query_log_add_user_feedback_params import QueryLogAddUserFeedbackParams as QueryLogAddUserFeedbackParams
+from .query_log_add_user_feedback_response import QueryLogAddUserFeedbackResponse as QueryLogAddUserFeedbackResponse
from .query_log_start_remediation_response import QueryLogStartRemediationResponse as QueryLogStartRemediationResponse
from .remediation_edit_draft_answer_params import RemediationEditDraftAnswerParams as RemediationEditDraftAnswerParams
from .remediation_edit_draft_answer_response import (
diff --git a/src/codex/types/projects/query_log_add_user_feedback_params.py b/src/codex/types/projects/query_log_add_user_feedback_params.py
new file mode 100644
index 0000000..e841892
--- /dev/null
+++ b/src/codex/types/projects/query_log_add_user_feedback_params.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["QueryLogAddUserFeedbackParams"]
+
+
+class QueryLogAddUserFeedbackParams(TypedDict, total=False):
+ project_id: Required[str]
+
+ key: Required[str]
+ """A key describing the criteria of the feedback, eg 'rating'"""
diff --git a/src/codex/types/projects/query_log_add_user_feedback_response.py b/src/codex/types/projects/query_log_add_user_feedback_response.py
new file mode 100644
index 0000000..adec25f
--- /dev/null
+++ b/src/codex/types/projects/query_log_add_user_feedback_response.py
@@ -0,0 +1,11 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from ..._models import BaseModel
+
+__all__ = ["QueryLogAddUserFeedbackResponse"]
+
+
+class QueryLogAddUserFeedbackResponse(BaseModel):
+ custom_metadata: object
+
+ query_log_id: str
diff --git a/src/codex/types/projects/query_log_list_groups_response.py b/src/codex/types/projects/query_log_list_groups_response.py
index 7b2d44c..7dbf192 100644
--- a/src/codex/types/projects/query_log_list_groups_response.py
+++ b/src/codex/types/projects/query_log_list_groups_response.py
@@ -337,6 +337,9 @@ class QueryLogListGroupsResponse(BaseModel):
formatted_original_question: Optional[str] = None
+ impact_score: float
+ """Impact score used for prioritization sorting"""
+
is_bad_response: bool
needs_review: bool
diff --git a/src/codex/types/tlm_prompt_params.py b/src/codex/types/tlm_prompt_params.py
index aaa8b32..821c381 100644
--- a/src/codex/types/tlm_prompt_params.py
+++ b/src/codex/types/tlm_prompt_params.py
@@ -52,12 +52,11 @@ class TlmPromptParams(TypedDict, total=False):
"claude-3.5-sonnet", "claude-3.5-haiku", "claude-3-haiku", "nova-micro",
"nova-lite", "nova-pro"}, default = "gpt-4.1-mini"): Underlying base LLM to use
(better models yield better results, faster models yield faster results). -
- Models still in beta: "gpt-5", "gpt-5-mini", "gpt-5-nano", "o3", "o1",
- "o4-mini", "o3-mini", "o1-mini", "gpt-4.5-preview", "claude-opus-4-0",
- "claude-sonnet-4-0", "claude-3.7-sonnet", "claude-3.5-haiku". - Recommended
- models for accuracy: "gpt-5", "gpt-4.1", "o4-mini", "o3", "claude-opus-4-0",
- "claude-sonnet-4-0". - Recommended models for low latency/costs: "gpt-4.1-nano",
- "nova-micro".
+ Models still in beta: "o3", "o1", "o4-mini", "o3-mini", "o1-mini",
+ "gpt-4.5-preview", "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet",
+ "claude-3.5-haiku". - Recommended models for accuracy: "gpt-5", "gpt-4.1",
+ "o4-mini", "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models
+ for low latency/costs: "gpt-4.1-nano", "nova-micro".
log (list[str], default = []): optionally specify additional logs or metadata that TLM should return.
For instance, include "explanation" here to get explanations of why a response is scored with low trustworthiness.
@@ -116,6 +115,8 @@ class TlmPromptParams(TypedDict, total=False):
class Options(TypedDict, total=False):
custom_eval_criteria: Iterable[object]
+ disable_persistence: bool
+
disable_trustworthiness: bool
log: List[str]
diff --git a/src/codex/types/tlm_score_params.py b/src/codex/types/tlm_score_params.py
index a5a75c6..d676a1d 100644
--- a/src/codex/types/tlm_score_params.py
+++ b/src/codex/types/tlm_score_params.py
@@ -54,12 +54,11 @@ class TlmScoreParams(TypedDict, total=False):
"claude-3.5-sonnet", "claude-3.5-haiku", "claude-3-haiku", "nova-micro",
"nova-lite", "nova-pro"}, default = "gpt-4.1-mini"): Underlying base LLM to use
(better models yield better results, faster models yield faster results). -
- Models still in beta: "gpt-5", "gpt-5-mini", "gpt-5-nano", "o3", "o1",
- "o4-mini", "o3-mini", "o1-mini", "gpt-4.5-preview", "claude-opus-4-0",
- "claude-sonnet-4-0", "claude-3.7-sonnet", "claude-3.5-haiku". - Recommended
- models for accuracy: "gpt-5", "gpt-4.1", "o4-mini", "o3", "claude-opus-4-0",
- "claude-sonnet-4-0". - Recommended models for low latency/costs: "gpt-4.1-nano",
- "nova-micro".
+ Models still in beta: "o3", "o1", "o4-mini", "o3-mini", "o1-mini",
+ "gpt-4.5-preview", "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet",
+ "claude-3.5-haiku". - Recommended models for accuracy: "gpt-5", "gpt-4.1",
+ "o4-mini", "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models
+ for low latency/costs: "gpt-4.1-nano", "nova-micro".
log (list[str], default = []): optionally specify additional logs or metadata that TLM should return.
For instance, include "explanation" here to get explanations of why a response is scored with low trustworthiness.
@@ -118,6 +117,8 @@ class TlmScoreParams(TypedDict, total=False):
class Options(TypedDict, total=False):
custom_eval_criteria: Iterable[object]
+ disable_persistence: bool
+
disable_trustworthiness: bool
log: List[str]
diff --git a/tests/api_resources/projects/test_query_logs.py b/tests/api_resources/projects/test_query_logs.py
index 5f7e02c..05d3f46 100644
--- a/tests/api_resources/projects/test_query_logs.py
+++ b/tests/api_resources/projects/test_query_logs.py
@@ -21,6 +21,7 @@
QueryLogRetrieveResponse,
QueryLogListGroupsResponse,
QueryLogListByGroupResponse,
+ QueryLogAddUserFeedbackResponse,
QueryLogStartRemediationResponse,
)
@@ -146,6 +147,63 @@ def test_path_params_list(self, client: Codex) -> None:
project_id="",
)
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_add_user_feedback(self, client: Codex) -> None:
+ query_log = client.projects.query_logs.add_user_feedback(
+ query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ key="key",
+ )
+ assert_matches_type(QueryLogAddUserFeedbackResponse, query_log, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_raw_response_add_user_feedback(self, client: Codex) -> None:
+ response = client.projects.query_logs.with_raw_response.add_user_feedback(
+ query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ key="key",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ query_log = response.parse()
+ assert_matches_type(QueryLogAddUserFeedbackResponse, query_log, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_streaming_response_add_user_feedback(self, client: Codex) -> None:
+ with client.projects.query_logs.with_streaming_response.add_user_feedback(
+ query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ key="key",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ query_log = response.parse()
+ assert_matches_type(QueryLogAddUserFeedbackResponse, query_log, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_path_params_add_user_feedback(self, client: Codex) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ client.projects.query_logs.with_raw_response.add_user_feedback(
+ query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ project_id="",
+ key="key",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `query_log_id` but received ''"):
+ client.projects.query_logs.with_raw_response.add_user_feedback(
+ query_log_id="",
+ project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ key="key",
+ )
+
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
def test_method_list_by_group(self, client: Codex) -> None:
@@ -451,6 +509,63 @@ async def test_path_params_list(self, async_client: AsyncCodex) -> None:
project_id="",
)
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_add_user_feedback(self, async_client: AsyncCodex) -> None:
+ query_log = await async_client.projects.query_logs.add_user_feedback(
+ query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ key="key",
+ )
+ assert_matches_type(QueryLogAddUserFeedbackResponse, query_log, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_raw_response_add_user_feedback(self, async_client: AsyncCodex) -> None:
+ response = await async_client.projects.query_logs.with_raw_response.add_user_feedback(
+ query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ key="key",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ query_log = await response.parse()
+ assert_matches_type(QueryLogAddUserFeedbackResponse, query_log, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_streaming_response_add_user_feedback(self, async_client: AsyncCodex) -> None:
+ async with async_client.projects.query_logs.with_streaming_response.add_user_feedback(
+ query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ key="key",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ query_log = await response.parse()
+ assert_matches_type(QueryLogAddUserFeedbackResponse, query_log, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_path_params_add_user_feedback(self, async_client: AsyncCodex) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ await async_client.projects.query_logs.with_raw_response.add_user_feedback(
+ query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ project_id="",
+ key="key",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `query_log_id` but received ''"):
+ await async_client.projects.query_logs.with_raw_response.add_user_feedback(
+ query_log_id="",
+ project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ key="key",
+ )
+
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
async def test_method_list_by_group(self, async_client: AsyncCodex) -> None:
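A brief usage sketch of the re-added feedback endpoint, mirroring the calls in the tests above (assumes a configured `Codex` client; the IDs, key, and prompt text are placeholders):

    from codex import Codex

    client = Codex()  # credentials assumed to come from the environment

    # Attach user feedback to a query log under a named feedback key.
    feedback = client.projects.query_logs.add_user_feedback(
        query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
        project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
        key="rating",
    )
    print(feedback.query_log_id, feedback.custom_metadata)

    # The disable_persistence flag added to the TLM options above can be passed
    # the same way the TLM tests below exercise it.
    client.tlm.prompt(
        prompt="What is the capital of France?",
        options={"disable_persistence": True},
    )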
diff --git a/tests/api_resources/test_projects.py b/tests/api_resources/test_projects.py
index 586b0fa..04eef99 100644
--- a/tests/api_resources/test_projects.py
+++ b/tests/api_resources/test_projects.py
@@ -617,6 +617,7 @@ def test_method_validate_with_all_params(self, client: Codex) -> None:
],
options={
"custom_eval_criteria": [{}],
+ "disable_persistence": True,
"disable_trustworthiness": True,
"log": ["string"],
"max_tokens": 0,
@@ -1292,6 +1293,7 @@ async def test_method_validate_with_all_params(self, async_client: AsyncCodex) -
],
options={
"custom_eval_criteria": [{}],
+ "disable_persistence": True,
"disable_trustworthiness": True,
"log": ["string"],
"max_tokens": 0,
diff --git a/tests/api_resources/test_tlm.py b/tests/api_resources/test_tlm.py
index fd977f6..6c8c177 100644
--- a/tests/api_resources/test_tlm.py
+++ b/tests/api_resources/test_tlm.py
@@ -33,6 +33,7 @@ def test_method_prompt_with_all_params(self, client: Codex) -> None:
constrain_outputs=["string"],
options={
"custom_eval_criteria": [{}],
+ "disable_persistence": True,
"disable_trustworthiness": True,
"log": ["string"],
"max_tokens": 0,
@@ -93,6 +94,7 @@ def test_method_score_with_all_params(self, client: Codex) -> None:
constrain_outputs=["string"],
options={
"custom_eval_criteria": [{}],
+ "disable_persistence": True,
"disable_trustworthiness": True,
"log": ["string"],
"max_tokens": 0,
@@ -159,6 +161,7 @@ async def test_method_prompt_with_all_params(self, async_client: AsyncCodex) ->
constrain_outputs=["string"],
options={
"custom_eval_criteria": [{}],
+ "disable_persistence": True,
"disable_trustworthiness": True,
"log": ["string"],
"max_tokens": 0,
@@ -219,6 +222,7 @@ async def test_method_score_with_all_params(self, async_client: AsyncCodex) -> N
constrain_outputs=["string"],
options={
"custom_eval_criteria": [{}],
+ "disable_persistence": True,
"disable_trustworthiness": True,
"log": ["string"],
"max_tokens": 0,
From 7a7b9a3ee6cfed2e57359a54c1b9292f06cc71b6 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 27 Aug 2025 04:06:05 +0000
Subject: [PATCH 05/15] fix: avoid newer type syntax
---
src/codex/_models.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/codex/_models.py b/src/codex/_models.py
index b8387ce..92f7c10 100644
--- a/src/codex/_models.py
+++ b/src/codex/_models.py
@@ -304,7 +304,7 @@ def model_dump(
exclude_none=exclude_none,
)
- return cast(dict[str, Any], json_safe(dumped)) if mode == "json" else dumped
+ return cast("dict[str, Any]", json_safe(dumped)) if mode == "json" else dumped
@override
def model_dump_json(
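For context on the one-line fix above: quoting the cast target keeps `dict[str, Any]` out of runtime evaluation, which matters on Python 3.8 where builtin generics are not subscriptable (a minimal illustration):

    from typing import Any, cast

    # cast(dict[str, Any], ...) evaluates dict[str, Any] at call time and raises
    # TypeError on Python 3.8; the quoted form is treated as a forward reference
    # and only interpreted by type checkers.
    dumped = cast("dict[str, Any]", {"mode": "json"})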
From cc58363a9c78ce08437508d3c64cb9b29b43e9bd Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 27 Aug 2025 04:37:56 +0000
Subject: [PATCH 06/15] chore(internal): update pyright exclude list
---
pyproject.toml | 1 +
1 file changed, 1 insertion(+)
diff --git a/pyproject.toml b/pyproject.toml
index 3209698..1cb20a6 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -148,6 +148,7 @@ exclude = [
"_dev",
".venv",
".nox",
+ ".git",
]
reportImplicitOverride = true
From 9bb8f17149849ea312caf7b895f02a2c38ac3270 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 27 Aug 2025 18:17:31 +0000
Subject: [PATCH 07/15] feat(api): api update
---
.stats.yml | 2 +-
src/codex/resources/projects/query_logs.py | 52 +++++++++++++++++--
.../query_log_list_by_group_params.py | 12 ++++-
.../types/projects/query_log_list_params.py | 12 ++++-
4 files changed, 71 insertions(+), 7 deletions(-)
diff --git a/.stats.yml b/.stats.yml
index c5d767f..1be05f4 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,3 +1,3 @@
configured_endpoints: 55
-openapi_spec_hash: d69252c7252423bc98ecc17807eb33ec
+openapi_spec_hash: b260cce23d53dba37b982654e7928405
config_hash: bed87752f4056d0c4bf2ddf856307800
diff --git a/src/codex/resources/projects/query_logs.py b/src/codex/resources/projects/query_logs.py
index e3b17ab..6bbd748 100644
--- a/src/codex/resources/projects/query_logs.py
+++ b/src/codex/resources/projects/query_logs.py
@@ -115,7 +115,18 @@ def list(
List[Literal["hallucination", "search_failure", "unhelpful", "difficult_query", "ungrounded"]]
]
| NotGiven = NOT_GIVEN,
- sort: Optional[Literal["created_at", "primary_eval_issue_score"]] | NotGiven = NOT_GIVEN,
+ sort: Optional[
+ Literal[
+ "created_at",
+ "primary_eval_issue_score",
+ "score_trustworthiness",
+ "score_context_sufficiency",
+ "score_response_helpfulness",
+ "score_query_ease",
+ "score_response_groundedness",
+ ]
+ ]
+ | NotGiven = NOT_GIVEN,
tool_call_names: Optional[List[str]] | NotGiven = NOT_GIVEN,
was_cache_hit: Optional[bool] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -250,7 +261,18 @@ def list_by_group(
]
| NotGiven = NOT_GIVEN,
remediation_ids: List[str] | NotGiven = NOT_GIVEN,
- sort: Optional[Literal["created_at", "primary_eval_issue_score"]] | NotGiven = NOT_GIVEN,
+ sort: Optional[
+ Literal[
+ "created_at",
+ "primary_eval_issue_score",
+ "score_trustworthiness",
+ "score_context_sufficiency",
+ "score_response_helpfulness",
+ "score_query_ease",
+ "score_response_groundedness",
+ ]
+ ]
+ | NotGiven = NOT_GIVEN,
tool_call_names: Optional[List[str]] | NotGiven = NOT_GIVEN,
was_cache_hit: Optional[bool] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -539,7 +561,18 @@ def list(
List[Literal["hallucination", "search_failure", "unhelpful", "difficult_query", "ungrounded"]]
]
| NotGiven = NOT_GIVEN,
- sort: Optional[Literal["created_at", "primary_eval_issue_score"]] | NotGiven = NOT_GIVEN,
+ sort: Optional[
+ Literal[
+ "created_at",
+ "primary_eval_issue_score",
+ "score_trustworthiness",
+ "score_context_sufficiency",
+ "score_response_helpfulness",
+ "score_query_ease",
+ "score_response_groundedness",
+ ]
+ ]
+ | NotGiven = NOT_GIVEN,
tool_call_names: Optional[List[str]] | NotGiven = NOT_GIVEN,
was_cache_hit: Optional[bool] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -676,7 +709,18 @@ async def list_by_group(
]
| NotGiven = NOT_GIVEN,
remediation_ids: List[str] | NotGiven = NOT_GIVEN,
- sort: Optional[Literal["created_at", "primary_eval_issue_score"]] | NotGiven = NOT_GIVEN,
+ sort: Optional[
+ Literal[
+ "created_at",
+ "primary_eval_issue_score",
+ "score_trustworthiness",
+ "score_context_sufficiency",
+ "score_response_helpfulness",
+ "score_query_ease",
+ "score_response_groundedness",
+ ]
+ ]
+ | NotGiven = NOT_GIVEN,
tool_call_names: Optional[List[str]] | NotGiven = NOT_GIVEN,
was_cache_hit: Optional[bool] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
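A sketch of the expanded sort options on the list endpoints above (assumes a configured `Codex` client; the project ID is a placeholder):

    from codex import Codex

    client = Codex()
    # Query logs can now be sorted by individual eval scores in addition to
    # created_at and primary_eval_issue_score.
    page = client.projects.query_logs.list(
        "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
        sort="score_trustworthiness",
        order="desc",
    )
    for query_log in page:
        print(query_log)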
diff --git a/src/codex/types/projects/query_log_list_by_group_params.py b/src/codex/types/projects/query_log_list_by_group_params.py
index 0fbb280..5ca6d10 100644
--- a/src/codex/types/projects/query_log_list_by_group_params.py
+++ b/src/codex/types/projects/query_log_list_by_group_params.py
@@ -50,7 +50,17 @@ class QueryLogListByGroupParams(TypedDict, total=False):
remediation_ids: List[str]
"""List of groups to list child logs for"""
- sort: Optional[Literal["created_at", "primary_eval_issue_score"]]
+ sort: Optional[
+ Literal[
+ "created_at",
+ "primary_eval_issue_score",
+ "score_trustworthiness",
+ "score_context_sufficiency",
+ "score_response_helpfulness",
+ "score_query_ease",
+ "score_response_groundedness",
+ ]
+ ]
tool_call_names: Optional[List[str]]
"""Filter by names of tools called in the assistant response"""
diff --git a/src/codex/types/projects/query_log_list_params.py b/src/codex/types/projects/query_log_list_params.py
index 02c1707..2da646f 100644
--- a/src/codex/types/projects/query_log_list_params.py
+++ b/src/codex/types/projects/query_log_list_params.py
@@ -44,7 +44,17 @@ class QueryLogListParams(TypedDict, total=False):
]
"""Filter logs that have ANY of these primary evaluation issues (OR operation)"""
- sort: Optional[Literal["created_at", "primary_eval_issue_score"]]
+ sort: Optional[
+ Literal[
+ "created_at",
+ "primary_eval_issue_score",
+ "score_trustworthiness",
+ "score_context_sufficiency",
+ "score_response_helpfulness",
+ "score_query_ease",
+ "score_response_groundedness",
+ ]
+ ]
tool_call_names: Optional[List[str]]
"""Filter by names of tools called in the assistant response"""
From ead0105a68c77ae308cc0d40c34245265e9cb2b6 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Sat, 30 Aug 2025 02:42:24 +0000
Subject: [PATCH 08/15] chore(internal): add Sequence related utils
---
src/codex/_types.py | 36 +++++++++++++++++++++++++++++++++++-
src/codex/_utils/__init__.py | 1 +
src/codex/_utils/_typing.py | 5 +++++
tests/utils.py | 10 +++++++++-
4 files changed, 50 insertions(+), 2 deletions(-)
diff --git a/src/codex/_types.py b/src/codex/_types.py
index f2d17a1..99d66e2 100644
--- a/src/codex/_types.py
+++ b/src/codex/_types.py
@@ -13,10 +13,21 @@
Mapping,
TypeVar,
Callable,
+ Iterator,
Optional,
Sequence,
)
-from typing_extensions import Set, Literal, Protocol, TypeAlias, TypedDict, override, runtime_checkable
+from typing_extensions import (
+ Set,
+ Literal,
+ Protocol,
+ TypeAlias,
+ TypedDict,
+ SupportsIndex,
+ overload,
+ override,
+ runtime_checkable,
+)
import httpx
import pydantic
@@ -217,3 +228,26 @@ class _GenericAlias(Protocol):
class HttpxSendArgs(TypedDict, total=False):
auth: httpx.Auth
follow_redirects: bool
+
+
+_T_co = TypeVar("_T_co", covariant=True)
+
+
+if TYPE_CHECKING:
+ # This works because str.__contains__ does not accept object (either in typeshed or at runtime)
+ # https://github.com/hauntsaninja/useful_types/blob/5e9710f3875107d068e7679fd7fec9cfab0eff3b/useful_types/__init__.py#L285
+ class SequenceNotStr(Protocol[_T_co]):
+ @overload
+ def __getitem__(self, index: SupportsIndex, /) -> _T_co: ...
+ @overload
+ def __getitem__(self, index: slice, /) -> Sequence[_T_co]: ...
+ def __contains__(self, value: object, /) -> bool: ...
+ def __len__(self) -> int: ...
+ def __iter__(self) -> Iterator[_T_co]: ...
+ def index(self, value: Any, start: int = 0, stop: int = ..., /) -> int: ...
+ def count(self, value: Any, /) -> int: ...
+ def __reversed__(self) -> Iterator[_T_co]: ...
+else:
+ # just point this to a normal `Sequence` at runtime to avoid having to special case
+ # deserializing our custom sequence type
+ SequenceNotStr = Sequence
diff --git a/src/codex/_utils/__init__.py b/src/codex/_utils/__init__.py
index d4fda26..ca547ce 100644
--- a/src/codex/_utils/__init__.py
+++ b/src/codex/_utils/__init__.py
@@ -38,6 +38,7 @@
extract_type_arg as extract_type_arg,
is_iterable_type as is_iterable_type,
is_required_type as is_required_type,
+ is_sequence_type as is_sequence_type,
is_annotated_type as is_annotated_type,
is_type_alias_type as is_type_alias_type,
strip_annotated_type as strip_annotated_type,
diff --git a/src/codex/_utils/_typing.py b/src/codex/_utils/_typing.py
index 1bac954..845cd6b 100644
--- a/src/codex/_utils/_typing.py
+++ b/src/codex/_utils/_typing.py
@@ -26,6 +26,11 @@ def is_list_type(typ: type) -> bool:
return (get_origin(typ) or typ) == list
+def is_sequence_type(typ: type) -> bool:
+ origin = get_origin(typ) or typ
+ return origin == typing_extensions.Sequence or origin == typing.Sequence or origin == _c_abc.Sequence
+
+
def is_iterable_type(typ: type) -> bool:
"""If the given type is `typing.Iterable[T]`"""
origin = get_origin(typ) or typ
diff --git a/tests/utils.py b/tests/utils.py
index d56fd57..091f4e4 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -4,7 +4,7 @@
import inspect
import traceback
import contextlib
-from typing import Any, TypeVar, Iterator, cast
+from typing import Any, TypeVar, Iterator, Sequence, cast
from datetime import date, datetime
from typing_extensions import Literal, get_args, get_origin, assert_type
@@ -15,6 +15,7 @@
is_list_type,
is_union_type,
extract_type_arg,
+ is_sequence_type,
is_annotated_type,
is_type_alias_type,
)
@@ -71,6 +72,13 @@ def assert_matches_type(
if is_list_type(type_):
return _assert_list_type(type_, value)
+ if is_sequence_type(type_):
+ assert isinstance(value, Sequence)
+ inner_type = get_args(type_)[0]
+ for entry in value: # type: ignore
+ assert_type(inner_type, entry) # type: ignore
+ return
+
if origin == str:
assert isinstance(value, str)
elif origin == int:
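A small sketch of how the new sequence helpers behave (these live in the private `codex._types` and `codex._utils` modules; shown purely to illustrate the additions above):

    from typing import Sequence

    from codex._types import SequenceNotStr
    from codex._utils import is_sequence_type

    # is_sequence_type matches Sequence aliases from typing, typing_extensions,
    # and collections.abc, but not concrete containers such as list.
    assert is_sequence_type(Sequence[str])
    assert not is_sequence_type(list)

    # At runtime SequenceNotStr is plain Sequence; the "not str" behaviour exists
    # only for type checkers, where str fails the protocol because
    # str.__contains__ does not accept object.
    xs: SequenceNotStr[str] = ("a", "b")  # tuple: accepted
    ys: SequenceNotStr[str] = ["a", "b"]  # list: accepted
    # zs: SequenceNotStr[str] = "ab"      # flagged by type checkers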
From 07bdde83153c609ff2b86a4190911cab160aaae2 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 3 Sep 2025 02:25:12 +0000
Subject: [PATCH 09/15] feat(types): replace List[str] with SequenceNotStr in
params
---
src/codex/_utils/_transform.py | 6 +++
src/codex/resources/projects/projects.py | 8 ++--
src/codex/resources/projects/query_logs.py | 42 +++++++++----------
src/codex/resources/tlm.py | 12 +++---
src/codex/types/project_validate_params.py | 7 ++--
.../query_log_list_by_group_params.py | 9 ++--
.../projects/query_log_list_groups_params.py | 7 ++--
.../types/projects/query_log_list_params.py | 7 ++--
src/codex/types/tlm_prompt_params.py | 8 ++--
src/codex/types/tlm_score_params.py | 8 ++--
10 files changed, 64 insertions(+), 50 deletions(-)
diff --git a/src/codex/_utils/_transform.py b/src/codex/_utils/_transform.py
index b0cc20a..f0bcefd 100644
--- a/src/codex/_utils/_transform.py
+++ b/src/codex/_utils/_transform.py
@@ -16,6 +16,7 @@
lru_cache,
is_mapping,
is_iterable,
+ is_sequence,
)
from .._files import is_base64_file_input
from ._typing import (
@@ -24,6 +25,7 @@
extract_type_arg,
is_iterable_type,
is_required_type,
+ is_sequence_type,
is_annotated_type,
strip_annotated_type,
)
@@ -184,6 +186,8 @@ def _transform_recursive(
(is_list_type(stripped_type) and is_list(data))
# Iterable[T]
or (is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str))
+ # Sequence[T]
+ or (is_sequence_type(stripped_type) and is_sequence(data) and not isinstance(data, str))
):
# dicts are technically iterable, but it is an iterable on the keys of the dict and is not usually
# intended as an iterable, so we don't transform it.
@@ -346,6 +350,8 @@ async def _async_transform_recursive(
(is_list_type(stripped_type) and is_list(data))
# Iterable[T]
or (is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str))
+ # Sequence[T]
+ or (is_sequence_type(stripped_type) and is_sequence(data) and not isinstance(data, str))
):
# dicts are technically iterable, but it is an iterable on the keys of the dict and is not usually
# intended as an iterable, so we don't transform it.
diff --git a/src/codex/resources/projects/projects.py b/src/codex/resources/projects/projects.py
index 319097f..f0ef92a 100644
--- a/src/codex/resources/projects/projects.py
+++ b/src/codex/resources/projects/projects.py
@@ -2,7 +2,7 @@
from __future__ import annotations
-from typing import Dict, List, Iterable, Optional
+from typing import Dict, Iterable, Optional
from typing_extensions import Literal
import httpx
@@ -23,7 +23,7 @@
project_invite_sme_params,
project_retrieve_analytics_params,
)
-from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven
+from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven, SequenceNotStr
from ..._utils import maybe_transform, strip_not_given, async_maybe_transform
from ..._compat import cached_property
from .query_logs import (
@@ -450,7 +450,7 @@ def validate(
query: str,
response: project_validate_params.Response,
use_llm_matching: Optional[bool] | NotGiven = NOT_GIVEN,
- constrain_outputs: Optional[List[str]] | NotGiven = NOT_GIVEN,
+ constrain_outputs: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
custom_eval_thresholds: Optional[Dict[str, float]] | NotGiven = NOT_GIVEN,
custom_metadata: Optional[object] | NotGiven = NOT_GIVEN,
eval_scores: Optional[Dict[str, float]] | NotGiven = NOT_GIVEN,
@@ -1028,7 +1028,7 @@ async def validate(
query: str,
response: project_validate_params.Response,
use_llm_matching: Optional[bool] | NotGiven = NOT_GIVEN,
- constrain_outputs: Optional[List[str]] | NotGiven = NOT_GIVEN,
+ constrain_outputs: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
custom_eval_thresholds: Optional[Dict[str, float]] | NotGiven = NOT_GIVEN,
custom_metadata: Optional[object] | NotGiven = NOT_GIVEN,
eval_scores: Optional[Dict[str, float]] | NotGiven = NOT_GIVEN,
diff --git a/src/codex/resources/projects/query_logs.py b/src/codex/resources/projects/query_logs.py
index 6bbd748..8fb37c0 100644
--- a/src/codex/resources/projects/query_logs.py
+++ b/src/codex/resources/projects/query_logs.py
@@ -8,7 +8,7 @@
import httpx
-from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr
from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
@@ -104,13 +104,13 @@ def list(
created_at_end: Union[str, datetime, None] | NotGiven = NOT_GIVEN,
created_at_start: Union[str, datetime, None] | NotGiven = NOT_GIVEN,
custom_metadata: Optional[str] | NotGiven = NOT_GIVEN,
- failed_evals: Optional[List[str]] | NotGiven = NOT_GIVEN,
+ failed_evals: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
guardrailed: Optional[bool] | NotGiven = NOT_GIVEN,
has_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
offset: int | NotGiven = NOT_GIVEN,
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- passed_evals: Optional[List[str]] | NotGiven = NOT_GIVEN,
+ passed_evals: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
primary_eval_issue: Optional[
List[Literal["hallucination", "search_failure", "unhelpful", "difficult_query", "ungrounded"]]
]
@@ -127,7 +127,7 @@ def list(
]
]
| NotGiven = NOT_GIVEN,
- tool_call_names: Optional[List[str]] | NotGiven = NOT_GIVEN,
+ tool_call_names: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
was_cache_hit: Optional[bool] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -248,19 +248,19 @@ def list_by_group(
created_at_end: Union[str, datetime, None] | NotGiven = NOT_GIVEN,
created_at_start: Union[str, datetime, None] | NotGiven = NOT_GIVEN,
custom_metadata: Optional[str] | NotGiven = NOT_GIVEN,
- failed_evals: Optional[List[str]] | NotGiven = NOT_GIVEN,
+ failed_evals: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
guardrailed: Optional[bool] | NotGiven = NOT_GIVEN,
has_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
needs_review: Optional[bool] | NotGiven = NOT_GIVEN,
offset: int | NotGiven = NOT_GIVEN,
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- passed_evals: Optional[List[str]] | NotGiven = NOT_GIVEN,
+ passed_evals: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
primary_eval_issue: Optional[
List[Literal["hallucination", "search_failure", "unhelpful", "difficult_query", "ungrounded"]]
]
| NotGiven = NOT_GIVEN,
- remediation_ids: List[str] | NotGiven = NOT_GIVEN,
+ remediation_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN,
sort: Optional[
Literal[
"created_at",
@@ -273,7 +273,7 @@ def list_by_group(
]
]
| NotGiven = NOT_GIVEN,
- tool_call_names: Optional[List[str]] | NotGiven = NOT_GIVEN,
+ tool_call_names: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
was_cache_hit: Optional[bool] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -359,21 +359,21 @@ def list_groups(
created_at_end: Union[str, datetime, None] | NotGiven = NOT_GIVEN,
created_at_start: Union[str, datetime, None] | NotGiven = NOT_GIVEN,
custom_metadata: Optional[str] | NotGiven = NOT_GIVEN,
- failed_evals: Optional[List[str]] | NotGiven = NOT_GIVEN,
+ failed_evals: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
guardrailed: Optional[bool] | NotGiven = NOT_GIVEN,
has_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
needs_review: Optional[bool] | NotGiven = NOT_GIVEN,
offset: int | NotGiven = NOT_GIVEN,
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- passed_evals: Optional[List[str]] | NotGiven = NOT_GIVEN,
+ passed_evals: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
primary_eval_issue: Optional[
List[Literal["hallucination", "search_failure", "unhelpful", "difficult_query", "ungrounded"]]
]
| NotGiven = NOT_GIVEN,
sort: Optional[Literal["created_at", "primary_eval_issue_score", "total_count", "custom_rank", "impact_score"]]
| NotGiven = NOT_GIVEN,
- tool_call_names: Optional[List[str]] | NotGiven = NOT_GIVEN,
+ tool_call_names: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
was_cache_hit: Optional[bool] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -550,13 +550,13 @@ def list(
created_at_end: Union[str, datetime, None] | NotGiven = NOT_GIVEN,
created_at_start: Union[str, datetime, None] | NotGiven = NOT_GIVEN,
custom_metadata: Optional[str] | NotGiven = NOT_GIVEN,
- failed_evals: Optional[List[str]] | NotGiven = NOT_GIVEN,
+ failed_evals: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
guardrailed: Optional[bool] | NotGiven = NOT_GIVEN,
has_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
offset: int | NotGiven = NOT_GIVEN,
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- passed_evals: Optional[List[str]] | NotGiven = NOT_GIVEN,
+ passed_evals: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
primary_eval_issue: Optional[
List[Literal["hallucination", "search_failure", "unhelpful", "difficult_query", "ungrounded"]]
]
@@ -573,7 +573,7 @@ def list(
]
]
| NotGiven = NOT_GIVEN,
- tool_call_names: Optional[List[str]] | NotGiven = NOT_GIVEN,
+ tool_call_names: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
was_cache_hit: Optional[bool] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -696,19 +696,19 @@ async def list_by_group(
created_at_end: Union[str, datetime, None] | NotGiven = NOT_GIVEN,
created_at_start: Union[str, datetime, None] | NotGiven = NOT_GIVEN,
custom_metadata: Optional[str] | NotGiven = NOT_GIVEN,
- failed_evals: Optional[List[str]] | NotGiven = NOT_GIVEN,
+ failed_evals: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
guardrailed: Optional[bool] | NotGiven = NOT_GIVEN,
has_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
needs_review: Optional[bool] | NotGiven = NOT_GIVEN,
offset: int | NotGiven = NOT_GIVEN,
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- passed_evals: Optional[List[str]] | NotGiven = NOT_GIVEN,
+ passed_evals: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
primary_eval_issue: Optional[
List[Literal["hallucination", "search_failure", "unhelpful", "difficult_query", "ungrounded"]]
]
| NotGiven = NOT_GIVEN,
- remediation_ids: List[str] | NotGiven = NOT_GIVEN,
+ remediation_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN,
sort: Optional[
Literal[
"created_at",
@@ -721,7 +721,7 @@ async def list_by_group(
]
]
| NotGiven = NOT_GIVEN,
- tool_call_names: Optional[List[str]] | NotGiven = NOT_GIVEN,
+ tool_call_names: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
was_cache_hit: Optional[bool] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -807,21 +807,21 @@ def list_groups(
created_at_end: Union[str, datetime, None] | NotGiven = NOT_GIVEN,
created_at_start: Union[str, datetime, None] | NotGiven = NOT_GIVEN,
custom_metadata: Optional[str] | NotGiven = NOT_GIVEN,
- failed_evals: Optional[List[str]] | NotGiven = NOT_GIVEN,
+ failed_evals: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
guardrailed: Optional[bool] | NotGiven = NOT_GIVEN,
has_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
needs_review: Optional[bool] | NotGiven = NOT_GIVEN,
offset: int | NotGiven = NOT_GIVEN,
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- passed_evals: Optional[List[str]] | NotGiven = NOT_GIVEN,
+ passed_evals: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
primary_eval_issue: Optional[
List[Literal["hallucination", "search_failure", "unhelpful", "difficult_query", "ungrounded"]]
]
| NotGiven = NOT_GIVEN,
sort: Optional[Literal["created_at", "primary_eval_issue_score", "total_count", "custom_rank", "impact_score"]]
| NotGiven = NOT_GIVEN,
- tool_call_names: Optional[List[str]] | NotGiven = NOT_GIVEN,
+ tool_call_names: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
was_cache_hit: Optional[bool] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
diff --git a/src/codex/resources/tlm.py b/src/codex/resources/tlm.py
index 5d66ec0..de652ef 100644
--- a/src/codex/resources/tlm.py
+++ b/src/codex/resources/tlm.py
@@ -2,13 +2,13 @@
from __future__ import annotations
-from typing import List, Optional
+from typing import Optional
from typing_extensions import Literal
import httpx
from ..types import tlm_score_params, tlm_prompt_params
-from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr
from .._utils import maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
@@ -49,7 +49,7 @@ def prompt(
self,
*,
prompt: str,
- constrain_outputs: Optional[List[str]] | NotGiven = NOT_GIVEN,
+ constrain_outputs: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
options: Optional[tlm_prompt_params.Options] | NotGiven = NOT_GIVEN,
quality_preset: Literal["best", "high", "medium", "low", "base"] | NotGiven = NOT_GIVEN,
task: Optional[str] | NotGiven = NOT_GIVEN,
@@ -187,7 +187,7 @@ def score(
*,
prompt: str,
response: str,
- constrain_outputs: Optional[List[str]] | NotGiven = NOT_GIVEN,
+ constrain_outputs: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
options: Optional[tlm_score_params.Options] | NotGiven = NOT_GIVEN,
quality_preset: Literal["best", "high", "medium", "low", "base"] | NotGiven = NOT_GIVEN,
task: Optional[str] | NotGiven = NOT_GIVEN,
@@ -351,7 +351,7 @@ async def prompt(
self,
*,
prompt: str,
- constrain_outputs: Optional[List[str]] | NotGiven = NOT_GIVEN,
+ constrain_outputs: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
options: Optional[tlm_prompt_params.Options] | NotGiven = NOT_GIVEN,
quality_preset: Literal["best", "high", "medium", "low", "base"] | NotGiven = NOT_GIVEN,
task: Optional[str] | NotGiven = NOT_GIVEN,
@@ -489,7 +489,7 @@ async def score(
*,
prompt: str,
response: str,
- constrain_outputs: Optional[List[str]] | NotGiven = NOT_GIVEN,
+ constrain_outputs: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
options: Optional[tlm_score_params.Options] | NotGiven = NOT_GIVEN,
quality_preset: Literal["best", "high", "medium", "low", "base"] | NotGiven = NOT_GIVEN,
task: Optional[str] | NotGiven = NOT_GIVEN,
diff --git a/src/codex/types/project_validate_params.py b/src/codex/types/project_validate_params.py
index 719ad3d..56e2ae9 100644
--- a/src/codex/types/project_validate_params.py
+++ b/src/codex/types/project_validate_params.py
@@ -3,9 +3,10 @@
from __future__ import annotations
import builtins
-from typing import Dict, List, Union, Iterable, Optional
+from typing import Dict, Union, Iterable, Optional
from typing_extensions import Literal, Required, Annotated, TypeAlias, TypedDict
+from .._types import SequenceNotStr
from .._utils import PropertyInfo
__all__ = [
@@ -68,7 +69,7 @@ class ProjectValidateParams(TypedDict, total=False):
use_llm_matching: Optional[bool]
- constrain_outputs: Optional[List[str]]
+ constrain_outputs: Optional[SequenceNotStr[str]]
custom_eval_thresholds: Optional[Dict[str, float]]
"""Optional custom thresholds for specific evals.
@@ -656,7 +657,7 @@ class Options(TypedDict, total=False):
disable_trustworthiness: bool
- log: List[str]
+ log: SequenceNotStr[str]
max_tokens: int
diff --git a/src/codex/types/projects/query_log_list_by_group_params.py b/src/codex/types/projects/query_log_list_by_group_params.py
index 5ca6d10..afdcb7f 100644
--- a/src/codex/types/projects/query_log_list_by_group_params.py
+++ b/src/codex/types/projects/query_log_list_by_group_params.py
@@ -6,6 +6,7 @@
from datetime import datetime
from typing_extensions import Literal, Annotated, TypedDict
+from ..._types import SequenceNotStr
from ..._utils import PropertyInfo
__all__ = ["QueryLogListByGroupParams"]
@@ -21,7 +22,7 @@ class QueryLogListByGroupParams(TypedDict, total=False):
custom_metadata: Optional[str]
"""Filter by custom metadata as JSON string: {"key1": "value1", "key2": "value2"}"""
- failed_evals: Optional[List[str]]
+ failed_evals: Optional[SequenceNotStr[str]]
"""Filter by evals that failed"""
guardrailed: Optional[bool]
@@ -39,7 +40,7 @@ class QueryLogListByGroupParams(TypedDict, total=False):
order: Literal["asc", "desc"]
- passed_evals: Optional[List[str]]
+ passed_evals: Optional[SequenceNotStr[str]]
"""Filter by evals that passed"""
primary_eval_issue: Optional[
@@ -47,7 +48,7 @@ class QueryLogListByGroupParams(TypedDict, total=False):
]
"""Filter logs that have ANY of these primary evaluation issues (OR operation)"""
- remediation_ids: List[str]
+ remediation_ids: SequenceNotStr[str]
"""List of groups to list child logs for"""
sort: Optional[
@@ -62,7 +63,7 @@ class QueryLogListByGroupParams(TypedDict, total=False):
]
]
- tool_call_names: Optional[List[str]]
+ tool_call_names: Optional[SequenceNotStr[str]]
"""Filter by names of tools called in the assistant response"""
was_cache_hit: Optional[bool]
diff --git a/src/codex/types/projects/query_log_list_groups_params.py b/src/codex/types/projects/query_log_list_groups_params.py
index 6adefdf..abb6a54 100644
--- a/src/codex/types/projects/query_log_list_groups_params.py
+++ b/src/codex/types/projects/query_log_list_groups_params.py
@@ -6,6 +6,7 @@
from datetime import datetime
from typing_extensions import Literal, Annotated, TypedDict
+from ..._types import SequenceNotStr
from ..._utils import PropertyInfo
__all__ = ["QueryLogListGroupsParams"]
@@ -21,7 +22,7 @@ class QueryLogListGroupsParams(TypedDict, total=False):
custom_metadata: Optional[str]
"""Filter by custom metadata as JSON string: {"key1": "value1", "key2": "value2"}"""
- failed_evals: Optional[List[str]]
+ failed_evals: Optional[SequenceNotStr[str]]
"""Filter by evals that failed"""
guardrailed: Optional[bool]
@@ -39,7 +40,7 @@ class QueryLogListGroupsParams(TypedDict, total=False):
order: Literal["asc", "desc"]
- passed_evals: Optional[List[str]]
+ passed_evals: Optional[SequenceNotStr[str]]
"""Filter by evals that passed"""
primary_eval_issue: Optional[
@@ -49,7 +50,7 @@ class QueryLogListGroupsParams(TypedDict, total=False):
sort: Optional[Literal["created_at", "primary_eval_issue_score", "total_count", "custom_rank", "impact_score"]]
- tool_call_names: Optional[List[str]]
+ tool_call_names: Optional[SequenceNotStr[str]]
"""Filter by names of tools called in the assistant response"""
was_cache_hit: Optional[bool]
diff --git a/src/codex/types/projects/query_log_list_params.py b/src/codex/types/projects/query_log_list_params.py
index 2da646f..20a209b 100644
--- a/src/codex/types/projects/query_log_list_params.py
+++ b/src/codex/types/projects/query_log_list_params.py
@@ -6,6 +6,7 @@
from datetime import datetime
from typing_extensions import Literal, Annotated, TypedDict
+from ..._types import SequenceNotStr
from ..._utils import PropertyInfo
__all__ = ["QueryLogListParams"]
@@ -21,7 +22,7 @@ class QueryLogListParams(TypedDict, total=False):
custom_metadata: Optional[str]
"""Filter by custom metadata as JSON string: {"key1": "value1", "key2": "value2"}"""
- failed_evals: Optional[List[str]]
+ failed_evals: Optional[SequenceNotStr[str]]
"""Filter by evals that failed"""
guardrailed: Optional[bool]
@@ -36,7 +37,7 @@ class QueryLogListParams(TypedDict, total=False):
order: Literal["asc", "desc"]
- passed_evals: Optional[List[str]]
+ passed_evals: Optional[SequenceNotStr[str]]
"""Filter by evals that passed"""
primary_eval_issue: Optional[
@@ -56,7 +57,7 @@ class QueryLogListParams(TypedDict, total=False):
]
]
- tool_call_names: Optional[List[str]]
+ tool_call_names: Optional[SequenceNotStr[str]]
"""Filter by names of tools called in the assistant response"""
was_cache_hit: Optional[bool]
diff --git a/src/codex/types/tlm_prompt_params.py b/src/codex/types/tlm_prompt_params.py
index 821c381..6a2a9da 100644
--- a/src/codex/types/tlm_prompt_params.py
+++ b/src/codex/types/tlm_prompt_params.py
@@ -2,16 +2,18 @@
from __future__ import annotations
-from typing import List, Iterable, Optional
+from typing import Iterable, Optional
from typing_extensions import Literal, Required, TypedDict
+from .._types import SequenceNotStr
+
__all__ = ["TlmPromptParams", "Options"]
class TlmPromptParams(TypedDict, total=False):
prompt: Required[str]
- constrain_outputs: Optional[List[str]]
+ constrain_outputs: Optional[SequenceNotStr[str]]
options: Optional[Options]
"""
@@ -119,7 +121,7 @@ class Options(TypedDict, total=False):
disable_trustworthiness: bool
- log: List[str]
+ log: SequenceNotStr[str]
max_tokens: int
diff --git a/src/codex/types/tlm_score_params.py b/src/codex/types/tlm_score_params.py
index d676a1d..cef4f49 100644
--- a/src/codex/types/tlm_score_params.py
+++ b/src/codex/types/tlm_score_params.py
@@ -2,9 +2,11 @@
from __future__ import annotations
-from typing import List, Iterable, Optional
+from typing import Iterable, Optional
from typing_extensions import Literal, Required, TypedDict
+from .._types import SequenceNotStr
+
__all__ = ["TlmScoreParams", "Options"]
@@ -13,7 +15,7 @@ class TlmScoreParams(TypedDict, total=False):
response: Required[str]
- constrain_outputs: Optional[List[str]]
+ constrain_outputs: Optional[SequenceNotStr[str]]
options: Optional[Options]
"""
@@ -121,7 +123,7 @@ class Options(TypedDict, total=False):
disable_trustworthiness: bool
- log: List[str]
+ log: SequenceNotStr[str]
max_tokens: int
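With the parameter types switched to SequenceNotStr, any non-string sequence now satisfies these fields (a sketch; the prompt and outputs are placeholders and a configured client is assumed):

    from codex import Codex

    client = Codex()
    # A tuple now type-checks for constrain_outputs, where the previous
    # List[str] annotation nudged callers toward lists only.
    client.tlm.prompt(
        prompt="Is the sky blue? Answer Yes or No.",
        constrain_outputs=("Yes", "No"),
    )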
From d9ffa3812e5eb836fc85532d57a92937ef3ad7ec Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Thu, 4 Sep 2025 02:29:39 +0000
Subject: [PATCH 10/15] feat: improve future compat with pydantic v3
---
src/codex/_base_client.py | 6 +-
src/codex/_compat.py | 96 ++++++++---------
src/codex/_models.py | 80 +++++++-------
src/codex/_utils/__init__.py | 10 +-
src/codex/_utils/_compat.py | 45 ++++++++
src/codex/_utils/_datetime_parse.py | 136 ++++++++++++++++++++++++
src/codex/_utils/_transform.py | 6 +-
src/codex/_utils/_typing.py | 2 +-
src/codex/_utils/_utils.py | 1 -
tests/test_models.py | 48 ++++-----
tests/test_transform.py | 16 +--
tests/test_utils/test_datetime_parse.py | 110 +++++++++++++++++++
tests/utils.py | 8 +-
13 files changed, 432 insertions(+), 132 deletions(-)
create mode 100644 src/codex/_utils/_compat.py
create mode 100644 src/codex/_utils/_datetime_parse.py
create mode 100644 tests/test_utils/test_datetime_parse.py
diff --git a/src/codex/_base_client.py b/src/codex/_base_client.py
index 870a472..e424fb7 100644
--- a/src/codex/_base_client.py
+++ b/src/codex/_base_client.py
@@ -59,7 +59,7 @@
ModelBuilderProtocol,
)
from ._utils import is_dict, is_list, asyncify, is_given, lru_cache, is_mapping
-from ._compat import PYDANTIC_V2, model_copy, model_dump
+from ._compat import PYDANTIC_V1, model_copy, model_dump
from ._models import GenericModel, FinalRequestOptions, validate_type, construct_type
from ._response import (
APIResponse,
@@ -232,7 +232,7 @@ def _set_private_attributes(
model: Type[_T],
options: FinalRequestOptions,
) -> None:
- if PYDANTIC_V2 and getattr(self, "__pydantic_private__", None) is None:
+ if (not PYDANTIC_V1) and getattr(self, "__pydantic_private__", None) is None:
self.__pydantic_private__ = {}
self._model = model
@@ -320,7 +320,7 @@ def _set_private_attributes(
client: AsyncAPIClient,
options: FinalRequestOptions,
) -> None:
- if PYDANTIC_V2 and getattr(self, "__pydantic_private__", None) is None:
+ if (not PYDANTIC_V1) and getattr(self, "__pydantic_private__", None) is None:
self.__pydantic_private__ = {}
self._model = model
diff --git a/src/codex/_compat.py b/src/codex/_compat.py
index 92d9ee6..bdef67f 100644
--- a/src/codex/_compat.py
+++ b/src/codex/_compat.py
@@ -12,14 +12,13 @@
_T = TypeVar("_T")
_ModelT = TypeVar("_ModelT", bound=pydantic.BaseModel)
-# --------------- Pydantic v2 compatibility ---------------
+# --------------- Pydantic v2, v3 compatibility ---------------
# Pyright incorrectly reports some of our functions as overriding a method when they don't
# pyright: reportIncompatibleMethodOverride=false
-PYDANTIC_V2 = pydantic.VERSION.startswith("2.")
+PYDANTIC_V1 = pydantic.VERSION.startswith("1.")
-# v1 re-exports
if TYPE_CHECKING:
def parse_date(value: date | StrBytesIntFloat) -> date: # noqa: ARG001
@@ -44,90 +43,92 @@ def is_typeddict(type_: type[Any]) -> bool: # noqa: ARG001
...
else:
- if PYDANTIC_V2:
- from pydantic.v1.typing import (
+ # v1 re-exports
+ if PYDANTIC_V1:
+ from pydantic.typing import (
get_args as get_args,
is_union as is_union,
get_origin as get_origin,
is_typeddict as is_typeddict,
is_literal_type as is_literal_type,
)
- from pydantic.v1.datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime
+ from pydantic.datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime
else:
- from pydantic.typing import (
+ from ._utils import (
get_args as get_args,
is_union as is_union,
get_origin as get_origin,
+ parse_date as parse_date,
is_typeddict as is_typeddict,
+ parse_datetime as parse_datetime,
is_literal_type as is_literal_type,
)
- from pydantic.datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime
# refactored config
if TYPE_CHECKING:
from pydantic import ConfigDict as ConfigDict
else:
- if PYDANTIC_V2:
- from pydantic import ConfigDict
- else:
+ if PYDANTIC_V1:
# TODO: provide an error message here?
ConfigDict = None
+ else:
+ from pydantic import ConfigDict as ConfigDict
# renamed methods / properties
def parse_obj(model: type[_ModelT], value: object) -> _ModelT:
- if PYDANTIC_V2:
- return model.model_validate(value)
- else:
+ if PYDANTIC_V1:
return cast(_ModelT, model.parse_obj(value)) # pyright: ignore[reportDeprecated, reportUnnecessaryCast]
+ else:
+ return model.model_validate(value)
def field_is_required(field: FieldInfo) -> bool:
- if PYDANTIC_V2:
- return field.is_required()
- return field.required # type: ignore
+ if PYDANTIC_V1:
+ return field.required # type: ignore
+ return field.is_required()
def field_get_default(field: FieldInfo) -> Any:
value = field.get_default()
- if PYDANTIC_V2:
- from pydantic_core import PydanticUndefined
-
- if value == PydanticUndefined:
- return None
+ if PYDANTIC_V1:
return value
+ from pydantic_core import PydanticUndefined
+
+ if value == PydanticUndefined:
+ return None
return value
def field_outer_type(field: FieldInfo) -> Any:
- if PYDANTIC_V2:
- return field.annotation
- return field.outer_type_ # type: ignore
+ if PYDANTIC_V1:
+ return field.outer_type_ # type: ignore
+ return field.annotation
def get_model_config(model: type[pydantic.BaseModel]) -> Any:
- if PYDANTIC_V2:
- return model.model_config
- return model.__config__ # type: ignore
+ if PYDANTIC_V1:
+ return model.__config__ # type: ignore
+ return model.model_config
def get_model_fields(model: type[pydantic.BaseModel]) -> dict[str, FieldInfo]:
- if PYDANTIC_V2:
- return model.model_fields
- return model.__fields__ # type: ignore
+ if PYDANTIC_V1:
+ return model.__fields__ # type: ignore
+ return model.model_fields
def model_copy(model: _ModelT, *, deep: bool = False) -> _ModelT:
- if PYDANTIC_V2:
- return model.model_copy(deep=deep)
- return model.copy(deep=deep) # type: ignore
+ if PYDANTIC_V1:
+ return model.copy(deep=deep) # type: ignore
+ return model.model_copy(deep=deep)
def model_json(model: pydantic.BaseModel, *, indent: int | None = None) -> str:
- if PYDANTIC_V2:
- return model.model_dump_json(indent=indent)
- return model.json(indent=indent) # type: ignore
+ if PYDANTIC_V1:
+ return model.json(indent=indent) # type: ignore
+ return model.model_dump_json(indent=indent)
def model_dump(
@@ -139,14 +140,14 @@ def model_dump(
warnings: bool = True,
mode: Literal["json", "python"] = "python",
) -> dict[str, Any]:
- if PYDANTIC_V2 or hasattr(model, "model_dump"):
+ if (not PYDANTIC_V1) or hasattr(model, "model_dump"):
return model.model_dump(
mode=mode,
exclude=exclude,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
# warnings are not supported in Pydantic v1
- warnings=warnings if PYDANTIC_V2 else True,
+ warnings=True if PYDANTIC_V1 else warnings,
)
return cast(
"dict[str, Any]",
@@ -159,9 +160,9 @@ def model_dump(
def model_parse(model: type[_ModelT], data: Any) -> _ModelT:
- if PYDANTIC_V2:
- return model.model_validate(data)
- return model.parse_obj(data) # pyright: ignore[reportDeprecated]
+ if PYDANTIC_V1:
+ return model.parse_obj(data) # pyright: ignore[reportDeprecated]
+ return model.model_validate(data)
# generic models
@@ -170,17 +171,16 @@ def model_parse(model: type[_ModelT], data: Any) -> _ModelT:
class GenericModel(pydantic.BaseModel): ...
else:
- if PYDANTIC_V2:
+ if PYDANTIC_V1:
+ import pydantic.generics
+
+ class GenericModel(pydantic.generics.GenericModel, pydantic.BaseModel): ...
+ else:
# there no longer needs to be a distinction in v2 but
# we still have to create our own subclass to avoid
# inconsistent MRO ordering errors
class GenericModel(pydantic.BaseModel): ...
- else:
- import pydantic.generics
-
- class GenericModel(pydantic.generics.GenericModel, pydantic.BaseModel): ...
-
# cached properties
if TYPE_CHECKING:
diff --git a/src/codex/_models.py b/src/codex/_models.py
index 92f7c10..3a6017e 100644
--- a/src/codex/_models.py
+++ b/src/codex/_models.py
@@ -50,7 +50,7 @@
strip_annotated_type,
)
from ._compat import (
- PYDANTIC_V2,
+ PYDANTIC_V1,
ConfigDict,
GenericModel as BaseGenericModel,
get_args,
@@ -81,11 +81,7 @@ class _ConfigProtocol(Protocol):
class BaseModel(pydantic.BaseModel):
- if PYDANTIC_V2:
- model_config: ClassVar[ConfigDict] = ConfigDict(
- extra="allow", defer_build=coerce_boolean(os.environ.get("DEFER_PYDANTIC_BUILD", "true"))
- )
- else:
+ if PYDANTIC_V1:
@property
@override
@@ -95,6 +91,10 @@ def model_fields_set(self) -> set[str]:
class Config(pydantic.BaseConfig): # pyright: ignore[reportDeprecated]
extra: Any = pydantic.Extra.allow # type: ignore
+ else:
+ model_config: ClassVar[ConfigDict] = ConfigDict(
+ extra="allow", defer_build=coerce_boolean(os.environ.get("DEFER_PYDANTIC_BUILD", "true"))
+ )
def to_dict(
self,
@@ -215,25 +215,25 @@ def construct( # pyright: ignore[reportIncompatibleMethodOverride]
if key not in model_fields:
parsed = construct_type(value=value, type_=extra_field_type) if extra_field_type is not None else value
- if PYDANTIC_V2:
- _extra[key] = parsed
- else:
+ if PYDANTIC_V1:
_fields_set.add(key)
fields_values[key] = parsed
+ else:
+ _extra[key] = parsed
object.__setattr__(m, "__dict__", fields_values)
- if PYDANTIC_V2:
- # these properties are copied from Pydantic's `model_construct()` method
- object.__setattr__(m, "__pydantic_private__", None)
- object.__setattr__(m, "__pydantic_extra__", _extra)
- object.__setattr__(m, "__pydantic_fields_set__", _fields_set)
- else:
+ if PYDANTIC_V1:
# init_private_attributes() does not exist in v2
m._init_private_attributes() # type: ignore
# copied from Pydantic v1's `construct()` method
object.__setattr__(m, "__fields_set__", _fields_set)
+ else:
+ # these properties are copied from Pydantic's `model_construct()` method
+ object.__setattr__(m, "__pydantic_private__", None)
+ object.__setattr__(m, "__pydantic_extra__", _extra)
+ object.__setattr__(m, "__pydantic_fields_set__", _fields_set)
return m
@@ -243,7 +243,7 @@ def construct( # pyright: ignore[reportIncompatibleMethodOverride]
# although not in practice
model_construct = construct
- if not PYDANTIC_V2:
+ if PYDANTIC_V1:
# we define aliases for some of the new pydantic v2 methods so
# that we can just document these methods without having to specify
# a specific pydantic version as some users may not know which
@@ -363,10 +363,10 @@ def _construct_field(value: object, field: FieldInfo, key: str) -> object:
if value is None:
return field_get_default(field)
- if PYDANTIC_V2:
- type_ = field.annotation
- else:
+ if PYDANTIC_V1:
type_ = cast(type, field.outer_type_) # type: ignore
+ else:
+ type_ = field.annotation # type: ignore
if type_ is None:
raise RuntimeError(f"Unexpected field type is None for {key}")
@@ -375,7 +375,7 @@ def _construct_field(value: object, field: FieldInfo, key: str) -> object:
def _get_extra_fields_type(cls: type[pydantic.BaseModel]) -> type | None:
- if not PYDANTIC_V2:
+ if PYDANTIC_V1:
# TODO
return None
@@ -628,30 +628,30 @@ def _build_discriminated_union_meta(*, union: type, meta_annotations: tuple[Any,
for variant in get_args(union):
variant = strip_annotated_type(variant)
if is_basemodel_type(variant):
- if PYDANTIC_V2:
- field = _extract_field_schema_pv2(variant, discriminator_field_name)
- if not field:
+ if PYDANTIC_V1:
+ field_info = cast("dict[str, FieldInfo]", variant.__fields__).get(discriminator_field_name) # pyright: ignore[reportDeprecated, reportUnnecessaryCast]
+ if not field_info:
continue
# Note: if one variant defines an alias then they all should
- discriminator_alias = field.get("serialization_alias")
-
- field_schema = field["schema"]
+ discriminator_alias = field_info.alias
- if field_schema["type"] == "literal":
- for entry in cast("LiteralSchema", field_schema)["expected"]:
+ if (annotation := getattr(field_info, "annotation", None)) and is_literal_type(annotation):
+ for entry in get_args(annotation):
if isinstance(entry, str):
mapping[entry] = variant
else:
- field_info = cast("dict[str, FieldInfo]", variant.__fields__).get(discriminator_field_name) # pyright: ignore[reportDeprecated, reportUnnecessaryCast]
- if not field_info:
+ field = _extract_field_schema_pv2(variant, discriminator_field_name)
+ if not field:
continue
# Note: if one variant defines an alias then they all should
- discriminator_alias = field_info.alias
+ discriminator_alias = field.get("serialization_alias")
- if (annotation := getattr(field_info, "annotation", None)) and is_literal_type(annotation):
- for entry in get_args(annotation):
+ field_schema = field["schema"]
+
+ if field_schema["type"] == "literal":
+ for entry in cast("LiteralSchema", field_schema)["expected"]:
if isinstance(entry, str):
mapping[entry] = variant
@@ -714,7 +714,7 @@ class GenericModel(BaseGenericModel, BaseModel):
pass
-if PYDANTIC_V2:
+if not PYDANTIC_V1:
from pydantic import TypeAdapter as _TypeAdapter
_CachedTypeAdapter = cast("TypeAdapter[object]", lru_cache(maxsize=None)(_TypeAdapter))
@@ -782,12 +782,12 @@ class FinalRequestOptions(pydantic.BaseModel):
json_data: Union[Body, None] = None
extra_json: Union[AnyMapping, None] = None
- if PYDANTIC_V2:
- model_config: ClassVar[ConfigDict] = ConfigDict(arbitrary_types_allowed=True)
- else:
+ if PYDANTIC_V1:
class Config(pydantic.BaseConfig): # pyright: ignore[reportDeprecated]
arbitrary_types_allowed: bool = True
+ else:
+ model_config: ClassVar[ConfigDict] = ConfigDict(arbitrary_types_allowed=True)
def get_max_retries(self, max_retries: int) -> int:
if isinstance(self.max_retries, NotGiven):
@@ -820,9 +820,9 @@ def construct( # type: ignore
key: strip_not_given(value)
for key, value in values.items()
}
- if PYDANTIC_V2:
- return super().model_construct(_fields_set, **kwargs)
- return cast(FinalRequestOptions, super().construct(_fields_set, **kwargs)) # pyright: ignore[reportDeprecated]
+ if PYDANTIC_V1:
+ return cast(FinalRequestOptions, super().construct(_fields_set, **kwargs)) # pyright: ignore[reportDeprecated]
+ return super().model_construct(_fields_set, **kwargs)
if not TYPE_CHECKING:
# type checkers incorrectly complain about this assignment
diff --git a/src/codex/_utils/__init__.py b/src/codex/_utils/__init__.py
index ca547ce..dc64e29 100644
--- a/src/codex/_utils/__init__.py
+++ b/src/codex/_utils/__init__.py
@@ -10,7 +10,6 @@
lru_cache as lru_cache,
is_mapping as is_mapping,
is_tuple_t as is_tuple_t,
- parse_date as parse_date,
is_iterable as is_iterable,
is_sequence as is_sequence,
coerce_float as coerce_float,
@@ -23,7 +22,6 @@
coerce_boolean as coerce_boolean,
coerce_integer as coerce_integer,
file_from_path as file_from_path,
- parse_datetime as parse_datetime,
strip_not_given as strip_not_given,
deepcopy_minimal as deepcopy_minimal,
get_async_library as get_async_library,
@@ -32,6 +30,13 @@
maybe_coerce_boolean as maybe_coerce_boolean,
maybe_coerce_integer as maybe_coerce_integer,
)
+from ._compat import (
+ get_args as get_args,
+ is_union as is_union,
+ get_origin as get_origin,
+ is_typeddict as is_typeddict,
+ is_literal_type as is_literal_type,
+)
from ._typing import (
is_list_type as is_list_type,
is_union_type as is_union_type,
@@ -56,3 +61,4 @@
function_has_argument as function_has_argument,
assert_signatures_in_sync as assert_signatures_in_sync,
)
+from ._datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime
diff --git a/src/codex/_utils/_compat.py b/src/codex/_utils/_compat.py
new file mode 100644
index 0000000..dd70323
--- /dev/null
+++ b/src/codex/_utils/_compat.py
@@ -0,0 +1,45 @@
+from __future__ import annotations
+
+import sys
+import typing_extensions
+from typing import Any, Type, Union, Literal, Optional
+from datetime import date, datetime
+from typing_extensions import get_args as _get_args, get_origin as _get_origin
+
+from .._types import StrBytesIntFloat
+from ._datetime_parse import parse_date as _parse_date, parse_datetime as _parse_datetime
+
+_LITERAL_TYPES = {Literal, typing_extensions.Literal}
+
+
+def get_args(tp: type[Any]) -> tuple[Any, ...]:
+ return _get_args(tp)
+
+
+def get_origin(tp: type[Any]) -> type[Any] | None:
+ return _get_origin(tp)
+
+
+def is_union(tp: Optional[Type[Any]]) -> bool:
+ if sys.version_info < (3, 10):
+ return tp is Union # type: ignore[comparison-overlap]
+ else:
+ import types
+
+ return tp is Union or tp is types.UnionType
+
+
+def is_typeddict(tp: Type[Any]) -> bool:
+ return typing_extensions.is_typeddict(tp)
+
+
+def is_literal_type(tp: Type[Any]) -> bool:
+ return get_origin(tp) in _LITERAL_TYPES
+
+
+def parse_date(value: Union[date, StrBytesIntFloat]) -> date:
+ return _parse_date(value)
+
+
+def parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime:
+ return _parse_datetime(value)
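Note (hedged usage sketch, assuming this patch is applied): the helpers above are re-exported from `codex._utils` by the `__init__.py` hunk earlier, so they can be exercised directly:

```python
from typing import Literal, Optional

from codex._utils import get_args, get_origin, is_literal_type, is_union

assert is_literal_type(Literal["a", "b"])
assert get_args(Literal["a", "b"]) == ("a", "b")
# Optional[int] is Union[int, None], so its origin satisfies is_union
assert is_union(get_origin(Optional[int]))
assert not is_union(get_origin(list[int]))
```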
diff --git a/src/codex/_utils/_datetime_parse.py b/src/codex/_utils/_datetime_parse.py
new file mode 100644
index 0000000..7cb9d9e
--- /dev/null
+++ b/src/codex/_utils/_datetime_parse.py
@@ -0,0 +1,136 @@
+"""
+This file contains code from https://github.com/pydantic/pydantic/blob/main/pydantic/v1/datetime_parse.py
+without the Pydantic v1 specific errors.
+"""
+
+from __future__ import annotations
+
+import re
+from typing import Dict, Union, Optional
+from datetime import date, datetime, timezone, timedelta
+
+from .._types import StrBytesIntFloat
+
+date_expr = r"(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})"
+time_expr = (
+ r"(?P<hour>\d{1,2}):(?P<minute>\d{1,2})"
+ r"(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?"
+ r"(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$"
+)
+
+date_re = re.compile(f"{date_expr}$")
+datetime_re = re.compile(f"{date_expr}[T ]{time_expr}")
+
+
+EPOCH = datetime(1970, 1, 1)
+# if greater than this, the number is in ms, if less than or equal it's in seconds
+# (in seconds this is 11th October 2603, in ms it's 20th August 1970)
+MS_WATERSHED = int(2e10)
+# slightly more than datetime.max in ns - (datetime.max - EPOCH).total_seconds() * 1e9
+MAX_NUMBER = int(3e20)
+
+
+def _get_numeric(value: StrBytesIntFloat, native_expected_type: str) -> Union[None, int, float]:
+ if isinstance(value, (int, float)):
+ return value
+ try:
+ return float(value)
+ except ValueError:
+ return None
+ except TypeError:
+ raise TypeError(f"invalid type; expected {native_expected_type}, string, bytes, int or float") from None
+
+
+def _from_unix_seconds(seconds: Union[int, float]) -> datetime:
+ if seconds > MAX_NUMBER:
+ return datetime.max
+ elif seconds < -MAX_NUMBER:
+ return datetime.min
+
+ while abs(seconds) > MS_WATERSHED:
+ seconds /= 1000
+ dt = EPOCH + timedelta(seconds=seconds)
+ return dt.replace(tzinfo=timezone.utc)
+
+
+def _parse_timezone(value: Optional[str]) -> Union[None, int, timezone]:
+ if value == "Z":
+ return timezone.utc
+ elif value is not None:
+ offset_mins = int(value[-2:]) if len(value) > 3 else 0
+ offset = 60 * int(value[1:3]) + offset_mins
+ if value[0] == "-":
+ offset = -offset
+ return timezone(timedelta(minutes=offset))
+ else:
+ return None
+
+
+def parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime:
+ """
+ Parse a datetime/int/float/string and return a datetime.datetime.
+
+ This function supports time zone offsets. When the input contains one,
+ the output uses a timezone with a fixed offset from UTC.
+
+ Raise ValueError if the input is well formatted but not a valid datetime.
+ Raise ValueError if the input isn't well formatted.
+ """
+ if isinstance(value, datetime):
+ return value
+
+ number = _get_numeric(value, "datetime")
+ if number is not None:
+ return _from_unix_seconds(number)
+
+ if isinstance(value, bytes):
+ value = value.decode()
+
+ assert not isinstance(value, (float, int))
+
+ match = datetime_re.match(value)
+ if match is None:
+ raise ValueError("invalid datetime format")
+
+ kw = match.groupdict()
+ if kw["microsecond"]:
+ kw["microsecond"] = kw["microsecond"].ljust(6, "0")
+
+ tzinfo = _parse_timezone(kw.pop("tzinfo"))
+ kw_: Dict[str, Union[None, int, timezone]] = {k: int(v) for k, v in kw.items() if v is not None}
+ kw_["tzinfo"] = tzinfo
+
+ return datetime(**kw_) # type: ignore
+
+
+def parse_date(value: Union[date, StrBytesIntFloat]) -> date:
+ """
+ Parse a date/int/float/string and return a datetime.date.
+
+ Raise ValueError if the input is well formatted but not a valid date.
+ Raise ValueError if the input isn't well formatted.
+ """
+ if isinstance(value, date):
+ if isinstance(value, datetime):
+ return value.date()
+ else:
+ return value
+
+ number = _get_numeric(value, "date")
+ if number is not None:
+ return _from_unix_seconds(number).date()
+
+ if isinstance(value, bytes):
+ value = value.decode()
+
+ assert not isinstance(value, (float, int))
+ match = date_re.match(value)
+ if match is None:
+ raise ValueError("invalid date format")
+
+ kw = {k: int(v) for k, v in match.groupdict().items()}
+
+ try:
+ return date(**kw)
+ except ValueError:
+ raise ValueError("invalid date format") from None
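Note (quick behavioral sketch, not part of the patch): the vendored parsers accept ISO 8601 strings as well as unix timestamps, and numeric values above `MS_WATERSHED` are treated as milliseconds rather than seconds. The expected values below match the new tests added later in this series:

```python
from datetime import date, datetime, timezone

from codex._utils import parse_date, parse_datetime

assert parse_date("2012-04-23") == date(2012, 4, 23)
assert parse_datetime("2012-04-23T09:15:00Z") == datetime(2012, 4, 23, 9, 15, tzinfo=timezone.utc)
# Unix timestamp in seconds...
assert parse_datetime(1_494_012_444) == datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)
# ...and the same instant expressed in milliseconds (above MS_WATERSHED)
assert parse_datetime(1_494_012_444_000) == parse_datetime(1_494_012_444)
```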
diff --git a/src/codex/_utils/_transform.py b/src/codex/_utils/_transform.py
index f0bcefd..c19124f 100644
--- a/src/codex/_utils/_transform.py
+++ b/src/codex/_utils/_transform.py
@@ -19,6 +19,7 @@
is_sequence,
)
from .._files import is_base64_file_input
+from ._compat import get_origin, is_typeddict
from ._typing import (
is_list_type,
is_union_type,
@@ -29,7 +30,6 @@
is_annotated_type,
strip_annotated_type,
)
-from .._compat import get_origin, model_dump, is_typeddict
_T = TypeVar("_T")
@@ -169,6 +169,8 @@ def _transform_recursive(
Defaults to the same value as the `annotation` argument.
"""
+ from .._compat import model_dump
+
if inner_type is None:
inner_type = annotation
@@ -333,6 +335,8 @@ async def _async_transform_recursive(
Defaults to the same value as the `annotation` argument.
"""
+ from .._compat import model_dump
+
if inner_type is None:
inner_type = annotation
diff --git a/src/codex/_utils/_typing.py b/src/codex/_utils/_typing.py
index 845cd6b..193109f 100644
--- a/src/codex/_utils/_typing.py
+++ b/src/codex/_utils/_typing.py
@@ -15,7 +15,7 @@
from ._utils import lru_cache
from .._types import InheritsGeneric
-from .._compat import is_union as _is_union
+from ._compat import is_union as _is_union
def is_annotated_type(typ: type) -> bool:
diff --git a/src/codex/_utils/_utils.py b/src/codex/_utils/_utils.py
index ea3cf3f..f081859 100644
--- a/src/codex/_utils/_utils.py
+++ b/src/codex/_utils/_utils.py
@@ -22,7 +22,6 @@
import sniffio
from .._types import NotGiven, FileTypes, NotGivenOr, HeadersLike
-from .._compat import parse_date as parse_date, parse_datetime as parse_datetime
_T = TypeVar("_T")
_TupleT = TypeVar("_TupleT", bound=Tuple[object, ...])
diff --git a/tests/test_models.py b/tests/test_models.py
index a989702..61fe594 100644
--- a/tests/test_models.py
+++ b/tests/test_models.py
@@ -8,7 +8,7 @@
from pydantic import Field
from codex._utils import PropertyInfo
-from codex._compat import PYDANTIC_V2, parse_obj, model_dump, model_json
+from codex._compat import PYDANTIC_V1, parse_obj, model_dump, model_json
from codex._models import BaseModel, construct_type
@@ -294,12 +294,12 @@ class Model(BaseModel):
assert cast(bool, m.foo) is True
m = Model.construct(foo={"name": 3})
- if PYDANTIC_V2:
- assert isinstance(m.foo, Submodel1)
- assert m.foo.name == 3 # type: ignore
- else:
+ if PYDANTIC_V1:
assert isinstance(m.foo, Submodel2)
assert m.foo.name == "3"
+ else:
+ assert isinstance(m.foo, Submodel1)
+ assert m.foo.name == 3 # type: ignore
def test_list_of_unions() -> None:
@@ -426,10 +426,10 @@ class Model(BaseModel):
expected = datetime(2019, 12, 27, 18, 11, 19, 117000, tzinfo=timezone.utc)
- if PYDANTIC_V2:
- expected_json = '{"created_at":"2019-12-27T18:11:19.117000Z"}'
- else:
+ if PYDANTIC_V1:
expected_json = '{"created_at": "2019-12-27T18:11:19.117000+00:00"}'
+ else:
+ expected_json = '{"created_at":"2019-12-27T18:11:19.117000Z"}'
model = Model.construct(created_at="2019-12-27T18:11:19.117Z")
assert model.created_at == expected
@@ -531,7 +531,7 @@ class Model2(BaseModel):
assert m4.to_dict(mode="python") == {"created_at": datetime.fromisoformat(time_str)}
assert m4.to_dict(mode="json") == {"created_at": time_str}
- if not PYDANTIC_V2:
+ if PYDANTIC_V1:
with pytest.raises(ValueError, match="warnings is only supported in Pydantic v2"):
m.to_dict(warnings=False)
@@ -556,7 +556,7 @@ class Model(BaseModel):
assert m3.model_dump() == {"foo": None}
assert m3.model_dump(exclude_none=True) == {}
- if not PYDANTIC_V2:
+ if PYDANTIC_V1:
with pytest.raises(ValueError, match="round_trip is only supported in Pydantic v2"):
m.model_dump(round_trip=True)
@@ -580,10 +580,10 @@ class Model(BaseModel):
assert json.loads(m.to_json()) == {"FOO": "hello"}
assert json.loads(m.to_json(use_api_names=False)) == {"foo": "hello"}
- if PYDANTIC_V2:
- assert m.to_json(indent=None) == '{"FOO":"hello"}'
- else:
+ if PYDANTIC_V1:
assert m.to_json(indent=None) == '{"FOO": "hello"}'
+ else:
+ assert m.to_json(indent=None) == '{"FOO":"hello"}'
m2 = Model()
assert json.loads(m2.to_json()) == {}
@@ -595,7 +595,7 @@ class Model(BaseModel):
assert json.loads(m3.to_json()) == {"FOO": None}
assert json.loads(m3.to_json(exclude_none=True)) == {}
- if not PYDANTIC_V2:
+ if PYDANTIC_V1:
with pytest.raises(ValueError, match="warnings is only supported in Pydantic v2"):
m.to_json(warnings=False)
@@ -622,7 +622,7 @@ class Model(BaseModel):
assert json.loads(m3.model_dump_json()) == {"foo": None}
assert json.loads(m3.model_dump_json(exclude_none=True)) == {}
- if not PYDANTIC_V2:
+ if PYDANTIC_V1:
with pytest.raises(ValueError, match="round_trip is only supported in Pydantic v2"):
m.model_dump_json(round_trip=True)
@@ -679,12 +679,12 @@ class B(BaseModel):
)
assert isinstance(m, A)
assert m.type == "a"
- if PYDANTIC_V2:
- assert m.data == 100 # type: ignore[comparison-overlap]
- else:
+ if PYDANTIC_V1:
# pydantic v1 automatically converts inputs to strings
# if the expected type is a str
assert m.data == "100"
+ else:
+ assert m.data == 100 # type: ignore[comparison-overlap]
def test_discriminated_unions_unknown_variant() -> None:
@@ -768,12 +768,12 @@ class B(BaseModel):
)
assert isinstance(m, A)
assert m.foo_type == "a"
- if PYDANTIC_V2:
- assert m.data == 100 # type: ignore[comparison-overlap]
- else:
+ if PYDANTIC_V1:
# pydantic v1 automatically converts inputs to strings
# if the expected type is a str
assert m.data == "100"
+ else:
+ assert m.data == 100 # type: ignore[comparison-overlap]
def test_discriminated_unions_overlapping_discriminators_invalid_data() -> None:
@@ -833,7 +833,7 @@ class B(BaseModel):
assert UnionType.__discriminator__ is discriminator
-@pytest.mark.skipif(not PYDANTIC_V2, reason="TypeAliasType is not supported in Pydantic v1")
+@pytest.mark.skipif(PYDANTIC_V1, reason="TypeAliasType is not supported in Pydantic v1")
def test_type_alias_type() -> None:
Alias = TypeAliasType("Alias", str) # pyright: ignore
@@ -849,7 +849,7 @@ class Model(BaseModel):
assert m.union == "bar"
-@pytest.mark.skipif(not PYDANTIC_V2, reason="TypeAliasType is not supported in Pydantic v1")
+@pytest.mark.skipif(PYDANTIC_V1, reason="TypeAliasType is not supported in Pydantic v1")
def test_field_named_cls() -> None:
class Model(BaseModel):
cls: str
@@ -936,7 +936,7 @@ class Type2(BaseModel):
assert isinstance(model.value, InnerType2)
-@pytest.mark.skipif(not PYDANTIC_V2, reason="this is only supported in pydantic v2 for now")
+@pytest.mark.skipif(PYDANTIC_V1, reason="this is only supported in pydantic v2 for now")
def test_extra_properties() -> None:
class Item(BaseModel):
prop: int
diff --git a/tests/test_transform.py b/tests/test_transform.py
index 527845a..4067f58 100644
--- a/tests/test_transform.py
+++ b/tests/test_transform.py
@@ -15,7 +15,7 @@
parse_datetime,
async_transform as _async_transform,
)
-from codex._compat import PYDANTIC_V2
+from codex._compat import PYDANTIC_V1
from codex._models import BaseModel
_T = TypeVar("_T")
@@ -189,7 +189,7 @@ class DateModel(BaseModel):
@pytest.mark.asyncio
async def test_iso8601_format(use_async: bool) -> None:
dt = datetime.fromisoformat("2023-02-23T14:16:36.337692+00:00")
- tz = "Z" if PYDANTIC_V2 else "+00:00"
+ tz = "+00:00" if PYDANTIC_V1 else "Z"
assert await transform({"foo": dt}, DatetimeDict, use_async) == {"foo": "2023-02-23T14:16:36.337692+00:00"} # type: ignore[comparison-overlap]
assert await transform(DatetimeModel(foo=dt), Any, use_async) == {"foo": "2023-02-23T14:16:36.337692" + tz} # type: ignore[comparison-overlap]
@@ -297,11 +297,11 @@ async def test_pydantic_unknown_field(use_async: bool) -> None:
@pytest.mark.asyncio
async def test_pydantic_mismatched_types(use_async: bool) -> None:
model = MyModel.construct(foo=True)
- if PYDANTIC_V2:
+ if PYDANTIC_V1:
+ params = await transform(model, Any, use_async)
+ else:
with pytest.warns(UserWarning):
params = await transform(model, Any, use_async)
- else:
- params = await transform(model, Any, use_async)
assert cast(Any, params) == {"foo": True}
@@ -309,11 +309,11 @@ async def test_pydantic_mismatched_types(use_async: bool) -> None:
@pytest.mark.asyncio
async def test_pydantic_mismatched_object_type(use_async: bool) -> None:
model = MyModel.construct(foo=MyModel.construct(hello="world"))
- if PYDANTIC_V2:
+ if PYDANTIC_V1:
+ params = await transform(model, Any, use_async)
+ else:
with pytest.warns(UserWarning):
params = await transform(model, Any, use_async)
- else:
- params = await transform(model, Any, use_async)
assert cast(Any, params) == {"foo": {"hello": "world"}}
diff --git a/tests/test_utils/test_datetime_parse.py b/tests/test_utils/test_datetime_parse.py
new file mode 100644
index 0000000..f7007b7
--- /dev/null
+++ b/tests/test_utils/test_datetime_parse.py
@@ -0,0 +1,110 @@
+"""
+Copied from https://github.com/pydantic/pydantic/blob/v1.10.22/tests/test_datetime_parse.py
+with modifications so it works without pydantic v1 imports.
+"""
+
+from typing import Type, Union
+from datetime import date, datetime, timezone, timedelta
+
+import pytest
+
+from codex._utils import parse_date, parse_datetime
+
+
+def create_tz(minutes: int) -> timezone:
+ return timezone(timedelta(minutes=minutes))
+
+
+@pytest.mark.parametrize(
+ "value,result",
+ [
+ # Valid inputs
+ ("1494012444.883309", date(2017, 5, 5)),
+ (b"1494012444.883309", date(2017, 5, 5)),
+ (1_494_012_444.883_309, date(2017, 5, 5)),
+ ("1494012444", date(2017, 5, 5)),
+ (1_494_012_444, date(2017, 5, 5)),
+ (0, date(1970, 1, 1)),
+ ("2012-04-23", date(2012, 4, 23)),
+ (b"2012-04-23", date(2012, 4, 23)),
+ ("2012-4-9", date(2012, 4, 9)),
+ (date(2012, 4, 9), date(2012, 4, 9)),
+ (datetime(2012, 4, 9, 12, 15), date(2012, 4, 9)),
+ # Invalid inputs
+ ("x20120423", ValueError),
+ ("2012-04-56", ValueError),
+ (19_999_999_999, date(2603, 10, 11)), # just before watershed
+ (20_000_000_001, date(1970, 8, 20)), # just after watershed
+ (1_549_316_052, date(2019, 2, 4)), # nowish in s
+ (1_549_316_052_104, date(2019, 2, 4)), # nowish in ms
+ (1_549_316_052_104_324, date(2019, 2, 4)), # nowish in μs
+ (1_549_316_052_104_324_096, date(2019, 2, 4)), # nowish in ns
+ ("infinity", date(9999, 12, 31)),
+ ("inf", date(9999, 12, 31)),
+ (float("inf"), date(9999, 12, 31)),
+ ("infinity ", date(9999, 12, 31)),
+ (int("1" + "0" * 100), date(9999, 12, 31)),
+ (1e1000, date(9999, 12, 31)),
+ ("-infinity", date(1, 1, 1)),
+ ("-inf", date(1, 1, 1)),
+ ("nan", ValueError),
+ ],
+)
+def test_date_parsing(value: Union[str, bytes, int, float], result: Union[date, Type[Exception]]) -> None:
+ if type(result) == type and issubclass(result, Exception): # pyright: ignore[reportUnnecessaryIsInstance]
+ with pytest.raises(result):
+ parse_date(value)
+ else:
+ assert parse_date(value) == result
+
+
+@pytest.mark.parametrize(
+ "value,result",
+ [
+ # Valid inputs
+ # values in seconds
+ ("1494012444.883309", datetime(2017, 5, 5, 19, 27, 24, 883_309, tzinfo=timezone.utc)),
+ (1_494_012_444.883_309, datetime(2017, 5, 5, 19, 27, 24, 883_309, tzinfo=timezone.utc)),
+ ("1494012444", datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)),
+ (b"1494012444", datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)),
+ (1_494_012_444, datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)),
+ # values in ms
+ ("1494012444000.883309", datetime(2017, 5, 5, 19, 27, 24, 883, tzinfo=timezone.utc)),
+ ("-1494012444000.883309", datetime(1922, 8, 29, 4, 32, 35, 999117, tzinfo=timezone.utc)),
+ (1_494_012_444_000, datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)),
+ ("2012-04-23T09:15:00", datetime(2012, 4, 23, 9, 15)),
+ ("2012-4-9 4:8:16", datetime(2012, 4, 9, 4, 8, 16)),
+ ("2012-04-23T09:15:00Z", datetime(2012, 4, 23, 9, 15, 0, 0, timezone.utc)),
+ ("2012-4-9 4:8:16-0320", datetime(2012, 4, 9, 4, 8, 16, 0, create_tz(-200))),
+ ("2012-04-23T10:20:30.400+02:30", datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(150))),
+ ("2012-04-23T10:20:30.400+02", datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(120))),
+ ("2012-04-23T10:20:30.400-02", datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(-120))),
+ (b"2012-04-23T10:20:30.400-02", datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(-120))),
+ (datetime(2017, 5, 5), datetime(2017, 5, 5)),
+ (0, datetime(1970, 1, 1, 0, 0, 0, tzinfo=timezone.utc)),
+ # Invalid inputs
+ ("x20120423091500", ValueError),
+ ("2012-04-56T09:15:90", ValueError),
+ ("2012-04-23T11:05:00-25:00", ValueError),
+ (19_999_999_999, datetime(2603, 10, 11, 11, 33, 19, tzinfo=timezone.utc)), # just before watershed
+ (20_000_000_001, datetime(1970, 8, 20, 11, 33, 20, 1000, tzinfo=timezone.utc)), # just after watershed
+ (1_549_316_052, datetime(2019, 2, 4, 21, 34, 12, 0, tzinfo=timezone.utc)), # nowish in s
+ (1_549_316_052_104, datetime(2019, 2, 4, 21, 34, 12, 104_000, tzinfo=timezone.utc)), # nowish in ms
+ (1_549_316_052_104_324, datetime(2019, 2, 4, 21, 34, 12, 104_324, tzinfo=timezone.utc)), # nowish in μs
+ (1_549_316_052_104_324_096, datetime(2019, 2, 4, 21, 34, 12, 104_324, tzinfo=timezone.utc)), # nowish in ns
+ ("infinity", datetime(9999, 12, 31, 23, 59, 59, 999999)),
+ ("inf", datetime(9999, 12, 31, 23, 59, 59, 999999)),
+ ("inf ", datetime(9999, 12, 31, 23, 59, 59, 999999)),
+ (1e50, datetime(9999, 12, 31, 23, 59, 59, 999999)),
+ (float("inf"), datetime(9999, 12, 31, 23, 59, 59, 999999)),
+ ("-infinity", datetime(1, 1, 1, 0, 0)),
+ ("-inf", datetime(1, 1, 1, 0, 0)),
+ ("nan", ValueError),
+ ],
+)
+def test_datetime_parsing(value: Union[str, bytes, int, float], result: Union[datetime, Type[Exception]]) -> None:
+ if type(result) == type and issubclass(result, Exception): # pyright: ignore[reportUnnecessaryIsInstance]
+ with pytest.raises(result):
+ parse_datetime(value)
+ else:
+ assert parse_datetime(value) == result
diff --git a/tests/utils.py b/tests/utils.py
index 091f4e4..f53a1fd 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -19,7 +19,7 @@
is_annotated_type,
is_type_alias_type,
)
-from codex._compat import PYDANTIC_V2, field_outer_type, get_model_fields
+from codex._compat import PYDANTIC_V1, field_outer_type, get_model_fields
from codex._models import BaseModel
BaseModelT = TypeVar("BaseModelT", bound=BaseModel)
@@ -28,12 +28,12 @@
def assert_matches_model(model: type[BaseModelT], value: BaseModelT, *, path: list[str]) -> bool:
for name, field in get_model_fields(model).items():
field_value = getattr(value, name)
- if PYDANTIC_V2:
- allow_none = False
- else:
+ if PYDANTIC_V1:
# in v1 nullability was structured differently
# https://docs.pydantic.dev/2.0/migration/#required-optional-and-nullable-fields
allow_none = getattr(field, "allow_none", False)
+ else:
+ allow_none = False
assert_matches_type(
field_outer_type(field),
From 3b745afa4569af091b3a62228af62c036c84718d Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Thu, 4 Sep 2025 20:17:22 +0000
Subject: [PATCH 11/15] feat(api): api update
---
.stats.yml | 2 +-
src/codex/resources/projects/query_logs.py | 52 ++-----------------
.../query_log_list_by_group_params.py | 12 +----
.../types/projects/query_log_list_params.py | 12 +----
.../api_resources/projects/test_query_logs.py | 8 +--
5 files changed, 11 insertions(+), 75 deletions(-)
diff --git a/.stats.yml b/.stats.yml
index 1be05f4..1b83cc3 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,3 +1,3 @@
configured_endpoints: 55
-openapi_spec_hash: b260cce23d53dba37b982654e7928405
+openapi_spec_hash: 3d3361adb6a836f42a49850b5aea1df5
config_hash: bed87752f4056d0c4bf2ddf856307800
diff --git a/src/codex/resources/projects/query_logs.py b/src/codex/resources/projects/query_logs.py
index 8fb37c0..98954bf 100644
--- a/src/codex/resources/projects/query_logs.py
+++ b/src/codex/resources/projects/query_logs.py
@@ -115,18 +115,7 @@ def list(
List[Literal["hallucination", "search_failure", "unhelpful", "difficult_query", "ungrounded"]]
]
| NotGiven = NOT_GIVEN,
- sort: Optional[
- Literal[
- "created_at",
- "primary_eval_issue_score",
- "score_trustworthiness",
- "score_context_sufficiency",
- "score_response_helpfulness",
- "score_query_ease",
- "score_response_groundedness",
- ]
- ]
- | NotGiven = NOT_GIVEN,
+ sort: Optional[str] | NotGiven = NOT_GIVEN,
tool_call_names: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
was_cache_hit: Optional[bool] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -261,18 +250,7 @@ def list_by_group(
]
| NotGiven = NOT_GIVEN,
remediation_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN,
- sort: Optional[
- Literal[
- "created_at",
- "primary_eval_issue_score",
- "score_trustworthiness",
- "score_context_sufficiency",
- "score_response_helpfulness",
- "score_query_ease",
- "score_response_groundedness",
- ]
- ]
- | NotGiven = NOT_GIVEN,
+ sort: Optional[str] | NotGiven = NOT_GIVEN,
tool_call_names: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
was_cache_hit: Optional[bool] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -561,18 +539,7 @@ def list(
List[Literal["hallucination", "search_failure", "unhelpful", "difficult_query", "ungrounded"]]
]
| NotGiven = NOT_GIVEN,
- sort: Optional[
- Literal[
- "created_at",
- "primary_eval_issue_score",
- "score_trustworthiness",
- "score_context_sufficiency",
- "score_response_helpfulness",
- "score_query_ease",
- "score_response_groundedness",
- ]
- ]
- | NotGiven = NOT_GIVEN,
+ sort: Optional[str] | NotGiven = NOT_GIVEN,
tool_call_names: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
was_cache_hit: Optional[bool] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -709,18 +676,7 @@ async def list_by_group(
]
| NotGiven = NOT_GIVEN,
remediation_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN,
- sort: Optional[
- Literal[
- "created_at",
- "primary_eval_issue_score",
- "score_trustworthiness",
- "score_context_sufficiency",
- "score_response_helpfulness",
- "score_query_ease",
- "score_response_groundedness",
- ]
- ]
- | NotGiven = NOT_GIVEN,
+ sort: Optional[str] | NotGiven = NOT_GIVEN,
tool_call_names: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
was_cache_hit: Optional[bool] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
diff --git a/src/codex/types/projects/query_log_list_by_group_params.py b/src/codex/types/projects/query_log_list_by_group_params.py
index afdcb7f..7eb2410 100644
--- a/src/codex/types/projects/query_log_list_by_group_params.py
+++ b/src/codex/types/projects/query_log_list_by_group_params.py
@@ -51,17 +51,7 @@ class QueryLogListByGroupParams(TypedDict, total=False):
remediation_ids: SequenceNotStr[str]
"""List of groups to list child logs for"""
- sort: Optional[
- Literal[
- "created_at",
- "primary_eval_issue_score",
- "score_trustworthiness",
- "score_context_sufficiency",
- "score_response_helpfulness",
- "score_query_ease",
- "score_response_groundedness",
- ]
- ]
+ sort: Optional[str]
tool_call_names: Optional[SequenceNotStr[str]]
"""Filter by names of tools called in the assistant response"""
diff --git a/src/codex/types/projects/query_log_list_params.py b/src/codex/types/projects/query_log_list_params.py
index 20a209b..f003763 100644
--- a/src/codex/types/projects/query_log_list_params.py
+++ b/src/codex/types/projects/query_log_list_params.py
@@ -45,17 +45,7 @@ class QueryLogListParams(TypedDict, total=False):
]
"""Filter logs that have ANY of these primary evaluation issues (OR operation)"""
- sort: Optional[
- Literal[
- "created_at",
- "primary_eval_issue_score",
- "score_trustworthiness",
- "score_context_sufficiency",
- "score_response_helpfulness",
- "score_query_ease",
- "score_response_groundedness",
- ]
- ]
+ sort: Optional[str]
tool_call_names: Optional[SequenceNotStr[str]]
"""Filter by names of tools called in the assistant response"""
diff --git a/tests/api_resources/projects/test_query_logs.py b/tests/api_resources/projects/test_query_logs.py
index 05d3f46..385d44a 100644
--- a/tests/api_resources/projects/test_query_logs.py
+++ b/tests/api_resources/projects/test_query_logs.py
@@ -107,7 +107,7 @@ def test_method_list_with_all_params(self, client: Codex) -> None:
order="asc",
passed_evals=["string"],
primary_eval_issue=["hallucination"],
- sort="created_at",
+ sort="sort",
tool_call_names=["string"],
was_cache_hit=True,
)
@@ -230,7 +230,7 @@ def test_method_list_by_group_with_all_params(self, client: Codex) -> None:
passed_evals=["string"],
primary_eval_issue=["hallucination"],
remediation_ids=["182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e"],
- sort="created_at",
+ sort="sort",
tool_call_names=["string"],
was_cache_hit=True,
)
@@ -469,7 +469,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncCodex) -> No
order="asc",
passed_evals=["string"],
primary_eval_issue=["hallucination"],
- sort="created_at",
+ sort="sort",
tool_call_names=["string"],
was_cache_hit=True,
)
@@ -592,7 +592,7 @@ async def test_method_list_by_group_with_all_params(self, async_client: AsyncCod
passed_evals=["string"],
primary_eval_issue=["hallucination"],
remediation_ids=["182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e"],
- sort="created_at",
+ sort="sort",
tool_call_names=["string"],
was_cache_hit=True,
)
From 6285dbe7fdc7e6eede39d589ac92d8481595e5e3 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 5 Sep 2025 02:43:04 +0000
Subject: [PATCH 12/15] chore(internal): move mypy configurations to
`pyproject.toml` file
---
mypy.ini | 50 ------------------------------------------------
pyproject.toml | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 52 insertions(+), 50 deletions(-)
delete mode 100644 mypy.ini
diff --git a/mypy.ini b/mypy.ini
deleted file mode 100644
index 92f9243..0000000
--- a/mypy.ini
+++ /dev/null
@@ -1,50 +0,0 @@
-[mypy]
-pretty = True
-show_error_codes = True
-
-# Exclude _files.py because mypy isn't smart enough to apply
-# the correct type narrowing and as this is an internal module
-# it's fine to just use Pyright.
-#
-# We also exclude our `tests` as mypy doesn't always infer
-# types correctly and Pyright will still catch any type errors.
-exclude = ^(src/codex/_files\.py|_dev/.*\.py|tests/.*)$
-
-strict_equality = True
-implicit_reexport = True
-check_untyped_defs = True
-no_implicit_optional = True
-
-warn_return_any = True
-warn_unreachable = True
-warn_unused_configs = True
-
-# Turn these options off as it could cause conflicts
-# with the Pyright options.
-warn_unused_ignores = False
-warn_redundant_casts = False
-
-disallow_any_generics = True
-disallow_untyped_defs = True
-disallow_untyped_calls = True
-disallow_subclassing_any = True
-disallow_incomplete_defs = True
-disallow_untyped_decorators = True
-cache_fine_grained = True
-
-# By default, mypy reports an error if you assign a value to the result
-# of a function call that doesn't return anything. We do this in our test
-# cases:
-# ```
-# result = ...
-# assert result is None
-# ```
-# Changing this codegen to make mypy happy would increase complexity
-# and would not be worth it.
-disable_error_code = func-returns-value,overload-cannot-match
-
-# https://github.com/python/mypy/issues/12162
-[mypy.overrides]
-module = "black.files.*"
-ignore_errors = true
-ignore_missing_imports = true
diff --git a/pyproject.toml b/pyproject.toml
index 1cb20a6..446ad55 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -157,6 +157,58 @@ reportOverlappingOverload = false
reportImportCycles = false
reportPrivateUsage = false
+[tool.mypy]
+pretty = true
+show_error_codes = true
+
+# Exclude _files.py because mypy isn't smart enough to apply
+# the correct type narrowing and as this is an internal module
+# it's fine to just use Pyright.
+#
+# We also exclude our `tests` as mypy doesn't always infer
+# types correctly and Pyright will still catch any type errors.
+exclude = ['src/codex/_files.py', '_dev/.*.py', 'tests/.*']
+
+strict_equality = true
+implicit_reexport = true
+check_untyped_defs = true
+no_implicit_optional = true
+
+warn_return_any = true
+warn_unreachable = true
+warn_unused_configs = true
+
+# Turn these options off as it could cause conflicts
+# with the Pyright options.
+warn_unused_ignores = false
+warn_redundant_casts = false
+
+disallow_any_generics = true
+disallow_untyped_defs = true
+disallow_untyped_calls = true
+disallow_subclassing_any = true
+disallow_incomplete_defs = true
+disallow_untyped_decorators = true
+cache_fine_grained = true
+
+# By default, mypy reports an error if you assign a value to the result
+# of a function call that doesn't return anything. We do this in our test
+# cases:
+# ```
+# result = ...
+# assert result is None
+# ```
+# Changing this codegen to make mypy happy would increase complexity
+# and would not be worth it.
+disable_error_code = "func-returns-value,overload-cannot-match"
+
+# https://github.com/python/mypy/issues/12162
+[[tool.mypy.overrides]]
+module = "black.files.*"
+ignore_errors = true
+ignore_missing_imports = true
+
+
[tool.ruff]
line-length = 120
output-format = "grouped"
From 0d4647ba1ced086d81b8e35bb90013f0742f75b1 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 5 Sep 2025 20:17:26 +0000
Subject: [PATCH 13/15] codegen metadata
---
.stats.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.stats.yml b/.stats.yml
index 1b83cc3..caeeb0f 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,3 +1,3 @@
configured_endpoints: 55
-openapi_spec_hash: 3d3361adb6a836f42a49850b5aea1df5
+openapi_spec_hash: 12260ab88069ff15d254606e041debfb
config_hash: bed87752f4056d0c4bf2ddf856307800
From 5a4e424744a6a0e8efc8e6d96c1472508bcc3b00 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 5 Sep 2025 20:21:36 +0000
Subject: [PATCH 14/15] feat(api): manual updates
---
.stats.yml | 4 +-
api.md | 2 +
src/codex/resources/projects/query_logs.py | 90 ++++++++++++++
src/codex/types/projects/__init__.py | 2 +
.../query_log_update_metadata_params.py | 13 ++
.../query_log_update_metadata_response.py | 17 +++
.../api_resources/projects/test_query_logs.py | 115 ++++++++++++++++++
7 files changed, 241 insertions(+), 2 deletions(-)
create mode 100644 src/codex/types/projects/query_log_update_metadata_params.py
create mode 100644 src/codex/types/projects/query_log_update_metadata_response.py
diff --git a/.stats.yml b/.stats.yml
index caeeb0f..386ca3e 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,3 +1,3 @@
-configured_endpoints: 55
+configured_endpoints: 56
openapi_spec_hash: 12260ab88069ff15d254606e041debfb
-config_hash: bed87752f4056d0c4bf2ddf856307800
+config_hash: 6c3ad84d97bf1d0989ad2ec0cae64078
diff --git a/api.md b/api.md
index 693ee75..a78c4fc 100644
--- a/api.md
+++ b/api.md
@@ -206,6 +206,7 @@ from codex.types.projects import (
QueryLogListByGroupResponse,
QueryLogListGroupsResponse,
QueryLogStartRemediationResponse,
+ QueryLogUpdateMetadataResponse,
)
```
@@ -217,6 +218,7 @@ Methods:
- client.projects.query_logs.list_by_group(project_id, \*\*params) -> QueryLogListByGroupResponse
- client.projects.query_logs.list_groups(project_id, \*\*params) -> SyncOffsetPageQueryLogGroups[QueryLogListGroupsResponse]
- client.projects.query_logs.start_remediation(query_log_id, \*, project_id) -> QueryLogStartRemediationResponse
+- client.projects.query_logs.update_metadata(query_log_id, \*, project_id, \*\*params) -> QueryLogUpdateMetadataResponse
## Remediations
diff --git a/src/codex/resources/projects/query_logs.py b/src/codex/resources/projects/query_logs.py
index 98954bf..8c939f9 100644
--- a/src/codex/resources/projects/query_logs.py
+++ b/src/codex/resources/projects/query_logs.py
@@ -29,12 +29,14 @@
query_log_list_params,
query_log_list_groups_params,
query_log_list_by_group_params,
+ query_log_update_metadata_params,
query_log_add_user_feedback_params,
)
from ...types.projects.query_log_list_response import QueryLogListResponse
from ...types.projects.query_log_retrieve_response import QueryLogRetrieveResponse
from ...types.projects.query_log_list_groups_response import QueryLogListGroupsResponse
from ...types.projects.query_log_list_by_group_response import QueryLogListByGroupResponse
+from ...types.projects.query_log_update_metadata_response import QueryLogUpdateMetadataResponse
from ...types.projects.query_log_add_user_feedback_response import QueryLogAddUserFeedbackResponse
from ...types.projects.query_log_start_remediation_response import QueryLogStartRemediationResponse
@@ -464,6 +466,44 @@ def start_remediation(
cast_to=QueryLogStartRemediationResponse,
)
+ def update_metadata(
+ self,
+ query_log_id: str,
+ *,
+ project_id: str,
+ body: object,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> QueryLogUpdateMetadataResponse:
+ """
+ Update Metadata Route
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not query_log_id:
+ raise ValueError(f"Expected a non-empty value for `query_log_id` but received {query_log_id!r}")
+ return self._put(
+ f"/api/projects/{project_id}/query_logs/{query_log_id}/metadata",
+ body=maybe_transform(body, query_log_update_metadata_params.QueryLogUpdateMetadataParams),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=QueryLogUpdateMetadataResponse,
+ )
+
class AsyncQueryLogsResource(AsyncAPIResource):
@cached_property
@@ -890,6 +930,44 @@ async def start_remediation(
cast_to=QueryLogStartRemediationResponse,
)
+ async def update_metadata(
+ self,
+ query_log_id: str,
+ *,
+ project_id: str,
+ body: object,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> QueryLogUpdateMetadataResponse:
+ """
+ Update Metadata Route
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not project_id:
+ raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+ if not query_log_id:
+ raise ValueError(f"Expected a non-empty value for `query_log_id` but received {query_log_id!r}")
+ return await self._put(
+ f"/api/projects/{project_id}/query_logs/{query_log_id}/metadata",
+ body=await async_maybe_transform(body, query_log_update_metadata_params.QueryLogUpdateMetadataParams),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=QueryLogUpdateMetadataResponse,
+ )
+
class QueryLogsResourceWithRawResponse:
def __init__(self, query_logs: QueryLogsResource) -> None:
@@ -913,6 +991,9 @@ def __init__(self, query_logs: QueryLogsResource) -> None:
self.start_remediation = to_raw_response_wrapper(
query_logs.start_remediation,
)
+ self.update_metadata = to_raw_response_wrapper(
+ query_logs.update_metadata,
+ )
class AsyncQueryLogsResourceWithRawResponse:
@@ -937,6 +1018,9 @@ def __init__(self, query_logs: AsyncQueryLogsResource) -> None:
self.start_remediation = async_to_raw_response_wrapper(
query_logs.start_remediation,
)
+ self.update_metadata = async_to_raw_response_wrapper(
+ query_logs.update_metadata,
+ )
class QueryLogsResourceWithStreamingResponse:
@@ -961,6 +1045,9 @@ def __init__(self, query_logs: QueryLogsResource) -> None:
self.start_remediation = to_streamed_response_wrapper(
query_logs.start_remediation,
)
+ self.update_metadata = to_streamed_response_wrapper(
+ query_logs.update_metadata,
+ )
class AsyncQueryLogsResourceWithStreamingResponse:
@@ -985,3 +1072,6 @@ def __init__(self, query_logs: AsyncQueryLogsResource) -> None:
self.start_remediation = async_to_streamed_response_wrapper(
query_logs.start_remediation,
)
+ self.update_metadata = async_to_streamed_response_wrapper(
+ query_logs.update_metadata,
+ )
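Note (hedged usage sketch mirroring the generated tests below): the new `update_metadata` method PUTs an arbitrary metadata object for a query log and returns the updated metadata fields. Credentials are assumed to come from the environment; the metadata payload here is an arbitrary example:

```python
from codex import Codex

client = Codex()  # assumes credentials are picked up from the environment
result = client.projects.query_logs.update_metadata(
    query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
    project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
    body={"source": "support-portal"},  # arbitrary user-supplied metadata
)
print(result.custom_metadata, result.custom_metadata_keys)
```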
diff --git a/src/codex/types/projects/__init__.py b/src/codex/types/projects/__init__.py
index b90cbd8..c8a6b9a 100644
--- a/src/codex/types/projects/__init__.py
+++ b/src/codex/types/projects/__init__.py
@@ -26,8 +26,10 @@
from .query_log_list_groups_response import QueryLogListGroupsResponse as QueryLogListGroupsResponse
from .remediation_edit_answer_params import RemediationEditAnswerParams as RemediationEditAnswerParams
from .query_log_list_by_group_response import QueryLogListByGroupResponse as QueryLogListByGroupResponse
+from .query_log_update_metadata_params import QueryLogUpdateMetadataParams as QueryLogUpdateMetadataParams
from .remediation_edit_answer_response import RemediationEditAnswerResponse as RemediationEditAnswerResponse
from .query_log_add_user_feedback_params import QueryLogAddUserFeedbackParams as QueryLogAddUserFeedbackParams
+from .query_log_update_metadata_response import QueryLogUpdateMetadataResponse as QueryLogUpdateMetadataResponse
from .query_log_add_user_feedback_response import QueryLogAddUserFeedbackResponse as QueryLogAddUserFeedbackResponse
from .query_log_start_remediation_response import QueryLogStartRemediationResponse as QueryLogStartRemediationResponse
from .remediation_edit_draft_answer_params import RemediationEditDraftAnswerParams as RemediationEditDraftAnswerParams
diff --git a/src/codex/types/projects/query_log_update_metadata_params.py b/src/codex/types/projects/query_log_update_metadata_params.py
new file mode 100644
index 0000000..6f58c5d
--- /dev/null
+++ b/src/codex/types/projects/query_log_update_metadata_params.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["QueryLogUpdateMetadataParams"]
+
+
+class QueryLogUpdateMetadataParams(TypedDict, total=False):
+ project_id: Required[str]
+
+ body: Required[object]
diff --git a/src/codex/types/projects/query_log_update_metadata_response.py b/src/codex/types/projects/query_log_update_metadata_response.py
new file mode 100644
index 0000000..9937542
--- /dev/null
+++ b/src/codex/types/projects/query_log_update_metadata_response.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+
+__all__ = ["QueryLogUpdateMetadataResponse"]
+
+
+class QueryLogUpdateMetadataResponse(BaseModel):
+ id: str
+
+ custom_metadata: Optional[object] = None
+ """Arbitrary metadata supplied by the user/system"""
+
+ custom_metadata_keys: Optional[List[str]] = None
+ """Keys of the custom metadata"""
diff --git a/tests/api_resources/projects/test_query_logs.py b/tests/api_resources/projects/test_query_logs.py
index 385d44a..5f9d1e9 100644
--- a/tests/api_resources/projects/test_query_logs.py
+++ b/tests/api_resources/projects/test_query_logs.py
@@ -21,6 +21,7 @@
QueryLogRetrieveResponse,
QueryLogListGroupsResponse,
QueryLogListByGroupResponse,
+ QueryLogUpdateMetadataResponse,
QueryLogAddUserFeedbackResponse,
QueryLogStartRemediationResponse,
)
@@ -387,6 +388,63 @@ def test_path_params_start_remediation(self, client: Codex) -> None:
project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
)
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_update_metadata(self, client: Codex) -> None:
+ query_log = client.projects.query_logs.update_metadata(
+ query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ body={},
+ )
+ assert_matches_type(QueryLogUpdateMetadataResponse, query_log, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_raw_response_update_metadata(self, client: Codex) -> None:
+ response = client.projects.query_logs.with_raw_response.update_metadata(
+ query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ body={},
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ query_log = response.parse()
+ assert_matches_type(QueryLogUpdateMetadataResponse, query_log, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_streaming_response_update_metadata(self, client: Codex) -> None:
+ with client.projects.query_logs.with_streaming_response.update_metadata(
+ query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ body={},
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ query_log = response.parse()
+ assert_matches_type(QueryLogUpdateMetadataResponse, query_log, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_path_params_update_metadata(self, client: Codex) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ client.projects.query_logs.with_raw_response.update_metadata(
+ query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ project_id="",
+ body={},
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `query_log_id` but received ''"):
+ client.projects.query_logs.with_raw_response.update_metadata(
+ query_log_id="",
+ project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ body={},
+ )
+
class TestAsyncQueryLogs:
parametrize = pytest.mark.parametrize(
@@ -748,3 +806,60 @@ async def test_path_params_start_remediation(self, async_client: AsyncCodex) ->
query_log_id="",
project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
)
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_update_metadata(self, async_client: AsyncCodex) -> None:
+ query_log = await async_client.projects.query_logs.update_metadata(
+ query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ body={},
+ )
+ assert_matches_type(QueryLogUpdateMetadataResponse, query_log, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_raw_response_update_metadata(self, async_client: AsyncCodex) -> None:
+ response = await async_client.projects.query_logs.with_raw_response.update_metadata(
+ query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ body={},
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ query_log = await response.parse()
+ assert_matches_type(QueryLogUpdateMetadataResponse, query_log, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_streaming_response_update_metadata(self, async_client: AsyncCodex) -> None:
+ async with async_client.projects.query_logs.with_streaming_response.update_metadata(
+ query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ body={},
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ query_log = await response.parse()
+ assert_matches_type(QueryLogUpdateMetadataResponse, query_log, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_path_params_update_metadata(self, async_client: AsyncCodex) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+ await async_client.projects.query_logs.with_raw_response.update_metadata(
+ query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ project_id="",
+ body={},
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `query_log_id` but received ''"):
+ await async_client.projects.query_logs.with_raw_response.update_metadata(
+ query_log_id="",
+ project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ body={},
+ )
From 0aa647b88b7bd005ca64ce593342e35191562e31 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 5 Sep 2025 20:21:53 +0000
Subject: [PATCH 15/15] release: 0.1.0-alpha.26
---
.release-please-manifest.json | 2 +-
CHANGELOG.md | 28 ++++++++++++++++++++++++++++
pyproject.toml | 2 +-
src/codex/_version.py | 2 +-
4 files changed, 31 insertions(+), 3 deletions(-)
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 3188ced..315f7d3 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "0.1.0-alpha.25"
+ ".": "0.1.0-alpha.26"
}
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 12b39fc..03ab7e8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,33 @@
# Changelog
+## 0.1.0-alpha.26 (2025-09-05)
+
+Full Changelog: [v0.1.0-alpha.25...v0.1.0-alpha.26](https://github.com/cleanlab/codex-python/compare/v0.1.0-alpha.25...v0.1.0-alpha.26)
+
+### Features
+
+* **api:** api update ([3b745af](https://github.com/cleanlab/codex-python/commit/3b745afa4569af091b3a62228af62c036c84718d))
+* **api:** api update ([9bb8f17](https://github.com/cleanlab/codex-python/commit/9bb8f17149849ea312caf7b895f02a2c38ac3270))
+* **api:** api update ([ecbda1c](https://github.com/cleanlab/codex-python/commit/ecbda1ca9f1aaf40a325e105cc0659ea0e318511))
+* **api:** api update ([4151cf2](https://github.com/cleanlab/codex-python/commit/4151cf288ea4f75ac7c6eb5c07c9861d67b5ad92))
+* **api:** api update ([cfc87fb](https://github.com/cleanlab/codex-python/commit/cfc87fb65a3a29f0584b7dffccc79f2442307fa9))
+* **api:** manual updates ([5a4e424](https://github.com/cleanlab/codex-python/commit/5a4e424744a6a0e8efc8e6d96c1472508bcc3b00))
+* improve future compat with pydantic v3 ([d9ffa38](https://github.com/cleanlab/codex-python/commit/d9ffa3812e5eb836fc85532d57a92937ef3ad7ec))
+* **types:** replace List[str] with SequenceNotStr in params ([07bdde8](https://github.com/cleanlab/codex-python/commit/07bdde83153c609ff2b86a4190911cab160aaae2))
+
+
+### Bug Fixes
+
+* avoid newer type syntax ([7a7b9a3](https://github.com/cleanlab/codex-python/commit/7a7b9a3ee6cfed2e57359a54c1b9292f06cc71b6))
+
+
+### Chores
+
+* **internal:** add Sequence related utils ([ead0105](https://github.com/cleanlab/codex-python/commit/ead0105a68c77ae308cc0d40c34245265e9cb2b6))
+* **internal:** change ci workflow machines ([052599a](https://github.com/cleanlab/codex-python/commit/052599aee2e484d9cc305983da900878374db17d))
+* **internal:** move mypy configurations to `pyproject.toml` file ([6285dbe](https://github.com/cleanlab/codex-python/commit/6285dbe7fdc7e6eede39d589ac92d8481595e5e3))
+* **internal:** update pyright exclude list ([cc58363](https://github.com/cleanlab/codex-python/commit/cc58363a9c78ce08437508d3c64cb9b29b43e9bd))
+
## 0.1.0-alpha.25 (2025-08-22)
Full Changelog: [v0.1.0-alpha.24...v0.1.0-alpha.25](https://github.com/cleanlab/codex-python/compare/v0.1.0-alpha.24...v0.1.0-alpha.25)
diff --git a/pyproject.toml b/pyproject.toml
index 446ad55..e09e8bc 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "codex-sdk"
-version = "0.1.0-alpha.25"
+version = "0.1.0-alpha.26"
description = "Internal SDK used within cleanlab-codex package. Refer to https://pypi.org/project/cleanlab-codex/ instead."
dynamic = ["readme"]
license = "MIT"
diff --git a/src/codex/_version.py b/src/codex/_version.py
index 656ce65..1e7d5f1 100644
--- a/src/codex/_version.py
+++ b/src/codex/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "codex"
-__version__ = "0.1.0-alpha.25" # x-release-please-version
+__version__ = "0.1.0-alpha.26" # x-release-please-version