From d12ca4b11cc6a0f022f2c3b8ab8752a6f600f504 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 15 May 2025 18:16:44 +0000 Subject: [PATCH 01/32] codegen metadata --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 12a0365a..76c12f55 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,3 +1,3 @@ configured_endpoints: 44 -openapi_spec_hash: 9d81a4b0eca6d3629ba9d5432a65655c +openapi_spec_hash: 19d3afd940d8ed57b76401ef026e5f47 config_hash: 659f65b6ccf5612986f920f7f9abbcb5 From 3aa98843e0f042734eb5b74ea86c8dcca8636954 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 16 May 2025 02:39:50 +0000 Subject: [PATCH 02/32] chore(ci): fix installation instructions --- scripts/utils/upload-artifact.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/utils/upload-artifact.sh b/scripts/utils/upload-artifact.sh index ebb04789..8f922b51 100755 --- a/scripts/utils/upload-artifact.sh +++ b/scripts/utils/upload-artifact.sh @@ -18,7 +18,7 @@ UPLOAD_RESPONSE=$(tar -cz . | curl -v -X PUT \ if echo "$UPLOAD_RESPONSE" | grep -q "HTTP/[0-9.]* 200"; then echo -e "\033[32mUploaded build to Stainless storage.\033[0m" - echo -e "\033[32mInstallation: npm install 'https://pkg.stainless.com/s/codex-python/$SHA'\033[0m" + echo -e "\033[32mInstallation: pip install 'https://pkg.stainless.com/s/codex-python/$SHA'\033[0m" else echo -e "\033[31mFailed to upload artifact.\033[0m" exit 1 From 18f661d21b849f15cbe85ce5063ef0dea877d89f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 17 May 2025 02:50:02 +0000 Subject: [PATCH 03/32] chore(internal): codegen related update --- scripts/utils/upload-artifact.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/utils/upload-artifact.sh b/scripts/utils/upload-artifact.sh index 8f922b51..62d150a4 100755 --- a/scripts/utils/upload-artifact.sh +++ b/scripts/utils/upload-artifact.sh @@ -18,7 +18,7 @@ UPLOAD_RESPONSE=$(tar -cz . 
| curl -v -X PUT \ if echo "$UPLOAD_RESPONSE" | grep -q "HTTP/[0-9.]* 200"; then echo -e "\033[32mUploaded build to Stainless storage.\033[0m" - echo -e "\033[32mInstallation: pip install 'https://pkg.stainless.com/s/codex-python/$SHA'\033[0m" + echo -e "\033[32mInstallation: pip install --pre 'https://pkg.stainless.com/s/codex-python/$SHA'\033[0m" else echo -e "\033[31mFailed to upload artifact.\033[0m" exit 1 From 40ae04a279ba1e2573d17a17e097f71d1347a3d3 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 21 May 2025 18:16:58 +0000 Subject: [PATCH 04/32] feat(api): api update --- .stats.yml | 2 +- .../types/projects/cluster_list_response.py | 20 +++++++++++++++++++ src/codex/types/projects/entry.py | 20 +++++++++++++++++++ .../types/projects/entry_query_response.py | 20 +++++++++++++++++++ 4 files changed, 61 insertions(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 76c12f55..aac346a7 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,3 +1,3 @@ configured_endpoints: 44 -openapi_spec_hash: 19d3afd940d8ed57b76401ef026e5f47 +openapi_spec_hash: f25ca671adcc0b224451c721048d9220 config_hash: 659f65b6ccf5612986f920f7f9abbcb5 diff --git a/src/codex/types/projects/cluster_list_response.py b/src/codex/types/projects/cluster_list_response.py index 2e8b5426..1fc8bd5e 100644 --- a/src/codex/types/projects/cluster_list_response.py +++ b/src/codex/types/projects/cluster_list_response.py @@ -13,6 +13,7 @@ "ManagedMetadataContextSufficiency", "ManagedMetadataHTMLFormatScores", "ManagedMetadataQueryEaseCustomized", + "ManagedMetadataResponseGroundedness", "ManagedMetadataResponseHelpfulness", "ManagedMetadataTrustworthiness", ] @@ -82,6 +83,22 @@ class ManagedMetadataQueryEaseCustomized(BaseModel): scores: Optional[List[float]] = None +class ManagedMetadataResponseGroundedness(BaseModel): + average: Optional[float] = None + """The average of all scores.""" + + latest: Optional[float] = None + """The most recent score.""" + + max: Optional[float] = None + """The maximum score.""" + + min: Optional[float] = None + """The minimum score.""" + + scores: Optional[List[float]] = None + + class ManagedMetadataResponseHelpfulness(BaseModel): average: Optional[float] = None """The average of all scores.""" @@ -147,6 +164,9 @@ class ManagedMetadata(BaseModel): query_ease_customized: Optional[ManagedMetadataQueryEaseCustomized] = None """Holds a list of scores and computes aggregate statistics.""" + response_groundedness: Optional[ManagedMetadataResponseGroundedness] = None + """Holds a list of scores and computes aggregate statistics.""" + response_helpfulness: Optional[ManagedMetadataResponseHelpfulness] = None """Holds a list of scores and computes aggregate statistics.""" diff --git a/src/codex/types/projects/entry.py b/src/codex/types/projects/entry.py index eb2a2217..3f7a86da 100644 --- a/src/codex/types/projects/entry.py +++ b/src/codex/types/projects/entry.py @@ -13,6 +13,7 @@ "ManagedMetadataContextSufficiency", "ManagedMetadataHTMLFormatScores", "ManagedMetadataQueryEaseCustomized", + "ManagedMetadataResponseGroundedness", "ManagedMetadataResponseHelpfulness", "ManagedMetadataTrustworthiness", ] @@ -82,6 +83,22 @@ class ManagedMetadataQueryEaseCustomized(BaseModel): scores: Optional[List[float]] = None +class ManagedMetadataResponseGroundedness(BaseModel): + average: Optional[float] = None + """The average of all scores.""" + + latest: Optional[float] = None + """The most recent score.""" + + max: Optional[float] = None + """The 
maximum score.""" + + min: Optional[float] = None + """The minimum score.""" + + scores: Optional[List[float]] = None + + class ManagedMetadataResponseHelpfulness(BaseModel): average: Optional[float] = None """The average of all scores.""" @@ -147,6 +164,9 @@ class ManagedMetadata(BaseModel): query_ease_customized: Optional[ManagedMetadataQueryEaseCustomized] = None """Holds a list of scores and computes aggregate statistics.""" + response_groundedness: Optional[ManagedMetadataResponseGroundedness] = None + """Holds a list of scores and computes aggregate statistics.""" + response_helpfulness: Optional[ManagedMetadataResponseHelpfulness] = None """Holds a list of scores and computes aggregate statistics.""" diff --git a/src/codex/types/projects/entry_query_response.py b/src/codex/types/projects/entry_query_response.py index 318636b9..cd5a4c97 100644 --- a/src/codex/types/projects/entry_query_response.py +++ b/src/codex/types/projects/entry_query_response.py @@ -12,6 +12,7 @@ "EntryManagedMetadataContextSufficiency", "EntryManagedMetadataHTMLFormatScores", "EntryManagedMetadataQueryEaseCustomized", + "EntryManagedMetadataResponseGroundedness", "EntryManagedMetadataResponseHelpfulness", "EntryManagedMetadataTrustworthiness", ] @@ -81,6 +82,22 @@ class EntryManagedMetadataQueryEaseCustomized(BaseModel): scores: Optional[List[float]] = None +class EntryManagedMetadataResponseGroundedness(BaseModel): + average: Optional[float] = None + """The average of all scores.""" + + latest: Optional[float] = None + """The most recent score.""" + + max: Optional[float] = None + """The maximum score.""" + + min: Optional[float] = None + """The minimum score.""" + + scores: Optional[List[float]] = None + + class EntryManagedMetadataResponseHelpfulness(BaseModel): average: Optional[float] = None """The average of all scores.""" @@ -146,6 +163,9 @@ class EntryManagedMetadata(BaseModel): query_ease_customized: Optional[EntryManagedMetadataQueryEaseCustomized] = None """Holds a list of scores and computes aggregate statistics.""" + response_groundedness: Optional[EntryManagedMetadataResponseGroundedness] = None + """Holds a list of scores and computes aggregate statistics.""" + response_helpfulness: Optional[EntryManagedMetadataResponseHelpfulness] = None """Holds a list of scores and computes aggregate statistics.""" From 7bbf57ae5327ddd85e6729997a4f85b427758258 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 21 May 2025 22:16:41 +0000 Subject: [PATCH 05/32] feat(api): api update --- .stats.yml | 2 +- tests/api_resources/test_projects.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.stats.yml b/.stats.yml index aac346a7..374e6728 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,3 +1,3 @@ configured_endpoints: 44 -openapi_spec_hash: f25ca671adcc0b224451c721048d9220 +openapi_spec_hash: 67d5aeebff72f48ee4730227ca0b47c2 config_hash: 659f65b6ccf5612986f920f7f9abbcb5 diff --git a/tests/api_resources/test_projects.py b/tests/api_resources/test_projects.py index 19e41a0a..5c29fddf 100644 --- a/tests/api_resources/test_projects.py +++ b/tests/api_resources/test_projects.py @@ -204,7 +204,7 @@ def test_method_list(self, client: Codex) -> None: def test_method_list_with_all_params(self, client: Codex) -> None: project = client.projects.list( include_entry_counts=True, - limit=0, + limit=1, offset=0, order="asc", organization_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", @@ -699,7 +699,7 @@ async def test_method_list(self, 
async_client: AsyncCodex) -> None: async def test_method_list_with_all_params(self, async_client: AsyncCodex) -> None: project = await async_client.projects.list( include_entry_counts=True, - limit=0, + limit=1, offset=0, order="asc", organization_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", From 428e5001b6b5576f5383c0f2ffd3ad5fe085128a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 22 May 2025 02:29:17 +0000 Subject: [PATCH 06/32] chore(docs): grammar improvements --- SECURITY.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/SECURITY.md b/SECURITY.md index 9fc6ee28..07808285 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -16,11 +16,11 @@ before making any information public. ## Reporting Non-SDK Related Security Issues If you encounter security issues that are not directly related to SDKs but pertain to the services -or products provided by Codex please follow the respective company's security reporting guidelines. +or products provided by Codex, please follow the respective company's security reporting guidelines. ### Codex Terms and Policies -Please contact team@cleanlab.ai for any questions or concerns regarding security of our services. +Please contact team@cleanlab.ai for any questions or concerns regarding the security of our services. --- From 3a5293161e7313d7c18ec61be1b8e7ee56bad8c9 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 3 Jun 2025 21:53:29 +0000 Subject: [PATCH 07/32] feat(api): api update --- .stats.yml | 2 +- src/codex/types/project_create_params.py | 285 ++++++++++++++++++- src/codex/types/project_list_response.py | 285 ++++++++++++++++++- src/codex/types/project_retrieve_response.py | 284 +++++++++++++++++- src/codex/types/project_return_schema.py | 284 +++++++++++++++++- src/codex/types/project_update_params.py | 285 ++++++++++++++++++- tests/api_resources/test_projects.py | 264 +++++++++++++++++ 7 files changed, 1676 insertions(+), 13 deletions(-) diff --git a/.stats.yml b/.stats.yml index 374e6728..e80f0e13 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,3 +1,3 @@ configured_endpoints: 44 -openapi_spec_hash: 67d5aeebff72f48ee4730227ca0b47c2 +openapi_spec_hash: 0f1841fad65926e7ddfb22dd7a642b46 config_hash: 659f65b6ccf5612986f920f7f9abbcb5 diff --git a/src/codex/types/project_create_params.py b/src/codex/types/project_create_params.py index ecdd194d..75892e0b 100644 --- a/src/codex/types/project_create_params.py +++ b/src/codex/types/project_create_params.py @@ -2,10 +2,22 @@ from __future__ import annotations -from typing import Optional -from typing_extensions import Required, TypedDict +from typing import Dict, Optional +from typing_extensions import Literal, Required, TypedDict -__all__ = ["ProjectCreateParams", "Config"] +__all__ = [ + "ProjectCreateParams", + "Config", + "ConfigEvalConfig", + "ConfigEvalConfigCustomEvals", + "ConfigEvalConfigCustomEvalsEvals", + "ConfigEvalConfigDefaultEvals", + "ConfigEvalConfigDefaultEvalsContextSufficiency", + "ConfigEvalConfigDefaultEvalsQueryEase", + "ConfigEvalConfigDefaultEvalsResponseGroundedness", + "ConfigEvalConfigDefaultEvalsResponseHelpfulness", + "ConfigEvalConfigDefaultEvalsTrustworthiness", +] class ProjectCreateParams(TypedDict, total=False): @@ -18,9 +30,276 @@ class ProjectCreateParams(TypedDict, total=False): description: Optional[str] +class ConfigEvalConfigCustomEvalsEvals(TypedDict, total=False): + criteria: Required[str] + """ + The evaluation 
criteria text that describes what aspect is being evaluated and + how + """ + + eval_key: Required[str] + """ + Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + """ + + name: Required[str] + """Display name/label for the evaluation metric""" + + context_identifier: Optional[str] + """ + The exact string used in your evaluation criteria to reference the retrieved + context. + """ + + enabled: bool + """Allows the evaluation to be disabled without removing it""" + + priority: Optional[int] + """ + Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + """ + + query_identifier: Optional[str] + """ + The exact string used in your evaluation criteria to reference the user's query. + """ + + response_identifier: Optional[str] + """ + The exact string used in your evaluation criteria to reference the RAG/LLM + response. + """ + + should_escalate: bool + """ + If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + """ + + threshold: float + """Threshold value that determines if the evaluation fails""" + + threshold_direction: Literal["above", "below"] + """Whether the evaluation fails when score is above or below the threshold""" + + +class ConfigEvalConfigCustomEvals(TypedDict, total=False): + evals: Dict[str, ConfigEvalConfigCustomEvalsEvals] + + +class ConfigEvalConfigDefaultEvalsContextSufficiency(TypedDict, total=False): + eval_key: Required[str] + """ + Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + """ + + name: Required[str] + """Display name/label for the evaluation metric""" + + enabled: bool + """Allows the evaluation to be disabled without removing it""" + + priority: Optional[int] + """ + Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + """ + + should_escalate: bool + """ + If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + """ + + threshold: float + """Threshold value that determines if the evaluation fails""" + + threshold_direction: Literal["above", "below"] + """Whether the evaluation fails when score is above or below the threshold""" + + +class ConfigEvalConfigDefaultEvalsQueryEase(TypedDict, total=False): + eval_key: Required[str] + """ + Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + """ + + name: Required[str] + """Display name/label for the evaluation metric""" + + enabled: bool + """Allows the evaluation to be disabled without removing it""" + + priority: Optional[int] + """ + Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + """ + + should_escalate: bool + """ + If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + """ + + threshold: float + """Threshold value that determines if the evaluation fails""" + + threshold_direction: Literal["above", "below"] + """Whether the evaluation fails when score is above or below the threshold""" + + +class ConfigEvalConfigDefaultEvalsResponseGroundedness(TypedDict, total=False): + eval_key: Required[str] + """ + Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against 
threshold + """ + + name: Required[str] + """Display name/label for the evaluation metric""" + + enabled: bool + """Allows the evaluation to be disabled without removing it""" + + priority: Optional[int] + """ + Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + """ + + should_escalate: bool + """ + If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + """ + + threshold: float + """Threshold value that determines if the evaluation fails""" + + threshold_direction: Literal["above", "below"] + """Whether the evaluation fails when score is above or below the threshold""" + + +class ConfigEvalConfigDefaultEvalsResponseHelpfulness(TypedDict, total=False): + eval_key: Required[str] + """ + Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + """ + + name: Required[str] + """Display name/label for the evaluation metric""" + + enabled: bool + """Allows the evaluation to be disabled without removing it""" + + priority: Optional[int] + """ + Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + """ + + should_escalate: bool + """ + If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + """ + + threshold: float + """Threshold value that determines if the evaluation fails""" + + threshold_direction: Literal["above", "below"] + """Whether the evaluation fails when score is above or below the threshold""" + + +class ConfigEvalConfigDefaultEvalsTrustworthiness(TypedDict, total=False): + eval_key: Required[str] + """ + Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + """ + + name: Required[str] + """Display name/label for the evaluation metric""" + + enabled: bool + """Allows the evaluation to be disabled without removing it""" + + priority: Optional[int] + """ + Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + """ + + should_escalate: bool + """ + If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + """ + + threshold: float + """Threshold value that determines if the evaluation fails""" + + threshold_direction: Literal["above", "below"] + """Whether the evaluation fails when score is above or below the threshold""" + + +class ConfigEvalConfigDefaultEvals(TypedDict, total=False): + context_sufficiency: ConfigEvalConfigDefaultEvalsContextSufficiency + """A pre-configured evaluation metric from TrustworthyRAG or built into the system. + + The evaluation criteria and identifiers are immutable and system-managed, while + other properties like thresholds and priorities can be configured. + """ + + query_ease: ConfigEvalConfigDefaultEvalsQueryEase + """A pre-configured evaluation metric from TrustworthyRAG or built into the system. + + The evaluation criteria and identifiers are immutable and system-managed, while + other properties like thresholds and priorities can be configured. + """ + + response_groundedness: ConfigEvalConfigDefaultEvalsResponseGroundedness + """A pre-configured evaluation metric from TrustworthyRAG or built into the system. + + The evaluation criteria and identifiers are immutable and system-managed, while + other properties like thresholds and priorities can be configured. 
+ """ + + response_helpfulness: ConfigEvalConfigDefaultEvalsResponseHelpfulness + """A pre-configured evaluation metric from TrustworthyRAG or built into the system. + + The evaluation criteria and identifiers are immutable and system-managed, while + other properties like thresholds and priorities can be configured. + """ + + trustworthiness: ConfigEvalConfigDefaultEvalsTrustworthiness + """A pre-configured evaluation metric from TrustworthyRAG or built into the system. + + The evaluation criteria and identifiers are immutable and system-managed, while + other properties like thresholds and priorities can be configured. + """ + + +class ConfigEvalConfig(TypedDict, total=False): + custom_evals: ConfigEvalConfigCustomEvals + """Configuration for custom evaluation metrics.""" + + default_evals: ConfigEvalConfigDefaultEvals + """Configuration for default evaluation metrics.""" + + class Config(TypedDict, total=False): clustering_use_llm_matching: bool + eval_config: ConfigEvalConfig + """Configuration for project-specific evaluation metrics""" + llm_matching_model: str llm_matching_quality_preset: str diff --git a/src/codex/types/project_list_response.py b/src/codex/types/project_list_response.py index 2b4fec42..59d3bf81 100644 --- a/src/codex/types/project_list_response.py +++ b/src/codex/types/project_list_response.py @@ -1,16 +1,297 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import List, Optional +from typing import Dict, List, Optional from datetime import datetime +from typing_extensions import Literal from .._models import BaseModel -__all__ = ["ProjectListResponse", "Project", "ProjectConfig"] +__all__ = [ + "ProjectListResponse", + "Project", + "ProjectConfig", + "ProjectConfigEvalConfig", + "ProjectConfigEvalConfigCustomEvals", + "ProjectConfigEvalConfigCustomEvalsEvals", + "ProjectConfigEvalConfigDefaultEvals", + "ProjectConfigEvalConfigDefaultEvalsContextSufficiency", + "ProjectConfigEvalConfigDefaultEvalsQueryEase", + "ProjectConfigEvalConfigDefaultEvalsResponseGroundedness", + "ProjectConfigEvalConfigDefaultEvalsResponseHelpfulness", + "ProjectConfigEvalConfigDefaultEvalsTrustworthiness", +] + + +class ProjectConfigEvalConfigCustomEvalsEvals(BaseModel): + criteria: str + """ + The evaluation criteria text that describes what aspect is being evaluated and + how + """ + + eval_key: str + """ + Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + """ + + name: str + """Display name/label for the evaluation metric""" + + context_identifier: Optional[str] = None + """ + The exact string used in your evaluation criteria to reference the retrieved + context. + """ + + enabled: Optional[bool] = None + """Allows the evaluation to be disabled without removing it""" + + priority: Optional[int] = None + """ + Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + """ + + query_identifier: Optional[str] = None + """ + The exact string used in your evaluation criteria to reference the user's query. + """ + + response_identifier: Optional[str] = None + """ + The exact string used in your evaluation criteria to reference the RAG/LLM + response. 
+ """ + + should_escalate: Optional[bool] = None + """ + If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + """ + + threshold: Optional[float] = None + """Threshold value that determines if the evaluation fails""" + + threshold_direction: Optional[Literal["above", "below"]] = None + """Whether the evaluation fails when score is above or below the threshold""" + + +class ProjectConfigEvalConfigCustomEvals(BaseModel): + evals: Optional[Dict[str, ProjectConfigEvalConfigCustomEvalsEvals]] = None + + +class ProjectConfigEvalConfigDefaultEvalsContextSufficiency(BaseModel): + eval_key: str + """ + Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + """ + + name: str + """Display name/label for the evaluation metric""" + + enabled: Optional[bool] = None + """Allows the evaluation to be disabled without removing it""" + + priority: Optional[int] = None + """ + Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + """ + + should_escalate: Optional[bool] = None + """ + If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + """ + + threshold: Optional[float] = None + """Threshold value that determines if the evaluation fails""" + + threshold_direction: Optional[Literal["above", "below"]] = None + """Whether the evaluation fails when score is above or below the threshold""" + + +class ProjectConfigEvalConfigDefaultEvalsQueryEase(BaseModel): + eval_key: str + """ + Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + """ + + name: str + """Display name/label for the evaluation metric""" + + enabled: Optional[bool] = None + """Allows the evaluation to be disabled without removing it""" + + priority: Optional[int] = None + """ + Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + """ + + should_escalate: Optional[bool] = None + """ + If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + """ + + threshold: Optional[float] = None + """Threshold value that determines if the evaluation fails""" + + threshold_direction: Optional[Literal["above", "below"]] = None + """Whether the evaluation fails when score is above or below the threshold""" + + +class ProjectConfigEvalConfigDefaultEvalsResponseGroundedness(BaseModel): + eval_key: str + """ + Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + """ + + name: str + """Display name/label for the evaluation metric""" + + enabled: Optional[bool] = None + """Allows the evaluation to be disabled without removing it""" + + priority: Optional[int] = None + """ + Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + """ + + should_escalate: Optional[bool] = None + """ + If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + """ + + threshold: Optional[float] = None + """Threshold value that determines if the evaluation fails""" + + threshold_direction: Optional[Literal["above", "below"]] = None + """Whether the evaluation fails when score is above or below the threshold""" + + +class 
ProjectConfigEvalConfigDefaultEvalsResponseHelpfulness(BaseModel): + eval_key: str + """ + Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + """ + + name: str + """Display name/label for the evaluation metric""" + + enabled: Optional[bool] = None + """Allows the evaluation to be disabled without removing it""" + + priority: Optional[int] = None + """ + Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + """ + + should_escalate: Optional[bool] = None + """ + If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + """ + + threshold: Optional[float] = None + """Threshold value that determines if the evaluation fails""" + + threshold_direction: Optional[Literal["above", "below"]] = None + """Whether the evaluation fails when score is above or below the threshold""" + + +class ProjectConfigEvalConfigDefaultEvalsTrustworthiness(BaseModel): + eval_key: str + """ + Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + """ + + name: str + """Display name/label for the evaluation metric""" + + enabled: Optional[bool] = None + """Allows the evaluation to be disabled without removing it""" + + priority: Optional[int] = None + """ + Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + """ + + should_escalate: Optional[bool] = None + """ + If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + """ + + threshold: Optional[float] = None + """Threshold value that determines if the evaluation fails""" + + threshold_direction: Optional[Literal["above", "below"]] = None + """Whether the evaluation fails when score is above or below the threshold""" + + +class ProjectConfigEvalConfigDefaultEvals(BaseModel): + context_sufficiency: Optional[ProjectConfigEvalConfigDefaultEvalsContextSufficiency] = None + """A pre-configured evaluation metric from TrustworthyRAG or built into the system. + + The evaluation criteria and identifiers are immutable and system-managed, while + other properties like thresholds and priorities can be configured. + """ + + query_ease: Optional[ProjectConfigEvalConfigDefaultEvalsQueryEase] = None + """A pre-configured evaluation metric from TrustworthyRAG or built into the system. + + The evaluation criteria and identifiers are immutable and system-managed, while + other properties like thresholds and priorities can be configured. + """ + + response_groundedness: Optional[ProjectConfigEvalConfigDefaultEvalsResponseGroundedness] = None + """A pre-configured evaluation metric from TrustworthyRAG or built into the system. + + The evaluation criteria and identifiers are immutable and system-managed, while + other properties like thresholds and priorities can be configured. + """ + + response_helpfulness: Optional[ProjectConfigEvalConfigDefaultEvalsResponseHelpfulness] = None + """A pre-configured evaluation metric from TrustworthyRAG or built into the system. + + The evaluation criteria and identifiers are immutable and system-managed, while + other properties like thresholds and priorities can be configured. + """ + + trustworthiness: Optional[ProjectConfigEvalConfigDefaultEvalsTrustworthiness] = None + """A pre-configured evaluation metric from TrustworthyRAG or built into the system. 
+ + The evaluation criteria and identifiers are immutable and system-managed, while + other properties like thresholds and priorities can be configured. + """ + + +class ProjectConfigEvalConfig(BaseModel): + custom_evals: Optional[ProjectConfigEvalConfigCustomEvals] = None + """Configuration for custom evaluation metrics.""" + + default_evals: Optional[ProjectConfigEvalConfigDefaultEvals] = None + """Configuration for default evaluation metrics.""" class ProjectConfig(BaseModel): clustering_use_llm_matching: Optional[bool] = None + eval_config: Optional[ProjectConfigEvalConfig] = None + """Configuration for project-specific evaluation metrics""" + llm_matching_model: Optional[str] = None llm_matching_quality_preset: Optional[str] = None diff --git a/src/codex/types/project_retrieve_response.py b/src/codex/types/project_retrieve_response.py index 62209d32..a631f0c2 100644 --- a/src/codex/types/project_retrieve_response.py +++ b/src/codex/types/project_retrieve_response.py @@ -1,16 +1,296 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import Optional +from typing import Dict, Optional from datetime import datetime +from typing_extensions import Literal from .._models import BaseModel -__all__ = ["ProjectRetrieveResponse", "Config"] +__all__ = [ + "ProjectRetrieveResponse", + "Config", + "ConfigEvalConfig", + "ConfigEvalConfigCustomEvals", + "ConfigEvalConfigCustomEvalsEvals", + "ConfigEvalConfigDefaultEvals", + "ConfigEvalConfigDefaultEvalsContextSufficiency", + "ConfigEvalConfigDefaultEvalsQueryEase", + "ConfigEvalConfigDefaultEvalsResponseGroundedness", + "ConfigEvalConfigDefaultEvalsResponseHelpfulness", + "ConfigEvalConfigDefaultEvalsTrustworthiness", +] + + +class ConfigEvalConfigCustomEvalsEvals(BaseModel): + criteria: str + """ + The evaluation criteria text that describes what aspect is being evaluated and + how + """ + + eval_key: str + """ + Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + """ + + name: str + """Display name/label for the evaluation metric""" + + context_identifier: Optional[str] = None + """ + The exact string used in your evaluation criteria to reference the retrieved + context. + """ + + enabled: Optional[bool] = None + """Allows the evaluation to be disabled without removing it""" + + priority: Optional[int] = None + """ + Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + """ + + query_identifier: Optional[str] = None + """ + The exact string used in your evaluation criteria to reference the user's query. + """ + + response_identifier: Optional[str] = None + """ + The exact string used in your evaluation criteria to reference the RAG/LLM + response. 
+ """ + + should_escalate: Optional[bool] = None + """ + If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + """ + + threshold: Optional[float] = None + """Threshold value that determines if the evaluation fails""" + + threshold_direction: Optional[Literal["above", "below"]] = None + """Whether the evaluation fails when score is above or below the threshold""" + + +class ConfigEvalConfigCustomEvals(BaseModel): + evals: Optional[Dict[str, ConfigEvalConfigCustomEvalsEvals]] = None + + +class ConfigEvalConfigDefaultEvalsContextSufficiency(BaseModel): + eval_key: str + """ + Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + """ + + name: str + """Display name/label for the evaluation metric""" + + enabled: Optional[bool] = None + """Allows the evaluation to be disabled without removing it""" + + priority: Optional[int] = None + """ + Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + """ + + should_escalate: Optional[bool] = None + """ + If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + """ + + threshold: Optional[float] = None + """Threshold value that determines if the evaluation fails""" + + threshold_direction: Optional[Literal["above", "below"]] = None + """Whether the evaluation fails when score is above or below the threshold""" + + +class ConfigEvalConfigDefaultEvalsQueryEase(BaseModel): + eval_key: str + """ + Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + """ + + name: str + """Display name/label for the evaluation metric""" + + enabled: Optional[bool] = None + """Allows the evaluation to be disabled without removing it""" + + priority: Optional[int] = None + """ + Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + """ + + should_escalate: Optional[bool] = None + """ + If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + """ + + threshold: Optional[float] = None + """Threshold value that determines if the evaluation fails""" + + threshold_direction: Optional[Literal["above", "below"]] = None + """Whether the evaluation fails when score is above or below the threshold""" + + +class ConfigEvalConfigDefaultEvalsResponseGroundedness(BaseModel): + eval_key: str + """ + Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + """ + + name: str + """Display name/label for the evaluation metric""" + + enabled: Optional[bool] = None + """Allows the evaluation to be disabled without removing it""" + + priority: Optional[int] = None + """ + Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + """ + + should_escalate: Optional[bool] = None + """ + If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + """ + + threshold: Optional[float] = None + """Threshold value that determines if the evaluation fails""" + + threshold_direction: Optional[Literal["above", "below"]] = None + """Whether the evaluation fails when score is above or below the threshold""" + + +class ConfigEvalConfigDefaultEvalsResponseHelpfulness(BaseModel): + eval_key: str + """ + Unique key 
for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + """ + + name: str + """Display name/label for the evaluation metric""" + + enabled: Optional[bool] = None + """Allows the evaluation to be disabled without removing it""" + + priority: Optional[int] = None + """ + Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + """ + + should_escalate: Optional[bool] = None + """ + If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + """ + + threshold: Optional[float] = None + """Threshold value that determines if the evaluation fails""" + + threshold_direction: Optional[Literal["above", "below"]] = None + """Whether the evaluation fails when score is above or below the threshold""" + + +class ConfigEvalConfigDefaultEvalsTrustworthiness(BaseModel): + eval_key: str + """ + Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + """ + + name: str + """Display name/label for the evaluation metric""" + + enabled: Optional[bool] = None + """Allows the evaluation to be disabled without removing it""" + + priority: Optional[int] = None + """ + Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + """ + + should_escalate: Optional[bool] = None + """ + If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + """ + + threshold: Optional[float] = None + """Threshold value that determines if the evaluation fails""" + + threshold_direction: Optional[Literal["above", "below"]] = None + """Whether the evaluation fails when score is above or below the threshold""" + + +class ConfigEvalConfigDefaultEvals(BaseModel): + context_sufficiency: Optional[ConfigEvalConfigDefaultEvalsContextSufficiency] = None + """A pre-configured evaluation metric from TrustworthyRAG or built into the system. + + The evaluation criteria and identifiers are immutable and system-managed, while + other properties like thresholds and priorities can be configured. + """ + + query_ease: Optional[ConfigEvalConfigDefaultEvalsQueryEase] = None + """A pre-configured evaluation metric from TrustworthyRAG or built into the system. + + The evaluation criteria and identifiers are immutable and system-managed, while + other properties like thresholds and priorities can be configured. + """ + + response_groundedness: Optional[ConfigEvalConfigDefaultEvalsResponseGroundedness] = None + """A pre-configured evaluation metric from TrustworthyRAG or built into the system. + + The evaluation criteria and identifiers are immutable and system-managed, while + other properties like thresholds and priorities can be configured. + """ + + response_helpfulness: Optional[ConfigEvalConfigDefaultEvalsResponseHelpfulness] = None + """A pre-configured evaluation metric from TrustworthyRAG or built into the system. + + The evaluation criteria and identifiers are immutable and system-managed, while + other properties like thresholds and priorities can be configured. + """ + + trustworthiness: Optional[ConfigEvalConfigDefaultEvalsTrustworthiness] = None + """A pre-configured evaluation metric from TrustworthyRAG or built into the system. + + The evaluation criteria and identifiers are immutable and system-managed, while + other properties like thresholds and priorities can be configured. 
+ """ + + +class ConfigEvalConfig(BaseModel): + custom_evals: Optional[ConfigEvalConfigCustomEvals] = None + """Configuration for custom evaluation metrics.""" + + default_evals: Optional[ConfigEvalConfigDefaultEvals] = None + """Configuration for default evaluation metrics.""" class Config(BaseModel): clustering_use_llm_matching: Optional[bool] = None + eval_config: Optional[ConfigEvalConfig] = None + """Configuration for project-specific evaluation metrics""" + llm_matching_model: Optional[str] = None llm_matching_quality_preset: Optional[str] = None diff --git a/src/codex/types/project_return_schema.py b/src/codex/types/project_return_schema.py index 51a6c1ad..7da2e615 100644 --- a/src/codex/types/project_return_schema.py +++ b/src/codex/types/project_return_schema.py @@ -1,16 +1,296 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import Optional +from typing import Dict, Optional from datetime import datetime +from typing_extensions import Literal from .._models import BaseModel -__all__ = ["ProjectReturnSchema", "Config"] +__all__ = [ + "ProjectReturnSchema", + "Config", + "ConfigEvalConfig", + "ConfigEvalConfigCustomEvals", + "ConfigEvalConfigCustomEvalsEvals", + "ConfigEvalConfigDefaultEvals", + "ConfigEvalConfigDefaultEvalsContextSufficiency", + "ConfigEvalConfigDefaultEvalsQueryEase", + "ConfigEvalConfigDefaultEvalsResponseGroundedness", + "ConfigEvalConfigDefaultEvalsResponseHelpfulness", + "ConfigEvalConfigDefaultEvalsTrustworthiness", +] + + +class ConfigEvalConfigCustomEvalsEvals(BaseModel): + criteria: str + """ + The evaluation criteria text that describes what aspect is being evaluated and + how + """ + + eval_key: str + """ + Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + """ + + name: str + """Display name/label for the evaluation metric""" + + context_identifier: Optional[str] = None + """ + The exact string used in your evaluation criteria to reference the retrieved + context. + """ + + enabled: Optional[bool] = None + """Allows the evaluation to be disabled without removing it""" + + priority: Optional[int] = None + """ + Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + """ + + query_identifier: Optional[str] = None + """ + The exact string used in your evaluation criteria to reference the user's query. + """ + + response_identifier: Optional[str] = None + """ + The exact string used in your evaluation criteria to reference the RAG/LLM + response. 
+ """ + + should_escalate: Optional[bool] = None + """ + If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + """ + + threshold: Optional[float] = None + """Threshold value that determines if the evaluation fails""" + + threshold_direction: Optional[Literal["above", "below"]] = None + """Whether the evaluation fails when score is above or below the threshold""" + + +class ConfigEvalConfigCustomEvals(BaseModel): + evals: Optional[Dict[str, ConfigEvalConfigCustomEvalsEvals]] = None + + +class ConfigEvalConfigDefaultEvalsContextSufficiency(BaseModel): + eval_key: str + """ + Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + """ + + name: str + """Display name/label for the evaluation metric""" + + enabled: Optional[bool] = None + """Allows the evaluation to be disabled without removing it""" + + priority: Optional[int] = None + """ + Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + """ + + should_escalate: Optional[bool] = None + """ + If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + """ + + threshold: Optional[float] = None + """Threshold value that determines if the evaluation fails""" + + threshold_direction: Optional[Literal["above", "below"]] = None + """Whether the evaluation fails when score is above or below the threshold""" + + +class ConfigEvalConfigDefaultEvalsQueryEase(BaseModel): + eval_key: str + """ + Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + """ + + name: str + """Display name/label for the evaluation metric""" + + enabled: Optional[bool] = None + """Allows the evaluation to be disabled without removing it""" + + priority: Optional[int] = None + """ + Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + """ + + should_escalate: Optional[bool] = None + """ + If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + """ + + threshold: Optional[float] = None + """Threshold value that determines if the evaluation fails""" + + threshold_direction: Optional[Literal["above", "below"]] = None + """Whether the evaluation fails when score is above or below the threshold""" + + +class ConfigEvalConfigDefaultEvalsResponseGroundedness(BaseModel): + eval_key: str + """ + Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + """ + + name: str + """Display name/label for the evaluation metric""" + + enabled: Optional[bool] = None + """Allows the evaluation to be disabled without removing it""" + + priority: Optional[int] = None + """ + Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + """ + + should_escalate: Optional[bool] = None + """ + If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + """ + + threshold: Optional[float] = None + """Threshold value that determines if the evaluation fails""" + + threshold_direction: Optional[Literal["above", "below"]] = None + """Whether the evaluation fails when score is above or below the threshold""" + + +class ConfigEvalConfigDefaultEvalsResponseHelpfulness(BaseModel): + eval_key: str + """ + Unique key 
for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + """ + + name: str + """Display name/label for the evaluation metric""" + + enabled: Optional[bool] = None + """Allows the evaluation to be disabled without removing it""" + + priority: Optional[int] = None + """ + Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + """ + + should_escalate: Optional[bool] = None + """ + If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + """ + + threshold: Optional[float] = None + """Threshold value that determines if the evaluation fails""" + + threshold_direction: Optional[Literal["above", "below"]] = None + """Whether the evaluation fails when score is above or below the threshold""" + + +class ConfigEvalConfigDefaultEvalsTrustworthiness(BaseModel): + eval_key: str + """ + Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + """ + + name: str + """Display name/label for the evaluation metric""" + + enabled: Optional[bool] = None + """Allows the evaluation to be disabled without removing it""" + + priority: Optional[int] = None + """ + Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + """ + + should_escalate: Optional[bool] = None + """ + If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + """ + + threshold: Optional[float] = None + """Threshold value that determines if the evaluation fails""" + + threshold_direction: Optional[Literal["above", "below"]] = None + """Whether the evaluation fails when score is above or below the threshold""" + + +class ConfigEvalConfigDefaultEvals(BaseModel): + context_sufficiency: Optional[ConfigEvalConfigDefaultEvalsContextSufficiency] = None + """A pre-configured evaluation metric from TrustworthyRAG or built into the system. + + The evaluation criteria and identifiers are immutable and system-managed, while + other properties like thresholds and priorities can be configured. + """ + + query_ease: Optional[ConfigEvalConfigDefaultEvalsQueryEase] = None + """A pre-configured evaluation metric from TrustworthyRAG or built into the system. + + The evaluation criteria and identifiers are immutable and system-managed, while + other properties like thresholds and priorities can be configured. + """ + + response_groundedness: Optional[ConfigEvalConfigDefaultEvalsResponseGroundedness] = None + """A pre-configured evaluation metric from TrustworthyRAG or built into the system. + + The evaluation criteria and identifiers are immutable and system-managed, while + other properties like thresholds and priorities can be configured. + """ + + response_helpfulness: Optional[ConfigEvalConfigDefaultEvalsResponseHelpfulness] = None + """A pre-configured evaluation metric from TrustworthyRAG or built into the system. + + The evaluation criteria and identifiers are immutable and system-managed, while + other properties like thresholds and priorities can be configured. + """ + + trustworthiness: Optional[ConfigEvalConfigDefaultEvalsTrustworthiness] = None + """A pre-configured evaluation metric from TrustworthyRAG or built into the system. + + The evaluation criteria and identifiers are immutable and system-managed, while + other properties like thresholds and priorities can be configured. 
+ """ + + +class ConfigEvalConfig(BaseModel): + custom_evals: Optional[ConfigEvalConfigCustomEvals] = None + """Configuration for custom evaluation metrics.""" + + default_evals: Optional[ConfigEvalConfigDefaultEvals] = None + """Configuration for default evaluation metrics.""" class Config(BaseModel): clustering_use_llm_matching: Optional[bool] = None + eval_config: Optional[ConfigEvalConfig] = None + """Configuration for project-specific evaluation metrics""" + llm_matching_model: Optional[str] = None llm_matching_quality_preset: Optional[str] = None diff --git a/src/codex/types/project_update_params.py b/src/codex/types/project_update_params.py index 0a5aa540..d58dd591 100644 --- a/src/codex/types/project_update_params.py +++ b/src/codex/types/project_update_params.py @@ -2,10 +2,22 @@ from __future__ import annotations -from typing import Optional -from typing_extensions import Required, TypedDict +from typing import Dict, Optional +from typing_extensions import Literal, Required, TypedDict -__all__ = ["ProjectUpdateParams", "Config"] +__all__ = [ + "ProjectUpdateParams", + "Config", + "ConfigEvalConfig", + "ConfigEvalConfigCustomEvals", + "ConfigEvalConfigCustomEvalsEvals", + "ConfigEvalConfigDefaultEvals", + "ConfigEvalConfigDefaultEvalsContextSufficiency", + "ConfigEvalConfigDefaultEvalsQueryEase", + "ConfigEvalConfigDefaultEvalsResponseGroundedness", + "ConfigEvalConfigDefaultEvalsResponseHelpfulness", + "ConfigEvalConfigDefaultEvalsTrustworthiness", +] class ProjectUpdateParams(TypedDict, total=False): @@ -16,9 +28,276 @@ class ProjectUpdateParams(TypedDict, total=False): description: Optional[str] +class ConfigEvalConfigCustomEvalsEvals(TypedDict, total=False): + criteria: Required[str] + """ + The evaluation criteria text that describes what aspect is being evaluated and + how + """ + + eval_key: Required[str] + """ + Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + """ + + name: Required[str] + """Display name/label for the evaluation metric""" + + context_identifier: Optional[str] + """ + The exact string used in your evaluation criteria to reference the retrieved + context. + """ + + enabled: bool + """Allows the evaluation to be disabled without removing it""" + + priority: Optional[int] + """ + Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + """ + + query_identifier: Optional[str] + """ + The exact string used in your evaluation criteria to reference the user's query. + """ + + response_identifier: Optional[str] + """ + The exact string used in your evaluation criteria to reference the RAG/LLM + response. 
+ """ + + should_escalate: bool + """ + If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + """ + + threshold: float + """Threshold value that determines if the evaluation fails""" + + threshold_direction: Literal["above", "below"] + """Whether the evaluation fails when score is above or below the threshold""" + + +class ConfigEvalConfigCustomEvals(TypedDict, total=False): + evals: Dict[str, ConfigEvalConfigCustomEvalsEvals] + + +class ConfigEvalConfigDefaultEvalsContextSufficiency(TypedDict, total=False): + eval_key: Required[str] + """ + Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + """ + + name: Required[str] + """Display name/label for the evaluation metric""" + + enabled: bool + """Allows the evaluation to be disabled without removing it""" + + priority: Optional[int] + """ + Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + """ + + should_escalate: bool + """ + If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + """ + + threshold: float + """Threshold value that determines if the evaluation fails""" + + threshold_direction: Literal["above", "below"] + """Whether the evaluation fails when score is above or below the threshold""" + + +class ConfigEvalConfigDefaultEvalsQueryEase(TypedDict, total=False): + eval_key: Required[str] + """ + Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + """ + + name: Required[str] + """Display name/label for the evaluation metric""" + + enabled: bool + """Allows the evaluation to be disabled without removing it""" + + priority: Optional[int] + """ + Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + """ + + should_escalate: bool + """ + If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + """ + + threshold: float + """Threshold value that determines if the evaluation fails""" + + threshold_direction: Literal["above", "below"] + """Whether the evaluation fails when score is above or below the threshold""" + + +class ConfigEvalConfigDefaultEvalsResponseGroundedness(TypedDict, total=False): + eval_key: Required[str] + """ + Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + """ + + name: Required[str] + """Display name/label for the evaluation metric""" + + enabled: bool + """Allows the evaluation to be disabled without removing it""" + + priority: Optional[int] + """ + Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + """ + + should_escalate: bool + """ + If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + """ + + threshold: float + """Threshold value that determines if the evaluation fails""" + + threshold_direction: Literal["above", "below"] + """Whether the evaluation fails when score is above or below the threshold""" + + +class ConfigEvalConfigDefaultEvalsResponseHelpfulness(TypedDict, total=False): + eval_key: Required[str] + """ + Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + """ + + name: 
Required[str] + """Display name/label for the evaluation metric""" + + enabled: bool + """Allows the evaluation to be disabled without removing it""" + + priority: Optional[int] + """ + Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + """ + + should_escalate: bool + """ + If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + """ + + threshold: float + """Threshold value that determines if the evaluation fails""" + + threshold_direction: Literal["above", "below"] + """Whether the evaluation fails when score is above or below the threshold""" + + +class ConfigEvalConfigDefaultEvalsTrustworthiness(TypedDict, total=False): + eval_key: Required[str] + """ + Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + """ + + name: Required[str] + """Display name/label for the evaluation metric""" + + enabled: bool + """Allows the evaluation to be disabled without removing it""" + + priority: Optional[int] + """ + Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + """ + + should_escalate: bool + """ + If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + """ + + threshold: float + """Threshold value that determines if the evaluation fails""" + + threshold_direction: Literal["above", "below"] + """Whether the evaluation fails when score is above or below the threshold""" + + +class ConfigEvalConfigDefaultEvals(TypedDict, total=False): + context_sufficiency: ConfigEvalConfigDefaultEvalsContextSufficiency + """A pre-configured evaluation metric from TrustworthyRAG or built into the system. + + The evaluation criteria and identifiers are immutable and system-managed, while + other properties like thresholds and priorities can be configured. + """ + + query_ease: ConfigEvalConfigDefaultEvalsQueryEase + """A pre-configured evaluation metric from TrustworthyRAG or built into the system. + + The evaluation criteria and identifiers are immutable and system-managed, while + other properties like thresholds and priorities can be configured. + """ + + response_groundedness: ConfigEvalConfigDefaultEvalsResponseGroundedness + """A pre-configured evaluation metric from TrustworthyRAG or built into the system. + + The evaluation criteria and identifiers are immutable and system-managed, while + other properties like thresholds and priorities can be configured. + """ + + response_helpfulness: ConfigEvalConfigDefaultEvalsResponseHelpfulness + """A pre-configured evaluation metric from TrustworthyRAG or built into the system. + + The evaluation criteria and identifiers are immutable and system-managed, while + other properties like thresholds and priorities can be configured. + """ + + trustworthiness: ConfigEvalConfigDefaultEvalsTrustworthiness + """A pre-configured evaluation metric from TrustworthyRAG or built into the system. + + The evaluation criteria and identifiers are immutable and system-managed, while + other properties like thresholds and priorities can be configured. 
+ """ + + +class ConfigEvalConfig(TypedDict, total=False): + custom_evals: ConfigEvalConfigCustomEvals + """Configuration for custom evaluation metrics.""" + + default_evals: ConfigEvalConfigDefaultEvals + """Configuration for default evaluation metrics.""" + + class Config(TypedDict, total=False): clustering_use_llm_matching: bool + eval_config: ConfigEvalConfig + """Configuration for project-specific evaluation metrics""" + llm_matching_model: str llm_matching_quality_preset: str diff --git a/tests/api_resources/test_projects.py b/tests/api_resources/test_projects.py index 5c29fddf..d5e0e1cc 100644 --- a/tests/api_resources/test_projects.py +++ b/tests/api_resources/test_projects.py @@ -39,6 +39,72 @@ def test_method_create_with_all_params(self, client: Codex) -> None: project = client.projects.create( config={ "clustering_use_llm_matching": True, + "eval_config": { + "custom_evals": { + "evals": { + "foo": { + "criteria": "criteria", + "eval_key": "eval_key", + "name": "name", + "context_identifier": "context_identifier", + "enabled": True, + "priority": 0, + "query_identifier": "query_identifier", + "response_identifier": "response_identifier", + "should_escalate": True, + "threshold": 0, + "threshold_direction": "above", + } + } + }, + "default_evals": { + "context_sufficiency": { + "eval_key": "eval_key", + "name": "name", + "enabled": True, + "priority": 0, + "should_escalate": True, + "threshold": 0, + "threshold_direction": "above", + }, + "query_ease": { + "eval_key": "eval_key", + "name": "name", + "enabled": True, + "priority": 0, + "should_escalate": True, + "threshold": 0, + "threshold_direction": "above", + }, + "response_groundedness": { + "eval_key": "eval_key", + "name": "name", + "enabled": True, + "priority": 0, + "should_escalate": True, + "threshold": 0, + "threshold_direction": "above", + }, + "response_helpfulness": { + "eval_key": "eval_key", + "name": "name", + "enabled": True, + "priority": 0, + "should_escalate": True, + "threshold": 0, + "threshold_direction": "above", + }, + "trustworthiness": { + "eval_key": "eval_key", + "name": "name", + "enabled": True, + "priority": 0, + "should_escalate": True, + "threshold": 0, + "threshold_direction": "above", + }, + }, + }, "llm_matching_model": "llm_matching_model", "llm_matching_quality_preset": "llm_matching_quality_preset", "lower_llm_match_distance_threshold": 0, @@ -141,6 +207,72 @@ def test_method_update_with_all_params(self, client: Codex) -> None: project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", config={ "clustering_use_llm_matching": True, + "eval_config": { + "custom_evals": { + "evals": { + "foo": { + "criteria": "criteria", + "eval_key": "eval_key", + "name": "name", + "context_identifier": "context_identifier", + "enabled": True, + "priority": 0, + "query_identifier": "query_identifier", + "response_identifier": "response_identifier", + "should_escalate": True, + "threshold": 0, + "threshold_direction": "above", + } + } + }, + "default_evals": { + "context_sufficiency": { + "eval_key": "eval_key", + "name": "name", + "enabled": True, + "priority": 0, + "should_escalate": True, + "threshold": 0, + "threshold_direction": "above", + }, + "query_ease": { + "eval_key": "eval_key", + "name": "name", + "enabled": True, + "priority": 0, + "should_escalate": True, + "threshold": 0, + "threshold_direction": "above", + }, + "response_groundedness": { + "eval_key": "eval_key", + "name": "name", + "enabled": True, + "priority": 0, + "should_escalate": True, + "threshold": 0, + "threshold_direction": "above", + }, + 
"response_helpfulness": { + "eval_key": "eval_key", + "name": "name", + "enabled": True, + "priority": 0, + "should_escalate": True, + "threshold": 0, + "threshold_direction": "above", + }, + "trustworthiness": { + "eval_key": "eval_key", + "name": "name", + "enabled": True, + "priority": 0, + "should_escalate": True, + "threshold": 0, + "threshold_direction": "above", + }, + }, + }, "llm_matching_model": "llm_matching_model", "llm_matching_quality_preset": "llm_matching_quality_preset", "lower_llm_match_distance_threshold": 0, @@ -534,6 +666,72 @@ async def test_method_create_with_all_params(self, async_client: AsyncCodex) -> project = await async_client.projects.create( config={ "clustering_use_llm_matching": True, + "eval_config": { + "custom_evals": { + "evals": { + "foo": { + "criteria": "criteria", + "eval_key": "eval_key", + "name": "name", + "context_identifier": "context_identifier", + "enabled": True, + "priority": 0, + "query_identifier": "query_identifier", + "response_identifier": "response_identifier", + "should_escalate": True, + "threshold": 0, + "threshold_direction": "above", + } + } + }, + "default_evals": { + "context_sufficiency": { + "eval_key": "eval_key", + "name": "name", + "enabled": True, + "priority": 0, + "should_escalate": True, + "threshold": 0, + "threshold_direction": "above", + }, + "query_ease": { + "eval_key": "eval_key", + "name": "name", + "enabled": True, + "priority": 0, + "should_escalate": True, + "threshold": 0, + "threshold_direction": "above", + }, + "response_groundedness": { + "eval_key": "eval_key", + "name": "name", + "enabled": True, + "priority": 0, + "should_escalate": True, + "threshold": 0, + "threshold_direction": "above", + }, + "response_helpfulness": { + "eval_key": "eval_key", + "name": "name", + "enabled": True, + "priority": 0, + "should_escalate": True, + "threshold": 0, + "threshold_direction": "above", + }, + "trustworthiness": { + "eval_key": "eval_key", + "name": "name", + "enabled": True, + "priority": 0, + "should_escalate": True, + "threshold": 0, + "threshold_direction": "above", + }, + }, + }, "llm_matching_model": "llm_matching_model", "llm_matching_quality_preset": "llm_matching_quality_preset", "lower_llm_match_distance_threshold": 0, @@ -636,6 +834,72 @@ async def test_method_update_with_all_params(self, async_client: AsyncCodex) -> project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", config={ "clustering_use_llm_matching": True, + "eval_config": { + "custom_evals": { + "evals": { + "foo": { + "criteria": "criteria", + "eval_key": "eval_key", + "name": "name", + "context_identifier": "context_identifier", + "enabled": True, + "priority": 0, + "query_identifier": "query_identifier", + "response_identifier": "response_identifier", + "should_escalate": True, + "threshold": 0, + "threshold_direction": "above", + } + } + }, + "default_evals": { + "context_sufficiency": { + "eval_key": "eval_key", + "name": "name", + "enabled": True, + "priority": 0, + "should_escalate": True, + "threshold": 0, + "threshold_direction": "above", + }, + "query_ease": { + "eval_key": "eval_key", + "name": "name", + "enabled": True, + "priority": 0, + "should_escalate": True, + "threshold": 0, + "threshold_direction": "above", + }, + "response_groundedness": { + "eval_key": "eval_key", + "name": "name", + "enabled": True, + "priority": 0, + "should_escalate": True, + "threshold": 0, + "threshold_direction": "above", + }, + "response_helpfulness": { + "eval_key": "eval_key", + "name": "name", + "enabled": True, + "priority": 0, + 
"should_escalate": True, + "threshold": 0, + "threshold_direction": "above", + }, + "trustworthiness": { + "eval_key": "eval_key", + "name": "name", + "enabled": True, + "priority": 0, + "should_escalate": True, + "threshold": 0, + "threshold_direction": "above", + }, + }, + }, "llm_matching_model": "llm_matching_model", "llm_matching_quality_preset": "llm_matching_quality_preset", "lower_llm_match_distance_threshold": 0, From 7ba3858c1c968c093c676478a1c7e5e13b92c12a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 28 May 2025 03:26:20 +0000 Subject: [PATCH 08/32] fix(docs/api): remove references to nonexistent types --- api.md | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/api.md b/api.md index f3a2ea14..8aac76fd 100644 --- a/api.md +++ b/api.md @@ -139,8 +139,6 @@ from codex.types import ( ProjectReturnSchema, ProjectRetrieveResponse, ProjectListResponse, - ProjectExportResponse, - ProjectIncrementQueriesResponse, ProjectRetrieveAnalyticsResponse, ProjectValidateResponse, ) @@ -153,8 +151,8 @@ Methods: - client.projects.update(project_id, \*\*params) -> ProjectReturnSchema - client.projects.list(\*\*params) -> ProjectListResponse - client.projects.delete(project_id) -> None -- client.projects.export(project_id) -> object -- client.projects.increment_queries(project_id, \*\*params) -> object +- client.projects.export(project_id) -> object +- client.projects.increment_queries(project_id, \*\*params) -> object - client.projects.retrieve_analytics(project_id, \*\*params) -> ProjectRetrieveAnalyticsResponse - client.projects.validate(project_id, \*\*params) -> ProjectValidateResponse From 57f522fbd04637849146636a83976fe696160a97 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 29 May 2025 03:09:34 +0000 Subject: [PATCH 09/32] chore(api): mark some methods as deprecated --- src/codex/resources/projects/entries.py | 27 ++- src/codex/resources/projects/projects.py | 27 ++- tests/api_resources/projects/test_entries.py | 164 ++++++++++--------- tests/api_resources/test_projects.py | 100 ++++++----- 4 files changed, 186 insertions(+), 132 deletions(-) diff --git a/src/codex/resources/projects/entries.py b/src/codex/resources/projects/entries.py index 346dd353..c6b43a4a 100644 --- a/src/codex/resources/projects/entries.py +++ b/src/codex/resources/projects/entries.py @@ -2,6 +2,7 @@ from __future__ import annotations +import typing_extensions from typing import Iterable, Optional import httpx @@ -312,6 +313,7 @@ def publish_draft_answer( cast_to=Entry, ) + @typing_extensions.deprecated("deprecated") def query( self, project_id: str, @@ -707,6 +709,7 @@ async def publish_draft_answer( cast_to=Entry, ) + @typing_extensions.deprecated("deprecated") async def query( self, project_id: str, @@ -839,8 +842,10 @@ def __init__(self, entries: EntriesResource) -> None: self.publish_draft_answer = to_raw_response_wrapper( entries.publish_draft_answer, ) - self.query = to_raw_response_wrapper( - entries.query, + self.query = ( # pyright: ignore[reportDeprecated] + to_raw_response_wrapper( + entries.query # pyright: ignore[reportDeprecated], + ) ) self.unpublish_answer = to_raw_response_wrapper( entries.unpublish_answer, @@ -869,8 +874,10 @@ def __init__(self, entries: AsyncEntriesResource) -> None: self.publish_draft_answer = async_to_raw_response_wrapper( entries.publish_draft_answer, ) - self.query = async_to_raw_response_wrapper( - entries.query, 
+ self.query = ( # pyright: ignore[reportDeprecated] + async_to_raw_response_wrapper( + entries.query # pyright: ignore[reportDeprecated], + ) ) self.unpublish_answer = async_to_raw_response_wrapper( entries.unpublish_answer, @@ -899,8 +906,10 @@ def __init__(self, entries: EntriesResource) -> None: self.publish_draft_answer = to_streamed_response_wrapper( entries.publish_draft_answer, ) - self.query = to_streamed_response_wrapper( - entries.query, + self.query = ( # pyright: ignore[reportDeprecated] + to_streamed_response_wrapper( + entries.query # pyright: ignore[reportDeprecated], + ) ) self.unpublish_answer = to_streamed_response_wrapper( entries.unpublish_answer, @@ -929,8 +938,10 @@ def __init__(self, entries: AsyncEntriesResource) -> None: self.publish_draft_answer = async_to_streamed_response_wrapper( entries.publish_draft_answer, ) - self.query = async_to_streamed_response_wrapper( - entries.query, + self.query = ( # pyright: ignore[reportDeprecated] + async_to_streamed_response_wrapper( + entries.query # pyright: ignore[reportDeprecated], + ) ) self.unpublish_answer = async_to_streamed_response_wrapper( entries.unpublish_answer, diff --git a/src/codex/resources/projects/projects.py b/src/codex/resources/projects/projects.py index 6195d1a4..cf8c0f82 100644 --- a/src/codex/resources/projects/projects.py +++ b/src/codex/resources/projects/projects.py @@ -2,6 +2,7 @@ from __future__ import annotations +import typing_extensions from typing import Dict, List, Optional from typing_extensions import Literal @@ -330,6 +331,7 @@ def export( cast_to=object, ) + @typing_extensions.deprecated("deprecated") def increment_queries( self, project_id: str, @@ -872,6 +874,7 @@ async def export( cast_to=object, ) + @typing_extensions.deprecated("deprecated") async def increment_queries( self, project_id: str, @@ -1167,8 +1170,10 @@ def __init__(self, projects: ProjectsResource) -> None: self.export = to_raw_response_wrapper( projects.export, ) - self.increment_queries = to_raw_response_wrapper( - projects.increment_queries, + self.increment_queries = ( # pyright: ignore[reportDeprecated] + to_raw_response_wrapper( + projects.increment_queries # pyright: ignore[reportDeprecated], + ) ) self.retrieve_analytics = to_raw_response_wrapper( projects.retrieve_analytics, @@ -1212,8 +1217,10 @@ def __init__(self, projects: AsyncProjectsResource) -> None: self.export = async_to_raw_response_wrapper( projects.export, ) - self.increment_queries = async_to_raw_response_wrapper( - projects.increment_queries, + self.increment_queries = ( # pyright: ignore[reportDeprecated] + async_to_raw_response_wrapper( + projects.increment_queries # pyright: ignore[reportDeprecated], + ) ) self.retrieve_analytics = async_to_raw_response_wrapper( projects.retrieve_analytics, @@ -1257,8 +1264,10 @@ def __init__(self, projects: ProjectsResource) -> None: self.export = to_streamed_response_wrapper( projects.export, ) - self.increment_queries = to_streamed_response_wrapper( - projects.increment_queries, + self.increment_queries = ( # pyright: ignore[reportDeprecated] + to_streamed_response_wrapper( + projects.increment_queries # pyright: ignore[reportDeprecated], + ) ) self.retrieve_analytics = to_streamed_response_wrapper( projects.retrieve_analytics, @@ -1302,8 +1311,10 @@ def __init__(self, projects: AsyncProjectsResource) -> None: self.export = async_to_streamed_response_wrapper( projects.export, ) - self.increment_queries = async_to_streamed_response_wrapper( - projects.increment_queries, + self.increment_queries = ( # pyright: 
ignore[reportDeprecated] + async_to_streamed_response_wrapper( + projects.increment_queries # pyright: ignore[reportDeprecated], + ) ) self.retrieve_analytics = async_to_streamed_response_wrapper( projects.retrieve_analytics, diff --git a/tests/api_resources/projects/test_entries.py b/tests/api_resources/projects/test_entries.py index 73a45ad4..32b0452e 100644 --- a/tests/api_resources/projects/test_entries.py +++ b/tests/api_resources/projects/test_entries.py @@ -15,6 +15,8 @@ EntryNotifySmeResponse, ) +# pyright: reportDeprecated=false + base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -382,40 +384,45 @@ def test_path_params_publish_draft_answer(self, client: Codex) -> None: @pytest.mark.skip() @parametrize def test_method_query(self, client: Codex) -> None: - entry = client.projects.entries.query( - project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - question="question", - ) + with pytest.warns(DeprecationWarning): + entry = client.projects.entries.query( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + question="question", + ) + assert_matches_type(EntryQueryResponse, entry, path=["response"]) @pytest.mark.skip() @parametrize def test_method_query_with_all_params(self, client: Codex) -> None: - entry = client.projects.entries.query( - project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - question="question", - use_llm_matching=True, - client_metadata={}, - query_metadata={ - "context": "string", - "custom_metadata": {}, - "eval_scores": {"foo": 0}, - "evaluated_response": "evaluated_response", - }, - x_client_library_version="x-client-library-version", - x_integration_type="x-integration-type", - x_source="x-source", - x_stainless_package_version="x-stainless-package-version", - ) + with pytest.warns(DeprecationWarning): + entry = client.projects.entries.query( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + question="question", + use_llm_matching=True, + client_metadata={}, + query_metadata={ + "context": "string", + "custom_metadata": {}, + "eval_scores": {"foo": 0}, + "evaluated_response": "evaluated_response", + }, + x_client_library_version="x-client-library-version", + x_integration_type="x-integration-type", + x_source="x-source", + x_stainless_package_version="x-stainless-package-version", + ) + assert_matches_type(EntryQueryResponse, entry, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_query(self, client: Codex) -> None: - response = client.projects.entries.with_raw_response.query( - project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - question="question", - ) + with pytest.warns(DeprecationWarning): + response = client.projects.entries.with_raw_response.query( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + question="question", + ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -425,26 +432,28 @@ def test_raw_response_query(self, client: Codex) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_query(self, client: Codex) -> None: - with client.projects.entries.with_streaming_response.query( - project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - question="question", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" + with pytest.warns(DeprecationWarning): + with client.projects.entries.with_streaming_response.query( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + question="question", + ) as response: + assert not response.is_closed + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" - entry = response.parse() - assert_matches_type(EntryQueryResponse, entry, path=["response"]) + entry = response.parse() + assert_matches_type(EntryQueryResponse, entry, path=["response"]) assert cast(Any, response.is_closed) is True @pytest.mark.skip() @parametrize def test_path_params_query(self, client: Codex) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): - client.projects.entries.with_raw_response.query( - project_id="", - question="question", - ) + with pytest.warns(DeprecationWarning): + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.projects.entries.with_raw_response.query( + project_id="", + question="question", + ) @pytest.mark.skip() @parametrize @@ -863,40 +872,45 @@ async def test_path_params_publish_draft_answer(self, async_client: AsyncCodex) @pytest.mark.skip() @parametrize async def test_method_query(self, async_client: AsyncCodex) -> None: - entry = await async_client.projects.entries.query( - project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - question="question", - ) + with pytest.warns(DeprecationWarning): + entry = await async_client.projects.entries.query( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + question="question", + ) + assert_matches_type(EntryQueryResponse, entry, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_query_with_all_params(self, async_client: AsyncCodex) -> None: - entry = await async_client.projects.entries.query( - project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - question="question", - use_llm_matching=True, - client_metadata={}, - query_metadata={ - "context": "string", - "custom_metadata": {}, - "eval_scores": {"foo": 0}, - "evaluated_response": "evaluated_response", - }, - x_client_library_version="x-client-library-version", - x_integration_type="x-integration-type", - x_source="x-source", - x_stainless_package_version="x-stainless-package-version", - ) + with pytest.warns(DeprecationWarning): + entry = await async_client.projects.entries.query( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + question="question", + use_llm_matching=True, + client_metadata={}, + query_metadata={ + "context": "string", + "custom_metadata": {}, + "eval_scores": {"foo": 0}, + "evaluated_response": "evaluated_response", + }, + x_client_library_version="x-client-library-version", + x_integration_type="x-integration-type", + x_source="x-source", + x_stainless_package_version="x-stainless-package-version", + ) + assert_matches_type(EntryQueryResponse, entry, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_query(self, async_client: AsyncCodex) -> None: - response = await async_client.projects.entries.with_raw_response.query( - project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - question="question", - ) + with pytest.warns(DeprecationWarning): + response = await async_client.projects.entries.with_raw_response.query( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + question="question", + ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -906,26 +920,28 @@ async def test_raw_response_query(self, async_client: AsyncCodex) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_query(self, async_client: AsyncCodex) -> None: - async with async_client.projects.entries.with_streaming_response.query( - 
project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - question="question", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" + with pytest.warns(DeprecationWarning): + async with async_client.projects.entries.with_streaming_response.query( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + question="question", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" - entry = await response.parse() - assert_matches_type(EntryQueryResponse, entry, path=["response"]) + entry = await response.parse() + assert_matches_type(EntryQueryResponse, entry, path=["response"]) assert cast(Any, response.is_closed) is True @pytest.mark.skip() @parametrize async def test_path_params_query(self, async_client: AsyncCodex) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): - await async_client.projects.entries.with_raw_response.query( - project_id="", - question="question", - ) + with pytest.warns(DeprecationWarning): + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.projects.entries.with_raw_response.query( + project_id="", + question="question", + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/test_projects.py b/tests/api_resources/test_projects.py index d5e0e1cc..f7c3f017 100644 --- a/tests/api_resources/test_projects.py +++ b/tests/api_resources/test_projects.py @@ -17,6 +17,8 @@ ) from tests.utils import assert_matches_type +# pyright: reportDeprecated=false + base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -454,26 +456,31 @@ def test_path_params_export(self, client: Codex) -> None: @pytest.mark.skip() @parametrize def test_method_increment_queries(self, client: Codex) -> None: - project = client.projects.increment_queries( - project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) + with pytest.warns(DeprecationWarning): + project = client.projects.increment_queries( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(object, project, path=["response"]) @pytest.mark.skip() @parametrize def test_method_increment_queries_with_all_params(self, client: Codex) -> None: - project = client.projects.increment_queries( - project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - count=0, - ) + with pytest.warns(DeprecationWarning): + project = client.projects.increment_queries( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + count=0, + ) + assert_matches_type(object, project, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_increment_queries(self, client: Codex) -> None: - response = client.projects.with_raw_response.increment_queries( - project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) + with pytest.warns(DeprecationWarning): + response = client.projects.with_raw_response.increment_queries( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -483,24 +490,26 @@ def test_raw_response_increment_queries(self, client: Codex) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_increment_queries(self, client: Codex) -> None: - with client.projects.with_streaming_response.increment_queries( - project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not response.is_closed - assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + with pytest.warns(DeprecationWarning): + with client.projects.with_streaming_response.increment_queries( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" - project = response.parse() - assert_matches_type(object, project, path=["response"]) + project = response.parse() + assert_matches_type(object, project, path=["response"]) assert cast(Any, response.is_closed) is True @pytest.mark.skip() @parametrize def test_path_params_increment_queries(self, client: Codex) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): - client.projects.with_raw_response.increment_queries( - project_id="", - ) + with pytest.warns(DeprecationWarning): + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.projects.with_raw_response.increment_queries( + project_id="", + ) @pytest.mark.skip() @parametrize @@ -1081,26 +1090,31 @@ async def test_path_params_export(self, async_client: AsyncCodex) -> None: @pytest.mark.skip() @parametrize async def test_method_increment_queries(self, async_client: AsyncCodex) -> None: - project = await async_client.projects.increment_queries( - project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) + with pytest.warns(DeprecationWarning): + project = await async_client.projects.increment_queries( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(object, project, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_increment_queries_with_all_params(self, async_client: AsyncCodex) -> None: - project = await async_client.projects.increment_queries( - project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - count=0, - ) + with pytest.warns(DeprecationWarning): + project = await async_client.projects.increment_queries( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + count=0, + ) + assert_matches_type(object, project, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_increment_queries(self, async_client: AsyncCodex) -> None: - response = await async_client.projects.with_raw_response.increment_queries( - project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) + with pytest.warns(DeprecationWarning): + response = await async_client.projects.with_raw_response.increment_queries( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -1110,24 +1124,26 @@ async def test_raw_response_increment_queries(self, async_client: AsyncCodex) -> @pytest.mark.skip() @parametrize async def test_streaming_response_increment_queries(self, async_client: AsyncCodex) -> None: - async with async_client.projects.with_streaming_response.increment_queries( - project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" + with pytest.warns(DeprecationWarning): + async with async_client.projects.with_streaming_response.increment_queries( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" - project = await response.parse() - assert_matches_type(object, project, path=["response"]) + project 
= await response.parse() + assert_matches_type(object, project, path=["response"]) assert cast(Any, response.is_closed) is True @pytest.mark.skip() @parametrize async def test_path_params_increment_queries(self, async_client: AsyncCodex) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): - await async_client.projects.with_raw_response.increment_queries( - project_id="", - ) + with pytest.warns(DeprecationWarning): + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.projects.with_raw_response.increment_queries( + project_id="", + ) @pytest.mark.skip() @parametrize From ef4acf292c728a838aecd6539d278b8c128be68a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 11 Jun 2025 18:41:44 +0000 Subject: [PATCH 10/32] feat(api): api update --- .stats.yml | 2 +- src/codex/types/project_create_params.py | 3 +++ src/codex/types/project_list_response.py | 3 +++ src/codex/types/project_retrieve_response.py | 3 +++ src/codex/types/project_return_schema.py | 3 +++ src/codex/types/project_update_params.py | 3 +++ tests/api_resources/test_projects.py | 4 ++++ 7 files changed, 20 insertions(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index e80f0e13..ddf72408 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,3 +1,3 @@ configured_endpoints: 44 -openapi_spec_hash: 0f1841fad65926e7ddfb22dd7a642b46 +openapi_spec_hash: dfccb5c181396678a22b9c079847889f config_hash: 659f65b6ccf5612986f920f7f9abbcb5 diff --git a/src/codex/types/project_create_params.py b/src/codex/types/project_create_params.py index 75892e0b..31427557 100644 --- a/src/codex/types/project_create_params.py +++ b/src/codex/types/project_create_params.py @@ -55,6 +55,9 @@ class ConfigEvalConfigCustomEvalsEvals(TypedDict, total=False): enabled: bool """Allows the evaluation to be disabled without removing it""" + is_default: bool + """Whether the eval is a default, built-in eval or a custom eval""" + priority: Optional[int] """ Priority order for evals (lower number = higher priority) to determine primary diff --git a/src/codex/types/project_list_response.py b/src/codex/types/project_list_response.py index 59d3bf81..d4805732 100644 --- a/src/codex/types/project_list_response.py +++ b/src/codex/types/project_list_response.py @@ -47,6 +47,9 @@ class ProjectConfigEvalConfigCustomEvalsEvals(BaseModel): enabled: Optional[bool] = None """Allows the evaluation to be disabled without removing it""" + is_default: Optional[bool] = None + """Whether the eval is a default, built-in eval or a custom eval""" + priority: Optional[int] = None """ Priority order for evals (lower number = higher priority) to determine primary diff --git a/src/codex/types/project_retrieve_response.py b/src/codex/types/project_retrieve_response.py index a631f0c2..fb62cff3 100644 --- a/src/codex/types/project_retrieve_response.py +++ b/src/codex/types/project_retrieve_response.py @@ -46,6 +46,9 @@ class ConfigEvalConfigCustomEvalsEvals(BaseModel): enabled: Optional[bool] = None """Allows the evaluation to be disabled without removing it""" + is_default: Optional[bool] = None + """Whether the eval is a default, built-in eval or a custom eval""" + priority: Optional[int] = None """ Priority order for evals (lower number = higher priority) to determine primary diff --git a/src/codex/types/project_return_schema.py b/src/codex/types/project_return_schema.py index 7da2e615..420ec6e7 100644 
--- a/src/codex/types/project_return_schema.py +++ b/src/codex/types/project_return_schema.py @@ -46,6 +46,9 @@ class ConfigEvalConfigCustomEvalsEvals(BaseModel): enabled: Optional[bool] = None """Allows the evaluation to be disabled without removing it""" + is_default: Optional[bool] = None + """Whether the eval is a default, built-in eval or a custom eval""" + priority: Optional[int] = None """ Priority order for evals (lower number = higher priority) to determine primary diff --git a/src/codex/types/project_update_params.py b/src/codex/types/project_update_params.py index d58dd591..d1999550 100644 --- a/src/codex/types/project_update_params.py +++ b/src/codex/types/project_update_params.py @@ -53,6 +53,9 @@ class ConfigEvalConfigCustomEvalsEvals(TypedDict, total=False): enabled: bool """Allows the evaluation to be disabled without removing it""" + is_default: bool + """Whether the eval is a default, built-in eval or a custom eval""" + priority: Optional[int] """ Priority order for evals (lower number = higher priority) to determine primary diff --git a/tests/api_resources/test_projects.py b/tests/api_resources/test_projects.py index f7c3f017..8ba69a14 100644 --- a/tests/api_resources/test_projects.py +++ b/tests/api_resources/test_projects.py @@ -50,6 +50,7 @@ def test_method_create_with_all_params(self, client: Codex) -> None: "name": "name", "context_identifier": "context_identifier", "enabled": True, + "is_default": True, "priority": 0, "query_identifier": "query_identifier", "response_identifier": "response_identifier", @@ -218,6 +219,7 @@ def test_method_update_with_all_params(self, client: Codex) -> None: "name": "name", "context_identifier": "context_identifier", "enabled": True, + "is_default": True, "priority": 0, "query_identifier": "query_identifier", "response_identifier": "response_identifier", @@ -684,6 +686,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncCodex) -> "name": "name", "context_identifier": "context_identifier", "enabled": True, + "is_default": True, "priority": 0, "query_identifier": "query_identifier", "response_identifier": "response_identifier", @@ -852,6 +855,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncCodex) -> "name": "name", "context_identifier": "context_identifier", "enabled": True, + "is_default": True, "priority": 0, "query_identifier": "query_identifier", "response_identifier": "response_identifier", From 8fcb74d6d5ef8db36a78031c9bf59f0840def1d4 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 3 Jun 2025 02:20:36 +0000 Subject: [PATCH 11/32] chore(docs): remove reference to rye shell --- CONTRIBUTING.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b9fa9a13..548ff4c7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -17,8 +17,7 @@ $ rye sync --all-features You can then run scripts using `rye run python script.py` or by activating the virtual environment: ```sh -$ rye shell -# or manually activate - https://docs.python.org/3/library/venv.html#how-venvs-work +# Activate the virtual environment - https://docs.python.org/3/library/venv.html#how-venvs-work $ source .venv/bin/activate # now you can omit the `rye run` prefix From 2ef6184b1f5acfb9e924b6adb0ca6b453507ddd2 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 20 Jun 2025 23:27:53 +0000 Subject: [PATCH 12/32] chore(docs): remove 
unnecessary param examples From f8a8e15df5a70e7065127943dfae2261724ca3f2 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 3 Jun 2025 03:34:08 +0000 Subject: [PATCH 13/32] feat(client): add follow_redirects request option --- src/codex/_base_client.py | 6 +++++ src/codex/_models.py | 2 ++ src/codex/_types.py | 2 ++ tests/test_client.py | 54 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 64 insertions(+) diff --git a/src/codex/_base_client.py b/src/codex/_base_client.py index 0bf6ca7f..dff802e4 100644 --- a/src/codex/_base_client.py +++ b/src/codex/_base_client.py @@ -960,6 +960,9 @@ def request( if self.custom_auth is not None: kwargs["auth"] = self.custom_auth + if options.follow_redirects is not None: + kwargs["follow_redirects"] = options.follow_redirects + log.debug("Sending HTTP Request: %s %s", request.method, request.url) response = None @@ -1460,6 +1463,9 @@ async def request( if self.custom_auth is not None: kwargs["auth"] = self.custom_auth + if options.follow_redirects is not None: + kwargs["follow_redirects"] = options.follow_redirects + log.debug("Sending HTTP Request: %s %s", request.method, request.url) response = None diff --git a/src/codex/_models.py b/src/codex/_models.py index 798956f1..4f214980 100644 --- a/src/codex/_models.py +++ b/src/codex/_models.py @@ -737,6 +737,7 @@ class FinalRequestOptionsInput(TypedDict, total=False): idempotency_key: str json_data: Body extra_json: AnyMapping + follow_redirects: bool @final @@ -750,6 +751,7 @@ class FinalRequestOptions(pydantic.BaseModel): files: Union[HttpxRequestFiles, None] = None idempotency_key: Union[str, None] = None post_parser: Union[Callable[[Any], Any], NotGiven] = NotGiven() + follow_redirects: Union[bool, None] = None # It should be noted that we cannot use `json` here as that would override # a BaseModel method in an incompatible fashion. 
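For context, a minimal usage sketch of the new option (not part of the patch itself): assuming an already-configured `client: Codex`, a caller could disable redirect following for a single request via `options`, matching the tests added later in this patch. The "/redirect" path and body are illustrative values only.

    import httpx

    # Per-request override of redirect handling via the new request option.
    # With follow_redirects=False, a 302 surfaces as an APIStatusError
    # instead of being transparently followed.
    response = client.post(
        "/redirect",                              # illustrative path
        body={"key": "value"},
        options={"follow_redirects": False},
        cast_to=httpx.Response,
    )
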
diff --git a/src/codex/_types.py b/src/codex/_types.py index dfa51c2f..f2d17a1f 100644 --- a/src/codex/_types.py +++ b/src/codex/_types.py @@ -100,6 +100,7 @@ class RequestOptions(TypedDict, total=False): params: Query extra_json: AnyMapping idempotency_key: str + follow_redirects: bool # Sentinel class used until PEP 0661 is accepted @@ -215,3 +216,4 @@ class _GenericAlias(Protocol): class HttpxSendArgs(TypedDict, total=False): auth: httpx.Auth + follow_redirects: bool diff --git a/tests/test_client.py b/tests/test_client.py index 2d356fac..bc381273 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -803,6 +803,33 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert response.http_request.headers.get("x-stainless-retry-count") == "42" + @pytest.mark.respx(base_url=base_url) + def test_follow_redirects(self, respx_mock: MockRouter) -> None: + # Test that the default follow_redirects=True allows following redirects + respx_mock.post("/redirect").mock( + return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"}) + ) + respx_mock.get("/redirected").mock(return_value=httpx.Response(200, json={"status": "ok"})) + + response = self.client.post("/redirect", body={"key": "value"}, cast_to=httpx.Response) + assert response.status_code == 200 + assert response.json() == {"status": "ok"} + + @pytest.mark.respx(base_url=base_url) + def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None: + # Test that follow_redirects=False prevents following redirects + respx_mock.post("/redirect").mock( + return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"}) + ) + + with pytest.raises(APIStatusError) as exc_info: + self.client.post( + "/redirect", body={"key": "value"}, options={"follow_redirects": False}, cast_to=httpx.Response + ) + + assert exc_info.value.response.status_code == 302 + assert exc_info.value.response.headers["Location"] == f"{base_url}/redirected" + class TestAsyncCodex: client = AsyncCodex(base_url=base_url, _strict_response_validation=True) @@ -1609,3 +1636,30 @@ async def test_main() -> None: raise AssertionError("calling get_platform using asyncify resulted in a hung process") time.sleep(0.1) + + @pytest.mark.respx(base_url=base_url) + async def test_follow_redirects(self, respx_mock: MockRouter) -> None: + # Test that the default follow_redirects=True allows following redirects + respx_mock.post("/redirect").mock( + return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"}) + ) + respx_mock.get("/redirected").mock(return_value=httpx.Response(200, json={"status": "ok"})) + + response = await self.client.post("/redirect", body={"key": "value"}, cast_to=httpx.Response) + assert response.status_code == 200 + assert response.json() == {"status": "ok"} + + @pytest.mark.respx(base_url=base_url) + async def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None: + # Test that follow_redirects=False prevents following redirects + respx_mock.post("/redirect").mock( + return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"}) + ) + + with pytest.raises(APIStatusError) as exc_info: + await self.client.post( + "/redirect", body={"key": "value"}, options={"follow_redirects": False}, cast_to=httpx.Response + ) + + assert exc_info.value.response.status_code == 302 + assert exc_info.value.response.headers["Location"] == f"{base_url}/redirected" From 66125220140227fb290685bb52493eb4adc611f0 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" 
<142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 3 Jun 2025 22:17:18 +0000 Subject: [PATCH 14/32] codegen metadata --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index ddf72408..145bcaa1 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,3 +1,3 @@ configured_endpoints: 44 -openapi_spec_hash: dfccb5c181396678a22b9c079847889f +openapi_spec_hash: 12f575bae07a188ff130fdee07d46312 config_hash: 659f65b6ccf5612986f920f7f9abbcb5 From b3878c14ef133a66228f1aeba20615c0796e6802 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 5 Jun 2025 15:17:31 +0000 Subject: [PATCH 15/32] codegen metadata --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 145bcaa1..82cbfb62 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,3 +1,3 @@ configured_endpoints: 44 -openapi_spec_hash: 12f575bae07a188ff130fdee07d46312 +openapi_spec_hash: a8387ccffe9a593cea310f37eb64ea0e config_hash: 659f65b6ccf5612986f920f7f9abbcb5 From 8489bc74223f2132cec88583f3fabbf5620dc599 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 9 Jun 2025 17:17:08 +0000 Subject: [PATCH 16/32] feat(api): api update --- .stats.yml | 2 +- src/codex/types/project_create_params.py | 2 +- src/codex/types/project_list_response.py | 2 +- src/codex/types/project_retrieve_response.py | 2 +- src/codex/types/project_return_schema.py | 2 +- src/codex/types/project_update_params.py | 2 +- tests/api_resources/test_projects.py | 8 ++++---- 7 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.stats.yml b/.stats.yml index 82cbfb62..0a2b18bb 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,3 +1,3 @@ configured_endpoints: 44 -openapi_spec_hash: a8387ccffe9a593cea310f37eb64ea0e +openapi_spec_hash: ded11525d5fce121fac9be1f434c42af config_hash: 659f65b6ccf5612986f920f7f9abbcb5 diff --git a/src/codex/types/project_create_params.py b/src/codex/types/project_create_params.py index 31427557..13338463 100644 --- a/src/codex/types/project_create_params.py +++ b/src/codex/types/project_create_params.py @@ -305,7 +305,7 @@ class Config(TypedDict, total=False): llm_matching_model: str - llm_matching_quality_preset: str + llm_matching_quality_preset: Literal["best", "high", "medium", "low", "base"] lower_llm_match_distance_threshold: float diff --git a/src/codex/types/project_list_response.py b/src/codex/types/project_list_response.py index d4805732..9051542a 100644 --- a/src/codex/types/project_list_response.py +++ b/src/codex/types/project_list_response.py @@ -297,7 +297,7 @@ class ProjectConfig(BaseModel): llm_matching_model: Optional[str] = None - llm_matching_quality_preset: Optional[str] = None + llm_matching_quality_preset: Optional[Literal["best", "high", "medium", "low", "base"]] = None lower_llm_match_distance_threshold: Optional[float] = None diff --git a/src/codex/types/project_retrieve_response.py b/src/codex/types/project_retrieve_response.py index fb62cff3..5cf4f32b 100644 --- a/src/codex/types/project_retrieve_response.py +++ b/src/codex/types/project_retrieve_response.py @@ -296,7 +296,7 @@ class Config(BaseModel): llm_matching_model: Optional[str] = None - llm_matching_quality_preset: Optional[str] = None + llm_matching_quality_preset: Optional[Literal["best", "high", "medium", "low", "base"]] = None lower_llm_match_distance_threshold: Optional[float] = None diff --git 
a/src/codex/types/project_return_schema.py b/src/codex/types/project_return_schema.py index 420ec6e7..979be221 100644 --- a/src/codex/types/project_return_schema.py +++ b/src/codex/types/project_return_schema.py @@ -296,7 +296,7 @@ class Config(BaseModel): llm_matching_model: Optional[str] = None - llm_matching_quality_preset: Optional[str] = None + llm_matching_quality_preset: Optional[Literal["best", "high", "medium", "low", "base"]] = None lower_llm_match_distance_threshold: Optional[float] = None diff --git a/src/codex/types/project_update_params.py b/src/codex/types/project_update_params.py index d1999550..6df16f43 100644 --- a/src/codex/types/project_update_params.py +++ b/src/codex/types/project_update_params.py @@ -303,7 +303,7 @@ class Config(TypedDict, total=False): llm_matching_model: str - llm_matching_quality_preset: str + llm_matching_quality_preset: Literal["best", "high", "medium", "low", "base"] lower_llm_match_distance_threshold: float diff --git a/tests/api_resources/test_projects.py b/tests/api_resources/test_projects.py index 8ba69a14..7ca0ff9f 100644 --- a/tests/api_resources/test_projects.py +++ b/tests/api_resources/test_projects.py @@ -109,7 +109,7 @@ def test_method_create_with_all_params(self, client: Codex) -> None: }, }, "llm_matching_model": "llm_matching_model", - "llm_matching_quality_preset": "llm_matching_quality_preset", + "llm_matching_quality_preset": "best", "lower_llm_match_distance_threshold": 0, "max_distance": 0, "query_use_llm_matching": True, @@ -278,7 +278,7 @@ def test_method_update_with_all_params(self, client: Codex) -> None: }, }, "llm_matching_model": "llm_matching_model", - "llm_matching_quality_preset": "llm_matching_quality_preset", + "llm_matching_quality_preset": "best", "lower_llm_match_distance_threshold": 0, "max_distance": 0, "query_use_llm_matching": True, @@ -745,7 +745,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncCodex) -> }, }, "llm_matching_model": "llm_matching_model", - "llm_matching_quality_preset": "llm_matching_quality_preset", + "llm_matching_quality_preset": "best", "lower_llm_match_distance_threshold": 0, "max_distance": 0, "query_use_llm_matching": True, @@ -914,7 +914,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncCodex) -> }, }, "llm_matching_model": "llm_matching_model", - "llm_matching_quality_preset": "llm_matching_quality_preset", + "llm_matching_quality_preset": "best", "lower_llm_match_distance_threshold": 0, "max_distance": 0, "query_use_llm_matching": True, From 0a636fa5930c53671923de2f191a4e5c74328f7f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 10 Jun 2025 23:17:07 +0000 Subject: [PATCH 17/32] feat(api): api update --- .stats.yml | 2 +- src/codex/resources/projects/projects.py | 8 ++++++++ src/codex/types/project_create_params.py | 2 ++ src/codex/types/project_list_response.py | 2 ++ src/codex/types/project_retrieve_response.py | 2 ++ src/codex/types/project_return_schema.py | 2 ++ src/codex/types/project_update_params.py | 2 ++ tests/api_resources/test_projects.py | 4 ++++ 8 files changed, 23 insertions(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 0a2b18bb..735d7e30 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,3 +1,3 @@ configured_endpoints: 44 -openapi_spec_hash: ded11525d5fce121fac9be1f434c42af +openapi_spec_hash: b342ef2514a2e44178169d241aff9273 config_hash: 659f65b6ccf5612986f920f7f9abbcb5 diff --git a/src/codex/resources/projects/projects.py 
b/src/codex/resources/projects/projects.py index cf8c0f82..5fc3428c 100644 --- a/src/codex/resources/projects/projects.py +++ b/src/codex/resources/projects/projects.py @@ -98,6 +98,7 @@ def create( config: project_create_params.Config, name: str, organization_id: str, + auto_clustering_enabled: bool | NotGiven = NOT_GIVEN, description: Optional[str] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -125,6 +126,7 @@ def create( "config": config, "name": name, "organization_id": organization_id, + "auto_clustering_enabled": auto_clustering_enabled, "description": description, }, project_create_params.ProjectCreateParams, @@ -174,6 +176,7 @@ def update( *, config: project_update_params.Config, name: str, + auto_clustering_enabled: bool | NotGiven = NOT_GIVEN, description: Optional[str] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -202,6 +205,7 @@ def update( { "config": config, "name": name, + "auto_clustering_enabled": auto_clustering_enabled, "description": description, }, project_update_params.ProjectUpdateParams, @@ -641,6 +645,7 @@ async def create( config: project_create_params.Config, name: str, organization_id: str, + auto_clustering_enabled: bool | NotGiven = NOT_GIVEN, description: Optional[str] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -668,6 +673,7 @@ async def create( "config": config, "name": name, "organization_id": organization_id, + "auto_clustering_enabled": auto_clustering_enabled, "description": description, }, project_create_params.ProjectCreateParams, @@ -717,6 +723,7 @@ async def update( *, config: project_update_params.Config, name: str, + auto_clustering_enabled: bool | NotGiven = NOT_GIVEN, description: Optional[str] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
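For context, a hedged sketch of the new keyword in use (not part of the patch; it mirrors the test updates further down — the project_id, name, and empty config are placeholder values):

    # auto_clustering_enabled is a new top-level keyword alongside config/name.
    project = client.projects.update(
        project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
        config={},
        name="name",
        auto_clustering_enabled=True,
    )
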
@@ -745,6 +752,7 @@ async def update( { "config": config, "name": name, + "auto_clustering_enabled": auto_clustering_enabled, "description": description, }, project_update_params.ProjectUpdateParams, diff --git a/src/codex/types/project_create_params.py b/src/codex/types/project_create_params.py index 13338463..4d9d31f5 100644 --- a/src/codex/types/project_create_params.py +++ b/src/codex/types/project_create_params.py @@ -27,6 +27,8 @@ class ProjectCreateParams(TypedDict, total=False): organization_id: Required[str] + auto_clustering_enabled: bool + description: Optional[str] diff --git a/src/codex/types/project_list_response.py b/src/codex/types/project_list_response.py index 9051542a..465ed157 100644 --- a/src/codex/types/project_list_response.py +++ b/src/codex/types/project_list_response.py @@ -323,6 +323,8 @@ class Project(BaseModel): updated_at: datetime + auto_clustering_enabled: Optional[bool] = None + description: Optional[str] = None unanswered_entries_count: Optional[int] = None diff --git a/src/codex/types/project_retrieve_response.py b/src/codex/types/project_retrieve_response.py index 5cf4f32b..53624096 100644 --- a/src/codex/types/project_retrieve_response.py +++ b/src/codex/types/project_retrieve_response.py @@ -322,6 +322,8 @@ class ProjectRetrieveResponse(BaseModel): updated_at: datetime + auto_clustering_enabled: Optional[bool] = None + custom_rank_enabled: Optional[bool] = None description: Optional[str] = None diff --git a/src/codex/types/project_return_schema.py b/src/codex/types/project_return_schema.py index 979be221..2ad7433a 100644 --- a/src/codex/types/project_return_schema.py +++ b/src/codex/types/project_return_schema.py @@ -322,4 +322,6 @@ class ProjectReturnSchema(BaseModel): updated_at: datetime + auto_clustering_enabled: Optional[bool] = None + description: Optional[str] = None diff --git a/src/codex/types/project_update_params.py b/src/codex/types/project_update_params.py index 6df16f43..fc6c52d4 100644 --- a/src/codex/types/project_update_params.py +++ b/src/codex/types/project_update_params.py @@ -25,6 +25,8 @@ class ProjectUpdateParams(TypedDict, total=False): name: Required[str] + auto_clustering_enabled: bool + description: Optional[str] diff --git a/tests/api_resources/test_projects.py b/tests/api_resources/test_projects.py index 7ca0ff9f..4c95e587 100644 --- a/tests/api_resources/test_projects.py +++ b/tests/api_resources/test_projects.py @@ -117,6 +117,7 @@ def test_method_create_with_all_params(self, client: Codex) -> None: }, name="name", organization_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + auto_clustering_enabled=True, description="description", ) assert_matches_type(ProjectReturnSchema, project, path=["response"]) @@ -285,6 +286,7 @@ def test_method_update_with_all_params(self, client: Codex) -> None: "upper_llm_match_distance_threshold": 0, }, name="name", + auto_clustering_enabled=True, description="description", ) assert_matches_type(ProjectReturnSchema, project, path=["response"]) @@ -753,6 +755,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncCodex) -> }, name="name", organization_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + auto_clustering_enabled=True, description="description", ) assert_matches_type(ProjectReturnSchema, project, path=["response"]) @@ -921,6 +924,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncCodex) -> "upper_llm_match_distance_threshold": 0, }, name="name", + auto_clustering_enabled=True, description="description", ) assert_matches_type(ProjectReturnSchema, 
project, path=["response"]) From 0a3aaa1955e03895d340cfb8b5512bbcc4556d88 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 13 Jun 2025 02:06:17 +0000 Subject: [PATCH 18/32] chore(tests): run tests in parallel --- pyproject.toml | 3 ++- requirements-dev.lock | 4 ++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 04d039a2..03a1e985 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,6 +54,7 @@ dev-dependencies = [ "importlib-metadata>=6.7.0", "rich>=13.7.1", "nest_asyncio==1.6.0", + "pytest-xdist>=3.6.1", ] [tool.rye.scripts] @@ -125,7 +126,7 @@ replacement = '[\1](https://github.com/cleanlab/codex-python/tree/main/\g<2>)' [tool.pytest.ini_options] testpaths = ["tests"] -addopts = "--tb=short" +addopts = "--tb=short -n auto" xfail_strict = true asyncio_mode = "auto" asyncio_default_fixture_loop_scope = "session" diff --git a/requirements-dev.lock b/requirements-dev.lock index 7fcfe657..9e127b74 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -30,6 +30,8 @@ distro==1.8.0 exceptiongroup==1.2.2 # via anyio # via pytest +execnet==2.1.1 + # via pytest-xdist filelock==3.12.4 # via virtualenv h11==0.14.0 @@ -72,7 +74,9 @@ pygments==2.18.0 pyright==1.1.399 pytest==8.3.3 # via pytest-asyncio + # via pytest-xdist pytest-asyncio==0.24.0 +pytest-xdist==3.7.0 python-dateutil==2.8.2 # via time-machine pytz==2023.3.post1 From 9debf1d4208f1c65a67211fc4e7adce3193ed291 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 13 Jun 2025 02:32:12 +0000 Subject: [PATCH 19/32] fix(client): correctly parse binary response | stream --- src/codex/_base_client.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/src/codex/_base_client.py b/src/codex/_base_client.py index dff802e4..6ef7dd5f 100644 --- a/src/codex/_base_client.py +++ b/src/codex/_base_client.py @@ -1071,7 +1071,14 @@ def _process_response( ) -> ResponseT: origin = get_origin(cast_to) or cast_to - if inspect.isclass(origin) and issubclass(origin, BaseAPIResponse): + if ( + inspect.isclass(origin) + and issubclass(origin, BaseAPIResponse) + # we only want to actually return the custom BaseAPIResponse class if we're + # returning the raw response, or if we're not streaming SSE, as if we're streaming + # SSE then `cast_to` doesn't actively reflect the type we need to parse into + and (not stream or bool(response.request.headers.get(RAW_RESPONSE_HEADER))) + ): if not issubclass(origin, APIResponse): raise TypeError(f"API Response types must subclass {APIResponse}; Received {origin}") @@ -1574,7 +1581,14 @@ async def _process_response( ) -> ResponseT: origin = get_origin(cast_to) or cast_to - if inspect.isclass(origin) and issubclass(origin, BaseAPIResponse): + if ( + inspect.isclass(origin) + and issubclass(origin, BaseAPIResponse) + # we only want to actually return the custom BaseAPIResponse class if we're + # returning the raw response, or if we're not streaming SSE, as if we're streaming + # SSE then `cast_to` doesn't actively reflect the type we need to parse into + and (not stream or bool(response.request.headers.get(RAW_RESPONSE_HEADER))) + ): if not issubclass(origin, AsyncAPIResponse): raise TypeError(f"API Response types must subclass {AsyncAPIResponse}; Received {origin}") From 658d5272b47d0e6a03ae3b4ba82f5e54ee4ffef7 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" 
<142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 16 Jun 2025 18:17:09 +0000 Subject: [PATCH 20/32] codegen metadata --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 735d7e30..c98d193e 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,3 +1,3 @@ configured_endpoints: 44 -openapi_spec_hash: b342ef2514a2e44178169d241aff9273 +openapi_spec_hash: 392a7547e79deafa47b8c452f0f5b79e config_hash: 659f65b6ccf5612986f920f7f9abbcb5 From 425caff55b0a813f637afee7a973b66053bd366f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 16 Jun 2025 22:17:08 +0000 Subject: [PATCH 21/32] feat(api): api update --- .stats.yml | 2 +- src/codex/resources/projects/projects.py | 8 ++++---- src/codex/types/project_list_params.py | 2 +- src/codex/types/project_list_response.py | 2 +- tests/api_resources/test_projects.py | 4 ++-- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.stats.yml b/.stats.yml index c98d193e..09b48e62 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,3 +1,3 @@ configured_endpoints: 44 -openapi_spec_hash: 392a7547e79deafa47b8c452f0f5b79e +openapi_spec_hash: 66a1eb040f577f552228baf4ddbd17d0 config_hash: 659f65b6ccf5612986f920f7f9abbcb5 diff --git a/src/codex/resources/projects/projects.py b/src/codex/resources/projects/projects.py index 5fc3428c..b048b284 100644 --- a/src/codex/resources/projects/projects.py +++ b/src/codex/resources/projects/projects.py @@ -219,7 +219,7 @@ def update( def list( self, *, - include_entry_counts: bool | NotGiven = NOT_GIVEN, + include_unaddressed_counts: bool | NotGiven = NOT_GIVEN, limit: int | NotGiven = NOT_GIVEN, offset: int | NotGiven = NOT_GIVEN, order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, @@ -254,7 +254,7 @@ def list( timeout=timeout, query=maybe_transform( { - "include_entry_counts": include_entry_counts, + "include_unaddressed_counts": include_unaddressed_counts, "limit": limit, "offset": offset, "order": order, @@ -766,7 +766,7 @@ async def update( async def list( self, *, - include_entry_counts: bool | NotGiven = NOT_GIVEN, + include_unaddressed_counts: bool | NotGiven = NOT_GIVEN, limit: int | NotGiven = NOT_GIVEN, offset: int | NotGiven = NOT_GIVEN, order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, @@ -801,7 +801,7 @@ async def list( timeout=timeout, query=await async_maybe_transform( { - "include_entry_counts": include_entry_counts, + "include_unaddressed_counts": include_unaddressed_counts, "limit": limit, "offset": offset, "order": order, diff --git a/src/codex/types/project_list_params.py b/src/codex/types/project_list_params.py index 0ab3b84b..0c4ec1bd 100644 --- a/src/codex/types/project_list_params.py +++ b/src/codex/types/project_list_params.py @@ -9,7 +9,7 @@ class ProjectListParams(TypedDict, total=False): - include_entry_counts: bool + include_unaddressed_counts: bool limit: int diff --git a/src/codex/types/project_list_response.py b/src/codex/types/project_list_response.py index 465ed157..a7df3f38 100644 --- a/src/codex/types/project_list_response.py +++ b/src/codex/types/project_list_response.py @@ -327,7 +327,7 @@ class Project(BaseModel): description: Optional[str] = None - unanswered_entries_count: Optional[int] = None + unaddressed_count: Optional[int] = None class ProjectListResponse(BaseModel): diff --git a/tests/api_resources/test_projects.py b/tests/api_resources/test_projects.py index 4c95e587..cf6b3171 100644 --- a/tests/api_resources/test_projects.py +++ 
b/tests/api_resources/test_projects.py @@ -341,7 +341,7 @@ def test_method_list(self, client: Codex) -> None: @parametrize def test_method_list_with_all_params(self, client: Codex) -> None: project = client.projects.list( - include_entry_counts=True, + include_unaddressed_counts=True, limit=1, offset=0, order="asc", @@ -979,7 +979,7 @@ async def test_method_list(self, async_client: AsyncCodex) -> None: @parametrize async def test_method_list_with_all_params(self, async_client: AsyncCodex) -> None: project = await async_client.projects.list( - include_entry_counts=True, + include_unaddressed_counts=True, limit=1, offset=0, order="asc", From bf466627bf3e9dc4a8d1f3283f94a75bab241a58 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 17 Jun 2025 02:34:19 +0000 Subject: [PATCH 22/32] chore(tests): add tests for httpx client instantiation & proxies --- tests/test_client.py | 53 +++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 52 insertions(+), 1 deletion(-) diff --git a/tests/test_client.py b/tests/test_client.py index bc381273..d5815d3b 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -27,7 +27,14 @@ from codex._models import BaseModel, FinalRequestOptions from codex._constants import RAW_RESPONSE_HEADER from codex._exceptions import APIStatusError, APITimeoutError, APIResponseValidationError -from codex._base_client import DEFAULT_TIMEOUT, HTTPX_DEFAULT_TIMEOUT, BaseClient, make_request_options +from codex._base_client import ( + DEFAULT_TIMEOUT, + HTTPX_DEFAULT_TIMEOUT, + BaseClient, + DefaultHttpxClient, + DefaultAsyncHttpxClient, + make_request_options, +) from codex.types.project_create_params import ProjectCreateParams from .utils import update_env @@ -803,6 +810,28 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert response.http_request.headers.get("x-stainless-retry-count") == "42" + def test_proxy_environment_variables(self, monkeypatch: pytest.MonkeyPatch) -> None: + # Test that the proxy environment variables are set correctly + monkeypatch.setenv("HTTPS_PROXY", "https://example.org") + + client = DefaultHttpxClient() + + mounts = tuple(client._mounts.items()) + assert len(mounts) == 1 + assert mounts[0][0].pattern == "https://" + + @pytest.mark.filterwarnings("ignore:.*deprecated.*:DeprecationWarning") + def test_default_client_creation(self) -> None: + # Ensure that the client can be initialized without any exceptions + DefaultHttpxClient( + verify=True, + cert=None, + trust_env=True, + http1=True, + http2=False, + limits=httpx.Limits(max_connections=100, max_keepalive_connections=20), + ) + @pytest.mark.respx(base_url=base_url) def test_follow_redirects(self, respx_mock: MockRouter) -> None: # Test that the default follow_redirects=True allows following redirects @@ -1637,6 +1666,28 @@ async def test_main() -> None: time.sleep(0.1) + async def test_proxy_environment_variables(self, monkeypatch: pytest.MonkeyPatch) -> None: + # Test that the proxy environment variables are set correctly + monkeypatch.setenv("HTTPS_PROXY", "https://example.org") + + client = DefaultAsyncHttpxClient() + + mounts = tuple(client._mounts.items()) + assert len(mounts) == 1 + assert mounts[0][0].pattern == "https://" + + @pytest.mark.filterwarnings("ignore:.*deprecated.*:DeprecationWarning") + async def test_default_client_creation(self) -> None: + # Ensure that the client can be initialized without any exceptions + DefaultAsyncHttpxClient( + verify=True, + cert=None, + trust_env=True, + 
http1=True, + http2=False, + limits=httpx.Limits(max_connections=100, max_keepalive_connections=20), + ) + @pytest.mark.respx(base_url=base_url) async def test_follow_redirects(self, respx_mock: MockRouter) -> None: # Test that the default follow_redirects=True allows following redirects From 9bd89258f9ce4448e92957c31cc0ff2457f105c9 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 17 Jun 2025 04:04:00 +0000 Subject: [PATCH 23/32] chore(internal): update conftest.py --- tests/conftest.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/conftest.py b/tests/conftest.py index c93053b1..05c77729 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,3 +1,5 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + from __future__ import annotations import os From a28cd9c9e47987a1193e345fb48c3dc75512e87a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 17 Jun 2025 06:33:35 +0000 Subject: [PATCH 24/32] chore(ci): enable for pull requests --- .github/workflows/ci.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c02247a3..2f7778ae 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -7,6 +7,10 @@ on: - 'integrated/**' - 'stl-preview-head/**' - 'stl-preview-base/**' + pull_request: + branches-ignore: + - 'stl-preview-head/**' + - 'stl-preview-base/**' jobs: lint: From 0bb6eec41fc0ccb6fcc2d987552dd56a602d6ccb Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 18 Jun 2025 02:08:16 +0000 Subject: [PATCH 25/32] chore(readme): update badges --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 434ebb1c..5a5a8052 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ # Codex SDK -[![PyPI version](https://img.shields.io/pypi/v/codex-sdk.svg)](https://pypi.org/project/codex-sdk/) +[![PyPI version]()](https://pypi.org/project/codex-sdk/) This library is not meant to be used directly. Refer to https://pypi.org/project/cleanlab-codex/ instead. 
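The feat(api) patches above add an auto_clustering_enabled flag to project create/update and rename the project list filter from include_entry_counts to include_unaddressed_counts (with unanswered_entries_count becoming unaddressed_count on the response model). A minimal usage sketch mirroring the parameters exercised in tests/api_resources/test_projects.py; the organization ID is the placeholder value from those tests, and the client is assumed to pick up its API key from the environment:

    from codex import Codex

    client = Codex()  # assumes the API key is supplied via the environment

    # Create a project with automatic clustering enabled.
    project = client.projects.create(
        config={},
        name="example-project",
        organization_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",  # placeholder
        auto_clustering_enabled=True,
    )

    # List projects with per-project unaddressed-entry counts included.
    projects = client.projects.list(
        include_unaddressed_counts=True,
        limit=10,
        order="desc",
    )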
From 14a4f3cff659d280f002fa694024e6c9814b3922 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 18 Jun 2025 05:44:40 +0000 Subject: [PATCH 26/32] fix(tests): fix: tests which call HTTP endpoints directly with the example parameters --- tests/test_client.py | 73 ++++++++++---------------------------------- 1 file changed, 16 insertions(+), 57 deletions(-) diff --git a/tests/test_client.py b/tests/test_client.py index d5815d3b..9cf7e943 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -23,9 +23,7 @@ from codex import Codex, AsyncCodex, APIResponseValidationError from codex._types import Omit -from codex._utils import maybe_transform from codex._models import BaseModel, FinalRequestOptions -from codex._constants import RAW_RESPONSE_HEADER from codex._exceptions import APIStatusError, APITimeoutError, APIResponseValidationError from codex._base_client import ( DEFAULT_TIMEOUT, @@ -35,7 +33,6 @@ DefaultAsyncHttpxClient, make_request_options, ) -from codex.types.project_create_params import ProjectCreateParams from .utils import update_env @@ -683,44 +680,25 @@ def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str @mock.patch("codex._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) - def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: + def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, client: Codex) -> None: respx_mock.post("/api/projects/").mock(side_effect=httpx.TimeoutException("Test timeout error")) with pytest.raises(APITimeoutError): - self.client.post( - "/api/projects/", - body=cast( - object, - maybe_transform( - dict(config={}, name="name", organization_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e"), - ProjectCreateParams, - ), - ), - cast_to=httpx.Response, - options={"headers": {RAW_RESPONSE_HEADER: "stream"}}, - ) + client.projects.with_streaming_response.create( + config={}, name="name", organization_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e" + ).__enter__() assert _get_open_connections(self.client) == 0 @mock.patch("codex._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) - def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: + def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client: Codex) -> None: respx_mock.post("/api/projects/").mock(return_value=httpx.Response(500)) with pytest.raises(APIStatusError): - self.client.post( - "/api/projects/", - body=cast( - object, - maybe_transform( - dict(config={}, name="name", organization_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e"), - ProjectCreateParams, - ), - ), - cast_to=httpx.Response, - options={"headers": {RAW_RESPONSE_HEADER: "stream"}}, - ) - + client.projects.with_streaming_response.create( + config={}, name="name", organization_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e" + ).__enter__() assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) @@ -1489,44 +1467,25 @@ async def test_parse_retry_after_header(self, remaining_retries: int, retry_afte @mock.patch("codex._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) - async def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: + async def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, 
async_client: AsyncCodex) -> None: respx_mock.post("/api/projects/").mock(side_effect=httpx.TimeoutException("Test timeout error")) with pytest.raises(APITimeoutError): - await self.client.post( - "/api/projects/", - body=cast( - object, - maybe_transform( - dict(config={}, name="name", organization_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e"), - ProjectCreateParams, - ), - ), - cast_to=httpx.Response, - options={"headers": {RAW_RESPONSE_HEADER: "stream"}}, - ) + await async_client.projects.with_streaming_response.create( + config={}, name="name", organization_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e" + ).__aenter__() assert _get_open_connections(self.client) == 0 @mock.patch("codex._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) - async def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: + async def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, async_client: AsyncCodex) -> None: respx_mock.post("/api/projects/").mock(return_value=httpx.Response(500)) with pytest.raises(APIStatusError): - await self.client.post( - "/api/projects/", - body=cast( - object, - maybe_transform( - dict(config={}, name="name", organization_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e"), - ProjectCreateParams, - ), - ), - cast_to=httpx.Response, - options={"headers": {RAW_RESPONSE_HEADER: "stream"}}, - ) - + await async_client.projects.with_streaming_response.create( + config={}, name="name", organization_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e" + ).__aenter__() assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) From 20a7b367fab70147c0ce071ad56b9ee02d27df7b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 18 Jun 2025 18:17:12 +0000 Subject: [PATCH 27/32] feat(api): api update --- .stats.yml | 2 +- src/codex/types/organization_list_members_response.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 09b48e62..40a2bb70 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,3 +1,3 @@ configured_endpoints: 44 -openapi_spec_hash: 66a1eb040f577f552228baf4ddbd17d0 +openapi_spec_hash: 947c1396f52c4f78645b067ac0b708e5 config_hash: 659f65b6ccf5612986f920f7f9abbcb5 diff --git a/src/codex/types/organization_list_members_response.py b/src/codex/types/organization_list_members_response.py index 37897d56..1fa593ea 100644 --- a/src/codex/types/organization_list_members_response.py +++ b/src/codex/types/organization_list_members_response.py @@ -13,5 +13,7 @@ class OrganizationListMembersResponseItem(BaseModel): name: str + user_id: str + OrganizationListMembersResponse: TypeAlias = List[OrganizationListMembersResponseItem] From 2836ef3a6e7fca6fb2a92d6b13fbe0e38f30f1ad Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 20 Jun 2025 23:29:36 +0000 Subject: [PATCH 28/32] docs(client): fix httpx.Timeout documentation reference From b8729ddd238b931c8739b960f0c5f5c185c69119 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 20 Jun 2025 20:17:22 +0000 Subject: [PATCH 29/32] feat(api): api update --- .stats.yml | 2 +- src/codex/resources/projects/projects.py | 96 +++--- src/codex/resources/tlm.py | 164 +++++------ src/codex/types/project_validate_params.py | 278 ++++++++++++++++-- .../types/projects/entry_query_params.py | 243 
++++++++++++++- src/codex/types/tlm_prompt_params.py | 41 ++- src/codex/types/tlm_score_params.py | 41 ++- tests/api_resources/projects/test_entries.py | 16 + tests/api_resources/test_projects.py | 14 + 9 files changed, 690 insertions(+), 205 deletions(-) diff --git a/.stats.yml b/.stats.yml index 40a2bb70..9317ce1b 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,3 +1,3 @@ configured_endpoints: 44 -openapi_spec_hash: 947c1396f52c4f78645b067ac0b708e5 +openapi_spec_hash: eeb8ebc5600523bdfad046381a929572 config_hash: 659f65b6ccf5612986f920f7f9abbcb5 diff --git a/src/codex/resources/projects/projects.py b/src/codex/resources/projects/projects.py index b048b284..11b95d4d 100644 --- a/src/codex/resources/projects/projects.py +++ b/src/codex/resources/projects/projects.py @@ -3,7 +3,7 @@ from __future__ import annotations import typing_extensions -from typing import Dict, List, Optional +from typing import Dict, List, Iterable, Optional from typing_extensions import Literal import httpx @@ -436,6 +436,7 @@ def validate( custom_eval_thresholds: Optional[Dict[str, float]] | NotGiven = NOT_GIVEN, custom_metadata: Optional[object] | NotGiven = NOT_GIVEN, eval_scores: Optional[Dict[str, float]] | NotGiven = NOT_GIVEN, + messages: Optional[Iterable[project_validate_params.Message]] | NotGiven = NOT_GIVEN, options: Optional[project_validate_params.Options] | NotGiven = NOT_GIVEN, quality_preset: Literal["best", "high", "medium", "low", "base"] | NotGiven = NOT_GIVEN, task: Optional[str] | NotGiven = NOT_GIVEN, @@ -465,6 +466,10 @@ def validate( eval_scores: Scores assessing different aspects of the RAG system. If not provided, TLM will be used to generate scores. + messages: Optional message history to provide conversation context for the query. Used to + rewrite query into a self-contained version of itself. If not provided, the + query will be treated as self-contained. + options: Typed dict of advanced configuration options for the Trustworthy Language Model. Many of these configurations are determined by the quality preset selected (learn about quality presets in the TLM [initialization method](./#class-tlm)). @@ -490,27 +495,24 @@ def validate( `use_self_reflection` = True. - **base:** `num_candidate_responses` = 1, `num_consistency_samples` = 0, `use_self_reflection` = False. When using `get_trustworthiness_score()` on - "base" preset, a cheaper self-reflection will be used to compute the - trustworthiness score. - - By default, the TLM uses the "medium" quality preset. The default base LLM - `model` used is "gpt-4o-mini", and `max_tokens` is 512 for all quality presets. - You can set custom values for these arguments regardless of the quality preset - specified. - - Args: model ({"gpt-4o-mini", "gpt-4o", "gpt-4.1", "gpt-4.1-mini", - "gpt-4.1-nano", "o4-mini", "o3", "o3-mini", "o1", "o1-mini", "gpt-4", - "gpt-4.5-preview", "gpt-3.5-turbo-16k", "claude-3.7-sonnet", - "claude-3.5-sonnet-v2", "claude-3.5-sonnet", "claude-3.5-haiku", - "claude-3-haiku", "nova-micro", "nova-lite", "nova-pro"}, default = - "gpt-4o-mini"): Underlying base LLM to use (better models yield better results, - faster models yield faster/cheaper results). - Models still in beta: "o3", "o1", - "o4-mini", "o3-mini", "o1-mini", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", - "gpt-4.5-preview", "claude-3.7-sonnet", "claude-3.5-sonnet-v2", - "claude-3.5-haiku", "nova-micro", "nova-lite", "nova-pro". - Recommended models - for accuracy: "gpt-4.1", "o4-mini", "o3", "claude-3.7-sonnet", - "claude-3.5-sonnet-v2". 
- Recommended models for low latency/costs: - "gpt-4.1-nano", "nova-micro". + "base" preset, a faster self-reflection is employed. + + By default, TLM uses the: "medium" `quality_preset`, "gpt-4.1-mini" base + `model`, and `max_tokens` is set to 512. You can set custom values for these + arguments regardless of the quality preset specified. + + Args: model ({"gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", "o4-mini", "o3", + "gpt-4.5-preview", "gpt-4o-mini", "gpt-4o", "o3-mini", "o1", "o1-mini", "gpt-4", + "gpt-3.5-turbo-16k", "claude-opus-4-0", "claude-sonnet-4-0", + "claude-3.7-sonnet", "claude-3.5-sonnet-v2", "claude-3.5-sonnet", + "claude-3.5-haiku", "claude-3-haiku", "nova-micro", "nova-lite", "nova-pro"}, + default = "gpt-4.1-mini"): Underlying base LLM to use (better models yield + better results, faster models yield faster results). - Models still in beta: + "o3", "o1", "o4-mini", "o3-mini", "o1-mini", "gpt-4.5-preview", + "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet", + "claude-3.5-haiku". - Recommended models for accuracy: "gpt-4.1", "o4-mini", + "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models for low + latency/costs: "gpt-4.1-nano", "nova-micro". max_tokens (int, default = 512): the maximum number of tokens that can be generated in the TLM response (and in internal trustworthiness scoring). Higher values here may produce better (more reliable) TLM responses and trustworthiness scores, but at higher runtimes/costs. @@ -536,7 +538,7 @@ def validate( similarity_measure ({"semantic", "string", "embedding", "embedding_large", "code", "discrepancy"}, default = "semantic"): how the trustworthiness scoring's consistency algorithm measures similarity between alternative responses considered plausible by the model. - Supported similarity measures include: "semantic" (based on natural language inference), + Supported similarity measures include - "semantic" (based on natural language inference), "embedding" (based on vector embedding similarity), "embedding_large" (based on a larger embedding model), "code" (based on model-based analysis designed to compare code), "discrepancy" (based on model-based analysis of possible discrepancies), and "string" (based on character/word overlap). Set this to "string" for minimal runtimes/costs. @@ -588,6 +590,7 @@ def validate( "custom_eval_thresholds": custom_eval_thresholds, "custom_metadata": custom_metadata, "eval_scores": eval_scores, + "messages": messages, "options": options, "quality_preset": quality_preset, "task": task, @@ -985,6 +988,7 @@ async def validate( custom_eval_thresholds: Optional[Dict[str, float]] | NotGiven = NOT_GIVEN, custom_metadata: Optional[object] | NotGiven = NOT_GIVEN, eval_scores: Optional[Dict[str, float]] | NotGiven = NOT_GIVEN, + messages: Optional[Iterable[project_validate_params.Message]] | NotGiven = NOT_GIVEN, options: Optional[project_validate_params.Options] | NotGiven = NOT_GIVEN, quality_preset: Literal["best", "high", "medium", "low", "base"] | NotGiven = NOT_GIVEN, task: Optional[str] | NotGiven = NOT_GIVEN, @@ -1014,6 +1018,10 @@ async def validate( eval_scores: Scores assessing different aspects of the RAG system. If not provided, TLM will be used to generate scores. + messages: Optional message history to provide conversation context for the query. Used to + rewrite query into a self-contained version of itself. If not provided, the + query will be treated as self-contained. + options: Typed dict of advanced configuration options for the Trustworthy Language Model. 
Many of these configurations are determined by the quality preset selected (learn about quality presets in the TLM [initialization method](./#class-tlm)). @@ -1039,27 +1047,24 @@ async def validate( `use_self_reflection` = True. - **base:** `num_candidate_responses` = 1, `num_consistency_samples` = 0, `use_self_reflection` = False. When using `get_trustworthiness_score()` on - "base" preset, a cheaper self-reflection will be used to compute the - trustworthiness score. - - By default, the TLM uses the "medium" quality preset. The default base LLM - `model` used is "gpt-4o-mini", and `max_tokens` is 512 for all quality presets. - You can set custom values for these arguments regardless of the quality preset - specified. - - Args: model ({"gpt-4o-mini", "gpt-4o", "gpt-4.1", "gpt-4.1-mini", - "gpt-4.1-nano", "o4-mini", "o3", "o3-mini", "o1", "o1-mini", "gpt-4", - "gpt-4.5-preview", "gpt-3.5-turbo-16k", "claude-3.7-sonnet", - "claude-3.5-sonnet-v2", "claude-3.5-sonnet", "claude-3.5-haiku", - "claude-3-haiku", "nova-micro", "nova-lite", "nova-pro"}, default = - "gpt-4o-mini"): Underlying base LLM to use (better models yield better results, - faster models yield faster/cheaper results). - Models still in beta: "o3", "o1", - "o4-mini", "o3-mini", "o1-mini", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", - "gpt-4.5-preview", "claude-3.7-sonnet", "claude-3.5-sonnet-v2", - "claude-3.5-haiku", "nova-micro", "nova-lite", "nova-pro". - Recommended models - for accuracy: "gpt-4.1", "o4-mini", "o3", "claude-3.7-sonnet", - "claude-3.5-sonnet-v2". - Recommended models for low latency/costs: - "gpt-4.1-nano", "nova-micro". + "base" preset, a faster self-reflection is employed. + + By default, TLM uses the: "medium" `quality_preset`, "gpt-4.1-mini" base + `model`, and `max_tokens` is set to 512. You can set custom values for these + arguments regardless of the quality preset specified. + + Args: model ({"gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", "o4-mini", "o3", + "gpt-4.5-preview", "gpt-4o-mini", "gpt-4o", "o3-mini", "o1", "o1-mini", "gpt-4", + "gpt-3.5-turbo-16k", "claude-opus-4-0", "claude-sonnet-4-0", + "claude-3.7-sonnet", "claude-3.5-sonnet-v2", "claude-3.5-sonnet", + "claude-3.5-haiku", "claude-3-haiku", "nova-micro", "nova-lite", "nova-pro"}, + default = "gpt-4.1-mini"): Underlying base LLM to use (better models yield + better results, faster models yield faster results). - Models still in beta: + "o3", "o1", "o4-mini", "o3-mini", "o1-mini", "gpt-4.5-preview", + "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet", + "claude-3.5-haiku". - Recommended models for accuracy: "gpt-4.1", "o4-mini", + "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models for low + latency/costs: "gpt-4.1-nano", "nova-micro". max_tokens (int, default = 512): the maximum number of tokens that can be generated in the TLM response (and in internal trustworthiness scoring). Higher values here may produce better (more reliable) TLM responses and trustworthiness scores, but at higher runtimes/costs. @@ -1085,7 +1090,7 @@ async def validate( similarity_measure ({"semantic", "string", "embedding", "embedding_large", "code", "discrepancy"}, default = "semantic"): how the trustworthiness scoring's consistency algorithm measures similarity between alternative responses considered plausible by the model. 
- Supported similarity measures include: "semantic" (based on natural language inference), + Supported similarity measures include - "semantic" (based on natural language inference), "embedding" (based on vector embedding similarity), "embedding_large" (based on a larger embedding model), "code" (based on model-based analysis designed to compare code), "discrepancy" (based on model-based analysis of possible discrepancies), and "string" (based on character/word overlap). Set this to "string" for minimal runtimes/costs. @@ -1137,6 +1142,7 @@ async def validate( "custom_eval_thresholds": custom_eval_thresholds, "custom_metadata": custom_metadata, "eval_scores": eval_scores, + "messages": messages, "options": options, "quality_preset": quality_preset, "task": task, diff --git a/src/codex/resources/tlm.py b/src/codex/resources/tlm.py index 5e4cd7e1..12ff6c0d 100644 --- a/src/codex/resources/tlm.py +++ b/src/codex/resources/tlm.py @@ -89,27 +89,24 @@ def prompt( `use_self_reflection` = True. - **base:** `num_candidate_responses` = 1, `num_consistency_samples` = 0, `use_self_reflection` = False. When using `get_trustworthiness_score()` on - "base" preset, a cheaper self-reflection will be used to compute the - trustworthiness score. - - By default, the TLM uses the "medium" quality preset. The default base LLM - `model` used is "gpt-4o-mini", and `max_tokens` is 512 for all quality presets. - You can set custom values for these arguments regardless of the quality preset - specified. - - Args: model ({"gpt-4o-mini", "gpt-4o", "gpt-4.1", "gpt-4.1-mini", - "gpt-4.1-nano", "o4-mini", "o3", "o3-mini", "o1", "o1-mini", "gpt-4", - "gpt-4.5-preview", "gpt-3.5-turbo-16k", "claude-3.7-sonnet", - "claude-3.5-sonnet-v2", "claude-3.5-sonnet", "claude-3.5-haiku", - "claude-3-haiku", "nova-micro", "nova-lite", "nova-pro"}, default = - "gpt-4o-mini"): Underlying base LLM to use (better models yield better results, - faster models yield faster/cheaper results). - Models still in beta: "o3", "o1", - "o4-mini", "o3-mini", "o1-mini", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", - "gpt-4.5-preview", "claude-3.7-sonnet", "claude-3.5-sonnet-v2", - "claude-3.5-haiku", "nova-micro", "nova-lite", "nova-pro". - Recommended models - for accuracy: "gpt-4.1", "o4-mini", "o3", "claude-3.7-sonnet", - "claude-3.5-sonnet-v2". - Recommended models for low latency/costs: - "gpt-4.1-nano", "nova-micro". + "base" preset, a faster self-reflection is employed. + + By default, TLM uses the: "medium" `quality_preset`, "gpt-4.1-mini" base + `model`, and `max_tokens` is set to 512. You can set custom values for these + arguments regardless of the quality preset specified. + + Args: model ({"gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", "o4-mini", "o3", + "gpt-4.5-preview", "gpt-4o-mini", "gpt-4o", "o3-mini", "o1", "o1-mini", "gpt-4", + "gpt-3.5-turbo-16k", "claude-opus-4-0", "claude-sonnet-4-0", + "claude-3.7-sonnet", "claude-3.5-sonnet-v2", "claude-3.5-sonnet", + "claude-3.5-haiku", "claude-3-haiku", "nova-micro", "nova-lite", "nova-pro"}, + default = "gpt-4.1-mini"): Underlying base LLM to use (better models yield + better results, faster models yield faster results). - Models still in beta: + "o3", "o1", "o4-mini", "o3-mini", "o1-mini", "gpt-4.5-preview", + "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet", + "claude-3.5-haiku". - Recommended models for accuracy: "gpt-4.1", "o4-mini", + "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models for low + latency/costs: "gpt-4.1-nano", "nova-micro". 
max_tokens (int, default = 512): the maximum number of tokens that can be generated in the TLM response (and in internal trustworthiness scoring). Higher values here may produce better (more reliable) TLM responses and trustworthiness scores, but at higher runtimes/costs. @@ -135,7 +132,7 @@ def prompt( similarity_measure ({"semantic", "string", "embedding", "embedding_large", "code", "discrepancy"}, default = "semantic"): how the trustworthiness scoring's consistency algorithm measures similarity between alternative responses considered plausible by the model. - Supported similarity measures include: "semantic" (based on natural language inference), + Supported similarity measures include - "semantic" (based on natural language inference), "embedding" (based on vector embedding similarity), "embedding_large" (based on a larger embedding model), "code" (based on model-based analysis designed to compare code), "discrepancy" (based on model-based analysis of possible discrepancies), and "string" (based on character/word overlap). Set this to "string" for minimal runtimes/costs. @@ -230,27 +227,24 @@ def score( `use_self_reflection` = True. - **base:** `num_candidate_responses` = 1, `num_consistency_samples` = 0, `use_self_reflection` = False. When using `get_trustworthiness_score()` on - "base" preset, a cheaper self-reflection will be used to compute the - trustworthiness score. - - By default, the TLM uses the "medium" quality preset. The default base LLM - `model` used is "gpt-4o-mini", and `max_tokens` is 512 for all quality presets. - You can set custom values for these arguments regardless of the quality preset - specified. - - Args: model ({"gpt-4o-mini", "gpt-4o", "gpt-4.1", "gpt-4.1-mini", - "gpt-4.1-nano", "o4-mini", "o3", "o3-mini", "o1", "o1-mini", "gpt-4", - "gpt-4.5-preview", "gpt-3.5-turbo-16k", "claude-3.7-sonnet", - "claude-3.5-sonnet-v2", "claude-3.5-sonnet", "claude-3.5-haiku", - "claude-3-haiku", "nova-micro", "nova-lite", "nova-pro"}, default = - "gpt-4o-mini"): Underlying base LLM to use (better models yield better results, - faster models yield faster/cheaper results). - Models still in beta: "o3", "o1", - "o4-mini", "o3-mini", "o1-mini", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", - "gpt-4.5-preview", "claude-3.7-sonnet", "claude-3.5-sonnet-v2", - "claude-3.5-haiku", "nova-micro", "nova-lite", "nova-pro". - Recommended models - for accuracy: "gpt-4.1", "o4-mini", "o3", "claude-3.7-sonnet", - "claude-3.5-sonnet-v2". - Recommended models for low latency/costs: - "gpt-4.1-nano", "nova-micro". + "base" preset, a faster self-reflection is employed. + + By default, TLM uses the: "medium" `quality_preset`, "gpt-4.1-mini" base + `model`, and `max_tokens` is set to 512. You can set custom values for these + arguments regardless of the quality preset specified. + + Args: model ({"gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", "o4-mini", "o3", + "gpt-4.5-preview", "gpt-4o-mini", "gpt-4o", "o3-mini", "o1", "o1-mini", "gpt-4", + "gpt-3.5-turbo-16k", "claude-opus-4-0", "claude-sonnet-4-0", + "claude-3.7-sonnet", "claude-3.5-sonnet-v2", "claude-3.5-sonnet", + "claude-3.5-haiku", "claude-3-haiku", "nova-micro", "nova-lite", "nova-pro"}, + default = "gpt-4.1-mini"): Underlying base LLM to use (better models yield + better results, faster models yield faster results). - Models still in beta: + "o3", "o1", "o4-mini", "o3-mini", "o1-mini", "gpt-4.5-preview", + "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet", + "claude-3.5-haiku". 
- Recommended models for accuracy: "gpt-4.1", "o4-mini", + "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models for low + latency/costs: "gpt-4.1-nano", "nova-micro". max_tokens (int, default = 512): the maximum number of tokens that can be generated in the TLM response (and in internal trustworthiness scoring). Higher values here may produce better (more reliable) TLM responses and trustworthiness scores, but at higher runtimes/costs. @@ -276,7 +270,7 @@ def score( similarity_measure ({"semantic", "string", "embedding", "embedding_large", "code", "discrepancy"}, default = "semantic"): how the trustworthiness scoring's consistency algorithm measures similarity between alternative responses considered plausible by the model. - Supported similarity measures include: "semantic" (based on natural language inference), + Supported similarity measures include - "semantic" (based on natural language inference), "embedding" (based on vector embedding similarity), "embedding_large" (based on a larger embedding model), "code" (based on model-based analysis designed to compare code), "discrepancy" (based on model-based analysis of possible discrepancies), and "string" (based on character/word overlap). Set this to "string" for minimal runtimes/costs. @@ -387,27 +381,24 @@ async def prompt( `use_self_reflection` = True. - **base:** `num_candidate_responses` = 1, `num_consistency_samples` = 0, `use_self_reflection` = False. When using `get_trustworthiness_score()` on - "base" preset, a cheaper self-reflection will be used to compute the - trustworthiness score. - - By default, the TLM uses the "medium" quality preset. The default base LLM - `model` used is "gpt-4o-mini", and `max_tokens` is 512 for all quality presets. - You can set custom values for these arguments regardless of the quality preset - specified. - - Args: model ({"gpt-4o-mini", "gpt-4o", "gpt-4.1", "gpt-4.1-mini", - "gpt-4.1-nano", "o4-mini", "o3", "o3-mini", "o1", "o1-mini", "gpt-4", - "gpt-4.5-preview", "gpt-3.5-turbo-16k", "claude-3.7-sonnet", - "claude-3.5-sonnet-v2", "claude-3.5-sonnet", "claude-3.5-haiku", - "claude-3-haiku", "nova-micro", "nova-lite", "nova-pro"}, default = - "gpt-4o-mini"): Underlying base LLM to use (better models yield better results, - faster models yield faster/cheaper results). - Models still in beta: "o3", "o1", - "o4-mini", "o3-mini", "o1-mini", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", - "gpt-4.5-preview", "claude-3.7-sonnet", "claude-3.5-sonnet-v2", - "claude-3.5-haiku", "nova-micro", "nova-lite", "nova-pro". - Recommended models - for accuracy: "gpt-4.1", "o4-mini", "o3", "claude-3.7-sonnet", - "claude-3.5-sonnet-v2". - Recommended models for low latency/costs: - "gpt-4.1-nano", "nova-micro". + "base" preset, a faster self-reflection is employed. + + By default, TLM uses the: "medium" `quality_preset`, "gpt-4.1-mini" base + `model`, and `max_tokens` is set to 512. You can set custom values for these + arguments regardless of the quality preset specified. + + Args: model ({"gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", "o4-mini", "o3", + "gpt-4.5-preview", "gpt-4o-mini", "gpt-4o", "o3-mini", "o1", "o1-mini", "gpt-4", + "gpt-3.5-turbo-16k", "claude-opus-4-0", "claude-sonnet-4-0", + "claude-3.7-sonnet", "claude-3.5-sonnet-v2", "claude-3.5-sonnet", + "claude-3.5-haiku", "claude-3-haiku", "nova-micro", "nova-lite", "nova-pro"}, + default = "gpt-4.1-mini"): Underlying base LLM to use (better models yield + better results, faster models yield faster results). 
- Models still in beta: + "o3", "o1", "o4-mini", "o3-mini", "o1-mini", "gpt-4.5-preview", + "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet", + "claude-3.5-haiku". - Recommended models for accuracy: "gpt-4.1", "o4-mini", + "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models for low + latency/costs: "gpt-4.1-nano", "nova-micro". max_tokens (int, default = 512): the maximum number of tokens that can be generated in the TLM response (and in internal trustworthiness scoring). Higher values here may produce better (more reliable) TLM responses and trustworthiness scores, but at higher runtimes/costs. @@ -433,7 +424,7 @@ async def prompt( similarity_measure ({"semantic", "string", "embedding", "embedding_large", "code", "discrepancy"}, default = "semantic"): how the trustworthiness scoring's consistency algorithm measures similarity between alternative responses considered plausible by the model. - Supported similarity measures include: "semantic" (based on natural language inference), + Supported similarity measures include - "semantic" (based on natural language inference), "embedding" (based on vector embedding similarity), "embedding_large" (based on a larger embedding model), "code" (based on model-based analysis designed to compare code), "discrepancy" (based on model-based analysis of possible discrepancies), and "string" (based on character/word overlap). Set this to "string" for minimal runtimes/costs. @@ -528,27 +519,24 @@ async def score( `use_self_reflection` = True. - **base:** `num_candidate_responses` = 1, `num_consistency_samples` = 0, `use_self_reflection` = False. When using `get_trustworthiness_score()` on - "base" preset, a cheaper self-reflection will be used to compute the - trustworthiness score. - - By default, the TLM uses the "medium" quality preset. The default base LLM - `model` used is "gpt-4o-mini", and `max_tokens` is 512 for all quality presets. - You can set custom values for these arguments regardless of the quality preset - specified. - - Args: model ({"gpt-4o-mini", "gpt-4o", "gpt-4.1", "gpt-4.1-mini", - "gpt-4.1-nano", "o4-mini", "o3", "o3-mini", "o1", "o1-mini", "gpt-4", - "gpt-4.5-preview", "gpt-3.5-turbo-16k", "claude-3.7-sonnet", - "claude-3.5-sonnet-v2", "claude-3.5-sonnet", "claude-3.5-haiku", - "claude-3-haiku", "nova-micro", "nova-lite", "nova-pro"}, default = - "gpt-4o-mini"): Underlying base LLM to use (better models yield better results, - faster models yield faster/cheaper results). - Models still in beta: "o3", "o1", - "o4-mini", "o3-mini", "o1-mini", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", - "gpt-4.5-preview", "claude-3.7-sonnet", "claude-3.5-sonnet-v2", - "claude-3.5-haiku", "nova-micro", "nova-lite", "nova-pro". - Recommended models - for accuracy: "gpt-4.1", "o4-mini", "o3", "claude-3.7-sonnet", - "claude-3.5-sonnet-v2". - Recommended models for low latency/costs: - "gpt-4.1-nano", "nova-micro". + "base" preset, a faster self-reflection is employed. + + By default, TLM uses the: "medium" `quality_preset`, "gpt-4.1-mini" base + `model`, and `max_tokens` is set to 512. You can set custom values for these + arguments regardless of the quality preset specified. 
+ + Args: model ({"gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", "o4-mini", "o3", + "gpt-4.5-preview", "gpt-4o-mini", "gpt-4o", "o3-mini", "o1", "o1-mini", "gpt-4", + "gpt-3.5-turbo-16k", "claude-opus-4-0", "claude-sonnet-4-0", + "claude-3.7-sonnet", "claude-3.5-sonnet-v2", "claude-3.5-sonnet", + "claude-3.5-haiku", "claude-3-haiku", "nova-micro", "nova-lite", "nova-pro"}, + default = "gpt-4.1-mini"): Underlying base LLM to use (better models yield + better results, faster models yield faster results). - Models still in beta: + "o3", "o1", "o4-mini", "o3-mini", "o1-mini", "gpt-4.5-preview", + "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet", + "claude-3.5-haiku". - Recommended models for accuracy: "gpt-4.1", "o4-mini", + "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models for low + latency/costs: "gpt-4.1-nano", "nova-micro". max_tokens (int, default = 512): the maximum number of tokens that can be generated in the TLM response (and in internal trustworthiness scoring). Higher values here may produce better (more reliable) TLM responses and trustworthiness scores, but at higher runtimes/costs. @@ -574,7 +562,7 @@ async def score( similarity_measure ({"semantic", "string", "embedding", "embedding_large", "code", "discrepancy"}, default = "semantic"): how the trustworthiness scoring's consistency algorithm measures similarity between alternative responses considered plausible by the model. - Supported similarity measures include: "semantic" (based on natural language inference), + Supported similarity measures include - "semantic" (based on natural language inference), "embedding" (based on vector embedding similarity), "embedding_large" (based on a larger embedding model), "code" (based on model-based analysis designed to compare code), "discrepancy" (based on model-based analysis of possible discrepancies), and "string" (based on character/word overlap). Set this to "string" for minimal runtimes/costs. 
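The docstring updates above change TLM's documented defaults to the "medium" quality preset, the "gpt-4.1-mini" base model, and max_tokens=512, and they list the recommended models for accuracy versus low latency/costs. A hedged sketch of overriding those defaults through the options dict; the option keys come from the Args documented above, while the prompt/response keyword names follow the tlm_prompt_params and tlm_score_params modules in this patch's diffstat and should be treated as assumptions:

    from codex import Codex

    client = Codex()  # assumes the API key is supplied via the environment

    # Override the documented defaults with a higher-accuracy configuration.
    result = client.tlm.prompt(
        prompt="Summarize our refund policy in one sentence.",  # keyword assumed
        quality_preset="high",  # assumed to be a separate argument, as in validate()
        options={
            "model": "gpt-4.1",              # listed above as recommended for accuracy
            "max_tokens": 256,               # below the documented 512 default
            "similarity_measure": "semantic",
        },
    )

    # Scoring an existing response uses the same options dict (keywords assumed).
    score = client.tlm.score(
        prompt="Summarize our refund policy in one sentence.",
        response="Refunds are available within 30 days of purchase.",
        options={"model": "gpt-4.1"},
    )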
diff --git a/src/codex/types/project_validate_params.py b/src/codex/types/project_validate_params.py index a855aa6f..8b38ebfa 100644 --- a/src/codex/types/project_validate_params.py +++ b/src/codex/types/project_validate_params.py @@ -2,12 +2,40 @@ from __future__ import annotations -from typing import Dict, List, Iterable, Optional -from typing_extensions import Literal, Required, Annotated, TypedDict +from typing import Dict, List, Union, Iterable, Optional +from typing_extensions import Literal, Required, Annotated, TypeAlias, TypedDict from .._utils import PropertyInfo -__all__ = ["ProjectValidateParams", "Options"] +__all__ = [ + "ProjectValidateParams", + "Message", + "MessageChatCompletionDeveloperMessageParam", + "MessageChatCompletionDeveloperMessageParamContentUnionMember1", + "MessageChatCompletionSystemMessageParam", + "MessageChatCompletionSystemMessageParamContentUnionMember1", + "MessageChatCompletionUserMessageParam", + "MessageChatCompletionUserMessageParamContentUnionMember1", + "MessageChatCompletionUserMessageParamContentUnionMember1ChatCompletionContentPartTextParam", + "MessageChatCompletionUserMessageParamContentUnionMember1ChatCompletionContentPartImageParam", + "MessageChatCompletionUserMessageParamContentUnionMember1ChatCompletionContentPartImageParamImageURL", + "MessageChatCompletionUserMessageParamContentUnionMember1ChatCompletionContentPartInputAudioParam", + "MessageChatCompletionUserMessageParamContentUnionMember1ChatCompletionContentPartInputAudioParamInputAudio", + "MessageChatCompletionUserMessageParamContentUnionMember1File", + "MessageChatCompletionUserMessageParamContentUnionMember1FileFile", + "MessageChatCompletionAssistantMessageParam", + "MessageChatCompletionAssistantMessageParamAudio", + "MessageChatCompletionAssistantMessageParamContentUnionMember1", + "MessageChatCompletionAssistantMessageParamContentUnionMember1ChatCompletionContentPartTextParam", + "MessageChatCompletionAssistantMessageParamContentUnionMember1ChatCompletionContentPartRefusalParam", + "MessageChatCompletionAssistantMessageParamFunctionCall", + "MessageChatCompletionAssistantMessageParamToolCall", + "MessageChatCompletionAssistantMessageParamToolCallFunction", + "MessageChatCompletionToolMessageParam", + "MessageChatCompletionToolMessageParamContentUnionMember1", + "MessageChatCompletionFunctionMessageParam", + "Options", +] class ProjectValidateParams(TypedDict, total=False): @@ -38,6 +66,13 @@ class ProjectValidateParams(TypedDict, total=False): If not provided, TLM will be used to generate scores. """ + messages: Optional[Iterable[Message]] + """Optional message history to provide conversation context for the query. + + Used to rewrite query into a self-contained version of itself. If not provided, + the query will be treated as self-contained. + """ + options: Optional[Options] """ Typed dict of advanced configuration options for the Trustworthy Language Model. @@ -65,27 +100,24 @@ class ProjectValidateParams(TypedDict, total=False): `use_self_reflection` = True. - **base:** `num_candidate_responses` = 1, `num_consistency_samples` = 0, `use_self_reflection` = False. When using `get_trustworthiness_score()` on - "base" preset, a cheaper self-reflection will be used to compute the - trustworthiness score. - - By default, the TLM uses the "medium" quality preset. The default base LLM - `model` used is "gpt-4o-mini", and `max_tokens` is 512 for all quality presets. - You can set custom values for these arguments regardless of the quality preset - specified. 
- - Args: model ({"gpt-4o-mini", "gpt-4o", "gpt-4.1", "gpt-4.1-mini", - "gpt-4.1-nano", "o4-mini", "o3", "o3-mini", "o1", "o1-mini", "gpt-4", - "gpt-4.5-preview", "gpt-3.5-turbo-16k", "claude-3.7-sonnet", - "claude-3.5-sonnet-v2", "claude-3.5-sonnet", "claude-3.5-haiku", - "claude-3-haiku", "nova-micro", "nova-lite", "nova-pro"}, default = - "gpt-4o-mini"): Underlying base LLM to use (better models yield better results, - faster models yield faster/cheaper results). - Models still in beta: "o3", "o1", - "o4-mini", "o3-mini", "o1-mini", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", - "gpt-4.5-preview", "claude-3.7-sonnet", "claude-3.5-sonnet-v2", - "claude-3.5-haiku", "nova-micro", "nova-lite", "nova-pro". - Recommended models - for accuracy: "gpt-4.1", "o4-mini", "o3", "claude-3.7-sonnet", - "claude-3.5-sonnet-v2". - Recommended models for low latency/costs: - "gpt-4.1-nano", "nova-micro". + "base" preset, a faster self-reflection is employed. + + By default, TLM uses the: "medium" `quality_preset`, "gpt-4.1-mini" base + `model`, and `max_tokens` is set to 512. You can set custom values for these + arguments regardless of the quality preset specified. + + Args: model ({"gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", "o4-mini", "o3", + "gpt-4.5-preview", "gpt-4o-mini", "gpt-4o", "o3-mini", "o1", "o1-mini", "gpt-4", + "gpt-3.5-turbo-16k", "claude-opus-4-0", "claude-sonnet-4-0", + "claude-3.7-sonnet", "claude-3.5-sonnet-v2", "claude-3.5-sonnet", + "claude-3.5-haiku", "claude-3-haiku", "nova-micro", "nova-lite", "nova-pro"}, + default = "gpt-4.1-mini"): Underlying base LLM to use (better models yield + better results, faster models yield faster results). - Models still in beta: + "o3", "o1", "o4-mini", "o3-mini", "o1-mini", "gpt-4.5-preview", + "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet", + "claude-3.5-haiku". - Recommended models for accuracy: "gpt-4.1", "o4-mini", + "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models for low + latency/costs: "gpt-4.1-nano", "nova-micro". max_tokens (int, default = 512): the maximum number of tokens that can be generated in the TLM response (and in internal trustworthiness scoring). Higher values here may produce better (more reliable) TLM responses and trustworthiness scores, but at higher runtimes/costs. @@ -111,7 +143,7 @@ class ProjectValidateParams(TypedDict, total=False): similarity_measure ({"semantic", "string", "embedding", "embedding_large", "code", "discrepancy"}, default = "semantic"): how the trustworthiness scoring's consistency algorithm measures similarity between alternative responses considered plausible by the model. - Supported similarity measures include: "semantic" (based on natural language inference), + Supported similarity measures include - "semantic" (based on natural language inference), "embedding" (based on vector embedding similarity), "embedding_large" (based on a larger embedding model), "code" (based on model-based analysis designed to compare code), "discrepancy" (based on model-based analysis of possible discrepancies), and "string" (based on character/word overlap). Set this to "string" for minimal runtimes/costs. 
@@ -143,6 +175,202 @@ class ProjectValidateParams(TypedDict, total=False): x_stainless_package_version: Annotated[str, PropertyInfo(alias="x-stainless-package-version")] +class MessageChatCompletionDeveloperMessageParamContentUnionMember1(TypedDict, total=False): + text: Required[str] + + type: Required[Literal["text"]] + + +class MessageChatCompletionDeveloperMessageParam(TypedDict, total=False): + content: Required[Union[str, Iterable[MessageChatCompletionDeveloperMessageParamContentUnionMember1]]] + + role: Required[Literal["developer"]] + + name: str + + +class MessageChatCompletionSystemMessageParamContentUnionMember1(TypedDict, total=False): + text: Required[str] + + type: Required[Literal["text"]] + + +class MessageChatCompletionSystemMessageParam(TypedDict, total=False): + content: Required[Union[str, Iterable[MessageChatCompletionSystemMessageParamContentUnionMember1]]] + + role: Required[Literal["system"]] + + name: str + + +class MessageChatCompletionUserMessageParamContentUnionMember1ChatCompletionContentPartTextParam( + TypedDict, total=False +): + text: Required[str] + + type: Required[Literal["text"]] + + +class MessageChatCompletionUserMessageParamContentUnionMember1ChatCompletionContentPartImageParamImageURL( + TypedDict, total=False +): + url: Required[str] + + detail: Literal["auto", "low", "high"] + + +class MessageChatCompletionUserMessageParamContentUnionMember1ChatCompletionContentPartImageParam( + TypedDict, total=False +): + image_url: Required[ + MessageChatCompletionUserMessageParamContentUnionMember1ChatCompletionContentPartImageParamImageURL + ] + + type: Required[Literal["image_url"]] + + +class MessageChatCompletionUserMessageParamContentUnionMember1ChatCompletionContentPartInputAudioParamInputAudio( + TypedDict, total=False +): + data: Required[str] + + format: Required[Literal["wav", "mp3"]] + + +class MessageChatCompletionUserMessageParamContentUnionMember1ChatCompletionContentPartInputAudioParam( + TypedDict, total=False +): + input_audio: Required[ + MessageChatCompletionUserMessageParamContentUnionMember1ChatCompletionContentPartInputAudioParamInputAudio + ] + + type: Required[Literal["input_audio"]] + + +class MessageChatCompletionUserMessageParamContentUnionMember1FileFile(TypedDict, total=False): + file_data: str + + file_id: str + + filename: str + + +class MessageChatCompletionUserMessageParamContentUnionMember1File(TypedDict, total=False): + file: Required[MessageChatCompletionUserMessageParamContentUnionMember1FileFile] + + type: Required[Literal["file"]] + + +MessageChatCompletionUserMessageParamContentUnionMember1: TypeAlias = Union[ + MessageChatCompletionUserMessageParamContentUnionMember1ChatCompletionContentPartTextParam, + MessageChatCompletionUserMessageParamContentUnionMember1ChatCompletionContentPartImageParam, + MessageChatCompletionUserMessageParamContentUnionMember1ChatCompletionContentPartInputAudioParam, + MessageChatCompletionUserMessageParamContentUnionMember1File, +] + + +class MessageChatCompletionUserMessageParam(TypedDict, total=False): + content: Required[Union[str, Iterable[MessageChatCompletionUserMessageParamContentUnionMember1]]] + + role: Required[Literal["user"]] + + name: str + + +class MessageChatCompletionAssistantMessageParamAudio(TypedDict, total=False): + id: Required[str] + + +class MessageChatCompletionAssistantMessageParamContentUnionMember1ChatCompletionContentPartTextParam( + TypedDict, total=False +): + text: Required[str] + + type: Required[Literal["text"]] + + +class 
MessageChatCompletionAssistantMessageParamContentUnionMember1ChatCompletionContentPartRefusalParam( + TypedDict, total=False +): + refusal: Required[str] + + type: Required[Literal["refusal"]] + + +MessageChatCompletionAssistantMessageParamContentUnionMember1: TypeAlias = Union[ + MessageChatCompletionAssistantMessageParamContentUnionMember1ChatCompletionContentPartTextParam, + MessageChatCompletionAssistantMessageParamContentUnionMember1ChatCompletionContentPartRefusalParam, +] + + +class MessageChatCompletionAssistantMessageParamFunctionCall(TypedDict, total=False): + arguments: Required[str] + + name: Required[str] + + +class MessageChatCompletionAssistantMessageParamToolCallFunction(TypedDict, total=False): + arguments: Required[str] + + name: Required[str] + + +class MessageChatCompletionAssistantMessageParamToolCall(TypedDict, total=False): + id: Required[str] + + function: Required[MessageChatCompletionAssistantMessageParamToolCallFunction] + + type: Required[Literal["function"]] + + +class MessageChatCompletionAssistantMessageParam(TypedDict, total=False): + role: Required[Literal["assistant"]] + + audio: Optional[MessageChatCompletionAssistantMessageParamAudio] + + content: Union[str, Iterable[MessageChatCompletionAssistantMessageParamContentUnionMember1], None] + + function_call: Optional[MessageChatCompletionAssistantMessageParamFunctionCall] + + name: str + + refusal: Optional[str] + + tool_calls: Iterable[MessageChatCompletionAssistantMessageParamToolCall] + + +class MessageChatCompletionToolMessageParamContentUnionMember1(TypedDict, total=False): + text: Required[str] + + type: Required[Literal["text"]] + + +class MessageChatCompletionToolMessageParam(TypedDict, total=False): + content: Required[Union[str, Iterable[MessageChatCompletionToolMessageParamContentUnionMember1]]] + + role: Required[Literal["tool"]] + + tool_call_id: Required[str] + + +class MessageChatCompletionFunctionMessageParam(TypedDict, total=False): + content: Required[Optional[str]] + + name: Required[str] + + role: Required[Literal["function"]] + + +Message: TypeAlias = Union[ + MessageChatCompletionDeveloperMessageParam, + MessageChatCompletionSystemMessageParam, + MessageChatCompletionUserMessageParam, + MessageChatCompletionAssistantMessageParam, + MessageChatCompletionToolMessageParam, + MessageChatCompletionFunctionMessageParam, +] + + class Options(TypedDict, total=False): custom_eval_criteria: Iterable[object] diff --git a/src/codex/types/projects/entry_query_params.py b/src/codex/types/projects/entry_query_params.py index 1edabbed..2ba33b82 100644 --- a/src/codex/types/projects/entry_query_params.py +++ b/src/codex/types/projects/entry_query_params.py @@ -3,11 +3,40 @@ from __future__ import annotations from typing import Dict, List, Union, Iterable, Optional -from typing_extensions import Required, Annotated, TypedDict +from typing_extensions import Literal, Required, Annotated, TypeAlias, TypedDict from ..._utils import PropertyInfo -__all__ = ["EntryQueryParams", "QueryMetadata", "QueryMetadataContextUnionMember3"] +__all__ = [ + "EntryQueryParams", + "QueryMetadata", + "QueryMetadataContextUnionMember3", + "QueryMetadataMessage", + "QueryMetadataMessageChatCompletionDeveloperMessageParam", + "QueryMetadataMessageChatCompletionDeveloperMessageParamContentUnionMember1", + "QueryMetadataMessageChatCompletionSystemMessageParam", + "QueryMetadataMessageChatCompletionSystemMessageParamContentUnionMember1", + "QueryMetadataMessageChatCompletionUserMessageParam", + 
"QueryMetadataMessageChatCompletionUserMessageParamContentUnionMember1", + "QueryMetadataMessageChatCompletionUserMessageParamContentUnionMember1ChatCompletionContentPartTextParam", + "QueryMetadataMessageChatCompletionUserMessageParamContentUnionMember1ChatCompletionContentPartImageParam", + "QueryMetadataMessageChatCompletionUserMessageParamContentUnionMember1ChatCompletionContentPartImageParamImageURL", + "QueryMetadataMessageChatCompletionUserMessageParamContentUnionMember1ChatCompletionContentPartInputAudioParam", + "QueryMetadataMessageChatCompletionUserMessageParamContentUnionMember1ChatCompletionContentPartInputAudioParamInputAudio", + "QueryMetadataMessageChatCompletionUserMessageParamContentUnionMember1File", + "QueryMetadataMessageChatCompletionUserMessageParamContentUnionMember1FileFile", + "QueryMetadataMessageChatCompletionAssistantMessageParam", + "QueryMetadataMessageChatCompletionAssistantMessageParamAudio", + "QueryMetadataMessageChatCompletionAssistantMessageParamContentUnionMember1", + "QueryMetadataMessageChatCompletionAssistantMessageParamContentUnionMember1ChatCompletionContentPartTextParam", + "QueryMetadataMessageChatCompletionAssistantMessageParamContentUnionMember1ChatCompletionContentPartRefusalParam", + "QueryMetadataMessageChatCompletionAssistantMessageParamFunctionCall", + "QueryMetadataMessageChatCompletionAssistantMessageParamToolCall", + "QueryMetadataMessageChatCompletionAssistantMessageParamToolCallFunction", + "QueryMetadataMessageChatCompletionToolMessageParam", + "QueryMetadataMessageChatCompletionToolMessageParamContentUnionMember1", + "QueryMetadataMessageChatCompletionFunctionMessageParam", +] class EntryQueryParams(TypedDict, total=False): @@ -47,6 +76,202 @@ class QueryMetadataContextUnionMember3(TypedDict, total=False): """Title or heading of the document. 
Useful for display and context.""" +class QueryMetadataMessageChatCompletionDeveloperMessageParamContentUnionMember1(TypedDict, total=False): + text: Required[str] + + type: Required[Literal["text"]] + + +class QueryMetadataMessageChatCompletionDeveloperMessageParam(TypedDict, total=False): + content: Required[Union[str, Iterable[QueryMetadataMessageChatCompletionDeveloperMessageParamContentUnionMember1]]] + + role: Required[Literal["developer"]] + + name: str + + +class QueryMetadataMessageChatCompletionSystemMessageParamContentUnionMember1(TypedDict, total=False): + text: Required[str] + + type: Required[Literal["text"]] + + +class QueryMetadataMessageChatCompletionSystemMessageParam(TypedDict, total=False): + content: Required[Union[str, Iterable[QueryMetadataMessageChatCompletionSystemMessageParamContentUnionMember1]]] + + role: Required[Literal["system"]] + + name: str + + +class QueryMetadataMessageChatCompletionUserMessageParamContentUnionMember1ChatCompletionContentPartTextParam( + TypedDict, total=False +): + text: Required[str] + + type: Required[Literal["text"]] + + +class QueryMetadataMessageChatCompletionUserMessageParamContentUnionMember1ChatCompletionContentPartImageParamImageURL( + TypedDict, total=False +): + url: Required[str] + + detail: Literal["auto", "low", "high"] + + +class QueryMetadataMessageChatCompletionUserMessageParamContentUnionMember1ChatCompletionContentPartImageParam( + TypedDict, total=False +): + image_url: Required[ + QueryMetadataMessageChatCompletionUserMessageParamContentUnionMember1ChatCompletionContentPartImageParamImageURL + ] + + type: Required[Literal["image_url"]] + + +class QueryMetadataMessageChatCompletionUserMessageParamContentUnionMember1ChatCompletionContentPartInputAudioParamInputAudio( + TypedDict, total=False +): + data: Required[str] + + format: Required[Literal["wav", "mp3"]] + + +class QueryMetadataMessageChatCompletionUserMessageParamContentUnionMember1ChatCompletionContentPartInputAudioParam( + TypedDict, total=False +): + input_audio: Required[ + QueryMetadataMessageChatCompletionUserMessageParamContentUnionMember1ChatCompletionContentPartInputAudioParamInputAudio + ] + + type: Required[Literal["input_audio"]] + + +class QueryMetadataMessageChatCompletionUserMessageParamContentUnionMember1FileFile(TypedDict, total=False): + file_data: str + + file_id: str + + filename: str + + +class QueryMetadataMessageChatCompletionUserMessageParamContentUnionMember1File(TypedDict, total=False): + file: Required[QueryMetadataMessageChatCompletionUserMessageParamContentUnionMember1FileFile] + + type: Required[Literal["file"]] + + +QueryMetadataMessageChatCompletionUserMessageParamContentUnionMember1: TypeAlias = Union[ + QueryMetadataMessageChatCompletionUserMessageParamContentUnionMember1ChatCompletionContentPartTextParam, + QueryMetadataMessageChatCompletionUserMessageParamContentUnionMember1ChatCompletionContentPartImageParam, + QueryMetadataMessageChatCompletionUserMessageParamContentUnionMember1ChatCompletionContentPartInputAudioParam, + QueryMetadataMessageChatCompletionUserMessageParamContentUnionMember1File, +] + + +class QueryMetadataMessageChatCompletionUserMessageParam(TypedDict, total=False): + content: Required[Union[str, Iterable[QueryMetadataMessageChatCompletionUserMessageParamContentUnionMember1]]] + + role: Required[Literal["user"]] + + name: str + + +class QueryMetadataMessageChatCompletionAssistantMessageParamAudio(TypedDict, total=False): + id: Required[str] + + +class 
QueryMetadataMessageChatCompletionAssistantMessageParamContentUnionMember1ChatCompletionContentPartTextParam( + TypedDict, total=False +): + text: Required[str] + + type: Required[Literal["text"]] + + +class QueryMetadataMessageChatCompletionAssistantMessageParamContentUnionMember1ChatCompletionContentPartRefusalParam( + TypedDict, total=False +): + refusal: Required[str] + + type: Required[Literal["refusal"]] + + +QueryMetadataMessageChatCompletionAssistantMessageParamContentUnionMember1: TypeAlias = Union[ + QueryMetadataMessageChatCompletionAssistantMessageParamContentUnionMember1ChatCompletionContentPartTextParam, + QueryMetadataMessageChatCompletionAssistantMessageParamContentUnionMember1ChatCompletionContentPartRefusalParam, +] + + +class QueryMetadataMessageChatCompletionAssistantMessageParamFunctionCall(TypedDict, total=False): + arguments: Required[str] + + name: Required[str] + + +class QueryMetadataMessageChatCompletionAssistantMessageParamToolCallFunction(TypedDict, total=False): + arguments: Required[str] + + name: Required[str] + + +class QueryMetadataMessageChatCompletionAssistantMessageParamToolCall(TypedDict, total=False): + id: Required[str] + + function: Required[QueryMetadataMessageChatCompletionAssistantMessageParamToolCallFunction] + + type: Required[Literal["function"]] + + +class QueryMetadataMessageChatCompletionAssistantMessageParam(TypedDict, total=False): + role: Required[Literal["assistant"]] + + audio: Optional[QueryMetadataMessageChatCompletionAssistantMessageParamAudio] + + content: Union[str, Iterable[QueryMetadataMessageChatCompletionAssistantMessageParamContentUnionMember1], None] + + function_call: Optional[QueryMetadataMessageChatCompletionAssistantMessageParamFunctionCall] + + name: str + + refusal: Optional[str] + + tool_calls: Iterable[QueryMetadataMessageChatCompletionAssistantMessageParamToolCall] + + +class QueryMetadataMessageChatCompletionToolMessageParamContentUnionMember1(TypedDict, total=False): + text: Required[str] + + type: Required[Literal["text"]] + + +class QueryMetadataMessageChatCompletionToolMessageParam(TypedDict, total=False): + content: Required[Union[str, Iterable[QueryMetadataMessageChatCompletionToolMessageParamContentUnionMember1]]] + + role: Required[Literal["tool"]] + + tool_call_id: Required[str] + + +class QueryMetadataMessageChatCompletionFunctionMessageParam(TypedDict, total=False): + content: Required[Optional[str]] + + name: Required[str] + + role: Required[Literal["function"]] + + +QueryMetadataMessage: TypeAlias = Union[ + QueryMetadataMessageChatCompletionDeveloperMessageParam, + QueryMetadataMessageChatCompletionSystemMessageParam, + QueryMetadataMessageChatCompletionUserMessageParam, + QueryMetadataMessageChatCompletionAssistantMessageParam, + QueryMetadataMessageChatCompletionToolMessageParam, + QueryMetadataMessageChatCompletionFunctionMessageParam, +] + + class QueryMetadata(TypedDict, total=False): context: Union[str, List[str], Iterable[object], Iterable[QueryMetadataContextUnionMember3], None] """RAG context used for the query""" @@ -59,3 +284,17 @@ class QueryMetadata(TypedDict, total=False): evaluated_response: Optional[str] """The response being evaluated from the RAG system(before any remediation)""" + + messages: Optional[Iterable[QueryMetadataMessage]] + """Optional message history to provide conversation context for the query. + + Used to rewrite query into a self-contained version of itself. If not provided, + the query will be treated as self-contained. 
+ """ + + original_question: Optional[str] + """The original question that was asked before any rewriting or processing. + + For all non-conversational RAG, original_question should be the same as the + final question seen in Codex. + """ diff --git a/src/codex/types/tlm_prompt_params.py b/src/codex/types/tlm_prompt_params.py index 94536055..3c04bfc4 100644 --- a/src/codex/types/tlm_prompt_params.py +++ b/src/codex/types/tlm_prompt_params.py @@ -40,27 +40,24 @@ class TlmPromptParams(TypedDict, total=False): `use_self_reflection` = True. - **base:** `num_candidate_responses` = 1, `num_consistency_samples` = 0, `use_self_reflection` = False. When using `get_trustworthiness_score()` on - "base" preset, a cheaper self-reflection will be used to compute the - trustworthiness score. - - By default, the TLM uses the "medium" quality preset. The default base LLM - `model` used is "gpt-4o-mini", and `max_tokens` is 512 for all quality presets. - You can set custom values for these arguments regardless of the quality preset - specified. - - Args: model ({"gpt-4o-mini", "gpt-4o", "gpt-4.1", "gpt-4.1-mini", - "gpt-4.1-nano", "o4-mini", "o3", "o3-mini", "o1", "o1-mini", "gpt-4", - "gpt-4.5-preview", "gpt-3.5-turbo-16k", "claude-3.7-sonnet", - "claude-3.5-sonnet-v2", "claude-3.5-sonnet", "claude-3.5-haiku", - "claude-3-haiku", "nova-micro", "nova-lite", "nova-pro"}, default = - "gpt-4o-mini"): Underlying base LLM to use (better models yield better results, - faster models yield faster/cheaper results). - Models still in beta: "o3", "o1", - "o4-mini", "o3-mini", "o1-mini", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", - "gpt-4.5-preview", "claude-3.7-sonnet", "claude-3.5-sonnet-v2", - "claude-3.5-haiku", "nova-micro", "nova-lite", "nova-pro". - Recommended models - for accuracy: "gpt-4.1", "o4-mini", "o3", "claude-3.7-sonnet", - "claude-3.5-sonnet-v2". - Recommended models for low latency/costs: - "gpt-4.1-nano", "nova-micro". + "base" preset, a faster self-reflection is employed. + + By default, TLM uses the: "medium" `quality_preset`, "gpt-4.1-mini" base + `model`, and `max_tokens` is set to 512. You can set custom values for these + arguments regardless of the quality preset specified. + + Args: model ({"gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", "o4-mini", "o3", + "gpt-4.5-preview", "gpt-4o-mini", "gpt-4o", "o3-mini", "o1", "o1-mini", "gpt-4", + "gpt-3.5-turbo-16k", "claude-opus-4-0", "claude-sonnet-4-0", + "claude-3.7-sonnet", "claude-3.5-sonnet-v2", "claude-3.5-sonnet", + "claude-3.5-haiku", "claude-3-haiku", "nova-micro", "nova-lite", "nova-pro"}, + default = "gpt-4.1-mini"): Underlying base LLM to use (better models yield + better results, faster models yield faster results). - Models still in beta: + "o3", "o1", "o4-mini", "o3-mini", "o1-mini", "gpt-4.5-preview", + "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet", + "claude-3.5-haiku". - Recommended models for accuracy: "gpt-4.1", "o4-mini", + "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models for low + latency/costs: "gpt-4.1-nano", "nova-micro". max_tokens (int, default = 512): the maximum number of tokens that can be generated in the TLM response (and in internal trustworthiness scoring). Higher values here may produce better (more reliable) TLM responses and trustworthiness scores, but at higher runtimes/costs. 
@@ -86,7 +83,7 @@ class TlmPromptParams(TypedDict, total=False): similarity_measure ({"semantic", "string", "embedding", "embedding_large", "code", "discrepancy"}, default = "semantic"): how the trustworthiness scoring's consistency algorithm measures similarity between alternative responses considered plausible by the model. - Supported similarity measures include: "semantic" (based on natural language inference), + Supported similarity measures include - "semantic" (based on natural language inference), "embedding" (based on vector embedding similarity), "embedding_large" (based on a larger embedding model), "code" (based on model-based analysis designed to compare code), "discrepancy" (based on model-based analysis of possible discrepancies), and "string" (based on character/word overlap). Set this to "string" for minimal runtimes/costs. diff --git a/src/codex/types/tlm_score_params.py b/src/codex/types/tlm_score_params.py index a0d90175..95bcc4c4 100644 --- a/src/codex/types/tlm_score_params.py +++ b/src/codex/types/tlm_score_params.py @@ -42,27 +42,24 @@ class TlmScoreParams(TypedDict, total=False): `use_self_reflection` = True. - **base:** `num_candidate_responses` = 1, `num_consistency_samples` = 0, `use_self_reflection` = False. When using `get_trustworthiness_score()` on - "base" preset, a cheaper self-reflection will be used to compute the - trustworthiness score. - - By default, the TLM uses the "medium" quality preset. The default base LLM - `model` used is "gpt-4o-mini", and `max_tokens` is 512 for all quality presets. - You can set custom values for these arguments regardless of the quality preset - specified. - - Args: model ({"gpt-4o-mini", "gpt-4o", "gpt-4.1", "gpt-4.1-mini", - "gpt-4.1-nano", "o4-mini", "o3", "o3-mini", "o1", "o1-mini", "gpt-4", - "gpt-4.5-preview", "gpt-3.5-turbo-16k", "claude-3.7-sonnet", - "claude-3.5-sonnet-v2", "claude-3.5-sonnet", "claude-3.5-haiku", - "claude-3-haiku", "nova-micro", "nova-lite", "nova-pro"}, default = - "gpt-4o-mini"): Underlying base LLM to use (better models yield better results, - faster models yield faster/cheaper results). - Models still in beta: "o3", "o1", - "o4-mini", "o3-mini", "o1-mini", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", - "gpt-4.5-preview", "claude-3.7-sonnet", "claude-3.5-sonnet-v2", - "claude-3.5-haiku", "nova-micro", "nova-lite", "nova-pro". - Recommended models - for accuracy: "gpt-4.1", "o4-mini", "o3", "claude-3.7-sonnet", - "claude-3.5-sonnet-v2". - Recommended models for low latency/costs: - "gpt-4.1-nano", "nova-micro". + "base" preset, a faster self-reflection is employed. + + By default, TLM uses the: "medium" `quality_preset`, "gpt-4.1-mini" base + `model`, and `max_tokens` is set to 512. You can set custom values for these + arguments regardless of the quality preset specified. + + Args: model ({"gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", "o4-mini", "o3", + "gpt-4.5-preview", "gpt-4o-mini", "gpt-4o", "o3-mini", "o1", "o1-mini", "gpt-4", + "gpt-3.5-turbo-16k", "claude-opus-4-0", "claude-sonnet-4-0", + "claude-3.7-sonnet", "claude-3.5-sonnet-v2", "claude-3.5-sonnet", + "claude-3.5-haiku", "claude-3-haiku", "nova-micro", "nova-lite", "nova-pro"}, + default = "gpt-4.1-mini"): Underlying base LLM to use (better models yield + better results, faster models yield faster results). - Models still in beta: + "o3", "o1", "o4-mini", "o3-mini", "o1-mini", "gpt-4.5-preview", + "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet", + "claude-3.5-haiku". 
- Recommended models for accuracy: "gpt-4.1", "o4-mini", + "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models for low + latency/costs: "gpt-4.1-nano", "nova-micro". max_tokens (int, default = 512): the maximum number of tokens that can be generated in the TLM response (and in internal trustworthiness scoring). Higher values here may produce better (more reliable) TLM responses and trustworthiness scores, but at higher runtimes/costs. @@ -88,7 +85,7 @@ class TlmScoreParams(TypedDict, total=False): similarity_measure ({"semantic", "string", "embedding", "embedding_large", "code", "discrepancy"}, default = "semantic"): how the trustworthiness scoring's consistency algorithm measures similarity between alternative responses considered plausible by the model. - Supported similarity measures include: "semantic" (based on natural language inference), + Supported similarity measures include - "semantic" (based on natural language inference), "embedding" (based on vector embedding similarity), "embedding_large" (based on a larger embedding model), "code" (based on model-based analysis designed to compare code), "discrepancy" (based on model-based analysis of possible discrepancies), and "string" (based on character/word overlap). Set this to "string" for minimal runtimes/costs. diff --git a/tests/api_resources/projects/test_entries.py b/tests/api_resources/projects/test_entries.py index 32b0452e..eb6fd372 100644 --- a/tests/api_resources/projects/test_entries.py +++ b/tests/api_resources/projects/test_entries.py @@ -406,6 +406,14 @@ def test_method_query_with_all_params(self, client: Codex) -> None: "custom_metadata": {}, "eval_scores": {"foo": 0}, "evaluated_response": "evaluated_response", + "messages": [ + { + "content": "string", + "role": "developer", + "name": "name", + } + ], + "original_question": "original_question", }, x_client_library_version="x-client-library-version", x_integration_type="x-integration-type", @@ -894,6 +902,14 @@ async def test_method_query_with_all_params(self, async_client: AsyncCodex) -> N "custom_metadata": {}, "eval_scores": {"foo": 0}, "evaluated_response": "evaluated_response", + "messages": [ + { + "content": "string", + "role": "developer", + "name": "name", + } + ], + "original_question": "original_question", }, x_client_library_version="x-client-library-version", x_integration_type="x-integration-type", diff --git a/tests/api_resources/test_projects.py b/tests/api_resources/test_projects.py index cf6b3171..c2b5b7db 100644 --- a/tests/api_resources/test_projects.py +++ b/tests/api_resources/test_projects.py @@ -593,6 +593,13 @@ def test_method_validate_with_all_params(self, client: Codex) -> None: custom_eval_thresholds={"foo": 0}, custom_metadata={}, eval_scores={"foo": 0}, + messages=[ + { + "content": "string", + "role": "developer", + "name": "name", + } + ], options={ "custom_eval_criteria": [{}], "log": ["string"], @@ -1231,6 +1238,13 @@ async def test_method_validate_with_all_params(self, async_client: AsyncCodex) - custom_eval_thresholds={"foo": 0}, custom_metadata={}, eval_scores={"foo": 0}, + messages=[ + { + "content": "string", + "role": "developer", + "name": "name", + } + ], options={ "custom_eval_criteria": [{}], "log": ["string"], From 7a729283e11ae9dfb3ea7b337db31f179a5b108c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 20 Jun 2025 23:30:22 +0000 Subject: [PATCH 30/32] feat(api): add new endpoints --- .stats.yml | 4 +- api.md | 72 + 
src/codex/resources/projects/__init__.py | 42 + src/codex/resources/projects/evals.py | 803 +++++++++++ src/codex/resources/projects/projects.py | 202 +++ src/codex/resources/projects/query_logs.py | 741 ++++++++++ src/codex/resources/projects/remediations.py | 1135 +++++++++++++++ src/codex/types/__init__.py | 2 + src/codex/types/project_invite_sme_params.py | 15 + .../types/project_invite_sme_response.py | 11 + src/codex/types/projects/__init__.py | 31 + .../types/projects/eval_create_params.py | 66 + .../types/projects/eval_list_response.py | 69 + .../types/projects/eval_update_params.py | 104 ++ .../query_log_list_by_group_params.py | 41 + .../query_log_list_by_group_response.py | 96 ++ .../projects/query_log_list_groups_params.py | 38 + .../query_log_list_groups_response.py | 91 ++ .../types/projects/query_log_list_params.py | 38 + .../types/projects/query_log_list_response.py | 87 ++ .../projects/query_log_retrieve_response.py | 79 ++ .../query_log_start_remediation_response.py | 33 + .../projects/remediation_create_params.py | 16 + .../projects/remediation_create_response.py | 33 + .../remediation_edit_answer_params.py | 13 + .../remediation_edit_answer_response.py | 33 + .../remediation_edit_draft_answer_params.py | 13 + .../remediation_edit_draft_answer_response.py | 33 + ...iation_get_resolved_logs_count_response.py | 35 + .../types/projects/remediation_list_params.py | 39 + ...remediation_list_resolved_logs_response.py | 85 ++ .../projects/remediation_list_response.py | 41 + .../projects/remediation_pause_response.py | 33 + .../projects/remediation_publish_response.py | 33 + .../projects/remediation_retrieve_response.py | 33 + .../projects/remediation_unpause_response.py | 33 + tests/api_resources/projects/test_evals.py | 679 +++++++++ .../api_resources/projects/test_query_logs.py | 593 ++++++++ .../projects/test_remediations.py | 1224 +++++++++++++++++ tests/api_resources/test_projects.py | 109 ++ 40 files changed, 6876 insertions(+), 2 deletions(-) create mode 100644 src/codex/resources/projects/evals.py create mode 100644 src/codex/resources/projects/query_logs.py create mode 100644 src/codex/resources/projects/remediations.py create mode 100644 src/codex/types/project_invite_sme_params.py create mode 100644 src/codex/types/project_invite_sme_response.py create mode 100644 src/codex/types/projects/eval_create_params.py create mode 100644 src/codex/types/projects/eval_list_response.py create mode 100644 src/codex/types/projects/eval_update_params.py create mode 100644 src/codex/types/projects/query_log_list_by_group_params.py create mode 100644 src/codex/types/projects/query_log_list_by_group_response.py create mode 100644 src/codex/types/projects/query_log_list_groups_params.py create mode 100644 src/codex/types/projects/query_log_list_groups_response.py create mode 100644 src/codex/types/projects/query_log_list_params.py create mode 100644 src/codex/types/projects/query_log_list_response.py create mode 100644 src/codex/types/projects/query_log_retrieve_response.py create mode 100644 src/codex/types/projects/query_log_start_remediation_response.py create mode 100644 src/codex/types/projects/remediation_create_params.py create mode 100644 src/codex/types/projects/remediation_create_response.py create mode 100644 src/codex/types/projects/remediation_edit_answer_params.py create mode 100644 src/codex/types/projects/remediation_edit_answer_response.py create mode 100644 src/codex/types/projects/remediation_edit_draft_answer_params.py create mode 100644 
src/codex/types/projects/remediation_edit_draft_answer_response.py create mode 100644 src/codex/types/projects/remediation_get_resolved_logs_count_response.py create mode 100644 src/codex/types/projects/remediation_list_params.py create mode 100644 src/codex/types/projects/remediation_list_resolved_logs_response.py create mode 100644 src/codex/types/projects/remediation_list_response.py create mode 100644 src/codex/types/projects/remediation_pause_response.py create mode 100644 src/codex/types/projects/remediation_publish_response.py create mode 100644 src/codex/types/projects/remediation_retrieve_response.py create mode 100644 src/codex/types/projects/remediation_unpause_response.py create mode 100644 tests/api_resources/projects/test_evals.py create mode 100644 tests/api_resources/projects/test_query_logs.py create mode 100644 tests/api_resources/projects/test_remediations.py diff --git a/.stats.yml b/.stats.yml index 9317ce1b..bd77f787 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,3 +1,3 @@ -configured_endpoints: 44 +configured_endpoints: 65 openapi_spec_hash: eeb8ebc5600523bdfad046381a929572 -config_hash: 659f65b6ccf5612986f920f7f9abbcb5 +config_hash: 63e520502003839482d0dbeb82132064 diff --git a/api.md b/api.md index 8aac76fd..13e82147 100644 --- a/api.md +++ b/api.md @@ -139,6 +139,7 @@ from codex.types import ( ProjectReturnSchema, ProjectRetrieveResponse, ProjectListResponse, + ProjectInviteSmeResponse, ProjectRetrieveAnalyticsResponse, ProjectValidateResponse, ) @@ -153,6 +154,7 @@ Methods: - client.projects.delete(project_id) -> None - client.projects.export(project_id) -> object - client.projects.increment_queries(project_id, \*\*params) -> object +- client.projects.invite_sme(project_id, \*\*params) -> ProjectInviteSmeResponse - client.projects.retrieve_analytics(project_id, \*\*params) -> ProjectRetrieveAnalyticsResponse - client.projects.validate(project_id, \*\*params) -> ProjectValidateResponse @@ -210,6 +212,76 @@ Methods: - client.projects.clusters.list(project_id, \*\*params) -> SyncOffsetPageClusters[ClusterListResponse] - client.projects.clusters.list_variants(representative_entry_id, \*, project_id) -> ClusterListVariantsResponse +## Evals + +Types: + +```python +from codex.types.projects import EvalListResponse +``` + +Methods: + +- client.projects.evals.create(project_id, \*\*params) -> ProjectReturnSchema +- client.projects.evals.update(path_eval_key, \*, project_id, \*\*params) -> ProjectReturnSchema +- client.projects.evals.list(project_id) -> EvalListResponse +- client.projects.evals.delete(eval_key, \*, project_id) -> ProjectReturnSchema + +## QueryLogs + +Types: + +```python +from codex.types.projects import ( + QueryLogRetrieveResponse, + QueryLogListResponse, + QueryLogListByGroupResponse, + QueryLogListGroupsResponse, + QueryLogStartRemediationResponse, +) +``` + +Methods: + +- client.projects.query_logs.retrieve(query_log_id, \*, project_id) -> QueryLogRetrieveResponse +- client.projects.query_logs.list(project_id, \*\*params) -> QueryLogListResponse +- client.projects.query_logs.list_by_group(project_id, \*\*params) -> QueryLogListByGroupResponse +- client.projects.query_logs.list_groups(project_id, \*\*params) -> QueryLogListGroupsResponse +- client.projects.query_logs.start_remediation(query_log_id, \*, project_id) -> QueryLogStartRemediationResponse + +## Remediations + +Types: + +```python +from codex.types.projects import ( + RemediationCreateResponse, + RemediationRetrieveResponse, + RemediationListResponse, + RemediationEditAnswerResponse, + 
RemediationEditDraftAnswerResponse, + RemediationGetResolvedLogsCountResponse, + RemediationListResolvedLogsResponse, + RemediationPauseResponse, + RemediationPublishResponse, + RemediationUnpauseResponse, +) +``` + +Methods: + +- client.projects.remediations.create(project_id, \*\*params) -> RemediationCreateResponse +- client.projects.remediations.retrieve(remediation_id, \*, project_id) -> RemediationRetrieveResponse +- client.projects.remediations.list(project_id, \*\*params) -> RemediationListResponse +- client.projects.remediations.delete(remediation_id, \*, project_id) -> None +- client.projects.remediations.edit_answer(remediation_id, \*, project_id, \*\*params) -> RemediationEditAnswerResponse +- client.projects.remediations.edit_draft_answer(remediation_id, \*, project_id, \*\*params) -> RemediationEditDraftAnswerResponse +- client.projects.remediations.get_resolved_logs_count(remediation_id, \*, project_id) -> RemediationGetResolvedLogsCountResponse +- client.projects.remediations.list_resolved_logs(remediation_id, \*, project_id) -> RemediationListResolvedLogsResponse +- client.projects.remediations.pause(remediation_id, \*, project_id) -> RemediationPauseResponse +- client.projects.remediations.publish(remediation_id, \*, project_id) -> RemediationPublishResponse +- client.projects.remediations.unpause(remediation_id, \*, project_id) -> RemediationUnpauseResponse + # Tlm Types: diff --git a/src/codex/resources/projects/__init__.py b/src/codex/resources/projects/__init__.py index 2c0595d2..178855ac 100644 --- a/src/codex/resources/projects/__init__.py +++ b/src/codex/resources/projects/__init__.py @@ -1,5 +1,13 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +from .evals import ( + EvalsResource, + AsyncEvalsResource, + EvalsResourceWithRawResponse, + AsyncEvalsResourceWithRawResponse, + EvalsResourceWithStreamingResponse, + AsyncEvalsResourceWithStreamingResponse, +) from .entries import ( EntriesResource, AsyncEntriesResource, @@ -24,6 +32,14 @@ ProjectsResourceWithStreamingResponse, AsyncProjectsResourceWithStreamingResponse, ) +from .query_logs import ( + QueryLogsResource, + AsyncQueryLogsResource, + QueryLogsResourceWithRawResponse, + AsyncQueryLogsResourceWithRawResponse, + QueryLogsResourceWithStreamingResponse, + AsyncQueryLogsResourceWithStreamingResponse, +) from .access_keys import ( AccessKeysResource, AsyncAccessKeysResource, @@ -32,6 +48,14 @@ AccessKeysResourceWithStreamingResponse, AsyncAccessKeysResourceWithStreamingResponse, ) +from .remediations import ( + RemediationsResource, + AsyncRemediationsResource, + RemediationsResourceWithRawResponse, + AsyncRemediationsResourceWithRawResponse, + RemediationsResourceWithStreamingResponse, + AsyncRemediationsResourceWithStreamingResponse, +) __all__ = [ "AccessKeysResource", @@ -52,6 +76,24 @@ "AsyncClustersResourceWithRawResponse", "ClustersResourceWithStreamingResponse", "AsyncClustersResourceWithStreamingResponse", + "EvalsResource", + "AsyncEvalsResource", + "EvalsResourceWithRawResponse", + "AsyncEvalsResourceWithRawResponse", + "EvalsResourceWithStreamingResponse", + "AsyncEvalsResourceWithStreamingResponse", + "QueryLogsResource", + "AsyncQueryLogsResource", + "QueryLogsResourceWithRawResponse", + "AsyncQueryLogsResourceWithRawResponse", + "QueryLogsResourceWithStreamingResponse", + "AsyncQueryLogsResourceWithStreamingResponse", + "RemediationsResource", + "AsyncRemediationsResource", + "RemediationsResourceWithRawResponse", + 
"AsyncRemediationsResourceWithRawResponse", + "RemediationsResourceWithStreamingResponse", + "AsyncRemediationsResourceWithStreamingResponse", "ProjectsResource", "AsyncProjectsResource", "ProjectsResourceWithRawResponse", diff --git a/src/codex/resources/projects/evals.py b/src/codex/resources/projects/evals.py new file mode 100644 index 00000000..1a6a6876 --- /dev/null +++ b/src/codex/resources/projects/evals.py @@ -0,0 +1,803 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional +from typing_extensions import Literal, overload + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import required_args, maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.projects import eval_create_params, eval_update_params +from ...types.project_return_schema import ProjectReturnSchema +from ...types.projects.eval_list_response import EvalListResponse + +__all__ = ["EvalsResource", "AsyncEvalsResource"] + + +class EvalsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> EvalsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/cleanlab/codex-python#accessing-raw-response-data-eg-headers + """ + return EvalsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> EvalsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/cleanlab/codex-python#with_streaming_response + """ + return EvalsResourceWithStreamingResponse(self) + + def create( + self, + project_id: str, + *, + criteria: str, + eval_key: str, + name: str, + context_identifier: Optional[str] | NotGiven = NOT_GIVEN, + enabled: bool | NotGiven = NOT_GIVEN, + is_default: bool | NotGiven = NOT_GIVEN, + priority: Optional[int] | NotGiven = NOT_GIVEN, + query_identifier: Optional[str] | NotGiven = NOT_GIVEN, + response_identifier: Optional[str] | NotGiven = NOT_GIVEN, + should_escalate: bool | NotGiven = NOT_GIVEN, + threshold: float | NotGiven = NOT_GIVEN, + threshold_direction: Literal["above", "below"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ProjectReturnSchema: + """ + Create a new custom eval for a project. 
+ + Args: + criteria: The evaluation criteria text that describes what aspect is being evaluated and + how + + eval_key: Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + + name: Display name/label for the evaluation metric + + context_identifier: The exact string used in your evaluation criteria to reference the retrieved + context. + + enabled: Allows the evaluation to be disabled without removing it + + is_default: Whether the eval is a default, built-in eval or a custom eval + + priority: Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + + query_identifier: The exact string used in your evaluation criteria to reference the user's query. + + response_identifier: The exact string used in your evaluation criteria to reference the RAG/LLM + response. + + should_escalate: If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + + threshold: Threshold value that determines if the evaluation fails + + threshold_direction: Whether the evaluation fails when score is above or below the threshold + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + return self._post( + f"/api/projects/{project_id}/evals", + body=maybe_transform( + { + "criteria": criteria, + "eval_key": eval_key, + "name": name, + "context_identifier": context_identifier, + "enabled": enabled, + "is_default": is_default, + "priority": priority, + "query_identifier": query_identifier, + "response_identifier": response_identifier, + "should_escalate": should_escalate, + "threshold": threshold, + "threshold_direction": threshold_direction, + }, + eval_create_params.EvalCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ProjectReturnSchema, + ) + + @overload + def update( + self, + path_eval_key: str, + *, + project_id: str, + criteria: str, + body_eval_key: str, + name: str, + context_identifier: Optional[str] | NotGiven = NOT_GIVEN, + enabled: bool | NotGiven = NOT_GIVEN, + is_default: bool | NotGiven = NOT_GIVEN, + priority: Optional[int] | NotGiven = NOT_GIVEN, + query_identifier: Optional[str] | NotGiven = NOT_GIVEN, + response_identifier: Optional[str] | NotGiven = NOT_GIVEN, + should_escalate: bool | NotGiven = NOT_GIVEN, + threshold: float | NotGiven = NOT_GIVEN, + threshold_direction: Literal["above", "below"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ProjectReturnSchema: + """ + Update an existing eval for a project. 
+ + Args: + criteria: The evaluation criteria text that describes what aspect is being evaluated and + how + + body_eval_key: Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + + name: Display name/label for the evaluation metric + + context_identifier: The exact string used in your evaluation criteria to reference the retrieved + context. + + enabled: Allows the evaluation to be disabled without removing it + + is_default: Whether the eval is a default, built-in eval or a custom eval + + priority: Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + + query_identifier: The exact string used in your evaluation criteria to reference the user's query. + + response_identifier: The exact string used in your evaluation criteria to reference the RAG/LLM + response. + + should_escalate: If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + + threshold: Threshold value that determines if the evaluation fails + + threshold_direction: Whether the evaluation fails when score is above or below the threshold + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def update( + self, + path_eval_key: str, + *, + project_id: str, + body_eval_key: str, + enabled: bool | NotGiven = NOT_GIVEN, + priority: Optional[int] | NotGiven = NOT_GIVEN, + should_escalate: bool | NotGiven = NOT_GIVEN, + threshold: float | NotGiven = NOT_GIVEN, + threshold_direction: Literal["above", "below"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ProjectReturnSchema: + """ + Update an existing eval for a project. + + Args: + body_eval_key: Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + + enabled: Allows the evaluation to be disabled without removing it + + priority: Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + + should_escalate: If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + + threshold: Threshold value that determines if the evaluation fails + + threshold_direction: Whether the evaluation fails when score is above or below the threshold + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
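As a usage sketch of the new evals resource (method paths and field names come from the signatures in this file and the `api.md` entries above; the project ID, criteria text, and eval key are placeholders):

```python
from codex import Codex

client = Codex()
project_id = "proj_123"  # placeholder

# Register a custom eval that fails (and can escalate to an SME) when its
# score falls below 0.7.
client.projects.evals.create(
    project_id,
    criteria="Check whether the response is fully supported by the retrieved context.",
    eval_key="grounded_in_context",
    name="Grounded in Context",
    threshold=0.7,
    threshold_direction="below",
    should_escalate=True,
)

# Later, adjust only the threshold via the second update overload.
client.projects.evals.update(
    "grounded_in_context",  # path_eval_key
    project_id=project_id,
    body_eval_key="grounded_in_context",
    threshold=0.8,
)
```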
+ + @required_args(["project_id", "criteria", "body_eval_key", "name"], ["project_id", "body_eval_key"]) + def update( + self, + path_eval_key: str, + *, + project_id: str, + criteria: str | NotGiven = NOT_GIVEN, + body_eval_key: str, + name: str | NotGiven = NOT_GIVEN, + context_identifier: Optional[str] | NotGiven = NOT_GIVEN, + enabled: bool | NotGiven = NOT_GIVEN, + is_default: bool | NotGiven = NOT_GIVEN, + priority: Optional[int] | NotGiven = NOT_GIVEN, + query_identifier: Optional[str] | NotGiven = NOT_GIVEN, + response_identifier: Optional[str] | NotGiven = NOT_GIVEN, + should_escalate: bool | NotGiven = NOT_GIVEN, + threshold: float | NotGiven = NOT_GIVEN, + threshold_direction: Literal["above", "below"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ProjectReturnSchema: + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not path_eval_key: + raise ValueError(f"Expected a non-empty value for `path_eval_key` but received {path_eval_key!r}") + return self._put( + f"/api/projects/{project_id}/evals/{path_eval_key}", + body=maybe_transform( + { + "criteria": criteria, + "body_eval_key": body_eval_key, + "name": name, + "context_identifier": context_identifier, + "enabled": enabled, + "is_default": is_default, + "priority": priority, + "query_identifier": query_identifier, + "response_identifier": response_identifier, + "should_escalate": should_escalate, + "threshold": threshold, + "threshold_direction": threshold_direction, + }, + eval_update_params.EvalUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ProjectReturnSchema, + ) + + def list( + self, + project_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvalListResponse: + """ + Get the evaluations config for a project. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + return self._get( + f"/api/projects/{project_id}/evals", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EvalListResponse, + ) + + def delete( + self, + eval_key: str, + *, + project_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ProjectReturnSchema: + """ + Remove a custom eval for a project. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not eval_key: + raise ValueError(f"Expected a non-empty value for `eval_key` but received {eval_key!r}") + return self._delete( + f"/api/projects/{project_id}/evals/{eval_key}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ProjectReturnSchema, + ) + + +class AsyncEvalsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncEvalsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/cleanlab/codex-python#accessing-raw-response-data-eg-headers + """ + return AsyncEvalsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncEvalsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/cleanlab/codex-python#with_streaming_response + """ + return AsyncEvalsResourceWithStreamingResponse(self) + + async def create( + self, + project_id: str, + *, + criteria: str, + eval_key: str, + name: str, + context_identifier: Optional[str] | NotGiven = NOT_GIVEN, + enabled: bool | NotGiven = NOT_GIVEN, + is_default: bool | NotGiven = NOT_GIVEN, + priority: Optional[int] | NotGiven = NOT_GIVEN, + query_identifier: Optional[str] | NotGiven = NOT_GIVEN, + response_identifier: Optional[str] | NotGiven = NOT_GIVEN, + should_escalate: bool | NotGiven = NOT_GIVEN, + threshold: float | NotGiven = NOT_GIVEN, + threshold_direction: Literal["above", "below"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ProjectReturnSchema: + """ + Create a new custom eval for a project. + + Args: + criteria: The evaluation criteria text that describes what aspect is being evaluated and + how + + eval_key: Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + + name: Display name/label for the evaluation metric + + context_identifier: The exact string used in your evaluation criteria to reference the retrieved + context. 
+ + enabled: Allows the evaluation to be disabled without removing it + + is_default: Whether the eval is a default, built-in eval or a custom eval + + priority: Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + + query_identifier: The exact string used in your evaluation criteria to reference the user's query. + + response_identifier: The exact string used in your evaluation criteria to reference the RAG/LLM + response. + + should_escalate: If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + + threshold: Threshold value that determines if the evaluation fails + + threshold_direction: Whether the evaluation fails when score is above or below the threshold + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + return await self._post( + f"/api/projects/{project_id}/evals", + body=await async_maybe_transform( + { + "criteria": criteria, + "eval_key": eval_key, + "name": name, + "context_identifier": context_identifier, + "enabled": enabled, + "is_default": is_default, + "priority": priority, + "query_identifier": query_identifier, + "response_identifier": response_identifier, + "should_escalate": should_escalate, + "threshold": threshold, + "threshold_direction": threshold_direction, + }, + eval_create_params.EvalCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ProjectReturnSchema, + ) + + @overload + async def update( + self, + path_eval_key: str, + *, + project_id: str, + criteria: str, + body_eval_key: str, + name: str, + context_identifier: Optional[str] | NotGiven = NOT_GIVEN, + enabled: bool | NotGiven = NOT_GIVEN, + is_default: bool | NotGiven = NOT_GIVEN, + priority: Optional[int] | NotGiven = NOT_GIVEN, + query_identifier: Optional[str] | NotGiven = NOT_GIVEN, + response_identifier: Optional[str] | NotGiven = NOT_GIVEN, + should_escalate: bool | NotGiven = NOT_GIVEN, + threshold: float | NotGiven = NOT_GIVEN, + threshold_direction: Literal["above", "below"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ProjectReturnSchema: + """ + Update an existing eval for a project. + + Args: + criteria: The evaluation criteria text that describes what aspect is being evaluated and + how + + body_eval_key: Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + + name: Display name/label for the evaluation metric + + context_identifier: The exact string used in your evaluation criteria to reference the retrieved + context. 
+ + enabled: Allows the evaluation to be disabled without removing it + + is_default: Whether the eval is a default, built-in eval or a custom eval + + priority: Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + + query_identifier: The exact string used in your evaluation criteria to reference the user's query. + + response_identifier: The exact string used in your evaluation criteria to reference the RAG/LLM + response. + + should_escalate: If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + + threshold: Threshold value that determines if the evaluation fails + + threshold_direction: Whether the evaluation fails when score is above or below the threshold + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def update( + self, + path_eval_key: str, + *, + project_id: str, + body_eval_key: str, + enabled: bool | NotGiven = NOT_GIVEN, + priority: Optional[int] | NotGiven = NOT_GIVEN, + should_escalate: bool | NotGiven = NOT_GIVEN, + threshold: float | NotGiven = NOT_GIVEN, + threshold_direction: Literal["above", "below"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ProjectReturnSchema: + """ + Update an existing eval for a project. + + Args: + body_eval_key: Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + + enabled: Allows the evaluation to be disabled without removing it + + priority: Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + + should_escalate: If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + + threshold: Threshold value that determines if the evaluation fails + + threshold_direction: Whether the evaluation fails when score is above or below the threshold + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
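The async resource mirrors the sync one; a brief sketch with `AsyncCodex` (the project ID and eval key are placeholders):

```python
import asyncio

from codex import AsyncCodex


async def main() -> None:
    client = AsyncCodex()

    # Fetch the current eval config, then disable one eval without
    # changing its other settings.
    config = await client.projects.evals.list("proj_123")
    print(config)

    await client.projects.evals.update(
        "grounded_in_context",
        project_id="proj_123",
        body_eval_key="grounded_in_context",
        enabled=False,
    )


asyncio.run(main())
```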
+ + @required_args(["project_id", "criteria", "body_eval_key", "name"], ["project_id", "body_eval_key"]) + async def update( + self, + path_eval_key: str, + *, + project_id: str, + criteria: str | NotGiven = NOT_GIVEN, + body_eval_key: str, + name: str | NotGiven = NOT_GIVEN, + context_identifier: Optional[str] | NotGiven = NOT_GIVEN, + enabled: bool | NotGiven = NOT_GIVEN, + is_default: bool | NotGiven = NOT_GIVEN, + priority: Optional[int] | NotGiven = NOT_GIVEN, + query_identifier: Optional[str] | NotGiven = NOT_GIVEN, + response_identifier: Optional[str] | NotGiven = NOT_GIVEN, + should_escalate: bool | NotGiven = NOT_GIVEN, + threshold: float | NotGiven = NOT_GIVEN, + threshold_direction: Literal["above", "below"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ProjectReturnSchema: + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not path_eval_key: + raise ValueError(f"Expected a non-empty value for `path_eval_key` but received {path_eval_key!r}") + return await self._put( + f"/api/projects/{project_id}/evals/{path_eval_key}", + body=await async_maybe_transform( + { + "criteria": criteria, + "body_eval_key": body_eval_key, + "name": name, + "context_identifier": context_identifier, + "enabled": enabled, + "is_default": is_default, + "priority": priority, + "query_identifier": query_identifier, + "response_identifier": response_identifier, + "should_escalate": should_escalate, + "threshold": threshold, + "threshold_direction": threshold_direction, + }, + eval_update_params.EvalUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ProjectReturnSchema, + ) + + async def list( + self, + project_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvalListResponse: + """ + Get the evaluations config for a project. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + return await self._get( + f"/api/projects/{project_id}/evals", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EvalListResponse, + ) + + async def delete( + self, + eval_key: str, + *, + project_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ProjectReturnSchema: + """ + Remove a custom eval for a project. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not eval_key: + raise ValueError(f"Expected a non-empty value for `eval_key` but received {eval_key!r}") + return await self._delete( + f"/api/projects/{project_id}/evals/{eval_key}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ProjectReturnSchema, + ) + + +class EvalsResourceWithRawResponse: + def __init__(self, evals: EvalsResource) -> None: + self._evals = evals + + self.create = to_raw_response_wrapper( + evals.create, + ) + self.update = to_raw_response_wrapper( + evals.update, + ) + self.list = to_raw_response_wrapper( + evals.list, + ) + self.delete = to_raw_response_wrapper( + evals.delete, + ) + + +class AsyncEvalsResourceWithRawResponse: + def __init__(self, evals: AsyncEvalsResource) -> None: + self._evals = evals + + self.create = async_to_raw_response_wrapper( + evals.create, + ) + self.update = async_to_raw_response_wrapper( + evals.update, + ) + self.list = async_to_raw_response_wrapper( + evals.list, + ) + self.delete = async_to_raw_response_wrapper( + evals.delete, + ) + + +class EvalsResourceWithStreamingResponse: + def __init__(self, evals: EvalsResource) -> None: + self._evals = evals + + self.create = to_streamed_response_wrapper( + evals.create, + ) + self.update = to_streamed_response_wrapper( + evals.update, + ) + self.list = to_streamed_response_wrapper( + evals.list, + ) + self.delete = to_streamed_response_wrapper( + evals.delete, + ) + + +class AsyncEvalsResourceWithStreamingResponse: + def __init__(self, evals: AsyncEvalsResource) -> None: + self._evals = evals + + self.create = async_to_streamed_response_wrapper( + evals.create, + ) + self.update = async_to_streamed_response_wrapper( + evals.update, + ) + self.list = async_to_streamed_response_wrapper( + evals.list, + ) + self.delete = async_to_streamed_response_wrapper( + evals.delete, + ) diff --git a/src/codex/resources/projects/projects.py b/src/codex/resources/projects/projects.py index 11b95d4d..22b5caff 100644 --- a/src/codex/resources/projects/projects.py +++ b/src/codex/resources/projects/projects.py @@ -8,11 +8,20 @@ import httpx +from .evals import ( + EvalsResource, + AsyncEvalsResource, + EvalsResourceWithRawResponse, + AsyncEvalsResourceWithRawResponse, + EvalsResourceWithStreamingResponse, + AsyncEvalsResourceWithStreamingResponse, +) from ...types import ( project_list_params, project_create_params, project_update_params, project_validate_params, + project_invite_sme_params, project_increment_queries_params, project_retrieve_analytics_params, ) @@ -35,6 +44,14 @@ AsyncClustersResourceWithStreamingResponse, ) from ..._compat import cached_property +from .query_logs import ( + QueryLogsResource, + AsyncQueryLogsResource, + QueryLogsResourceWithRawResponse, + 
AsyncQueryLogsResourceWithRawResponse, + QueryLogsResourceWithStreamingResponse, + AsyncQueryLogsResourceWithStreamingResponse, +) from ..._resource import SyncAPIResource, AsyncAPIResource from ..._response import ( to_raw_response_wrapper, @@ -50,11 +67,20 @@ AccessKeysResourceWithStreamingResponse, AsyncAccessKeysResourceWithStreamingResponse, ) +from .remediations import ( + RemediationsResource, + AsyncRemediationsResource, + RemediationsResourceWithRawResponse, + AsyncRemediationsResourceWithRawResponse, + RemediationsResourceWithStreamingResponse, + AsyncRemediationsResourceWithStreamingResponse, +) from ..._base_client import make_request_options from ...types.project_list_response import ProjectListResponse from ...types.project_return_schema import ProjectReturnSchema from ...types.project_retrieve_response import ProjectRetrieveResponse from ...types.project_validate_response import ProjectValidateResponse +from ...types.project_invite_sme_response import ProjectInviteSmeResponse from ...types.project_retrieve_analytics_response import ProjectRetrieveAnalyticsResponse __all__ = ["ProjectsResource", "AsyncProjectsResource"] @@ -73,6 +99,18 @@ def entries(self) -> EntriesResource: def clusters(self) -> ClustersResource: return ClustersResource(self._client) + @cached_property + def evals(self) -> EvalsResource: + return EvalsResource(self._client) + + @cached_property + def query_logs(self) -> QueryLogsResource: + return QueryLogsResource(self._client) + + @cached_property + def remediations(self) -> RemediationsResource: + return RemediationsResource(self._client) + @cached_property def with_raw_response(self) -> ProjectsResourceWithRawResponse: """ @@ -374,6 +412,52 @@ def increment_queries( cast_to=object, ) + def invite_sme( + self, + project_id: str, + *, + email: str, + page_type: Literal["query_log", "remediation"], + url_query_string: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ProjectInviteSmeResponse: + """ + Invite a subject matter expert to view a specific query log or remediation. 
+ + Returns: SMERemediationNotificationResponse with status and notification details + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + return self._post( + f"/api/projects/{project_id}/notifications", + body=maybe_transform( + { + "email": email, + "page_type": page_type, + "url_query_string": url_query_string, + }, + project_invite_sme_params.ProjectInviteSmeParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ProjectInviteSmeResponse, + ) + def retrieve_analytics( self, project_id: str, @@ -623,6 +707,18 @@ def entries(self) -> AsyncEntriesResource: def clusters(self) -> AsyncClustersResource: return AsyncClustersResource(self._client) + @cached_property + def evals(self) -> AsyncEvalsResource: + return AsyncEvalsResource(self._client) + + @cached_property + def query_logs(self) -> AsyncQueryLogsResource: + return AsyncQueryLogsResource(self._client) + + @cached_property + def remediations(self) -> AsyncRemediationsResource: + return AsyncRemediationsResource(self._client) + @cached_property def with_raw_response(self) -> AsyncProjectsResourceWithRawResponse: """ @@ -926,6 +1022,52 @@ async def increment_queries( cast_to=object, ) + async def invite_sme( + self, + project_id: str, + *, + email: str, + page_type: Literal["query_log", "remediation"], + url_query_string: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ProjectInviteSmeResponse: + """ + Invite a subject matter expert to view a specific query log or remediation. 
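A sketch of the new SME-invitation endpoint using the synchronous client; `Codex` as the client class and the query-string format are assumptions, and the IDs are placeholders:

from codex import Codex  # assumed export name

client = Codex()  # assumes credentials come from the environment
notification = client.projects.invite_sme(
    "PROJECT_ID",  # placeholder
    email="expert@example.com",
    page_type="remediation",  # or "query_log"
    url_query_string="remediation_id=REMEDIATION_ID",  # placeholder query string
)
print(notification)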
+ + Returns: SMERemediationNotificationResponse with status and notification details + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + return await self._post( + f"/api/projects/{project_id}/notifications", + body=await async_maybe_transform( + { + "email": email, + "page_type": page_type, + "url_query_string": url_query_string, + }, + project_invite_sme_params.ProjectInviteSmeParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ProjectInviteSmeResponse, + ) + async def retrieve_analytics( self, project_id: str, @@ -1189,6 +1331,9 @@ def __init__(self, projects: ProjectsResource) -> None: projects.increment_queries # pyright: ignore[reportDeprecated], ) ) + self.invite_sme = to_raw_response_wrapper( + projects.invite_sme, + ) self.retrieve_analytics = to_raw_response_wrapper( projects.retrieve_analytics, ) @@ -1208,6 +1353,18 @@ def entries(self) -> EntriesResourceWithRawResponse: def clusters(self) -> ClustersResourceWithRawResponse: return ClustersResourceWithRawResponse(self._projects.clusters) + @cached_property + def evals(self) -> EvalsResourceWithRawResponse: + return EvalsResourceWithRawResponse(self._projects.evals) + + @cached_property + def query_logs(self) -> QueryLogsResourceWithRawResponse: + return QueryLogsResourceWithRawResponse(self._projects.query_logs) + + @cached_property + def remediations(self) -> RemediationsResourceWithRawResponse: + return RemediationsResourceWithRawResponse(self._projects.remediations) + class AsyncProjectsResourceWithRawResponse: def __init__(self, projects: AsyncProjectsResource) -> None: @@ -1236,6 +1393,9 @@ def __init__(self, projects: AsyncProjectsResource) -> None: projects.increment_queries # pyright: ignore[reportDeprecated], ) ) + self.invite_sme = async_to_raw_response_wrapper( + projects.invite_sme, + ) self.retrieve_analytics = async_to_raw_response_wrapper( projects.retrieve_analytics, ) @@ -1255,6 +1415,18 @@ def entries(self) -> AsyncEntriesResourceWithRawResponse: def clusters(self) -> AsyncClustersResourceWithRawResponse: return AsyncClustersResourceWithRawResponse(self._projects.clusters) + @cached_property + def evals(self) -> AsyncEvalsResourceWithRawResponse: + return AsyncEvalsResourceWithRawResponse(self._projects.evals) + + @cached_property + def query_logs(self) -> AsyncQueryLogsResourceWithRawResponse: + return AsyncQueryLogsResourceWithRawResponse(self._projects.query_logs) + + @cached_property + def remediations(self) -> AsyncRemediationsResourceWithRawResponse: + return AsyncRemediationsResourceWithRawResponse(self._projects.remediations) + class ProjectsResourceWithStreamingResponse: def __init__(self, projects: ProjectsResource) -> None: @@ -1283,6 +1455,9 @@ def __init__(self, projects: ProjectsResource) -> None: projects.increment_queries # pyright: ignore[reportDeprecated], ) ) + self.invite_sme = to_streamed_response_wrapper( + projects.invite_sme, + ) self.retrieve_analytics = to_streamed_response_wrapper( projects.retrieve_analytics, ) @@ -1302,6 +1477,18 @@ def entries(self) -> EntriesResourceWithStreamingResponse: def clusters(self) -> ClustersResourceWithStreamingResponse: return 
ClustersResourceWithStreamingResponse(self._projects.clusters) + @cached_property + def evals(self) -> EvalsResourceWithStreamingResponse: + return EvalsResourceWithStreamingResponse(self._projects.evals) + + @cached_property + def query_logs(self) -> QueryLogsResourceWithStreamingResponse: + return QueryLogsResourceWithStreamingResponse(self._projects.query_logs) + + @cached_property + def remediations(self) -> RemediationsResourceWithStreamingResponse: + return RemediationsResourceWithStreamingResponse(self._projects.remediations) + class AsyncProjectsResourceWithStreamingResponse: def __init__(self, projects: AsyncProjectsResource) -> None: @@ -1330,6 +1517,9 @@ def __init__(self, projects: AsyncProjectsResource) -> None: projects.increment_queries # pyright: ignore[reportDeprecated], ) ) + self.invite_sme = async_to_streamed_response_wrapper( + projects.invite_sme, + ) self.retrieve_analytics = async_to_streamed_response_wrapper( projects.retrieve_analytics, ) @@ -1348,3 +1538,15 @@ def entries(self) -> AsyncEntriesResourceWithStreamingResponse: @cached_property def clusters(self) -> AsyncClustersResourceWithStreamingResponse: return AsyncClustersResourceWithStreamingResponse(self._projects.clusters) + + @cached_property + def evals(self) -> AsyncEvalsResourceWithStreamingResponse: + return AsyncEvalsResourceWithStreamingResponse(self._projects.evals) + + @cached_property + def query_logs(self) -> AsyncQueryLogsResourceWithStreamingResponse: + return AsyncQueryLogsResourceWithStreamingResponse(self._projects.query_logs) + + @cached_property + def remediations(self) -> AsyncRemediationsResourceWithStreamingResponse: + return AsyncRemediationsResourceWithStreamingResponse(self._projects.remediations) diff --git a/src/codex/resources/projects/query_logs.py b/src/codex/resources/projects/query_logs.py new file mode 100644 index 00000000..32ec7390 --- /dev/null +++ b/src/codex/resources/projects/query_logs.py @@ -0,0 +1,741 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union, Optional +from datetime import datetime +from typing_extensions import Literal + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.projects import query_log_list_params, query_log_list_groups_params, query_log_list_by_group_params +from ...types.projects.query_log_list_response import QueryLogListResponse +from ...types.projects.query_log_retrieve_response import QueryLogRetrieveResponse +from ...types.projects.query_log_list_groups_response import QueryLogListGroupsResponse +from ...types.projects.query_log_list_by_group_response import QueryLogListByGroupResponse +from ...types.projects.query_log_start_remediation_response import QueryLogStartRemediationResponse + +__all__ = ["QueryLogsResource", "AsyncQueryLogsResource"] + + +class QueryLogsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> QueryLogsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
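The cached properties added above hang the new sub-resources off `client.projects`; a quick sketch of how they are reached (client class name assumed):

from codex import Codex  # assumed export name

client = Codex()  # assumes credentials come from the environment
evals = client.projects.evals                # new in this patch
query_logs = client.projects.query_logs      # new in this patch
remediations = client.projects.remediations  # new in this patch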
+ + For more information, see https://www.github.com/cleanlab/codex-python#accessing-raw-response-data-eg-headers + """ + return QueryLogsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> QueryLogsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/cleanlab/codex-python#with_streaming_response + """ + return QueryLogsResourceWithStreamingResponse(self) + + def retrieve( + self, + query_log_id: str, + *, + project_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> QueryLogRetrieveResponse: + """ + Get Query Log Route + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not query_log_id: + raise ValueError(f"Expected a non-empty value for `query_log_id` but received {query_log_id!r}") + return self._get( + f"/api/projects/{project_id}/query_logs/{query_log_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=QueryLogRetrieveResponse, + ) + + def list( + self, + project_id: str, + *, + created_at_end: Union[str, datetime, None] | NotGiven = NOT_GIVEN, + created_at_start: Union[str, datetime, None] | NotGiven = NOT_GIVEN, + custom_metadata: Optional[str] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + offset: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + primary_eval_issue: Optional[ + List[Literal["hallucination", "search_failure", "unhelpful", "difficult_query", "unsupported"]] + ] + | NotGiven = NOT_GIVEN, + sort: Optional[Literal["created_at", "primary_eval_issue_score"]] | NotGiven = NOT_GIVEN, + was_cache_hit: Optional[bool] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> QueryLogListResponse: + """ + List query logs by project ID. 
+ + Args: + created_at_end: Filter logs created at or before this timestamp + + created_at_start: Filter logs created at or after this timestamp + + custom_metadata: Filter by custom metadata as JSON string: {"key1": "value1", "key2": "value2"} + + primary_eval_issue: Filter logs that have ANY of these primary evaluation issues (OR operation) + + was_cache_hit: Filter by cache hit status + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + return self._get( + f"/api/projects/{project_id}/query_logs/", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "created_at_end": created_at_end, + "created_at_start": created_at_start, + "custom_metadata": custom_metadata, + "limit": limit, + "offset": offset, + "order": order, + "primary_eval_issue": primary_eval_issue, + "sort": sort, + "was_cache_hit": was_cache_hit, + }, + query_log_list_params.QueryLogListParams, + ), + ), + cast_to=QueryLogListResponse, + ) + + def list_by_group( + self, + project_id: str, + *, + created_at_end: Union[str, datetime, None] | NotGiven = NOT_GIVEN, + created_at_start: Union[str, datetime, None] | NotGiven = NOT_GIVEN, + custom_metadata: Optional[str] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + offset: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + primary_eval_issue: Optional[ + List[Literal["hallucination", "search_failure", "unhelpful", "difficult_query", "unsupported"]] + ] + | NotGiven = NOT_GIVEN, + remediation_ids: List[str] | NotGiven = NOT_GIVEN, + sort: Optional[Literal["created_at", "primary_eval_issue_score"]] | NotGiven = NOT_GIVEN, + was_cache_hit: Optional[bool] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> QueryLogListByGroupResponse: + """ + List query log group by remediation ID. 
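A sketch of listing query logs with the filter, sort, and pagination parameters defined above, then fetching a single log by ID (client class and IDs are placeholders/assumptions):

from datetime import datetime, timezone

from codex import Codex  # assumed export name

client = Codex()  # assumes credentials come from the environment
logs = client.projects.query_logs.list(
    "PROJECT_ID",  # placeholder
    created_at_start=datetime(2025, 5, 1, tzinfo=timezone.utc),
    primary_eval_issue=["hallucination", "search_failure"],
    was_cache_hit=False,
    sort="primary_eval_issue_score",
    order="desc",
    limit=25,
)
# Individual logs can be fetched by ID once you have one.
log = client.projects.query_logs.retrieve("QUERY_LOG_ID", project_id="PROJECT_ID")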
+ + Args: + created_at_end: Filter logs created at or before this timestamp + + created_at_start: Filter logs created at or after this timestamp + + custom_metadata: Filter by custom metadata as JSON string: {"key1": "value1", "key2": "value2"} + + primary_eval_issue: Filter logs that have ANY of these primary evaluation issues (OR operation) + + remediation_ids: List of groups to list child logs for + + was_cache_hit: Filter by cache hit status + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + return self._get( + f"/api/projects/{project_id}/query_logs/logs_by_group", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "created_at_end": created_at_end, + "created_at_start": created_at_start, + "custom_metadata": custom_metadata, + "limit": limit, + "offset": offset, + "order": order, + "primary_eval_issue": primary_eval_issue, + "remediation_ids": remediation_ids, + "sort": sort, + "was_cache_hit": was_cache_hit, + }, + query_log_list_by_group_params.QueryLogListByGroupParams, + ), + ), + cast_to=QueryLogListByGroupResponse, + ) + + def list_groups( + self, + project_id: str, + *, + created_at_end: Union[str, datetime, None] | NotGiven = NOT_GIVEN, + created_at_start: Union[str, datetime, None] | NotGiven = NOT_GIVEN, + custom_metadata: Optional[str] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + offset: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + primary_eval_issue: Optional[ + List[Literal["hallucination", "search_failure", "unhelpful", "difficult_query", "unsupported"]] + ] + | NotGiven = NOT_GIVEN, + sort: Optional[Literal["created_at", "primary_eval_issue_score", "total_count", "custom_rank"]] + | NotGiven = NOT_GIVEN, + was_cache_hit: Optional[bool] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> QueryLogListGroupsResponse: + """ + List query log groups by project ID. 
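`list_by_group` takes the same filters plus `remediation_ids`, returning the child logs grouped under those remediations; a sketch with placeholder IDs and an assumed client class:

from codex import Codex  # assumed export name

client = Codex()  # assumes credentials come from the environment
grouped_logs = client.projects.query_logs.list_by_group(
    "PROJECT_ID",                        # placeholder
    remediation_ids=["REMEDIATION_ID"],  # placeholder group IDs
    primary_eval_issue=["unhelpful"],
    limit=10,
)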
+ + Args: + created_at_end: Filter logs created at or before this timestamp + + created_at_start: Filter logs created at or after this timestamp + + custom_metadata: Filter by custom metadata as JSON string: {"key1": "value1", "key2": "value2"} + + primary_eval_issue: Filter logs that have ANY of these primary evaluation issues (OR operation) + + was_cache_hit: Filter by cache hit status + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + return self._get( + f"/api/projects/{project_id}/query_logs/groups", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "created_at_end": created_at_end, + "created_at_start": created_at_start, + "custom_metadata": custom_metadata, + "limit": limit, + "offset": offset, + "order": order, + "primary_eval_issue": primary_eval_issue, + "sort": sort, + "was_cache_hit": was_cache_hit, + }, + query_log_list_groups_params.QueryLogListGroupsParams, + ), + ), + cast_to=QueryLogListGroupsResponse, + ) + + def start_remediation( + self, + query_log_id: str, + *, + project_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> QueryLogStartRemediationResponse: + """ + Start Remediation Route + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not query_log_id: + raise ValueError(f"Expected a non-empty value for `query_log_id` but received {query_log_id!r}") + return self._post( + f"/api/projects/{project_id}/query_logs/{query_log_id}/start_remediation", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=QueryLogStartRemediationResponse, + ) + + +class AsyncQueryLogsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncQueryLogsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/cleanlab/codex-python#accessing-raw-response-data-eg-headers + """ + return AsyncQueryLogsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncQueryLogsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
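A sketch combining `list_groups` (note the extra `total_count` / `custom_rank` sort options) with `start_remediation` for a problematic log; client class and IDs are assumptions:

from codex import Codex  # assumed export name

client = Codex()  # assumes credentials come from the environment
groups = client.projects.query_logs.list_groups(
    "PROJECT_ID",  # placeholder
    sort="total_count",
    order="desc",
)
# Kick off a remediation for a specific query log.
remediation = client.projects.query_logs.start_remediation(
    "QUERY_LOG_ID",  # placeholder
    project_id="PROJECT_ID",
)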
+ + For more information, see https://www.github.com/cleanlab/codex-python#with_streaming_response + """ + return AsyncQueryLogsResourceWithStreamingResponse(self) + + async def retrieve( + self, + query_log_id: str, + *, + project_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> QueryLogRetrieveResponse: + """ + Get Query Log Route + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not query_log_id: + raise ValueError(f"Expected a non-empty value for `query_log_id` but received {query_log_id!r}") + return await self._get( + f"/api/projects/{project_id}/query_logs/{query_log_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=QueryLogRetrieveResponse, + ) + + async def list( + self, + project_id: str, + *, + created_at_end: Union[str, datetime, None] | NotGiven = NOT_GIVEN, + created_at_start: Union[str, datetime, None] | NotGiven = NOT_GIVEN, + custom_metadata: Optional[str] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + offset: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + primary_eval_issue: Optional[ + List[Literal["hallucination", "search_failure", "unhelpful", "difficult_query", "unsupported"]] + ] + | NotGiven = NOT_GIVEN, + sort: Optional[Literal["created_at", "primary_eval_issue_score"]] | NotGiven = NOT_GIVEN, + was_cache_hit: Optional[bool] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> QueryLogListResponse: + """ + List query logs by project ID. 
+ + Args: + created_at_end: Filter logs created at or before this timestamp + + created_at_start: Filter logs created at or after this timestamp + + custom_metadata: Filter by custom metadata as JSON string: {"key1": "value1", "key2": "value2"} + + primary_eval_issue: Filter logs that have ANY of these primary evaluation issues (OR operation) + + was_cache_hit: Filter by cache hit status + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + return await self._get( + f"/api/projects/{project_id}/query_logs/", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "created_at_end": created_at_end, + "created_at_start": created_at_start, + "custom_metadata": custom_metadata, + "limit": limit, + "offset": offset, + "order": order, + "primary_eval_issue": primary_eval_issue, + "sort": sort, + "was_cache_hit": was_cache_hit, + }, + query_log_list_params.QueryLogListParams, + ), + ), + cast_to=QueryLogListResponse, + ) + + async def list_by_group( + self, + project_id: str, + *, + created_at_end: Union[str, datetime, None] | NotGiven = NOT_GIVEN, + created_at_start: Union[str, datetime, None] | NotGiven = NOT_GIVEN, + custom_metadata: Optional[str] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + offset: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + primary_eval_issue: Optional[ + List[Literal["hallucination", "search_failure", "unhelpful", "difficult_query", "unsupported"]] + ] + | NotGiven = NOT_GIVEN, + remediation_ids: List[str] | NotGiven = NOT_GIVEN, + sort: Optional[Literal["created_at", "primary_eval_issue_score"]] | NotGiven = NOT_GIVEN, + was_cache_hit: Optional[bool] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> QueryLogListByGroupResponse: + """ + List query log group by remediation ID. 
+ + Args: + created_at_end: Filter logs created at or before this timestamp + + created_at_start: Filter logs created at or after this timestamp + + custom_metadata: Filter by custom metadata as JSON string: {"key1": "value1", "key2": "value2"} + + primary_eval_issue: Filter logs that have ANY of these primary evaluation issues (OR operation) + + remediation_ids: List of groups to list child logs for + + was_cache_hit: Filter by cache hit status + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + return await self._get( + f"/api/projects/{project_id}/query_logs/logs_by_group", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "created_at_end": created_at_end, + "created_at_start": created_at_start, + "custom_metadata": custom_metadata, + "limit": limit, + "offset": offset, + "order": order, + "primary_eval_issue": primary_eval_issue, + "remediation_ids": remediation_ids, + "sort": sort, + "was_cache_hit": was_cache_hit, + }, + query_log_list_by_group_params.QueryLogListByGroupParams, + ), + ), + cast_to=QueryLogListByGroupResponse, + ) + + async def list_groups( + self, + project_id: str, + *, + created_at_end: Union[str, datetime, None] | NotGiven = NOT_GIVEN, + created_at_start: Union[str, datetime, None] | NotGiven = NOT_GIVEN, + custom_metadata: Optional[str] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + offset: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + primary_eval_issue: Optional[ + List[Literal["hallucination", "search_failure", "unhelpful", "difficult_query", "unsupported"]] + ] + | NotGiven = NOT_GIVEN, + sort: Optional[Literal["created_at", "primary_eval_issue_score", "total_count", "custom_rank"]] + | NotGiven = NOT_GIVEN, + was_cache_hit: Optional[bool] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> QueryLogListGroupsResponse: + """ + List query log groups by project ID. 
+ + Args: + created_at_end: Filter logs created at or before this timestamp + + created_at_start: Filter logs created at or after this timestamp + + custom_metadata: Filter by custom metadata as JSON string: {"key1": "value1", "key2": "value2"} + + primary_eval_issue: Filter logs that have ANY of these primary evaluation issues (OR operation) + + was_cache_hit: Filter by cache hit status + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + return await self._get( + f"/api/projects/{project_id}/query_logs/groups", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "created_at_end": created_at_end, + "created_at_start": created_at_start, + "custom_metadata": custom_metadata, + "limit": limit, + "offset": offset, + "order": order, + "primary_eval_issue": primary_eval_issue, + "sort": sort, + "was_cache_hit": was_cache_hit, + }, + query_log_list_groups_params.QueryLogListGroupsParams, + ), + ), + cast_to=QueryLogListGroupsResponse, + ) + + async def start_remediation( + self, + query_log_id: str, + *, + project_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> QueryLogStartRemediationResponse: + """ + Start Remediation Route + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not query_log_id: + raise ValueError(f"Expected a non-empty value for `query_log_id` but received {query_log_id!r}") + return await self._post( + f"/api/projects/{project_id}/query_logs/{query_log_id}/start_remediation", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=QueryLogStartRemediationResponse, + ) + + +class QueryLogsResourceWithRawResponse: + def __init__(self, query_logs: QueryLogsResource) -> None: + self._query_logs = query_logs + + self.retrieve = to_raw_response_wrapper( + query_logs.retrieve, + ) + self.list = to_raw_response_wrapper( + query_logs.list, + ) + self.list_by_group = to_raw_response_wrapper( + query_logs.list_by_group, + ) + self.list_groups = to_raw_response_wrapper( + query_logs.list_groups, + ) + self.start_remediation = to_raw_response_wrapper( + query_logs.start_remediation, + ) + + +class AsyncQueryLogsResourceWithRawResponse: + def __init__(self, query_logs: AsyncQueryLogsResource) -> None: + self._query_logs = query_logs + + self.retrieve = async_to_raw_response_wrapper( + query_logs.retrieve, + ) + self.list = async_to_raw_response_wrapper( + query_logs.list, + ) + 
self.list_by_group = async_to_raw_response_wrapper( + query_logs.list_by_group, + ) + self.list_groups = async_to_raw_response_wrapper( + query_logs.list_groups, + ) + self.start_remediation = async_to_raw_response_wrapper( + query_logs.start_remediation, + ) + + +class QueryLogsResourceWithStreamingResponse: + def __init__(self, query_logs: QueryLogsResource) -> None: + self._query_logs = query_logs + + self.retrieve = to_streamed_response_wrapper( + query_logs.retrieve, + ) + self.list = to_streamed_response_wrapper( + query_logs.list, + ) + self.list_by_group = to_streamed_response_wrapper( + query_logs.list_by_group, + ) + self.list_groups = to_streamed_response_wrapper( + query_logs.list_groups, + ) + self.start_remediation = to_streamed_response_wrapper( + query_logs.start_remediation, + ) + + +class AsyncQueryLogsResourceWithStreamingResponse: + def __init__(self, query_logs: AsyncQueryLogsResource) -> None: + self._query_logs = query_logs + + self.retrieve = async_to_streamed_response_wrapper( + query_logs.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + query_logs.list, + ) + self.list_by_group = async_to_streamed_response_wrapper( + query_logs.list_by_group, + ) + self.list_groups = async_to_streamed_response_wrapper( + query_logs.list_groups, + ) + self.start_remediation = async_to_streamed_response_wrapper( + query_logs.start_remediation, + ) diff --git a/src/codex/resources/projects/remediations.py b/src/codex/resources/projects/remediations.py new file mode 100644 index 00000000..65015a14 --- /dev/null +++ b/src/codex/resources/projects/remediations.py @@ -0,0 +1,1135 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union, Optional +from datetime import datetime +from typing_extensions import Literal + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.projects import ( + remediation_list_params, + remediation_create_params, + remediation_edit_answer_params, + remediation_edit_draft_answer_params, +) +from ...types.projects.remediation_list_response import RemediationListResponse +from ...types.projects.remediation_pause_response import RemediationPauseResponse +from ...types.projects.remediation_create_response import RemediationCreateResponse +from ...types.projects.remediation_publish_response import RemediationPublishResponse +from ...types.projects.remediation_unpause_response import RemediationUnpauseResponse +from ...types.projects.remediation_retrieve_response import RemediationRetrieveResponse +from ...types.projects.remediation_edit_answer_response import RemediationEditAnswerResponse +from ...types.projects.remediation_edit_draft_answer_response import RemediationEditDraftAnswerResponse +from ...types.projects.remediation_list_resolved_logs_response import RemediationListResolvedLogsResponse +from ...types.projects.remediation_get_resolved_logs_count_response import RemediationGetResolvedLogsCountResponse + +__all__ = ["RemediationsResource", "AsyncRemediationsResource"] + + +class RemediationsResource(SyncAPIResource): + 
@cached_property + def with_raw_response(self) -> RemediationsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/cleanlab/codex-python#accessing-raw-response-data-eg-headers + """ + return RemediationsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> RemediationsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/cleanlab/codex-python#with_streaming_response + """ + return RemediationsResourceWithStreamingResponse(self) + + def create( + self, + project_id: str, + *, + question: str, + answer: Optional[str] | NotGiven = NOT_GIVEN, + draft_answer: Optional[str] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RemediationCreateResponse: + """ + Create Remediation Route + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + return self._post( + f"/api/projects/{project_id}/remediations/", + body=maybe_transform( + { + "question": question, + "answer": answer, + "draft_answer": draft_answer, + }, + remediation_create_params.RemediationCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=RemediationCreateResponse, + ) + + def retrieve( + self, + remediation_id: str, + *, + project_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
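A sketch of creating a remediation from a question and a draft answer (client class and project ID are placeholders/assumptions):

from codex import Codex  # assumed export name

client = Codex()  # assumes credentials come from the environment
remediation = client.projects.remediations.create(
    "PROJECT_ID",  # placeholder
    question="What is the refund policy?",
    draft_answer="Refunds are available within 30 days of purchase.",
)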
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RemediationRetrieveResponse: + """ + Get Remediation Route + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not remediation_id: + raise ValueError(f"Expected a non-empty value for `remediation_id` but received {remediation_id!r}") + return self._get( + f"/api/projects/{project_id}/remediations/{remediation_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=RemediationRetrieveResponse, + ) + + def list( + self, + project_id: str, + *, + created_at_end: Union[str, datetime, None] | NotGiven = NOT_GIVEN, + created_at_start: Union[str, datetime, None] | NotGiven = NOT_GIVEN, + last_edited_at_end: Union[str, datetime, None] | NotGiven = NOT_GIVEN, + last_edited_at_start: Union[str, datetime, None] | NotGiven = NOT_GIVEN, + last_edited_by: Optional[str] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + offset: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + sort: Optional[Literal["created_at", "last_edited_at", "resolved_logs_count"]] | NotGiven = NOT_GIVEN, + status: Optional[List[Literal["ACTIVE", "DRAFT", "ACTIVE_WITH_DRAFT", "PAUSED"]]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RemediationListResponse: + """ + List remediations by project ID. 
+ + Args: + created_at_end: Filter remediations created at or before this timestamp + + created_at_start: Filter remediations created at or after this timestamp + + last_edited_at_end: Filter remediations last edited at or before this timestamp + + last_edited_at_start: Filter remediations last edited at or after this timestamp + + last_edited_by: Filter by last edited by user ID + + status: Filter remediations that have ANY of these statuses (OR operation) + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + return self._get( + f"/api/projects/{project_id}/remediations/", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "created_at_end": created_at_end, + "created_at_start": created_at_start, + "last_edited_at_end": last_edited_at_end, + "last_edited_at_start": last_edited_at_start, + "last_edited_by": last_edited_by, + "limit": limit, + "offset": offset, + "order": order, + "sort": sort, + "status": status, + }, + remediation_list_params.RemediationListParams, + ), + ), + cast_to=RemediationListResponse, + ) + + def delete( + self, + remediation_id: str, + *, + project_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + Delete Remediation Route + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not remediation_id: + raise ValueError(f"Expected a non-empty value for `remediation_id` but received {remediation_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/api/projects/{project_id}/remediations/{remediation_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + def edit_answer( + self, + remediation_id: str, + *, + project_id: str, + answer: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
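Listing remediations supports status and edit-time filters, and delete returns no content (the method is typed `-> None`); a sketch with placeholder IDs and an assumed client class:

from codex import Codex  # assumed export name

client = Codex()  # assumes credentials come from the environment
drafts = client.projects.remediations.list(
    "PROJECT_ID",  # placeholder
    status=["DRAFT", "ACTIVE_WITH_DRAFT"],
    sort="last_edited_at",
    order="desc",
)
client.projects.remediations.delete("REMEDIATION_ID", project_id="PROJECT_ID")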
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RemediationEditAnswerResponse: + """ + Edit Answer Route + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not remediation_id: + raise ValueError(f"Expected a non-empty value for `remediation_id` but received {remediation_id!r}") + return self._patch( + f"/api/projects/{project_id}/remediations/{remediation_id}/edit_answer", + body=maybe_transform({"answer": answer}, remediation_edit_answer_params.RemediationEditAnswerParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=RemediationEditAnswerResponse, + ) + + def edit_draft_answer( + self, + remediation_id: str, + *, + project_id: str, + draft_answer: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RemediationEditDraftAnswerResponse: + """ + Edit Draft Answer Route + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not remediation_id: + raise ValueError(f"Expected a non-empty value for `remediation_id` but received {remediation_id!r}") + return self._patch( + f"/api/projects/{project_id}/remediations/{remediation_id}/edit_draft_answer", + body=maybe_transform( + {"draft_answer": draft_answer}, remediation_edit_draft_answer_params.RemediationEditDraftAnswerParams + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=RemediationEditDraftAnswerResponse, + ) + + def get_resolved_logs_count( + self, + remediation_id: str, + *, + project_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
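The two PATCH helpers above edit either the published answer or the draft answer of a remediation; a sketch with placeholder IDs and an assumed client class:

from codex import Codex  # assumed export name

client = Codex()  # assumes credentials come from the environment
client.projects.remediations.edit_draft_answer(
    "REMEDIATION_ID",  # placeholder
    project_id="PROJECT_ID",
    draft_answer="Refunds are available within 30 days with proof of purchase.",
)
client.projects.remediations.edit_answer(
    "REMEDIATION_ID",
    project_id="PROJECT_ID",
    answer="Refunds are available within 30 days with proof of purchase.",
)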
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RemediationGetResolvedLogsCountResponse: + """ + Get Remediation With Resolved Logs Count Route + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not remediation_id: + raise ValueError(f"Expected a non-empty value for `remediation_id` but received {remediation_id!r}") + return self._get( + f"/api/projects/{project_id}/remediations/{remediation_id}/resolved_logs_count", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=RemediationGetResolvedLogsCountResponse, + ) + + def list_resolved_logs( + self, + remediation_id: str, + *, + project_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RemediationListResolvedLogsResponse: + """ + List resolved logs by remediation ID. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not remediation_id: + raise ValueError(f"Expected a non-empty value for `remediation_id` but received {remediation_id!r}") + return self._get( + f"/api/projects/{project_id}/remediations/{remediation_id}/resolved_logs", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=RemediationListResolvedLogsResponse, + ) + + def pause( + self, + remediation_id: str, + *, + project_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
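A sketch of the resolved-logs helpers: the count endpoint and the full listing (client class and IDs are placeholders/assumptions):

from codex import Codex  # assumed export name

client = Codex()  # assumes credentials come from the environment
count = client.projects.remediations.get_resolved_logs_count(
    "REMEDIATION_ID",  # placeholder
    project_id="PROJECT_ID",
)
resolved_logs = client.projects.remediations.list_resolved_logs(
    "REMEDIATION_ID",
    project_id="PROJECT_ID",
)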
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RemediationPauseResponse: + """ + Pause Remediation Route + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not remediation_id: + raise ValueError(f"Expected a non-empty value for `remediation_id` but received {remediation_id!r}") + return self._patch( + f"/api/projects/{project_id}/remediations/{remediation_id}/pause", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=RemediationPauseResponse, + ) + + def publish( + self, + remediation_id: str, + *, + project_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RemediationPublishResponse: + """ + Publish Remediation Route + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not remediation_id: + raise ValueError(f"Expected a non-empty value for `remediation_id` but received {remediation_id!r}") + return self._patch( + f"/api/projects/{project_id}/remediations/{remediation_id}/publish", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=RemediationPublishResponse, + ) + + def unpause( + self, + remediation_id: str, + *, + project_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
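Pause, publish, and unpause are simple PATCH calls keyed by remediation ID; a lifecycle sketch with placeholder IDs and an assumed client class:

from codex import Codex  # assumed export name

client = Codex()  # assumes credentials come from the environment
client.projects.remediations.pause("REMEDIATION_ID", project_id="PROJECT_ID")
client.projects.remediations.unpause("REMEDIATION_ID", project_id="PROJECT_ID")
client.projects.remediations.publish("REMEDIATION_ID", project_id="PROJECT_ID")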
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RemediationUnpauseResponse: + """ + Unpause Remediation Route + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not remediation_id: + raise ValueError(f"Expected a non-empty value for `remediation_id` but received {remediation_id!r}") + return self._patch( + f"/api/projects/{project_id}/remediations/{remediation_id}/unpause", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=RemediationUnpauseResponse, + ) + + +class AsyncRemediationsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncRemediationsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/cleanlab/codex-python#accessing-raw-response-data-eg-headers + """ + return AsyncRemediationsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncRemediationsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/cleanlab/codex-python#with_streaming_response + """ + return AsyncRemediationsResourceWithStreamingResponse(self) + + async def create( + self, + project_id: str, + *, + question: str, + answer: Optional[str] | NotGiven = NOT_GIVEN, + draft_answer: Optional[str] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RemediationCreateResponse: + """ + Create Remediation Route + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + return await self._post( + f"/api/projects/{project_id}/remediations/", + body=await async_maybe_transform( + { + "question": question, + "answer": answer, + "draft_answer": draft_answer, + }, + remediation_create_params.RemediationCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=RemediationCreateResponse, + ) + + async def retrieve( + self, + remediation_id: str, + *, + project_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RemediationRetrieveResponse: + """ + Get Remediation Route + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not remediation_id: + raise ValueError(f"Expected a non-empty value for `remediation_id` but received {remediation_id!r}") + return await self._get( + f"/api/projects/{project_id}/remediations/{remediation_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=RemediationRetrieveResponse, + ) + + async def list( + self, + project_id: str, + *, + created_at_end: Union[str, datetime, None] | NotGiven = NOT_GIVEN, + created_at_start: Union[str, datetime, None] | NotGiven = NOT_GIVEN, + last_edited_at_end: Union[str, datetime, None] | NotGiven = NOT_GIVEN, + last_edited_at_start: Union[str, datetime, None] | NotGiven = NOT_GIVEN, + last_edited_by: Optional[str] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + offset: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + sort: Optional[Literal["created_at", "last_edited_at", "resolved_logs_count"]] | NotGiven = NOT_GIVEN, + status: Optional[List[Literal["ACTIVE", "DRAFT", "ACTIVE_WITH_DRAFT", "PAUSED"]]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RemediationListResponse: + """ + List remediations by project ID. 
+ + Args: + created_at_end: Filter remediations created at or before this timestamp + + created_at_start: Filter remediations created at or after this timestamp + + last_edited_at_end: Filter remediations last edited at or before this timestamp + + last_edited_at_start: Filter remediations last edited at or after this timestamp + + last_edited_by: Filter by last edited by user ID + + status: Filter remediations that have ANY of these statuses (OR operation) + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + return await self._get( + f"/api/projects/{project_id}/remediations/", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "created_at_end": created_at_end, + "created_at_start": created_at_start, + "last_edited_at_end": last_edited_at_end, + "last_edited_at_start": last_edited_at_start, + "last_edited_by": last_edited_by, + "limit": limit, + "offset": offset, + "order": order, + "sort": sort, + "status": status, + }, + remediation_list_params.RemediationListParams, + ), + ), + cast_to=RemediationListResponse, + ) + + async def delete( + self, + remediation_id: str, + *, + project_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + Delete Remediation Route + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not remediation_id: + raise ValueError(f"Expected a non-empty value for `remediation_id` but received {remediation_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/api/projects/{project_id}/remediations/{remediation_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + async def edit_answer( + self, + remediation_id: str, + *, + project_id: str, + answer: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RemediationEditAnswerResponse: + """ + Edit Answer Route + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not remediation_id: + raise ValueError(f"Expected a non-empty value for `remediation_id` but received {remediation_id!r}") + return await self._patch( + f"/api/projects/{project_id}/remediations/{remediation_id}/edit_answer", + body=await async_maybe_transform( + {"answer": answer}, remediation_edit_answer_params.RemediationEditAnswerParams + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=RemediationEditAnswerResponse, + ) + + async def edit_draft_answer( + self, + remediation_id: str, + *, + project_id: str, + draft_answer: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RemediationEditDraftAnswerResponse: + """ + Edit Draft Answer Route + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not remediation_id: + raise ValueError(f"Expected a non-empty value for `remediation_id` but received {remediation_id!r}") + return await self._patch( + f"/api/projects/{project_id}/remediations/{remediation_id}/edit_draft_answer", + body=await async_maybe_transform( + {"draft_answer": draft_answer}, remediation_edit_draft_answer_params.RemediationEditDraftAnswerParams + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=RemediationEditDraftAnswerResponse, + ) + + async def get_resolved_logs_count( + self, + remediation_id: str, + *, + project_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RemediationGetResolvedLogsCountResponse: + """ + Get Remediation With Resolved Logs Count Route + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not remediation_id: + raise ValueError(f"Expected a non-empty value for `remediation_id` but received {remediation_id!r}") + return await self._get( + f"/api/projects/{project_id}/remediations/{remediation_id}/resolved_logs_count", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=RemediationGetResolvedLogsCountResponse, + ) + + async def list_resolved_logs( + self, + remediation_id: str, + *, + project_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RemediationListResolvedLogsResponse: + """ + List resolved logs by remediation ID. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not remediation_id: + raise ValueError(f"Expected a non-empty value for `remediation_id` but received {remediation_id!r}") + return await self._get( + f"/api/projects/{project_id}/remediations/{remediation_id}/resolved_logs", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=RemediationListResolvedLogsResponse, + ) + + async def pause( + self, + remediation_id: str, + *, + project_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RemediationPauseResponse: + """ + Pause Remediation Route + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not remediation_id: + raise ValueError(f"Expected a non-empty value for `remediation_id` but received {remediation_id!r}") + return await self._patch( + f"/api/projects/{project_id}/remediations/{remediation_id}/pause", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=RemediationPauseResponse, + ) + + async def publish( + self, + remediation_id: str, + *, + project_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RemediationPublishResponse: + """ + Publish Remediation Route + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not remediation_id: + raise ValueError(f"Expected a non-empty value for `remediation_id` but received {remediation_id!r}") + return await self._patch( + f"/api/projects/{project_id}/remediations/{remediation_id}/publish", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=RemediationPublishResponse, + ) + + async def unpause( + self, + remediation_id: str, + *, + project_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RemediationUnpauseResponse: + """ + Unpause Remediation Route + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not project_id: + raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}") + if not remediation_id: + raise ValueError(f"Expected a non-empty value for `remediation_id` but received {remediation_id!r}") + return await self._patch( + f"/api/projects/{project_id}/remediations/{remediation_id}/unpause", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=RemediationUnpauseResponse, + ) + + +class RemediationsResourceWithRawResponse: + def __init__(self, remediations: RemediationsResource) -> None: + self._remediations = remediations + + self.create = to_raw_response_wrapper( + remediations.create, + ) + self.retrieve = to_raw_response_wrapper( + remediations.retrieve, + ) + self.list = to_raw_response_wrapper( + remediations.list, + ) + self.delete = to_raw_response_wrapper( + remediations.delete, + ) + self.edit_answer = to_raw_response_wrapper( + remediations.edit_answer, + ) + self.edit_draft_answer = to_raw_response_wrapper( + remediations.edit_draft_answer, + ) + self.get_resolved_logs_count = to_raw_response_wrapper( + remediations.get_resolved_logs_count, + ) + self.list_resolved_logs = to_raw_response_wrapper( + remediations.list_resolved_logs, + ) + self.pause = to_raw_response_wrapper( + remediations.pause, + ) + self.publish = to_raw_response_wrapper( + remediations.publish, + ) + self.unpause = to_raw_response_wrapper( + remediations.unpause, + ) + + +class AsyncRemediationsResourceWithRawResponse: + def __init__(self, remediations: AsyncRemediationsResource) -> None: + self._remediations = remediations + + self.create = async_to_raw_response_wrapper( + remediations.create, + ) + self.retrieve = async_to_raw_response_wrapper( + remediations.retrieve, + ) + self.list = async_to_raw_response_wrapper( + remediations.list, + ) + self.delete = async_to_raw_response_wrapper( + remediations.delete, + ) + self.edit_answer = async_to_raw_response_wrapper( + remediations.edit_answer, + ) + self.edit_draft_answer = async_to_raw_response_wrapper( + remediations.edit_draft_answer, + ) + self.get_resolved_logs_count = async_to_raw_response_wrapper( + remediations.get_resolved_logs_count, + ) + self.list_resolved_logs = async_to_raw_response_wrapper( + remediations.list_resolved_logs, + ) + self.pause = async_to_raw_response_wrapper( + remediations.pause, + ) + self.publish = async_to_raw_response_wrapper( + remediations.publish, + ) + self.unpause = async_to_raw_response_wrapper( + remediations.unpause, + ) + + +class RemediationsResourceWithStreamingResponse: + def __init__(self, remediations: RemediationsResource) -> None: + self._remediations = remediations + + self.create = to_streamed_response_wrapper( + remediations.create, + ) + self.retrieve = to_streamed_response_wrapper( + remediations.retrieve, + ) + self.list = to_streamed_response_wrapper( + remediations.list, + ) + self.delete = to_streamed_response_wrapper( + remediations.delete, + ) + self.edit_answer = 
to_streamed_response_wrapper( + remediations.edit_answer, + ) + self.edit_draft_answer = to_streamed_response_wrapper( + remediations.edit_draft_answer, + ) + self.get_resolved_logs_count = to_streamed_response_wrapper( + remediations.get_resolved_logs_count, + ) + self.list_resolved_logs = to_streamed_response_wrapper( + remediations.list_resolved_logs, + ) + self.pause = to_streamed_response_wrapper( + remediations.pause, + ) + self.publish = to_streamed_response_wrapper( + remediations.publish, + ) + self.unpause = to_streamed_response_wrapper( + remediations.unpause, + ) + + +class AsyncRemediationsResourceWithStreamingResponse: + def __init__(self, remediations: AsyncRemediationsResource) -> None: + self._remediations = remediations + + self.create = async_to_streamed_response_wrapper( + remediations.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + remediations.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + remediations.list, + ) + self.delete = async_to_streamed_response_wrapper( + remediations.delete, + ) + self.edit_answer = async_to_streamed_response_wrapper( + remediations.edit_answer, + ) + self.edit_draft_answer = async_to_streamed_response_wrapper( + remediations.edit_draft_answer, + ) + self.get_resolved_logs_count = async_to_streamed_response_wrapper( + remediations.get_resolved_logs_count, + ) + self.list_resolved_logs = async_to_streamed_response_wrapper( + remediations.list_resolved_logs, + ) + self.pause = async_to_streamed_response_wrapper( + remediations.pause, + ) + self.publish = async_to_streamed_response_wrapper( + remediations.publish, + ) + self.unpause = async_to_streamed_response_wrapper( + remediations.unpause, + ) diff --git a/src/codex/types/__init__.py b/src/codex/types/__init__.py index 8e0cc4a4..70713a3b 100644 --- a/src/codex/types/__init__.py +++ b/src/codex/types/__init__.py @@ -13,9 +13,11 @@ from .project_return_schema import ProjectReturnSchema as ProjectReturnSchema from .project_update_params import ProjectUpdateParams as ProjectUpdateParams from .project_validate_params import ProjectValidateParams as ProjectValidateParams +from .project_invite_sme_params import ProjectInviteSmeParams as ProjectInviteSmeParams from .project_retrieve_response import ProjectRetrieveResponse as ProjectRetrieveResponse from .project_validate_response import ProjectValidateResponse as ProjectValidateResponse from .organization_schema_public import OrganizationSchemaPublic as OrganizationSchemaPublic +from .project_invite_sme_response import ProjectInviteSmeResponse as ProjectInviteSmeResponse from .user_activate_account_params import UserActivateAccountParams as UserActivateAccountParams from .project_increment_queries_params import ProjectIncrementQueriesParams as ProjectIncrementQueriesParams from .project_retrieve_analytics_params import ProjectRetrieveAnalyticsParams as ProjectRetrieveAnalyticsParams diff --git a/src/codex/types/project_invite_sme_params.py b/src/codex/types/project_invite_sme_params.py new file mode 100644 index 00000000..f2694632 --- /dev/null +++ b/src/codex/types/project_invite_sme_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
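A minimal usage sketch for the raw/streaming response wrappers generated above. The `Codex` client class, its construction, and the `client.projects.remediations` accessor path are assumptions inferred from the package layout rather than taken from this patch; the method names and their parameters do come from the resource definitions above.

from codex import Codex  # assumed top-level client export

client = Codex()  # construction/auth details are assumed, not shown in this patch

# Raw response: returns the HTTP response wrapper instead of the parsed model.
raw = client.projects.remediations.with_raw_response.retrieve(
    "remediation-id",
    project_id="project-id",
)
print(raw.headers)
remediation = raw.parse()  # .parse() is the usual Stainless helper (assumption)

# Streaming response: the body is not read eagerly.
with client.projects.remediations.with_streaming_response.retrieve(
    "remediation-id",
    project_id="project-id",
) as response:
    print(response.headers)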
+ +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ProjectInviteSmeParams"] + + +class ProjectInviteSmeParams(TypedDict, total=False): + email: Required[str] + + page_type: Required[Literal["query_log", "remediation"]] + + url_query_string: Required[str] diff --git a/src/codex/types/project_invite_sme_response.py b/src/codex/types/project_invite_sme_response.py new file mode 100644 index 00000000..8c871a57 --- /dev/null +++ b/src/codex/types/project_invite_sme_response.py @@ -0,0 +1,11 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .._models import BaseModel + +__all__ = ["ProjectInviteSmeResponse"] + + +class ProjectInviteSmeResponse(BaseModel): + recipient_email: str + + status: str diff --git a/src/codex/types/projects/__init__.py b/src/codex/types/projects/__init__.py index 2b69e570..8a26aa00 100644 --- a/src/codex/types/projects/__init__.py +++ b/src/codex/types/projects/__init__.py @@ -5,17 +5,48 @@ from .entry import Entry as Entry from .access_key_schema import AccessKeySchema as AccessKeySchema from .entry_query_params import EntryQueryParams as EntryQueryParams +from .eval_create_params import EvalCreateParams as EvalCreateParams +from .eval_list_response import EvalListResponse as EvalListResponse +from .eval_update_params import EvalUpdateParams as EvalUpdateParams from .cluster_list_params import ClusterListParams as ClusterListParams from .entry_create_params import EntryCreateParams as EntryCreateParams from .entry_update_params import EntryUpdateParams as EntryUpdateParams from .entry_query_response import EntryQueryResponse as EntryQueryResponse from .cluster_list_response import ClusterListResponse as ClusterListResponse +from .query_log_list_params import QueryLogListParams as QueryLogListParams from .entry_notify_sme_params import EntryNotifySmeParams as EntryNotifySmeParams +from .query_log_list_response import QueryLogListResponse as QueryLogListResponse +from .remediation_list_params import RemediationListParams as RemediationListParams from .access_key_create_params import AccessKeyCreateParams as AccessKeyCreateParams from .access_key_list_response import AccessKeyListResponse as AccessKeyListResponse from .access_key_update_params import AccessKeyUpdateParams as AccessKeyUpdateParams from .entry_notify_sme_response import EntryNotifySmeResponse as EntryNotifySmeResponse +from .remediation_create_params import RemediationCreateParams as RemediationCreateParams +from .remediation_list_response import RemediationListResponse as RemediationListResponse +from .remediation_pause_response import RemediationPauseResponse as RemediationPauseResponse +from .query_log_retrieve_response import QueryLogRetrieveResponse as QueryLogRetrieveResponse +from .remediation_create_response import RemediationCreateResponse as RemediationCreateResponse +from .query_log_list_groups_params import QueryLogListGroupsParams as QueryLogListGroupsParams +from .remediation_publish_response import RemediationPublishResponse as RemediationPublishResponse +from .remediation_unpause_response import RemediationUnpauseResponse as RemediationUnpauseResponse +from .remediation_retrieve_response import RemediationRetrieveResponse as RemediationRetrieveResponse from .cluster_list_variants_response import ClusterListVariantsResponse as ClusterListVariantsResponse +from .query_log_list_by_group_params import QueryLogListByGroupParams as QueryLogListByGroupParams +from 
.query_log_list_groups_response import QueryLogListGroupsResponse as QueryLogListGroupsResponse +from .remediation_edit_answer_params import RemediationEditAnswerParams as RemediationEditAnswerParams +from .query_log_list_by_group_response import QueryLogListByGroupResponse as QueryLogListByGroupResponse +from .remediation_edit_answer_response import RemediationEditAnswerResponse as RemediationEditAnswerResponse +from .query_log_start_remediation_response import QueryLogStartRemediationResponse as QueryLogStartRemediationResponse +from .remediation_edit_draft_answer_params import RemediationEditDraftAnswerParams as RemediationEditDraftAnswerParams +from .remediation_edit_draft_answer_response import ( + RemediationEditDraftAnswerResponse as RemediationEditDraftAnswerResponse, +) from .access_key_retrieve_project_id_response import ( AccessKeyRetrieveProjectIDResponse as AccessKeyRetrieveProjectIDResponse, ) +from .remediation_list_resolved_logs_response import ( + RemediationListResolvedLogsResponse as RemediationListResolvedLogsResponse, +) +from .remediation_get_resolved_logs_count_response import ( + RemediationGetResolvedLogsCountResponse as RemediationGetResolvedLogsCountResponse, +) diff --git a/src/codex/types/projects/eval_create_params.py b/src/codex/types/projects/eval_create_params.py new file mode 100644 index 00000000..20dcdd3d --- /dev/null +++ b/src/codex/types/projects/eval_create_params.py @@ -0,0 +1,66 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["EvalCreateParams"] + + +class EvalCreateParams(TypedDict, total=False): + criteria: Required[str] + """ + The evaluation criteria text that describes what aspect is being evaluated and + how + """ + + eval_key: Required[str] + """ + Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + """ + + name: Required[str] + """Display name/label for the evaluation metric""" + + context_identifier: Optional[str] + """ + The exact string used in your evaluation criteria to reference the retrieved + context. + """ + + enabled: bool + """Allows the evaluation to be disabled without removing it""" + + is_default: bool + """Whether the eval is a default, built-in eval or a custom eval""" + + priority: Optional[int] + """ + Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + """ + + query_identifier: Optional[str] + """ + The exact string used in your evaluation criteria to reference the user's query. + """ + + response_identifier: Optional[str] + """ + The exact string used in your evaluation criteria to reference the RAG/LLM + response. + """ + + should_escalate: bool + """ + If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + """ + + threshold: float + """Threshold value that determines if the evaluation fails""" + + threshold_direction: Literal["above", "below"] + """Whether the evaluation fails when score is above or below the threshold""" diff --git a/src/codex/types/projects/eval_list_response.py b/src/codex/types/projects/eval_list_response.py new file mode 100644 index 00000000..e7f2b1b3 --- /dev/null +++ b/src/codex/types/projects/eval_list_response.py @@ -0,0 +1,69 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
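To make the `EvalCreateParams` schema above concrete, here is a hedged sketch of a custom eval payload. The field names and their meanings come from the TypedDict above; the commented-out `client.projects.evals.create(...)` call is an assumed accessor path that this patch does not show.

from codex.types.projects import EvalCreateParams

params: EvalCreateParams = {
    "name": "Conciseness",
    "eval_key": "conciseness",
    "criteria": "Determine whether the {response} answers the {query} without unnecessary detail.",
    "query_identifier": "{query}",
    "response_identifier": "{response}",
    "threshold": 0.5,
    "threshold_direction": "below",  # the eval fails when its score falls below the threshold
    "should_escalate": True,
    "enabled": True,
}
# client.projects.evals.create("project-id", **params)  # assumed resource path, not part of this patch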
+ +from typing import List, Optional +from typing_extensions import Literal, TypeAlias + +from ..._models import BaseModel + +__all__ = ["EvalListResponse", "EvalListResponseItem"] + + +class EvalListResponseItem(BaseModel): + criteria: str + """ + The evaluation criteria text that describes what aspect is being evaluated and + how + """ + + eval_key: str + """ + Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + """ + + name: str + """Display name/label for the evaluation metric""" + + context_identifier: Optional[str] = None + """ + The exact string used in your evaluation criteria to reference the retrieved + context. + """ + + enabled: Optional[bool] = None + """Allows the evaluation to be disabled without removing it""" + + is_default: Optional[bool] = None + """Whether the eval is a default, built-in eval or a custom eval""" + + priority: Optional[int] = None + """ + Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + """ + + query_identifier: Optional[str] = None + """ + The exact string used in your evaluation criteria to reference the user's query. + """ + + response_identifier: Optional[str] = None + """ + The exact string used in your evaluation criteria to reference the RAG/LLM + response. + """ + + should_escalate: Optional[bool] = None + """ + If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + """ + + threshold: Optional[float] = None + """Threshold value that determines if the evaluation fails""" + + threshold_direction: Optional[Literal["above", "below"]] = None + """Whether the evaluation fails when score is above or below the threshold""" + + +EvalListResponse: TypeAlias = List[EvalListResponseItem] diff --git a/src/codex/types/projects/eval_update_params.py b/src/codex/types/projects/eval_update_params.py new file mode 100644 index 00000000..b690ec43 --- /dev/null +++ b/src/codex/types/projects/eval_update_params.py @@ -0,0 +1,104 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union, Optional +from typing_extensions import Literal, Required, Annotated, TypeAlias, TypedDict + +from ..._utils import PropertyInfo + +__all__ = ["EvalUpdateParams", "CustomEvalCreateOrUpdateSchema", "DefaultEvalUpdateSchema"] + + +class CustomEvalCreateOrUpdateSchema(TypedDict, total=False): + project_id: Required[str] + + criteria: Required[str] + """ + The evaluation criteria text that describes what aspect is being evaluated and + how + """ + + body_eval_key: Required[Annotated[str, PropertyInfo(alias="eval_key")]] + """ + Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + """ + + name: Required[str] + """Display name/label for the evaluation metric""" + + context_identifier: Optional[str] + """ + The exact string used in your evaluation criteria to reference the retrieved + context. 
+ """ + + enabled: bool + """Allows the evaluation to be disabled without removing it""" + + is_default: bool + """Whether the eval is a default, built-in eval or a custom eval""" + + priority: Optional[int] + """ + Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + """ + + query_identifier: Optional[str] + """ + The exact string used in your evaluation criteria to reference the user's query. + """ + + response_identifier: Optional[str] + """ + The exact string used in your evaluation criteria to reference the RAG/LLM + response. + """ + + should_escalate: bool + """ + If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + """ + + threshold: float + """Threshold value that determines if the evaluation fails""" + + threshold_direction: Literal["above", "below"] + """Whether the evaluation fails when score is above or below the threshold""" + + +class DefaultEvalUpdateSchema(TypedDict, total=False): + project_id: Required[str] + + body_eval_key: Required[Annotated[str, PropertyInfo(alias="eval_key")]] + """ + Unique key for eval metric - currently maps to the TrustworthyRAG name property + and eval_scores dictionary key to check against threshold + """ + + enabled: bool + """Allows the evaluation to be disabled without removing it""" + + priority: Optional[int] + """ + Priority order for evals (lower number = higher priority) to determine primary + eval issue to surface + """ + + should_escalate: bool + """ + If true, failing this eval means the response is considered bad and can trigger + escalation to Codex/SME + """ + + threshold: float + """Threshold value that determines if the evaluation fails""" + + threshold_direction: Literal["above", "below"] + """Whether the evaluation fails when score is above or below the threshold""" + + +EvalUpdateParams: TypeAlias = Union[CustomEvalCreateOrUpdateSchema, DefaultEvalUpdateSchema] diff --git a/src/codex/types/projects/query_log_list_by_group_params.py b/src/codex/types/projects/query_log_list_by_group_params.py new file mode 100644 index 00000000..66166a19 --- /dev/null +++ b/src/codex/types/projects/query_log_list_by_group_params.py @@ -0,0 +1,41 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import List, Union, Optional +from datetime import datetime +from typing_extensions import Literal, Annotated, TypedDict + +from ..._utils import PropertyInfo + +__all__ = ["QueryLogListByGroupParams"] + + +class QueryLogListByGroupParams(TypedDict, total=False): + created_at_end: Annotated[Union[str, datetime, None], PropertyInfo(format="iso8601")] + """Filter logs created at or before this timestamp""" + + created_at_start: Annotated[Union[str, datetime, None], PropertyInfo(format="iso8601")] + """Filter logs created at or after this timestamp""" + + custom_metadata: Optional[str] + """Filter by custom metadata as JSON string: {"key1": "value1", "key2": "value2"}""" + + limit: int + + offset: int + + order: Literal["asc", "desc"] + + primary_eval_issue: Optional[ + List[Literal["hallucination", "search_failure", "unhelpful", "difficult_query", "unsupported"]] + ] + """Filter logs that have ANY of these primary evaluation issues (OR operation)""" + + remediation_ids: List[str] + """List of groups to list child logs for""" + + sort: Optional[Literal["created_at", "primary_eval_issue_score"]] + + was_cache_hit: Optional[bool] + """Filter by cache hit status""" diff --git a/src/codex/types/projects/query_log_list_by_group_response.py b/src/codex/types/projects/query_log_list_by_group_response.py new file mode 100644 index 00000000..ee79b6fa --- /dev/null +++ b/src/codex/types/projects/query_log_list_by_group_response.py @@ -0,0 +1,96 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, List, Union, Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = [ + "QueryLogListByGroupResponse", + "QueryLogsByGroup", + "QueryLogsByGroupQueryLog", + "QueryLogsByGroupQueryLogContext", +] + + +class QueryLogsByGroupQueryLogContext(BaseModel): + content: str + """The actual content/text of the document.""" + + id: Optional[str] = None + """Unique identifier for the document. Useful for tracking documents""" + + source: Optional[str] = None + """Source or origin of the document. Useful for citations.""" + + tags: Optional[List[str]] = None + """Tags or categories for the document. Useful for filtering""" + + title: Optional[str] = None + """Title or heading of the document. Useful for display and context.""" + + +class QueryLogsByGroupQueryLog(BaseModel): + id: str + + created_at: datetime + + formatted_eval_scores: Optional[Dict[str, Dict[str, Union[float, Literal["pass", "fail"]]]]] = None + """Format evaluation scores for frontend display with pass/fail status. + + Returns: Dictionary mapping eval keys to their formatted representation: { + "eval_key": { "score": float, "status": "pass" | "fail" } } Returns None if + eval_scores is None. 
+ """ + + is_bad_response: bool + """If an eval with should_escalate=True failed""" + + project_id: str + + question: str + + remediation_id: str + + was_cache_hit: Optional[bool] = None + """If similar query already answered, or None if cache was not checked""" + + context: Optional[List[QueryLogsByGroupQueryLogContext]] = None + """RAG context used for the query""" + + custom_metadata: Optional[object] = None + """Arbitrary metadata supplied by the user/system""" + + custom_metadata_keys: Optional[List[str]] = None + """Keys of the custom metadata""" + + eval_issue_labels: Optional[List[str]] = None + """Labels derived from evaluation scores""" + + eval_scores: Optional[Dict[str, float]] = None + """Evaluation scores for the original response""" + + eval_thresholds: Optional[Dict[str, Dict[str, Union[float, str]]]] = None + """Evaluation thresholds and directions at time of creation""" + + evaluated_response: Optional[str] = None + """The response being evaluated from the RAG system (before any remediation)""" + + primary_eval_issue: Optional[str] = None + """Primary issue identified in evaluation""" + + primary_eval_issue_score: Optional[float] = None + """Score of the primary eval issue""" + + +class QueryLogsByGroup(BaseModel): + query_logs: List[QueryLogsByGroupQueryLog] + + total_count: int + + +class QueryLogListByGroupResponse(BaseModel): + custom_metadata_columns: List[str] + + query_logs_by_group: Dict[str, QueryLogsByGroup] diff --git a/src/codex/types/projects/query_log_list_groups_params.py b/src/codex/types/projects/query_log_list_groups_params.py new file mode 100644 index 00000000..558ac0b2 --- /dev/null +++ b/src/codex/types/projects/query_log_list_groups_params.py @@ -0,0 +1,38 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union, Optional +from datetime import datetime +from typing_extensions import Literal, Annotated, TypedDict + +from ..._utils import PropertyInfo + +__all__ = ["QueryLogListGroupsParams"] + + +class QueryLogListGroupsParams(TypedDict, total=False): + created_at_end: Annotated[Union[str, datetime, None], PropertyInfo(format="iso8601")] + """Filter logs created at or before this timestamp""" + + created_at_start: Annotated[Union[str, datetime, None], PropertyInfo(format="iso8601")] + """Filter logs created at or after this timestamp""" + + custom_metadata: Optional[str] + """Filter by custom metadata as JSON string: {"key1": "value1", "key2": "value2"}""" + + limit: int + + offset: int + + order: Literal["asc", "desc"] + + primary_eval_issue: Optional[ + List[Literal["hallucination", "search_failure", "unhelpful", "difficult_query", "unsupported"]] + ] + """Filter logs that have ANY of these primary evaluation issues (OR operation)""" + + sort: Optional[Literal["created_at", "primary_eval_issue_score", "total_count", "custom_rank"]] + + was_cache_hit: Optional[bool] + """Filter by cache hit status""" diff --git a/src/codex/types/projects/query_log_list_groups_response.py b/src/codex/types/projects/query_log_list_groups_response.py new file mode 100644 index 00000000..979d34d6 --- /dev/null +++ b/src/codex/types/projects/query_log_list_groups_response.py @@ -0,0 +1,91 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Dict, List, Union, Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["QueryLogListGroupsResponse", "QueryLogGroup", "QueryLogGroupContext"] + + +class QueryLogGroupContext(BaseModel): + content: str + """The actual content/text of the document.""" + + id: Optional[str] = None + """Unique identifier for the document. Useful for tracking documents""" + + source: Optional[str] = None + """Source or origin of the document. Useful for citations.""" + + tags: Optional[List[str]] = None + """Tags or categories for the document. Useful for filtering""" + + title: Optional[str] = None + """Title or heading of the document. Useful for display and context.""" + + +class QueryLogGroup(BaseModel): + id: str + + created_at: datetime + + formatted_eval_scores: Optional[Dict[str, Dict[str, Union[float, Literal["pass", "fail"]]]]] = None + """Format evaluation scores for frontend display with pass/fail status. + + Returns: Dictionary mapping eval keys to their formatted representation: { + "eval_key": { "score": float, "status": "pass" | "fail" } } Returns None if + eval_scores is None. + """ + + is_bad_response: bool + """If an eval with should_escalate=True failed""" + + project_id: str + + question: str + + remediation_id: str + + status: Literal["ACTIVE", "DRAFT", "ACTIVE_WITH_DRAFT", "NOT_STARTED", "PAUSED"] + + total_count: int + + was_cache_hit: Optional[bool] = None + """If similar query already answered, or None if cache was not checked""" + + context: Optional[List[QueryLogGroupContext]] = None + """RAG context used for the query""" + + custom_metadata: Optional[object] = None + """Arbitrary metadata supplied by the user/system""" + + custom_metadata_keys: Optional[List[str]] = None + """Keys of the custom metadata""" + + eval_issue_labels: Optional[List[str]] = None + """Labels derived from evaluation scores""" + + eval_scores: Optional[Dict[str, float]] = None + """Evaluation scores for the original response""" + + eval_thresholds: Optional[Dict[str, Dict[str, Union[float, str]]]] = None + """Evaluation thresholds and directions at time of creation""" + + evaluated_response: Optional[str] = None + """The response being evaluated from the RAG system (before any remediation)""" + + primary_eval_issue: Optional[str] = None + """Primary issue identified in evaluation""" + + primary_eval_issue_score: Optional[float] = None + """Score of the primary eval issue""" + + +class QueryLogListGroupsResponse(BaseModel): + custom_metadata_columns: List[str] + + query_log_groups: List[QueryLogGroup] + + total_count: int diff --git a/src/codex/types/projects/query_log_list_params.py b/src/codex/types/projects/query_log_list_params.py new file mode 100644 index 00000000..9cf3211f --- /dev/null +++ b/src/codex/types/projects/query_log_list_params.py @@ -0,0 +1,38 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
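A hedged sketch tying the grouping params and response models above together. The filter and field names come from `QueryLogListGroupsParams` and `QueryLogListGroupsResponse`; the `Codex` client and the `client.projects.query_logs.list_groups(...)` accessor path are assumptions based on the file naming, not shown in this patch.

from datetime import datetime, timezone

from codex import Codex  # assumed top-level client export

client = Codex()  # construction/auth details are assumed

groups = client.projects.query_logs.list_groups(  # assumed method mapping for query_log_list_groups_params
    "project-id",
    created_at_start=datetime(2025, 5, 1, tzinfo=timezone.utc),
    primary_eval_issue=["hallucination", "search_failure"],
    sort="total_count",
    order="desc",
    limit=25,
)
print(groups.total_count)
for group in groups.query_log_groups:
    print(group.question, group.total_count, group.status, group.primary_eval_issue)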
+ +from __future__ import annotations + +from typing import List, Union, Optional +from datetime import datetime +from typing_extensions import Literal, Annotated, TypedDict + +from ..._utils import PropertyInfo + +__all__ = ["QueryLogListParams"] + + +class QueryLogListParams(TypedDict, total=False): + created_at_end: Annotated[Union[str, datetime, None], PropertyInfo(format="iso8601")] + """Filter logs created at or before this timestamp""" + + created_at_start: Annotated[Union[str, datetime, None], PropertyInfo(format="iso8601")] + """Filter logs created at or after this timestamp""" + + custom_metadata: Optional[str] + """Filter by custom metadata as JSON string: {"key1": "value1", "key2": "value2"}""" + + limit: int + + offset: int + + order: Literal["asc", "desc"] + + primary_eval_issue: Optional[ + List[Literal["hallucination", "search_failure", "unhelpful", "difficult_query", "unsupported"]] + ] + """Filter logs that have ANY of these primary evaluation issues (OR operation)""" + + sort: Optional[Literal["created_at", "primary_eval_issue_score"]] + + was_cache_hit: Optional[bool] + """Filter by cache hit status""" diff --git a/src/codex/types/projects/query_log_list_response.py b/src/codex/types/projects/query_log_list_response.py new file mode 100644 index 00000000..d570ea50 --- /dev/null +++ b/src/codex/types/projects/query_log_list_response.py @@ -0,0 +1,87 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, List, Union, Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["QueryLogListResponse", "QueryLog", "QueryLogContext"] + + +class QueryLogContext(BaseModel): + content: str + """The actual content/text of the document.""" + + id: Optional[str] = None + """Unique identifier for the document. Useful for tracking documents""" + + source: Optional[str] = None + """Source or origin of the document. Useful for citations.""" + + tags: Optional[List[str]] = None + """Tags or categories for the document. Useful for filtering""" + + title: Optional[str] = None + """Title or heading of the document. Useful for display and context.""" + + +class QueryLog(BaseModel): + id: str + + created_at: datetime + + formatted_eval_scores: Optional[Dict[str, Dict[str, Union[float, Literal["pass", "fail"]]]]] = None + """Format evaluation scores for frontend display with pass/fail status. + + Returns: Dictionary mapping eval keys to their formatted representation: { + "eval_key": { "score": float, "status": "pass" | "fail" } } Returns None if + eval_scores is None. 
+ """ + + is_bad_response: bool + """If an eval with should_escalate=True failed""" + + project_id: str + + question: str + + remediation_id: str + + was_cache_hit: Optional[bool] = None + """If similar query already answered, or None if cache was not checked""" + + context: Optional[List[QueryLogContext]] = None + """RAG context used for the query""" + + custom_metadata: Optional[object] = None + """Arbitrary metadata supplied by the user/system""" + + custom_metadata_keys: Optional[List[str]] = None + """Keys of the custom metadata""" + + eval_issue_labels: Optional[List[str]] = None + """Labels derived from evaluation scores""" + + eval_scores: Optional[Dict[str, float]] = None + """Evaluation scores for the original response""" + + eval_thresholds: Optional[Dict[str, Dict[str, Union[float, str]]]] = None + """Evaluation thresholds and directions at time of creation""" + + evaluated_response: Optional[str] = None + """The response being evaluated from the RAG system (before any remediation)""" + + primary_eval_issue: Optional[str] = None + """Primary issue identified in evaluation""" + + primary_eval_issue_score: Optional[float] = None + """Score of the primary eval issue""" + + +class QueryLogListResponse(BaseModel): + custom_metadata_columns: List[str] + + query_logs: List[QueryLog] + + total_count: int diff --git a/src/codex/types/projects/query_log_retrieve_response.py b/src/codex/types/projects/query_log_retrieve_response.py new file mode 100644 index 00000000..f918c214 --- /dev/null +++ b/src/codex/types/projects/query_log_retrieve_response.py @@ -0,0 +1,79 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, List, Union, Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["QueryLogRetrieveResponse", "Context"] + + +class Context(BaseModel): + content: str + """The actual content/text of the document.""" + + id: Optional[str] = None + """Unique identifier for the document. Useful for tracking documents""" + + source: Optional[str] = None + """Source or origin of the document. Useful for citations.""" + + tags: Optional[List[str]] = None + """Tags or categories for the document. Useful for filtering""" + + title: Optional[str] = None + """Title or heading of the document. Useful for display and context.""" + + +class QueryLogRetrieveResponse(BaseModel): + id: str + + created_at: datetime + + formatted_eval_scores: Optional[Dict[str, Dict[str, Union[float, Literal["pass", "fail"]]]]] = None + """Format evaluation scores for frontend display with pass/fail status. + + Returns: Dictionary mapping eval keys to their formatted representation: { + "eval_key": { "score": float, "status": "pass" | "fail" } } Returns None if + eval_scores is None. 
+ """ + + is_bad_response: bool + """If an eval with should_escalate=True failed""" + + project_id: str + + question: str + + remediation_id: str + + was_cache_hit: Optional[bool] = None + """If similar query already answered, or None if cache was not checked""" + + context: Optional[List[Context]] = None + """RAG context used for the query""" + + custom_metadata: Optional[object] = None + """Arbitrary metadata supplied by the user/system""" + + custom_metadata_keys: Optional[List[str]] = None + """Keys of the custom metadata""" + + eval_issue_labels: Optional[List[str]] = None + """Labels derived from evaluation scores""" + + eval_scores: Optional[Dict[str, float]] = None + """Evaluation scores for the original response""" + + eval_thresholds: Optional[Dict[str, Dict[str, Union[float, str]]]] = None + """Evaluation thresholds and directions at time of creation""" + + evaluated_response: Optional[str] = None + """The response being evaluated from the RAG system (before any remediation)""" + + primary_eval_issue: Optional[str] = None + """Primary issue identified in evaluation""" + + primary_eval_issue_score: Optional[float] = None + """Score of the primary eval issue""" diff --git a/src/codex/types/projects/query_log_start_remediation_response.py b/src/codex/types/projects/query_log_start_remediation_response.py new file mode 100644 index 00000000..0250fb15 --- /dev/null +++ b/src/codex/types/projects/query_log_start_remediation_response.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["QueryLogStartRemediationResponse"] + + +class QueryLogStartRemediationResponse(BaseModel): + id: str + + answered_at: Optional[datetime] = None + + answered_by: Optional[str] = None + + created_at: datetime + + last_edited_at: Optional[datetime] = None + + last_edited_by: Optional[str] = None + + project_id: str + + question: str + + status: Literal["ACTIVE", "DRAFT", "ACTIVE_WITH_DRAFT", "NOT_STARTED", "PAUSED"] + + answer: Optional[str] = None + + draft_answer: Optional[str] = None diff --git a/src/codex/types/projects/remediation_create_params.py b/src/codex/types/projects/remediation_create_params.py new file mode 100644 index 00000000..0d347490 --- /dev/null +++ b/src/codex/types/projects/remediation_create_params.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional +from typing_extensions import Required, TypedDict + +__all__ = ["RemediationCreateParams"] + + +class RemediationCreateParams(TypedDict, total=False): + question: Required[str] + + answer: Optional[str] + + draft_answer: Optional[str] diff --git a/src/codex/types/projects/remediation_create_response.py b/src/codex/types/projects/remediation_create_response.py new file mode 100644 index 00000000..ad4e6893 --- /dev/null +++ b/src/codex/types/projects/remediation_create_response.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
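A hedged sketch of creating a remediation with the `create` endpoint and the `RemediationCreateParams` fields above. The question/answer/draft_answer parameters come from this patch; the `Codex` client and the `client.projects.remediations` path are assumptions.

from codex import Codex  # assumed top-level client export

client = Codex()  # construction/auth details are assumed

remediation = client.projects.remediations.create(
    "project-id",
    question="How do I rotate my API key?",
    draft_answer="Rotate keys from the project settings page.",  # answer and draft_answer are both optional
)
print(remediation.id, remediation.status)  # status is one of ACTIVE / DRAFT / ACTIVE_WITH_DRAFT / NOT_STARTED / PAUSED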
+ +from typing import Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["RemediationCreateResponse"] + + +class RemediationCreateResponse(BaseModel): + id: str + + answered_at: Optional[datetime] = None + + answered_by: Optional[str] = None + + created_at: datetime + + last_edited_at: Optional[datetime] = None + + last_edited_by: Optional[str] = None + + project_id: str + + question: str + + status: Literal["ACTIVE", "DRAFT", "ACTIVE_WITH_DRAFT", "NOT_STARTED", "PAUSED"] + + answer: Optional[str] = None + + draft_answer: Optional[str] = None diff --git a/src/codex/types/projects/remediation_edit_answer_params.py b/src/codex/types/projects/remediation_edit_answer_params.py new file mode 100644 index 00000000..0dbc7d8d --- /dev/null +++ b/src/codex/types/projects/remediation_edit_answer_params.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +__all__ = ["RemediationEditAnswerParams"] + + +class RemediationEditAnswerParams(TypedDict, total=False): + project_id: Required[str] + + answer: Required[str] diff --git a/src/codex/types/projects/remediation_edit_answer_response.py b/src/codex/types/projects/remediation_edit_answer_response.py new file mode 100644 index 00000000..d8b34323 --- /dev/null +++ b/src/codex/types/projects/remediation_edit_answer_response.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["RemediationEditAnswerResponse"] + + +class RemediationEditAnswerResponse(BaseModel): + id: str + + answered_at: Optional[datetime] = None + + answered_by: Optional[str] = None + + created_at: datetime + + last_edited_at: Optional[datetime] = None + + last_edited_by: Optional[str] = None + + project_id: str + + question: str + + status: Literal["ACTIVE", "DRAFT", "ACTIVE_WITH_DRAFT", "NOT_STARTED", "PAUSED"] + + answer: Optional[str] = None + + draft_answer: Optional[str] = None diff --git a/src/codex/types/projects/remediation_edit_draft_answer_params.py b/src/codex/types/projects/remediation_edit_draft_answer_params.py new file mode 100644 index 00000000..9f51ec6f --- /dev/null +++ b/src/codex/types/projects/remediation_edit_draft_answer_params.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +__all__ = ["RemediationEditDraftAnswerParams"] + + +class RemediationEditDraftAnswerParams(TypedDict, total=False): + project_id: Required[str] + + draft_answer: Required[str] diff --git a/src/codex/types/projects/remediation_edit_draft_answer_response.py b/src/codex/types/projects/remediation_edit_draft_answer_response.py new file mode 100644 index 00000000..828035e8 --- /dev/null +++ b/src/codex/types/projects/remediation_edit_draft_answer_response.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["RemediationEditDraftAnswerResponse"] + + +class RemediationEditDraftAnswerResponse(BaseModel): + id: str + + answered_at: Optional[datetime] = None + + answered_by: Optional[str] = None + + created_at: datetime + + last_edited_at: Optional[datetime] = None + + last_edited_by: Optional[str] = None + + project_id: str + + question: str + + status: Literal["ACTIVE", "DRAFT", "ACTIVE_WITH_DRAFT", "NOT_STARTED", "PAUSED"] + + answer: Optional[str] = None + + draft_answer: Optional[str] = None diff --git a/src/codex/types/projects/remediation_get_resolved_logs_count_response.py b/src/codex/types/projects/remediation_get_resolved_logs_count_response.py new file mode 100644 index 00000000..79997b09 --- /dev/null +++ b/src/codex/types/projects/remediation_get_resolved_logs_count_response.py @@ -0,0 +1,35 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["RemediationGetResolvedLogsCountResponse"] + + +class RemediationGetResolvedLogsCountResponse(BaseModel): + id: str + + answered_at: Optional[datetime] = None + + answered_by: Optional[str] = None + + created_at: datetime + + last_edited_at: Optional[datetime] = None + + last_edited_by: Optional[str] = None + + project_id: str + + question: str + + resolved_logs_count: int + + status: Literal["ACTIVE", "DRAFT", "ACTIVE_WITH_DRAFT", "NOT_STARTED", "PAUSED"] + + answer: Optional[str] = None + + draft_answer: Optional[str] = None diff --git a/src/codex/types/projects/remediation_list_params.py b/src/codex/types/projects/remediation_list_params.py new file mode 100644 index 00000000..65cd08d0 --- /dev/null +++ b/src/codex/types/projects/remediation_list_params.py @@ -0,0 +1,39 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
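Continuing the sketch, promoting a draft answer combines the `edit_draft_answer` and `publish` endpoints whose schemas appear above. The method names and parameters come from the patch; the client and accessor path remain assumptions.

from codex import Codex  # assumed top-level client export

client = Codex()  # construction/auth details are assumed

client.projects.remediations.edit_draft_answer(
    "remediation-id",
    project_id="project-id",
    draft_answer="Rotate keys from Settings -> API keys, then update your environment.",
)
published = client.projects.remediations.publish(
    "remediation-id",
    project_id="project-id",
)
print(published.status, published.answered_at)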
+ +from __future__ import annotations + +from typing import List, Union, Optional +from datetime import datetime +from typing_extensions import Literal, Annotated, TypedDict + +from ..._utils import PropertyInfo + +__all__ = ["RemediationListParams"] + + +class RemediationListParams(TypedDict, total=False): + created_at_end: Annotated[Union[str, datetime, None], PropertyInfo(format="iso8601")] + """Filter remediations created at or before this timestamp""" + + created_at_start: Annotated[Union[str, datetime, None], PropertyInfo(format="iso8601")] + """Filter remediations created at or after this timestamp""" + + last_edited_at_end: Annotated[Union[str, datetime, None], PropertyInfo(format="iso8601")] + """Filter remediations last edited at or before this timestamp""" + + last_edited_at_start: Annotated[Union[str, datetime, None], PropertyInfo(format="iso8601")] + """Filter remediations last edited at or after this timestamp""" + + last_edited_by: Optional[str] + """Filter by last edited by user ID""" + + limit: int + + offset: int + + order: Literal["asc", "desc"] + + sort: Optional[Literal["created_at", "last_edited_at", "resolved_logs_count"]] + + status: Optional[List[Literal["ACTIVE", "DRAFT", "ACTIVE_WITH_DRAFT", "PAUSED"]]] + """Filter remediations that have ANY of these statuses (OR operation)""" diff --git a/src/codex/types/projects/remediation_list_resolved_logs_response.py b/src/codex/types/projects/remediation_list_resolved_logs_response.py new file mode 100644 index 00000000..6181028d --- /dev/null +++ b/src/codex/types/projects/remediation_list_resolved_logs_response.py @@ -0,0 +1,85 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, List, Union, Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["RemediationListResolvedLogsResponse", "QueryLog", "QueryLogContext"] + + +class QueryLogContext(BaseModel): + content: str + """The actual content/text of the document.""" + + id: Optional[str] = None + """Unique identifier for the document. Useful for tracking documents""" + + source: Optional[str] = None + """Source or origin of the document. Useful for citations.""" + + tags: Optional[List[str]] = None + """Tags or categories for the document. Useful for filtering""" + + title: Optional[str] = None + """Title or heading of the document. Useful for display and context.""" + + +class QueryLog(BaseModel): + id: str + + created_at: datetime + + formatted_eval_scores: Optional[Dict[str, Dict[str, Union[float, Literal["pass", "fail"]]]]] = None + """Format evaluation scores for frontend display with pass/fail status. + + Returns: Dictionary mapping eval keys to their formatted representation: { + "eval_key": { "score": float, "status": "pass" | "fail" } } Returns None if + eval_scores is None. 
+ """ + + is_bad_response: bool + """If an eval with should_escalate=True failed""" + + project_id: str + + question: str + + remediation_id: str + + was_cache_hit: Optional[bool] = None + """If similar query already answered, or None if cache was not checked""" + + context: Optional[List[QueryLogContext]] = None + """RAG context used for the query""" + + custom_metadata: Optional[object] = None + """Arbitrary metadata supplied by the user/system""" + + custom_metadata_keys: Optional[List[str]] = None + """Keys of the custom metadata""" + + eval_issue_labels: Optional[List[str]] = None + """Labels derived from evaluation scores""" + + eval_scores: Optional[Dict[str, float]] = None + """Evaluation scores for the original response""" + + eval_thresholds: Optional[Dict[str, Dict[str, Union[float, str]]]] = None + """Evaluation thresholds and directions at time of creation""" + + evaluated_response: Optional[str] = None + """The response being evaluated from the RAG system (before any remediation)""" + + primary_eval_issue: Optional[str] = None + """Primary issue identified in evaluation""" + + primary_eval_issue_score: Optional[float] = None + """Score of the primary eval issue""" + + +class RemediationListResolvedLogsResponse(BaseModel): + query_logs: List[QueryLog] + + total_count: int diff --git a/src/codex/types/projects/remediation_list_response.py b/src/codex/types/projects/remediation_list_response.py new file mode 100644 index 00000000..3e737970 --- /dev/null +++ b/src/codex/types/projects/remediation_list_response.py @@ -0,0 +1,41 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["RemediationListResponse", "Remediation"] + + +class Remediation(BaseModel): + id: str + + answered_at: Optional[datetime] = None + + answered_by: Optional[str] = None + + created_at: datetime + + last_edited_at: Optional[datetime] = None + + last_edited_by: Optional[str] = None + + project_id: str + + question: str + + resolved_logs_count: int + + status: Literal["ACTIVE", "DRAFT", "ACTIVE_WITH_DRAFT", "NOT_STARTED", "PAUSED"] + + answer: Optional[str] = None + + draft_answer: Optional[str] = None + + +class RemediationListResponse(BaseModel): + remediations: List[Remediation] + + total_count: int diff --git a/src/codex/types/projects/remediation_pause_response.py b/src/codex/types/projects/remediation_pause_response.py new file mode 100644 index 00000000..ae453581 --- /dev/null +++ b/src/codex/types/projects/remediation_pause_response.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["RemediationPauseResponse"] + + +class RemediationPauseResponse(BaseModel): + id: str + + answered_at: Optional[datetime] = None + + answered_by: Optional[str] = None + + created_at: datetime + + last_edited_at: Optional[datetime] = None + + last_edited_by: Optional[str] = None + + project_id: str + + question: str + + status: Literal["ACTIVE", "DRAFT", "ACTIVE_WITH_DRAFT", "NOT_STARTED", "PAUSED"] + + answer: Optional[str] = None + + draft_answer: Optional[str] = None diff --git a/src/codex/types/projects/remediation_publish_response.py b/src/codex/types/projects/remediation_publish_response.py new file mode 100644 index 00000000..5eb2c622 --- /dev/null +++ b/src/codex/types/projects/remediation_publish_response.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["RemediationPublishResponse"] + + +class RemediationPublishResponse(BaseModel): + id: str + + answered_at: Optional[datetime] = None + + answered_by: Optional[str] = None + + created_at: datetime + + last_edited_at: Optional[datetime] = None + + last_edited_by: Optional[str] = None + + project_id: str + + question: str + + status: Literal["ACTIVE", "DRAFT", "ACTIVE_WITH_DRAFT", "NOT_STARTED", "PAUSED"] + + answer: Optional[str] = None + + draft_answer: Optional[str] = None diff --git a/src/codex/types/projects/remediation_retrieve_response.py b/src/codex/types/projects/remediation_retrieve_response.py new file mode 100644 index 00000000..6fbd60d2 --- /dev/null +++ b/src/codex/types/projects/remediation_retrieve_response.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["RemediationRetrieveResponse"] + + +class RemediationRetrieveResponse(BaseModel): + id: str + + answered_at: Optional[datetime] = None + + answered_by: Optional[str] = None + + created_at: datetime + + last_edited_at: Optional[datetime] = None + + last_edited_by: Optional[str] = None + + project_id: str + + question: str + + status: Literal["ACTIVE", "DRAFT", "ACTIVE_WITH_DRAFT", "NOT_STARTED", "PAUSED"] + + answer: Optional[str] = None + + draft_answer: Optional[str] = None diff --git a/src/codex/types/projects/remediation_unpause_response.py b/src/codex/types/projects/remediation_unpause_response.py new file mode 100644 index 00000000..789484cd --- /dev/null +++ b/src/codex/types/projects/remediation_unpause_response.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["RemediationUnpauseResponse"] + + +class RemediationUnpauseResponse(BaseModel): + id: str + + answered_at: Optional[datetime] = None + + answered_by: Optional[str] = None + + created_at: datetime + + last_edited_at: Optional[datetime] = None + + last_edited_by: Optional[str] = None + + project_id: str + + question: str + + status: Literal["ACTIVE", "DRAFT", "ACTIVE_WITH_DRAFT", "NOT_STARTED", "PAUSED"] + + answer: Optional[str] = None + + draft_answer: Optional[str] = None diff --git a/tests/api_resources/projects/test_evals.py b/tests/api_resources/projects/test_evals.py new file mode 100644 index 00000000..c16fbc37 --- /dev/null +++ b/tests/api_resources/projects/test_evals.py @@ -0,0 +1,679 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from codex import Codex, AsyncCodex +from codex.types import ProjectReturnSchema +from tests.utils import assert_matches_type +from codex.types.projects import EvalListResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestEvals: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: Codex) -> None: + eval = client.projects.evals.create( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + criteria="criteria", + eval_key="eval_key", + name="name", + ) + assert_matches_type(ProjectReturnSchema, eval, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: Codex) -> None: + eval = client.projects.evals.create( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + criteria="criteria", + eval_key="eval_key", + name="name", + context_identifier="context_identifier", + enabled=True, + is_default=True, + priority=0, + query_identifier="query_identifier", + response_identifier="response_identifier", + should_escalate=True, + threshold=0, + threshold_direction="above", + ) + assert_matches_type(ProjectReturnSchema, eval, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: Codex) -> None: + response = client.projects.evals.with_raw_response.create( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + criteria="criteria", + eval_key="eval_key", + name="name", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + eval = response.parse() + assert_matches_type(ProjectReturnSchema, eval, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: Codex) -> None: + with client.projects.evals.with_streaming_response.create( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + criteria="criteria", + eval_key="eval_key", + name="name", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + eval = response.parse() + assert_matches_type(ProjectReturnSchema, eval, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_create(self, client: Codex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but 
received ''"): + client.projects.evals.with_raw_response.create( + project_id="", + criteria="criteria", + eval_key="eval_key", + name="name", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update_overload_1(self, client: Codex) -> None: + eval = client.projects.evals.update( + path_eval_key="eval_key", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + criteria="criteria", + body_eval_key="eval_key", + name="name", + ) + assert_matches_type(ProjectReturnSchema, eval, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params_overload_1(self, client: Codex) -> None: + eval = client.projects.evals.update( + path_eval_key="eval_key", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + criteria="criteria", + body_eval_key="eval_key", + name="name", + context_identifier="context_identifier", + enabled=True, + is_default=True, + priority=0, + query_identifier="query_identifier", + response_identifier="response_identifier", + should_escalate=True, + threshold=0, + threshold_direction="above", + ) + assert_matches_type(ProjectReturnSchema, eval, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update_overload_1(self, client: Codex) -> None: + response = client.projects.evals.with_raw_response.update( + path_eval_key="eval_key", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + criteria="criteria", + body_eval_key="eval_key", + name="name", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + eval = response.parse() + assert_matches_type(ProjectReturnSchema, eval, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update_overload_1(self, client: Codex) -> None: + with client.projects.evals.with_streaming_response.update( + path_eval_key="eval_key", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + criteria="criteria", + body_eval_key="eval_key", + name="name", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + eval = response.parse() + assert_matches_type(ProjectReturnSchema, eval, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update_overload_1(self, client: Codex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.projects.evals.with_raw_response.update( + path_eval_key="eval_key", + project_id="", + criteria="criteria", + body_eval_key="eval_key", + name="name", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_eval_key` but received ''"): + client.projects.evals.with_raw_response.update( + path_eval_key="", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + criteria="criteria", + body_eval_key="eval_key", + name="name", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update_overload_2(self, client: Codex) -> None: + eval = client.projects.evals.update( + path_eval_key="eval_key", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + body_eval_key="eval_key", + ) + assert_matches_type(ProjectReturnSchema, eval, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params_overload_2(self, client: Codex) -> None: + eval = client.projects.evals.update( + path_eval_key="eval_key", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + body_eval_key="eval_key", + enabled=True, + 
priority=0, + should_escalate=True, + threshold=0, + threshold_direction="above", + ) + assert_matches_type(ProjectReturnSchema, eval, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update_overload_2(self, client: Codex) -> None: + response = client.projects.evals.with_raw_response.update( + path_eval_key="eval_key", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + body_eval_key="eval_key", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + eval = response.parse() + assert_matches_type(ProjectReturnSchema, eval, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update_overload_2(self, client: Codex) -> None: + with client.projects.evals.with_streaming_response.update( + path_eval_key="eval_key", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + body_eval_key="eval_key", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + eval = response.parse() + assert_matches_type(ProjectReturnSchema, eval, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update_overload_2(self, client: Codex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.projects.evals.with_raw_response.update( + path_eval_key="eval_key", + project_id="", + body_eval_key="eval_key", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_eval_key` but received ''"): + client.projects.evals.with_raw_response.update( + path_eval_key="", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + body_eval_key="eval_key", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: Codex) -> None: + eval = client.projects.evals.list( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(EvalListResponse, eval, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: Codex) -> None: + response = client.projects.evals.with_raw_response.list( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + eval = response.parse() + assert_matches_type(EvalListResponse, eval, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: Codex) -> None: + with client.projects.evals.with_streaming_response.list( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + eval = response.parse() + assert_matches_type(EvalListResponse, eval, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list(self, client: Codex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.projects.evals.with_raw_response.list( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: Codex) -> None: + eval = client.projects.evals.delete( + eval_key="eval_key", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(ProjectReturnSchema, eval, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: 
Codex) -> None: + response = client.projects.evals.with_raw_response.delete( + eval_key="eval_key", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + eval = response.parse() + assert_matches_type(ProjectReturnSchema, eval, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: Codex) -> None: + with client.projects.evals.with_streaming_response.delete( + eval_key="eval_key", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + eval = response.parse() + assert_matches_type(ProjectReturnSchema, eval, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: Codex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.projects.evals.with_raw_response.delete( + eval_key="eval_key", + project_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `eval_key` but received ''"): + client.projects.evals.with_raw_response.delete( + eval_key="", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + +class TestAsyncEvals: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncCodex) -> None: + eval = await async_client.projects.evals.create( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + criteria="criteria", + eval_key="eval_key", + name="name", + ) + assert_matches_type(ProjectReturnSchema, eval, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncCodex) -> None: + eval = await async_client.projects.evals.create( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + criteria="criteria", + eval_key="eval_key", + name="name", + context_identifier="context_identifier", + enabled=True, + is_default=True, + priority=0, + query_identifier="query_identifier", + response_identifier="response_identifier", + should_escalate=True, + threshold=0, + threshold_direction="above", + ) + assert_matches_type(ProjectReturnSchema, eval, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncCodex) -> None: + response = await async_client.projects.evals.with_raw_response.create( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + criteria="criteria", + eval_key="eval_key", + name="name", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + eval = await response.parse() + assert_matches_type(ProjectReturnSchema, eval, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncCodex) -> None: + async with async_client.projects.evals.with_streaming_response.create( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + criteria="criteria", + eval_key="eval_key", + name="name", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + eval = await response.parse() + assert_matches_type(ProjectReturnSchema, eval, path=["response"]) + 
+ assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_create(self, async_client: AsyncCodex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.projects.evals.with_raw_response.create( + project_id="", + criteria="criteria", + eval_key="eval_key", + name="name", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update_overload_1(self, async_client: AsyncCodex) -> None: + eval = await async_client.projects.evals.update( + path_eval_key="eval_key", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + criteria="criteria", + body_eval_key="eval_key", + name="name", + ) + assert_matches_type(ProjectReturnSchema, eval, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params_overload_1(self, async_client: AsyncCodex) -> None: + eval = await async_client.projects.evals.update( + path_eval_key="eval_key", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + criteria="criteria", + body_eval_key="eval_key", + name="name", + context_identifier="context_identifier", + enabled=True, + is_default=True, + priority=0, + query_identifier="query_identifier", + response_identifier="response_identifier", + should_escalate=True, + threshold=0, + threshold_direction="above", + ) + assert_matches_type(ProjectReturnSchema, eval, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update_overload_1(self, async_client: AsyncCodex) -> None: + response = await async_client.projects.evals.with_raw_response.update( + path_eval_key="eval_key", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + criteria="criteria", + body_eval_key="eval_key", + name="name", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + eval = await response.parse() + assert_matches_type(ProjectReturnSchema, eval, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update_overload_1(self, async_client: AsyncCodex) -> None: + async with async_client.projects.evals.with_streaming_response.update( + path_eval_key="eval_key", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + criteria="criteria", + body_eval_key="eval_key", + name="name", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + eval = await response.parse() + assert_matches_type(ProjectReturnSchema, eval, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update_overload_1(self, async_client: AsyncCodex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.projects.evals.with_raw_response.update( + path_eval_key="eval_key", + project_id="", + criteria="criteria", + body_eval_key="eval_key", + name="name", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_eval_key` but received ''"): + await async_client.projects.evals.with_raw_response.update( + path_eval_key="", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + criteria="criteria", + body_eval_key="eval_key", + name="name", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update_overload_2(self, async_client: AsyncCodex) -> None: + eval = await async_client.projects.evals.update( + 
path_eval_key="eval_key", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + body_eval_key="eval_key", + ) + assert_matches_type(ProjectReturnSchema, eval, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params_overload_2(self, async_client: AsyncCodex) -> None: + eval = await async_client.projects.evals.update( + path_eval_key="eval_key", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + body_eval_key="eval_key", + enabled=True, + priority=0, + should_escalate=True, + threshold=0, + threshold_direction="above", + ) + assert_matches_type(ProjectReturnSchema, eval, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update_overload_2(self, async_client: AsyncCodex) -> None: + response = await async_client.projects.evals.with_raw_response.update( + path_eval_key="eval_key", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + body_eval_key="eval_key", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + eval = await response.parse() + assert_matches_type(ProjectReturnSchema, eval, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update_overload_2(self, async_client: AsyncCodex) -> None: + async with async_client.projects.evals.with_streaming_response.update( + path_eval_key="eval_key", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + body_eval_key="eval_key", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + eval = await response.parse() + assert_matches_type(ProjectReturnSchema, eval, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update_overload_2(self, async_client: AsyncCodex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.projects.evals.with_raw_response.update( + path_eval_key="eval_key", + project_id="", + body_eval_key="eval_key", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_eval_key` but received ''"): + await async_client.projects.evals.with_raw_response.update( + path_eval_key="", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + body_eval_key="eval_key", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncCodex) -> None: + eval = await async_client.projects.evals.list( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(EvalListResponse, eval, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncCodex) -> None: + response = await async_client.projects.evals.with_raw_response.list( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + eval = await response.parse() + assert_matches_type(EvalListResponse, eval, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncCodex) -> None: + async with async_client.projects.evals.with_streaming_response.list( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + eval = await response.parse() + 
assert_matches_type(EvalListResponse, eval, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list(self, async_client: AsyncCodex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.projects.evals.with_raw_response.list( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncCodex) -> None: + eval = await async_client.projects.evals.delete( + eval_key="eval_key", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(ProjectReturnSchema, eval, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncCodex) -> None: + response = await async_client.projects.evals.with_raw_response.delete( + eval_key="eval_key", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + eval = await response.parse() + assert_matches_type(ProjectReturnSchema, eval, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncCodex) -> None: + async with async_client.projects.evals.with_streaming_response.delete( + eval_key="eval_key", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + eval = await response.parse() + assert_matches_type(ProjectReturnSchema, eval, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncCodex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.projects.evals.with_raw_response.delete( + eval_key="eval_key", + project_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `eval_key` but received ''"): + await async_client.projects.evals.with_raw_response.delete( + eval_key="", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) diff --git a/tests/api_resources/projects/test_query_logs.py b/tests/api_resources/projects/test_query_logs.py new file mode 100644 index 00000000..847af5df --- /dev/null +++ b/tests/api_resources/projects/test_query_logs.py @@ -0,0 +1,593 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from codex import Codex, AsyncCodex +from tests.utils import assert_matches_type +from codex._utils import parse_datetime +from codex.types.projects import ( + QueryLogListResponse, + QueryLogRetrieveResponse, + QueryLogListGroupsResponse, + QueryLogListByGroupResponse, + QueryLogStartRemediationResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestQueryLogs: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: Codex) -> None: + query_log = client.projects.query_logs.retrieve( + query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(QueryLogRetrieveResponse, query_log, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: Codex) -> None: + response = client.projects.query_logs.with_raw_response.retrieve( + query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + query_log = response.parse() + assert_matches_type(QueryLogRetrieveResponse, query_log, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: Codex) -> None: + with client.projects.query_logs.with_streaming_response.retrieve( + query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + query_log = response.parse() + assert_matches_type(QueryLogRetrieveResponse, query_log, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: Codex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.projects.query_logs.with_raw_response.retrieve( + query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `query_log_id` but received ''"): + client.projects.query_logs.with_raw_response.retrieve( + query_log_id="", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: Codex) -> None: + query_log = client.projects.query_logs.list( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(QueryLogListResponse, query_log, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: Codex) -> None: + query_log = client.projects.query_logs.list( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + created_at_end=parse_datetime("2019-12-27T18:11:19.117Z"), + created_at_start=parse_datetime("2019-12-27T18:11:19.117Z"), + custom_metadata="custom_metadata", + limit=1, + offset=0, + order="asc", + primary_eval_issue=["hallucination"], + sort="created_at", + was_cache_hit=True, + ) + assert_matches_type(QueryLogListResponse, query_log, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: Codex) -> None: + response = 
client.projects.query_logs.with_raw_response.list( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + query_log = response.parse() + assert_matches_type(QueryLogListResponse, query_log, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: Codex) -> None: + with client.projects.query_logs.with_streaming_response.list( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + query_log = response.parse() + assert_matches_type(QueryLogListResponse, query_log, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list(self, client: Codex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.projects.query_logs.with_raw_response.list( + project_id="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list_by_group(self, client: Codex) -> None: + query_log = client.projects.query_logs.list_by_group( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(QueryLogListByGroupResponse, query_log, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_by_group_with_all_params(self, client: Codex) -> None: + query_log = client.projects.query_logs.list_by_group( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + created_at_end=parse_datetime("2019-12-27T18:11:19.117Z"), + created_at_start=parse_datetime("2019-12-27T18:11:19.117Z"), + custom_metadata="custom_metadata", + limit=1, + offset=0, + order="asc", + primary_eval_issue=["hallucination"], + remediation_ids=["182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e"], + sort="created_at", + was_cache_hit=True, + ) + assert_matches_type(QueryLogListByGroupResponse, query_log, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list_by_group(self, client: Codex) -> None: + response = client.projects.query_logs.with_raw_response.list_by_group( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + query_log = response.parse() + assert_matches_type(QueryLogListByGroupResponse, query_log, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list_by_group(self, client: Codex) -> None: + with client.projects.query_logs.with_streaming_response.list_by_group( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + query_log = response.parse() + assert_matches_type(QueryLogListByGroupResponse, query_log, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list_by_group(self, client: Codex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.projects.query_logs.with_raw_response.list_by_group( + project_id="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list_groups(self, client: Codex) -> None: + query_log = client.projects.query_logs.list_groups( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + 
assert_matches_type(QueryLogListGroupsResponse, query_log, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_groups_with_all_params(self, client: Codex) -> None: + query_log = client.projects.query_logs.list_groups( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + created_at_end=parse_datetime("2019-12-27T18:11:19.117Z"), + created_at_start=parse_datetime("2019-12-27T18:11:19.117Z"), + custom_metadata="custom_metadata", + limit=1, + offset=0, + order="asc", + primary_eval_issue=["hallucination"], + sort="created_at", + was_cache_hit=True, + ) + assert_matches_type(QueryLogListGroupsResponse, query_log, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list_groups(self, client: Codex) -> None: + response = client.projects.query_logs.with_raw_response.list_groups( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + query_log = response.parse() + assert_matches_type(QueryLogListGroupsResponse, query_log, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list_groups(self, client: Codex) -> None: + with client.projects.query_logs.with_streaming_response.list_groups( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + query_log = response.parse() + assert_matches_type(QueryLogListGroupsResponse, query_log, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list_groups(self, client: Codex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.projects.query_logs.with_raw_response.list_groups( + project_id="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_start_remediation(self, client: Codex) -> None: + query_log = client.projects.query_logs.start_remediation( + query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(QueryLogStartRemediationResponse, query_log, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_start_remediation(self, client: Codex) -> None: + response = client.projects.query_logs.with_raw_response.start_remediation( + query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + query_log = response.parse() + assert_matches_type(QueryLogStartRemediationResponse, query_log, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_start_remediation(self, client: Codex) -> None: + with client.projects.query_logs.with_streaming_response.start_remediation( + query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + query_log = response.parse() + assert_matches_type(QueryLogStartRemediationResponse, query_log, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_start_remediation(self, client: Codex) -> None: + with 
pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.projects.query_logs.with_raw_response.start_remediation( + query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `query_log_id` but received ''"): + client.projects.query_logs.with_raw_response.start_remediation( + query_log_id="", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + +class TestAsyncQueryLogs: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncCodex) -> None: + query_log = await async_client.projects.query_logs.retrieve( + query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(QueryLogRetrieveResponse, query_log, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncCodex) -> None: + response = await async_client.projects.query_logs.with_raw_response.retrieve( + query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + query_log = await response.parse() + assert_matches_type(QueryLogRetrieveResponse, query_log, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncCodex) -> None: + async with async_client.projects.query_logs.with_streaming_response.retrieve( + query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + query_log = await response.parse() + assert_matches_type(QueryLogRetrieveResponse, query_log, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncCodex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.projects.query_logs.with_raw_response.retrieve( + query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `query_log_id` but received ''"): + await async_client.projects.query_logs.with_raw_response.retrieve( + query_log_id="", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncCodex) -> None: + query_log = await async_client.projects.query_logs.list( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(QueryLogListResponse, query_log, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncCodex) -> None: + query_log = await async_client.projects.query_logs.list( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + created_at_end=parse_datetime("2019-12-27T18:11:19.117Z"), + created_at_start=parse_datetime("2019-12-27T18:11:19.117Z"), + custom_metadata="custom_metadata", + limit=1, + offset=0, + order="asc", + primary_eval_issue=["hallucination"], + sort="created_at", 
+ was_cache_hit=True, + ) + assert_matches_type(QueryLogListResponse, query_log, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncCodex) -> None: + response = await async_client.projects.query_logs.with_raw_response.list( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + query_log = await response.parse() + assert_matches_type(QueryLogListResponse, query_log, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncCodex) -> None: + async with async_client.projects.query_logs.with_streaming_response.list( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + query_log = await response.parse() + assert_matches_type(QueryLogListResponse, query_log, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list(self, async_client: AsyncCodex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.projects.query_logs.with_raw_response.list( + project_id="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list_by_group(self, async_client: AsyncCodex) -> None: + query_log = await async_client.projects.query_logs.list_by_group( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(QueryLogListByGroupResponse, query_log, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_by_group_with_all_params(self, async_client: AsyncCodex) -> None: + query_log = await async_client.projects.query_logs.list_by_group( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + created_at_end=parse_datetime("2019-12-27T18:11:19.117Z"), + created_at_start=parse_datetime("2019-12-27T18:11:19.117Z"), + custom_metadata="custom_metadata", + limit=1, + offset=0, + order="asc", + primary_eval_issue=["hallucination"], + remediation_ids=["182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e"], + sort="created_at", + was_cache_hit=True, + ) + assert_matches_type(QueryLogListByGroupResponse, query_log, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list_by_group(self, async_client: AsyncCodex) -> None: + response = await async_client.projects.query_logs.with_raw_response.list_by_group( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + query_log = await response.parse() + assert_matches_type(QueryLogListByGroupResponse, query_log, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list_by_group(self, async_client: AsyncCodex) -> None: + async with async_client.projects.query_logs.with_streaming_response.list_by_group( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + query_log = await response.parse() + assert_matches_type(QueryLogListByGroupResponse, query_log, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def 
test_path_params_list_by_group(self, async_client: AsyncCodex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.projects.query_logs.with_raw_response.list_by_group( + project_id="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list_groups(self, async_client: AsyncCodex) -> None: + query_log = await async_client.projects.query_logs.list_groups( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(QueryLogListGroupsResponse, query_log, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_groups_with_all_params(self, async_client: AsyncCodex) -> None: + query_log = await async_client.projects.query_logs.list_groups( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + created_at_end=parse_datetime("2019-12-27T18:11:19.117Z"), + created_at_start=parse_datetime("2019-12-27T18:11:19.117Z"), + custom_metadata="custom_metadata", + limit=1, + offset=0, + order="asc", + primary_eval_issue=["hallucination"], + sort="created_at", + was_cache_hit=True, + ) + assert_matches_type(QueryLogListGroupsResponse, query_log, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list_groups(self, async_client: AsyncCodex) -> None: + response = await async_client.projects.query_logs.with_raw_response.list_groups( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + query_log = await response.parse() + assert_matches_type(QueryLogListGroupsResponse, query_log, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list_groups(self, async_client: AsyncCodex) -> None: + async with async_client.projects.query_logs.with_streaming_response.list_groups( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + query_log = await response.parse() + assert_matches_type(QueryLogListGroupsResponse, query_log, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list_groups(self, async_client: AsyncCodex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.projects.query_logs.with_raw_response.list_groups( + project_id="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_start_remediation(self, async_client: AsyncCodex) -> None: + query_log = await async_client.projects.query_logs.start_remediation( + query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(QueryLogStartRemediationResponse, query_log, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_start_remediation(self, async_client: AsyncCodex) -> None: + response = await async_client.projects.query_logs.with_raw_response.start_remediation( + query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + query_log = await response.parse() + assert_matches_type(QueryLogStartRemediationResponse, query_log, path=["response"]) + + @pytest.mark.skip() 
+ @parametrize + async def test_streaming_response_start_remediation(self, async_client: AsyncCodex) -> None: + async with async_client.projects.query_logs.with_streaming_response.start_remediation( + query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + query_log = await response.parse() + assert_matches_type(QueryLogStartRemediationResponse, query_log, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_start_remediation(self, async_client: AsyncCodex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.projects.query_logs.with_raw_response.start_remediation( + query_log_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `query_log_id` but received ''"): + await async_client.projects.query_logs.with_raw_response.start_remediation( + query_log_id="", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) diff --git a/tests/api_resources/projects/test_remediations.py b/tests/api_resources/projects/test_remediations.py new file mode 100644 index 00000000..cfd5ae76 --- /dev/null +++ b/tests/api_resources/projects/test_remediations.py @@ -0,0 +1,1224 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from codex import Codex, AsyncCodex +from tests.utils import assert_matches_type +from codex._utils import parse_datetime +from codex.types.projects import ( + RemediationListResponse, + RemediationPauseResponse, + RemediationCreateResponse, + RemediationPublishResponse, + RemediationUnpauseResponse, + RemediationRetrieveResponse, + RemediationEditAnswerResponse, + RemediationEditDraftAnswerResponse, + RemediationListResolvedLogsResponse, + RemediationGetResolvedLogsCountResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestRemediations: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: Codex) -> None: + remediation = client.projects.remediations.create( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + question="question", + ) + assert_matches_type(RemediationCreateResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: Codex) -> None: + remediation = client.projects.remediations.create( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + question="question", + answer="answer", + draft_answer="draft_answer", + ) + assert_matches_type(RemediationCreateResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: Codex) -> None: + response = client.projects.remediations.with_raw_response.create( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + question="question", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + remediation = response.parse() + assert_matches_type(RemediationCreateResponse, remediation, path=["response"]) + + 
@pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: Codex) -> None: + with client.projects.remediations.with_streaming_response.create( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + question="question", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + remediation = response.parse() + assert_matches_type(RemediationCreateResponse, remediation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_create(self, client: Codex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.projects.remediations.with_raw_response.create( + project_id="", + question="question", + ) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: Codex) -> None: + remediation = client.projects.remediations.retrieve( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(RemediationRetrieveResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: Codex) -> None: + response = client.projects.remediations.with_raw_response.retrieve( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + remediation = response.parse() + assert_matches_type(RemediationRetrieveResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: Codex) -> None: + with client.projects.remediations.with_streaming_response.retrieve( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + remediation = response.parse() + assert_matches_type(RemediationRetrieveResponse, remediation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: Codex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.projects.remediations.with_raw_response.retrieve( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `remediation_id` but received ''"): + client.projects.remediations.with_raw_response.retrieve( + remediation_id="", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: Codex) -> None: + remediation = client.projects.remediations.list( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(RemediationListResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: Codex) -> None: + remediation = client.projects.remediations.list( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + created_at_end=parse_datetime("2019-12-27T18:11:19.117Z"), + created_at_start=parse_datetime("2019-12-27T18:11:19.117Z"), + last_edited_at_end=parse_datetime("2019-12-27T18:11:19.117Z"), + 
last_edited_at_start=parse_datetime("2019-12-27T18:11:19.117Z"), + last_edited_by="last_edited_by", + limit=1, + offset=0, + order="asc", + sort="created_at", + status=["ACTIVE"], + ) + assert_matches_type(RemediationListResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: Codex) -> None: + response = client.projects.remediations.with_raw_response.list( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + remediation = response.parse() + assert_matches_type(RemediationListResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: Codex) -> None: + with client.projects.remediations.with_streaming_response.list( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + remediation = response.parse() + assert_matches_type(RemediationListResponse, remediation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list(self, client: Codex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.projects.remediations.with_raw_response.list( + project_id="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: Codex) -> None: + remediation = client.projects.remediations.delete( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert remediation is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: Codex) -> None: + response = client.projects.remediations.with_raw_response.delete( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + remediation = response.parse() + assert remediation is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: Codex) -> None: + with client.projects.remediations.with_streaming_response.delete( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + remediation = response.parse() + assert remediation is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: Codex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.projects.remediations.with_raw_response.delete( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `remediation_id` but received ''"): + client.projects.remediations.with_raw_response.delete( + remediation_id="", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + @pytest.mark.skip() + @parametrize + def test_method_edit_answer(self, client: Codex) -> None: + remediation = client.projects.remediations.edit_answer( + 
remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + answer="answer", + ) + assert_matches_type(RemediationEditAnswerResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_edit_answer(self, client: Codex) -> None: + response = client.projects.remediations.with_raw_response.edit_answer( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + answer="answer", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + remediation = response.parse() + assert_matches_type(RemediationEditAnswerResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_edit_answer(self, client: Codex) -> None: + with client.projects.remediations.with_streaming_response.edit_answer( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + answer="answer", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + remediation = response.parse() + assert_matches_type(RemediationEditAnswerResponse, remediation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_edit_answer(self, client: Codex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.projects.remediations.with_raw_response.edit_answer( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="", + answer="answer", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `remediation_id` but received ''"): + client.projects.remediations.with_raw_response.edit_answer( + remediation_id="", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + answer="answer", + ) + + @pytest.mark.skip() + @parametrize + def test_method_edit_draft_answer(self, client: Codex) -> None: + remediation = client.projects.remediations.edit_draft_answer( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + draft_answer="draft_answer", + ) + assert_matches_type(RemediationEditDraftAnswerResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_edit_draft_answer(self, client: Codex) -> None: + response = client.projects.remediations.with_raw_response.edit_draft_answer( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + draft_answer="draft_answer", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + remediation = response.parse() + assert_matches_type(RemediationEditDraftAnswerResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_edit_draft_answer(self, client: Codex) -> None: + with client.projects.remediations.with_streaming_response.edit_draft_answer( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + draft_answer="draft_answer", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + remediation = response.parse() + assert_matches_type(RemediationEditDraftAnswerResponse, remediation, 
path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_edit_draft_answer(self, client: Codex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.projects.remediations.with_raw_response.edit_draft_answer( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="", + draft_answer="draft_answer", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `remediation_id` but received ''"): + client.projects.remediations.with_raw_response.edit_draft_answer( + remediation_id="", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + draft_answer="draft_answer", + ) + + @pytest.mark.skip() + @parametrize + def test_method_get_resolved_logs_count(self, client: Codex) -> None: + remediation = client.projects.remediations.get_resolved_logs_count( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(RemediationGetResolvedLogsCountResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_get_resolved_logs_count(self, client: Codex) -> None: + response = client.projects.remediations.with_raw_response.get_resolved_logs_count( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + remediation = response.parse() + assert_matches_type(RemediationGetResolvedLogsCountResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_get_resolved_logs_count(self, client: Codex) -> None: + with client.projects.remediations.with_streaming_response.get_resolved_logs_count( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + remediation = response.parse() + assert_matches_type(RemediationGetResolvedLogsCountResponse, remediation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_get_resolved_logs_count(self, client: Codex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.projects.remediations.with_raw_response.get_resolved_logs_count( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `remediation_id` but received ''"): + client.projects.remediations.with_raw_response.get_resolved_logs_count( + remediation_id="", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list_resolved_logs(self, client: Codex) -> None: + remediation = client.projects.remediations.list_resolved_logs( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(RemediationListResolvedLogsResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list_resolved_logs(self, client: Codex) -> None: + response = client.projects.remediations.with_raw_response.list_resolved_logs( + 
remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + remediation = response.parse() + assert_matches_type(RemediationListResolvedLogsResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list_resolved_logs(self, client: Codex) -> None: + with client.projects.remediations.with_streaming_response.list_resolved_logs( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + remediation = response.parse() + assert_matches_type(RemediationListResolvedLogsResponse, remediation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list_resolved_logs(self, client: Codex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.projects.remediations.with_raw_response.list_resolved_logs( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `remediation_id` but received ''"): + client.projects.remediations.with_raw_response.list_resolved_logs( + remediation_id="", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + @pytest.mark.skip() + @parametrize + def test_method_pause(self, client: Codex) -> None: + remediation = client.projects.remediations.pause( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(RemediationPauseResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_pause(self, client: Codex) -> None: + response = client.projects.remediations.with_raw_response.pause( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + remediation = response.parse() + assert_matches_type(RemediationPauseResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_pause(self, client: Codex) -> None: + with client.projects.remediations.with_streaming_response.pause( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + remediation = response.parse() + assert_matches_type(RemediationPauseResponse, remediation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_pause(self, client: Codex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.projects.remediations.with_raw_response.pause( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `remediation_id` but received ''"): + client.projects.remediations.with_raw_response.pause( + remediation_id="", + 
project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + @pytest.mark.skip() + @parametrize + def test_method_publish(self, client: Codex) -> None: + remediation = client.projects.remediations.publish( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(RemediationPublishResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_publish(self, client: Codex) -> None: + response = client.projects.remediations.with_raw_response.publish( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + remediation = response.parse() + assert_matches_type(RemediationPublishResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_publish(self, client: Codex) -> None: + with client.projects.remediations.with_streaming_response.publish( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + remediation = response.parse() + assert_matches_type(RemediationPublishResponse, remediation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_publish(self, client: Codex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.projects.remediations.with_raw_response.publish( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `remediation_id` but received ''"): + client.projects.remediations.with_raw_response.publish( + remediation_id="", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + @pytest.mark.skip() + @parametrize + def test_method_unpause(self, client: Codex) -> None: + remediation = client.projects.remediations.unpause( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(RemediationUnpauseResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_unpause(self, client: Codex) -> None: + response = client.projects.remediations.with_raw_response.unpause( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + remediation = response.parse() + assert_matches_type(RemediationUnpauseResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_unpause(self, client: Codex) -> None: + with client.projects.remediations.with_streaming_response.unpause( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + remediation = response.parse() + assert_matches_type(RemediationUnpauseResponse, remediation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def 
test_path_params_unpause(self, client: Codex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.projects.remediations.with_raw_response.unpause( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `remediation_id` but received ''"): + client.projects.remediations.with_raw_response.unpause( + remediation_id="", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + +class TestAsyncRemediations: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncCodex) -> None: + remediation = await async_client.projects.remediations.create( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + question="question", + ) + assert_matches_type(RemediationCreateResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncCodex) -> None: + remediation = await async_client.projects.remediations.create( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + question="question", + answer="answer", + draft_answer="draft_answer", + ) + assert_matches_type(RemediationCreateResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncCodex) -> None: + response = await async_client.projects.remediations.with_raw_response.create( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + question="question", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + remediation = await response.parse() + assert_matches_type(RemediationCreateResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncCodex) -> None: + async with async_client.projects.remediations.with_streaming_response.create( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + question="question", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + remediation = await response.parse() + assert_matches_type(RemediationCreateResponse, remediation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_create(self, async_client: AsyncCodex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.projects.remediations.with_raw_response.create( + project_id="", + question="question", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncCodex) -> None: + remediation = await async_client.projects.remediations.retrieve( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(RemediationRetrieveResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncCodex) -> None: + response = await async_client.projects.remediations.with_raw_response.retrieve( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert 
response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + remediation = await response.parse() + assert_matches_type(RemediationRetrieveResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncCodex) -> None: + async with async_client.projects.remediations.with_streaming_response.retrieve( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + remediation = await response.parse() + assert_matches_type(RemediationRetrieveResponse, remediation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncCodex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.projects.remediations.with_raw_response.retrieve( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `remediation_id` but received ''"): + await async_client.projects.remediations.with_raw_response.retrieve( + remediation_id="", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncCodex) -> None: + remediation = await async_client.projects.remediations.list( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(RemediationListResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncCodex) -> None: + remediation = await async_client.projects.remediations.list( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + created_at_end=parse_datetime("2019-12-27T18:11:19.117Z"), + created_at_start=parse_datetime("2019-12-27T18:11:19.117Z"), + last_edited_at_end=parse_datetime("2019-12-27T18:11:19.117Z"), + last_edited_at_start=parse_datetime("2019-12-27T18:11:19.117Z"), + last_edited_by="last_edited_by", + limit=1, + offset=0, + order="asc", + sort="created_at", + status=["ACTIVE"], + ) + assert_matches_type(RemediationListResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncCodex) -> None: + response = await async_client.projects.remediations.with_raw_response.list( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + remediation = await response.parse() + assert_matches_type(RemediationListResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncCodex) -> None: + async with async_client.projects.remediations.with_streaming_response.list( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + remediation = await response.parse() + assert_matches_type(RemediationListResponse, remediation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def 
test_path_params_list(self, async_client: AsyncCodex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.projects.remediations.with_raw_response.list( + project_id="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncCodex) -> None: + remediation = await async_client.projects.remediations.delete( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert remediation is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncCodex) -> None: + response = await async_client.projects.remediations.with_raw_response.delete( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + remediation = await response.parse() + assert remediation is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncCodex) -> None: + async with async_client.projects.remediations.with_streaming_response.delete( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + remediation = await response.parse() + assert remediation is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncCodex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.projects.remediations.with_raw_response.delete( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `remediation_id` but received ''"): + await async_client.projects.remediations.with_raw_response.delete( + remediation_id="", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_edit_answer(self, async_client: AsyncCodex) -> None: + remediation = await async_client.projects.remediations.edit_answer( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + answer="answer", + ) + assert_matches_type(RemediationEditAnswerResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_edit_answer(self, async_client: AsyncCodex) -> None: + response = await async_client.projects.remediations.with_raw_response.edit_answer( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + answer="answer", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + remediation = await response.parse() + assert_matches_type(RemediationEditAnswerResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_edit_answer(self, async_client: AsyncCodex) -> None: + async with async_client.projects.remediations.with_streaming_response.edit_answer( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + 
answer="answer", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + remediation = await response.parse() + assert_matches_type(RemediationEditAnswerResponse, remediation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_edit_answer(self, async_client: AsyncCodex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.projects.remediations.with_raw_response.edit_answer( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="", + answer="answer", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `remediation_id` but received ''"): + await async_client.projects.remediations.with_raw_response.edit_answer( + remediation_id="", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + answer="answer", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_edit_draft_answer(self, async_client: AsyncCodex) -> None: + remediation = await async_client.projects.remediations.edit_draft_answer( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + draft_answer="draft_answer", + ) + assert_matches_type(RemediationEditDraftAnswerResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_edit_draft_answer(self, async_client: AsyncCodex) -> None: + response = await async_client.projects.remediations.with_raw_response.edit_draft_answer( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + draft_answer="draft_answer", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + remediation = await response.parse() + assert_matches_type(RemediationEditDraftAnswerResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_edit_draft_answer(self, async_client: AsyncCodex) -> None: + async with async_client.projects.remediations.with_streaming_response.edit_draft_answer( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + draft_answer="draft_answer", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + remediation = await response.parse() + assert_matches_type(RemediationEditDraftAnswerResponse, remediation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_edit_draft_answer(self, async_client: AsyncCodex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.projects.remediations.with_raw_response.edit_draft_answer( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="", + draft_answer="draft_answer", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `remediation_id` but received ''"): + await async_client.projects.remediations.with_raw_response.edit_draft_answer( + remediation_id="", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + draft_answer="draft_answer", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_get_resolved_logs_count(self, async_client: AsyncCodex) -> None: + 
remediation = await async_client.projects.remediations.get_resolved_logs_count( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(RemediationGetResolvedLogsCountResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_get_resolved_logs_count(self, async_client: AsyncCodex) -> None: + response = await async_client.projects.remediations.with_raw_response.get_resolved_logs_count( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + remediation = await response.parse() + assert_matches_type(RemediationGetResolvedLogsCountResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_get_resolved_logs_count(self, async_client: AsyncCodex) -> None: + async with async_client.projects.remediations.with_streaming_response.get_resolved_logs_count( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + remediation = await response.parse() + assert_matches_type(RemediationGetResolvedLogsCountResponse, remediation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_get_resolved_logs_count(self, async_client: AsyncCodex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.projects.remediations.with_raw_response.get_resolved_logs_count( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `remediation_id` but received ''"): + await async_client.projects.remediations.with_raw_response.get_resolved_logs_count( + remediation_id="", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list_resolved_logs(self, async_client: AsyncCodex) -> None: + remediation = await async_client.projects.remediations.list_resolved_logs( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(RemediationListResolvedLogsResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list_resolved_logs(self, async_client: AsyncCodex) -> None: + response = await async_client.projects.remediations.with_raw_response.list_resolved_logs( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + remediation = await response.parse() + assert_matches_type(RemediationListResolvedLogsResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list_resolved_logs(self, async_client: AsyncCodex) -> None: + async with async_client.projects.remediations.with_streaming_response.list_resolved_logs( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + 
assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + remediation = await response.parse() + assert_matches_type(RemediationListResolvedLogsResponse, remediation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list_resolved_logs(self, async_client: AsyncCodex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.projects.remediations.with_raw_response.list_resolved_logs( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `remediation_id` but received ''"): + await async_client.projects.remediations.with_raw_response.list_resolved_logs( + remediation_id="", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_pause(self, async_client: AsyncCodex) -> None: + remediation = await async_client.projects.remediations.pause( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(RemediationPauseResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_pause(self, async_client: AsyncCodex) -> None: + response = await async_client.projects.remediations.with_raw_response.pause( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + remediation = await response.parse() + assert_matches_type(RemediationPauseResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_pause(self, async_client: AsyncCodex) -> None: + async with async_client.projects.remediations.with_streaming_response.pause( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + remediation = await response.parse() + assert_matches_type(RemediationPauseResponse, remediation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_pause(self, async_client: AsyncCodex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.projects.remediations.with_raw_response.pause( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `remediation_id` but received ''"): + await async_client.projects.remediations.with_raw_response.pause( + remediation_id="", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_publish(self, async_client: AsyncCodex) -> None: + remediation = await async_client.projects.remediations.publish( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(RemediationPublishResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_publish(self, async_client: 
AsyncCodex) -> None: + response = await async_client.projects.remediations.with_raw_response.publish( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + remediation = await response.parse() + assert_matches_type(RemediationPublishResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_publish(self, async_client: AsyncCodex) -> None: + async with async_client.projects.remediations.with_streaming_response.publish( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + remediation = await response.parse() + assert_matches_type(RemediationPublishResponse, remediation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_publish(self, async_client: AsyncCodex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.projects.remediations.with_raw_response.publish( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `remediation_id` but received ''"): + await async_client.projects.remediations.with_raw_response.publish( + remediation_id="", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_unpause(self, async_client: AsyncCodex) -> None: + remediation = await async_client.projects.remediations.unpause( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(RemediationUnpauseResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_unpause(self, async_client: AsyncCodex) -> None: + response = await async_client.projects.remediations.with_raw_response.unpause( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + remediation = await response.parse() + assert_matches_type(RemediationUnpauseResponse, remediation, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_unpause(self, async_client: AsyncCodex) -> None: + async with async_client.projects.remediations.with_streaming_response.unpause( + remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + remediation = await response.parse() + assert_matches_type(RemediationUnpauseResponse, remediation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_unpause(self, async_client: AsyncCodex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.projects.remediations.with_raw_response.unpause( + 
remediation_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + project_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `remediation_id` but received ''"): + await async_client.projects.remediations.with_raw_response.unpause( + remediation_id="", + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) diff --git a/tests/api_resources/test_projects.py b/tests/api_resources/test_projects.py index c2b5b7db..86bd8053 100644 --- a/tests/api_resources/test_projects.py +++ b/tests/api_resources/test_projects.py @@ -13,6 +13,7 @@ ProjectReturnSchema, ProjectRetrieveResponse, ProjectValidateResponse, + ProjectInviteSmeResponse, ProjectRetrieveAnalyticsResponse, ) from tests.utils import assert_matches_type @@ -515,6 +516,60 @@ def test_path_params_increment_queries(self, client: Codex) -> None: project_id="", ) + @pytest.mark.skip() + @parametrize + def test_method_invite_sme(self, client: Codex) -> None: + project = client.projects.invite_sme( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + email="email", + page_type="query_log", + url_query_string="url_query_string", + ) + assert_matches_type(ProjectInviteSmeResponse, project, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_invite_sme(self, client: Codex) -> None: + response = client.projects.with_raw_response.invite_sme( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + email="email", + page_type="query_log", + url_query_string="url_query_string", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + project = response.parse() + assert_matches_type(ProjectInviteSmeResponse, project, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_invite_sme(self, client: Codex) -> None: + with client.projects.with_streaming_response.invite_sme( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + email="email", + page_type="query_log", + url_query_string="url_query_string", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + project = response.parse() + assert_matches_type(ProjectInviteSmeResponse, project, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_invite_sme(self, client: Codex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + client.projects.with_raw_response.invite_sme( + project_id="", + email="email", + page_type="query_log", + url_query_string="url_query_string", + ) + @pytest.mark.skip() @parametrize def test_method_retrieve_analytics(self, client: Codex) -> None: @@ -1160,6 +1215,60 @@ async def test_path_params_increment_queries(self, async_client: AsyncCodex) -> project_id="", ) + @pytest.mark.skip() + @parametrize + async def test_method_invite_sme(self, async_client: AsyncCodex) -> None: + project = await async_client.projects.invite_sme( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + email="email", + page_type="query_log", + url_query_string="url_query_string", + ) + assert_matches_type(ProjectInviteSmeResponse, project, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_invite_sme(self, async_client: AsyncCodex) -> None: + response = await async_client.projects.with_raw_response.invite_sme( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + email="email", + page_type="query_log", + 
url_query_string="url_query_string", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + project = await response.parse() + assert_matches_type(ProjectInviteSmeResponse, project, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_invite_sme(self, async_client: AsyncCodex) -> None: + async with async_client.projects.with_streaming_response.invite_sme( + project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + email="email", + page_type="query_log", + url_query_string="url_query_string", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + project = await response.parse() + assert_matches_type(ProjectInviteSmeResponse, project, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_invite_sme(self, async_client: AsyncCodex) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"): + await async_client.projects.with_raw_response.invite_sme( + project_id="", + email="email", + page_type="query_log", + url_query_string="url_query_string", + ) + @pytest.mark.skip() @parametrize async def test_method_retrieve_analytics(self, async_client: AsyncCodex) -> None: From ee0e109227593a26ff771b265bc838b29c25c66e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sun, 22 Jun 2025 20:07:30 +0000 Subject: [PATCH 31/32] feat(api): add bearer token auth --- .stats.yml | 2 +- src/codex/_client.py | 44 ++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 41 insertions(+), 5 deletions(-) diff --git a/.stats.yml b/.stats.yml index bd77f787..20dc969f 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,3 +1,3 @@ configured_endpoints: 65 openapi_spec_hash: eeb8ebc5600523bdfad046381a929572 -config_hash: 63e520502003839482d0dbeb82132064 +config_hash: 14b2643a0ec60cf326dfed00939644ff diff --git a/src/codex/_client.py b/src/codex/_client.py index 7464f6dc..352d0dbc 100644 --- a/src/codex/_client.py +++ b/src/codex/_client.py @@ -63,6 +63,7 @@ class Codex(SyncAPIClient): with_streaming_response: CodexWithStreamedResponse # client options + auth_token: str | None api_key: str | None access_key: str | None @@ -71,6 +72,7 @@ class Codex(SyncAPIClient): def __init__( self, *, + auth_token: str | None = None, api_key: str | None = None, access_key: str | None = None, environment: Literal["production", "staging", "local"] | NotGiven = NOT_GIVEN, @@ -94,6 +96,8 @@ def __init__( _strict_response_validation: bool = False, ) -> None: """Construct a new synchronous Codex client instance.""" + self.auth_token = auth_token + self.api_key = api_key self.access_key = access_key @@ -151,7 +155,14 @@ def qs(self) -> Querystring: @property @override def auth_headers(self) -> dict[str, str]: - return {**self._authenticated_api_key, **self._public_access_key} + return {**self._http_bearer, **self._authenticated_api_key, **self._public_access_key} + + @property + def _http_bearer(self) -> dict[str, str]: + auth_token = self.auth_token + if auth_token is None: + return {} + return {"Authorization": f"Bearer {auth_token}"} @property def _authenticated_api_key(self) -> dict[str, str]: @@ -178,6 +189,11 @@ def default_headers(self) -> dict[str, str | Omit]: @override def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None: + if self.auth_token 
and headers.get("Authorization"): + return + if isinstance(custom_headers.get("Authorization"), Omit): + return + if self.api_key and headers.get("X-API-Key"): return if isinstance(custom_headers.get("X-API-Key"), Omit): @@ -189,12 +205,13 @@ def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None: return raise TypeError( - '"Could not resolve authentication method. Expected either api_key or access_key to be set. Or for one of the `X-API-Key` or `X-Access-Key` headers to be explicitly omitted"' + '"Could not resolve authentication method. Expected one of auth_token, api_key or access_key to be set. Or for one of the `Authorization`, `X-API-Key` or `X-Access-Key` headers to be explicitly omitted"' ) def copy( self, *, + auth_token: str | None = None, api_key: str | None = None, access_key: str | None = None, environment: Literal["production", "staging", "local"] | None = None, @@ -231,6 +248,7 @@ def copy( http_client = http_client or self._client return self.__class__( + auth_token=auth_token or self.auth_token, api_key=api_key or self.api_key, access_key=access_key or self.access_key, base_url=base_url or self.base_url, @@ -291,6 +309,7 @@ class AsyncCodex(AsyncAPIClient): with_streaming_response: AsyncCodexWithStreamedResponse # client options + auth_token: str | None api_key: str | None access_key: str | None @@ -299,6 +318,7 @@ class AsyncCodex(AsyncAPIClient): def __init__( self, *, + auth_token: str | None = None, api_key: str | None = None, access_key: str | None = None, environment: Literal["production", "staging", "local"] | NotGiven = NOT_GIVEN, @@ -322,6 +342,8 @@ def __init__( _strict_response_validation: bool = False, ) -> None: """Construct a new async AsyncCodex client instance.""" + self.auth_token = auth_token + self.api_key = api_key self.access_key = access_key @@ -379,7 +401,14 @@ def qs(self) -> Querystring: @property @override def auth_headers(self) -> dict[str, str]: - return {**self._authenticated_api_key, **self._public_access_key} + return {**self._http_bearer, **self._authenticated_api_key, **self._public_access_key} + + @property + def _http_bearer(self) -> dict[str, str]: + auth_token = self.auth_token + if auth_token is None: + return {} + return {"Authorization": f"Bearer {auth_token}"} @property def _authenticated_api_key(self) -> dict[str, str]: @@ -406,6 +435,11 @@ def default_headers(self) -> dict[str, str | Omit]: @override def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None: + if self.auth_token and headers.get("Authorization"): + return + if isinstance(custom_headers.get("Authorization"), Omit): + return + if self.api_key and headers.get("X-API-Key"): return if isinstance(custom_headers.get("X-API-Key"), Omit): @@ -417,12 +451,13 @@ def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None: return raise TypeError( - '"Could not resolve authentication method. Expected either api_key or access_key to be set. Or for one of the `X-API-Key` or `X-Access-Key` headers to be explicitly omitted"' + '"Could not resolve authentication method. Expected one of auth_token, api_key or access_key to be set. 
Or for one of the `Authorization`, `X-API-Key` or `X-Access-Key` headers to be explicitly omitted"' ) def copy( self, *, + auth_token: str | None = None, api_key: str | None = None, access_key: str | None = None, environment: Literal["production", "staging", "local"] | None = None, @@ -459,6 +494,7 @@ def copy( http_client = http_client or self._client return self.__class__( + auth_token=auth_token or self.auth_token, api_key=api_key or self.api_key, access_key=access_key or self.access_key, base_url=base_url or self.base_url, From b034a7c95005d0ce22b04db78681db6e3ef33c38 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sun, 22 Jun 2025 20:07:56 +0000 Subject: [PATCH 32/32] release: 0.1.0-alpha.21 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 46 +++++++++++++++++++++++++++++++++++ pyproject.toml | 2 +- src/codex/_version.py | 2 +- 4 files changed, 49 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index fac14074..7c31fce2 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.1.0-alpha.20" + ".": "0.1.0-alpha.21" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index f151d604..00c2b1a9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,51 @@ # Changelog +## 0.1.0-alpha.21 (2025-06-22) + +Full Changelog: [v0.1.0-alpha.20...v0.1.0-alpha.21](https://github.com/cleanlab/codex-python/compare/v0.1.0-alpha.20...v0.1.0-alpha.21) + +### Features + +* **api:** add bearer token auth ([ee0e109](https://github.com/cleanlab/codex-python/commit/ee0e109227593a26ff771b265bc838b29c25c66e)) +* **api:** add new endpoints ([7a72928](https://github.com/cleanlab/codex-python/commit/7a729283e11ae9dfb3ea7b337db31f179a5b108c)) +* **api:** api update ([b8729dd](https://github.com/cleanlab/codex-python/commit/b8729ddd238b931c8739b960f0c5f5c185c69119)) +* **api:** api update ([20a7b36](https://github.com/cleanlab/codex-python/commit/20a7b367fab70147c0ce071ad56b9ee02d27df7b)) +* **api:** api update ([425caff](https://github.com/cleanlab/codex-python/commit/425caff55b0a813f637afee7a973b66053bd366f)) +* **api:** api update ([0a636fa](https://github.com/cleanlab/codex-python/commit/0a636fa5930c53671923de2f191a4e5c74328f7f)) +* **api:** api update ([8489bc7](https://github.com/cleanlab/codex-python/commit/8489bc74223f2132cec88583f3fabbf5620dc599)) +* **api:** api update ([ef4acf2](https://github.com/cleanlab/codex-python/commit/ef4acf292c728a838aecd6539d278b8c128be68a)) +* **api:** api update ([3a52931](https://github.com/cleanlab/codex-python/commit/3a5293161e7313d7c18ec61be1b8e7ee56bad8c9)) +* **api:** api update ([7bbf57a](https://github.com/cleanlab/codex-python/commit/7bbf57ae5327ddd85e6729997a4f85b427758258)) +* **api:** api update ([40ae04a](https://github.com/cleanlab/codex-python/commit/40ae04a279ba1e2573d17a17e097f71d1347a3d3)) +* **client:** add follow_redirects request option ([f8a8e15](https://github.com/cleanlab/codex-python/commit/f8a8e15df5a70e7065127943dfae2261724ca3f2)) + + +### Bug Fixes + +* **client:** correctly parse binary response | stream ([9debf1d](https://github.com/cleanlab/codex-python/commit/9debf1d4208f1c65a67211fc4e7adce3193ed291)) +* **docs/api:** remove references to nonexistent types ([7ba3858](https://github.com/cleanlab/codex-python/commit/7ba3858c1c968c093c676478a1c7e5e13b92c12a)) +* **tests:** fix: tests which call HTTP endpoints directly with the example parameters 
([14a4f3c](https://github.com/cleanlab/codex-python/commit/14a4f3cff659d280f002fa694024e6c9814b3922)) + + +### Chores + +* **api:** mark some methods as deprecated ([57f522f](https://github.com/cleanlab/codex-python/commit/57f522fbd04637849146636a83976fe696160a97)) +* **ci:** enable for pull requests ([a28cd9c](https://github.com/cleanlab/codex-python/commit/a28cd9c9e47987a1193e345fb48c3dc75512e87a)) +* **ci:** fix installation instructions ([3aa9884](https://github.com/cleanlab/codex-python/commit/3aa98843e0f042734eb5b74ea86c8dcca8636954)) +* **docs:** grammar improvements ([428e500](https://github.com/cleanlab/codex-python/commit/428e5001b6b5576f5383c0f2ffd3ad5fe085128a)) +* **docs:** remove reference to rye shell ([8fcb74d](https://github.com/cleanlab/codex-python/commit/8fcb74d6d5ef8db36a78031c9bf59f0840def1d4)) +* **docs:** remove unnecessary param examples ([2ef6184](https://github.com/cleanlab/codex-python/commit/2ef6184b1f5acfb9e924b6adb0ca6b453507ddd2)) +* **internal:** codegen related update ([18f661d](https://github.com/cleanlab/codex-python/commit/18f661d21b849f15cbe85ce5063ef0dea877d89f)) +* **internal:** update conftest.py ([9bd8925](https://github.com/cleanlab/codex-python/commit/9bd89258f9ce4448e92957c31cc0ff2457f105c9)) +* **readme:** update badges ([0bb6eec](https://github.com/cleanlab/codex-python/commit/0bb6eec41fc0ccb6fcc2d987552dd56a602d6ccb)) +* **tests:** add tests for httpx client instantiation & proxies ([bf46662](https://github.com/cleanlab/codex-python/commit/bf466627bf3e9dc4a8d1f3283f94a75bab241a58)) +* **tests:** run tests in parallel ([0a3aaa1](https://github.com/cleanlab/codex-python/commit/0a3aaa1955e03895d340cfb8b5512bbcc4556d88)) + + +### Documentation + +* **client:** fix httpx.Timeout documentation reference ([2836ef3](https://github.com/cleanlab/codex-python/commit/2836ef3a6e7fca6fb2a92d6b13fbe0e38f30f1ad)) + ## 0.1.0-alpha.20 (2025-05-15) Full Changelog: [v0.1.0-alpha.19...v0.1.0-alpha.20](https://github.com/cleanlab/codex-python/compare/v0.1.0-alpha.19...v0.1.0-alpha.20) diff --git a/pyproject.toml b/pyproject.toml index 03a1e985..088289dc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "codex-sdk" -version = "0.1.0-alpha.20" +version = "0.1.0-alpha.21" description = "Internal SDK used within cleanlab-codex package. Refer to https://pypi.org/project/cleanlab-codex/ instead." dynamic = ["readme"] license = "MIT" diff --git a/src/codex/_version.py b/src/codex/_version.py index 44d6131d..3b23c98f 100644 --- a/src/codex/_version.py +++ b/src/codex/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "codex" -__version__ = "0.1.0-alpha.20" # x-release-please-version +__version__ = "0.1.0-alpha.21" # x-release-please-version